{ "best_metric": null, "best_model_checkpoint": null, "epoch": 50.0, "eval_steps": 100, "global_step": 5600, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01, "grad_norm": null, "learning_rate": 0.0, "loss": 8.857, "step": 1 }, { "epoch": 0.02, "grad_norm": 2.5771467685699463, "learning_rate": 6e-07, "loss": 9.0055, "step": 2 }, { "epoch": 0.03, "grad_norm": 2.5344083309173584, "learning_rate": 1.2e-06, "loss": 8.8467, "step": 3 }, { "epoch": 0.04, "grad_norm": 2.5785155296325684, "learning_rate": 1.8e-06, "loss": 8.9835, "step": 4 }, { "epoch": 0.04, "grad_norm": 2.5720224380493164, "learning_rate": 2.4e-06, "loss": 9.0011, "step": 5 }, { "epoch": 0.05, "grad_norm": 2.5483667850494385, "learning_rate": 2.9999999999999997e-06, "loss": 8.8629, "step": 6 }, { "epoch": 0.06, "grad_norm": 2.550671100616455, "learning_rate": 3.6e-06, "loss": 9.07, "step": 7 }, { "epoch": 0.07, "grad_norm": 2.605531692504883, "learning_rate": 4.2e-06, "loss": 9.0519, "step": 8 }, { "epoch": 0.08, "grad_norm": 2.6086299419403076, "learning_rate": 4.8e-06, "loss": 9.0498, "step": 9 }, { "epoch": 0.09, "grad_norm": 2.5434601306915283, "learning_rate": 5.399999999999999e-06, "loss": 8.6956, "step": 10 }, { "epoch": 0.1, "grad_norm": 2.583211898803711, "learning_rate": 5.999999999999999e-06, "loss": 8.8429, "step": 11 }, { "epoch": 0.11, "grad_norm": 2.576791524887085, "learning_rate": 6.599999999999999e-06, "loss": 8.7839, "step": 12 }, { "epoch": 0.12, "grad_norm": 2.724433660507202, "learning_rate": 7.2e-06, "loss": 9.0123, "step": 13 }, { "epoch": 0.12, "grad_norm": 2.7472896575927734, "learning_rate": 7.799999999999998e-06, "loss": 8.7864, "step": 14 }, { "epoch": 0.13, "grad_norm": 2.798316717147827, "learning_rate": 8.4e-06, "loss": 8.8184, "step": 15 }, { "epoch": 0.14, "grad_norm": 2.821938991546631, "learning_rate": 8.999999999999999e-06, "loss": 8.7918, "step": 16 }, { "epoch": 0.15, "grad_norm": 
3.066065788269043, "learning_rate": 9.6e-06, "loss": 8.7545, "step": 17 }, { "epoch": 0.16, "grad_norm": 2.990442991256714, "learning_rate": 1.02e-05, "loss": 8.4884, "step": 18 }, { "epoch": 0.17, "grad_norm": 3.1646595001220703, "learning_rate": 1.0799999999999998e-05, "loss": 8.7851, "step": 19 }, { "epoch": 0.18, "grad_norm": 3.23149037361145, "learning_rate": 1.14e-05, "loss": 8.4444, "step": 20 }, { "epoch": 0.19, "grad_norm": 3.3869054317474365, "learning_rate": 1.1999999999999999e-05, "loss": 8.5387, "step": 21 }, { "epoch": 0.2, "grad_norm": 3.4858381748199463, "learning_rate": 1.26e-05, "loss": 8.5025, "step": 22 }, { "epoch": 0.21, "grad_norm": 3.6255927085876465, "learning_rate": 1.3199999999999997e-05, "loss": 8.3918, "step": 23 }, { "epoch": 0.21, "grad_norm": 3.784517765045166, "learning_rate": 1.3799999999999998e-05, "loss": 8.4957, "step": 24 }, { "epoch": 0.22, "grad_norm": 3.8559398651123047, "learning_rate": 1.44e-05, "loss": 8.443, "step": 25 }, { "epoch": 0.23, "grad_norm": 4.133277416229248, "learning_rate": 1.4999999999999999e-05, "loss": 8.2685, "step": 26 }, { "epoch": 0.24, "grad_norm": 4.246262550354004, "learning_rate": 1.5599999999999996e-05, "loss": 8.0652, "step": 27 }, { "epoch": 0.25, "grad_norm": 4.553827285766602, "learning_rate": 1.6199999999999997e-05, "loss": 7.9432, "step": 28 }, { "epoch": 0.26, "grad_norm": 4.786936283111572, "learning_rate": 1.68e-05, "loss": 7.8762, "step": 29 }, { "epoch": 0.27, "grad_norm": 5.037590980529785, "learning_rate": 1.74e-05, "loss": 7.8238, "step": 30 }, { "epoch": 0.28, "grad_norm": 5.190773010253906, "learning_rate": 1.7999999999999997e-05, "loss": 7.8229, "step": 31 }, { "epoch": 0.29, "grad_norm": null, "learning_rate": 1.7999999999999997e-05, "loss": 8.1275, "step": 32 }, { "epoch": 0.29, "grad_norm": 5.320779323577881, "learning_rate": 1.8599999999999998e-05, "loss": 7.5935, "step": 33 }, { "epoch": 0.3, "grad_norm": 5.777307987213135, "learning_rate": 1.92e-05, "loss": 7.6314, 
"step": 34 }, { "epoch": 0.31, "grad_norm": 6.084140300750732, "learning_rate": 1.98e-05, "loss": 7.4086, "step": 35 }, { "epoch": 0.32, "grad_norm": 6.326981544494629, "learning_rate": 2.04e-05, "loss": 7.1818, "step": 36 }, { "epoch": 0.33, "grad_norm": 6.600687503814697, "learning_rate": 2.1e-05, "loss": 6.999, "step": 37 }, { "epoch": 0.34, "grad_norm": 7.262928009033203, "learning_rate": 2.1599999999999996e-05, "loss": 7.098, "step": 38 }, { "epoch": 0.35, "grad_norm": 7.6612653732299805, "learning_rate": 2.2199999999999998e-05, "loss": 6.7866, "step": 39 }, { "epoch": 0.36, "grad_norm": 8.096963882446289, "learning_rate": 2.28e-05, "loss": 6.6355, "step": 40 }, { "epoch": 0.37, "grad_norm": 8.546558380126953, "learning_rate": 2.34e-05, "loss": 6.4434, "step": 41 }, { "epoch": 0.38, "grad_norm": 8.764568328857422, "learning_rate": 2.3999999999999997e-05, "loss": 6.1754, "step": 42 }, { "epoch": 0.38, "grad_norm": 8.918572425842285, "learning_rate": 2.4599999999999998e-05, "loss": 5.8441, "step": 43 }, { "epoch": 0.39, "grad_norm": 10.128335952758789, "learning_rate": 2.52e-05, "loss": 6.056, "step": 44 }, { "epoch": 0.4, "grad_norm": 9.886902809143066, "learning_rate": 2.5799999999999997e-05, "loss": 5.5803, "step": 45 }, { "epoch": 0.41, "grad_norm": 9.779302597045898, "learning_rate": 2.6399999999999995e-05, "loss": 5.3041, "step": 46 }, { "epoch": 0.42, "grad_norm": 10.12820053100586, "learning_rate": 2.6999999999999996e-05, "loss": 5.1445, "step": 47 }, { "epoch": 0.43, "grad_norm": 10.151182174682617, "learning_rate": 2.7599999999999997e-05, "loss": 4.955, "step": 48 }, { "epoch": 0.44, "grad_norm": 10.030814170837402, "learning_rate": 2.8199999999999998e-05, "loss": 4.7517, "step": 49 }, { "epoch": 0.45, "grad_norm": 10.629888534545898, "learning_rate": 2.88e-05, "loss": 4.7841, "step": 50 }, { "epoch": 0.46, "grad_norm": 9.391251564025879, "learning_rate": 2.94e-05, "loss": 4.3701, "step": 51 }, { "epoch": 0.46, "grad_norm": 9.37604808807373, 
"learning_rate": 2.9999999999999997e-05, "loss": 4.2677, "step": 52 }, { "epoch": 0.47, "grad_norm": 8.82244873046875, "learning_rate": 3.06e-05, "loss": 4.1149, "step": 53 }, { "epoch": 0.48, "grad_norm": 8.434287071228027, "learning_rate": 3.119999999999999e-05, "loss": 4.0036, "step": 54 }, { "epoch": 0.49, "grad_norm": 7.634397029876709, "learning_rate": 3.1799999999999994e-05, "loss": 3.8544, "step": 55 }, { "epoch": 0.5, "grad_norm": 6.66248893737793, "learning_rate": 3.2399999999999995e-05, "loss": 3.7289, "step": 56 }, { "epoch": 0.51, "grad_norm": 6.404279708862305, "learning_rate": 3.2999999999999996e-05, "loss": 3.677, "step": 57 }, { "epoch": 0.52, "grad_norm": 5.844570159912109, "learning_rate": 3.36e-05, "loss": 3.5791, "step": 58 }, { "epoch": 0.53, "grad_norm": 5.26457405090332, "learning_rate": 3.42e-05, "loss": 3.5138, "step": 59 }, { "epoch": 0.54, "grad_norm": 4.4815754890441895, "learning_rate": 3.48e-05, "loss": 3.4393, "step": 60 }, { "epoch": 0.54, "grad_norm": 3.832083225250244, "learning_rate": 3.539999999999999e-05, "loss": 3.382, "step": 61 }, { "epoch": 0.55, "grad_norm": 3.4209747314453125, "learning_rate": 3.5999999999999994e-05, "loss": 3.3589, "step": 62 }, { "epoch": 0.56, "grad_norm": 2.663290500640869, "learning_rate": 3.6599999999999995e-05, "loss": 3.3078, "step": 63 }, { "epoch": 0.57, "grad_norm": 2.2917840480804443, "learning_rate": 3.7199999999999996e-05, "loss": 3.2546, "step": 64 }, { "epoch": 0.58, "grad_norm": 1.849010944366455, "learning_rate": 3.78e-05, "loss": 3.2213, "step": 65 }, { "epoch": 0.59, "grad_norm": 1.583945870399475, "learning_rate": 3.84e-05, "loss": 3.19, "step": 66 }, { "epoch": 0.6, "grad_norm": 1.6001330614089966, "learning_rate": 3.9e-05, "loss": 3.1823, "step": 67 }, { "epoch": 0.61, "grad_norm": 1.4142842292785645, "learning_rate": 3.96e-05, "loss": 3.1601, "step": 68 }, { "epoch": 0.62, "grad_norm": 1.3812508583068848, "learning_rate": 4.02e-05, "loss": 3.1669, "step": 69 }, { "epoch": 0.62, 
"grad_norm": 1.1935653686523438, "learning_rate": 4.08e-05, "loss": 3.0948, "step": 70 }, { "epoch": 0.63, "grad_norm": 1.2233781814575195, "learning_rate": 4.14e-05, "loss": 3.0698, "step": 71 }, { "epoch": 0.64, "grad_norm": 1.3483402729034424, "learning_rate": 4.2e-05, "loss": 3.0577, "step": 72 }, { "epoch": 0.65, "grad_norm": 1.1394308805465698, "learning_rate": 4.259999999999999e-05, "loss": 3.0351, "step": 73 }, { "epoch": 0.66, "grad_norm": 0.8430189490318298, "learning_rate": 4.319999999999999e-05, "loss": 3.0126, "step": 74 }, { "epoch": 0.67, "grad_norm": 1.0437767505645752, "learning_rate": 4.3799999999999994e-05, "loss": 3.0426, "step": 75 }, { "epoch": 0.68, "grad_norm": 0.6937194466590881, "learning_rate": 4.4399999999999995e-05, "loss": 2.9787, "step": 76 }, { "epoch": 0.69, "grad_norm": 0.6742655634880066, "learning_rate": 4.4999999999999996e-05, "loss": 2.9759, "step": 77 }, { "epoch": 0.7, "grad_norm": 0.7938650846481323, "learning_rate": 4.56e-05, "loss": 2.9632, "step": 78 }, { "epoch": 0.71, "grad_norm": 0.8932818174362183, "learning_rate": 4.62e-05, "loss": 2.9627, "step": 79 }, { "epoch": 0.71, "grad_norm": 0.6115772724151611, "learning_rate": 4.68e-05, "loss": 2.9523, "step": 80 }, { "epoch": 0.72, "grad_norm": 0.7806911468505859, "learning_rate": 4.7399999999999993e-05, "loss": 2.9645, "step": 81 }, { "epoch": 0.73, "grad_norm": 0.5309816598892212, "learning_rate": 4.7999999999999994e-05, "loss": 2.9546, "step": 82 }, { "epoch": 0.74, "grad_norm": 0.42638012766838074, "learning_rate": 4.8599999999999995e-05, "loss": 2.9237, "step": 83 }, { "epoch": 0.75, "grad_norm": 0.4000401198863983, "learning_rate": 4.9199999999999997e-05, "loss": 2.9201, "step": 84 }, { "epoch": 0.76, "grad_norm": 0.4595623314380646, "learning_rate": 4.98e-05, "loss": 2.9188, "step": 85 }, { "epoch": 0.77, "grad_norm": 0.44960692524909973, "learning_rate": 5.04e-05, "loss": 2.9148, "step": 86 }, { "epoch": 0.78, "grad_norm": 0.3653245270252228, "learning_rate": 
5.1e-05, "loss": 2.9165, "step": 87 }, { "epoch": 0.79, "grad_norm": 0.6640657782554626, "learning_rate": 5.1599999999999994e-05, "loss": 2.931, "step": 88 }, { "epoch": 0.79, "grad_norm": 0.5979864597320557, "learning_rate": 5.2199999999999995e-05, "loss": 2.8992, "step": 89 }, { "epoch": 0.8, "grad_norm": 0.34149181842803955, "learning_rate": 5.279999999999999e-05, "loss": 2.8991, "step": 90 }, { "epoch": 0.81, "grad_norm": 0.28454726934432983, "learning_rate": 5.339999999999999e-05, "loss": 2.898, "step": 91 }, { "epoch": 0.82, "grad_norm": 0.2736092805862427, "learning_rate": 5.399999999999999e-05, "loss": 2.9005, "step": 92 }, { "epoch": 0.83, "grad_norm": 0.25253403186798096, "learning_rate": 5.459999999999999e-05, "loss": 2.9024, "step": 93 }, { "epoch": 0.84, "grad_norm": 0.6596384644508362, "learning_rate": 5.519999999999999e-05, "loss": 2.9311, "step": 94 }, { "epoch": 0.85, "grad_norm": 0.5612274408340454, "learning_rate": 5.5799999999999994e-05, "loss": 2.8869, "step": 95 }, { "epoch": 0.86, "grad_norm": 0.2838483452796936, "learning_rate": 5.6399999999999995e-05, "loss": 2.8794, "step": 96 }, { "epoch": 0.87, "grad_norm": 0.22255806624889374, "learning_rate": 5.6999999999999996e-05, "loss": 2.8877, "step": 97 }, { "epoch": 0.88, "grad_norm": 0.2908608317375183, "learning_rate": 5.76e-05, "loss": 2.8888, "step": 98 }, { "epoch": 0.88, "grad_norm": 0.20237217843532562, "learning_rate": 5.82e-05, "loss": 2.8899, "step": 99 }, { "epoch": 0.89, "grad_norm": 0.19447995722293854, "learning_rate": 5.88e-05, "loss": 2.911, "step": 100 }, { "epoch": 0.89, "eval_cer": 1.0, "eval_loss": 2.9202330112457275, "eval_runtime": 25.3685, "eval_samples_per_second": 104.145, "eval_steps_per_second": 1.656, "eval_wer": 1.0, "step": 100 }, { "epoch": 0.9, "grad_norm": 0.5298409461975098, "learning_rate": 5.94e-05, "loss": 2.8743, "step": 101 }, { "epoch": 0.91, "grad_norm": 1.0151143074035645, "learning_rate": 5.9999999999999995e-05, "loss": 2.8804, "step": 102 }, { "epoch": 
0.92, "grad_norm": 0.2283215969800949, "learning_rate": 6.0599999999999996e-05, "loss": 2.8721, "step": 103 }, { "epoch": 0.93, "grad_norm": 0.39570146799087524, "learning_rate": 6.12e-05, "loss": 2.8835, "step": 104 }, { "epoch": 0.94, "grad_norm": 0.49264252185821533, "learning_rate": 6.18e-05, "loss": 2.8768, "step": 105 }, { "epoch": 0.95, "grad_norm": 0.40177270770072937, "learning_rate": 6.239999999999999e-05, "loss": 2.9014, "step": 106 }, { "epoch": 0.96, "grad_norm": 0.3801201581954956, "learning_rate": 6.299999999999999e-05, "loss": 2.885, "step": 107 }, { "epoch": 0.96, "grad_norm": 0.6969913244247437, "learning_rate": 6.359999999999999e-05, "loss": 2.8762, "step": 108 }, { "epoch": 0.97, "grad_norm": 0.6147791147232056, "learning_rate": 6.419999999999999e-05, "loss": 2.8752, "step": 109 }, { "epoch": 0.98, "grad_norm": 0.3819006085395813, "learning_rate": 6.479999999999999e-05, "loss": 2.8739, "step": 110 }, { "epoch": 0.99, "grad_norm": 0.48288494348526, "learning_rate": 6.539999999999999e-05, "loss": 2.8775, "step": 111 }, { "epoch": 1.0, "grad_norm": 0.5739099383354187, "learning_rate": 6.599999999999999e-05, "loss": 2.8859, "step": 112 }, { "epoch": 1.01, "grad_norm": 0.25912415981292725, "learning_rate": 6.659999999999999e-05, "loss": 2.8655, "step": 113 }, { "epoch": 1.02, "grad_norm": 0.3443286120891571, "learning_rate": 6.72e-05, "loss": 2.8648, "step": 114 }, { "epoch": 1.03, "grad_norm": 0.823746919631958, "learning_rate": 6.78e-05, "loss": 2.869, "step": 115 }, { "epoch": 1.04, "grad_norm": 0.7289327383041382, "learning_rate": 6.84e-05, "loss": 2.872, "step": 116 }, { "epoch": 1.04, "grad_norm": 0.13738706707954407, "learning_rate": 6.9e-05, "loss": 2.8722, "step": 117 }, { "epoch": 1.05, "grad_norm": 0.5169634819030762, "learning_rate": 6.96e-05, "loss": 2.8835, "step": 118 }, { "epoch": 1.06, "grad_norm": 0.4314047694206238, "learning_rate": 7.02e-05, "loss": 2.8764, "step": 119 }, { "epoch": 1.07, "grad_norm": 0.29641491174697876, 
"learning_rate": 7.079999999999999e-05, "loss": 2.8572, "step": 120 }, { "epoch": 1.08, "grad_norm": 0.41689497232437134, "learning_rate": 7.139999999999999e-05, "loss": 2.8577, "step": 121 }, { "epoch": 1.09, "grad_norm": 0.45255449414253235, "learning_rate": 7.199999999999999e-05, "loss": 2.8569, "step": 122 }, { "epoch": 1.1, "grad_norm": 0.15350182354450226, "learning_rate": 7.259999999999999e-05, "loss": 2.8582, "step": 123 }, { "epoch": 1.11, "grad_norm": 0.44321611523628235, "learning_rate": 7.319999999999999e-05, "loss": 2.865, "step": 124 }, { "epoch": 1.12, "grad_norm": 0.5034608840942383, "learning_rate": 7.379999999999999e-05, "loss": 2.8892, "step": 125 }, { "epoch": 1.12, "grad_norm": 0.33695104718208313, "learning_rate": 7.439999999999999e-05, "loss": 2.8518, "step": 126 }, { "epoch": 1.13, "grad_norm": 0.1340596228837967, "learning_rate": 7.5e-05, "loss": 2.8534, "step": 127 }, { "epoch": 1.14, "grad_norm": 0.15238897502422333, "learning_rate": 7.56e-05, "loss": 2.8507, "step": 128 }, { "epoch": 1.15, "grad_norm": 0.5069476366043091, "learning_rate": 7.62e-05, "loss": 2.8568, "step": 129 }, { "epoch": 1.16, "grad_norm": 0.38025808334350586, "learning_rate": 7.68e-05, "loss": 2.8556, "step": 130 }, { "epoch": 1.17, "grad_norm": 0.5071201920509338, "learning_rate": 7.74e-05, "loss": 2.881, "step": 131 }, { "epoch": 1.18, "grad_norm": 0.10134594887495041, "learning_rate": 7.8e-05, "loss": 2.8519, "step": 132 }, { "epoch": 1.19, "grad_norm": 0.41321516036987305, "learning_rate": 7.86e-05, "loss": 2.851, "step": 133 }, { "epoch": 1.2, "grad_norm": 0.7913742065429688, "learning_rate": 7.92e-05, "loss": 2.848, "step": 134 }, { "epoch": 1.21, "grad_norm": 0.20887543261051178, "learning_rate": 7.98e-05, "loss": 2.853, "step": 135 }, { "epoch": 1.21, "grad_norm": 0.6060431599617004, "learning_rate": 8.04e-05, "loss": 2.8567, "step": 136 }, { "epoch": 1.22, "grad_norm": 0.3716076612472534, "learning_rate": 8.1e-05, "loss": 2.8746, "step": 137 }, { "epoch": 
1.23, "grad_norm": 0.8390268087387085, "learning_rate": 8.16e-05, "loss": 2.853, "step": 138 }, { "epoch": 1.24, "grad_norm": 0.12195856869220734, "learning_rate": 8.22e-05, "loss": 2.8403, "step": 139 }, { "epoch": 1.25, "grad_norm": 0.6726837158203125, "learning_rate": 8.28e-05, "loss": 2.8468, "step": 140 }, { "epoch": 1.26, "grad_norm": 0.3162238299846649, "learning_rate": 8.34e-05, "loss": 2.8498, "step": 141 }, { "epoch": 1.27, "grad_norm": 0.5609211921691895, "learning_rate": 8.4e-05, "loss": 2.8494, "step": 142 }, { "epoch": 1.28, "grad_norm": 0.4808315932750702, "learning_rate": 8.459999999999998e-05, "loss": 2.8579, "step": 143 }, { "epoch": 1.29, "grad_norm": 0.10037563741207123, "learning_rate": 8.519999999999998e-05, "loss": 2.8612, "step": 144 }, { "epoch": 1.29, "grad_norm": 0.5827155709266663, "learning_rate": 8.579999999999998e-05, "loss": 2.8418, "step": 145 }, { "epoch": 1.3, "grad_norm": 0.16088470816612244, "learning_rate": 8.639999999999999e-05, "loss": 2.8429, "step": 146 }, { "epoch": 1.31, "grad_norm": 0.6953414678573608, "learning_rate": 8.699999999999999e-05, "loss": 2.8449, "step": 147 }, { "epoch": 1.32, "grad_norm": 0.28255772590637207, "learning_rate": 8.759999999999999e-05, "loss": 2.8393, "step": 148 }, { "epoch": 1.33, "grad_norm": 0.5030548572540283, "learning_rate": 8.819999999999999e-05, "loss": 2.8519, "step": 149 }, { "epoch": 1.34, "grad_norm": 0.5152975916862488, "learning_rate": 8.879999999999999e-05, "loss": 2.8604, "step": 150 }, { "epoch": 1.35, "grad_norm": 0.9333630800247192, "learning_rate": 8.939999999999999e-05, "loss": 2.8423, "step": 151 }, { "epoch": 1.36, "grad_norm": 0.4193758964538574, "learning_rate": 8.999999999999999e-05, "loss": 2.8411, "step": 152 }, { "epoch": 1.37, "grad_norm": 0.8759561777114868, "learning_rate": 9.059999999999999e-05, "loss": 2.8471, "step": 153 }, { "epoch": 1.38, "grad_norm": 0.1738540530204773, "learning_rate": 9.12e-05, "loss": 2.843, "step": 154 }, { "epoch": 1.38, "grad_norm": 
0.7106860280036926, "learning_rate": 9.18e-05, "loss": 2.8416, "step": 155 }, { "epoch": 1.39, "grad_norm": 0.7692212462425232, "learning_rate": 9.24e-05, "loss": 2.8608, "step": 156 }, { "epoch": 1.4, "grad_norm": 0.10494925081729889, "learning_rate": 9.3e-05, "loss": 2.8365, "step": 157 }, { "epoch": 1.41, "grad_norm": 0.651297926902771, "learning_rate": 9.36e-05, "loss": 2.8454, "step": 158 }, { "epoch": 1.42, "grad_norm": 0.5796992778778076, "learning_rate": 9.419999999999999e-05, "loss": 2.8359, "step": 159 }, { "epoch": 1.43, "grad_norm": 0.2963010370731354, "learning_rate": 9.479999999999999e-05, "loss": 2.8367, "step": 160 }, { "epoch": 1.44, "grad_norm": 0.7967988848686218, "learning_rate": 9.539999999999999e-05, "loss": 2.8444, "step": 161 }, { "epoch": 1.45, "grad_norm": 0.2917342483997345, "learning_rate": 9.599999999999999e-05, "loss": 2.8668, "step": 162 }, { "epoch": 1.46, "grad_norm": 0.21879711747169495, "learning_rate": 9.659999999999999e-05, "loss": 2.8316, "step": 163 }, { "epoch": 1.46, "grad_norm": 0.09713856130838394, "learning_rate": 9.719999999999999e-05, "loss": 2.8323, "step": 164 }, { "epoch": 1.47, "grad_norm": 0.16893808543682098, "learning_rate": 9.779999999999999e-05, "loss": 2.8323, "step": 165 }, { "epoch": 1.48, "grad_norm": 0.21297118067741394, "learning_rate": 9.839999999999999e-05, "loss": 2.8347, "step": 166 }, { "epoch": 1.49, "grad_norm": 0.09424562007188797, "learning_rate": 9.9e-05, "loss": 2.8343, "step": 167 }, { "epoch": 1.5, "grad_norm": 0.13788117468357086, "learning_rate": 9.96e-05, "loss": 2.8413, "step": 168 }, { "epoch": 1.51, "grad_norm": 0.44669249653816223, "learning_rate": 0.0001002, "loss": 2.8367, "step": 169 }, { "epoch": 1.52, "grad_norm": 0.1038396805524826, "learning_rate": 0.0001008, "loss": 2.8268, "step": 170 }, { "epoch": 1.53, "grad_norm": 0.32863888144493103, "learning_rate": 0.0001014, "loss": 2.8248, "step": 171 }, { "epoch": 1.54, "grad_norm": 0.2267109751701355, "learning_rate": 0.000102, 
"loss": 2.8296, "step": 172 }, { "epoch": 1.54, "grad_norm": 0.2983206510543823, "learning_rate": 0.0001026, "loss": 2.8313, "step": 173 }, { "epoch": 1.55, "grad_norm": 0.20100681483745575, "learning_rate": 0.00010319999999999999, "loss": 2.8315, "step": 174 }, { "epoch": 1.56, "grad_norm": 0.17761147022247314, "learning_rate": 0.00010379999999999999, "loss": 2.8338, "step": 175 }, { "epoch": 1.57, "grad_norm": 0.14317427575588226, "learning_rate": 0.00010439999999999999, "loss": 2.8239, "step": 176 }, { "epoch": 1.58, "grad_norm": 0.12552930414676666, "learning_rate": 0.00010499999999999999, "loss": 2.8211, "step": 177 }, { "epoch": 1.59, "grad_norm": 0.12438877671957016, "learning_rate": 0.00010559999999999998, "loss": 2.8225, "step": 178 }, { "epoch": 1.6, "grad_norm": 0.12741345167160034, "learning_rate": 0.00010619999999999998, "loss": 2.8194, "step": 179 }, { "epoch": 1.61, "grad_norm": 0.13339456915855408, "learning_rate": 0.00010679999999999998, "loss": 2.8235, "step": 180 }, { "epoch": 1.62, "grad_norm": 0.3932964503765106, "learning_rate": 0.00010739999999999998, "loss": 2.8305, "step": 181 }, { "epoch": 1.62, "grad_norm": 0.3690793514251709, "learning_rate": 0.00010799999999999998, "loss": 2.8183, "step": 182 }, { "epoch": 1.63, "grad_norm": 0.5288318991661072, "learning_rate": 0.00010859999999999998, "loss": 2.8143, "step": 183 }, { "epoch": 1.64, "grad_norm": 0.4029388725757599, "learning_rate": 0.00010919999999999998, "loss": 2.8117, "step": 184 }, { "epoch": 1.65, "grad_norm": 0.6303567290306091, "learning_rate": 0.00010979999999999999, "loss": 2.8071, "step": 185 }, { "epoch": 1.66, "grad_norm": 0.6404778957366943, "learning_rate": 0.00011039999999999999, "loss": 2.802, "step": 186 }, { "epoch": 1.67, "grad_norm": 1.1146502494812012, "learning_rate": 0.00011099999999999999, "loss": 2.8075, "step": 187 }, { "epoch": 1.68, "grad_norm": 0.7937551736831665, "learning_rate": 0.00011159999999999999, "loss": 2.7986, "step": 188 }, { "epoch": 1.69, 
"grad_norm": 0.46796396374702454, "learning_rate": 0.00011219999999999999, "loss": 2.7816, "step": 189 }, { "epoch": 1.7, "grad_norm": 0.5845624208450317, "learning_rate": 0.00011279999999999999, "loss": 2.7793, "step": 190 }, { "epoch": 1.71, "grad_norm": 0.4756370484828949, "learning_rate": 0.00011339999999999999, "loss": 2.7683, "step": 191 }, { "epoch": 1.71, "grad_norm": 1.1186546087265015, "learning_rate": 0.00011399999999999999, "loss": 2.7664, "step": 192 }, { "epoch": 1.72, "grad_norm": 1.1542567014694214, "learning_rate": 0.0001146, "loss": 2.7696, "step": 193 }, { "epoch": 1.73, "grad_norm": 1.6869224309921265, "learning_rate": 0.0001152, "loss": 2.7689, "step": 194 }, { "epoch": 1.74, "grad_norm": 0.7766700983047485, "learning_rate": 0.0001158, "loss": 2.7333, "step": 195 }, { "epoch": 1.75, "grad_norm": 0.721091091632843, "learning_rate": 0.0001164, "loss": 2.7311, "step": 196 }, { "epoch": 1.76, "grad_norm": 0.5262578725814819, "learning_rate": 0.000117, "loss": 2.7136, "step": 197 }, { "epoch": 1.77, "grad_norm": 0.6685904264450073, "learning_rate": 0.0001176, "loss": 2.6956, "step": 198 }, { "epoch": 1.78, "grad_norm": 0.45331716537475586, "learning_rate": 0.0001182, "loss": 2.6695, "step": 199 }, { "epoch": 1.79, "grad_norm": 0.7517866492271423, "learning_rate": 0.0001188, "loss": 2.6638, "step": 200 }, { "epoch": 1.79, "eval_cer": 1.0, "eval_loss": 2.6310031414031982, "eval_runtime": 21.5145, "eval_samples_per_second": 122.801, "eval_steps_per_second": 1.952, "eval_wer": 1.0, "step": 200 }, { "epoch": 1.79, "grad_norm": 0.4016678035259247, "learning_rate": 0.0001194, "loss": 2.6273, "step": 201 }, { "epoch": 1.8, "grad_norm": 0.6813556551933289, "learning_rate": 0.00011999999999999999, "loss": 2.6077, "step": 202 }, { "epoch": 1.81, "grad_norm": 0.5532939434051514, "learning_rate": 0.00012059999999999999, "loss": 2.583, "step": 203 }, { "epoch": 1.82, "grad_norm": 1.6075173616409302, "learning_rate": 0.00012119999999999999, "loss": 2.5695, "step": 
204 }, { "epoch": 1.83, "grad_norm": 0.8583607077598572, "learning_rate": 0.00012179999999999999, "loss": 2.5337, "step": 205 }, { "epoch": 1.84, "grad_norm": 2.8024051189422607, "learning_rate": 0.0001224, "loss": 2.6199, "step": 206 }, { "epoch": 1.85, "grad_norm": 0.9846538305282593, "learning_rate": 0.00012299999999999998, "loss": 2.4776, "step": 207 }, { "epoch": 1.86, "grad_norm": 1.8579438924789429, "learning_rate": 0.0001236, "loss": 2.5176, "step": 208 }, { "epoch": 1.87, "grad_norm": 0.7917272448539734, "learning_rate": 0.00012419999999999998, "loss": 2.4321, "step": 209 }, { "epoch": 1.88, "grad_norm": 2.09323787689209, "learning_rate": 0.00012479999999999997, "loss": 2.4381, "step": 210 }, { "epoch": 1.88, "grad_norm": 1.4129819869995117, "learning_rate": 0.00012539999999999999, "loss": 2.3959, "step": 211 }, { "epoch": 1.89, "grad_norm": 0.6987126469612122, "learning_rate": 0.00012599999999999997, "loss": 2.3383, "step": 212 }, { "epoch": 1.9, "grad_norm": 1.3300609588623047, "learning_rate": 0.0001266, "loss": 2.3031, "step": 213 }, { "epoch": 1.91, "grad_norm": 0.7649120092391968, "learning_rate": 0.00012719999999999997, "loss": 2.2646, "step": 214 }, { "epoch": 1.92, "grad_norm": 0.8899486064910889, "learning_rate": 0.0001278, "loss": 2.2044, "step": 215 }, { "epoch": 1.93, "grad_norm": 1.0937632322311401, "learning_rate": 0.00012839999999999998, "loss": 2.1425, "step": 216 }, { "epoch": 1.94, "grad_norm": 0.6736981868743896, "learning_rate": 0.000129, "loss": 2.1169, "step": 217 }, { "epoch": 1.95, "grad_norm": 0.6996066570281982, "learning_rate": 0.00012959999999999998, "loss": 2.0965, "step": 218 }, { "epoch": 1.96, "grad_norm": 0.8732956051826477, "learning_rate": 0.0001302, "loss": 2.0275, "step": 219 }, { "epoch": 1.96, "grad_norm": 0.6698563098907471, "learning_rate": 0.00013079999999999998, "loss": 1.9618, "step": 220 }, { "epoch": 1.97, "grad_norm": 0.7226149439811707, "learning_rate": 0.0001314, "loss": 1.9156, "step": 221 }, { "epoch": 
1.98, "grad_norm": 0.8683639764785767, "learning_rate": 0.00013199999999999998, "loss": 1.8771, "step": 222 }, { "epoch": 1.99, "grad_norm": 0.6471112370491028, "learning_rate": 0.0001326, "loss": 1.833, "step": 223 }, { "epoch": 2.0, "grad_norm": 0.917619526386261, "learning_rate": 0.00013319999999999999, "loss": 1.7896, "step": 224 }, { "epoch": 2.01, "grad_norm": 0.6950381398200989, "learning_rate": 0.0001338, "loss": 1.7237, "step": 225 }, { "epoch": 2.02, "grad_norm": 0.6106230616569519, "learning_rate": 0.0001344, "loss": 1.6922, "step": 226 }, { "epoch": 2.03, "grad_norm": 0.7221829295158386, "learning_rate": 0.000135, "loss": 1.6354, "step": 227 }, { "epoch": 2.04, "grad_norm": 0.6865891218185425, "learning_rate": 0.0001356, "loss": 1.5929, "step": 228 }, { "epoch": 2.04, "grad_norm": 0.6008340716362, "learning_rate": 0.0001362, "loss": 1.5578, "step": 229 }, { "epoch": 2.05, "grad_norm": 0.544625461101532, "learning_rate": 0.0001368, "loss": 1.5526, "step": 230 }, { "epoch": 2.06, "grad_norm": 0.6607970595359802, "learning_rate": 0.0001374, "loss": 1.4816, "step": 231 }, { "epoch": 2.07, "grad_norm": 0.5788168907165527, "learning_rate": 0.000138, "loss": 1.4238, "step": 232 }, { "epoch": 2.08, "grad_norm": 0.6171405911445618, "learning_rate": 0.0001386, "loss": 1.3814, "step": 233 }, { "epoch": 2.09, "grad_norm": 0.5263203382492065, "learning_rate": 0.0001392, "loss": 1.3584, "step": 234 }, { "epoch": 2.1, "grad_norm": 0.7827364802360535, "learning_rate": 0.00013979999999999998, "loss": 1.3101, "step": 235 }, { "epoch": 2.11, "grad_norm": 0.8221073746681213, "learning_rate": 0.0001404, "loss": 1.3303, "step": 236 }, { "epoch": 2.12, "grad_norm": 1.6871161460876465, "learning_rate": 0.00014099999999999998, "loss": 1.3629, "step": 237 }, { "epoch": 2.12, "grad_norm": 0.5879709720611572, "learning_rate": 0.00014159999999999997, "loss": 1.2105, "step": 238 }, { "epoch": 2.13, "grad_norm": 1.0625423192977905, "learning_rate": 0.0001422, "loss": 1.2261, "step": 
239 }, { "epoch": 2.14, "grad_norm": 0.9403135180473328, "learning_rate": 0.00014279999999999997, "loss": 1.1616, "step": 240 }, { "epoch": 2.15, "grad_norm": 0.7435916662216187, "learning_rate": 0.0001434, "loss": 1.133, "step": 241 }, { "epoch": 2.16, "grad_norm": 0.6294271349906921, "learning_rate": 0.00014399999999999998, "loss": 1.1262, "step": 242 }, { "epoch": 2.17, "grad_norm": 0.5731372237205505, "learning_rate": 0.0001446, "loss": 1.1516, "step": 243 }, { "epoch": 2.18, "grad_norm": 0.9239470958709717, "learning_rate": 0.00014519999999999998, "loss": 1.0654, "step": 244 }, { "epoch": 2.19, "grad_norm": 0.5397454500198364, "learning_rate": 0.0001458, "loss": 1.0191, "step": 245 }, { "epoch": 2.2, "grad_norm": 0.6285163760185242, "learning_rate": 0.00014639999999999998, "loss": 0.9882, "step": 246 }, { "epoch": 2.21, "grad_norm": 0.47989338636398315, "learning_rate": 0.000147, "loss": 0.9555, "step": 247 }, { "epoch": 2.21, "grad_norm": 0.5626522302627563, "learning_rate": 0.00014759999999999998, "loss": 0.9482, "step": 248 }, { "epoch": 2.22, "grad_norm": 0.5631589889526367, "learning_rate": 0.0001482, "loss": 1.0041, "step": 249 }, { "epoch": 2.23, "grad_norm": 0.4852822721004486, "learning_rate": 0.00014879999999999998, "loss": 0.8956, "step": 250 }, { "epoch": 2.24, "grad_norm": 0.5496442317962646, "learning_rate": 0.0001494, "loss": 0.8809, "step": 251 }, { "epoch": 2.25, "grad_norm": 0.5785245299339294, "learning_rate": 0.00015, "loss": 0.8292, "step": 252 }, { "epoch": 2.26, "grad_norm": 0.44672396779060364, "learning_rate": 0.00015059999999999997, "loss": 0.8298, "step": 253 }, { "epoch": 2.27, "grad_norm": 0.419998437166214, "learning_rate": 0.0001512, "loss": 0.8039, "step": 254 }, { "epoch": 2.28, "grad_norm": 0.4560867249965668, "learning_rate": 0.00015179999999999998, "loss": 0.8389, "step": 255 }, { "epoch": 2.29, "grad_norm": 0.49839121103286743, "learning_rate": 0.0001524, "loss": 0.8024, "step": 256 }, { "epoch": 2.29, "grad_norm": 
0.41201063990592957, "learning_rate": 0.00015299999999999998, "loss": 0.7551, "step": 257 }, { "epoch": 2.3, "grad_norm": 0.3268738090991974, "learning_rate": 0.0001536, "loss": 0.7296, "step": 258 }, { "epoch": 2.31, "grad_norm": 0.46522414684295654, "learning_rate": 0.00015419999999999998, "loss": 0.7201, "step": 259 }, { "epoch": 2.32, "grad_norm": 0.26850831508636475, "learning_rate": 0.0001548, "loss": 0.6957, "step": 260 }, { "epoch": 2.33, "grad_norm": 0.38073208928108215, "learning_rate": 0.00015539999999999998, "loss": 0.7161, "step": 261 }, { "epoch": 2.34, "grad_norm": 0.3288465738296509, "learning_rate": 0.000156, "loss": 0.7478, "step": 262 }, { "epoch": 2.35, "grad_norm": 0.535457193851471, "learning_rate": 0.00015659999999999998, "loss": 0.6699, "step": 263 }, { "epoch": 2.36, "grad_norm": 0.46445024013519287, "learning_rate": 0.0001572, "loss": 0.6405, "step": 264 }, { "epoch": 2.37, "grad_norm": 0.37084606289863586, "learning_rate": 0.0001578, "loss": 0.6309, "step": 265 }, { "epoch": 2.38, "grad_norm": 0.38821446895599365, "learning_rate": 0.0001584, "loss": 0.6272, "step": 266 }, { "epoch": 2.38, "grad_norm": 0.2973250150680542, "learning_rate": 0.000159, "loss": 0.6242, "step": 267 }, { "epoch": 2.39, "grad_norm": 0.4151294529438019, "learning_rate": 0.0001596, "loss": 0.6912, "step": 268 }, { "epoch": 2.4, "grad_norm": 0.3166775107383728, "learning_rate": 0.0001602, "loss": 0.5899, "step": 269 }, { "epoch": 2.41, "grad_norm": 0.426556795835495, "learning_rate": 0.0001608, "loss": 0.5855, "step": 270 }, { "epoch": 2.42, "grad_norm": 0.27777695655822754, "learning_rate": 0.0001614, "loss": 0.5672, "step": 271 }, { "epoch": 2.43, "grad_norm": 0.32402169704437256, "learning_rate": 0.000162, "loss": 0.5713, "step": 272 }, { "epoch": 2.44, "grad_norm": 0.350260466337204, "learning_rate": 0.0001626, "loss": 0.5935, "step": 273 }, { "epoch": 2.45, "grad_norm": 0.3984275162220001, "learning_rate": 0.0001632, "loss": 0.6266, "step": 274 }, { "epoch": 
2.46, "grad_norm": 0.39264658093452454, "learning_rate": 0.0001638, "loss": 0.5461, "step": 275 }, { "epoch": 2.46, "grad_norm": 0.27602052688598633, "learning_rate": 0.0001644, "loss": 0.5409, "step": 276 }, { "epoch": 2.47, "grad_norm": 0.3373797833919525, "learning_rate": 0.000165, "loss": 0.5357, "step": 277 }, { "epoch": 2.48, "grad_norm": 0.2839423418045044, "learning_rate": 0.0001656, "loss": 0.5114, "step": 278 }, { "epoch": 2.49, "grad_norm": 0.25080233812332153, "learning_rate": 0.0001662, "loss": 0.509, "step": 279 }, { "epoch": 2.5, "grad_norm": 0.32631421089172363, "learning_rate": 0.0001668, "loss": 0.5674, "step": 280 }, { "epoch": 2.51, "grad_norm": 0.3136104643344879, "learning_rate": 0.0001674, "loss": 0.5381, "step": 281 }, { "epoch": 2.52, "grad_norm": 0.25540921092033386, "learning_rate": 0.000168, "loss": 0.4858, "step": 282 }, { "epoch": 2.53, "grad_norm": 0.1932198703289032, "learning_rate": 0.0001686, "loss": 0.4748, "step": 283 }, { "epoch": 2.54, "grad_norm": 0.2537154257297516, "learning_rate": 0.00016919999999999997, "loss": 0.488, "step": 284 }, { "epoch": 2.54, "grad_norm": 0.27697309851646423, "learning_rate": 0.00016979999999999998, "loss": 0.4721, "step": 285 }, { "epoch": 2.55, "grad_norm": 0.3219203054904938, "learning_rate": 0.00017039999999999997, "loss": 0.4909, "step": 286 }, { "epoch": 2.56, "grad_norm": 0.29123619198799133, "learning_rate": 0.00017099999999999998, "loss": 0.5271, "step": 287 }, { "epoch": 2.57, "grad_norm": 0.308193176984787, "learning_rate": 0.00017159999999999997, "loss": 0.4632, "step": 288 }, { "epoch": 2.58, "grad_norm": 0.18822936713695526, "learning_rate": 0.00017219999999999998, "loss": 0.4514, "step": 289 }, { "epoch": 2.59, "grad_norm": 0.25762876868247986, "learning_rate": 0.00017279999999999997, "loss": 0.453, "step": 290 }, { "epoch": 2.6, "grad_norm": 0.22614410519599915, "learning_rate": 0.00017339999999999996, "loss": 0.4392, "step": 291 }, { "epoch": 2.61, "grad_norm": 0.23655934631824493, 
"learning_rate": 0.00017399999999999997, "loss": 0.4501, "step": 292 }, { "epoch": 2.62, "grad_norm": 0.308523029088974, "learning_rate": 0.00017459999999999996, "loss": 0.5553, "step": 293 }, { "epoch": 2.62, "grad_norm": 0.21490143239498138, "learning_rate": 0.00017519999999999998, "loss": 0.4264, "step": 294 }, { "epoch": 2.63, "grad_norm": 0.23168258368968964, "learning_rate": 0.00017579999999999996, "loss": 0.4151, "step": 295 }, { "epoch": 2.64, "grad_norm": 0.22771677374839783, "learning_rate": 0.00017639999999999998, "loss": 0.4113, "step": 296 }, { "epoch": 2.65, "grad_norm": 0.2682896852493286, "learning_rate": 0.00017699999999999997, "loss": 0.4089, "step": 297 }, { "epoch": 2.66, "grad_norm": 0.24218544363975525, "learning_rate": 0.00017759999999999998, "loss": 0.4204, "step": 298 }, { "epoch": 2.67, "grad_norm": 0.32331299781799316, "learning_rate": 0.00017819999999999997, "loss": 0.4884, "step": 299 }, { "epoch": 2.68, "grad_norm": 0.22354701161384583, "learning_rate": 0.00017879999999999998, "loss": 0.3898, "step": 300 }, { "epoch": 2.68, "eval_cer": 0.09684936088968066, "eval_loss": 0.38924866914749146, "eval_runtime": 22.3523, "eval_samples_per_second": 118.198, "eval_steps_per_second": 1.879, "eval_wer": 0.33661245537485124, "step": 300 }, { "epoch": 2.69, "grad_norm": 0.1929866522550583, "learning_rate": 0.00017939999999999997, "loss": 0.3921, "step": 301 }, { "epoch": 2.7, "grad_norm": 0.2592866122722626, "learning_rate": 0.00017999999999999998, "loss": 0.3905, "step": 302 }, { "epoch": 2.71, "grad_norm": 0.2864495515823364, "learning_rate": 0.00018059999999999997, "loss": 0.3885, "step": 303 }, { "epoch": 2.71, "grad_norm": 0.21913693845272064, "learning_rate": 0.00018119999999999999, "loss": 0.4009, "step": 304 }, { "epoch": 2.72, "grad_norm": 0.3071497082710266, "learning_rate": 0.00018179999999999997, "loss": 0.4195, "step": 305 }, { "epoch": 2.73, "grad_norm": 0.22289547324180603, "learning_rate": 0.0001824, "loss": 0.4112, "step": 306 }, { 
"epoch": 2.74, "grad_norm": 0.31046903133392334, "learning_rate": 0.00018299999999999998, "loss": 0.3764, "step": 307 }, { "epoch": 2.75, "grad_norm": 0.2204066812992096, "learning_rate": 0.0001836, "loss": 0.3691, "step": 308 }, { "epoch": 2.76, "grad_norm": 0.20296648144721985, "learning_rate": 0.00018419999999999998, "loss": 0.3492, "step": 309 }, { "epoch": 2.77, "grad_norm": 0.2594054937362671, "learning_rate": 0.0001848, "loss": 0.3495, "step": 310 }, { "epoch": 2.78, "grad_norm": 0.21860192716121674, "learning_rate": 0.00018539999999999998, "loss": 0.4076, "step": 311 }, { "epoch": 2.79, "grad_norm": 0.36586835980415344, "learning_rate": 0.000186, "loss": 0.4302, "step": 312 }, { "epoch": 2.79, "grad_norm": 0.26571759581565857, "learning_rate": 0.00018659999999999998, "loss": 0.3518, "step": 313 }, { "epoch": 2.8, "grad_norm": 0.23198798298835754, "learning_rate": 0.0001872, "loss": 0.3443, "step": 314 }, { "epoch": 2.81, "grad_norm": 0.2801920175552368, "learning_rate": 0.00018779999999999998, "loss": 0.3414, "step": 315 }, { "epoch": 2.82, "grad_norm": 0.16064494848251343, "learning_rate": 0.00018839999999999997, "loss": 0.3418, "step": 316 }, { "epoch": 2.83, "grad_norm": 0.34374213218688965, "learning_rate": 0.00018899999999999999, "loss": 0.3635, "step": 317 }, { "epoch": 2.84, "grad_norm": 0.26433858275413513, "learning_rate": 0.00018959999999999997, "loss": 0.4407, "step": 318 }, { "epoch": 2.85, "grad_norm": 0.40656784176826477, "learning_rate": 0.0001902, "loss": 0.3366, "step": 319 }, { "epoch": 2.86, "grad_norm": 0.3449411690235138, "learning_rate": 0.00019079999999999998, "loss": 0.337, "step": 320 }, { "epoch": 2.87, "grad_norm": 0.2536275386810303, "learning_rate": 0.0001914, "loss": 0.3441, "step": 321 }, { "epoch": 2.88, "grad_norm": 0.28934991359710693, "learning_rate": 0.00019199999999999998, "loss": 0.3376, "step": 322 }, { "epoch": 2.88, "grad_norm": 0.28276798129081726, "learning_rate": 0.0001926, "loss": 0.3569, "step": 323 }, { 
"epoch": 2.89, "grad_norm": 0.3971540927886963, "learning_rate": 0.00019319999999999998, "loss": 0.4098, "step": 324 }, { "epoch": 2.9, "grad_norm": 0.5473229289054871, "learning_rate": 0.0001938, "loss": 0.3406, "step": 325 }, { "epoch": 2.91, "grad_norm": 0.36361709237098694, "learning_rate": 0.00019439999999999998, "loss": 0.3197, "step": 326 }, { "epoch": 2.92, "grad_norm": 0.35563981533050537, "learning_rate": 0.000195, "loss": 0.3355, "step": 327 }, { "epoch": 2.93, "grad_norm": 0.3679071366786957, "learning_rate": 0.00019559999999999998, "loss": 0.3351, "step": 328 }, { "epoch": 2.94, "grad_norm": 0.2461666613817215, "learning_rate": 0.0001962, "loss": 0.3145, "step": 329 }, { "epoch": 2.95, "grad_norm": 0.4335785210132599, "learning_rate": 0.00019679999999999999, "loss": 0.3834, "step": 330 }, { "epoch": 2.96, "grad_norm": 0.23562943935394287, "learning_rate": 0.0001974, "loss": 0.359, "step": 331 }, { "epoch": 2.96, "grad_norm": 0.25963151454925537, "learning_rate": 0.000198, "loss": 0.3103, "step": 332 }, { "epoch": 2.97, "grad_norm": 0.17004214227199554, "learning_rate": 0.0001986, "loss": 0.3078, "step": 333 }, { "epoch": 2.98, "grad_norm": 0.31410500407218933, "learning_rate": 0.0001992, "loss": 0.3279, "step": 334 }, { "epoch": 2.99, "grad_norm": 0.18988707661628723, "learning_rate": 0.0001998, "loss": 0.3153, "step": 335 }, { "epoch": 3.0, "grad_norm": 0.3162887394428253, "learning_rate": 0.0002004, "loss": 0.3523, "step": 336 }, { "epoch": 3.01, "grad_norm": 0.2362556755542755, "learning_rate": 0.000201, "loss": 0.2923, "step": 337 }, { "epoch": 3.02, "grad_norm": 0.2226056605577469, "learning_rate": 0.0002016, "loss": 0.2763, "step": 338 }, { "epoch": 3.03, "grad_norm": 0.18942689895629883, "learning_rate": 0.0002022, "loss": 0.2864, "step": 339 }, { "epoch": 3.04, "grad_norm": 0.18445442616939545, "learning_rate": 0.0002028, "loss": 0.2725, "step": 340 }, { "epoch": 3.04, "grad_norm": 0.16619300842285156, "learning_rate": 0.00020339999999999998, 
"loss": 0.2787, "step": 341 }, { "epoch": 3.05, "grad_norm": 0.2150353491306305, "learning_rate": 0.000204, "loss": 0.3169, "step": 342 }, { "epoch": 3.06, "grad_norm": 0.1720004379749298, "learning_rate": 0.00020459999999999999, "loss": 0.2789, "step": 343 }, { "epoch": 3.07, "grad_norm": 0.18419624865055084, "learning_rate": 0.0002052, "loss": 0.2567, "step": 344 }, { "epoch": 3.08, "grad_norm": 0.1442434936761856, "learning_rate": 0.0002058, "loss": 0.2555, "step": 345 }, { "epoch": 3.09, "grad_norm": 0.20136016607284546, "learning_rate": 0.00020639999999999998, "loss": 0.2687, "step": 346 }, { "epoch": 3.1, "grad_norm": 0.19720318913459778, "learning_rate": 0.00020699999999999996, "loss": 0.2695, "step": 347 }, { "epoch": 3.11, "grad_norm": 0.21785709261894226, "learning_rate": 0.00020759999999999998, "loss": 0.2665, "step": 348 }, { "epoch": 3.12, "grad_norm": 0.258958637714386, "learning_rate": 0.00020819999999999996, "loss": 0.3171, "step": 349 }, { "epoch": 3.12, "grad_norm": 0.18629953265190125, "learning_rate": 0.00020879999999999998, "loss": 0.2549, "step": 350 }, { "epoch": 3.13, "grad_norm": 0.19203972816467285, "learning_rate": 0.00020939999999999997, "loss": 0.2583, "step": 351 }, { "epoch": 3.14, "grad_norm": 0.1483580619096756, "learning_rate": 0.00020999999999999998, "loss": 0.2569, "step": 352 }, { "epoch": 3.15, "grad_norm": 0.2105817049741745, "learning_rate": 0.00021059999999999997, "loss": 0.2571, "step": 353 }, { "epoch": 3.16, "grad_norm": 0.18942958116531372, "learning_rate": 0.00021119999999999996, "loss": 0.2649, "step": 354 }, { "epoch": 3.17, "grad_norm": 0.2832517623901367, "learning_rate": 0.00021179999999999997, "loss": 0.3367, "step": 355 }, { "epoch": 3.18, "grad_norm": 0.2398645132780075, "learning_rate": 0.00021239999999999996, "loss": 0.2547, "step": 356 }, { "epoch": 3.19, "grad_norm": 0.2130969762802124, "learning_rate": 0.00021299999999999997, "loss": 0.244, "step": 357 }, { "epoch": 3.2, "grad_norm": 0.18089188635349274, 
"learning_rate": 0.00021359999999999996, "loss": 0.2409, "step": 358 }, { "epoch": 3.21, "grad_norm": 0.19771325588226318, "learning_rate": 0.00021419999999999998, "loss": 0.244, "step": 359 }, { "epoch": 3.21, "grad_norm": 0.22420358657836914, "learning_rate": 0.00021479999999999996, "loss": 0.2496, "step": 360 }, { "epoch": 3.22, "grad_norm": 0.2420346587896347, "learning_rate": 0.00021539999999999998, "loss": 0.2755, "step": 361 }, { "epoch": 3.23, "grad_norm": 0.21282508969306946, "learning_rate": 0.00021599999999999996, "loss": 0.2572, "step": 362 }, { "epoch": 3.24, "grad_norm": 0.16155952215194702, "learning_rate": 0.00021659999999999998, "loss": 0.2462, "step": 363 }, { "epoch": 3.25, "grad_norm": 0.20106732845306396, "learning_rate": 0.00021719999999999997, "loss": 0.2489, "step": 364 }, { "epoch": 3.26, "grad_norm": 0.15768885612487793, "learning_rate": 0.00021779999999999998, "loss": 0.2322, "step": 365 }, { "epoch": 3.27, "grad_norm": 0.1964852511882782, "learning_rate": 0.00021839999999999997, "loss": 0.2312, "step": 366 }, { "epoch": 3.28, "grad_norm": 0.20234230160713196, "learning_rate": 0.00021899999999999998, "loss": 0.27, "step": 367 }, { "epoch": 3.29, "grad_norm": 0.18610413372516632, "learning_rate": 0.00021959999999999997, "loss": 0.264, "step": 368 }, { "epoch": 3.29, "grad_norm": 0.1503794938325882, "learning_rate": 0.00022019999999999999, "loss": 0.2441, "step": 369 }, { "epoch": 3.3, "grad_norm": 0.1564638763666153, "learning_rate": 0.00022079999999999997, "loss": 0.2397, "step": 370 }, { "epoch": 3.31, "grad_norm": 0.1418813318014145, "learning_rate": 0.0002214, "loss": 0.2311, "step": 371 }, { "epoch": 3.32, "grad_norm": 0.13767391443252563, "learning_rate": 0.00022199999999999998, "loss": 0.2355, "step": 372 }, { "epoch": 3.33, "grad_norm": 0.17412413656711578, "learning_rate": 0.0002226, "loss": 0.2443, "step": 373 }, { "epoch": 3.34, "grad_norm": 0.19369201362133026, "learning_rate": 0.00022319999999999998, "loss": 0.269, "step": 374 
}, { "epoch": 3.35, "grad_norm": 0.15970537066459656, "learning_rate": 0.0002238, "loss": 0.2268, "step": 375 }, { "epoch": 3.36, "grad_norm": 0.14835906028747559, "learning_rate": 0.00022439999999999998, "loss": 0.2322, "step": 376 }, { "epoch": 3.37, "grad_norm": 0.16173814237117767, "learning_rate": 0.000225, "loss": 0.2218, "step": 377 }, { "epoch": 3.38, "grad_norm": 0.16744820773601532, "learning_rate": 0.00022559999999999998, "loss": 0.2378, "step": 378 }, { "epoch": 3.38, "grad_norm": 0.1666780412197113, "learning_rate": 0.00022619999999999997, "loss": 0.2364, "step": 379 }, { "epoch": 3.39, "grad_norm": 0.24702821671962738, "learning_rate": 0.00022679999999999998, "loss": 0.2624, "step": 380 }, { "epoch": 3.4, "grad_norm": 0.1768096536397934, "learning_rate": 0.00022739999999999997, "loss": 0.2365, "step": 381 }, { "epoch": 3.41, "grad_norm": 0.16908930242061615, "learning_rate": 0.00022799999999999999, "loss": 0.22, "step": 382 }, { "epoch": 3.42, "grad_norm": 0.2275974303483963, "learning_rate": 0.00022859999999999997, "loss": 0.2147, "step": 383 }, { "epoch": 3.43, "grad_norm": 0.17168572545051575, "learning_rate": 0.0002292, "loss": 0.2287, "step": 384 }, { "epoch": 3.44, "grad_norm": 0.24577546119689941, "learning_rate": 0.00022979999999999997, "loss": 0.2518, "step": 385 }, { "epoch": 3.45, "grad_norm": 0.2721084654331207, "learning_rate": 0.0002304, "loss": 0.2791, "step": 386 }, { "epoch": 3.46, "grad_norm": 0.16132959723472595, "learning_rate": 0.00023099999999999998, "loss": 0.2062, "step": 387 }, { "epoch": 3.46, "grad_norm": 0.17369233071804047, "learning_rate": 0.0002316, "loss": 0.2196, "step": 388 }, { "epoch": 3.47, "grad_norm": 0.14189645648002625, "learning_rate": 0.00023219999999999998, "loss": 0.2201, "step": 389 }, { "epoch": 3.48, "grad_norm": 0.1626410335302353, "learning_rate": 0.0002328, "loss": 0.2223, "step": 390 }, { "epoch": 3.49, "grad_norm": 0.18501843512058258, "learning_rate": 0.00023339999999999998, "loss": 0.2265, "step": 
391 }, { "epoch": 3.5, "grad_norm": 0.21089443564414978, "learning_rate": 0.000234, "loss": 0.2517, "step": 392 }, { "epoch": 3.51, "grad_norm": 0.25956982374191284, "learning_rate": 0.00023459999999999998, "loss": 0.2559, "step": 393 }, { "epoch": 3.52, "grad_norm": 0.13847751915454865, "learning_rate": 0.0002352, "loss": 0.2118, "step": 394 }, { "epoch": 3.53, "grad_norm": 0.18859167397022247, "learning_rate": 0.00023579999999999999, "loss": 0.2209, "step": 395 }, { "epoch": 3.54, "grad_norm": 0.15580683946609497, "learning_rate": 0.0002364, "loss": 0.2025, "step": 396 }, { "epoch": 3.54, "grad_norm": 0.1728525608778, "learning_rate": 0.000237, "loss": 0.2298, "step": 397 }, { "epoch": 3.55, "grad_norm": 0.22779783606529236, "learning_rate": 0.0002376, "loss": 0.2489, "step": 398 }, { "epoch": 3.56, "grad_norm": 0.23927363753318787, "learning_rate": 0.0002382, "loss": 0.2566, "step": 399 }, { "epoch": 3.57, "grad_norm": 0.19308319687843323, "learning_rate": 0.0002388, "loss": 0.2156, "step": 400 }, { "epoch": 3.57, "eval_cer": 0.059137905762704224, "eval_loss": 0.22499564290046692, "eval_runtime": 21.7981, "eval_samples_per_second": 121.203, "eval_steps_per_second": 1.927, "eval_wer": 0.2090241967473225, "step": 400 }, { "epoch": 3.58, "grad_norm": 0.15958182513713837, "learning_rate": 0.0002394, "loss": 0.2196, "step": 401 }, { "epoch": 3.59, "grad_norm": 0.17798563838005066, "learning_rate": 0.00023999999999999998, "loss": 0.2133, "step": 402 }, { "epoch": 3.6, "grad_norm": 0.1666220873594284, "learning_rate": 0.0002406, "loss": 0.2002, "step": 403 }, { "epoch": 3.61, "grad_norm": 0.1777678281068802, "learning_rate": 0.00024119999999999998, "loss": 0.2215, "step": 404 }, { "epoch": 3.62, "grad_norm": 0.27465999126434326, "learning_rate": 0.0002418, "loss": 0.2546, "step": 405 }, { "epoch": 3.62, "grad_norm": 0.2463161200284958, "learning_rate": 0.00024239999999999998, "loss": 0.2158, "step": 406 }, { "epoch": 3.63, "grad_norm": 0.1615164428949356, 
"learning_rate": 0.000243, "loss": 0.2107, "step": 407 }, { "epoch": 3.64, "grad_norm": 0.22111576795578003, "learning_rate": 0.00024359999999999999, "loss": 0.2044, "step": 408 }, { "epoch": 3.65, "grad_norm": 0.14725008606910706, "learning_rate": 0.00024419999999999997, "loss": 0.2099, "step": 409 }, { "epoch": 3.66, "grad_norm": 0.20693030953407288, "learning_rate": 0.0002448, "loss": 0.2324, "step": 410 }, { "epoch": 3.67, "grad_norm": 0.2718674838542938, "learning_rate": 0.00024539999999999995, "loss": 0.2529, "step": 411 }, { "epoch": 3.68, "grad_norm": 0.2779533565044403, "learning_rate": 0.00024599999999999996, "loss": 0.2105, "step": 412 }, { "epoch": 3.69, "grad_norm": 0.1879548728466034, "learning_rate": 0.0002466, "loss": 0.2089, "step": 413 }, { "epoch": 3.7, "grad_norm": 0.22634433209896088, "learning_rate": 0.0002472, "loss": 0.202, "step": 414 }, { "epoch": 3.71, "grad_norm": 0.17272689938545227, "learning_rate": 0.00024779999999999995, "loss": 0.1992, "step": 415 }, { "epoch": 3.71, "grad_norm": 0.18425069749355316, "learning_rate": 0.00024839999999999997, "loss": 0.2011, "step": 416 }, { "epoch": 3.72, "grad_norm": 0.2612092196941376, "learning_rate": 0.000249, "loss": 0.238, "step": 417 }, { "epoch": 3.73, "grad_norm": 0.25745177268981934, "learning_rate": 0.00024959999999999994, "loss": 0.2373, "step": 418 }, { "epoch": 3.74, "grad_norm": 0.19050335884094238, "learning_rate": 0.00025019999999999996, "loss": 0.2077, "step": 419 }, { "epoch": 3.75, "grad_norm": 0.1341092884540558, "learning_rate": 0.00025079999999999997, "loss": 0.195, "step": 420 }, { "epoch": 3.76, "grad_norm": 0.20959509909152985, "learning_rate": 0.0002514, "loss": 0.2142, "step": 421 }, { "epoch": 3.77, "grad_norm": 0.13756853342056274, "learning_rate": 0.00025199999999999995, "loss": 0.2017, "step": 422 }, { "epoch": 3.78, "grad_norm": 0.18742893636226654, "learning_rate": 0.00025259999999999996, "loss": 0.2258, "step": 423 }, { "epoch": 3.79, "grad_norm": 
0.25547361373901367, "learning_rate": 0.0002532, "loss": 0.2334, "step": 424 }, { "epoch": 3.79, "grad_norm": 0.19071930646896362, "learning_rate": 0.0002538, "loss": 0.2154, "step": 425 }, { "epoch": 3.8, "grad_norm": 0.166627898812294, "learning_rate": 0.00025439999999999995, "loss": 0.1889, "step": 426 }, { "epoch": 3.81, "grad_norm": 0.13609127700328827, "learning_rate": 0.00025499999999999996, "loss": 0.1815, "step": 427 }, { "epoch": 3.82, "grad_norm": 0.17250128090381622, "learning_rate": 0.0002556, "loss": 0.1946, "step": 428 }, { "epoch": 3.83, "grad_norm": 0.14866022765636444, "learning_rate": 0.0002562, "loss": 0.2075, "step": 429 }, { "epoch": 3.84, "grad_norm": 0.2631528377532959, "learning_rate": 0.00025679999999999995, "loss": 0.2687, "step": 430 }, { "epoch": 3.85, "grad_norm": 0.17119698226451874, "learning_rate": 0.00025739999999999997, "loss": 0.1888, "step": 431 }, { "epoch": 3.86, "grad_norm": 0.14718417823314667, "learning_rate": 0.000258, "loss": 0.1934, "step": 432 }, { "epoch": 3.87, "grad_norm": 0.126860111951828, "learning_rate": 0.0002586, "loss": 0.186, "step": 433 }, { "epoch": 3.88, "grad_norm": 0.14548039436340332, "learning_rate": 0.00025919999999999996, "loss": 0.1956, "step": 434 }, { "epoch": 3.88, "grad_norm": 0.15526461601257324, "learning_rate": 0.00025979999999999997, "loss": 0.1991, "step": 435 }, { "epoch": 3.89, "grad_norm": 0.1961231231689453, "learning_rate": 0.0002604, "loss": 0.2342, "step": 436 }, { "epoch": 3.9, "grad_norm": 0.12892864644527435, "learning_rate": 0.000261, "loss": 0.1939, "step": 437 }, { "epoch": 3.91, "grad_norm": 0.1452789306640625, "learning_rate": 0.00026159999999999996, "loss": 0.189, "step": 438 }, { "epoch": 3.92, "grad_norm": 0.12380103021860123, "learning_rate": 0.0002622, "loss": 0.1954, "step": 439 }, { "epoch": 3.93, "grad_norm": 0.1424674391746521, "learning_rate": 0.0002628, "loss": 0.2032, "step": 440 }, { "epoch": 3.94, "grad_norm": 0.12863987684249878, "learning_rate": 
0.00026339999999999995, "loss": 0.1796, "step": 441 }, { "epoch": 3.95, "grad_norm": 0.19804953038692474, "learning_rate": 0.00026399999999999997, "loss": 0.2079, "step": 442 }, { "epoch": 3.96, "grad_norm": 0.20673587918281555, "learning_rate": 0.0002646, "loss": 0.228, "step": 443 }, { "epoch": 3.96, "grad_norm": 0.1997382789850235, "learning_rate": 0.0002652, "loss": 0.1879, "step": 444 }, { "epoch": 3.97, "grad_norm": 0.13489511609077454, "learning_rate": 0.00026579999999999996, "loss": 0.1888, "step": 445 }, { "epoch": 3.98, "grad_norm": 0.20545919239521027, "learning_rate": 0.00026639999999999997, "loss": 0.1823, "step": 446 }, { "epoch": 3.99, "grad_norm": 0.1583697646856308, "learning_rate": 0.000267, "loss": 0.1908, "step": 447 }, { "epoch": 4.0, "grad_norm": 0.24280981719493866, "learning_rate": 0.0002676, "loss": 0.2184, "step": 448 }, { "epoch": 4.01, "grad_norm": 0.13582612574100494, "learning_rate": 0.00026819999999999996, "loss": 0.1597, "step": 449 }, { "epoch": 4.02, "grad_norm": 0.17924658954143524, "learning_rate": 0.0002688, "loss": 0.1655, "step": 450 }, { "epoch": 4.03, "grad_norm": 0.14039453864097595, "learning_rate": 0.0002694, "loss": 0.1585, "step": 451 }, { "epoch": 4.04, "grad_norm": 0.16404248774051666, "learning_rate": 0.00027, "loss": 0.156, "step": 452 }, { "epoch": 4.04, "grad_norm": 0.14572083950042725, "learning_rate": 0.00027059999999999996, "loss": 0.1527, "step": 453 }, { "epoch": 4.05, "grad_norm": 0.21694505214691162, "learning_rate": 0.0002712, "loss": 0.1679, "step": 454 }, { "epoch": 4.06, "grad_norm": 0.20453929901123047, "learning_rate": 0.0002718, "loss": 0.1739, "step": 455 }, { "epoch": 4.07, "grad_norm": 0.16913868486881256, "learning_rate": 0.0002724, "loss": 0.1656, "step": 456 }, { "epoch": 4.08, "grad_norm": 0.14071783423423767, "learning_rate": 0.00027299999999999997, "loss": 0.1531, "step": 457 }, { "epoch": 4.09, "grad_norm": 0.1545068472623825, "learning_rate": 0.0002736, "loss": 0.157, "step": 458 }, { 
"epoch": 4.1, "grad_norm": 0.1283985674381256, "learning_rate": 0.0002742, "loss": 0.1633, "step": 459 }, { "epoch": 4.11, "grad_norm": 0.17630359530448914, "learning_rate": 0.0002748, "loss": 0.1709, "step": 460 }, { "epoch": 4.12, "grad_norm": 0.19642595946788788, "learning_rate": 0.00027539999999999997, "loss": 0.1938, "step": 461 }, { "epoch": 4.12, "grad_norm": 0.1480659544467926, "learning_rate": 0.000276, "loss": 0.1502, "step": 462 }, { "epoch": 4.13, "grad_norm": 0.14618328213691711, "learning_rate": 0.0002766, "loss": 0.1557, "step": 463 }, { "epoch": 4.14, "grad_norm": 0.1548188030719757, "learning_rate": 0.0002772, "loss": 0.1548, "step": 464 }, { "epoch": 4.15, "grad_norm": 0.14934171736240387, "learning_rate": 0.0002778, "loss": 0.1491, "step": 465 }, { "epoch": 4.16, "grad_norm": 0.16205880045890808, "learning_rate": 0.0002784, "loss": 0.1549, "step": 466 }, { "epoch": 4.17, "grad_norm": 0.19758005440235138, "learning_rate": 0.000279, "loss": 0.1907, "step": 467 }, { "epoch": 4.18, "grad_norm": 0.14266394078731537, "learning_rate": 0.00027959999999999997, "loss": 0.1456, "step": 468 }, { "epoch": 4.19, "grad_norm": 0.18047606945037842, "learning_rate": 0.0002802, "loss": 0.1656, "step": 469 }, { "epoch": 4.2, "grad_norm": 0.1618647575378418, "learning_rate": 0.0002808, "loss": 0.164, "step": 470 }, { "epoch": 4.21, "grad_norm": 0.1894494742155075, "learning_rate": 0.00028139999999999996, "loss": 0.1477, "step": 471 }, { "epoch": 4.21, "grad_norm": 0.15157738327980042, "learning_rate": 0.00028199999999999997, "loss": 0.142, "step": 472 }, { "epoch": 4.22, "grad_norm": 0.20689848065376282, "learning_rate": 0.0002826, "loss": 0.1747, "step": 473 }, { "epoch": 4.23, "grad_norm": 0.15236985683441162, "learning_rate": 0.00028319999999999994, "loss": 0.1523, "step": 474 }, { "epoch": 4.24, "grad_norm": 0.1496431976556778, "learning_rate": 0.00028379999999999996, "loss": 0.1457, "step": 475 }, { "epoch": 4.25, "grad_norm": 0.12969504296779633, 
"learning_rate": 0.0002844, "loss": 0.1522, "step": 476 }, { "epoch": 4.26, "grad_norm": 0.15846145153045654, "learning_rate": 0.000285, "loss": 0.1397, "step": 477 }, { "epoch": 4.27, "grad_norm": 0.13522788882255554, "learning_rate": 0.00028559999999999995, "loss": 0.1503, "step": 478 }, { "epoch": 4.28, "grad_norm": 0.17433424293994904, "learning_rate": 0.00028619999999999996, "loss": 0.1656, "step": 479 }, { "epoch": 4.29, "grad_norm": 0.19288386404514313, "learning_rate": 0.0002868, "loss": 0.1653, "step": 480 }, { "epoch": 4.29, "grad_norm": 0.16040611267089844, "learning_rate": 0.00028739999999999994, "loss": 0.1561, "step": 481 }, { "epoch": 4.3, "grad_norm": 0.14547991752624512, "learning_rate": 0.00028799999999999995, "loss": 0.1482, "step": 482 }, { "epoch": 4.31, "grad_norm": 0.1573590487241745, "learning_rate": 0.00028859999999999997, "loss": 0.1459, "step": 483 }, { "epoch": 4.32, "grad_norm": 0.13309909403324127, "learning_rate": 0.0002892, "loss": 0.15, "step": 484 }, { "epoch": 4.33, "grad_norm": 0.16878025233745575, "learning_rate": 0.00028979999999999994, "loss": 0.1532, "step": 485 }, { "epoch": 4.34, "grad_norm": 0.19783174991607666, "learning_rate": 0.00029039999999999996, "loss": 0.1755, "step": 486 }, { "epoch": 4.35, "grad_norm": 0.24179938435554504, "learning_rate": 0.00029099999999999997, "loss": 0.1467, "step": 487 }, { "epoch": 4.36, "grad_norm": 0.13137803971767426, "learning_rate": 0.0002916, "loss": 0.149, "step": 488 }, { "epoch": 4.37, "grad_norm": 0.19716697931289673, "learning_rate": 0.00029219999999999995, "loss": 0.1416, "step": 489 }, { "epoch": 4.38, "grad_norm": 0.13461431860923767, "learning_rate": 0.00029279999999999996, "loss": 0.1491, "step": 490 }, { "epoch": 4.38, "grad_norm": 0.20108233392238617, "learning_rate": 0.0002934, "loss": 0.1454, "step": 491 }, { "epoch": 4.39, "grad_norm": 0.22257226705551147, "learning_rate": 0.000294, "loss": 0.1836, "step": 492 }, { "epoch": 4.4, "grad_norm": 0.13674646615982056, 
"learning_rate": 0.00029459999999999995, "loss": 0.1431, "step": 493 }, { "epoch": 4.41, "grad_norm": 0.18464460968971252, "learning_rate": 0.00029519999999999997, "loss": 0.1584, "step": 494 }, { "epoch": 4.42, "grad_norm": 0.12371817231178284, "learning_rate": 0.0002958, "loss": 0.148, "step": 495 }, { "epoch": 4.43, "grad_norm": 0.18389379978179932, "learning_rate": 0.0002964, "loss": 0.1516, "step": 496 }, { "epoch": 4.44, "grad_norm": 0.16786426305770874, "learning_rate": 0.00029699999999999996, "loss": 0.1612, "step": 497 }, { "epoch": 4.45, "grad_norm": 0.22654010355472565, "learning_rate": 0.00029759999999999997, "loss": 0.1589, "step": 498 }, { "epoch": 4.46, "grad_norm": 0.18563547730445862, "learning_rate": 0.0002982, "loss": 0.1576, "step": 499 }, { "epoch": 4.46, "grad_norm": 0.13945861160755157, "learning_rate": 0.0002988, "loss": 0.1517, "step": 500 }, { "epoch": 4.46, "eval_cer": 0.047389250318521606, "eval_loss": 0.18336819112300873, "eval_runtime": 21.8812, "eval_samples_per_second": 120.743, "eval_steps_per_second": 1.919, "eval_wer": 0.16951606505355019, "step": 500 }, { "epoch": 4.47, "grad_norm": 0.1721625030040741, "learning_rate": 0.00029939999999999996, "loss": 0.139, "step": 501 }, { "epoch": 4.48, "grad_norm": 0.14510883390903473, "learning_rate": 0.0003, "loss": 0.1509, "step": 502 }, { "epoch": 4.49, "grad_norm": 0.14989228546619415, "learning_rate": 0.0002999411764705882, "loss": 0.1467, "step": 503 }, { "epoch": 4.5, "grad_norm": 0.17836323380470276, "learning_rate": 0.00029988235294117646, "loss": 0.153, "step": 504 }, { "epoch": 4.51, "grad_norm": 0.19929830729961395, "learning_rate": 0.00029982352941176465, "loss": 0.1533, "step": 505 }, { "epoch": 4.52, "grad_norm": 0.14675268530845642, "learning_rate": 0.00029976470588235295, "loss": 0.14, "step": 506 }, { "epoch": 4.53, "grad_norm": 0.18037748336791992, "learning_rate": 0.00029970588235294114, "loss": 0.1436, "step": 507 }, { "epoch": 4.54, "grad_norm": 0.1641611009836197, 
"learning_rate": 0.0002996470588235294, "loss": 0.1404, "step": 508 }, { "epoch": 4.54, "grad_norm": 0.1628531962633133, "learning_rate": 0.0002995882352941176, "loss": 0.1502, "step": 509 }, { "epoch": 4.55, "grad_norm": 0.19253024458885193, "learning_rate": 0.00029952941176470587, "loss": 0.1695, "step": 510 }, { "epoch": 4.56, "grad_norm": 0.21144042909145355, "learning_rate": 0.0002994705882352941, "loss": 0.1739, "step": 511 }, { "epoch": 4.57, "grad_norm": 0.11775948852300644, "learning_rate": 0.00029941176470588235, "loss": 0.1422, "step": 512 }, { "epoch": 4.58, "grad_norm": 0.1344527155160904, "learning_rate": 0.00029935294117647054, "loss": 0.1527, "step": 513 }, { "epoch": 4.59, "grad_norm": 0.12983818352222443, "learning_rate": 0.0002992941176470588, "loss": 0.1489, "step": 514 }, { "epoch": 4.6, "grad_norm": 0.15714521706104279, "learning_rate": 0.00029923529411764703, "loss": 0.1448, "step": 515 }, { "epoch": 4.61, "grad_norm": 0.13811440765857697, "learning_rate": 0.0002991764705882353, "loss": 0.1434, "step": 516 }, { "epoch": 4.62, "grad_norm": 0.21657413244247437, "learning_rate": 0.0002991176470588235, "loss": 0.1848, "step": 517 }, { "epoch": 4.62, "grad_norm": 0.14271414279937744, "learning_rate": 0.0002990588235294117, "loss": 0.1323, "step": 518 }, { "epoch": 4.63, "grad_norm": 0.13417980074882507, "learning_rate": 0.000299, "loss": 0.1459, "step": 519 }, { "epoch": 4.64, "grad_norm": 0.13681019842624664, "learning_rate": 0.0002989411764705882, "loss": 0.1434, "step": 520 }, { "epoch": 4.65, "grad_norm": 0.1443687230348587, "learning_rate": 0.00029888235294117644, "loss": 0.1456, "step": 521 }, { "epoch": 4.66, "grad_norm": 0.13649730384349823, "learning_rate": 0.0002988235294117647, "loss": 0.1482, "step": 522 }, { "epoch": 4.67, "grad_norm": 0.2059570699930191, "learning_rate": 0.0002987647058823529, "loss": 0.1747, "step": 523 }, { "epoch": 4.68, "grad_norm": 0.13787458837032318, "learning_rate": 0.00029870588235294117, "loss": 0.1427, 
"step": 524 }, { "epoch": 4.69, "grad_norm": 0.13346923887729645, "learning_rate": 0.0002986470588235294, "loss": 0.141, "step": 525 }, { "epoch": 4.7, "grad_norm": 0.11142217367887497, "learning_rate": 0.0002985882352941176, "loss": 0.1331, "step": 526 }, { "epoch": 4.71, "grad_norm": 0.14373567700386047, "learning_rate": 0.00029852941176470584, "loss": 0.1341, "step": 527 }, { "epoch": 4.71, "grad_norm": 0.11874829977750778, "learning_rate": 0.0002984705882352941, "loss": 0.1452, "step": 528 }, { "epoch": 4.72, "grad_norm": 0.18138618767261505, "learning_rate": 0.00029841176470588233, "loss": 0.1531, "step": 529 }, { "epoch": 4.73, "grad_norm": 0.15628069639205933, "learning_rate": 0.0002983529411764706, "loss": 0.1584, "step": 530 }, { "epoch": 4.74, "grad_norm": 0.1584649682044983, "learning_rate": 0.00029829411764705876, "loss": 0.1381, "step": 531 }, { "epoch": 4.75, "grad_norm": 0.11128696799278259, "learning_rate": 0.00029823529411764706, "loss": 0.1366, "step": 532 }, { "epoch": 4.76, "grad_norm": 0.13757440447807312, "learning_rate": 0.00029817647058823525, "loss": 0.1492, "step": 533 }, { "epoch": 4.77, "grad_norm": 0.14631593227386475, "learning_rate": 0.0002981176470588235, "loss": 0.1423, "step": 534 }, { "epoch": 4.78, "grad_norm": 0.1369020640850067, "learning_rate": 0.00029805882352941174, "loss": 0.1432, "step": 535 }, { "epoch": 4.79, "grad_norm": 0.24188145995140076, "learning_rate": 0.000298, "loss": 0.1641, "step": 536 }, { "epoch": 4.79, "grad_norm": 0.13066911697387695, "learning_rate": 0.0002979411764705882, "loss": 0.1384, "step": 537 }, { "epoch": 4.8, "grad_norm": 0.13668803870677948, "learning_rate": 0.0002978823529411764, "loss": 0.1372, "step": 538 }, { "epoch": 4.81, "grad_norm": 0.11504946649074554, "learning_rate": 0.0002978235294117647, "loss": 0.1339, "step": 539 }, { "epoch": 4.82, "grad_norm": 0.16001489758491516, "learning_rate": 0.0002977647058823529, "loss": 0.1445, "step": 540 }, { "epoch": 4.83, "grad_norm": 
0.12840960919857025, "learning_rate": 0.00029770588235294114, "loss": 0.1342, "step": 541 }, { "epoch": 4.84, "grad_norm": 0.19975897669792175, "learning_rate": 0.0002976470588235294, "loss": 0.1696, "step": 542 }, { "epoch": 4.85, "grad_norm": 0.13238808512687683, "learning_rate": 0.00029758823529411763, "loss": 0.1426, "step": 543 }, { "epoch": 4.86, "grad_norm": 0.1489226073026657, "learning_rate": 0.00029752941176470587, "loss": 0.1349, "step": 544 }, { "epoch": 4.87, "grad_norm": 0.10776083916425705, "learning_rate": 0.0002974705882352941, "loss": 0.1351, "step": 545 }, { "epoch": 4.88, "grad_norm": 0.12893898785114288, "learning_rate": 0.0002974117647058823, "loss": 0.1235, "step": 546 }, { "epoch": 4.88, "grad_norm": 0.14186742901802063, "learning_rate": 0.00029735294117647055, "loss": 0.1424, "step": 547 }, { "epoch": 4.89, "grad_norm": 0.18122699856758118, "learning_rate": 0.0002972941176470588, "loss": 0.1575, "step": 548 }, { "epoch": 4.9, "grad_norm": 0.14512194693088531, "learning_rate": 0.00029723529411764704, "loss": 0.1374, "step": 549 }, { "epoch": 4.91, "grad_norm": 0.12667463719844818, "learning_rate": 0.0002971764705882353, "loss": 0.1422, "step": 550 }, { "epoch": 4.92, "grad_norm": 0.10744798183441162, "learning_rate": 0.00029711764705882347, "loss": 0.1231, "step": 551 }, { "epoch": 4.93, "grad_norm": 0.12515398859977722, "learning_rate": 0.00029705882352941177, "loss": 0.1458, "step": 552 }, { "epoch": 4.94, "grad_norm": 0.13184143602848053, "learning_rate": 0.00029699999999999996, "loss": 0.1361, "step": 553 }, { "epoch": 4.95, "grad_norm": 0.17081902921199799, "learning_rate": 0.0002969411764705882, "loss": 0.1616, "step": 554 }, { "epoch": 4.96, "grad_norm": 0.17962642014026642, "learning_rate": 0.00029688235294117644, "loss": 0.1613, "step": 555 }, { "epoch": 4.96, "grad_norm": 0.14994260668754578, "learning_rate": 0.0002968235294117647, "loss": 0.1378, "step": 556 }, { "epoch": 4.97, "grad_norm": 0.13085141777992249, "learning_rate": 
0.00029676470588235293, "loss": 0.1262, "step": 557 }, { "epoch": 4.98, "grad_norm": 0.11866625398397446, "learning_rate": 0.00029670588235294117, "loss": 0.1397, "step": 558 }, { "epoch": 4.99, "grad_norm": 0.14264865219593048, "learning_rate": 0.00029664705882352936, "loss": 0.1436, "step": 559 }, { "epoch": 5.0, "grad_norm": 0.16405989229679108, "learning_rate": 0.0002965882352941176, "loss": 0.1387, "step": 560 }, { "epoch": 5.01, "grad_norm": 0.11050975322723389, "learning_rate": 0.00029652941176470585, "loss": 0.1179, "step": 561 }, { "epoch": 5.02, "grad_norm": 0.10286074876785278, "learning_rate": 0.0002964705882352941, "loss": 0.1072, "step": 562 }, { "epoch": 5.03, "grad_norm": 0.11145926266908646, "learning_rate": 0.00029641176470588234, "loss": 0.1139, "step": 563 }, { "epoch": 5.04, "grad_norm": 0.1139025166630745, "learning_rate": 0.0002963529411764705, "loss": 0.1105, "step": 564 }, { "epoch": 5.04, "grad_norm": 0.11843300610780716, "learning_rate": 0.0002962941176470588, "loss": 0.1057, "step": 565 }, { "epoch": 5.05, "grad_norm": 0.13360877335071564, "learning_rate": 0.000296235294117647, "loss": 0.1109, "step": 566 }, { "epoch": 5.06, "grad_norm": 0.16410833597183228, "learning_rate": 0.0002961764705882353, "loss": 0.1193, "step": 567 }, { "epoch": 5.07, "grad_norm": 0.1047048345208168, "learning_rate": 0.0002961176470588235, "loss": 0.1105, "step": 568 }, { "epoch": 5.08, "grad_norm": 0.11158055067062378, "learning_rate": 0.00029605882352941174, "loss": 0.1051, "step": 569 }, { "epoch": 5.09, "grad_norm": 0.10403604805469513, "learning_rate": 0.000296, "loss": 0.1106, "step": 570 }, { "epoch": 5.1, "grad_norm": 0.11520044505596161, "learning_rate": 0.00029594117647058823, "loss": 0.11, "step": 571 }, { "epoch": 5.11, "grad_norm": 0.1380784809589386, "learning_rate": 0.00029588235294117647, "loss": 0.1066, "step": 572 }, { "epoch": 5.12, "grad_norm": 0.1517052948474884, "learning_rate": 0.00029582352941176466, "loss": 0.1411, "step": 573 }, { 
"epoch": 5.12, "grad_norm": 0.1428508311510086, "learning_rate": 0.0002957647058823529, "loss": 0.1069, "step": 574 }, { "epoch": 5.13, "grad_norm": 0.11712061613798141, "learning_rate": 0.00029570588235294115, "loss": 0.1083, "step": 575 }, { "epoch": 5.14, "grad_norm": 0.12171980738639832, "learning_rate": 0.0002956470588235294, "loss": 0.1052, "step": 576 }, { "epoch": 5.15, "grad_norm": 0.14088501036167145, "learning_rate": 0.00029558823529411763, "loss": 0.1142, "step": 577 }, { "epoch": 5.16, "grad_norm": 0.11284144222736359, "learning_rate": 0.0002955294117647059, "loss": 0.0995, "step": 578 }, { "epoch": 5.17, "grad_norm": 0.1761561781167984, "learning_rate": 0.00029547058823529407, "loss": 0.1189, "step": 579 }, { "epoch": 5.18, "grad_norm": 0.1454172134399414, "learning_rate": 0.0002954117647058823, "loss": 0.109, "step": 580 }, { "epoch": 5.19, "grad_norm": 0.10470675677061081, "learning_rate": 0.00029535294117647055, "loss": 0.1041, "step": 581 }, { "epoch": 5.2, "grad_norm": 0.13161127269268036, "learning_rate": 0.0002952941176470588, "loss": 0.1081, "step": 582 }, { "epoch": 5.21, "grad_norm": 0.1211889311671257, "learning_rate": 0.00029523529411764704, "loss": 0.1009, "step": 583 }, { "epoch": 5.21, "grad_norm": 0.10698932409286499, "learning_rate": 0.0002951764705882353, "loss": 0.0989, "step": 584 }, { "epoch": 5.22, "grad_norm": 0.1922842413187027, "learning_rate": 0.00029511764705882353, "loss": 0.1321, "step": 585 }, { "epoch": 5.23, "grad_norm": 0.14545674622058868, "learning_rate": 0.0002950588235294117, "loss": 0.1151, "step": 586 }, { "epoch": 5.24, "grad_norm": 0.10841576009988785, "learning_rate": 0.00029499999999999996, "loss": 0.0999, "step": 587 }, { "epoch": 5.25, "grad_norm": 0.12169073522090912, "learning_rate": 0.0002949411764705882, "loss": 0.1161, "step": 588 }, { "epoch": 5.26, "grad_norm": 0.1342030167579651, "learning_rate": 0.00029488235294117645, "loss": 0.1053, "step": 589 }, { "epoch": 5.27, "grad_norm": 
0.10973326116800308, "learning_rate": 0.0002948235294117647, "loss": 0.1066, "step": 590 }, { "epoch": 5.28, "grad_norm": 0.1585254669189453, "learning_rate": 0.00029476470588235293, "loss": 0.1154, "step": 591 }, { "epoch": 5.29, "grad_norm": 0.14881926774978638, "learning_rate": 0.0002947058823529411, "loss": 0.1177, "step": 592 }, { "epoch": 5.29, "grad_norm": 0.1331123262643814, "learning_rate": 0.00029464705882352937, "loss": 0.1101, "step": 593 }, { "epoch": 5.3, "grad_norm": 0.11872667074203491, "learning_rate": 0.0002945882352941176, "loss": 0.0973, "step": 594 }, { "epoch": 5.31, "grad_norm": 0.11906508356332779, "learning_rate": 0.00029452941176470585, "loss": 0.1039, "step": 595 }, { "epoch": 5.32, "grad_norm": 0.1454726755619049, "learning_rate": 0.0002944705882352941, "loss": 0.1054, "step": 596 }, { "epoch": 5.33, "grad_norm": 0.13531070947647095, "learning_rate": 0.00029441176470588234, "loss": 0.1064, "step": 597 }, { "epoch": 5.34, "grad_norm": 0.19511525332927704, "learning_rate": 0.0002943529411764706, "loss": 0.1233, "step": 598 }, { "epoch": 5.35, "grad_norm": 0.11403120309114456, "learning_rate": 0.0002942941176470588, "loss": 0.1138, "step": 599 }, { "epoch": 5.36, "grad_norm": 0.11977498233318329, "learning_rate": 0.00029423529411764707, "loss": 0.1059, "step": 600 }, { "epoch": 5.36, "eval_cer": 0.042773975563097345, "eval_loss": 0.1667700558900833, "eval_runtime": 21.8271, "eval_samples_per_second": 121.042, "eval_steps_per_second": 1.924, "eval_wer": 0.15021816739389132, "step": 600 }, { "epoch": 5.37, "grad_norm": 0.11358664929866791, "learning_rate": 0.00029417647058823526, "loss": 0.1036, "step": 601 }, { "epoch": 5.38, "grad_norm": 0.11147243529558182, "learning_rate": 0.0002941176470588235, "loss": 0.1082, "step": 602 }, { "epoch": 5.38, "grad_norm": 0.1350492686033249, "learning_rate": 0.00029405882352941175, "loss": 0.1051, "step": 603 }, { "epoch": 5.39, "grad_norm": 0.1623750776052475, "learning_rate": 0.000294, "loss": 0.1249, 
"step": 604 }, { "epoch": 5.4, "grad_norm": 0.13666796684265137, "learning_rate": 0.00029394117647058823, "loss": 0.1104, "step": 605 }, { "epoch": 5.41, "grad_norm": 0.10063891112804413, "learning_rate": 0.0002938823529411764, "loss": 0.111, "step": 606 }, { "epoch": 5.42, "grad_norm": 0.11101557314395905, "learning_rate": 0.00029382352941176467, "loss": 0.1014, "step": 607 }, { "epoch": 5.43, "grad_norm": 0.11853890120983124, "learning_rate": 0.0002937647058823529, "loss": 0.1067, "step": 608 }, { "epoch": 5.44, "grad_norm": 0.12706832587718964, "learning_rate": 0.00029370588235294115, "loss": 0.1073, "step": 609 }, { "epoch": 5.45, "grad_norm": 0.16450826823711395, "learning_rate": 0.0002936470588235294, "loss": 0.1272, "step": 610 }, { "epoch": 5.46, "grad_norm": 0.10771495848894119, "learning_rate": 0.00029358823529411764, "loss": 0.111, "step": 611 }, { "epoch": 5.46, "grad_norm": 0.10839465260505676, "learning_rate": 0.00029352941176470583, "loss": 0.099, "step": 612 }, { "epoch": 5.47, "grad_norm": 0.12419650703668594, "learning_rate": 0.00029347058823529413, "loss": 0.1043, "step": 613 }, { "epoch": 5.48, "grad_norm": 0.10811444371938705, "learning_rate": 0.0002934117647058823, "loss": 0.1082, "step": 614 }, { "epoch": 5.49, "grad_norm": 0.10220162570476532, "learning_rate": 0.00029335294117647056, "loss": 0.099, "step": 615 }, { "epoch": 5.5, "grad_norm": 0.14317558705806732, "learning_rate": 0.0002932941176470588, "loss": 0.1037, "step": 616 }, { "epoch": 5.51, "grad_norm": 0.1705309897661209, "learning_rate": 0.00029323529411764705, "loss": 0.127, "step": 617 }, { "epoch": 5.52, "grad_norm": 0.0989428237080574, "learning_rate": 0.0002931764705882353, "loss": 0.1117, "step": 618 }, { "epoch": 5.53, "grad_norm": 0.12371882796287537, "learning_rate": 0.0002931176470588235, "loss": 0.1131, "step": 619 }, { "epoch": 5.54, "grad_norm": 0.11418908834457397, "learning_rate": 0.0002930588235294117, "loss": 0.1058, "step": 620 }, { "epoch": 5.54, "grad_norm": 
0.1010790467262268, "learning_rate": 0.00029299999999999997, "loss": 0.102, "step": 621 }, { "epoch": 5.55, "grad_norm": 0.14377234876155853, "learning_rate": 0.0002929411764705882, "loss": 0.1122, "step": 622 }, { "epoch": 5.56, "grad_norm": 0.1575341671705246, "learning_rate": 0.00029288235294117645, "loss": 0.1126, "step": 623 }, { "epoch": 5.57, "grad_norm": 0.12532857060432434, "learning_rate": 0.0002928235294117647, "loss": 0.1055, "step": 624 }, { "epoch": 5.58, "grad_norm": 0.1194140687584877, "learning_rate": 0.0002927647058823529, "loss": 0.0976, "step": 625 }, { "epoch": 5.59, "grad_norm": 0.11946146935224533, "learning_rate": 0.0002927058823529412, "loss": 0.1045, "step": 626 }, { "epoch": 5.6, "grad_norm": 0.10743141919374466, "learning_rate": 0.00029264705882352937, "loss": 0.1, "step": 627 }, { "epoch": 5.61, "grad_norm": 0.12224756926298141, "learning_rate": 0.0002925882352941176, "loss": 0.1013, "step": 628 }, { "epoch": 5.62, "grad_norm": 0.18434454500675201, "learning_rate": 0.00029252941176470586, "loss": 0.1182, "step": 629 }, { "epoch": 5.62, "grad_norm": 0.10739492624998093, "learning_rate": 0.0002924705882352941, "loss": 0.1031, "step": 630 }, { "epoch": 5.63, "grad_norm": 0.121010921895504, "learning_rate": 0.00029241176470588235, "loss": 0.1054, "step": 631 }, { "epoch": 5.64, "grad_norm": 0.11945497244596481, "learning_rate": 0.00029235294117647054, "loss": 0.0955, "step": 632 }, { "epoch": 5.65, "grad_norm": 0.11026103794574738, "learning_rate": 0.00029229411764705883, "loss": 0.1097, "step": 633 }, { "epoch": 5.66, "grad_norm": 0.11731443554162979, "learning_rate": 0.000292235294117647, "loss": 0.1008, "step": 634 }, { "epoch": 5.67, "grad_norm": 0.19039209187030792, "learning_rate": 0.00029217647058823527, "loss": 0.135, "step": 635 }, { "epoch": 5.68, "grad_norm": 0.12411346286535263, "learning_rate": 0.0002921176470588235, "loss": 0.1054, "step": 636 }, { "epoch": 5.69, "grad_norm": 0.12534594535827637, "learning_rate": 
0.00029205882352941175, "loss": 0.1025, "step": 637 }, { "epoch": 5.7, "grad_norm": 0.11824847012758255, "learning_rate": 0.000292, "loss": 0.1018, "step": 638 }, { "epoch": 5.71, "grad_norm": 0.11353108286857605, "learning_rate": 0.0002919411764705882, "loss": 0.1052, "step": 639 }, { "epoch": 5.71, "grad_norm": 0.12157388031482697, "learning_rate": 0.00029188235294117643, "loss": 0.0997, "step": 640 }, { "epoch": 5.72, "grad_norm": 0.13171547651290894, "learning_rate": 0.00029182352941176467, "loss": 0.1024, "step": 641 }, { "epoch": 5.73, "grad_norm": 0.18709394335746765, "learning_rate": 0.0002917647058823529, "loss": 0.1241, "step": 642 }, { "epoch": 5.74, "grad_norm": 0.12204131484031677, "learning_rate": 0.00029170588235294116, "loss": 0.1061, "step": 643 }, { "epoch": 5.75, "grad_norm": 0.1004999428987503, "learning_rate": 0.0002916470588235294, "loss": 0.1122, "step": 644 }, { "epoch": 5.76, "grad_norm": 0.12732595205307007, "learning_rate": 0.0002915882352941176, "loss": 0.1086, "step": 645 }, { "epoch": 5.77, "grad_norm": 0.12090475857257843, "learning_rate": 0.0002915294117647059, "loss": 0.112, "step": 646 }, { "epoch": 5.78, "grad_norm": 0.11316008865833282, "learning_rate": 0.0002914705882352941, "loss": 0.1057, "step": 647 }, { "epoch": 5.79, "grad_norm": 0.1507110595703125, "learning_rate": 0.0002914117647058823, "loss": 0.1152, "step": 648 }, { "epoch": 5.79, "grad_norm": 0.12285605818033218, "learning_rate": 0.00029135294117647057, "loss": 0.1077, "step": 649 }, { "epoch": 5.8, "grad_norm": 0.1024705171585083, "learning_rate": 0.0002912941176470588, "loss": 0.0996, "step": 650 }, { "epoch": 5.81, "grad_norm": 0.1180039569735527, "learning_rate": 0.00029123529411764705, "loss": 0.0967, "step": 651 }, { "epoch": 5.82, "grad_norm": 0.11817004531621933, "learning_rate": 0.00029117647058823524, "loss": 0.1015, "step": 652 }, { "epoch": 5.83, "grad_norm": 0.12755826115608215, "learning_rate": 0.0002911176470588235, "loss": 0.0987, "step": 653 }, { 
"epoch": 5.84, "grad_norm": 0.20057161152362823, "learning_rate": 0.00029105882352941173, "loss": 0.1245, "step": 654 }, { "epoch": 5.85, "grad_norm": 0.11148959398269653, "learning_rate": 0.00029099999999999997, "loss": 0.0975, "step": 655 }, { "epoch": 5.86, "grad_norm": 0.10137537866830826, "learning_rate": 0.0002909411764705882, "loss": 0.1051, "step": 656 }, { "epoch": 5.87, "grad_norm": 0.10529523342847824, "learning_rate": 0.00029088235294117646, "loss": 0.1043, "step": 657 }, { "epoch": 5.88, "grad_norm": 0.11081627756357193, "learning_rate": 0.00029082352941176465, "loss": 0.0981, "step": 658 }, { "epoch": 5.88, "grad_norm": 0.11853086948394775, "learning_rate": 0.00029076470588235295, "loss": 0.1061, "step": 659 }, { "epoch": 5.89, "grad_norm": 0.18606580793857574, "learning_rate": 0.00029070588235294113, "loss": 0.1187, "step": 660 }, { "epoch": 5.9, "grad_norm": 0.11597760021686554, "learning_rate": 0.0002906470588235294, "loss": 0.1109, "step": 661 }, { "epoch": 5.91, "grad_norm": 0.10947950929403305, "learning_rate": 0.0002905882352941176, "loss": 0.1065, "step": 662 }, { "epoch": 5.92, "grad_norm": 0.1000477746129036, "learning_rate": 0.00029052941176470587, "loss": 0.0936, "step": 663 }, { "epoch": 5.93, "grad_norm": 0.11304661631584167, "learning_rate": 0.0002904705882352941, "loss": 0.1028, "step": 664 }, { "epoch": 5.94, "grad_norm": 0.10919023305177689, "learning_rate": 0.0002904117647058823, "loss": 0.099, "step": 665 }, { "epoch": 5.95, "grad_norm": 0.1719236522912979, "learning_rate": 0.0002903529411764706, "loss": 0.1139, "step": 666 }, { "epoch": 5.96, "grad_norm": 0.1882009357213974, "learning_rate": 0.0002902941176470588, "loss": 0.1242, "step": 667 }, { "epoch": 5.96, "grad_norm": 0.11197476089000702, "learning_rate": 0.00029023529411764703, "loss": 0.1034, "step": 668 }, { "epoch": 5.97, "grad_norm": 0.1102653369307518, "learning_rate": 0.00029017647058823527, "loss": 0.0969, "step": 669 }, { "epoch": 5.98, "grad_norm": 
0.10863769054412842, "learning_rate": 0.0002901176470588235, "loss": 0.0934, "step": 670 }, { "epoch": 5.99, "grad_norm": 0.12643195688724518, "learning_rate": 0.00029005882352941176, "loss": 0.0985, "step": 671 }, { "epoch": 6.0, "grad_norm": 0.14720340073108673, "learning_rate": 0.00029, "loss": 0.0892, "step": 672 }, { "epoch": 6.01, "grad_norm": 0.11089643836021423, "learning_rate": 0.0002899411764705882, "loss": 0.0855, "step": 673 }, { "epoch": 6.02, "grad_norm": 0.09804321080446243, "learning_rate": 0.00028988235294117643, "loss": 0.0903, "step": 674 }, { "epoch": 6.03, "grad_norm": 0.11876717209815979, "learning_rate": 0.0002898235294117647, "loss": 0.0864, "step": 675 }, { "epoch": 6.04, "grad_norm": 0.12075202167034149, "learning_rate": 0.0002897647058823529, "loss": 0.0887, "step": 676 }, { "epoch": 6.04, "grad_norm": 0.10439233481884003, "learning_rate": 0.00028970588235294116, "loss": 0.0878, "step": 677 }, { "epoch": 6.05, "grad_norm": 0.1481756716966629, "learning_rate": 0.00028964705882352935, "loss": 0.0866, "step": 678 }, { "epoch": 6.06, "grad_norm": 0.15272961556911469, "learning_rate": 0.00028958823529411765, "loss": 0.0986, "step": 679 }, { "epoch": 6.07, "grad_norm": 0.10657694935798645, "learning_rate": 0.00028952941176470584, "loss": 0.0892, "step": 680 }, { "epoch": 6.08, "grad_norm": 0.11053676903247833, "learning_rate": 0.0002894705882352941, "loss": 0.0812, "step": 681 }, { "epoch": 6.09, "grad_norm": 0.10250803083181381, "learning_rate": 0.00028941176470588233, "loss": 0.0771, "step": 682 }, { "epoch": 6.1, "grad_norm": 0.09995327144861221, "learning_rate": 0.00028935294117647057, "loss": 0.078, "step": 683 }, { "epoch": 6.11, "grad_norm": 0.11875566095113754, "learning_rate": 0.0002892941176470588, "loss": 0.0762, "step": 684 }, { "epoch": 6.12, "grad_norm": 0.17253772914409637, "learning_rate": 0.00028923529411764706, "loss": 0.0989, "step": 685 }, { "epoch": 6.12, "grad_norm": 0.11323681473731995, "learning_rate": 
0.00028917647058823525, "loss": 0.082, "step": 686 }, { "epoch": 6.13, "grad_norm": 0.11157402396202087, "learning_rate": 0.0002891176470588235, "loss": 0.0789, "step": 687 }, { "epoch": 6.14, "grad_norm": 0.10261103510856628, "learning_rate": 0.00028905882352941173, "loss": 0.082, "step": 688 }, { "epoch": 6.15, "grad_norm": 0.10514463484287262, "learning_rate": 0.000289, "loss": 0.0778, "step": 689 }, { "epoch": 6.16, "grad_norm": 0.14041176438331604, "learning_rate": 0.0002889411764705882, "loss": 0.0798, "step": 690 }, { "epoch": 6.17, "grad_norm": 0.19121907651424408, "learning_rate": 0.0002888823529411764, "loss": 0.1093, "step": 691 }, { "epoch": 6.18, "grad_norm": 0.10961633175611496, "learning_rate": 0.0002888235294117647, "loss": 0.0888, "step": 692 }, { "epoch": 6.19, "grad_norm": 0.09609069675207138, "learning_rate": 0.0002887647058823529, "loss": 0.0781, "step": 693 }, { "epoch": 6.2, "grad_norm": 0.11091334372758865, "learning_rate": 0.00028870588235294114, "loss": 0.0908, "step": 694 }, { "epoch": 6.21, "grad_norm": 0.11728901416063309, "learning_rate": 0.0002886470588235294, "loss": 0.0898, "step": 695 }, { "epoch": 6.21, "grad_norm": 0.11376498639583588, "learning_rate": 0.00028858823529411763, "loss": 0.0848, "step": 696 }, { "epoch": 6.22, "grad_norm": 0.18690404295921326, "learning_rate": 0.00028852941176470587, "loss": 0.1101, "step": 697 }, { "epoch": 6.23, "grad_norm": 0.10487819463014603, "learning_rate": 0.00028847058823529406, "loss": 0.0858, "step": 698 }, { "epoch": 6.24, "grad_norm": 0.09847170114517212, "learning_rate": 0.00028841176470588236, "loss": 0.0828, "step": 699 }, { "epoch": 6.25, "grad_norm": 0.09805063903331757, "learning_rate": 0.00028835294117647055, "loss": 0.0825, "step": 700 }, { "epoch": 6.25, "eval_cer": 0.03928996929414108, "eval_loss": 0.16620230674743652, "eval_runtime": 22.7244, "eval_samples_per_second": 116.263, "eval_steps_per_second": 1.848, "eval_wer": 0.14061880206267355, "step": 700 }, { "epoch": 6.26, 
"grad_norm": 0.11333134770393372, "learning_rate": 0.0002882941176470588, "loss": 0.0823, "step": 701 }, { "epoch": 6.27, "grad_norm": 0.09311158955097198, "learning_rate": 0.00028823529411764703, "loss": 0.0745, "step": 702 }, { "epoch": 6.28, "grad_norm": 0.11538677662611008, "learning_rate": 0.0002881764705882353, "loss": 0.0822, "step": 703 }, { "epoch": 6.29, "grad_norm": 0.15968160331249237, "learning_rate": 0.0002881176470588235, "loss": 0.0889, "step": 704 }, { "epoch": 6.29, "grad_norm": 0.12699197232723236, "learning_rate": 0.00028805882352941176, "loss": 0.0884, "step": 705 }, { "epoch": 6.3, "grad_norm": 0.09725473076105118, "learning_rate": 0.00028799999999999995, "loss": 0.0818, "step": 706 }, { "epoch": 6.31, "grad_norm": 0.11126311868429184, "learning_rate": 0.0002879411764705882, "loss": 0.0866, "step": 707 }, { "epoch": 6.32, "grad_norm": 0.12113364040851593, "learning_rate": 0.00028788235294117644, "loss": 0.0846, "step": 708 }, { "epoch": 6.33, "grad_norm": 0.11363382637500763, "learning_rate": 0.0002878235294117647, "loss": 0.0838, "step": 709 }, { "epoch": 6.34, "grad_norm": 0.17675095796585083, "learning_rate": 0.0002877647058823529, "loss": 0.1035, "step": 710 }, { "epoch": 6.35, "grad_norm": 0.12177148461341858, "learning_rate": 0.0002877058823529411, "loss": 0.0893, "step": 711 }, { "epoch": 6.36, "grad_norm": 0.10355890542268753, "learning_rate": 0.0002876470588235294, "loss": 0.0812, "step": 712 }, { "epoch": 6.37, "grad_norm": 0.11242563277482986, "learning_rate": 0.0002875882352941176, "loss": 0.0747, "step": 713 }, { "epoch": 6.38, "grad_norm": 0.12086278945207596, "learning_rate": 0.00028752941176470585, "loss": 0.0832, "step": 714 }, { "epoch": 6.38, "grad_norm": 0.10405004024505615, "learning_rate": 0.0002874705882352941, "loss": 0.0785, "step": 715 }, { "epoch": 6.39, "grad_norm": 0.17571356892585754, "learning_rate": 0.00028741176470588233, "loss": 0.0951, "step": 716 }, { "epoch": 6.4, "grad_norm": 0.10275229066610336, 
"learning_rate": 0.0002873529411764706, "loss": 0.0838, "step": 717 }, { "epoch": 6.41, "grad_norm": 0.09858614951372147, "learning_rate": 0.0002872941176470588, "loss": 0.0854, "step": 718 }, { "epoch": 6.42, "grad_norm": 0.11851891875267029, "learning_rate": 0.000287235294117647, "loss": 0.0809, "step": 719 }, { "epoch": 6.43, "grad_norm": 0.10504165291786194, "learning_rate": 0.00028717647058823525, "loss": 0.083, "step": 720 }, { "epoch": 6.44, "grad_norm": 0.10159049928188324, "learning_rate": 0.0002871176470588235, "loss": 0.0781, "step": 721 }, { "epoch": 6.45, "grad_norm": 0.17435893416404724, "learning_rate": 0.00028705882352941174, "loss": 0.0963, "step": 722 }, { "epoch": 6.46, "grad_norm": 0.11468575149774551, "learning_rate": 0.000287, "loss": 0.0885, "step": 723 }, { "epoch": 6.46, "grad_norm": 0.09264139831066132, "learning_rate": 0.00028694117647058817, "loss": 0.0801, "step": 724 }, { "epoch": 6.47, "grad_norm": 0.12143049389123917, "learning_rate": 0.00028688235294117647, "loss": 0.0918, "step": 725 }, { "epoch": 6.48, "grad_norm": 0.15513074398040771, "learning_rate": 0.00028682352941176466, "loss": 0.0837, "step": 726 }, { "epoch": 6.49, "grad_norm": 0.09233376383781433, "learning_rate": 0.00028676470588235296, "loss": 0.0744, "step": 727 }, { "epoch": 6.5, "grad_norm": 0.14203836023807526, "learning_rate": 0.00028670588235294115, "loss": 0.0887, "step": 728 }, { "epoch": 6.51, "grad_norm": 0.14789164066314697, "learning_rate": 0.0002866470588235294, "loss": 0.0991, "step": 729 }, { "epoch": 6.52, "grad_norm": 0.10407066345214844, "learning_rate": 0.00028658823529411763, "loss": 0.0868, "step": 730 }, { "epoch": 6.53, "grad_norm": 0.10366200655698776, "learning_rate": 0.0002865294117647059, "loss": 0.0813, "step": 731 }, { "epoch": 6.54, "grad_norm": 0.11755690723657608, "learning_rate": 0.0002864705882352941, "loss": 0.0829, "step": 732 }, { "epoch": 6.54, "grad_norm": 0.0985301211476326, "learning_rate": 0.0002864117647058823, "loss": 0.0759, 
"step": 733 }, { "epoch": 6.55, "grad_norm": 0.10445389896631241, "learning_rate": 0.00028635294117647055, "loss": 0.074, "step": 734 }, { "epoch": 6.56, "grad_norm": 0.19053040444850922, "learning_rate": 0.0002862941176470588, "loss": 0.1166, "step": 735 }, { "epoch": 6.57, "grad_norm": 0.10317280888557434, "learning_rate": 0.00028623529411764704, "loss": 0.0845, "step": 736 }, { "epoch": 6.58, "grad_norm": 0.0945534035563469, "learning_rate": 0.0002861764705882353, "loss": 0.0769, "step": 737 }, { "epoch": 6.59, "grad_norm": 0.10750401020050049, "learning_rate": 0.0002861176470588235, "loss": 0.0849, "step": 738 }, { "epoch": 6.6, "grad_norm": 0.11884390562772751, "learning_rate": 0.0002860588235294117, "loss": 0.0834, "step": 739 }, { "epoch": 6.61, "grad_norm": 0.10446945577859879, "learning_rate": 0.00028599999999999996, "loss": 0.0803, "step": 740 }, { "epoch": 6.62, "grad_norm": 0.170147106051445, "learning_rate": 0.0002859411764705882, "loss": 0.106, "step": 741 }, { "epoch": 6.62, "grad_norm": 0.12705525755882263, "learning_rate": 0.00028588235294117645, "loss": 0.0899, "step": 742 }, { "epoch": 6.63, "grad_norm": 0.09435348957777023, "learning_rate": 0.0002858235294117647, "loss": 0.0855, "step": 743 }, { "epoch": 6.64, "grad_norm": 0.10736219584941864, "learning_rate": 0.00028576470588235293, "loss": 0.0775, "step": 744 }, { "epoch": 6.65, "grad_norm": 0.11375342309474945, "learning_rate": 0.0002857058823529412, "loss": 0.0836, "step": 745 }, { "epoch": 6.66, "grad_norm": 0.11772213876247406, "learning_rate": 0.00028564705882352937, "loss": 0.0828, "step": 746 }, { "epoch": 6.67, "grad_norm": 0.18420763313770294, "learning_rate": 0.0002855882352941176, "loss": 0.0961, "step": 747 }, { "epoch": 6.68, "grad_norm": 0.10926060378551483, "learning_rate": 0.00028552941176470585, "loss": 0.0871, "step": 748 }, { "epoch": 6.69, "grad_norm": 0.12082089483737946, "learning_rate": 0.0002854705882352941, "loss": 0.0888, "step": 749 }, { "epoch": 6.7, "grad_norm": 
0.11248403042554855, "learning_rate": 0.00028541176470588234, "loss": 0.0875, "step": 750 }, { "epoch": 6.71, "grad_norm": 0.10935381054878235, "learning_rate": 0.0002853529411764706, "loss": 0.0829, "step": 751 }, { "epoch": 6.71, "grad_norm": 0.12610478699207306, "learning_rate": 0.00028529411764705877, "loss": 0.0799, "step": 752 }, { "epoch": 6.72, "grad_norm": 0.13737954199314117, "learning_rate": 0.000285235294117647, "loss": 0.0953, "step": 753 }, { "epoch": 6.73, "grad_norm": 0.16050100326538086, "learning_rate": 0.00028517647058823526, "loss": 0.1001, "step": 754 }, { "epoch": 6.74, "grad_norm": 0.12358574569225311, "learning_rate": 0.0002851176470588235, "loss": 0.0817, "step": 755 }, { "epoch": 6.75, "grad_norm": 0.1142040342092514, "learning_rate": 0.00028505882352941175, "loss": 0.0835, "step": 756 }, { "epoch": 6.76, "grad_norm": 0.10749552398920059, "learning_rate": 0.000285, "loss": 0.0834, "step": 757 }, { "epoch": 6.77, "grad_norm": 0.10637697577476501, "learning_rate": 0.00028494117647058823, "loss": 0.0774, "step": 758 }, { "epoch": 6.78, "grad_norm": 0.12128923088312149, "learning_rate": 0.0002848823529411764, "loss": 0.0817, "step": 759 }, { "epoch": 6.79, "grad_norm": 0.15798382461071014, "learning_rate": 0.0002848235294117647, "loss": 0.1014, "step": 760 }, { "epoch": 6.79, "grad_norm": 0.11042901873588562, "learning_rate": 0.0002847647058823529, "loss": 0.0789, "step": 761 }, { "epoch": 6.8, "grad_norm": 0.10593892633914948, "learning_rate": 0.00028470588235294115, "loss": 0.0795, "step": 762 }, { "epoch": 6.81, "grad_norm": 0.10690458863973618, "learning_rate": 0.0002846470588235294, "loss": 0.0757, "step": 763 }, { "epoch": 6.82, "grad_norm": 0.1336659938097, "learning_rate": 0.00028458823529411764, "loss": 0.0826, "step": 764 }, { "epoch": 6.83, "grad_norm": 0.10572272539138794, "learning_rate": 0.0002845294117647059, "loss": 0.0831, "step": 765 }, { "epoch": 6.84, "grad_norm": 0.14707894623279572, "learning_rate": 
0.00028447058823529407, "loss": 0.0923, "step": 766 }, { "epoch": 6.85, "grad_norm": 0.11748572438955307, "learning_rate": 0.0002844117647058823, "loss": 0.0855, "step": 767 }, { "epoch": 6.86, "grad_norm": 0.10177357494831085, "learning_rate": 0.00028435294117647056, "loss": 0.0794, "step": 768 }, { "epoch": 6.87, "grad_norm": 0.11586737632751465, "learning_rate": 0.0002842941176470588, "loss": 0.0819, "step": 769 }, { "epoch": 6.88, "grad_norm": 0.10180850327014923, "learning_rate": 0.00028423529411764704, "loss": 0.0766, "step": 770 }, { "epoch": 6.88, "grad_norm": 0.10513108968734741, "learning_rate": 0.0002841764705882353, "loss": 0.0759, "step": 771 }, { "epoch": 6.89, "grad_norm": 0.1731770634651184, "learning_rate": 0.0002841176470588235, "loss": 0.1055, "step": 772 }, { "epoch": 6.9, "grad_norm": 0.11565803736448288, "learning_rate": 0.0002840588235294118, "loss": 0.0882, "step": 773 }, { "epoch": 6.91, "grad_norm": 0.09913301467895508, "learning_rate": 0.00028399999999999996, "loss": 0.0874, "step": 774 }, { "epoch": 6.92, "grad_norm": 0.10817453265190125, "learning_rate": 0.0002839411764705882, "loss": 0.0811, "step": 775 }, { "epoch": 6.93, "grad_norm": 0.09647570550441742, "learning_rate": 0.00028388235294117645, "loss": 0.079, "step": 776 }, { "epoch": 6.94, "grad_norm": 0.10882366448640823, "learning_rate": 0.0002838235294117647, "loss": 0.0809, "step": 777 }, { "epoch": 6.95, "grad_norm": 0.11249500513076782, "learning_rate": 0.00028376470588235294, "loss": 0.0757, "step": 778 }, { "epoch": 6.96, "grad_norm": 0.19009627401828766, "learning_rate": 0.00028370588235294113, "loss": 0.1112, "step": 779 }, { "epoch": 6.96, "grad_norm": 0.10359571129083633, "learning_rate": 0.00028364705882352937, "loss": 0.0851, "step": 780 }, { "epoch": 6.97, "grad_norm": 0.12161215394735336, "learning_rate": 0.0002835882352941176, "loss": 0.0765, "step": 781 }, { "epoch": 6.98, "grad_norm": 0.09756043553352356, "learning_rate": 0.00028352941176470586, "loss": 0.0805, 
"step": 782 }, { "epoch": 6.99, "grad_norm": 0.11786554753780365, "learning_rate": 0.0002834705882352941, "loss": 0.0743, "step": 783 }, { "epoch": 7.0, "grad_norm": 0.1479356288909912, "learning_rate": 0.00028341176470588234, "loss": 0.0802, "step": 784 }, { "epoch": 7.01, "grad_norm": 0.08490903675556183, "learning_rate": 0.00028335294117647053, "loss": 0.0692, "step": 785 }, { "epoch": 7.02, "grad_norm": 0.08878425508737564, "learning_rate": 0.00028329411764705883, "loss": 0.0697, "step": 786 }, { "epoch": 7.03, "grad_norm": 0.0996265783905983, "learning_rate": 0.000283235294117647, "loss": 0.0697, "step": 787 }, { "epoch": 7.04, "grad_norm": 0.09276427328586578, "learning_rate": 0.00028317647058823526, "loss": 0.0656, "step": 788 }, { "epoch": 7.04, "grad_norm": 0.08711417764425278, "learning_rate": 0.0002831176470588235, "loss": 0.0622, "step": 789 }, { "epoch": 7.05, "grad_norm": 0.11598426848649979, "learning_rate": 0.00028305882352941175, "loss": 0.0689, "step": 790 }, { "epoch": 7.06, "grad_norm": 0.13975971937179565, "learning_rate": 0.000283, "loss": 0.0851, "step": 791 }, { "epoch": 7.07, "grad_norm": 0.09264056384563446, "learning_rate": 0.0002829411764705882, "loss": 0.0723, "step": 792 }, { "epoch": 7.08, "grad_norm": 0.09951192885637283, "learning_rate": 0.0002828823529411765, "loss": 0.0676, "step": 793 }, { "epoch": 7.09, "grad_norm": 0.12242364138364792, "learning_rate": 0.00028282352941176467, "loss": 0.0749, "step": 794 }, { "epoch": 7.1, "grad_norm": 0.09080631285905838, "learning_rate": 0.0002827647058823529, "loss": 0.0653, "step": 795 }, { "epoch": 7.11, "grad_norm": 0.110308937728405, "learning_rate": 0.00028270588235294116, "loss": 0.0672, "step": 796 }, { "epoch": 7.12, "grad_norm": 0.18142534792423248, "learning_rate": 0.0002826470588235294, "loss": 0.0875, "step": 797 }, { "epoch": 7.12, "grad_norm": 0.09989775717258453, "learning_rate": 0.00028258823529411764, "loss": 0.0675, "step": 798 }, { "epoch": 7.13, "grad_norm": 
0.09028211981058121, "learning_rate": 0.00028252941176470583, "loss": 0.0658, "step": 799 }, { "epoch": 7.14, "grad_norm": 0.13056732714176178, "learning_rate": 0.0002824705882352941, "loss": 0.0679, "step": 800 }, { "epoch": 7.14, "eval_cer": 0.03930124439533512, "eval_loss": 0.17467264831066132, "eval_runtime": 21.9956, "eval_samples_per_second": 120.115, "eval_steps_per_second": 1.909, "eval_wer": 0.135739785799286, "step": 800 }, { "epoch": 7.15, "grad_norm": 0.11192794889211655, "learning_rate": 0.0002824117647058823, "loss": 0.0643, "step": 801 }, { "epoch": 7.16, "grad_norm": 0.11463401466608047, "learning_rate": 0.00028235294117647056, "loss": 0.0716, "step": 802 }, { "epoch": 7.17, "grad_norm": 0.1752871721982956, "learning_rate": 0.0002822941176470588, "loss": 0.0864, "step": 803 }, { "epoch": 7.18, "grad_norm": 0.16184575855731964, "learning_rate": 0.00028223529411764705, "loss": 0.0736, "step": 804 }, { "epoch": 7.19, "grad_norm": 0.08955268561840057, "learning_rate": 0.00028217647058823524, "loss": 0.065, "step": 805 }, { "epoch": 7.2, "grad_norm": 0.13877011835575104, "learning_rate": 0.00028211764705882354, "loss": 0.0702, "step": 806 }, { "epoch": 7.21, "grad_norm": 0.13071797788143158, "learning_rate": 0.0002820588235294117, "loss": 0.0765, "step": 807 }, { "epoch": 7.21, "grad_norm": 0.09239855408668518, "learning_rate": 0.00028199999999999997, "loss": 0.0658, "step": 808 }, { "epoch": 7.22, "grad_norm": 0.18327827751636505, "learning_rate": 0.0002819411764705882, "loss": 0.0807, "step": 809 }, { "epoch": 7.23, "grad_norm": 0.19865559041500092, "learning_rate": 0.00028188235294117646, "loss": 0.0764, "step": 810 }, { "epoch": 7.24, "grad_norm": 0.09919494390487671, "learning_rate": 0.0002818235294117647, "loss": 0.0692, "step": 811 }, { "epoch": 7.25, "grad_norm": 0.12102092057466507, "learning_rate": 0.0002817647058823529, "loss": 0.0686, "step": 812 }, { "epoch": 7.26, "grad_norm": 0.1632012128829956, "learning_rate": 0.00028170588235294113, 
"loss": 0.0767, "step": 813 }, { "epoch": 7.27, "grad_norm": 0.10684625804424286, "learning_rate": 0.0002816470588235294, "loss": 0.0663, "step": 814 }, { "epoch": 7.28, "grad_norm": 0.13295963406562805, "learning_rate": 0.0002815882352941176, "loss": 0.0681, "step": 815 }, { "epoch": 7.29, "grad_norm": 0.2165580689907074, "learning_rate": 0.00028152941176470586, "loss": 0.0869, "step": 816 }, { "epoch": 7.29, "grad_norm": 0.13848912715911865, "learning_rate": 0.0002814705882352941, "loss": 0.0725, "step": 817 }, { "epoch": 7.3, "grad_norm": 0.08974460512399673, "learning_rate": 0.0002814117647058823, "loss": 0.0726, "step": 818 }, { "epoch": 7.31, "grad_norm": 0.13465768098831177, "learning_rate": 0.0002813529411764706, "loss": 0.0731, "step": 819 }, { "epoch": 7.32, "grad_norm": 0.1530093252658844, "learning_rate": 0.0002812941176470588, "loss": 0.0736, "step": 820 }, { "epoch": 7.33, "grad_norm": 0.11127042770385742, "learning_rate": 0.000281235294117647, "loss": 0.0712, "step": 821 }, { "epoch": 7.34, "grad_norm": 0.17609044909477234, "learning_rate": 0.00028117647058823527, "loss": 0.0815, "step": 822 }, { "epoch": 7.35, "grad_norm": 0.16929319500923157, "learning_rate": 0.0002811176470588235, "loss": 0.0699, "step": 823 }, { "epoch": 7.36, "grad_norm": 0.12846098840236664, "learning_rate": 0.00028105882352941176, "loss": 0.08, "step": 824 }, { "epoch": 7.37, "grad_norm": 0.1075296625494957, "learning_rate": 0.00028099999999999995, "loss": 0.0641, "step": 825 }, { "epoch": 7.38, "grad_norm": 0.13904526829719543, "learning_rate": 0.00028094117647058824, "loss": 0.0667, "step": 826 }, { "epoch": 7.38, "grad_norm": 0.147836834192276, "learning_rate": 0.00028088235294117643, "loss": 0.0727, "step": 827 }, { "epoch": 7.39, "grad_norm": 0.15775331854820251, "learning_rate": 0.0002808235294117647, "loss": 0.0784, "step": 828 }, { "epoch": 7.4, "grad_norm": 0.1532425731420517, "learning_rate": 0.0002807647058823529, "loss": 0.076, "step": 829 }, { "epoch": 7.41, 
"grad_norm": 0.14072157442569733, "learning_rate": 0.00028070588235294116, "loss": 0.0751, "step": 830 }, { "epoch": 7.42, "grad_norm": 0.09857498109340668, "learning_rate": 0.0002806470588235294, "loss": 0.066, "step": 831 }, { "epoch": 7.43, "grad_norm": 0.12894302606582642, "learning_rate": 0.00028058823529411765, "loss": 0.0678, "step": 832 }, { "epoch": 7.44, "grad_norm": 0.14168338477611542, "learning_rate": 0.00028052941176470584, "loss": 0.0708, "step": 833 }, { "epoch": 7.45, "grad_norm": 0.18657492101192474, "learning_rate": 0.0002804705882352941, "loss": 0.0736, "step": 834 }, { "epoch": 7.46, "grad_norm": 0.12351266294717789, "learning_rate": 0.0002804117647058823, "loss": 0.0687, "step": 835 }, { "epoch": 7.46, "grad_norm": 0.11583027243614197, "learning_rate": 0.00028035294117647057, "loss": 0.0714, "step": 836 }, { "epoch": 7.47, "grad_norm": 0.10590634495019913, "learning_rate": 0.0002802941176470588, "loss": 0.0646, "step": 837 }, { "epoch": 7.48, "grad_norm": 0.09078478068113327, "learning_rate": 0.000280235294117647, "loss": 0.0587, "step": 838 }, { "epoch": 7.49, "grad_norm": 0.10528270900249481, "learning_rate": 0.0002801764705882353, "loss": 0.0645, "step": 839 }, { "epoch": 7.5, "grad_norm": 0.14622372388839722, "learning_rate": 0.0002801176470588235, "loss": 0.0824, "step": 840 }, { "epoch": 7.51, "grad_norm": 0.1434820294380188, "learning_rate": 0.00028005882352941173, "loss": 0.0822, "step": 841 }, { "epoch": 7.52, "grad_norm": 0.10534597933292389, "learning_rate": 0.00028, "loss": 0.0691, "step": 842 }, { "epoch": 7.53, "grad_norm": 0.10406238585710526, "learning_rate": 0.0002799411764705882, "loss": 0.0737, "step": 843 }, { "epoch": 7.54, "grad_norm": 0.09948386251926422, "learning_rate": 0.00027988235294117646, "loss": 0.0674, "step": 844 }, { "epoch": 7.54, "grad_norm": 0.11599912494421005, "learning_rate": 0.0002798235294117647, "loss": 0.0692, "step": 845 }, { "epoch": 7.55, "grad_norm": 0.1225421354174614, "learning_rate": 
0.0002797647058823529, "loss": 0.0661, "step": 846 }, { "epoch": 7.56, "grad_norm": 0.1687183827161789, "learning_rate": 0.00027970588235294114, "loss": 0.0936, "step": 847 }, { "epoch": 7.57, "grad_norm": 0.10220091044902802, "learning_rate": 0.0002796470588235294, "loss": 0.0719, "step": 848 }, { "epoch": 7.58, "grad_norm": 0.09554179757833481, "learning_rate": 0.0002795882352941176, "loss": 0.0711, "step": 849 }, { "epoch": 7.59, "grad_norm": 0.0986250564455986, "learning_rate": 0.00027952941176470587, "loss": 0.0739, "step": 850 }, { "epoch": 7.6, "grad_norm": 0.10000172257423401, "learning_rate": 0.00027947058823529406, "loss": 0.0702, "step": 851 }, { "epoch": 7.61, "grad_norm": 0.10283897817134857, "learning_rate": 0.00027941176470588236, "loss": 0.0747, "step": 852 }, { "epoch": 7.62, "grad_norm": 0.17079313099384308, "learning_rate": 0.00027935294117647054, "loss": 0.0862, "step": 853 }, { "epoch": 7.62, "grad_norm": 0.0937611535191536, "learning_rate": 0.0002792941176470588, "loss": 0.0649, "step": 854 }, { "epoch": 7.63, "grad_norm": 0.09779685735702515, "learning_rate": 0.00027923529411764703, "loss": 0.0751, "step": 855 }, { "epoch": 7.64, "grad_norm": 0.0809277668595314, "learning_rate": 0.0002791764705882353, "loss": 0.0582, "step": 856 }, { "epoch": 7.65, "grad_norm": 0.10247834771871567, "learning_rate": 0.0002791176470588235, "loss": 0.0718, "step": 857 }, { "epoch": 7.66, "grad_norm": 0.10511092841625214, "learning_rate": 0.0002790588235294117, "loss": 0.0716, "step": 858 }, { "epoch": 7.67, "grad_norm": 0.1589038223028183, "learning_rate": 0.000279, "loss": 0.0808, "step": 859 }, { "epoch": 7.68, "grad_norm": 0.10465843230485916, "learning_rate": 0.0002789411764705882, "loss": 0.078, "step": 860 }, { "epoch": 7.69, "grad_norm": 0.10627088695764542, "learning_rate": 0.00027888235294117644, "loss": 0.0671, "step": 861 }, { "epoch": 7.7, "grad_norm": 0.09457018971443176, "learning_rate": 0.0002788235294117647, "loss": 0.0698, "step": 862 }, { 
"epoch": 7.71, "grad_norm": 0.08960054069757462, "learning_rate": 0.0002787647058823529, "loss": 0.0665, "step": 863 }, { "epoch": 7.71, "grad_norm": 0.09914160519838333, "learning_rate": 0.00027870588235294117, "loss": 0.0717, "step": 864 }, { "epoch": 7.72, "grad_norm": 0.11835897713899612, "learning_rate": 0.0002786470588235294, "loss": 0.0762, "step": 865 }, { "epoch": 7.73, "grad_norm": 0.15103641152381897, "learning_rate": 0.0002785882352941176, "loss": 0.0858, "step": 866 }, { "epoch": 7.74, "grad_norm": 0.08144506067037582, "learning_rate": 0.00027852941176470584, "loss": 0.0662, "step": 867 }, { "epoch": 7.75, "grad_norm": 0.09567223489284515, "learning_rate": 0.0002784705882352941, "loss": 0.0704, "step": 868 }, { "epoch": 7.76, "grad_norm": 0.1075381338596344, "learning_rate": 0.00027841176470588233, "loss": 0.0699, "step": 869 }, { "epoch": 7.77, "grad_norm": 0.09838805347681046, "learning_rate": 0.0002783529411764706, "loss": 0.0615, "step": 870 }, { "epoch": 7.78, "grad_norm": 0.10799439996480942, "learning_rate": 0.00027829411764705876, "loss": 0.0621, "step": 871 }, { "epoch": 7.79, "grad_norm": 0.13640907406806946, "learning_rate": 0.00027823529411764706, "loss": 0.0845, "step": 872 }, { "epoch": 7.79, "grad_norm": 0.09985334426164627, "learning_rate": 0.00027817647058823525, "loss": 0.0702, "step": 873 }, { "epoch": 7.8, "grad_norm": 0.10088913887739182, "learning_rate": 0.0002781176470588235, "loss": 0.0711, "step": 874 }, { "epoch": 7.81, "grad_norm": 0.08682433515787125, "learning_rate": 0.00027805882352941174, "loss": 0.0684, "step": 875 }, { "epoch": 7.82, "grad_norm": 0.10389798134565353, "learning_rate": 0.000278, "loss": 0.0716, "step": 876 }, { "epoch": 7.83, "grad_norm": 0.11450890451669693, "learning_rate": 0.0002779411764705882, "loss": 0.0739, "step": 877 }, { "epoch": 7.84, "grad_norm": 0.13690023124217987, "learning_rate": 0.00027788235294117647, "loss": 0.072, "step": 878 }, { "epoch": 7.85, "grad_norm": 0.08232972025871277, 
"learning_rate": 0.00027782352941176466, "loss": 0.0654, "step": 879 }, { "epoch": 7.86, "grad_norm": 0.09728033095598221, "learning_rate": 0.0002777647058823529, "loss": 0.0717, "step": 880 }, { "epoch": 7.87, "grad_norm": 0.08549140393733978, "learning_rate": 0.00027770588235294114, "loss": 0.0682, "step": 881 }, { "epoch": 7.88, "grad_norm": 0.09016639739274979, "learning_rate": 0.0002776470588235294, "loss": 0.0588, "step": 882 }, { "epoch": 7.88, "grad_norm": 0.10060504823923111, "learning_rate": 0.00027758823529411763, "loss": 0.0636, "step": 883 }, { "epoch": 7.89, "grad_norm": 0.1495007872581482, "learning_rate": 0.0002775294117647058, "loss": 0.0793, "step": 884 }, { "epoch": 7.9, "grad_norm": 0.09040667861700058, "learning_rate": 0.0002774705882352941, "loss": 0.0675, "step": 885 }, { "epoch": 7.91, "grad_norm": 0.09728195518255234, "learning_rate": 0.0002774117647058823, "loss": 0.0691, "step": 886 }, { "epoch": 7.92, "grad_norm": 0.10015254467725754, "learning_rate": 0.0002773529411764706, "loss": 0.0631, "step": 887 }, { "epoch": 7.93, "grad_norm": 0.08846605569124222, "learning_rate": 0.0002772941176470588, "loss": 0.0653, "step": 888 }, { "epoch": 7.94, "grad_norm": 0.10071244090795517, "learning_rate": 0.00027723529411764704, "loss": 0.0629, "step": 889 }, { "epoch": 7.95, "grad_norm": 0.11886356770992279, "learning_rate": 0.0002771764705882353, "loss": 0.0725, "step": 890 }, { "epoch": 7.96, "grad_norm": 0.13261528313159943, "learning_rate": 0.0002771176470588235, "loss": 0.0849, "step": 891 }, { "epoch": 7.96, "grad_norm": 0.09412933886051178, "learning_rate": 0.00027705882352941177, "loss": 0.0683, "step": 892 }, { "epoch": 7.97, "grad_norm": 0.0950956642627716, "learning_rate": 0.00027699999999999996, "loss": 0.0742, "step": 893 }, { "epoch": 7.98, "grad_norm": 0.08235663175582886, "learning_rate": 0.0002769411764705882, "loss": 0.0664, "step": 894 }, { "epoch": 7.99, "grad_norm": 0.10337395966053009, "learning_rate": 0.00027688235294117644, 
"loss": 0.0696, "step": 895 }, { "epoch": 8.0, "grad_norm": 0.13153767585754395, "learning_rate": 0.0002768235294117647, "loss": 0.0798, "step": 896 }, { "epoch": 8.01, "grad_norm": 0.09291201084852219, "learning_rate": 0.00027676470588235293, "loss": 0.0643, "step": 897 }, { "epoch": 8.02, "grad_norm": 0.07855793088674545, "learning_rate": 0.0002767058823529412, "loss": 0.0522, "step": 898 }, { "epoch": 8.03, "grad_norm": 0.08614505082368851, "learning_rate": 0.00027664705882352936, "loss": 0.0538, "step": 899 }, { "epoch": 8.04, "grad_norm": 0.09544532001018524, "learning_rate": 0.0002765882352941176, "loss": 0.0602, "step": 900 }, { "epoch": 8.04, "eval_cer": 0.038962991359514115, "eval_loss": 0.17666253447532654, "eval_runtime": 21.8806, "eval_samples_per_second": 120.746, "eval_steps_per_second": 1.92, "eval_wer": 0.13335977786592623, "step": 900 }, { "epoch": 8.04, "grad_norm": 0.09941945225000381, "learning_rate": 0.00027652941176470585, "loss": 0.0578, "step": 901 }, { "epoch": 8.05, "grad_norm": 0.11371108889579773, "learning_rate": 0.0002764705882352941, "loss": 0.0526, "step": 902 }, { "epoch": 8.06, "grad_norm": 0.14651525020599365, "learning_rate": 0.00027641176470588234, "loss": 0.0683, "step": 903 }, { "epoch": 8.07, "grad_norm": 0.08412709087133408, "learning_rate": 0.0002763529411764706, "loss": 0.056, "step": 904 }, { "epoch": 8.08, "grad_norm": 0.08637087792158127, "learning_rate": 0.0002762941176470588, "loss": 0.0573, "step": 905 }, { "epoch": 8.09, "grad_norm": 0.1048191487789154, "learning_rate": 0.000276235294117647, "loss": 0.0633, "step": 906 }, { "epoch": 8.1, "grad_norm": 0.08025553077459335, "learning_rate": 0.00027617647058823526, "loss": 0.0521, "step": 907 }, { "epoch": 8.11, "grad_norm": 0.10991553962230682, "learning_rate": 0.0002761176470588235, "loss": 0.059, "step": 908 }, { "epoch": 8.12, "grad_norm": 0.14864669740200043, "learning_rate": 0.00027605882352941174, "loss": 0.0785, "step": 909 }, { "epoch": 8.12, "grad_norm": 
0.09984641522169113, "learning_rate": 0.000276, "loss": 0.0609, "step": 910 }, { "epoch": 8.13, "grad_norm": 0.0754564106464386, "learning_rate": 0.00027594117647058823, "loss": 0.0508, "step": 911 }, { "epoch": 8.14, "grad_norm": 0.10399307310581207, "learning_rate": 0.0002758823529411764, "loss": 0.0604, "step": 912 }, { "epoch": 8.15, "grad_norm": 0.09703277051448822, "learning_rate": 0.00027582352941176466, "loss": 0.0608, "step": 913 }, { "epoch": 8.16, "grad_norm": 0.0977257639169693, "learning_rate": 0.0002757647058823529, "loss": 0.0494, "step": 914 }, { "epoch": 8.17, "grad_norm": 0.1973361223936081, "learning_rate": 0.00027570588235294115, "loss": 0.0769, "step": 915 }, { "epoch": 8.18, "grad_norm": 0.09270834177732468, "learning_rate": 0.0002756470588235294, "loss": 0.061, "step": 916 }, { "epoch": 8.19, "grad_norm": 0.08389316499233246, "learning_rate": 0.00027558823529411764, "loss": 0.0582, "step": 917 }, { "epoch": 8.2, "grad_norm": 0.0841669887304306, "learning_rate": 0.0002755294117647059, "loss": 0.0593, "step": 918 }, { "epoch": 8.21, "grad_norm": 0.0851660966873169, "learning_rate": 0.00027547058823529407, "loss": 0.0519, "step": 919 }, { "epoch": 8.21, "grad_norm": 0.08716004341840744, "learning_rate": 0.00027541176470588237, "loss": 0.0559, "step": 920 }, { "epoch": 8.22, "grad_norm": 0.1524549275636673, "learning_rate": 0.00027535294117647056, "loss": 0.0747, "step": 921 }, { "epoch": 8.23, "grad_norm": 0.09230168163776398, "learning_rate": 0.0002752941176470588, "loss": 0.065, "step": 922 }, { "epoch": 8.24, "grad_norm": 0.0995868593454361, "learning_rate": 0.00027523529411764704, "loss": 0.0605, "step": 923 }, { "epoch": 8.25, "grad_norm": 0.09459994733333588, "learning_rate": 0.0002751764705882353, "loss": 0.0565, "step": 924 }, { "epoch": 8.26, "grad_norm": 0.09015697985887527, "learning_rate": 0.00027511764705882353, "loss": 0.0564, "step": 925 }, { "epoch": 8.27, "grad_norm": 0.0839724987745285, "learning_rate": 0.0002750588235294117, 
"loss": 0.0576, "step": 926 }, { "epoch": 8.28, "grad_norm": 0.12400028854608536, "learning_rate": 0.00027499999999999996, "loss": 0.066, "step": 927 }, { "epoch": 8.29, "grad_norm": 0.14414748549461365, "learning_rate": 0.0002749411764705882, "loss": 0.0691, "step": 928 }, { "epoch": 8.29, "grad_norm": 0.08903595060110092, "learning_rate": 0.00027488235294117645, "loss": 0.0622, "step": 929 }, { "epoch": 8.3, "grad_norm": 0.09169638156890869, "learning_rate": 0.0002748235294117647, "loss": 0.0618, "step": 930 }, { "epoch": 8.31, "grad_norm": 0.0896272212266922, "learning_rate": 0.00027476470588235294, "loss": 0.0555, "step": 931 }, { "epoch": 8.32, "grad_norm": 0.08792594820261002, "learning_rate": 0.0002747058823529411, "loss": 0.0546, "step": 932 }, { "epoch": 8.33, "grad_norm": 0.10805311053991318, "learning_rate": 0.0002746470588235294, "loss": 0.0553, "step": 933 }, { "epoch": 8.34, "grad_norm": 0.16416336596012115, "learning_rate": 0.0002745882352941176, "loss": 0.0748, "step": 934 }, { "epoch": 8.35, "grad_norm": 0.08754893392324448, "learning_rate": 0.00027452941176470586, "loss": 0.0604, "step": 935 }, { "epoch": 8.36, "grad_norm": 0.09177000820636749, "learning_rate": 0.0002744705882352941, "loss": 0.0576, "step": 936 }, { "epoch": 8.37, "grad_norm": 0.09111658483743668, "learning_rate": 0.00027441176470588234, "loss": 0.0607, "step": 937 }, { "epoch": 8.38, "grad_norm": 0.0870172306895256, "learning_rate": 0.0002743529411764706, "loss": 0.056, "step": 938 }, { "epoch": 8.38, "grad_norm": 0.09112705290317535, "learning_rate": 0.0002742941176470588, "loss": 0.059, "step": 939 }, { "epoch": 8.39, "grad_norm": 0.1845976859331131, "learning_rate": 0.000274235294117647, "loss": 0.0888, "step": 940 }, { "epoch": 8.4, "grad_norm": 0.09225645661354065, "learning_rate": 0.00027417647058823526, "loss": 0.0555, "step": 941 }, { "epoch": 8.41, "grad_norm": 0.08703172951936722, "learning_rate": 0.0002741176470588235, "loss": 0.0626, "step": 942 }, { "epoch": 8.42, 
"grad_norm": 0.09380820393562317, "learning_rate": 0.00027405882352941175, "loss": 0.0588, "step": 943 }, { "epoch": 8.43, "grad_norm": 0.09199957549571991, "learning_rate": 0.000274, "loss": 0.0533, "step": 944 }, { "epoch": 8.44, "grad_norm": 0.09230124950408936, "learning_rate": 0.0002739411764705882, "loss": 0.0618, "step": 945 }, { "epoch": 8.45, "grad_norm": 0.1362978219985962, "learning_rate": 0.0002738823529411765, "loss": 0.0731, "step": 946 }, { "epoch": 8.46, "grad_norm": 0.10372074693441391, "learning_rate": 0.00027382352941176467, "loss": 0.0559, "step": 947 }, { "epoch": 8.46, "grad_norm": 0.09024462848901749, "learning_rate": 0.0002737647058823529, "loss": 0.0576, "step": 948 }, { "epoch": 8.47, "grad_norm": 0.09534650295972824, "learning_rate": 0.00027370588235294116, "loss": 0.0597, "step": 949 }, { "epoch": 8.48, "grad_norm": 0.09491518139839172, "learning_rate": 0.0002736470588235294, "loss": 0.0578, "step": 950 }, { "epoch": 8.49, "grad_norm": 0.08020088076591492, "learning_rate": 0.00027358823529411764, "loss": 0.0509, "step": 951 }, { "epoch": 8.5, "grad_norm": 0.11732397228479385, "learning_rate": 0.00027352941176470583, "loss": 0.0602, "step": 952 }, { "epoch": 8.51, "grad_norm": 0.11543024331331253, "learning_rate": 0.00027347058823529413, "loss": 0.0705, "step": 953 }, { "epoch": 8.52, "grad_norm": 0.09199339896440506, "learning_rate": 0.0002734117647058823, "loss": 0.0627, "step": 954 }, { "epoch": 8.53, "grad_norm": 0.08132177591323853, "learning_rate": 0.00027335294117647056, "loss": 0.0564, "step": 955 }, { "epoch": 8.54, "grad_norm": 0.08557543158531189, "learning_rate": 0.0002732941176470588, "loss": 0.0588, "step": 956 }, { "epoch": 8.54, "grad_norm": 0.08863170444965363, "learning_rate": 0.00027323529411764705, "loss": 0.0569, "step": 957 }, { "epoch": 8.55, "grad_norm": 0.09457981586456299, "learning_rate": 0.0002731764705882353, "loss": 0.063, "step": 958 }, { "epoch": 8.56, "grad_norm": 0.13630041480064392, "learning_rate": 
0.0002731176470588235, "loss": 0.0724, "step": 959 }, { "epoch": 8.57, "grad_norm": 0.0962732583284378, "learning_rate": 0.0002730588235294117, "loss": 0.0612, "step": 960 }, { "epoch": 8.58, "grad_norm": 0.0848195031285286, "learning_rate": 0.00027299999999999997, "loss": 0.0565, "step": 961 }, { "epoch": 8.59, "grad_norm": 0.08632772415876389, "learning_rate": 0.0002729411764705882, "loss": 0.0576, "step": 962 }, { "epoch": 8.6, "grad_norm": 0.09068741649389267, "learning_rate": 0.00027288235294117645, "loss": 0.0539, "step": 963 }, { "epoch": 8.61, "grad_norm": 0.08290354162454605, "learning_rate": 0.0002728235294117647, "loss": 0.0525, "step": 964 }, { "epoch": 8.62, "grad_norm": 0.14676202833652496, "learning_rate": 0.0002727647058823529, "loss": 0.081, "step": 965 }, { "epoch": 8.62, "grad_norm": 0.09038417041301727, "learning_rate": 0.0002727058823529412, "loss": 0.0514, "step": 966 }, { "epoch": 8.63, "grad_norm": 0.09433845430612564, "learning_rate": 0.0002726470588235294, "loss": 0.059, "step": 967 }, { "epoch": 8.64, "grad_norm": 0.0914558544754982, "learning_rate": 0.0002725882352941176, "loss": 0.0634, "step": 968 }, { "epoch": 8.65, "grad_norm": 0.09283778071403503, "learning_rate": 0.00027252941176470586, "loss": 0.0632, "step": 969 }, { "epoch": 8.66, "grad_norm": 0.08448383957147598, "learning_rate": 0.0002724705882352941, "loss": 0.0542, "step": 970 }, { "epoch": 8.67, "grad_norm": 0.16179527342319489, "learning_rate": 0.00027241176470588235, "loss": 0.0779, "step": 971 }, { "epoch": 8.68, "grad_norm": 0.10197985172271729, "learning_rate": 0.00027235294117647054, "loss": 0.0581, "step": 972 }, { "epoch": 8.69, "grad_norm": 0.09279035031795502, "learning_rate": 0.0002722941176470588, "loss": 0.0582, "step": 973 }, { "epoch": 8.7, "grad_norm": 0.08126161992549896, "learning_rate": 0.000272235294117647, "loss": 0.0582, "step": 974 }, { "epoch": 8.71, "grad_norm": 0.09172060340642929, "learning_rate": 0.00027217647058823527, "loss": 0.0556, "step": 
975 }, { "epoch": 8.71, "grad_norm": 0.08291953802108765, "learning_rate": 0.0002721176470588235, "loss": 0.0515, "step": 976 }, { "epoch": 8.72, "grad_norm": 0.11062249541282654, "learning_rate": 0.00027205882352941175, "loss": 0.0574, "step": 977 }, { "epoch": 8.73, "grad_norm": 0.13899770379066467, "learning_rate": 0.00027199999999999994, "loss": 0.0749, "step": 978 }, { "epoch": 8.74, "grad_norm": 0.09075085073709488, "learning_rate": 0.00027194117647058824, "loss": 0.0565, "step": 979 }, { "epoch": 8.75, "grad_norm": 0.08652327954769135, "learning_rate": 0.00027188235294117643, "loss": 0.0562, "step": 980 }, { "epoch": 8.76, "grad_norm": 0.08490996062755585, "learning_rate": 0.0002718235294117647, "loss": 0.0544, "step": 981 }, { "epoch": 8.77, "grad_norm": 0.08791162073612213, "learning_rate": 0.0002717647058823529, "loss": 0.0486, "step": 982 }, { "epoch": 8.78, "grad_norm": 0.10936279594898224, "learning_rate": 0.00027170588235294116, "loss": 0.0601, "step": 983 }, { "epoch": 8.79, "grad_norm": 0.1718919575214386, "learning_rate": 0.0002716470588235294, "loss": 0.0714, "step": 984 }, { "epoch": 8.79, "grad_norm": 0.08547305315732956, "learning_rate": 0.0002715882352941176, "loss": 0.0567, "step": 985 }, { "epoch": 8.8, "grad_norm": 0.08878172934055328, "learning_rate": 0.0002715294117647059, "loss": 0.0605, "step": 986 }, { "epoch": 8.81, "grad_norm": 0.08798754215240479, "learning_rate": 0.0002714705882352941, "loss": 0.062, "step": 987 }, { "epoch": 8.82, "grad_norm": 0.08759734779596329, "learning_rate": 0.0002714117647058823, "loss": 0.0579, "step": 988 }, { "epoch": 8.83, "grad_norm": 0.10001529008150101, "learning_rate": 0.00027135294117647057, "loss": 0.066, "step": 989 }, { "epoch": 8.84, "grad_norm": 0.16819550096988678, "learning_rate": 0.0002712941176470588, "loss": 0.0866, "step": 990 }, { "epoch": 8.85, "grad_norm": 0.07452823966741562, "learning_rate": 0.00027123529411764705, "loss": 0.0551, "step": 991 }, { "epoch": 8.86, "grad_norm": 
0.09405795484781265, "learning_rate": 0.0002711764705882353, "loss": 0.0615, "step": 992 }, { "epoch": 8.87, "grad_norm": 0.0971001610159874, "learning_rate": 0.0002711176470588235, "loss": 0.0579, "step": 993 }, { "epoch": 8.88, "grad_norm": 0.09090657532215118, "learning_rate": 0.00027105882352941173, "loss": 0.0551, "step": 994 }, { "epoch": 8.88, "grad_norm": 0.09210459142923355, "learning_rate": 0.000271, "loss": 0.0565, "step": 995 }, { "epoch": 8.89, "grad_norm": 0.14499081671237946, "learning_rate": 0.0002709411764705882, "loss": 0.0641, "step": 996 }, { "epoch": 8.9, "grad_norm": 0.08520587533712387, "learning_rate": 0.00027088235294117646, "loss": 0.0622, "step": 997 }, { "epoch": 8.91, "grad_norm": 0.08412635326385498, "learning_rate": 0.00027082352941176465, "loss": 0.0589, "step": 998 }, { "epoch": 8.92, "grad_norm": 0.09470722079277039, "learning_rate": 0.00027076470588235295, "loss": 0.0557, "step": 999 }, { "epoch": 8.93, "grad_norm": 0.09419083595275879, "learning_rate": 0.00027070588235294114, "loss": 0.0587, "step": 1000 }, { "epoch": 8.93, "eval_cer": 0.037576153912648035, "eval_loss": 0.1708151251077652, "eval_runtime": 22.2458, "eval_samples_per_second": 118.764, "eval_steps_per_second": 1.888, "eval_wer": 0.12915509718365728, "step": 1000 }, { "epoch": 8.94, "grad_norm": 0.08441416174173355, "learning_rate": 0.0002706470588235294, "loss": 0.056, "step": 1001 }, { "epoch": 8.95, "grad_norm": 0.14337719976902008, "learning_rate": 0.0002705882352941176, "loss": 0.0599, "step": 1002 }, { "epoch": 8.96, "grad_norm": 0.13183283805847168, "learning_rate": 0.00027052941176470587, "loss": 0.0725, "step": 1003 }, { "epoch": 8.96, "grad_norm": 0.08453375846147537, "learning_rate": 0.0002704705882352941, "loss": 0.0656, "step": 1004 }, { "epoch": 8.97, "grad_norm": 0.09020174294710159, "learning_rate": 0.00027041176470588235, "loss": 0.0578, "step": 1005 }, { "epoch": 8.98, "grad_norm": 0.09032034873962402, "learning_rate": 0.00027035294117647054, 
"loss": 0.0605, "step": 1006 }, { "epoch": 8.99, "grad_norm": 0.10323207825422287, "learning_rate": 0.0002702941176470588, "loss": 0.0608, "step": 1007 }, { "epoch": 9.0, "grad_norm": 0.10484860837459564, "learning_rate": 0.00027023529411764703, "loss": 0.0621, "step": 1008 }, { "epoch": 9.01, "grad_norm": 0.08602391928434372, "learning_rate": 0.0002701764705882353, "loss": 0.0532, "step": 1009 }, { "epoch": 9.02, "grad_norm": 0.08654514700174332, "learning_rate": 0.0002701176470588235, "loss": 0.0499, "step": 1010 }, { "epoch": 9.03, "grad_norm": 0.07744330167770386, "learning_rate": 0.0002700588235294117, "loss": 0.0555, "step": 1011 }, { "epoch": 9.04, "grad_norm": 0.07674050331115723, "learning_rate": 0.00027, "loss": 0.0471, "step": 1012 }, { "epoch": 9.04, "grad_norm": 0.0944066122174263, "learning_rate": 0.0002699411764705882, "loss": 0.0498, "step": 1013 }, { "epoch": 9.05, "grad_norm": 0.11238271743059158, "learning_rate": 0.00026988235294117644, "loss": 0.0537, "step": 1014 }, { "epoch": 9.06, "grad_norm": 0.13990385830402374, "learning_rate": 0.0002698235294117647, "loss": 0.0631, "step": 1015 }, { "epoch": 9.07, "grad_norm": 0.09597953408956528, "learning_rate": 0.0002697647058823529, "loss": 0.0525, "step": 1016 }, { "epoch": 9.08, "grad_norm": 0.08831837773323059, "learning_rate": 0.00026970588235294117, "loss": 0.0474, "step": 1017 }, { "epoch": 9.09, "grad_norm": 0.09950924664735794, "learning_rate": 0.0002696470588235294, "loss": 0.0556, "step": 1018 }, { "epoch": 9.1, "grad_norm": 0.09094338119029999, "learning_rate": 0.00026958823529411765, "loss": 0.0532, "step": 1019 }, { "epoch": 9.11, "grad_norm": 0.09362645447254181, "learning_rate": 0.00026952941176470584, "loss": 0.0489, "step": 1020 }, { "epoch": 9.12, "grad_norm": 0.1337110996246338, "learning_rate": 0.0002694705882352941, "loss": 0.0753, "step": 1021 }, { "epoch": 9.12, "grad_norm": 0.09633689373731613, "learning_rate": 0.00026941176470588233, "loss": 0.0542, "step": 1022 }, { "epoch": 
9.13, "grad_norm": 0.07622726261615753, "learning_rate": 0.00026935294117647057, "loss": 0.0465, "step": 1023 }, { "epoch": 9.14, "grad_norm": 0.08054569363594055, "learning_rate": 0.0002692941176470588, "loss": 0.0549, "step": 1024 }, { "epoch": 9.15, "grad_norm": 0.09806215763092041, "learning_rate": 0.00026923529411764706, "loss": 0.0507, "step": 1025 }, { "epoch": 9.16, "grad_norm": 0.10202576220035553, "learning_rate": 0.00026917647058823525, "loss": 0.0531, "step": 1026 }, { "epoch": 9.17, "grad_norm": 0.15359221398830414, "learning_rate": 0.0002691176470588235, "loss": 0.0692, "step": 1027 }, { "epoch": 9.18, "grad_norm": 0.11079756170511246, "learning_rate": 0.00026905882352941174, "loss": 0.0526, "step": 1028 }, { "epoch": 9.19, "grad_norm": 0.08722373843193054, "learning_rate": 0.000269, "loss": 0.0507, "step": 1029 }, { "epoch": 9.2, "grad_norm": 0.09620029479265213, "learning_rate": 0.0002689411764705882, "loss": 0.0533, "step": 1030 }, { "epoch": 9.21, "grad_norm": 0.12046362459659576, "learning_rate": 0.0002688823529411764, "loss": 0.0549, "step": 1031 }, { "epoch": 9.21, "grad_norm": 0.10825692117214203, "learning_rate": 0.0002688235294117647, "loss": 0.0513, "step": 1032 }, { "epoch": 9.22, "grad_norm": 0.14859241247177124, "learning_rate": 0.0002687647058823529, "loss": 0.0663, "step": 1033 }, { "epoch": 9.23, "grad_norm": 0.13949048519134521, "learning_rate": 0.00026870588235294114, "loss": 0.0593, "step": 1034 }, { "epoch": 9.24, "grad_norm": 0.08228960633277893, "learning_rate": 0.0002686470588235294, "loss": 0.0503, "step": 1035 }, { "epoch": 9.25, "grad_norm": 0.07926836609840393, "learning_rate": 0.00026858823529411763, "loss": 0.0546, "step": 1036 }, { "epoch": 9.26, "grad_norm": 0.09826277196407318, "learning_rate": 0.00026852941176470587, "loss": 0.0516, "step": 1037 }, { "epoch": 9.27, "grad_norm": 0.09743984788656235, "learning_rate": 0.0002684705882352941, "loss": 0.0514, "step": 1038 }, { "epoch": 9.28, "grad_norm": 
0.12605629861354828, "learning_rate": 0.0002684117647058823, "loss": 0.0618, "step": 1039 }, { "epoch": 9.29, "grad_norm": 0.14350754022598267, "learning_rate": 0.00026835294117647055, "loss": 0.0692, "step": 1040 }, { "epoch": 9.29, "grad_norm": 0.10322259366512299, "learning_rate": 0.0002682941176470588, "loss": 0.0553, "step": 1041 }, { "epoch": 9.3, "grad_norm": 0.10587611794471741, "learning_rate": 0.00026823529411764704, "loss": 0.0534, "step": 1042 }, { "epoch": 9.31, "grad_norm": 0.09072357416152954, "learning_rate": 0.0002681764705882353, "loss": 0.0497, "step": 1043 }, { "epoch": 9.32, "grad_norm": 0.0994672104716301, "learning_rate": 0.00026811764705882347, "loss": 0.0501, "step": 1044 }, { "epoch": 9.33, "grad_norm": 0.1119416207075119, "learning_rate": 0.00026805882352941177, "loss": 0.0542, "step": 1045 }, { "epoch": 9.34, "grad_norm": 0.1785297393798828, "learning_rate": 0.00026799999999999995, "loss": 0.0716, "step": 1046 }, { "epoch": 9.35, "grad_norm": 0.08592676371335983, "learning_rate": 0.00026794117647058825, "loss": 0.0537, "step": 1047 }, { "epoch": 9.36, "grad_norm": 0.08539824932813644, "learning_rate": 0.00026788235294117644, "loss": 0.0523, "step": 1048 }, { "epoch": 9.37, "grad_norm": 0.08961360156536102, "learning_rate": 0.0002678235294117647, "loss": 0.05, "step": 1049 }, { "epoch": 9.38, "grad_norm": 0.10039302706718445, "learning_rate": 0.00026776470588235293, "loss": 0.0521, "step": 1050 }, { "epoch": 9.38, "grad_norm": 0.09265781193971634, "learning_rate": 0.00026770588235294117, "loss": 0.0495, "step": 1051 }, { "epoch": 9.39, "grad_norm": 0.11948375403881073, "learning_rate": 0.0002676470588235294, "loss": 0.0617, "step": 1052 }, { "epoch": 9.4, "grad_norm": 0.10758794844150543, "learning_rate": 0.0002675882352941176, "loss": 0.0583, "step": 1053 }, { "epoch": 9.41, "grad_norm": 0.10403919965028763, "learning_rate": 0.00026752941176470585, "loss": 0.0504, "step": 1054 }, { "epoch": 9.42, "grad_norm": 0.08936557173728943, 
"learning_rate": 0.0002674705882352941, "loss": 0.0488, "step": 1055 }, { "epoch": 9.43, "grad_norm": 0.09061574190855026, "learning_rate": 0.00026741176470588233, "loss": 0.0477, "step": 1056 }, { "epoch": 9.44, "grad_norm": 0.1119331419467926, "learning_rate": 0.0002673529411764706, "loss": 0.0562, "step": 1057 }, { "epoch": 9.45, "grad_norm": 0.19698235392570496, "learning_rate": 0.0002672941176470588, "loss": 0.078, "step": 1058 }, { "epoch": 9.46, "grad_norm": 0.07684513926506042, "learning_rate": 0.000267235294117647, "loss": 0.0527, "step": 1059 }, { "epoch": 9.46, "grad_norm": 0.08601843565702438, "learning_rate": 0.00026717647058823525, "loss": 0.0489, "step": 1060 }, { "epoch": 9.47, "grad_norm": 0.09158769994974136, "learning_rate": 0.0002671176470588235, "loss": 0.051, "step": 1061 }, { "epoch": 9.48, "grad_norm": 0.08711161464452744, "learning_rate": 0.00026705882352941174, "loss": 0.0511, "step": 1062 }, { "epoch": 9.49, "grad_norm": 0.09953027963638306, "learning_rate": 0.000267, "loss": 0.0522, "step": 1063 }, { "epoch": 9.5, "grad_norm": 0.12099027633666992, "learning_rate": 0.00026694117647058823, "loss": 0.0565, "step": 1064 }, { "epoch": 9.51, "grad_norm": 0.11144962161779404, "learning_rate": 0.00026688235294117647, "loss": 0.0611, "step": 1065 }, { "epoch": 9.52, "grad_norm": 0.08424019068479538, "learning_rate": 0.00026682352941176466, "loss": 0.0531, "step": 1066 }, { "epoch": 9.53, "grad_norm": 0.10561881214380264, "learning_rate": 0.0002667647058823529, "loss": 0.0562, "step": 1067 }, { "epoch": 9.54, "grad_norm": 0.07985714077949524, "learning_rate": 0.00026670588235294115, "loss": 0.0502, "step": 1068 }, { "epoch": 9.54, "grad_norm": 0.09215716272592545, "learning_rate": 0.0002666470588235294, "loss": 0.058, "step": 1069 }, { "epoch": 9.55, "grad_norm": 0.0968819111585617, "learning_rate": 0.00026658823529411763, "loss": 0.0486, "step": 1070 }, { "epoch": 9.56, "grad_norm": 0.14727649092674255, "learning_rate": 0.0002665294117647059, 
"loss": 0.0675, "step": 1071 }, { "epoch": 9.57, "grad_norm": 0.08814878016710281, "learning_rate": 0.00026647058823529407, "loss": 0.0487, "step": 1072 }, { "epoch": 9.58, "grad_norm": 0.09155187755823135, "learning_rate": 0.0002664117647058823, "loss": 0.0536, "step": 1073 }, { "epoch": 9.59, "grad_norm": 0.08814623206853867, "learning_rate": 0.00026635294117647055, "loss": 0.051, "step": 1074 }, { "epoch": 9.6, "grad_norm": 0.08754730969667435, "learning_rate": 0.0002662941176470588, "loss": 0.048, "step": 1075 }, { "epoch": 9.61, "grad_norm": 0.1041862815618515, "learning_rate": 0.00026623529411764704, "loss": 0.0523, "step": 1076 }, { "epoch": 9.62, "grad_norm": 0.1607969105243683, "learning_rate": 0.0002661764705882353, "loss": 0.0684, "step": 1077 }, { "epoch": 9.62, "grad_norm": 0.08164142072200775, "learning_rate": 0.00026611764705882353, "loss": 0.053, "step": 1078 }, { "epoch": 9.63, "grad_norm": 0.1007528230547905, "learning_rate": 0.0002660588235294117, "loss": 0.0553, "step": 1079 }, { "epoch": 9.64, "grad_norm": 0.0847950130701065, "learning_rate": 0.000266, "loss": 0.0493, "step": 1080 }, { "epoch": 9.65, "grad_norm": 0.08259861171245575, "learning_rate": 0.0002659411764705882, "loss": 0.0518, "step": 1081 }, { "epoch": 9.66, "grad_norm": 0.09309901297092438, "learning_rate": 0.00026588235294117645, "loss": 0.0542, "step": 1082 }, { "epoch": 9.67, "grad_norm": 0.13553769886493683, "learning_rate": 0.0002658235294117647, "loss": 0.0612, "step": 1083 }, { "epoch": 9.68, "grad_norm": 0.08014171570539474, "learning_rate": 0.00026576470588235293, "loss": 0.0534, "step": 1084 }, { "epoch": 9.69, "grad_norm": 0.08530393987894058, "learning_rate": 0.0002657058823529412, "loss": 0.0506, "step": 1085 }, { "epoch": 9.7, "grad_norm": 0.08911828696727753, "learning_rate": 0.00026564705882352937, "loss": 0.0534, "step": 1086 }, { "epoch": 9.71, "grad_norm": 0.0889921635389328, "learning_rate": 0.0002655882352941176, "loss": 0.0578, "step": 1087 }, { "epoch": 
9.71, "grad_norm": 0.09011814743280411, "learning_rate": 0.00026552941176470585, "loss": 0.046, "step": 1088 }, { "epoch": 9.72, "grad_norm": 0.11802621185779572, "learning_rate": 0.0002654705882352941, "loss": 0.0532, "step": 1089 }, { "epoch": 9.73, "grad_norm": 0.10768546909093857, "learning_rate": 0.00026541176470588234, "loss": 0.0606, "step": 1090 }, { "epoch": 9.74, "grad_norm": 0.0835331603884697, "learning_rate": 0.0002653529411764706, "loss": 0.0553, "step": 1091 }, { "epoch": 9.75, "grad_norm": 0.08594707399606705, "learning_rate": 0.0002652941176470588, "loss": 0.0506, "step": 1092 }, { "epoch": 9.76, "grad_norm": 0.07250364124774933, "learning_rate": 0.00026523529411764707, "loss": 0.0477, "step": 1093 }, { "epoch": 9.77, "grad_norm": 0.09176357835531235, "learning_rate": 0.00026517647058823526, "loss": 0.0534, "step": 1094 }, { "epoch": 9.78, "grad_norm": 0.10806913673877716, "learning_rate": 0.0002651176470588235, "loss": 0.0558, "step": 1095 }, { "epoch": 9.79, "grad_norm": 0.1502862423658371, "learning_rate": 0.00026505882352941175, "loss": 0.0693, "step": 1096 }, { "epoch": 9.79, "grad_norm": 0.07961053401231766, "learning_rate": 0.000265, "loss": 0.0505, "step": 1097 }, { "epoch": 9.8, "grad_norm": 0.08200050890445709, "learning_rate": 0.00026494117647058823, "loss": 0.0571, "step": 1098 }, { "epoch": 9.81, "grad_norm": 0.07281451672315598, "learning_rate": 0.0002648823529411764, "loss": 0.0523, "step": 1099 }, { "epoch": 9.82, "grad_norm": 0.0782695934176445, "learning_rate": 0.00026482352941176467, "loss": 0.0517, "step": 1100 }, { "epoch": 9.82, "eval_cer": 0.037222867408568325, "eval_loss": 0.16769397258758545, "eval_runtime": 22.665, "eval_samples_per_second": 116.567, "eval_steps_per_second": 1.853, "eval_wer": 0.12554541848472828, "step": 1100 }, { "epoch": 9.83, "grad_norm": 0.08382805436849594, "learning_rate": 0.0002647647058823529, "loss": 0.0457, "step": 1101 }, { "epoch": 9.84, "grad_norm": 0.14687584340572357, "learning_rate": 
0.00026470588235294115, "loss": 0.064, "step": 1102 }, { "epoch": 9.85, "grad_norm": 0.08547946810722351, "learning_rate": 0.0002646470588235294, "loss": 0.0513, "step": 1103 }, { "epoch": 9.86, "grad_norm": 0.08478090167045593, "learning_rate": 0.00026458823529411764, "loss": 0.0551, "step": 1104 }, { "epoch": 9.87, "grad_norm": 0.08604072779417038, "learning_rate": 0.00026452941176470583, "loss": 0.0534, "step": 1105 }, { "epoch": 9.88, "grad_norm": 0.08539281785488129, "learning_rate": 0.0002644705882352941, "loss": 0.0553, "step": 1106 }, { "epoch": 9.88, "grad_norm": 0.08788000047206879, "learning_rate": 0.0002644117647058823, "loss": 0.0427, "step": 1107 }, { "epoch": 9.89, "grad_norm": 0.13512328267097473, "learning_rate": 0.00026435294117647056, "loss": 0.0618, "step": 1108 }, { "epoch": 9.9, "grad_norm": 0.07933490723371506, "learning_rate": 0.0002642941176470588, "loss": 0.0589, "step": 1109 }, { "epoch": 9.91, "grad_norm": 0.0664326548576355, "learning_rate": 0.00026423529411764705, "loss": 0.0473, "step": 1110 }, { "epoch": 9.92, "grad_norm": 0.07653143256902695, "learning_rate": 0.0002641764705882353, "loss": 0.0494, "step": 1111 }, { "epoch": 9.93, "grad_norm": 0.08729689568281174, "learning_rate": 0.0002641176470588235, "loss": 0.0507, "step": 1112 }, { "epoch": 9.94, "grad_norm": 0.0986635610461235, "learning_rate": 0.0002640588235294118, "loss": 0.0612, "step": 1113 }, { "epoch": 9.95, "grad_norm": 0.11171715706586838, "learning_rate": 0.00026399999999999997, "loss": 0.0602, "step": 1114 }, { "epoch": 9.96, "grad_norm": 0.12350744754076004, "learning_rate": 0.0002639411764705882, "loss": 0.0624, "step": 1115 }, { "epoch": 9.96, "grad_norm": 0.07649248093366623, "learning_rate": 0.00026388235294117645, "loss": 0.0545, "step": 1116 }, { "epoch": 9.97, "grad_norm": 0.0775519609451294, "learning_rate": 0.0002638235294117647, "loss": 0.0448, "step": 1117 }, { "epoch": 9.98, "grad_norm": 0.08271569758653641, "learning_rate": 0.00026376470588235294, 
"loss": 0.0562, "step": 1118 }, { "epoch": 9.99, "grad_norm": 0.10066267102956772, "learning_rate": 0.00026370588235294113, "loss": 0.05, "step": 1119 }, { "epoch": 10.0, "grad_norm": 0.11251965910196304, "learning_rate": 0.00026364705882352937, "loss": 0.0498, "step": 1120 }, { "epoch": 10.01, "grad_norm": 0.08379779756069183, "learning_rate": 0.0002635882352941176, "loss": 0.0485, "step": 1121 }, { "epoch": 10.02, "grad_norm": 0.08651068806648254, "learning_rate": 0.00026352941176470586, "loss": 0.0461, "step": 1122 }, { "epoch": 10.03, "grad_norm": 0.08629054576158524, "learning_rate": 0.0002634705882352941, "loss": 0.0416, "step": 1123 }, { "epoch": 10.04, "grad_norm": 0.08721557259559631, "learning_rate": 0.00026341176470588235, "loss": 0.0491, "step": 1124 }, { "epoch": 10.04, "grad_norm": 0.0717180147767067, "learning_rate": 0.00026335294117647054, "loss": 0.0443, "step": 1125 }, { "epoch": 10.05, "grad_norm": 0.131173238158226, "learning_rate": 0.00026329411764705883, "loss": 0.0584, "step": 1126 }, { "epoch": 10.06, "grad_norm": 0.12688426673412323, "learning_rate": 0.000263235294117647, "loss": 0.0564, "step": 1127 }, { "epoch": 10.07, "grad_norm": 0.0769183412194252, "learning_rate": 0.00026317647058823527, "loss": 0.0487, "step": 1128 }, { "epoch": 10.08, "grad_norm": 0.08034687489271164, "learning_rate": 0.0002631176470588235, "loss": 0.0427, "step": 1129 }, { "epoch": 10.09, "grad_norm": 0.09149681776762009, "learning_rate": 0.00026305882352941175, "loss": 0.0442, "step": 1130 }, { "epoch": 10.1, "grad_norm": 0.07682140916585922, "learning_rate": 0.000263, "loss": 0.0405, "step": 1131 }, { "epoch": 10.11, "grad_norm": 0.09561823308467865, "learning_rate": 0.0002629411764705882, "loss": 0.0408, "step": 1132 }, { "epoch": 10.12, "grad_norm": 0.13112470507621765, "learning_rate": 0.00026288235294117643, "loss": 0.0586, "step": 1133 }, { "epoch": 10.12, "grad_norm": 0.081934355199337, "learning_rate": 0.00026282352941176467, "loss": 0.0436, "step": 1134 
}, { "epoch": 10.13, "grad_norm": 0.08208665251731873, "learning_rate": 0.0002627647058823529, "loss": 0.0418, "step": 1135 }, { "epoch": 10.14, "grad_norm": 0.08310721814632416, "learning_rate": 0.00026270588235294116, "loss": 0.0474, "step": 1136 }, { "epoch": 10.15, "grad_norm": 0.07716703414916992, "learning_rate": 0.0002626470588235294, "loss": 0.042, "step": 1137 }, { "epoch": 10.16, "grad_norm": 0.08447854965925217, "learning_rate": 0.0002625882352941176, "loss": 0.0465, "step": 1138 }, { "epoch": 10.17, "grad_norm": 0.1743331402540207, "learning_rate": 0.0002625294117647059, "loss": 0.0682, "step": 1139 }, { "epoch": 10.18, "grad_norm": 0.08664188534021378, "learning_rate": 0.0002624705882352941, "loss": 0.0433, "step": 1140 }, { "epoch": 10.19, "grad_norm": 0.0768757164478302, "learning_rate": 0.0002624117647058823, "loss": 0.047, "step": 1141 }, { "epoch": 10.2, "grad_norm": 0.07780515402555466, "learning_rate": 0.00026235294117647057, "loss": 0.045, "step": 1142 }, { "epoch": 10.21, "grad_norm": 0.09276898950338364, "learning_rate": 0.0002622941176470588, "loss": 0.0437, "step": 1143 }, { "epoch": 10.21, "grad_norm": 0.08791891485452652, "learning_rate": 0.00026223529411764705, "loss": 0.0421, "step": 1144 }, { "epoch": 10.22, "grad_norm": 0.1696498543024063, "learning_rate": 0.00026217647058823524, "loss": 0.0631, "step": 1145 }, { "epoch": 10.23, "grad_norm": 0.08368546515703201, "learning_rate": 0.00026211764705882354, "loss": 0.0446, "step": 1146 }, { "epoch": 10.24, "grad_norm": 0.0758795365691185, "learning_rate": 0.00026205882352941173, "loss": 0.0454, "step": 1147 }, { "epoch": 10.25, "grad_norm": 0.08984597027301788, "learning_rate": 0.00026199999999999997, "loss": 0.0521, "step": 1148 }, { "epoch": 10.26, "grad_norm": 0.08602012693881989, "learning_rate": 0.0002619411764705882, "loss": 0.0505, "step": 1149 }, { "epoch": 10.27, "grad_norm": 0.08585749566555023, "learning_rate": 0.00026188235294117646, "loss": 0.0436, "step": 1150 }, { "epoch": 
10.28, "grad_norm": 0.11137910187244415, "learning_rate": 0.0002618235294117647, "loss": 0.0528, "step": 1151 }, { "epoch": 10.29, "grad_norm": 0.13748031854629517, "learning_rate": 0.00026176470588235295, "loss": 0.0627, "step": 1152 }, { "epoch": 10.29, "grad_norm": 0.08785070478916168, "learning_rate": 0.00026170588235294113, "loss": 0.0528, "step": 1153 }, { "epoch": 10.3, "grad_norm": 0.08275540918111801, "learning_rate": 0.0002616470588235294, "loss": 0.0464, "step": 1154 }, { "epoch": 10.31, "grad_norm": 0.080641008913517, "learning_rate": 0.0002615882352941176, "loss": 0.0436, "step": 1155 }, { "epoch": 10.32, "grad_norm": 0.07808531075716019, "learning_rate": 0.00026152941176470586, "loss": 0.0428, "step": 1156 }, { "epoch": 10.33, "grad_norm": 0.0999738797545433, "learning_rate": 0.0002614705882352941, "loss": 0.0483, "step": 1157 }, { "epoch": 10.34, "grad_norm": 0.13058152794837952, "learning_rate": 0.0002614117647058823, "loss": 0.0681, "step": 1158 }, { "epoch": 10.35, "grad_norm": 0.07405345141887665, "learning_rate": 0.0002613529411764706, "loss": 0.0453, "step": 1159 }, { "epoch": 10.36, "grad_norm": 0.07965939491987228, "learning_rate": 0.0002612941176470588, "loss": 0.0475, "step": 1160 }, { "epoch": 10.37, "grad_norm": 0.08219412714242935, "learning_rate": 0.00026123529411764703, "loss": 0.0457, "step": 1161 }, { "epoch": 10.38, "grad_norm": 0.07677296549081802, "learning_rate": 0.00026117647058823527, "loss": 0.0396, "step": 1162 }, { "epoch": 10.38, "grad_norm": 0.09775242209434509, "learning_rate": 0.0002611176470588235, "loss": 0.0557, "step": 1163 }, { "epoch": 10.39, "grad_norm": 0.12974615395069122, "learning_rate": 0.00026105882352941176, "loss": 0.056, "step": 1164 }, { "epoch": 10.4, "grad_norm": 0.07911017537117004, "learning_rate": 0.000261, "loss": 0.0476, "step": 1165 }, { "epoch": 10.41, "grad_norm": 0.08935990184545517, "learning_rate": 0.0002609411764705882, "loss": 0.0459, "step": 1166 }, { "epoch": 10.42, "grad_norm": 
0.09498455375432968, "learning_rate": 0.00026088235294117643, "loss": 0.047, "step": 1167 }, { "epoch": 10.43, "grad_norm": 0.07545097917318344, "learning_rate": 0.0002608235294117647, "loss": 0.0451, "step": 1168 }, { "epoch": 10.44, "grad_norm": 0.08683010190725327, "learning_rate": 0.0002607647058823529, "loss": 0.0483, "step": 1169 }, { "epoch": 10.45, "grad_norm": 0.17454423010349274, "learning_rate": 0.00026070588235294116, "loss": 0.0663, "step": 1170 }, { "epoch": 10.46, "grad_norm": 0.08547163009643555, "learning_rate": 0.00026064705882352935, "loss": 0.0475, "step": 1171 }, { "epoch": 10.46, "grad_norm": 0.09390612691640854, "learning_rate": 0.00026058823529411765, "loss": 0.0544, "step": 1172 }, { "epoch": 10.47, "grad_norm": 0.08010204136371613, "learning_rate": 0.00026052941176470584, "loss": 0.0416, "step": 1173 }, { "epoch": 10.48, "grad_norm": 0.0884295329451561, "learning_rate": 0.0002604705882352941, "loss": 0.0484, "step": 1174 }, { "epoch": 10.49, "grad_norm": 0.10123898088932037, "learning_rate": 0.00026041176470588233, "loss": 0.0426, "step": 1175 }, { "epoch": 10.5, "grad_norm": 0.11035466194152832, "learning_rate": 0.00026035294117647057, "loss": 0.0535, "step": 1176 }, { "epoch": 10.51, "grad_norm": 0.10585707426071167, "learning_rate": 0.0002602941176470588, "loss": 0.0598, "step": 1177 }, { "epoch": 10.52, "grad_norm": 0.08312973380088806, "learning_rate": 0.00026023529411764706, "loss": 0.0439, "step": 1178 }, { "epoch": 10.53, "grad_norm": 0.07807246595621109, "learning_rate": 0.0002601764705882353, "loss": 0.041, "step": 1179 }, { "epoch": 10.54, "grad_norm": 0.07961349189281464, "learning_rate": 0.0002601176470588235, "loss": 0.0417, "step": 1180 }, { "epoch": 10.54, "grad_norm": 0.08511442691087723, "learning_rate": 0.00026005882352941173, "loss": 0.049, "step": 1181 }, { "epoch": 10.55, "grad_norm": 0.09853603690862656, "learning_rate": 0.00026, "loss": 0.0516, "step": 1182 }, { "epoch": 10.56, "grad_norm": 0.1274108737707138, 
"learning_rate": 0.0002599411764705882, "loss": 0.0561, "step": 1183 }, { "epoch": 10.57, "grad_norm": 0.0804334208369255, "learning_rate": 0.00025988235294117646, "loss": 0.0442, "step": 1184 }, { "epoch": 10.58, "grad_norm": 0.08435020595788956, "learning_rate": 0.0002598235294117647, "loss": 0.0426, "step": 1185 }, { "epoch": 10.59, "grad_norm": 0.08223892748355865, "learning_rate": 0.0002597647058823529, "loss": 0.0491, "step": 1186 }, { "epoch": 10.6, "grad_norm": 0.07661667466163635, "learning_rate": 0.00025970588235294114, "loss": 0.0485, "step": 1187 }, { "epoch": 10.61, "grad_norm": 0.09493746608495712, "learning_rate": 0.0002596470588235294, "loss": 0.0517, "step": 1188 }, { "epoch": 10.62, "grad_norm": 0.13816070556640625, "learning_rate": 0.0002595882352941176, "loss": 0.0591, "step": 1189 }, { "epoch": 10.62, "grad_norm": 0.07286964356899261, "learning_rate": 0.00025952941176470587, "loss": 0.0465, "step": 1190 }, { "epoch": 10.63, "grad_norm": 0.07412020862102509, "learning_rate": 0.00025947058823529406, "loss": 0.049, "step": 1191 }, { "epoch": 10.64, "grad_norm": 0.0823870301246643, "learning_rate": 0.00025941176470588236, "loss": 0.0504, "step": 1192 }, { "epoch": 10.65, "grad_norm": 0.07828681915998459, "learning_rate": 0.00025935294117647055, "loss": 0.0495, "step": 1193 }, { "epoch": 10.66, "grad_norm": 0.08328825980424881, "learning_rate": 0.0002592941176470588, "loss": 0.0439, "step": 1194 }, { "epoch": 10.67, "grad_norm": 0.1659298837184906, "learning_rate": 0.00025923529411764703, "loss": 0.0686, "step": 1195 }, { "epoch": 10.68, "grad_norm": 0.08040114492177963, "learning_rate": 0.0002591764705882353, "loss": 0.0522, "step": 1196 }, { "epoch": 10.69, "grad_norm": 0.07643823325634003, "learning_rate": 0.0002591176470588235, "loss": 0.0459, "step": 1197 }, { "epoch": 10.7, "grad_norm": 0.08668006211519241, "learning_rate": 0.00025905882352941176, "loss": 0.0501, "step": 1198 }, { "epoch": 10.71, "grad_norm": 0.07934591919183731, 
"learning_rate": 0.00025899999999999995, "loss": 0.0508, "step": 1199 }, { "epoch": 10.71, "grad_norm": 0.08404715359210968, "learning_rate": 0.0002589411764705882, "loss": 0.0413, "step": 1200 }, { "epoch": 10.71, "eval_cer": 0.036076565453841615, "eval_loss": 0.1771070957183838, "eval_runtime": 22.5753, "eval_samples_per_second": 117.031, "eval_steps_per_second": 1.86, "eval_wer": 0.12336374454581515, "step": 1200 }, { "epoch": 10.72, "grad_norm": 0.12253329157829285, "learning_rate": 0.00025888235294117644, "loss": 0.063, "step": 1201 }, { "epoch": 10.73, "grad_norm": 0.16100212931632996, "learning_rate": 0.0002588235294117647, "loss": 0.0538, "step": 1202 }, { "epoch": 10.74, "grad_norm": 0.08247417956590652, "learning_rate": 0.0002587647058823529, "loss": 0.0509, "step": 1203 }, { "epoch": 10.75, "grad_norm": 0.07974733412265778, "learning_rate": 0.0002587058823529411, "loss": 0.0464, "step": 1204 }, { "epoch": 10.76, "grad_norm": 0.07784251123666763, "learning_rate": 0.0002586470588235294, "loss": 0.044, "step": 1205 }, { "epoch": 10.77, "grad_norm": 0.08174080401659012, "learning_rate": 0.0002585882352941176, "loss": 0.0428, "step": 1206 }, { "epoch": 10.78, "grad_norm": 0.10989406704902649, "learning_rate": 0.0002585294117647059, "loss": 0.0502, "step": 1207 }, { "epoch": 10.79, "grad_norm": 0.14650195837020874, "learning_rate": 0.0002584705882352941, "loss": 0.0724, "step": 1208 }, { "epoch": 10.79, "grad_norm": 0.07386109977960587, "learning_rate": 0.00025841176470588233, "loss": 0.0496, "step": 1209 }, { "epoch": 10.8, "grad_norm": 0.08304432779550552, "learning_rate": 0.0002583529411764706, "loss": 0.0527, "step": 1210 }, { "epoch": 10.81, "grad_norm": 0.08041701465845108, "learning_rate": 0.0002582941176470588, "loss": 0.0457, "step": 1211 }, { "epoch": 10.82, "grad_norm": 0.08432194590568542, "learning_rate": 0.00025823529411764706, "loss": 0.0459, "step": 1212 }, { "epoch": 10.83, "grad_norm": 0.07892563939094543, "learning_rate": 
0.00025817647058823525, "loss": 0.0399, "step": 1213 }, { "epoch": 10.84, "grad_norm": 0.15423566102981567, "learning_rate": 0.0002581176470588235, "loss": 0.0747, "step": 1214 }, { "epoch": 10.85, "grad_norm": 0.0816345140337944, "learning_rate": 0.00025805882352941174, "loss": 0.0502, "step": 1215 }, { "epoch": 10.86, "grad_norm": 0.07691647857427597, "learning_rate": 0.000258, "loss": 0.0472, "step": 1216 }, { "epoch": 10.87, "grad_norm": 0.08117283135652542, "learning_rate": 0.0002579411764705882, "loss": 0.0437, "step": 1217 }, { "epoch": 10.88, "grad_norm": 0.09006890654563904, "learning_rate": 0.00025788235294117647, "loss": 0.0438, "step": 1218 }, { "epoch": 10.88, "grad_norm": 0.08083365112543106, "learning_rate": 0.00025782352941176466, "loss": 0.0518, "step": 1219 }, { "epoch": 10.89, "grad_norm": 0.11542165279388428, "learning_rate": 0.0002577647058823529, "loss": 0.0543, "step": 1220 }, { "epoch": 10.9, "grad_norm": 0.10748816281557083, "learning_rate": 0.00025770588235294115, "loss": 0.0562, "step": 1221 }, { "epoch": 10.91, "grad_norm": 0.08316796272993088, "learning_rate": 0.0002576470588235294, "loss": 0.049, "step": 1222 }, { "epoch": 10.92, "grad_norm": 0.09037583321332932, "learning_rate": 0.00025758823529411763, "loss": 0.0502, "step": 1223 }, { "epoch": 10.93, "grad_norm": 0.10377881675958633, "learning_rate": 0.0002575294117647059, "loss": 0.0494, "step": 1224 }, { "epoch": 10.94, "grad_norm": 0.08607975393533707, "learning_rate": 0.0002574705882352941, "loss": 0.0505, "step": 1225 }, { "epoch": 10.95, "grad_norm": 0.10056962072849274, "learning_rate": 0.0002574117647058823, "loss": 0.0499, "step": 1226 }, { "epoch": 10.96, "grad_norm": 0.1339590847492218, "learning_rate": 0.00025735294117647055, "loss": 0.0637, "step": 1227 }, { "epoch": 10.96, "grad_norm": 0.07969089597463608, "learning_rate": 0.0002572941176470588, "loss": 0.0464, "step": 1228 }, { "epoch": 10.97, "grad_norm": 0.08407177776098251, "learning_rate": 0.00025723529411764704, 
"loss": 0.0479, "step": 1229 }, { "epoch": 10.98, "grad_norm": 0.12283366918563843, "learning_rate": 0.0002571764705882353, "loss": 0.0509, "step": 1230 }, { "epoch": 10.99, "grad_norm": 0.12030702829360962, "learning_rate": 0.0002571176470588235, "loss": 0.0492, "step": 1231 }, { "epoch": 11.0, "grad_norm": 0.09268844872713089, "learning_rate": 0.0002570588235294117, "loss": 0.0451, "step": 1232 }, { "epoch": 11.01, "grad_norm": 0.1249970942735672, "learning_rate": 0.00025699999999999996, "loss": 0.045, "step": 1233 }, { "epoch": 11.02, "grad_norm": 0.10564065724611282, "learning_rate": 0.0002569411764705882, "loss": 0.0414, "step": 1234 }, { "epoch": 11.03, "grad_norm": 0.06608693301677704, "learning_rate": 0.00025688235294117645, "loss": 0.0396, "step": 1235 }, { "epoch": 11.04, "grad_norm": 0.09664716571569443, "learning_rate": 0.0002568235294117647, "loss": 0.0399, "step": 1236 }, { "epoch": 11.04, "grad_norm": 0.12408290803432465, "learning_rate": 0.00025676470588235293, "loss": 0.0468, "step": 1237 }, { "epoch": 11.05, "grad_norm": 0.11983111500740051, "learning_rate": 0.0002567058823529412, "loss": 0.0457, "step": 1238 }, { "epoch": 11.06, "grad_norm": 0.10642583668231964, "learning_rate": 0.00025664705882352936, "loss": 0.0505, "step": 1239 }, { "epoch": 11.07, "grad_norm": 0.10309722274541855, "learning_rate": 0.00025658823529411766, "loss": 0.0462, "step": 1240 }, { "epoch": 11.08, "grad_norm": 0.11561737209558487, "learning_rate": 0.00025652941176470585, "loss": 0.048, "step": 1241 }, { "epoch": 11.09, "grad_norm": 0.0772339478135109, "learning_rate": 0.0002564705882352941, "loss": 0.0423, "step": 1242 }, { "epoch": 11.1, "grad_norm": 0.08576243370771408, "learning_rate": 0.00025641176470588234, "loss": 0.0464, "step": 1243 }, { "epoch": 11.11, "grad_norm": 0.10407901555299759, "learning_rate": 0.0002563529411764706, "loss": 0.0444, "step": 1244 }, { "epoch": 11.12, "grad_norm": 0.22960324585437775, "learning_rate": 0.0002562941176470588, "loss": 
0.0631, "step": 1245 }, { "epoch": 11.12, "grad_norm": 0.06789670139551163, "learning_rate": 0.000256235294117647, "loss": 0.0453, "step": 1246 }, { "epoch": 11.13, "grad_norm": 0.08458477258682251, "learning_rate": 0.00025617647058823526, "loss": 0.0435, "step": 1247 }, { "epoch": 11.14, "grad_norm": 0.09180407226085663, "learning_rate": 0.0002561176470588235, "loss": 0.0473, "step": 1248 }, { "epoch": 11.15, "grad_norm": 0.07602664083242416, "learning_rate": 0.00025605882352941174, "loss": 0.0395, "step": 1249 }, { "epoch": 11.16, "grad_norm": 0.09161707758903503, "learning_rate": 0.000256, "loss": 0.0413, "step": 1250 }, { "epoch": 11.17, "grad_norm": 0.28046804666519165, "learning_rate": 0.00025594117647058823, "loss": 0.0724, "step": 1251 }, { "epoch": 11.18, "grad_norm": 0.07246720790863037, "learning_rate": 0.0002558823529411764, "loss": 0.0433, "step": 1252 }, { "epoch": 11.19, "grad_norm": 0.07940445095300674, "learning_rate": 0.0002558235294117647, "loss": 0.0445, "step": 1253 }, { "epoch": 11.2, "grad_norm": 0.07803329825401306, "learning_rate": 0.0002557647058823529, "loss": 0.0375, "step": 1254 }, { "epoch": 11.21, "grad_norm": 0.07922541350126266, "learning_rate": 0.00025570588235294115, "loss": 0.0487, "step": 1255 }, { "epoch": 11.21, "grad_norm": 0.09134754538536072, "learning_rate": 0.0002556470588235294, "loss": 0.047, "step": 1256 }, { "epoch": 11.22, "grad_norm": 0.1504497528076172, "learning_rate": 0.00025558823529411764, "loss": 0.0599, "step": 1257 }, { "epoch": 11.23, "grad_norm": 0.07694713026285172, "learning_rate": 0.0002555294117647059, "loss": 0.0469, "step": 1258 }, { "epoch": 11.24, "grad_norm": 0.07742314040660858, "learning_rate": 0.00025547058823529407, "loss": 0.0426, "step": 1259 }, { "epoch": 11.25, "grad_norm": 0.07481490820646286, "learning_rate": 0.0002554117647058823, "loss": 0.043, "step": 1260 }, { "epoch": 11.26, "grad_norm": 0.08957730233669281, "learning_rate": 0.00025535294117647056, "loss": 0.0443, "step": 1261 }, { 
"epoch": 11.27, "grad_norm": 0.0939517617225647, "learning_rate": 0.0002552941176470588, "loss": 0.0516, "step": 1262 }, { "epoch": 11.28, "grad_norm": 0.12942364811897278, "learning_rate": 0.00025523529411764704, "loss": 0.0492, "step": 1263 }, { "epoch": 11.29, "grad_norm": 0.16845741868019104, "learning_rate": 0.0002551764705882353, "loss": 0.0649, "step": 1264 }, { "epoch": 11.29, "grad_norm": 0.08106888830661774, "learning_rate": 0.0002551176470588235, "loss": 0.0453, "step": 1265 }, { "epoch": 11.3, "grad_norm": 0.08626700192689896, "learning_rate": 0.0002550588235294118, "loss": 0.0469, "step": 1266 }, { "epoch": 11.31, "grad_norm": 0.07476877421140671, "learning_rate": 0.00025499999999999996, "loss": 0.04, "step": 1267 }, { "epoch": 11.32, "grad_norm": 0.06678514182567596, "learning_rate": 0.0002549411764705882, "loss": 0.0379, "step": 1268 }, { "epoch": 11.33, "grad_norm": 0.09826838225126266, "learning_rate": 0.00025488235294117645, "loss": 0.0526, "step": 1269 }, { "epoch": 11.34, "grad_norm": 0.12593460083007812, "learning_rate": 0.0002548235294117647, "loss": 0.0571, "step": 1270 }, { "epoch": 11.35, "grad_norm": 0.08111362159252167, "learning_rate": 0.00025476470588235294, "loss": 0.0439, "step": 1271 }, { "epoch": 11.36, "grad_norm": 0.08590488880872726, "learning_rate": 0.0002547058823529411, "loss": 0.0481, "step": 1272 }, { "epoch": 11.37, "grad_norm": 0.07635786384344101, "learning_rate": 0.0002546470588235294, "loss": 0.0449, "step": 1273 }, { "epoch": 11.38, "grad_norm": 0.07801787555217743, "learning_rate": 0.0002545882352941176, "loss": 0.0419, "step": 1274 }, { "epoch": 11.38, "grad_norm": 0.07785259187221527, "learning_rate": 0.00025452941176470586, "loss": 0.0401, "step": 1275 }, { "epoch": 11.39, "grad_norm": 0.14835655689239502, "learning_rate": 0.0002544705882352941, "loss": 0.0606, "step": 1276 }, { "epoch": 11.4, "grad_norm": 0.06783640384674072, "learning_rate": 0.00025441176470588234, "loss": 0.0444, "step": 1277 }, { "epoch": 
11.41, "grad_norm": 0.07653181999921799, "learning_rate": 0.0002543529411764706, "loss": 0.0432, "step": 1278 }, { "epoch": 11.42, "grad_norm": 0.08609187602996826, "learning_rate": 0.0002542941176470588, "loss": 0.0405, "step": 1279 }, { "epoch": 11.43, "grad_norm": 0.08816090226173401, "learning_rate": 0.000254235294117647, "loss": 0.0442, "step": 1280 }, { "epoch": 11.44, "grad_norm": 0.0865228921175003, "learning_rate": 0.00025417647058823526, "loss": 0.0409, "step": 1281 }, { "epoch": 11.45, "grad_norm": 0.16418685019016266, "learning_rate": 0.0002541176470588235, "loss": 0.0623, "step": 1282 }, { "epoch": 11.46, "grad_norm": 0.07710708677768707, "learning_rate": 0.00025405882352941175, "loss": 0.0429, "step": 1283 }, { "epoch": 11.46, "grad_norm": 0.0758427307009697, "learning_rate": 0.000254, "loss": 0.0457, "step": 1284 }, { "epoch": 11.47, "grad_norm": 0.07040686160326004, "learning_rate": 0.0002539411764705882, "loss": 0.0446, "step": 1285 }, { "epoch": 11.48, "grad_norm": 0.07687915861606598, "learning_rate": 0.0002538823529411765, "loss": 0.0491, "step": 1286 }, { "epoch": 11.49, "grad_norm": 0.07504872232675552, "learning_rate": 0.00025382352941176467, "loss": 0.0455, "step": 1287 }, { "epoch": 11.5, "grad_norm": 0.10116589814424515, "learning_rate": 0.0002537647058823529, "loss": 0.0481, "step": 1288 }, { "epoch": 11.51, "grad_norm": 0.1314215064048767, "learning_rate": 0.00025370588235294116, "loss": 0.0669, "step": 1289 }, { "epoch": 11.52, "grad_norm": 0.0785100981593132, "learning_rate": 0.0002536470588235294, "loss": 0.0442, "step": 1290 }, { "epoch": 11.53, "grad_norm": 0.07649140805006027, "learning_rate": 0.00025358823529411764, "loss": 0.0462, "step": 1291 }, { "epoch": 11.54, "grad_norm": 0.07404986768960953, "learning_rate": 0.00025352941176470583, "loss": 0.0394, "step": 1292 }, { "epoch": 11.54, "grad_norm": 0.08178161829710007, "learning_rate": 0.0002534705882352941, "loss": 0.0442, "step": 1293 }, { "epoch": 11.55, "grad_norm": 
0.11859782785177231, "learning_rate": 0.0002534117647058823, "loss": 0.0451, "step": 1294 }, { "epoch": 11.56, "grad_norm": 0.15042392909526825, "learning_rate": 0.00025335294117647056, "loss": 0.0684, "step": 1295 }, { "epoch": 11.57, "grad_norm": 0.08013623207807541, "learning_rate": 0.0002532941176470588, "loss": 0.0392, "step": 1296 }, { "epoch": 11.58, "grad_norm": 0.08386145532131195, "learning_rate": 0.00025323529411764705, "loss": 0.0463, "step": 1297 }, { "epoch": 11.59, "grad_norm": 0.08066698908805847, "learning_rate": 0.00025317647058823524, "loss": 0.0444, "step": 1298 }, { "epoch": 11.6, "grad_norm": 0.08640114963054657, "learning_rate": 0.00025311764705882354, "loss": 0.0486, "step": 1299 }, { "epoch": 11.61, "grad_norm": 0.09660367667675018, "learning_rate": 0.0002530588235294117, "loss": 0.0418, "step": 1300 }, { "epoch": 11.61, "eval_cer": 0.03582475486050821, "eval_loss": 0.17305758595466614, "eval_runtime": 22.1946, "eval_samples_per_second": 119.038, "eval_steps_per_second": 1.892, "eval_wer": 0.12292740975803253, "step": 1300 }, { "epoch": 11.62, "grad_norm": 0.11291833966970444, "learning_rate": 0.00025299999999999997, "loss": 0.0451, "step": 1301 }, { "epoch": 11.62, "grad_norm": 0.08570745587348938, "learning_rate": 0.0002529411764705882, "loss": 0.0484, "step": 1302 }, { "epoch": 11.63, "grad_norm": 0.08498316258192062, "learning_rate": 0.00025288235294117646, "loss": 0.0454, "step": 1303 }, { "epoch": 11.64, "grad_norm": 0.08248021453619003, "learning_rate": 0.0002528235294117647, "loss": 0.0424, "step": 1304 }, { "epoch": 11.65, "grad_norm": 0.08129890263080597, "learning_rate": 0.0002527647058823529, "loss": 0.0428, "step": 1305 }, { "epoch": 11.66, "grad_norm": 0.07866654545068741, "learning_rate": 0.0002527058823529412, "loss": 0.0365, "step": 1306 }, { "epoch": 11.67, "grad_norm": 0.14105282723903656, "learning_rate": 0.0002526470588235294, "loss": 0.064, "step": 1307 }, { "epoch": 11.68, "grad_norm": 0.07599008828401566, 
"learning_rate": 0.0002525882352941176, "loss": 0.0411, "step": 1308 }, { "epoch": 11.69, "grad_norm": 0.0781768336892128, "learning_rate": 0.00025252941176470586, "loss": 0.0431, "step": 1309 }, { "epoch": 11.7, "grad_norm": 0.07411547005176544, "learning_rate": 0.0002524705882352941, "loss": 0.0436, "step": 1310 }, { "epoch": 11.71, "grad_norm": 0.09701592475175858, "learning_rate": 0.00025241176470588235, "loss": 0.0467, "step": 1311 }, { "epoch": 11.71, "grad_norm": 0.0785735622048378, "learning_rate": 0.0002523529411764706, "loss": 0.0419, "step": 1312 }, { "epoch": 11.72, "grad_norm": 0.13147878646850586, "learning_rate": 0.0002522941176470588, "loss": 0.0587, "step": 1313 }, { "epoch": 11.73, "grad_norm": 0.11371983587741852, "learning_rate": 0.000252235294117647, "loss": 0.0559, "step": 1314 }, { "epoch": 11.74, "grad_norm": 0.07586345821619034, "learning_rate": 0.00025217647058823527, "loss": 0.0484, "step": 1315 }, { "epoch": 11.75, "grad_norm": 0.07440981268882751, "learning_rate": 0.0002521176470588235, "loss": 0.0448, "step": 1316 }, { "epoch": 11.76, "grad_norm": 0.07169202715158463, "learning_rate": 0.00025205882352941176, "loss": 0.04, "step": 1317 }, { "epoch": 11.77, "grad_norm": 0.08172278851270676, "learning_rate": 0.00025199999999999995, "loss": 0.04, "step": 1318 }, { "epoch": 11.78, "grad_norm": 0.0898115262389183, "learning_rate": 0.00025194117647058824, "loss": 0.0399, "step": 1319 }, { "epoch": 11.79, "grad_norm": 0.10978999733924866, "learning_rate": 0.00025188235294117643, "loss": 0.0505, "step": 1320 }, { "epoch": 11.79, "grad_norm": 0.07988785952329636, "learning_rate": 0.0002518235294117647, "loss": 0.0439, "step": 1321 }, { "epoch": 11.8, "grad_norm": 0.078738734126091, "learning_rate": 0.0002517647058823529, "loss": 0.0447, "step": 1322 }, { "epoch": 11.81, "grad_norm": 0.08227694779634476, "learning_rate": 0.00025170588235294116, "loss": 0.0452, "step": 1323 }, { "epoch": 11.82, "grad_norm": 0.06910045444965363, "learning_rate": 
0.0002516470588235294, "loss": 0.0354, "step": 1324 }, { "epoch": 11.83, "grad_norm": 0.07851975411176682, "learning_rate": 0.00025158823529411765, "loss": 0.0404, "step": 1325 }, { "epoch": 11.84, "grad_norm": 0.13973665237426758, "learning_rate": 0.00025152941176470584, "loss": 0.0622, "step": 1326 }, { "epoch": 11.85, "grad_norm": 0.07589449733495712, "learning_rate": 0.0002514705882352941, "loss": 0.0482, "step": 1327 }, { "epoch": 11.86, "grad_norm": 0.07309146970510483, "learning_rate": 0.0002514117647058823, "loss": 0.0426, "step": 1328 }, { "epoch": 11.87, "grad_norm": 0.08154064416885376, "learning_rate": 0.00025135294117647057, "loss": 0.043, "step": 1329 }, { "epoch": 11.88, "grad_norm": 0.0723952054977417, "learning_rate": 0.0002512941176470588, "loss": 0.0375, "step": 1330 }, { "epoch": 11.88, "grad_norm": 0.07835827022790909, "learning_rate": 0.000251235294117647, "loss": 0.041, "step": 1331 }, { "epoch": 11.89, "grad_norm": 0.13197559118270874, "learning_rate": 0.0002511764705882353, "loss": 0.056, "step": 1332 }, { "epoch": 11.9, "grad_norm": 0.08319175988435745, "learning_rate": 0.0002511176470588235, "loss": 0.0418, "step": 1333 }, { "epoch": 11.91, "grad_norm": 0.07897458225488663, "learning_rate": 0.00025105882352941173, "loss": 0.0479, "step": 1334 }, { "epoch": 11.92, "grad_norm": 0.0702613964676857, "learning_rate": 0.000251, "loss": 0.0446, "step": 1335 }, { "epoch": 11.93, "grad_norm": 0.07538347691297531, "learning_rate": 0.0002509411764705882, "loss": 0.0517, "step": 1336 }, { "epoch": 11.94, "grad_norm": 0.0804985836148262, "learning_rate": 0.00025088235294117646, "loss": 0.0441, "step": 1337 }, { "epoch": 11.95, "grad_norm": 0.09818108379840851, "learning_rate": 0.0002508235294117647, "loss": 0.0455, "step": 1338 }, { "epoch": 11.96, "grad_norm": 0.11883387714624405, "learning_rate": 0.00025076470588235295, "loss": 0.0519, "step": 1339 }, { "epoch": 11.96, "grad_norm": 0.07313095778226852, "learning_rate": 0.00025070588235294114, 
"loss": 0.0445, "step": 1340 }, { "epoch": 11.97, "grad_norm": 0.07245111465454102, "learning_rate": 0.0002506470588235294, "loss": 0.0427, "step": 1341 }, { "epoch": 11.98, "grad_norm": 0.07658955454826355, "learning_rate": 0.0002505882352941176, "loss": 0.0456, "step": 1342 }, { "epoch": 11.99, "grad_norm": 0.08219477534294128, "learning_rate": 0.00025052941176470587, "loss": 0.0358, "step": 1343 }, { "epoch": 12.0, "grad_norm": 0.10890087485313416, "learning_rate": 0.0002504705882352941, "loss": 0.0458, "step": 1344 }, { "epoch": 12.01, "grad_norm": 0.06425008177757263, "learning_rate": 0.00025041176470588236, "loss": 0.0399, "step": 1345 }, { "epoch": 12.02, "grad_norm": 0.06577488034963608, "learning_rate": 0.00025035294117647054, "loss": 0.0372, "step": 1346 }, { "epoch": 12.03, "grad_norm": 0.07996486872434616, "learning_rate": 0.0002502941176470588, "loss": 0.0445, "step": 1347 }, { "epoch": 12.04, "grad_norm": 0.0695604607462883, "learning_rate": 0.00025023529411764703, "loss": 0.0398, "step": 1348 }, { "epoch": 12.04, "grad_norm": 0.06891915202140808, "learning_rate": 0.0002501764705882353, "loss": 0.0373, "step": 1349 }, { "epoch": 12.05, "grad_norm": 0.10478998720645905, "learning_rate": 0.0002501176470588235, "loss": 0.0454, "step": 1350 }, { "epoch": 12.06, "grad_norm": 0.12049918621778488, "learning_rate": 0.0002500588235294117, "loss": 0.0539, "step": 1351 }, { "epoch": 12.07, "grad_norm": 0.07435008138418198, "learning_rate": 0.00025, "loss": 0.0379, "step": 1352 }, { "epoch": 12.08, "grad_norm": 0.08182618021965027, "learning_rate": 0.0002499411764705882, "loss": 0.0429, "step": 1353 }, { "epoch": 12.09, "grad_norm": 0.06991595774888992, "learning_rate": 0.00024988235294117644, "loss": 0.0384, "step": 1354 }, { "epoch": 12.1, "grad_norm": 0.07907950133085251, "learning_rate": 0.0002498235294117647, "loss": 0.0405, "step": 1355 }, { "epoch": 12.11, "grad_norm": 0.0810050368309021, "learning_rate": 0.0002497647058823529, "loss": 0.0402, "step": 1356 
}, { "epoch": 12.12, "grad_norm": 0.12017211318016052, "learning_rate": 0.00024970588235294117, "loss": 0.049, "step": 1357 }, { "epoch": 12.12, "grad_norm": 0.06470083445310593, "learning_rate": 0.0002496470588235294, "loss": 0.0407, "step": 1358 }, { "epoch": 12.13, "grad_norm": 0.06676702201366425, "learning_rate": 0.0002495882352941176, "loss": 0.0375, "step": 1359 }, { "epoch": 12.14, "grad_norm": 0.07182049006223679, "learning_rate": 0.00024952941176470584, "loss": 0.0355, "step": 1360 }, { "epoch": 12.15, "grad_norm": 0.06126086786389351, "learning_rate": 0.0002494705882352941, "loss": 0.0364, "step": 1361 }, { "epoch": 12.16, "grad_norm": 0.08764153718948364, "learning_rate": 0.00024941176470588233, "loss": 0.0487, "step": 1362 }, { "epoch": 12.17, "grad_norm": 0.13072869181632996, "learning_rate": 0.0002493529411764706, "loss": 0.0502, "step": 1363 }, { "epoch": 12.18, "grad_norm": 0.07012687623500824, "learning_rate": 0.00024929411764705876, "loss": 0.0398, "step": 1364 }, { "epoch": 12.19, "grad_norm": 0.07666277885437012, "learning_rate": 0.00024923529411764706, "loss": 0.0376, "step": 1365 }, { "epoch": 12.2, "grad_norm": 0.07735677808523178, "learning_rate": 0.00024917647058823525, "loss": 0.04, "step": 1366 }, { "epoch": 12.21, "grad_norm": 0.07179579883813858, "learning_rate": 0.00024911764705882355, "loss": 0.0383, "step": 1367 }, { "epoch": 12.21, "grad_norm": 0.07943487912416458, "learning_rate": 0.00024905882352941174, "loss": 0.0393, "step": 1368 }, { "epoch": 12.22, "grad_norm": 0.1343044489622116, "learning_rate": 0.000249, "loss": 0.0517, "step": 1369 }, { "epoch": 12.23, "grad_norm": 0.0660211443901062, "learning_rate": 0.0002489411764705882, "loss": 0.0405, "step": 1370 }, { "epoch": 12.24, "grad_norm": 0.06342874467372894, "learning_rate": 0.00024888235294117647, "loss": 0.0351, "step": 1371 }, { "epoch": 12.25, "grad_norm": 0.06716018170118332, "learning_rate": 0.0002488235294117647, "loss": 0.0352, "step": 1372 }, { "epoch": 12.26, 
"grad_norm": 0.08210413157939911, "learning_rate": 0.0002487647058823529, "loss": 0.0403, "step": 1373 }, { "epoch": 12.27, "grad_norm": 0.07900650054216385, "learning_rate": 0.00024870588235294114, "loss": 0.0394, "step": 1374 }, { "epoch": 12.28, "grad_norm": 0.0946178138256073, "learning_rate": 0.0002486470588235294, "loss": 0.042, "step": 1375 }, { "epoch": 12.29, "grad_norm": 0.10450688749551773, "learning_rate": 0.00024858823529411763, "loss": 0.0461, "step": 1376 }, { "epoch": 12.29, "grad_norm": 0.07046644389629364, "learning_rate": 0.0002485294117647059, "loss": 0.0387, "step": 1377 }, { "epoch": 12.3, "grad_norm": 0.06566546112298965, "learning_rate": 0.0002484705882352941, "loss": 0.0379, "step": 1378 }, { "epoch": 12.31, "grad_norm": 0.07577628642320633, "learning_rate": 0.0002484117647058823, "loss": 0.0381, "step": 1379 }, { "epoch": 12.32, "grad_norm": 0.0703827366232872, "learning_rate": 0.00024835294117647055, "loss": 0.0389, "step": 1380 }, { "epoch": 12.33, "grad_norm": 0.08395031094551086, "learning_rate": 0.0002482941176470588, "loss": 0.0414, "step": 1381 }, { "epoch": 12.34, "grad_norm": 0.1367170810699463, "learning_rate": 0.00024823529411764704, "loss": 0.0547, "step": 1382 }, { "epoch": 12.35, "grad_norm": 0.07022585719823837, "learning_rate": 0.0002481764705882353, "loss": 0.0436, "step": 1383 }, { "epoch": 12.36, "grad_norm": 0.07005397975444794, "learning_rate": 0.0002481176470588235, "loss": 0.0379, "step": 1384 }, { "epoch": 12.37, "grad_norm": 0.07170049101114273, "learning_rate": 0.00024805882352941177, "loss": 0.0361, "step": 1385 }, { "epoch": 12.38, "grad_norm": 0.07410118728876114, "learning_rate": 0.00024799999999999996, "loss": 0.039, "step": 1386 }, { "epoch": 12.38, "grad_norm": 0.0882721021771431, "learning_rate": 0.0002479411764705882, "loss": 0.0349, "step": 1387 }, { "epoch": 12.39, "grad_norm": 0.12550528347492218, "learning_rate": 0.00024788235294117644, "loss": 0.0518, "step": 1388 }, { "epoch": 12.4, "grad_norm": 
0.0727790966629982, "learning_rate": 0.0002478235294117647, "loss": 0.0394, "step": 1389 }, { "epoch": 12.41, "grad_norm": 0.07850344479084015, "learning_rate": 0.00024776470588235293, "loss": 0.0386, "step": 1390 }, { "epoch": 12.42, "grad_norm": 0.08455193042755127, "learning_rate": 0.0002477058823529412, "loss": 0.0442, "step": 1391 }, { "epoch": 12.43, "grad_norm": 0.07608269900083542, "learning_rate": 0.00024764705882352936, "loss": 0.0422, "step": 1392 }, { "epoch": 12.44, "grad_norm": 0.07037638127803802, "learning_rate": 0.0002475882352941176, "loss": 0.035, "step": 1393 }, { "epoch": 12.45, "grad_norm": 0.1343112289905548, "learning_rate": 0.00024752941176470585, "loss": 0.0566, "step": 1394 }, { "epoch": 12.46, "grad_norm": 0.07384752482175827, "learning_rate": 0.0002474705882352941, "loss": 0.043, "step": 1395 }, { "epoch": 12.46, "grad_norm": 0.0817333459854126, "learning_rate": 0.00024741176470588234, "loss": 0.0449, "step": 1396 }, { "epoch": 12.47, "grad_norm": 0.07389001548290253, "learning_rate": 0.0002473529411764706, "loss": 0.0395, "step": 1397 }, { "epoch": 12.48, "grad_norm": 0.08193815499544144, "learning_rate": 0.0002472941176470588, "loss": 0.0419, "step": 1398 }, { "epoch": 12.49, "grad_norm": 0.06868834048509598, "learning_rate": 0.000247235294117647, "loss": 0.0352, "step": 1399 }, { "epoch": 12.5, "grad_norm": 0.10745295137166977, "learning_rate": 0.0002471764705882353, "loss": 0.0424, "step": 1400 }, { "epoch": 12.5, "eval_cer": 0.03482127085423925, "eval_loss": 0.17963390052318573, "eval_runtime": 22.1408, "eval_samples_per_second": 119.327, "eval_steps_per_second": 1.897, "eval_wer": 0.11909956366521222, "step": 1400 }, { "epoch": 12.51, "grad_norm": 0.11358443647623062, "learning_rate": 0.0002471176470588235, "loss": 0.0515, "step": 1401 }, { "epoch": 12.52, "grad_norm": 0.0776226669549942, "learning_rate": 0.00024705882352941174, "loss": 0.0415, "step": 1402 }, { "epoch": 12.53, "grad_norm": 0.08852792531251907, "learning_rate": 
0.000247, "loss": 0.0367, "step": 1403 }, { "epoch": 12.54, "grad_norm": 0.07338748872280121, "learning_rate": 0.00024694117647058823, "loss": 0.0406, "step": 1404 }, { "epoch": 12.54, "grad_norm": 0.0752982348203659, "learning_rate": 0.0002468823529411765, "loss": 0.0399, "step": 1405 }, { "epoch": 12.55, "grad_norm": 0.10211584717035294, "learning_rate": 0.00024682352941176466, "loss": 0.0399, "step": 1406 }, { "epoch": 12.56, "grad_norm": 0.1165146678686142, "learning_rate": 0.0002467647058823529, "loss": 0.0546, "step": 1407 }, { "epoch": 12.57, "grad_norm": 0.07215819507837296, "learning_rate": 0.00024670588235294115, "loss": 0.0393, "step": 1408 }, { "epoch": 12.58, "grad_norm": 0.07633297890424728, "learning_rate": 0.0002466470588235294, "loss": 0.0428, "step": 1409 }, { "epoch": 12.59, "grad_norm": 0.07824792712926865, "learning_rate": 0.00024658823529411764, "loss": 0.0397, "step": 1410 }, { "epoch": 12.6, "grad_norm": 0.07324550300836563, "learning_rate": 0.0002465294117647059, "loss": 0.043, "step": 1411 }, { "epoch": 12.61, "grad_norm": 0.08240360021591187, "learning_rate": 0.00024647058823529407, "loss": 0.039, "step": 1412 }, { "epoch": 12.62, "grad_norm": 0.15403307974338531, "learning_rate": 0.00024641176470588237, "loss": 0.0539, "step": 1413 }, { "epoch": 12.62, "grad_norm": 0.07089138776063919, "learning_rate": 0.00024635294117647056, "loss": 0.0394, "step": 1414 }, { "epoch": 12.63, "grad_norm": 0.06545587629079819, "learning_rate": 0.0002462941176470588, "loss": 0.0354, "step": 1415 }, { "epoch": 12.64, "grad_norm": 0.07407490164041519, "learning_rate": 0.00024623529411764704, "loss": 0.0414, "step": 1416 }, { "epoch": 12.65, "grad_norm": 0.07717980444431305, "learning_rate": 0.0002461764705882353, "loss": 0.0404, "step": 1417 }, { "epoch": 12.66, "grad_norm": 0.07711130380630493, "learning_rate": 0.00024611764705882353, "loss": 0.0374, "step": 1418 }, { "epoch": 12.67, "grad_norm": 0.12361445277929306, "learning_rate": 0.0002460588235294117, 
"loss": 0.0503, "step": 1419 }, { "epoch": 12.68, "grad_norm": 0.07513695955276489, "learning_rate": 0.00024599999999999996, "loss": 0.0405, "step": 1420 }, { "epoch": 12.69, "grad_norm": 0.07495496422052383, "learning_rate": 0.0002459411764705882, "loss": 0.0428, "step": 1421 }, { "epoch": 12.7, "grad_norm": 0.07376106828451157, "learning_rate": 0.00024588235294117645, "loss": 0.0372, "step": 1422 }, { "epoch": 12.71, "grad_norm": 0.0759698674082756, "learning_rate": 0.0002458235294117647, "loss": 0.0369, "step": 1423 }, { "epoch": 12.71, "grad_norm": 0.06762997061014175, "learning_rate": 0.00024576470588235294, "loss": 0.0354, "step": 1424 }, { "epoch": 12.72, "grad_norm": 0.09362104535102844, "learning_rate": 0.0002457058823529411, "loss": 0.0424, "step": 1425 }, { "epoch": 12.73, "grad_norm": 0.12256449460983276, "learning_rate": 0.0002456470588235294, "loss": 0.0449, "step": 1426 }, { "epoch": 12.74, "grad_norm": 0.07384248822927475, "learning_rate": 0.0002455882352941176, "loss": 0.0453, "step": 1427 }, { "epoch": 12.75, "grad_norm": 0.07044611871242523, "learning_rate": 0.00024552941176470586, "loss": 0.0397, "step": 1428 }, { "epoch": 12.76, "grad_norm": 0.0751928761601448, "learning_rate": 0.0002454705882352941, "loss": 0.0391, "step": 1429 }, { "epoch": 12.77, "grad_norm": 0.07614405453205109, "learning_rate": 0.00024541176470588234, "loss": 0.0372, "step": 1430 }, { "epoch": 12.78, "grad_norm": 0.08169575035572052, "learning_rate": 0.0002453529411764706, "loss": 0.043, "step": 1431 }, { "epoch": 12.79, "grad_norm": 0.16052187979221344, "learning_rate": 0.0002452941176470588, "loss": 0.0561, "step": 1432 }, { "epoch": 12.79, "grad_norm": 0.08795813471078873, "learning_rate": 0.00024523529411764707, "loss": 0.0384, "step": 1433 }, { "epoch": 12.8, "grad_norm": 0.07420073449611664, "learning_rate": 0.00024517647058823526, "loss": 0.0398, "step": 1434 }, { "epoch": 12.81, "grad_norm": 0.07348296046257019, "learning_rate": 0.0002451176470588235, "loss": 
0.0405, "step": 1435 }, { "epoch": 12.82, "grad_norm": 0.07852587848901749, "learning_rate": 0.00024505882352941175, "loss": 0.0398, "step": 1436 }, { "epoch": 12.83, "grad_norm": 0.08845631778240204, "learning_rate": 0.000245, "loss": 0.0409, "step": 1437 }, { "epoch": 12.84, "grad_norm": 0.13187581300735474, "learning_rate": 0.00024494117647058824, "loss": 0.0515, "step": 1438 }, { "epoch": 12.85, "grad_norm": 0.07846330851316452, "learning_rate": 0.0002448823529411764, "loss": 0.041, "step": 1439 }, { "epoch": 12.86, "grad_norm": 0.07889274507761002, "learning_rate": 0.00024482352941176467, "loss": 0.0415, "step": 1440 }, { "epoch": 12.87, "grad_norm": 0.0869123712182045, "learning_rate": 0.0002447647058823529, "loss": 0.0413, "step": 1441 }, { "epoch": 12.88, "grad_norm": 0.0797288566827774, "learning_rate": 0.00024470588235294116, "loss": 0.0452, "step": 1442 }, { "epoch": 12.88, "grad_norm": 0.08006476610898972, "learning_rate": 0.0002446470588235294, "loss": 0.0405, "step": 1443 }, { "epoch": 12.89, "grad_norm": 0.1381281018257141, "learning_rate": 0.00024458823529411764, "loss": 0.0633, "step": 1444 }, { "epoch": 12.9, "grad_norm": 0.07537613064050674, "learning_rate": 0.00024452941176470583, "loss": 0.0411, "step": 1445 }, { "epoch": 12.91, "grad_norm": 0.075586698949337, "learning_rate": 0.00024447058823529413, "loss": 0.0396, "step": 1446 }, { "epoch": 12.92, "grad_norm": 0.07694211602210999, "learning_rate": 0.0002444117647058823, "loss": 0.0417, "step": 1447 }, { "epoch": 12.93, "grad_norm": 0.07583875954151154, "learning_rate": 0.00024435294117647056, "loss": 0.0391, "step": 1448 }, { "epoch": 12.94, "grad_norm": 0.08428283780813217, "learning_rate": 0.0002442941176470588, "loss": 0.039, "step": 1449 }, { "epoch": 12.95, "grad_norm": 0.10015322268009186, "learning_rate": 0.00024423529411764705, "loss": 0.0438, "step": 1450 }, { "epoch": 12.96, "grad_norm": 0.09561532735824585, "learning_rate": 0.0002441764705882353, "loss": 0.0471, "step": 1451 }, { 
"epoch": 12.96, "grad_norm": 0.07340362668037415, "learning_rate": 0.0002441176470588235, "loss": 0.0399, "step": 1452 }, { "epoch": 12.97, "grad_norm": 0.07676450163125992, "learning_rate": 0.00024405882352941172, "loss": 0.0374, "step": 1453 }, { "epoch": 12.98, "grad_norm": 0.09453106671571732, "learning_rate": 0.000244, "loss": 0.0432, "step": 1454 }, { "epoch": 12.99, "grad_norm": 0.09403645992279053, "learning_rate": 0.0002439411764705882, "loss": 0.0406, "step": 1455 }, { "epoch": 13.0, "grad_norm": 0.14890922605991364, "learning_rate": 0.00024388235294117645, "loss": 0.0434, "step": 1456 }, { "epoch": 13.01, "grad_norm": 0.07152199000120163, "learning_rate": 0.00024382352941176467, "loss": 0.0424, "step": 1457 }, { "epoch": 13.02, "grad_norm": 0.08089304715394974, "learning_rate": 0.00024376470588235291, "loss": 0.0423, "step": 1458 }, { "epoch": 13.03, "grad_norm": 0.06755346804857254, "learning_rate": 0.00024370588235294116, "loss": 0.036, "step": 1459 }, { "epoch": 13.04, "grad_norm": 0.0716506764292717, "learning_rate": 0.00024364705882352937, "loss": 0.0376, "step": 1460 }, { "epoch": 13.04, "grad_norm": 0.07225367426872253, "learning_rate": 0.00024358823529411764, "loss": 0.0424, "step": 1461 }, { "epoch": 13.05, "grad_norm": 0.09959781169891357, "learning_rate": 0.00024352941176470586, "loss": 0.0487, "step": 1462 }, { "epoch": 13.06, "grad_norm": 0.10509227961301804, "learning_rate": 0.00024347058823529408, "loss": 0.0452, "step": 1463 }, { "epoch": 13.07, "grad_norm": 0.06608708202838898, "learning_rate": 0.00024341176470588235, "loss": 0.0387, "step": 1464 }, { "epoch": 13.08, "grad_norm": 0.06264564394950867, "learning_rate": 0.00024335294117647056, "loss": 0.0387, "step": 1465 }, { "epoch": 13.09, "grad_norm": 0.07046733051538467, "learning_rate": 0.0002432941176470588, "loss": 0.0348, "step": 1466 }, { "epoch": 13.1, "grad_norm": 0.0691027045249939, "learning_rate": 0.00024323529411764705, "loss": 0.0363, "step": 1467 }, { "epoch": 13.11, 
"grad_norm": 0.07697401940822601, "learning_rate": 0.00024317647058823527, "loss": 0.0355, "step": 1468 }, { "epoch": 13.12, "grad_norm": 0.12853693962097168, "learning_rate": 0.0002431176470588235, "loss": 0.0493, "step": 1469 }, { "epoch": 13.12, "grad_norm": 0.06530895829200745, "learning_rate": 0.00024305882352941173, "loss": 0.0376, "step": 1470 }, { "epoch": 13.13, "grad_norm": 0.06249760091304779, "learning_rate": 0.000243, "loss": 0.0357, "step": 1471 }, { "epoch": 13.14, "grad_norm": 0.073604054749012, "learning_rate": 0.00024294117647058821, "loss": 0.0384, "step": 1472 }, { "epoch": 13.15, "grad_norm": 0.0683189257979393, "learning_rate": 0.00024288235294117643, "loss": 0.0387, "step": 1473 }, { "epoch": 13.16, "grad_norm": 0.08956163376569748, "learning_rate": 0.0002428235294117647, "loss": 0.0411, "step": 1474 }, { "epoch": 13.17, "grad_norm": 0.13329704105854034, "learning_rate": 0.00024276470588235292, "loss": 0.0559, "step": 1475 }, { "epoch": 13.18, "grad_norm": 0.06661229580640793, "learning_rate": 0.00024270588235294113, "loss": 0.0371, "step": 1476 }, { "epoch": 13.19, "grad_norm": 0.07215193659067154, "learning_rate": 0.0002426470588235294, "loss": 0.0422, "step": 1477 }, { "epoch": 13.2, "grad_norm": 0.06695840507745743, "learning_rate": 0.00024258823529411762, "loss": 0.0355, "step": 1478 }, { "epoch": 13.21, "grad_norm": 0.07014728337526321, "learning_rate": 0.00024252941176470586, "loss": 0.0336, "step": 1479 }, { "epoch": 13.21, "grad_norm": 0.06828466057777405, "learning_rate": 0.0002424705882352941, "loss": 0.0311, "step": 1480 }, { "epoch": 13.22, "grad_norm": 0.13612179458141327, "learning_rate": 0.00024241176470588232, "loss": 0.0482, "step": 1481 }, { "epoch": 13.23, "grad_norm": 0.0706053376197815, "learning_rate": 0.00024235294117647057, "loss": 0.0381, "step": 1482 }, { "epoch": 13.24, "grad_norm": 0.08331095427274704, "learning_rate": 0.00024229411764705878, "loss": 0.0379, "step": 1483 }, { "epoch": 13.25, "grad_norm": 
0.06359073519706726, "learning_rate": 0.00024223529411764705, "loss": 0.0337, "step": 1484 }, { "epoch": 13.26, "grad_norm": 0.07826108485460281, "learning_rate": 0.00024217647058823527, "loss": 0.0356, "step": 1485 }, { "epoch": 13.27, "grad_norm": 0.07564547657966614, "learning_rate": 0.0002421176470588235, "loss": 0.0373, "step": 1486 }, { "epoch": 13.28, "grad_norm": 0.10281079262495041, "learning_rate": 0.00024205882352941176, "loss": 0.0389, "step": 1487 }, { "epoch": 13.29, "grad_norm": 0.10683814436197281, "learning_rate": 0.00024199999999999997, "loss": 0.0477, "step": 1488 }, { "epoch": 13.29, "grad_norm": 0.06714615225791931, "learning_rate": 0.00024194117647058822, "loss": 0.0399, "step": 1489 }, { "epoch": 13.3, "grad_norm": 0.06968620419502258, "learning_rate": 0.00024188235294117646, "loss": 0.0363, "step": 1490 }, { "epoch": 13.31, "grad_norm": 0.06511169672012329, "learning_rate": 0.00024182352941176468, "loss": 0.0415, "step": 1491 }, { "epoch": 13.32, "grad_norm": 0.06858762353658676, "learning_rate": 0.00024176470588235292, "loss": 0.0379, "step": 1492 }, { "epoch": 13.33, "grad_norm": 0.08811775594949722, "learning_rate": 0.00024170588235294114, "loss": 0.0421, "step": 1493 }, { "epoch": 13.34, "grad_norm": 0.12302280217409134, "learning_rate": 0.0002416470588235294, "loss": 0.0596, "step": 1494 }, { "epoch": 13.35, "grad_norm": 0.07077848166227341, "learning_rate": 0.00024158823529411762, "loss": 0.0381, "step": 1495 }, { "epoch": 13.36, "grad_norm": 0.08123049885034561, "learning_rate": 0.00024152941176470584, "loss": 0.0375, "step": 1496 }, { "epoch": 13.37, "grad_norm": 0.07264683395624161, "learning_rate": 0.0002414705882352941, "loss": 0.04, "step": 1497 }, { "epoch": 13.38, "grad_norm": 0.07483851909637451, "learning_rate": 0.00024141176470588233, "loss": 0.04, "step": 1498 }, { "epoch": 13.38, "grad_norm": 0.07415857166051865, "learning_rate": 0.00024135294117647057, "loss": 0.034, "step": 1499 }, { "epoch": 13.39, "grad_norm": 
0.10655782371759415, "learning_rate": 0.0002412941176470588, "loss": 0.0469, "step": 1500 }, { "epoch": 13.39, "eval_cer": 0.03583227159463756, "eval_loss": 0.18480569124221802, "eval_runtime": 21.8993, "eval_samples_per_second": 120.643, "eval_steps_per_second": 1.918, "eval_wer": 0.12072590241967474, "step": 1500 }, { "epoch": 13.4, "grad_norm": 0.07293380796909332, "learning_rate": 0.00024123529411764703, "loss": 0.038, "step": 1501 }, { "epoch": 13.41, "grad_norm": 0.07702398300170898, "learning_rate": 0.00024117647058823527, "loss": 0.0401, "step": 1502 }, { "epoch": 13.42, "grad_norm": 0.07244368642568588, "learning_rate": 0.00024111764705882352, "loss": 0.0404, "step": 1503 }, { "epoch": 13.43, "grad_norm": 0.06752533465623856, "learning_rate": 0.00024105882352941176, "loss": 0.035, "step": 1504 }, { "epoch": 13.44, "grad_norm": 0.06490998715162277, "learning_rate": 0.00024099999999999998, "loss": 0.0363, "step": 1505 }, { "epoch": 13.45, "grad_norm": 0.11970426887273788, "learning_rate": 0.0002409411764705882, "loss": 0.0499, "step": 1506 }, { "epoch": 13.46, "grad_norm": 0.06805020570755005, "learning_rate": 0.00024088235294117646, "loss": 0.0369, "step": 1507 }, { "epoch": 13.46, "grad_norm": 0.0638478472828865, "learning_rate": 0.00024082352941176468, "loss": 0.0382, "step": 1508 }, { "epoch": 13.47, "grad_norm": 0.05978140980005264, "learning_rate": 0.0002407647058823529, "loss": 0.0308, "step": 1509 }, { "epoch": 13.48, "grad_norm": 0.06912688165903091, "learning_rate": 0.00024070588235294117, "loss": 0.0346, "step": 1510 }, { "epoch": 13.49, "grad_norm": 0.08909628540277481, "learning_rate": 0.00024064705882352938, "loss": 0.0399, "step": 1511 }, { "epoch": 13.5, "grad_norm": 0.10677731782197952, "learning_rate": 0.00024058823529411763, "loss": 0.0439, "step": 1512 }, { "epoch": 13.51, "grad_norm": 0.10795870423316956, "learning_rate": 0.00024052941176470587, "loss": 0.0493, "step": 1513 }, { "epoch": 13.52, "grad_norm": 0.06797435134649277, 
"learning_rate": 0.00024047058823529409, "loss": 0.0397, "step": 1514 }, { "epoch": 13.53, "grad_norm": 0.07106486707925797, "learning_rate": 0.00024041176470588233, "loss": 0.0352, "step": 1515 }, { "epoch": 13.54, "grad_norm": 0.07276222109794617, "learning_rate": 0.00024035294117647057, "loss": 0.0387, "step": 1516 }, { "epoch": 13.54, "grad_norm": 0.07053562998771667, "learning_rate": 0.00024029411764705882, "loss": 0.037, "step": 1517 }, { "epoch": 13.55, "grad_norm": 0.08380800485610962, "learning_rate": 0.00024023529411764703, "loss": 0.0375, "step": 1518 }, { "epoch": 13.56, "grad_norm": 0.11110502481460571, "learning_rate": 0.00024017647058823525, "loss": 0.0526, "step": 1519 }, { "epoch": 13.57, "grad_norm": 0.06970023363828659, "learning_rate": 0.00024011764705882352, "loss": 0.0419, "step": 1520 }, { "epoch": 13.58, "grad_norm": 0.06434200704097748, "learning_rate": 0.00024005882352941174, "loss": 0.0392, "step": 1521 }, { "epoch": 13.59, "grad_norm": 0.07295121997594833, "learning_rate": 0.00023999999999999998, "loss": 0.0367, "step": 1522 }, { "epoch": 13.6, "grad_norm": 0.07384601980447769, "learning_rate": 0.00023994117647058822, "loss": 0.0338, "step": 1523 }, { "epoch": 13.61, "grad_norm": 0.0841047614812851, "learning_rate": 0.00023988235294117644, "loss": 0.0347, "step": 1524 }, { "epoch": 13.62, "grad_norm": 0.153300479054451, "learning_rate": 0.00023982352941176468, "loss": 0.0684, "step": 1525 }, { "epoch": 13.62, "grad_norm": 0.06421291083097458, "learning_rate": 0.00023976470588235293, "loss": 0.0375, "step": 1526 }, { "epoch": 13.63, "grad_norm": 0.06903710216283798, "learning_rate": 0.00023970588235294117, "loss": 0.0423, "step": 1527 }, { "epoch": 13.64, "grad_norm": 0.06346481293439865, "learning_rate": 0.00023964705882352939, "loss": 0.0369, "step": 1528 }, { "epoch": 13.65, "grad_norm": 0.06991536915302277, "learning_rate": 0.0002395882352941176, "loss": 0.0377, "step": 1529 }, { "epoch": 13.66, "grad_norm": 0.07881798595190048, 
"learning_rate": 0.00023952941176470587, "loss": 0.0424, "step": 1530 }, { "epoch": 13.67, "grad_norm": 0.14213302731513977, "learning_rate": 0.0002394705882352941, "loss": 0.0601, "step": 1531 }, { "epoch": 13.68, "grad_norm": 0.06558680534362793, "learning_rate": 0.00023941176470588236, "loss": 0.0355, "step": 1532 }, { "epoch": 13.69, "grad_norm": 0.06903904676437378, "learning_rate": 0.00023935294117647058, "loss": 0.036, "step": 1533 }, { "epoch": 13.7, "grad_norm": 0.07653883099555969, "learning_rate": 0.0002392941176470588, "loss": 0.0415, "step": 1534 }, { "epoch": 13.71, "grad_norm": 0.07556813955307007, "learning_rate": 0.00023923529411764704, "loss": 0.0326, "step": 1535 }, { "epoch": 13.71, "grad_norm": 0.08494540303945541, "learning_rate": 0.00023917647058823528, "loss": 0.0404, "step": 1536 }, { "epoch": 13.72, "grad_norm": 0.0914946123957634, "learning_rate": 0.00023911764705882352, "loss": 0.0392, "step": 1537 }, { "epoch": 13.73, "grad_norm": 0.09990125149488449, "learning_rate": 0.00023905882352941174, "loss": 0.0477, "step": 1538 }, { "epoch": 13.74, "grad_norm": 0.06531285494565964, "learning_rate": 0.00023899999999999998, "loss": 0.0368, "step": 1539 }, { "epoch": 13.75, "grad_norm": 0.0672503188252449, "learning_rate": 0.00023894117647058823, "loss": 0.0355, "step": 1540 }, { "epoch": 13.76, "grad_norm": 0.06878861039876938, "learning_rate": 0.00023888235294117644, "loss": 0.0333, "step": 1541 }, { "epoch": 13.77, "grad_norm": 0.07407777011394501, "learning_rate": 0.00023882352941176466, "loss": 0.0361, "step": 1542 }, { "epoch": 13.78, "grad_norm": 0.09538985043764114, "learning_rate": 0.00023876470588235293, "loss": 0.0405, "step": 1543 }, { "epoch": 13.79, "grad_norm": 0.1270383596420288, "learning_rate": 0.00023870588235294114, "loss": 0.051, "step": 1544 }, { "epoch": 13.79, "grad_norm": 0.06746938079595566, "learning_rate": 0.00023864705882352942, "loss": 0.0371, "step": 1545 }, { "epoch": 13.8, "grad_norm": 0.0679408609867096, 
"learning_rate": 0.00023858823529411763, "loss": 0.0384, "step": 1546 }, { "epoch": 13.81, "grad_norm": 0.06483151018619537, "learning_rate": 0.00023852941176470585, "loss": 0.0349, "step": 1547 }, { "epoch": 13.82, "grad_norm": 0.07324409484863281, "learning_rate": 0.0002384705882352941, "loss": 0.0382, "step": 1548 }, { "epoch": 13.83, "grad_norm": 0.08959255367517471, "learning_rate": 0.00023841176470588233, "loss": 0.0379, "step": 1549 }, { "epoch": 13.84, "grad_norm": 0.1187070682644844, "learning_rate": 0.00023835294117647058, "loss": 0.0512, "step": 1550 }, { "epoch": 13.85, "grad_norm": 0.07361667603254318, "learning_rate": 0.0002382941176470588, "loss": 0.0415, "step": 1551 }, { "epoch": 13.86, "grad_norm": 0.07351993024349213, "learning_rate": 0.000238235294117647, "loss": 0.0367, "step": 1552 }, { "epoch": 13.87, "grad_norm": 0.0712309330701828, "learning_rate": 0.00023817647058823528, "loss": 0.0398, "step": 1553 }, { "epoch": 13.88, "grad_norm": 0.06710576266050339, "learning_rate": 0.0002381176470588235, "loss": 0.0391, "step": 1554 }, { "epoch": 13.88, "grad_norm": 0.08506819605827332, "learning_rate": 0.00023805882352941177, "loss": 0.0373, "step": 1555 }, { "epoch": 13.89, "grad_norm": 0.16108375787734985, "learning_rate": 0.00023799999999999998, "loss": 0.0627, "step": 1556 }, { "epoch": 13.9, "grad_norm": 0.06580577790737152, "learning_rate": 0.0002379411764705882, "loss": 0.0333, "step": 1557 }, { "epoch": 13.91, "grad_norm": 0.07547849416732788, "learning_rate": 0.00023788235294117644, "loss": 0.0396, "step": 1558 }, { "epoch": 13.92, "grad_norm": 0.07966912537813187, "learning_rate": 0.0002378235294117647, "loss": 0.0391, "step": 1559 }, { "epoch": 13.93, "grad_norm": 0.06370997428894043, "learning_rate": 0.00023776470588235293, "loss": 0.0319, "step": 1560 }, { "epoch": 13.94, "grad_norm": 0.078092560172081, "learning_rate": 0.00023770588235294115, "loss": 0.0402, "step": 1561 }, { "epoch": 13.95, "grad_norm": 0.08986909687519073, 
"learning_rate": 0.0002376470588235294, "loss": 0.0412, "step": 1562 }, { "epoch": 13.96, "grad_norm": 0.10101500153541565, "learning_rate": 0.00023758823529411763, "loss": 0.0501, "step": 1563 }, { "epoch": 13.96, "grad_norm": 0.07079905271530151, "learning_rate": 0.00023752941176470585, "loss": 0.0386, "step": 1564 }, { "epoch": 13.97, "grad_norm": 0.07269637286663055, "learning_rate": 0.00023747058823529412, "loss": 0.0366, "step": 1565 }, { "epoch": 13.98, "grad_norm": 0.07832439988851547, "learning_rate": 0.00023741176470588234, "loss": 0.0407, "step": 1566 }, { "epoch": 13.99, "grad_norm": 0.09018362313508987, "learning_rate": 0.00023735294117647055, "loss": 0.0439, "step": 1567 }, { "epoch": 14.0, "grad_norm": 0.13287939131259918, "learning_rate": 0.00023729411764705882, "loss": 0.0475, "step": 1568 }, { "epoch": 14.01, "grad_norm": 0.06339030712842941, "learning_rate": 0.00023723529411764704, "loss": 0.0317, "step": 1569 }, { "epoch": 14.02, "grad_norm": 0.06684370338916779, "learning_rate": 0.00023717647058823526, "loss": 0.038, "step": 1570 }, { "epoch": 14.03, "grad_norm": 0.06527197360992432, "learning_rate": 0.0002371176470588235, "loss": 0.0366, "step": 1571 }, { "epoch": 14.04, "grad_norm": 0.06343524903059006, "learning_rate": 0.00023705882352941174, "loss": 0.0365, "step": 1572 }, { "epoch": 14.04, "grad_norm": 0.05973381549119949, "learning_rate": 0.000237, "loss": 0.0328, "step": 1573 }, { "epoch": 14.05, "grad_norm": 0.08552396297454834, "learning_rate": 0.0002369411764705882, "loss": 0.0356, "step": 1574 }, { "epoch": 14.06, "grad_norm": 0.12069303542375565, "learning_rate": 0.00023688235294117645, "loss": 0.0433, "step": 1575 }, { "epoch": 14.07, "grad_norm": 0.06969575583934784, "learning_rate": 0.0002368235294117647, "loss": 0.0355, "step": 1576 }, { "epoch": 14.08, "grad_norm": 0.07012692093849182, "learning_rate": 0.0002367647058823529, "loss": 0.0387, "step": 1577 }, { "epoch": 14.09, "grad_norm": 0.06612176448106766, "learning_rate": 
0.00023670588235294118, "loss": 0.033, "step": 1578 }, { "epoch": 14.1, "grad_norm": 0.06898734718561172, "learning_rate": 0.0002366470588235294, "loss": 0.0353, "step": 1579 }, { "epoch": 14.11, "grad_norm": 0.0772644579410553, "learning_rate": 0.0002365882352941176, "loss": 0.0318, "step": 1580 }, { "epoch": 14.12, "grad_norm": 0.10841916501522064, "learning_rate": 0.00023652941176470588, "loss": 0.0451, "step": 1581 }, { "epoch": 14.12, "grad_norm": 0.06888675689697266, "learning_rate": 0.0002364705882352941, "loss": 0.0362, "step": 1582 }, { "epoch": 14.13, "grad_norm": 0.06781990081071854, "learning_rate": 0.00023641176470588234, "loss": 0.0349, "step": 1583 }, { "epoch": 14.14, "grad_norm": 0.060208071023225784, "learning_rate": 0.00023635294117647056, "loss": 0.0331, "step": 1584 }, { "epoch": 14.15, "grad_norm": 0.0651528388261795, "learning_rate": 0.0002362941176470588, "loss": 0.0359, "step": 1585 }, { "epoch": 14.16, "grad_norm": 0.0784277617931366, "learning_rate": 0.00023623529411764704, "loss": 0.0386, "step": 1586 }, { "epoch": 14.17, "grad_norm": 0.1297052502632141, "learning_rate": 0.00023617647058823526, "loss": 0.0465, "step": 1587 }, { "epoch": 14.18, "grad_norm": 0.06815207749605179, "learning_rate": 0.00023611764705882353, "loss": 0.0353, "step": 1588 }, { "epoch": 14.19, "grad_norm": 0.06945068389177322, "learning_rate": 0.00023605882352941175, "loss": 0.0339, "step": 1589 }, { "epoch": 14.2, "grad_norm": 0.07188992947340012, "learning_rate": 0.00023599999999999996, "loss": 0.0353, "step": 1590 }, { "epoch": 14.21, "grad_norm": 0.07547110319137573, "learning_rate": 0.00023594117647058823, "loss": 0.0381, "step": 1591 }, { "epoch": 14.21, "grad_norm": 0.07905665785074234, "learning_rate": 0.00023588235294117645, "loss": 0.0355, "step": 1592 }, { "epoch": 14.22, "grad_norm": 0.14025074243545532, "learning_rate": 0.0002358235294117647, "loss": 0.0597, "step": 1593 }, { "epoch": 14.23, "grad_norm": 0.06963734328746796, "learning_rate": 
0.0002357647058823529, "loss": 0.0364, "step": 1594 }, { "epoch": 14.24, "grad_norm": 0.06071014329791069, "learning_rate": 0.00023570588235294115, "loss": 0.0358, "step": 1595 }, { "epoch": 14.25, "grad_norm": 0.06164901331067085, "learning_rate": 0.0002356470588235294, "loss": 0.032, "step": 1596 }, { "epoch": 14.26, "grad_norm": 0.06867073476314545, "learning_rate": 0.0002355882352941176, "loss": 0.0362, "step": 1597 }, { "epoch": 14.27, "grad_norm": 0.07247691601514816, "learning_rate": 0.00023552941176470588, "loss": 0.0364, "step": 1598 }, { "epoch": 14.28, "grad_norm": 0.10158004611730576, "learning_rate": 0.0002354705882352941, "loss": 0.0448, "step": 1599 }, { "epoch": 14.29, "grad_norm": 0.10288799554109573, "learning_rate": 0.00023541176470588232, "loss": 0.0414, "step": 1600 }, { "epoch": 14.29, "eval_cer": 0.03673427969016022, "eval_loss": 0.18632982671260834, "eval_runtime": 22.0305, "eval_samples_per_second": 119.925, "eval_steps_per_second": 1.906, "eval_wer": 0.12126140420468068, "step": 1600 }, { "epoch": 14.29, "grad_norm": 0.07562468200922012, "learning_rate": 0.0002353529411764706, "loss": 0.037, "step": 1601 }, { "epoch": 14.3, "grad_norm": 0.0639897957444191, "learning_rate": 0.0002352941176470588, "loss": 0.0337, "step": 1602 }, { "epoch": 14.31, "grad_norm": 0.07470469176769257, "learning_rate": 0.00023523529411764702, "loss": 0.0376, "step": 1603 }, { "epoch": 14.32, "grad_norm": 0.07621467858552933, "learning_rate": 0.0002351764705882353, "loss": 0.0343, "step": 1604 }, { "epoch": 14.33, "grad_norm": 0.09280934184789658, "learning_rate": 0.0002351176470588235, "loss": 0.0375, "step": 1605 }, { "epoch": 14.34, "grad_norm": 0.12475255876779556, "learning_rate": 0.00023505882352941175, "loss": 0.0496, "step": 1606 }, { "epoch": 14.35, "grad_norm": 0.0690474882721901, "learning_rate": 0.00023499999999999997, "loss": 0.036, "step": 1607 }, { "epoch": 14.36, "grad_norm": 0.07725930213928223, "learning_rate": 0.0002349411764705882, "loss": 
0.0397, "step": 1608 }, { "epoch": 14.37, "grad_norm": 0.0686090812087059, "learning_rate": 0.00023488235294117645, "loss": 0.036, "step": 1609 }, { "epoch": 14.38, "grad_norm": 0.06346847862005234, "learning_rate": 0.00023482352941176467, "loss": 0.0362, "step": 1610 }, { "epoch": 14.38, "grad_norm": 0.0800243616104126, "learning_rate": 0.00023476470588235294, "loss": 0.032, "step": 1611 }, { "epoch": 14.39, "grad_norm": 0.12318249046802521, "learning_rate": 0.00023470588235294116, "loss": 0.0464, "step": 1612 }, { "epoch": 14.4, "grad_norm": 0.06889480352401733, "learning_rate": 0.00023464705882352937, "loss": 0.0375, "step": 1613 }, { "epoch": 14.41, "grad_norm": 0.07333625853061676, "learning_rate": 0.00023458823529411764, "loss": 0.0377, "step": 1614 }, { "epoch": 14.42, "grad_norm": 0.0626189261674881, "learning_rate": 0.00023452941176470586, "loss": 0.0328, "step": 1615 }, { "epoch": 14.43, "grad_norm": 0.08424117416143417, "learning_rate": 0.0002344705882352941, "loss": 0.0424, "step": 1616 }, { "epoch": 14.44, "grad_norm": 0.0901605561375618, "learning_rate": 0.00023441176470588232, "loss": 0.0408, "step": 1617 }, { "epoch": 14.45, "grad_norm": 0.10998565703630447, "learning_rate": 0.00023435294117647056, "loss": 0.0472, "step": 1618 }, { "epoch": 14.46, "grad_norm": 0.06640747934579849, "learning_rate": 0.0002342941176470588, "loss": 0.0403, "step": 1619 }, { "epoch": 14.46, "grad_norm": 0.07842729985713959, "learning_rate": 0.00023423529411764702, "loss": 0.0376, "step": 1620 }, { "epoch": 14.47, "grad_norm": 0.06519323587417603, "learning_rate": 0.0002341764705882353, "loss": 0.0377, "step": 1621 }, { "epoch": 14.48, "grad_norm": 0.06695389747619629, "learning_rate": 0.0002341176470588235, "loss": 0.0344, "step": 1622 }, { "epoch": 14.49, "grad_norm": 0.06938100606203079, "learning_rate": 0.00023405882352941173, "loss": 0.0341, "step": 1623 }, { "epoch": 14.5, "grad_norm": 0.11317170411348343, "learning_rate": 0.000234, "loss": 0.0471, "step": 1624 }, { 
"epoch": 14.51, "grad_norm": 0.09052317589521408, "learning_rate": 0.0002339411764705882, "loss": 0.0425, "step": 1625 }, { "epoch": 14.52, "grad_norm": 0.06292369216680527, "learning_rate": 0.00023388235294117646, "loss": 0.0324, "step": 1626 }, { "epoch": 14.53, "grad_norm": 0.06844203919172287, "learning_rate": 0.0002338235294117647, "loss": 0.0318, "step": 1627 }, { "epoch": 14.54, "grad_norm": 0.07209029793739319, "learning_rate": 0.00023376470588235292, "loss": 0.0321, "step": 1628 }, { "epoch": 14.54, "grad_norm": 0.07459453493356705, "learning_rate": 0.00023370588235294116, "loss": 0.0373, "step": 1629 }, { "epoch": 14.55, "grad_norm": 0.07792117446660995, "learning_rate": 0.00023364705882352938, "loss": 0.0334, "step": 1630 }, { "epoch": 14.56, "grad_norm": 0.10604255646467209, "learning_rate": 0.00023358823529411765, "loss": 0.0535, "step": 1631 }, { "epoch": 14.57, "grad_norm": 0.06339162588119507, "learning_rate": 0.00023352941176470586, "loss": 0.0334, "step": 1632 }, { "epoch": 14.58, "grad_norm": 0.06921108067035675, "learning_rate": 0.00023347058823529408, "loss": 0.0372, "step": 1633 }, { "epoch": 14.59, "grad_norm": 0.058756157755851746, "learning_rate": 0.00023341176470588235, "loss": 0.0315, "step": 1634 }, { "epoch": 14.6, "grad_norm": 0.07588847726583481, "learning_rate": 0.00023335294117647057, "loss": 0.0353, "step": 1635 }, { "epoch": 14.61, "grad_norm": 0.08001239597797394, "learning_rate": 0.00023329411764705878, "loss": 0.0318, "step": 1636 }, { "epoch": 14.62, "grad_norm": 0.11674462258815765, "learning_rate": 0.00023323529411764705, "loss": 0.0436, "step": 1637 }, { "epoch": 14.62, "grad_norm": 0.07083316892385483, "learning_rate": 0.00023317647058823527, "loss": 0.0394, "step": 1638 }, { "epoch": 14.63, "grad_norm": 0.07271532714366913, "learning_rate": 0.0002331176470588235, "loss": 0.0364, "step": 1639 }, { "epoch": 14.64, "grad_norm": 0.07413634657859802, "learning_rate": 0.00023305882352941176, "loss": 0.0378, "step": 1640 }, { 
"epoch": 14.65, "grad_norm": 0.07597372680902481, "learning_rate": 0.00023299999999999997, "loss": 0.0334, "step": 1641 }, { "epoch": 14.66, "grad_norm": 0.06561561673879623, "learning_rate": 0.00023294117647058821, "loss": 0.0337, "step": 1642 }, { "epoch": 14.67, "grad_norm": 0.12736088037490845, "learning_rate": 0.00023288235294117643, "loss": 0.0436, "step": 1643 }, { "epoch": 14.68, "grad_norm": 0.06355338543653488, "learning_rate": 0.0002328235294117647, "loss": 0.0367, "step": 1644 }, { "epoch": 14.69, "grad_norm": 0.06789432466030121, "learning_rate": 0.00023276470588235292, "loss": 0.0348, "step": 1645 }, { "epoch": 14.7, "grad_norm": 0.0628594234585762, "learning_rate": 0.00023270588235294113, "loss": 0.0351, "step": 1646 }, { "epoch": 14.71, "grad_norm": 0.0669761672616005, "learning_rate": 0.0002326470588235294, "loss": 0.0358, "step": 1647 }, { "epoch": 14.71, "grad_norm": 0.0717419907450676, "learning_rate": 0.00023258823529411762, "loss": 0.032, "step": 1648 }, { "epoch": 14.72, "grad_norm": 0.08335498720407486, "learning_rate": 0.00023252941176470586, "loss": 0.0329, "step": 1649 }, { "epoch": 14.73, "grad_norm": 0.13398894667625427, "learning_rate": 0.0002324705882352941, "loss": 0.0498, "step": 1650 }, { "epoch": 14.74, "grad_norm": 0.06331564486026764, "learning_rate": 0.00023241176470588232, "loss": 0.0348, "step": 1651 }, { "epoch": 14.75, "grad_norm": 0.07277001440525055, "learning_rate": 0.00023235294117647057, "loss": 0.039, "step": 1652 }, { "epoch": 14.76, "grad_norm": 0.07185853272676468, "learning_rate": 0.00023229411764705878, "loss": 0.0409, "step": 1653 }, { "epoch": 14.77, "grad_norm": 0.06759529560804367, "learning_rate": 0.00023223529411764705, "loss": 0.0334, "step": 1654 }, { "epoch": 14.78, "grad_norm": 0.0870920717716217, "learning_rate": 0.00023217647058823527, "loss": 0.0356, "step": 1655 }, { "epoch": 14.79, "grad_norm": 0.13036197423934937, "learning_rate": 0.0002321176470588235, "loss": 0.0565, "step": 1656 }, { "epoch": 
14.79, "grad_norm": 0.05877867713570595, "learning_rate": 0.00023205882352941176, "loss": 0.0314, "step": 1657 }, { "epoch": 14.8, "grad_norm": 0.06166622415184975, "learning_rate": 0.00023199999999999997, "loss": 0.038, "step": 1658 }, { "epoch": 14.81, "grad_norm": 0.06303553283214569, "learning_rate": 0.00023194117647058822, "loss": 0.0352, "step": 1659 }, { "epoch": 14.82, "grad_norm": 0.07221624255180359, "learning_rate": 0.00023188235294117646, "loss": 0.0357, "step": 1660 }, { "epoch": 14.83, "grad_norm": 0.07816413044929504, "learning_rate": 0.00023182352941176468, "loss": 0.0322, "step": 1661 }, { "epoch": 14.84, "grad_norm": 0.13044504821300507, "learning_rate": 0.00023176470588235292, "loss": 0.0458, "step": 1662 }, { "epoch": 14.85, "grad_norm": 0.06987060606479645, "learning_rate": 0.00023170588235294116, "loss": 0.0382, "step": 1663 }, { "epoch": 14.86, "grad_norm": 0.0673326849937439, "learning_rate": 0.0002316470588235294, "loss": 0.0356, "step": 1664 }, { "epoch": 14.87, "grad_norm": 0.08185671269893646, "learning_rate": 0.00023158823529411762, "loss": 0.0375, "step": 1665 }, { "epoch": 14.88, "grad_norm": 0.07717020064592361, "learning_rate": 0.00023152941176470584, "loss": 0.0369, "step": 1666 }, { "epoch": 14.88, "grad_norm": 0.06798449158668518, "learning_rate": 0.0002314705882352941, "loss": 0.0332, "step": 1667 }, { "epoch": 14.89, "grad_norm": 0.1054532453417778, "learning_rate": 0.00023141176470588233, "loss": 0.0397, "step": 1668 }, { "epoch": 14.9, "grad_norm": 0.06646037101745605, "learning_rate": 0.00023135294117647054, "loss": 0.0363, "step": 1669 }, { "epoch": 14.91, "grad_norm": 0.0664471909403801, "learning_rate": 0.00023129411764705881, "loss": 0.0381, "step": 1670 }, { "epoch": 14.92, "grad_norm": 0.07188865542411804, "learning_rate": 0.00023123529411764703, "loss": 0.0356, "step": 1671 }, { "epoch": 14.93, "grad_norm": 0.06646988540887833, "learning_rate": 0.00023117647058823527, "loss": 0.037, "step": 1672 }, { "epoch": 14.94, 
"grad_norm": 0.07141093164682388, "learning_rate": 0.00023111764705882352, "loss": 0.0335, "step": 1673 }, { "epoch": 14.95, "grad_norm": 0.09563656896352768, "learning_rate": 0.00023105882352941173, "loss": 0.0436, "step": 1674 }, { "epoch": 14.96, "grad_norm": 0.11324489116668701, "learning_rate": 0.00023099999999999998, "loss": 0.0485, "step": 1675 }, { "epoch": 14.96, "grad_norm": 0.07574322819709778, "learning_rate": 0.00023094117647058822, "loss": 0.0392, "step": 1676 }, { "epoch": 14.97, "grad_norm": 0.06677575409412384, "learning_rate": 0.00023088235294117646, "loss": 0.0346, "step": 1677 }, { "epoch": 14.98, "grad_norm": 0.07006186246871948, "learning_rate": 0.00023082352941176468, "loss": 0.033, "step": 1678 }, { "epoch": 14.99, "grad_norm": 0.08206184208393097, "learning_rate": 0.0002307647058823529, "loss": 0.0361, "step": 1679 }, { "epoch": 15.0, "grad_norm": 0.10202016681432724, "learning_rate": 0.00023070588235294117, "loss": 0.0422, "step": 1680 }, { "epoch": 15.01, "grad_norm": 0.06775103509426117, "learning_rate": 0.00023064705882352938, "loss": 0.0414, "step": 1681 }, { "epoch": 15.02, "grad_norm": 0.059331733733415604, "learning_rate": 0.00023058823529411763, "loss": 0.0315, "step": 1682 }, { "epoch": 15.03, "grad_norm": 0.06496758759021759, "learning_rate": 0.00023052941176470587, "loss": 0.0319, "step": 1683 }, { "epoch": 15.04, "grad_norm": 0.06576515734195709, "learning_rate": 0.0002304705882352941, "loss": 0.0325, "step": 1684 }, { "epoch": 15.04, "grad_norm": 0.07281138747930527, "learning_rate": 0.00023041176470588233, "loss": 0.0342, "step": 1685 }, { "epoch": 15.05, "grad_norm": 0.10825911909341812, "learning_rate": 0.00023035294117647057, "loss": 0.0447, "step": 1686 }, { "epoch": 15.06, "grad_norm": 0.11847017705440521, "learning_rate": 0.00023029411764705882, "loss": 0.0387, "step": 1687 }, { "epoch": 15.07, "grad_norm": 0.06485091149806976, "learning_rate": 0.00023023529411764703, "loss": 0.0318, "step": 1688 }, { "epoch": 15.08, 
"grad_norm": 0.0644741952419281, "learning_rate": 0.00023017647058823525, "loss": 0.0358, "step": 1689 }, { "epoch": 15.09, "grad_norm": 0.06077628210186958, "learning_rate": 0.00023011764705882352, "loss": 0.0286, "step": 1690 }, { "epoch": 15.1, "grad_norm": 0.06924694031476974, "learning_rate": 0.00023005882352941174, "loss": 0.0362, "step": 1691 }, { "epoch": 15.11, "grad_norm": 0.0772029235959053, "learning_rate": 0.00023, "loss": 0.0324, "step": 1692 }, { "epoch": 15.12, "grad_norm": 0.10378186404705048, "learning_rate": 0.00022994117647058822, "loss": 0.0486, "step": 1693 }, { "epoch": 15.12, "grad_norm": 0.0626181811094284, "learning_rate": 0.00022988235294117644, "loss": 0.0342, "step": 1694 }, { "epoch": 15.13, "grad_norm": 0.06330876797437668, "learning_rate": 0.00022982352941176468, "loss": 0.034, "step": 1695 }, { "epoch": 15.14, "grad_norm": 0.059350576251745224, "learning_rate": 0.00022976470588235293, "loss": 0.0304, "step": 1696 }, { "epoch": 15.15, "grad_norm": 0.06034482270479202, "learning_rate": 0.00022970588235294117, "loss": 0.0312, "step": 1697 }, { "epoch": 15.16, "grad_norm": 0.07836218178272247, "learning_rate": 0.00022964705882352939, "loss": 0.0391, "step": 1698 }, { "epoch": 15.17, "grad_norm": 0.12190468609333038, "learning_rate": 0.00022958823529411763, "loss": 0.0511, "step": 1699 }, { "epoch": 15.18, "grad_norm": 0.06269179284572601, "learning_rate": 0.00022952941176470587, "loss": 0.0338, "step": 1700 }, { "epoch": 15.18, "eval_cer": 0.03474610351294569, "eval_loss": 0.1889059841632843, "eval_runtime": 22.7906, "eval_samples_per_second": 115.925, "eval_steps_per_second": 1.843, "eval_wer": 0.11771122570408568, "step": 1700 }, { "epoch": 15.19, "grad_norm": 0.06443001329898834, "learning_rate": 0.0002294705882352941, "loss": 0.0341, "step": 1701 }, { "epoch": 15.2, "grad_norm": 0.06785762310028076, "learning_rate": 0.0002294117647058823, "loss": 0.0317, "step": 1702 }, { "epoch": 15.21, "grad_norm": 0.0721161812543869, 
"learning_rate": 0.00022935294117647058, "loss": 0.0297, "step": 1703 }, { "epoch": 15.21, "grad_norm": 0.08249220252037048, "learning_rate": 0.0002292941176470588, "loss": 0.0326, "step": 1704 }, { "epoch": 15.22, "grad_norm": 0.1352924406528473, "learning_rate": 0.00022923529411764706, "loss": 0.053, "step": 1705 }, { "epoch": 15.23, "grad_norm": 0.06891375035047531, "learning_rate": 0.00022917647058823528, "loss": 0.0354, "step": 1706 }, { "epoch": 15.24, "grad_norm": 0.06851013004779816, "learning_rate": 0.0002291176470588235, "loss": 0.0357, "step": 1707 }, { "epoch": 15.25, "grad_norm": 0.06332221627235413, "learning_rate": 0.00022905882352941174, "loss": 0.032, "step": 1708 }, { "epoch": 15.26, "grad_norm": 0.06636039167642593, "learning_rate": 0.00022899999999999998, "loss": 0.0337, "step": 1709 }, { "epoch": 15.27, "grad_norm": 0.0747261643409729, "learning_rate": 0.00022894117647058823, "loss": 0.0349, "step": 1710 }, { "epoch": 15.28, "grad_norm": 0.09190411120653152, "learning_rate": 0.00022888235294117644, "loss": 0.036, "step": 1711 }, { "epoch": 15.29, "grad_norm": 0.11545003950595856, "learning_rate": 0.00022882352941176466, "loss": 0.0504, "step": 1712 }, { "epoch": 15.29, "grad_norm": 0.06128847226500511, "learning_rate": 0.00022876470588235293, "loss": 0.0334, "step": 1713 }, { "epoch": 15.3, "grad_norm": 0.06289771944284439, "learning_rate": 0.00022870588235294115, "loss": 0.0327, "step": 1714 }, { "epoch": 15.31, "grad_norm": 0.06811051070690155, "learning_rate": 0.00022864705882352942, "loss": 0.0327, "step": 1715 }, { "epoch": 15.32, "grad_norm": 0.07467852532863617, "learning_rate": 0.00022858823529411763, "loss": 0.0316, "step": 1716 }, { "epoch": 15.33, "grad_norm": 0.07979732006788254, "learning_rate": 0.00022852941176470585, "loss": 0.0342, "step": 1717 }, { "epoch": 15.34, "grad_norm": 0.11657968163490295, "learning_rate": 0.0002284705882352941, "loss": 0.043, "step": 1718 }, { "epoch": 15.35, "grad_norm": 0.06508392840623856, 
"learning_rate": 0.00022841176470588234, "loss": 0.0334, "step": 1719 }, { "epoch": 15.36, "grad_norm": 0.07240886241197586, "learning_rate": 0.00022835294117647058, "loss": 0.0364, "step": 1720 }, { "epoch": 15.37, "grad_norm": 0.07049129158258438, "learning_rate": 0.0002282941176470588, "loss": 0.0333, "step": 1721 }, { "epoch": 15.38, "grad_norm": 0.07092287391424179, "learning_rate": 0.00022823529411764704, "loss": 0.0347, "step": 1722 }, { "epoch": 15.38, "grad_norm": 0.0840243473649025, "learning_rate": 0.00022817647058823528, "loss": 0.0387, "step": 1723 }, { "epoch": 15.39, "grad_norm": 0.1114409863948822, "learning_rate": 0.0002281176470588235, "loss": 0.0451, "step": 1724 }, { "epoch": 15.4, "grad_norm": 0.06822095811367035, "learning_rate": 0.00022805882352941177, "loss": 0.0366, "step": 1725 }, { "epoch": 15.41, "grad_norm": 0.06745601445436478, "learning_rate": 0.00022799999999999999, "loss": 0.0322, "step": 1726 }, { "epoch": 15.42, "grad_norm": 0.06213675066828728, "learning_rate": 0.0002279411764705882, "loss": 0.0301, "step": 1727 }, { "epoch": 15.43, "grad_norm": 0.06839065253734589, "learning_rate": 0.00022788235294117647, "loss": 0.0324, "step": 1728 }, { "epoch": 15.44, "grad_norm": 0.07828672975301743, "learning_rate": 0.0002278235294117647, "loss": 0.0367, "step": 1729 }, { "epoch": 15.45, "grad_norm": 0.11986172199249268, "learning_rate": 0.0002277647058823529, "loss": 0.044, "step": 1730 }, { "epoch": 15.46, "grad_norm": 0.06630124896764755, "learning_rate": 0.00022770588235294115, "loss": 0.0362, "step": 1731 }, { "epoch": 15.46, "grad_norm": 0.07336899638175964, "learning_rate": 0.0002276470588235294, "loss": 0.0379, "step": 1732 }, { "epoch": 15.47, "grad_norm": 0.0715327188372612, "learning_rate": 0.00022758823529411764, "loss": 0.0394, "step": 1733 }, { "epoch": 15.48, "grad_norm": 0.07718680799007416, "learning_rate": 0.00022752941176470585, "loss": 0.0346, "step": 1734 }, { "epoch": 15.49, "grad_norm": 0.07004740089178085, 
"learning_rate": 0.0002274705882352941, "loss": 0.0297, "step": 1735 }, { "epoch": 15.5, "grad_norm": 0.08288314938545227, "learning_rate": 0.00022741176470588234, "loss": 0.0292, "step": 1736 }, { "epoch": 15.51, "grad_norm": 0.08451540023088455, "learning_rate": 0.00022735294117647055, "loss": 0.0353, "step": 1737 }, { "epoch": 15.52, "grad_norm": 0.06629110872745514, "learning_rate": 0.00022729411764705883, "loss": 0.0311, "step": 1738 }, { "epoch": 15.53, "grad_norm": 0.06104528158903122, "learning_rate": 0.00022723529411764704, "loss": 0.0318, "step": 1739 }, { "epoch": 15.54, "grad_norm": 0.07883122563362122, "learning_rate": 0.00022717647058823526, "loss": 0.0357, "step": 1740 }, { "epoch": 15.54, "grad_norm": 0.06872067600488663, "learning_rate": 0.00022711764705882353, "loss": 0.0367, "step": 1741 }, { "epoch": 15.55, "grad_norm": 0.08901038765907288, "learning_rate": 0.00022705882352941174, "loss": 0.0344, "step": 1742 }, { "epoch": 15.56, "grad_norm": 0.14025089144706726, "learning_rate": 0.000227, "loss": 0.048, "step": 1743 }, { "epoch": 15.57, "grad_norm": 0.06312990188598633, "learning_rate": 0.0002269411764705882, "loss": 0.0316, "step": 1744 }, { "epoch": 15.58, "grad_norm": 0.06658343970775604, "learning_rate": 0.00022688235294117645, "loss": 0.0355, "step": 1745 }, { "epoch": 15.59, "grad_norm": 0.07009308785200119, "learning_rate": 0.0002268235294117647, "loss": 0.0325, "step": 1746 }, { "epoch": 15.6, "grad_norm": 0.06212510168552399, "learning_rate": 0.0002267647058823529, "loss": 0.0336, "step": 1747 }, { "epoch": 15.61, "grad_norm": 0.08150997757911682, "learning_rate": 0.00022670588235294118, "loss": 0.0371, "step": 1748 }, { "epoch": 15.62, "grad_norm": 0.10127957910299301, "learning_rate": 0.0002266470588235294, "loss": 0.0421, "step": 1749 }, { "epoch": 15.62, "grad_norm": 0.06718476861715317, "learning_rate": 0.0002265882352941176, "loss": 0.0366, "step": 1750 }, { "epoch": 15.63, "grad_norm": 0.0708250179886818, "learning_rate": 
0.00022652941176470588, "loss": 0.0364, "step": 1751 }, { "epoch": 15.64, "grad_norm": 0.060382526367902756, "learning_rate": 0.0002264705882352941, "loss": 0.0319, "step": 1752 }, { "epoch": 15.65, "grad_norm": 0.07351882755756378, "learning_rate": 0.00022641176470588234, "loss": 0.0334, "step": 1753 }, { "epoch": 15.66, "grad_norm": 0.08200761675834656, "learning_rate": 0.00022635294117647056, "loss": 0.0326, "step": 1754 }, { "epoch": 15.67, "grad_norm": 0.13429179787635803, "learning_rate": 0.0002262941176470588, "loss": 0.0466, "step": 1755 }, { "epoch": 15.68, "grad_norm": 0.06309697031974792, "learning_rate": 0.00022623529411764704, "loss": 0.033, "step": 1756 }, { "epoch": 15.69, "grad_norm": 0.0706702172756195, "learning_rate": 0.00022617647058823526, "loss": 0.0344, "step": 1757 }, { "epoch": 15.7, "grad_norm": 0.07139620929956436, "learning_rate": 0.00022611764705882353, "loss": 0.0353, "step": 1758 }, { "epoch": 15.71, "grad_norm": 0.06745688617229462, "learning_rate": 0.00022605882352941175, "loss": 0.0311, "step": 1759 }, { "epoch": 15.71, "grad_norm": 0.08064214885234833, "learning_rate": 0.00022599999999999996, "loss": 0.034, "step": 1760 }, { "epoch": 15.72, "grad_norm": 0.10125376284122467, "learning_rate": 0.00022594117647058823, "loss": 0.0384, "step": 1761 }, { "epoch": 15.73, "grad_norm": 0.13965125381946564, "learning_rate": 0.00022588235294117645, "loss": 0.0414, "step": 1762 }, { "epoch": 15.74, "grad_norm": 0.06126793846487999, "learning_rate": 0.00022582352941176467, "loss": 0.0335, "step": 1763 }, { "epoch": 15.75, "grad_norm": 0.06566361337900162, "learning_rate": 0.00022576470588235294, "loss": 0.0339, "step": 1764 }, { "epoch": 15.76, "grad_norm": 0.0652477964758873, "learning_rate": 0.00022570588235294115, "loss": 0.0319, "step": 1765 }, { "epoch": 15.77, "grad_norm": 0.07817530632019043, "learning_rate": 0.0002256470588235294, "loss": 0.0336, "step": 1766 }, { "epoch": 15.78, "grad_norm": 0.09044196456670761, "learning_rate": 
0.00022558823529411761, "loss": 0.0346, "step": 1767 }, { "epoch": 15.79, "grad_norm": 0.12936484813690186, "learning_rate": 0.00022552941176470586, "loss": 0.0455, "step": 1768 }, { "epoch": 15.79, "grad_norm": 0.06324309855699539, "learning_rate": 0.0002254705882352941, "loss": 0.0333, "step": 1769 }, { "epoch": 15.8, "grad_norm": 0.07502604275941849, "learning_rate": 0.00022541176470588232, "loss": 0.033, "step": 1770 }, { "epoch": 15.81, "grad_norm": 0.07541787624359131, "learning_rate": 0.0002253529411764706, "loss": 0.0348, "step": 1771 }, { "epoch": 15.82, "grad_norm": 0.06684520840644836, "learning_rate": 0.0002252941176470588, "loss": 0.0362, "step": 1772 }, { "epoch": 15.83, "grad_norm": 0.07394609600305557, "learning_rate": 0.00022523529411764702, "loss": 0.0328, "step": 1773 }, { "epoch": 15.84, "grad_norm": 0.14768072962760925, "learning_rate": 0.0002251764705882353, "loss": 0.056, "step": 1774 }, { "epoch": 15.85, "grad_norm": 0.06404601037502289, "learning_rate": 0.0002251176470588235, "loss": 0.0345, "step": 1775 }, { "epoch": 15.86, "grad_norm": 0.06520042568445206, "learning_rate": 0.00022505882352941175, "loss": 0.0322, "step": 1776 }, { "epoch": 15.87, "grad_norm": 0.07000089436769485, "learning_rate": 0.000225, "loss": 0.0339, "step": 1777 }, { "epoch": 15.88, "grad_norm": 0.07399477809667587, "learning_rate": 0.0002249411764705882, "loss": 0.0353, "step": 1778 }, { "epoch": 15.88, "grad_norm": 0.07349999994039536, "learning_rate": 0.00022488235294117645, "loss": 0.0355, "step": 1779 }, { "epoch": 15.89, "grad_norm": 0.12206725031137466, "learning_rate": 0.00022482352941176467, "loss": 0.0452, "step": 1780 }, { "epoch": 15.9, "grad_norm": 0.0694836750626564, "learning_rate": 0.00022476470588235294, "loss": 0.0354, "step": 1781 }, { "epoch": 15.91, "grad_norm": 0.06848176568746567, "learning_rate": 0.00022470588235294116, "loss": 0.0336, "step": 1782 }, { "epoch": 15.92, "grad_norm": 0.0721769779920578, "learning_rate": 0.00022464705882352937, 
"loss": 0.0336, "step": 1783 }, { "epoch": 15.93, "grad_norm": 0.07481008768081665, "learning_rate": 0.00022458823529411764, "loss": 0.0362, "step": 1784 }, { "epoch": 15.94, "grad_norm": 0.06670886278152466, "learning_rate": 0.00022452941176470586, "loss": 0.0306, "step": 1785 }, { "epoch": 15.95, "grad_norm": 0.10526525974273682, "learning_rate": 0.0002244705882352941, "loss": 0.0347, "step": 1786 }, { "epoch": 15.96, "grad_norm": 0.09913452714681625, "learning_rate": 0.00022441176470588235, "loss": 0.0457, "step": 1787 }, { "epoch": 15.96, "grad_norm": 0.07062456011772156, "learning_rate": 0.00022435294117647056, "loss": 0.0342, "step": 1788 }, { "epoch": 15.97, "grad_norm": 0.07000609487295151, "learning_rate": 0.0002242941176470588, "loss": 0.033, "step": 1789 }, { "epoch": 15.98, "grad_norm": 0.07293125241994858, "learning_rate": 0.00022423529411764702, "loss": 0.033, "step": 1790 }, { "epoch": 15.99, "grad_norm": 0.08094222098588943, "learning_rate": 0.0002241764705882353, "loss": 0.0377, "step": 1791 }, { "epoch": 16.0, "grad_norm": 0.09451135993003845, "learning_rate": 0.0002241176470588235, "loss": 0.0416, "step": 1792 }, { "epoch": 16.01, "grad_norm": 0.0639321580529213, "learning_rate": 0.00022405882352941173, "loss": 0.0316, "step": 1793 }, { "epoch": 16.02, "grad_norm": 0.06530879437923431, "learning_rate": 0.000224, "loss": 0.0307, "step": 1794 }, { "epoch": 16.03, "grad_norm": 0.058440230786800385, "learning_rate": 0.0002239411764705882, "loss": 0.0277, "step": 1795 }, { "epoch": 16.04, "grad_norm": 0.08349408954381943, "learning_rate": 0.00022388235294117643, "loss": 0.0305, "step": 1796 }, { "epoch": 16.04, "grad_norm": 0.08139226585626602, "learning_rate": 0.0002238235294117647, "loss": 0.0345, "step": 1797 }, { "epoch": 16.05, "grad_norm": 0.1024152860045433, "learning_rate": 0.00022376470588235292, "loss": 0.0389, "step": 1798 }, { "epoch": 16.06, "grad_norm": 0.10898366570472717, "learning_rate": 0.00022370588235294116, "loss": 0.0399, "step": 
1799 }, { "epoch": 16.07, "grad_norm": 0.06472934037446976, "learning_rate": 0.0002236470588235294, "loss": 0.0334, "step": 1800 }, { "epoch": 16.07, "eval_cer": 0.035982606277224674, "eval_loss": 0.19003820419311523, "eval_runtime": 22.2377, "eval_samples_per_second": 118.807, "eval_steps_per_second": 1.889, "eval_wer": 0.11878222927409758, "step": 1800 }, { "epoch": 16.08, "grad_norm": 0.05682501941919327, "learning_rate": 0.00022358823529411762, "loss": 0.0294, "step": 1801 }, { "epoch": 16.09, "grad_norm": 0.06381959468126297, "learning_rate": 0.00022352941176470586, "loss": 0.0325, "step": 1802 }, { "epoch": 16.1, "grad_norm": 0.07066192477941513, "learning_rate": 0.00022347058823529408, "loss": 0.0312, "step": 1803 }, { "epoch": 16.11, "grad_norm": 0.07962921261787415, "learning_rate": 0.00022341176470588235, "loss": 0.033, "step": 1804 }, { "epoch": 16.12, "grad_norm": 0.12375257909297943, "learning_rate": 0.00022335294117647057, "loss": 0.0473, "step": 1805 }, { "epoch": 16.12, "grad_norm": 0.06608864665031433, "learning_rate": 0.00022329411764705878, "loss": 0.0314, "step": 1806 }, { "epoch": 16.13, "grad_norm": 0.06043463572859764, "learning_rate": 0.00022323529411764705, "loss": 0.0268, "step": 1807 }, { "epoch": 16.14, "grad_norm": 0.06614497303962708, "learning_rate": 0.00022317647058823527, "loss": 0.0318, "step": 1808 }, { "epoch": 16.15, "grad_norm": 0.06933463364839554, "learning_rate": 0.0002231176470588235, "loss": 0.0318, "step": 1809 }, { "epoch": 16.16, "grad_norm": 0.07098463922739029, "learning_rate": 0.00022305882352941176, "loss": 0.0277, "step": 1810 }, { "epoch": 16.17, "grad_norm": 0.13402563333511353, "learning_rate": 0.00022299999999999997, "loss": 0.0474, "step": 1811 }, { "epoch": 16.18, "grad_norm": 0.06053219735622406, "learning_rate": 0.00022294117647058822, "loss": 0.0311, "step": 1812 }, { "epoch": 16.19, "grad_norm": 0.06736651808023453, "learning_rate": 0.00022288235294117643, "loss": 0.0298, "step": 1813 }, { "epoch": 16.2, 
"grad_norm": 0.06711181253194809, "learning_rate": 0.0002228235294117647, "loss": 0.0284, "step": 1814 }, { "epoch": 16.21, "grad_norm": 0.07631996273994446, "learning_rate": 0.00022276470588235292, "loss": 0.0356, "step": 1815 }, { "epoch": 16.21, "grad_norm": 0.06564201414585114, "learning_rate": 0.00022270588235294114, "loss": 0.0341, "step": 1816 }, { "epoch": 16.22, "grad_norm": 0.11746109277009964, "learning_rate": 0.0002226470588235294, "loss": 0.0473, "step": 1817 }, { "epoch": 16.23, "grad_norm": 0.06227483972907066, "learning_rate": 0.00022258823529411762, "loss": 0.034, "step": 1818 }, { "epoch": 16.24, "grad_norm": 0.06249179318547249, "learning_rate": 0.00022252941176470587, "loss": 0.0332, "step": 1819 }, { "epoch": 16.25, "grad_norm": 0.05764083191752434, "learning_rate": 0.0002224705882352941, "loss": 0.0282, "step": 1820 }, { "epoch": 16.26, "grad_norm": 0.057898227125406265, "learning_rate": 0.00022241176470588233, "loss": 0.0273, "step": 1821 }, { "epoch": 16.27, "grad_norm": 0.07494675368070602, "learning_rate": 0.00022235294117647057, "loss": 0.0317, "step": 1822 }, { "epoch": 16.28, "grad_norm": 0.09450692683458328, "learning_rate": 0.0002222941176470588, "loss": 0.0351, "step": 1823 }, { "epoch": 16.29, "grad_norm": 0.1166757121682167, "learning_rate": 0.00022223529411764706, "loss": 0.0378, "step": 1824 }, { "epoch": 16.29, "grad_norm": 0.06373901665210724, "learning_rate": 0.00022217647058823527, "loss": 0.0367, "step": 1825 }, { "epoch": 16.3, "grad_norm": 0.058000992983579636, "learning_rate": 0.0002221176470588235, "loss": 0.0297, "step": 1826 }, { "epoch": 16.31, "grad_norm": 0.07125075161457062, "learning_rate": 0.00022205882352941176, "loss": 0.0366, "step": 1827 }, { "epoch": 16.32, "grad_norm": 0.07212217152118683, "learning_rate": 0.00022199999999999998, "loss": 0.0317, "step": 1828 }, { "epoch": 16.33, "grad_norm": 0.08258946239948273, "learning_rate": 0.0002219411764705882, "loss": 0.0325, "step": 1829 }, { "epoch": 16.34, 
"grad_norm": 0.11259468644857407, "learning_rate": 0.00022188235294117646, "loss": 0.0407, "step": 1830 }, { "epoch": 16.35, "grad_norm": 0.05695272982120514, "learning_rate": 0.00022182352941176468, "loss": 0.0334, "step": 1831 }, { "epoch": 16.36, "grad_norm": 0.05781324952840805, "learning_rate": 0.00022176470588235292, "loss": 0.035, "step": 1832 }, { "epoch": 16.37, "grad_norm": 0.07321536540985107, "learning_rate": 0.00022170588235294117, "loss": 0.0376, "step": 1833 }, { "epoch": 16.38, "grad_norm": 0.06028003245592117, "learning_rate": 0.00022164705882352938, "loss": 0.0263, "step": 1834 }, { "epoch": 16.38, "grad_norm": 0.07853641360998154, "learning_rate": 0.00022158823529411762, "loss": 0.0323, "step": 1835 }, { "epoch": 16.39, "grad_norm": 0.12827949225902557, "learning_rate": 0.00022152941176470587, "loss": 0.045, "step": 1836 }, { "epoch": 16.4, "grad_norm": 0.05804058164358139, "learning_rate": 0.0002214705882352941, "loss": 0.0291, "step": 1837 }, { "epoch": 16.41, "grad_norm": 0.06798772513866425, "learning_rate": 0.00022141176470588233, "loss": 0.0359, "step": 1838 }, { "epoch": 16.42, "grad_norm": 0.07073916494846344, "learning_rate": 0.00022135294117647054, "loss": 0.0343, "step": 1839 }, { "epoch": 16.43, "grad_norm": 0.06816134601831436, "learning_rate": 0.00022129411764705881, "loss": 0.0334, "step": 1840 }, { "epoch": 16.44, "grad_norm": 0.0676409974694252, "learning_rate": 0.00022123529411764703, "loss": 0.0316, "step": 1841 }, { "epoch": 16.45, "grad_norm": 0.17641691863536835, "learning_rate": 0.0002211764705882353, "loss": 0.0475, "step": 1842 }, { "epoch": 16.46, "grad_norm": 0.07010889798402786, "learning_rate": 0.00022111764705882352, "loss": 0.0329, "step": 1843 }, { "epoch": 16.46, "grad_norm": 0.06269314140081406, "learning_rate": 0.00022105882352941173, "loss": 0.0323, "step": 1844 }, { "epoch": 16.47, "grad_norm": 0.06245392560958862, "learning_rate": 0.00022099999999999998, "loss": 0.0346, "step": 1845 }, { "epoch": 16.48, 
"grad_norm": 0.06329552084207535, "learning_rate": 0.00022094117647058822, "loss": 0.0295, "step": 1846 }, { "epoch": 16.49, "grad_norm": 0.07149852812290192, "learning_rate": 0.00022088235294117646, "loss": 0.033, "step": 1847 }, { "epoch": 16.5, "grad_norm": 0.1021699607372284, "learning_rate": 0.00022082352941176468, "loss": 0.0383, "step": 1848 }, { "epoch": 16.51, "grad_norm": 0.08884425461292267, "learning_rate": 0.0002207647058823529, "loss": 0.0373, "step": 1849 }, { "epoch": 16.52, "grad_norm": 0.06852715462446213, "learning_rate": 0.00022070588235294117, "loss": 0.0335, "step": 1850 }, { "epoch": 16.53, "grad_norm": 0.06676394492387772, "learning_rate": 0.00022064705882352938, "loss": 0.0345, "step": 1851 }, { "epoch": 16.54, "grad_norm": 0.06098376214504242, "learning_rate": 0.00022058823529411765, "loss": 0.0304, "step": 1852 }, { "epoch": 16.54, "grad_norm": 0.06835677474737167, "learning_rate": 0.00022052941176470587, "loss": 0.0301, "step": 1853 }, { "epoch": 16.55, "grad_norm": 0.07412328571081161, "learning_rate": 0.0002204705882352941, "loss": 0.0339, "step": 1854 }, { "epoch": 16.56, "grad_norm": 0.1628560721874237, "learning_rate": 0.00022041176470588233, "loss": 0.0498, "step": 1855 }, { "epoch": 16.57, "grad_norm": 0.07015007734298706, "learning_rate": 0.00022035294117647057, "loss": 0.0363, "step": 1856 }, { "epoch": 16.58, "grad_norm": 0.05822065472602844, "learning_rate": 0.0002202941176470588, "loss": 0.0312, "step": 1857 }, { "epoch": 16.59, "grad_norm": 0.06740330904722214, "learning_rate": 0.00022023529411764703, "loss": 0.0381, "step": 1858 }, { "epoch": 16.6, "grad_norm": 0.06438733637332916, "learning_rate": 0.00022017647058823528, "loss": 0.0305, "step": 1859 }, { "epoch": 16.61, "grad_norm": 0.07315194606781006, "learning_rate": 0.00022011764705882352, "loss": 0.0314, "step": 1860 }, { "epoch": 16.62, "grad_norm": 0.13501623272895813, "learning_rate": 0.00022005882352941174, "loss": 0.0506, "step": 1861 }, { "epoch": 16.62, 
"grad_norm": 0.0613023079931736, "learning_rate": 0.00021999999999999995, "loss": 0.0321, "step": 1862 }, { "epoch": 16.63, "grad_norm": 0.06343629956245422, "learning_rate": 0.00021994117647058822, "loss": 0.0318, "step": 1863 }, { "epoch": 16.64, "grad_norm": 0.061639804393053055, "learning_rate": 0.00021988235294117644, "loss": 0.0339, "step": 1864 }, { "epoch": 16.65, "grad_norm": 0.06862615793943405, "learning_rate": 0.0002198235294117647, "loss": 0.0346, "step": 1865 }, { "epoch": 16.66, "grad_norm": 0.06971698999404907, "learning_rate": 0.00021976470588235293, "loss": 0.0347, "step": 1866 }, { "epoch": 16.67, "grad_norm": 0.13029135763645172, "learning_rate": 0.00021970588235294114, "loss": 0.0438, "step": 1867 }, { "epoch": 16.68, "grad_norm": 0.06730790436267853, "learning_rate": 0.0002196470588235294, "loss": 0.0307, "step": 1868 }, { "epoch": 16.69, "grad_norm": 0.06171699985861778, "learning_rate": 0.00021958823529411763, "loss": 0.0326, "step": 1869 }, { "epoch": 16.7, "grad_norm": 0.06088051572442055, "learning_rate": 0.00021952941176470587, "loss": 0.0306, "step": 1870 }, { "epoch": 16.71, "grad_norm": 0.06121975556015968, "learning_rate": 0.0002194705882352941, "loss": 0.0289, "step": 1871 }, { "epoch": 16.71, "grad_norm": 0.07202765345573425, "learning_rate": 0.00021941176470588233, "loss": 0.0348, "step": 1872 }, { "epoch": 16.72, "grad_norm": 0.09348710626363754, "learning_rate": 0.00021935294117647058, "loss": 0.0331, "step": 1873 }, { "epoch": 16.73, "grad_norm": 0.10430654883384705, "learning_rate": 0.0002192941176470588, "loss": 0.0424, "step": 1874 }, { "epoch": 16.74, "grad_norm": 0.06490690261125565, "learning_rate": 0.00021923529411764706, "loss": 0.0302, "step": 1875 }, { "epoch": 16.75, "grad_norm": 0.059396229684352875, "learning_rate": 0.00021917647058823528, "loss": 0.0298, "step": 1876 }, { "epoch": 16.76, "grad_norm": 0.061630092561244965, "learning_rate": 0.0002191176470588235, "loss": 0.0279, "step": 1877 }, { "epoch": 16.77, 
"grad_norm": 0.06576143205165863, "learning_rate": 0.00021905882352941174, "loss": 0.029, "step": 1878 }, { "epoch": 16.78, "grad_norm": 0.08609016239643097, "learning_rate": 0.00021899999999999998, "loss": 0.0292, "step": 1879 }, { "epoch": 16.79, "grad_norm": 0.1195787563920021, "learning_rate": 0.00021894117647058823, "loss": 0.0422, "step": 1880 }, { "epoch": 16.79, "grad_norm": 0.06603191047906876, "learning_rate": 0.00021888235294117644, "loss": 0.0322, "step": 1881 }, { "epoch": 16.8, "grad_norm": 0.07025889307260513, "learning_rate": 0.0002188235294117647, "loss": 0.0341, "step": 1882 }, { "epoch": 16.81, "grad_norm": 0.06338518857955933, "learning_rate": 0.00021876470588235293, "loss": 0.0307, "step": 1883 }, { "epoch": 16.82, "grad_norm": 0.06663335859775543, "learning_rate": 0.00021870588235294115, "loss": 0.0313, "step": 1884 }, { "epoch": 16.83, "grad_norm": 0.07321462780237198, "learning_rate": 0.00021864705882352942, "loss": 0.0313, "step": 1885 }, { "epoch": 16.84, "grad_norm": 0.11784330010414124, "learning_rate": 0.00021858823529411763, "loss": 0.0417, "step": 1886 }, { "epoch": 16.85, "grad_norm": 0.07092329859733582, "learning_rate": 0.00021852941176470585, "loss": 0.0345, "step": 1887 }, { "epoch": 16.86, "grad_norm": 0.07185757905244827, "learning_rate": 0.00021847058823529412, "loss": 0.0355, "step": 1888 }, { "epoch": 16.87, "grad_norm": 0.05557725206017494, "learning_rate": 0.00021841176470588234, "loss": 0.0278, "step": 1889 }, { "epoch": 16.88, "grad_norm": 0.07115907222032547, "learning_rate": 0.00021835294117647055, "loss": 0.0378, "step": 1890 }, { "epoch": 16.88, "grad_norm": 0.06429091095924377, "learning_rate": 0.0002182941176470588, "loss": 0.0304, "step": 1891 }, { "epoch": 16.89, "grad_norm": 0.10283501446247101, "learning_rate": 0.00021823529411764704, "loss": 0.0436, "step": 1892 }, { "epoch": 16.9, "grad_norm": 0.06136798858642578, "learning_rate": 0.00021817647058823528, "loss": 0.0357, "step": 1893 }, { "epoch": 16.91, 
"grad_norm": 0.05844808369874954, "learning_rate": 0.0002181176470588235, "loss": 0.0299, "step": 1894 }, { "epoch": 16.92, "grad_norm": 0.07017340511083603, "learning_rate": 0.00021805882352941174, "loss": 0.0343, "step": 1895 }, { "epoch": 16.93, "grad_norm": 0.07959800958633423, "learning_rate": 0.00021799999999999999, "loss": 0.0317, "step": 1896 }, { "epoch": 16.94, "grad_norm": 0.06927252560853958, "learning_rate": 0.0002179411764705882, "loss": 0.0307, "step": 1897 }, { "epoch": 16.95, "grad_norm": 0.07652056962251663, "learning_rate": 0.00021788235294117647, "loss": 0.0279, "step": 1898 }, { "epoch": 16.96, "grad_norm": 0.08846669644117355, "learning_rate": 0.0002178235294117647, "loss": 0.039, "step": 1899 }, { "epoch": 16.96, "grad_norm": 0.062216632068157196, "learning_rate": 0.0002177647058823529, "loss": 0.0315, "step": 1900 }, { "epoch": 16.96, "eval_cer": 0.034584493729164556, "eval_loss": 0.19013793766498566, "eval_runtime": 22.6106, "eval_samples_per_second": 116.848, "eval_steps_per_second": 1.858, "eval_wer": 0.11576755255850853, "step": 1900 }, { "epoch": 16.97, "grad_norm": 0.06767335534095764, "learning_rate": 0.00021770588235294118, "loss": 0.0341, "step": 1901 }, { "epoch": 16.98, "grad_norm": 0.06550324708223343, "learning_rate": 0.0002176470588235294, "loss": 0.0291, "step": 1902 }, { "epoch": 16.99, "grad_norm": 0.08068516105413437, "learning_rate": 0.00021758823529411764, "loss": 0.0304, "step": 1903 }, { "epoch": 17.0, "grad_norm": 0.08599540591239929, "learning_rate": 0.00021752941176470585, "loss": 0.0407, "step": 1904 }, { "epoch": 17.01, "grad_norm": 0.0673895999789238, "learning_rate": 0.0002174705882352941, "loss": 0.0321, "step": 1905 }, { "epoch": 17.02, "grad_norm": 0.06604141741991043, "learning_rate": 0.00021741176470588234, "loss": 0.0345, "step": 1906 }, { "epoch": 17.03, "grad_norm": 0.062167853116989136, "learning_rate": 0.00021735294117647056, "loss": 0.0302, "step": 1907 }, { "epoch": 17.04, "grad_norm": 
0.0598611906170845, "learning_rate": 0.00021729411764705883, "loss": 0.0314, "step": 1908 }, { "epoch": 17.04, "grad_norm": 0.06386779248714447, "learning_rate": 0.00021723529411764704, "loss": 0.0278, "step": 1909 }, { "epoch": 17.05, "grad_norm": 0.0866820365190506, "learning_rate": 0.00021717647058823526, "loss": 0.0293, "step": 1910 }, { "epoch": 17.06, "grad_norm": 0.09303127974271774, "learning_rate": 0.00021711764705882353, "loss": 0.0362, "step": 1911 }, { "epoch": 17.07, "grad_norm": 0.060847409069538116, "learning_rate": 0.00021705882352941175, "loss": 0.0329, "step": 1912 }, { "epoch": 17.08, "grad_norm": 0.05897834151983261, "learning_rate": 0.000217, "loss": 0.0247, "step": 1913 }, { "epoch": 17.09, "grad_norm": 0.0682649239897728, "learning_rate": 0.0002169411764705882, "loss": 0.0302, "step": 1914 }, { "epoch": 17.1, "grad_norm": 0.07063417881727219, "learning_rate": 0.00021688235294117645, "loss": 0.0336, "step": 1915 }, { "epoch": 17.11, "grad_norm": 0.09338242560625076, "learning_rate": 0.0002168235294117647, "loss": 0.0362, "step": 1916 }, { "epoch": 17.12, "grad_norm": 0.14794890582561493, "learning_rate": 0.0002167647058823529, "loss": 0.0447, "step": 1917 }, { "epoch": 17.12, "grad_norm": 0.06314072012901306, "learning_rate": 0.00021670588235294118, "loss": 0.0321, "step": 1918 }, { "epoch": 17.13, "grad_norm": 0.061584699898958206, "learning_rate": 0.0002166470588235294, "loss": 0.032, "step": 1919 }, { "epoch": 17.14, "grad_norm": 0.06706103682518005, "learning_rate": 0.0002165882352941176, "loss": 0.03, "step": 1920 }, { "epoch": 17.15, "grad_norm": 0.06886307895183563, "learning_rate": 0.00021652941176470588, "loss": 0.0331, "step": 1921 }, { "epoch": 17.16, "grad_norm": 0.06668057292699814, "learning_rate": 0.0002164705882352941, "loss": 0.0306, "step": 1922 }, { "epoch": 17.17, "grad_norm": 0.1145925298333168, "learning_rate": 0.00021641176470588232, "loss": 0.0411, "step": 1923 }, { "epoch": 17.18, "grad_norm": 0.06388647109270096, 
"learning_rate": 0.00021635294117647059, "loss": 0.0343, "step": 1924 }, { "epoch": 17.19, "grad_norm": 0.059622038155794144, "learning_rate": 0.0002162941176470588, "loss": 0.0265, "step": 1925 }, { "epoch": 17.2, "grad_norm": 0.07900681346654892, "learning_rate": 0.00021623529411764705, "loss": 0.0359, "step": 1926 }, { "epoch": 17.21, "grad_norm": 0.06877920031547546, "learning_rate": 0.00021617647058823526, "loss": 0.0271, "step": 1927 }, { "epoch": 17.21, "grad_norm": 0.08047014474868774, "learning_rate": 0.0002161176470588235, "loss": 0.0274, "step": 1928 }, { "epoch": 17.22, "grad_norm": 0.1356843262910843, "learning_rate": 0.00021605882352941175, "loss": 0.0462, "step": 1929 }, { "epoch": 17.23, "grad_norm": 0.06882345676422119, "learning_rate": 0.00021599999999999996, "loss": 0.0321, "step": 1930 }, { "epoch": 17.24, "grad_norm": 0.06174418702721596, "learning_rate": 0.00021594117647058824, "loss": 0.0308, "step": 1931 }, { "epoch": 17.25, "grad_norm": 0.06057072803378105, "learning_rate": 0.00021588235294117645, "loss": 0.0329, "step": 1932 }, { "epoch": 17.26, "grad_norm": 0.06363937258720398, "learning_rate": 0.00021582352941176467, "loss": 0.0329, "step": 1933 }, { "epoch": 17.27, "grad_norm": 0.06881600618362427, "learning_rate": 0.00021576470588235294, "loss": 0.0333, "step": 1934 }, { "epoch": 17.28, "grad_norm": 0.07912091165781021, "learning_rate": 0.00021570588235294115, "loss": 0.0361, "step": 1935 }, { "epoch": 17.29, "grad_norm": 0.08154988288879395, "learning_rate": 0.0002156470588235294, "loss": 0.036, "step": 1936 }, { "epoch": 17.29, "grad_norm": 0.06133905425667763, "learning_rate": 0.00021558823529411764, "loss": 0.0329, "step": 1937 }, { "epoch": 17.3, "grad_norm": 0.0610046423971653, "learning_rate": 0.00021552941176470586, "loss": 0.0307, "step": 1938 }, { "epoch": 17.31, "grad_norm": 0.054651498794555664, "learning_rate": 0.0002154705882352941, "loss": 0.0284, "step": 1939 }, { "epoch": 17.32, "grad_norm": 0.06436837464570999, 
"learning_rate": 0.00021541176470588232, "loss": 0.0329, "step": 1940 }, { "epoch": 17.33, "grad_norm": 0.07338185608386993, "learning_rate": 0.0002153529411764706, "loss": 0.0322, "step": 1941 }, { "epoch": 17.34, "grad_norm": 0.15808679163455963, "learning_rate": 0.0002152941176470588, "loss": 0.0529, "step": 1942 }, { "epoch": 17.35, "grad_norm": 0.0614892840385437, "learning_rate": 0.00021523529411764702, "loss": 0.0334, "step": 1943 }, { "epoch": 17.36, "grad_norm": 0.05659007653594017, "learning_rate": 0.0002151764705882353, "loss": 0.0287, "step": 1944 }, { "epoch": 17.37, "grad_norm": 0.06882838159799576, "learning_rate": 0.0002151176470588235, "loss": 0.0292, "step": 1945 }, { "epoch": 17.38, "grad_norm": 0.06403681635856628, "learning_rate": 0.00021505882352941175, "loss": 0.0338, "step": 1946 }, { "epoch": 17.38, "grad_norm": 0.07217351347208023, "learning_rate": 0.000215, "loss": 0.0263, "step": 1947 }, { "epoch": 17.39, "grad_norm": 0.12260174751281738, "learning_rate": 0.0002149411764705882, "loss": 0.05, "step": 1948 }, { "epoch": 17.4, "grad_norm": 0.06135178729891777, "learning_rate": 0.00021488235294117645, "loss": 0.0327, "step": 1949 }, { "epoch": 17.41, "grad_norm": 0.06472061574459076, "learning_rate": 0.00021482352941176467, "loss": 0.0362, "step": 1950 }, { "epoch": 17.42, "grad_norm": 0.06496337801218033, "learning_rate": 0.00021476470588235294, "loss": 0.0244, "step": 1951 }, { "epoch": 17.43, "grad_norm": 0.06565911322832108, "learning_rate": 0.00021470588235294116, "loss": 0.0305, "step": 1952 }, { "epoch": 17.44, "grad_norm": 0.0685436949133873, "learning_rate": 0.00021464705882352937, "loss": 0.0283, "step": 1953 }, { "epoch": 17.45, "grad_norm": 0.1360541582107544, "learning_rate": 0.00021458823529411764, "loss": 0.0429, "step": 1954 }, { "epoch": 17.46, "grad_norm": 0.060156237334012985, "learning_rate": 0.00021452941176470586, "loss": 0.0355, "step": 1955 }, { "epoch": 17.46, "grad_norm": 0.06217309832572937, "learning_rate": 
0.00021447058823529408, "loss": 0.0334, "step": 1956 }, { "epoch": 17.47, "grad_norm": 0.06349260360002518, "learning_rate": 0.00021441176470588235, "loss": 0.0322, "step": 1957 }, { "epoch": 17.48, "grad_norm": 0.06153681501746178, "learning_rate": 0.00021435294117647056, "loss": 0.0285, "step": 1958 }, { "epoch": 17.49, "grad_norm": 0.05936651676893234, "learning_rate": 0.0002142941176470588, "loss": 0.0297, "step": 1959 }, { "epoch": 17.5, "grad_norm": 0.08301527798175812, "learning_rate": 0.00021423529411764705, "loss": 0.0289, "step": 1960 }, { "epoch": 17.51, "grad_norm": 0.1044265478849411, "learning_rate": 0.00021417647058823527, "loss": 0.0403, "step": 1961 }, { "epoch": 17.52, "grad_norm": 0.056587181985378265, "learning_rate": 0.0002141176470588235, "loss": 0.0296, "step": 1962 }, { "epoch": 17.53, "grad_norm": 0.06559578329324722, "learning_rate": 0.00021405882352941173, "loss": 0.0336, "step": 1963 }, { "epoch": 17.54, "grad_norm": 0.060637760907411575, "learning_rate": 0.000214, "loss": 0.033, "step": 1964 }, { "epoch": 17.54, "grad_norm": 0.07044398784637451, "learning_rate": 0.00021394117647058821, "loss": 0.0361, "step": 1965 }, { "epoch": 17.55, "grad_norm": 0.07613010704517365, "learning_rate": 0.00021388235294117643, "loss": 0.0266, "step": 1966 }, { "epoch": 17.56, "grad_norm": 0.18630178272724152, "learning_rate": 0.0002138235294117647, "loss": 0.0453, "step": 1967 }, { "epoch": 17.57, "grad_norm": 0.07019291073083878, "learning_rate": 0.00021376470588235292, "loss": 0.0319, "step": 1968 }, { "epoch": 17.58, "grad_norm": 0.07487376779317856, "learning_rate": 0.00021370588235294116, "loss": 0.0349, "step": 1969 }, { "epoch": 17.59, "grad_norm": 0.06258773803710938, "learning_rate": 0.0002136470588235294, "loss": 0.0267, "step": 1970 }, { "epoch": 17.6, "grad_norm": 0.06805012375116348, "learning_rate": 0.00021358823529411762, "loss": 0.0336, "step": 1971 }, { "epoch": 17.61, "grad_norm": 0.07135777920484543, "learning_rate": 
0.00021352941176470586, "loss": 0.0312, "step": 1972 }, { "epoch": 17.62, "grad_norm": 0.12469202280044556, "learning_rate": 0.00021347058823529408, "loss": 0.0427, "step": 1973 }, { "epoch": 17.62, "grad_norm": 0.05374273657798767, "learning_rate": 0.00021341176470588235, "loss": 0.0306, "step": 1974 }, { "epoch": 17.63, "grad_norm": 0.05935364589095116, "learning_rate": 0.00021335294117647057, "loss": 0.0287, "step": 1975 }, { "epoch": 17.64, "grad_norm": 0.05376312881708145, "learning_rate": 0.00021329411764705878, "loss": 0.0275, "step": 1976 }, { "epoch": 17.65, "grad_norm": 0.06635450571775436, "learning_rate": 0.00021323529411764705, "loss": 0.0329, "step": 1977 }, { "epoch": 17.66, "grad_norm": 0.060057323426008224, "learning_rate": 0.00021317647058823527, "loss": 0.027, "step": 1978 }, { "epoch": 17.67, "grad_norm": 0.11194895207881927, "learning_rate": 0.0002131176470588235, "loss": 0.0441, "step": 1979 }, { "epoch": 17.68, "grad_norm": 0.06371347606182098, "learning_rate": 0.00021305882352941176, "loss": 0.0367, "step": 1980 }, { "epoch": 17.69, "grad_norm": 0.06562800705432892, "learning_rate": 0.00021299999999999997, "loss": 0.0305, "step": 1981 }, { "epoch": 17.7, "grad_norm": 0.05915699526667595, "learning_rate": 0.00021294117647058822, "loss": 0.0298, "step": 1982 }, { "epoch": 17.71, "grad_norm": 0.06269513815641403, "learning_rate": 0.00021288235294117646, "loss": 0.028, "step": 1983 }, { "epoch": 17.71, "grad_norm": 0.07125990092754364, "learning_rate": 0.0002128235294117647, "loss": 0.0318, "step": 1984 }, { "epoch": 17.72, "grad_norm": 0.11740262806415558, "learning_rate": 0.00021276470588235292, "loss": 0.0415, "step": 1985 }, { "epoch": 17.73, "grad_norm": 0.11232439428567886, "learning_rate": 0.00021270588235294114, "loss": 0.0418, "step": 1986 }, { "epoch": 17.74, "grad_norm": 0.061971794813871384, "learning_rate": 0.0002126470588235294, "loss": 0.0313, "step": 1987 }, { "epoch": 17.75, "grad_norm": 0.06482931226491928, "learning_rate": 
0.00021258823529411762, "loss": 0.0286, "step": 1988 }, { "epoch": 17.76, "grad_norm": 0.06639612466096878, "learning_rate": 0.00021252941176470584, "loss": 0.03, "step": 1989 }, { "epoch": 17.77, "grad_norm": 0.06994175165891647, "learning_rate": 0.0002124705882352941, "loss": 0.0346, "step": 1990 }, { "epoch": 17.78, "grad_norm": 0.07315286248922348, "learning_rate": 0.00021241176470588233, "loss": 0.0284, "step": 1991 }, { "epoch": 17.79, "grad_norm": 0.12527918815612793, "learning_rate": 0.00021235294117647057, "loss": 0.0406, "step": 1992 }, { "epoch": 17.79, "grad_norm": 0.07032306492328644, "learning_rate": 0.0002122941176470588, "loss": 0.0318, "step": 1993 }, { "epoch": 17.8, "grad_norm": 0.07027847319841385, "learning_rate": 0.00021223529411764703, "loss": 0.0357, "step": 1994 }, { "epoch": 17.81, "grad_norm": 0.06513828784227371, "learning_rate": 0.00021217647058823527, "loss": 0.0287, "step": 1995 }, { "epoch": 17.82, "grad_norm": 0.07004145532846451, "learning_rate": 0.00021211764705882352, "loss": 0.0361, "step": 1996 }, { "epoch": 17.83, "grad_norm": 0.06596864014863968, "learning_rate": 0.00021205882352941176, "loss": 0.0299, "step": 1997 }, { "epoch": 17.84, "grad_norm": 0.14162759482860565, "learning_rate": 0.00021199999999999998, "loss": 0.0493, "step": 1998 }, { "epoch": 17.85, "grad_norm": 0.05906546860933304, "learning_rate": 0.0002119411764705882, "loss": 0.0337, "step": 1999 }, { "epoch": 17.86, "grad_norm": 0.05804269760847092, "learning_rate": 0.00021188235294117646, "loss": 0.0317, "step": 2000 }, { "epoch": 17.86, "eval_cer": 0.03409214764369177, "eval_loss": 0.1789909154176712, "eval_runtime": 22.8941, "eval_samples_per_second": 115.401, "eval_steps_per_second": 1.835, "eval_wer": 0.11342721142403808, "step": 2000 }, { "epoch": 17.87, "grad_norm": 0.05982355773448944, "learning_rate": 0.00021182352941176468, "loss": 0.0304, "step": 2001 }, { "epoch": 17.88, "grad_norm": 0.061808496713638306, "learning_rate": 0.00021176470588235295, 
"loss": 0.0344, "step": 2002 }, { "epoch": 17.88, "grad_norm": 0.0662093535065651, "learning_rate": 0.00021170588235294117, "loss": 0.0319, "step": 2003 }, { "epoch": 17.89, "grad_norm": 0.1215112954378128, "learning_rate": 0.00021164705882352938, "loss": 0.0425, "step": 2004 }, { "epoch": 17.9, "grad_norm": 0.057328928261995316, "learning_rate": 0.00021158823529411763, "loss": 0.0328, "step": 2005 }, { "epoch": 17.91, "grad_norm": 0.06580270081758499, "learning_rate": 0.00021152941176470587, "loss": 0.0344, "step": 2006 }, { "epoch": 17.92, "grad_norm": 0.06612606346607208, "learning_rate": 0.0002114705882352941, "loss": 0.0304, "step": 2007 }, { "epoch": 17.93, "grad_norm": 0.06731640547513962, "learning_rate": 0.00021141176470588233, "loss": 0.0297, "step": 2008 }, { "epoch": 17.94, "grad_norm": 0.06695950776338577, "learning_rate": 0.00021135294117647055, "loss": 0.029, "step": 2009 }, { "epoch": 17.95, "grad_norm": 0.0974825844168663, "learning_rate": 0.00021129411764705882, "loss": 0.0375, "step": 2010 }, { "epoch": 17.96, "grad_norm": 0.09003818035125732, "learning_rate": 0.00021123529411764703, "loss": 0.0384, "step": 2011 }, { "epoch": 17.96, "grad_norm": 0.05922992527484894, "learning_rate": 0.0002111764705882353, "loss": 0.0315, "step": 2012 }, { "epoch": 17.97, "grad_norm": 0.05970041826367378, "learning_rate": 0.00021111764705882352, "loss": 0.0301, "step": 2013 }, { "epoch": 17.98, "grad_norm": 0.07018531113862991, "learning_rate": 0.00021105882352941174, "loss": 0.0293, "step": 2014 }, { "epoch": 17.99, "grad_norm": 0.07534600049257278, "learning_rate": 0.00021099999999999998, "loss": 0.0329, "step": 2015 }, { "epoch": 18.0, "grad_norm": 0.08939068019390106, "learning_rate": 0.00021094117647058822, "loss": 0.0344, "step": 2016 }, { "epoch": 18.01, "grad_norm": 0.05164641514420509, "learning_rate": 0.00021088235294117644, "loss": 0.0298, "step": 2017 }, { "epoch": 18.02, "grad_norm": 0.064960777759552, "learning_rate": 0.00021082352941176468, "loss": 
0.0287, "step": 2018 }, { "epoch": 18.03, "grad_norm": 0.06120393052697182, "learning_rate": 0.00021076470588235293, "loss": 0.0317, "step": 2019 }, { "epoch": 18.04, "grad_norm": 0.07327283173799515, "learning_rate": 0.00021070588235294117, "loss": 0.031, "step": 2020 }, { "epoch": 18.04, "grad_norm": 0.06303515285253525, "learning_rate": 0.00021064705882352939, "loss": 0.0263, "step": 2021 }, { "epoch": 18.05, "grad_norm": 0.09693946689367294, "learning_rate": 0.0002105882352941176, "loss": 0.0358, "step": 2022 }, { "epoch": 18.06, "grad_norm": 0.09954741597175598, "learning_rate": 0.00021052941176470587, "loss": 0.0397, "step": 2023 }, { "epoch": 18.07, "grad_norm": 0.06713460385799408, "learning_rate": 0.0002104705882352941, "loss": 0.0334, "step": 2024 }, { "epoch": 18.08, "grad_norm": 0.0598839707672596, "learning_rate": 0.00021041176470588236, "loss": 0.0303, "step": 2025 }, { "epoch": 18.09, "grad_norm": 0.05972038954496384, "learning_rate": 0.00021035294117647058, "loss": 0.0299, "step": 2026 }, { "epoch": 18.1, "grad_norm": 0.06614918261766434, "learning_rate": 0.0002102941176470588, "loss": 0.0302, "step": 2027 }, { "epoch": 18.11, "grad_norm": 0.07636161893606186, "learning_rate": 0.00021023529411764703, "loss": 0.0351, "step": 2028 }, { "epoch": 18.12, "grad_norm": 0.09338352084159851, "learning_rate": 0.00021017647058823528, "loss": 0.0341, "step": 2029 }, { "epoch": 18.12, "grad_norm": 0.06121871992945671, "learning_rate": 0.00021011764705882352, "loss": 0.0321, "step": 2030 }, { "epoch": 18.13, "grad_norm": 0.05937650427222252, "learning_rate": 0.00021005882352941174, "loss": 0.0279, "step": 2031 }, { "epoch": 18.14, "grad_norm": 0.06002294272184372, "learning_rate": 0.00020999999999999998, "loss": 0.0262, "step": 2032 }, { "epoch": 18.15, "grad_norm": 0.06456071138381958, "learning_rate": 0.00020994117647058823, "loss": 0.0267, "step": 2033 }, { "epoch": 18.16, "grad_norm": 0.0755121260881424, "learning_rate": 0.00020988235294117644, "loss": 
0.0326, "step": 2034 }, { "epoch": 18.17, "grad_norm": 0.12616093456745148, "learning_rate": 0.0002098235294117647, "loss": 0.0461, "step": 2035 }, { "epoch": 18.18, "grad_norm": 0.05936598777770996, "learning_rate": 0.00020976470588235293, "loss": 0.0288, "step": 2036 }, { "epoch": 18.19, "grad_norm": 0.06453947722911835, "learning_rate": 0.00020970588235294114, "loss": 0.0314, "step": 2037 }, { "epoch": 18.2, "grad_norm": 0.06521280854940414, "learning_rate": 0.0002096470588235294, "loss": 0.0315, "step": 2038 }, { "epoch": 18.21, "grad_norm": 0.05086246132850647, "learning_rate": 0.00020958823529411763, "loss": 0.0255, "step": 2039 }, { "epoch": 18.21, "grad_norm": 0.06328276544809341, "learning_rate": 0.00020952941176470587, "loss": 0.0277, "step": 2040 }, { "epoch": 18.22, "grad_norm": 0.15216706693172455, "learning_rate": 0.0002094705882352941, "loss": 0.0422, "step": 2041 }, { "epoch": 18.23, "grad_norm": 0.0609402172267437, "learning_rate": 0.00020941176470588233, "loss": 0.0301, "step": 2042 }, { "epoch": 18.24, "grad_norm": 0.06806506961584091, "learning_rate": 0.00020935294117647058, "loss": 0.029, "step": 2043 }, { "epoch": 18.25, "grad_norm": 0.06164085119962692, "learning_rate": 0.0002092941176470588, "loss": 0.0311, "step": 2044 }, { "epoch": 18.26, "grad_norm": 0.06438139081001282, "learning_rate": 0.00020923529411764706, "loss": 0.027, "step": 2045 }, { "epoch": 18.27, "grad_norm": 0.06687790155410767, "learning_rate": 0.00020917647058823528, "loss": 0.0283, "step": 2046 }, { "epoch": 18.28, "grad_norm": 0.09623175859451294, "learning_rate": 0.0002091176470588235, "loss": 0.0284, "step": 2047 }, { "epoch": 18.29, "grad_norm": 0.09724742919206619, "learning_rate": 0.00020905882352941177, "loss": 0.0385, "step": 2048 }, { "epoch": 18.29, "grad_norm": 0.06700415909290314, "learning_rate": 0.00020899999999999998, "loss": 0.0281, "step": 2049 }, { "epoch": 18.3, "grad_norm": 0.05784018710255623, "learning_rate": 0.0002089411764705882, "loss": 0.0276, 
"step": 2050 }, { "epoch": 18.31, "grad_norm": 0.06372254341840744, "learning_rate": 0.00020888235294117644, "loss": 0.0308, "step": 2051 }, { "epoch": 18.32, "grad_norm": 0.057946398854255676, "learning_rate": 0.0002088235294117647, "loss": 0.0318, "step": 2052 }, { "epoch": 18.33, "grad_norm": 0.08636609464883804, "learning_rate": 0.00020876470588235293, "loss": 0.032, "step": 2053 }, { "epoch": 18.34, "grad_norm": 0.12471861392259598, "learning_rate": 0.00020870588235294115, "loss": 0.0416, "step": 2054 }, { "epoch": 18.35, "grad_norm": 0.05486654117703438, "learning_rate": 0.0002086470588235294, "loss": 0.0322, "step": 2055 }, { "epoch": 18.36, "grad_norm": 0.05568893253803253, "learning_rate": 0.00020858823529411763, "loss": 0.0301, "step": 2056 }, { "epoch": 18.37, "grad_norm": 0.06621300429105759, "learning_rate": 0.00020852941176470585, "loss": 0.0311, "step": 2057 }, { "epoch": 18.38, "grad_norm": 0.05489505082368851, "learning_rate": 0.00020847058823529412, "loss": 0.0258, "step": 2058 }, { "epoch": 18.38, "grad_norm": 0.06559371203184128, "learning_rate": 0.00020841176470588234, "loss": 0.0278, "step": 2059 }, { "epoch": 18.39, "grad_norm": 0.1418338567018509, "learning_rate": 0.00020835294117647055, "loss": 0.0419, "step": 2060 }, { "epoch": 18.4, "grad_norm": 0.06461971998214722, "learning_rate": 0.00020829411764705882, "loss": 0.0308, "step": 2061 }, { "epoch": 18.41, "grad_norm": 0.05940650403499603, "learning_rate": 0.00020823529411764704, "loss": 0.0273, "step": 2062 }, { "epoch": 18.42, "grad_norm": 0.06172783672809601, "learning_rate": 0.00020817647058823528, "loss": 0.0297, "step": 2063 }, { "epoch": 18.43, "grad_norm": 0.06961867213249207, "learning_rate": 0.0002081176470588235, "loss": 0.0302, "step": 2064 }, { "epoch": 18.44, "grad_norm": 0.06662897765636444, "learning_rate": 0.00020805882352941174, "loss": 0.0279, "step": 2065 }, { "epoch": 18.45, "grad_norm": 0.12071380764245987, "learning_rate": 0.000208, "loss": 0.0431, "step": 2066 }, { 
"epoch": 18.46, "grad_norm": 0.05877776816487312, "learning_rate": 0.0002079411764705882, "loss": 0.0299, "step": 2067 }, { "epoch": 18.46, "grad_norm": 0.06061911582946777, "learning_rate": 0.00020788235294117647, "loss": 0.0298, "step": 2068 }, { "epoch": 18.47, "grad_norm": 0.05871176719665527, "learning_rate": 0.0002078235294117647, "loss": 0.0281, "step": 2069 }, { "epoch": 18.48, "grad_norm": 0.06598810106515884, "learning_rate": 0.0002077647058823529, "loss": 0.0315, "step": 2070 }, { "epoch": 18.49, "grad_norm": 0.06471865624189377, "learning_rate": 0.00020770588235294118, "loss": 0.0291, "step": 2071 }, { "epoch": 18.5, "grad_norm": 0.08320900797843933, "learning_rate": 0.0002076470588235294, "loss": 0.0324, "step": 2072 }, { "epoch": 18.51, "grad_norm": 0.11166468262672424, "learning_rate": 0.00020758823529411764, "loss": 0.0378, "step": 2073 }, { "epoch": 18.52, "grad_norm": 0.06702893227338791, "learning_rate": 0.00020752941176470585, "loss": 0.0308, "step": 2074 }, { "epoch": 18.53, "grad_norm": 0.062195952981710434, "learning_rate": 0.0002074705882352941, "loss": 0.0286, "step": 2075 }, { "epoch": 18.54, "grad_norm": 0.058582693338394165, "learning_rate": 0.00020741176470588234, "loss": 0.0271, "step": 2076 }, { "epoch": 18.54, "grad_norm": 0.06748209148645401, "learning_rate": 0.00020735294117647056, "loss": 0.0327, "step": 2077 }, { "epoch": 18.55, "grad_norm": 0.09048274159431458, "learning_rate": 0.00020729411764705883, "loss": 0.0285, "step": 2078 }, { "epoch": 18.56, "grad_norm": 0.13230320811271667, "learning_rate": 0.00020723529411764704, "loss": 0.0429, "step": 2079 }, { "epoch": 18.57, "grad_norm": 0.05814466252923012, "learning_rate": 0.00020717647058823526, "loss": 0.0254, "step": 2080 }, { "epoch": 18.58, "grad_norm": 0.06753221154212952, "learning_rate": 0.00020711764705882353, "loss": 0.0336, "step": 2081 }, { "epoch": 18.59, "grad_norm": 0.06285896897315979, "learning_rate": 0.00020705882352941175, "loss": 0.0269, "step": 2082 }, { 
"epoch": 18.6, "grad_norm": 0.06398814171552658, "learning_rate": 0.00020699999999999996, "loss": 0.0268, "step": 2083 }, { "epoch": 18.61, "grad_norm": 0.07129596918821335, "learning_rate": 0.00020694117647058823, "loss": 0.0334, "step": 2084 }, { "epoch": 18.62, "grad_norm": 0.14242440462112427, "learning_rate": 0.00020688235294117645, "loss": 0.0466, "step": 2085 }, { "epoch": 18.62, "grad_norm": 0.05649041011929512, "learning_rate": 0.0002068235294117647, "loss": 0.0289, "step": 2086 }, { "epoch": 18.63, "grad_norm": 0.054231468588113785, "learning_rate": 0.0002067647058823529, "loss": 0.0301, "step": 2087 }, { "epoch": 18.64, "grad_norm": 0.05468952655792236, "learning_rate": 0.00020670588235294115, "loss": 0.0279, "step": 2088 }, { "epoch": 18.65, "grad_norm": 0.05843213573098183, "learning_rate": 0.0002066470588235294, "loss": 0.0335, "step": 2089 }, { "epoch": 18.66, "grad_norm": 0.06207503005862236, "learning_rate": 0.0002065882352941176, "loss": 0.0306, "step": 2090 }, { "epoch": 18.67, "grad_norm": 0.10590814799070358, "learning_rate": 0.00020652941176470588, "loss": 0.0416, "step": 2091 }, { "epoch": 18.68, "grad_norm": 0.06286563724279404, "learning_rate": 0.0002064705882352941, "loss": 0.0336, "step": 2092 }, { "epoch": 18.69, "grad_norm": 0.06435458362102509, "learning_rate": 0.00020641176470588232, "loss": 0.0303, "step": 2093 }, { "epoch": 18.7, "grad_norm": 0.06846845149993896, "learning_rate": 0.00020635294117647059, "loss": 0.0312, "step": 2094 }, { "epoch": 18.71, "grad_norm": 0.06863770633935928, "learning_rate": 0.0002062941176470588, "loss": 0.033, "step": 2095 }, { "epoch": 18.71, "grad_norm": 0.07404244691133499, "learning_rate": 0.00020623529411764705, "loss": 0.0334, "step": 2096 }, { "epoch": 18.72, "grad_norm": 0.09194135665893555, "learning_rate": 0.0002061764705882353, "loss": 0.0327, "step": 2097 }, { "epoch": 18.73, "grad_norm": 0.13283534348011017, "learning_rate": 0.0002061176470588235, "loss": 0.038, "step": 2098 }, { "epoch": 
18.74, "grad_norm": 0.05884513258934021, "learning_rate": 0.00020605882352941175, "loss": 0.036, "step": 2099 }, { "epoch": 18.75, "grad_norm": 0.06227260082960129, "learning_rate": 0.00020599999999999997, "loss": 0.0264, "step": 2100 }, { "epoch": 18.75, "eval_cer": 0.03562556140608029, "eval_loss": 0.18643657863140106, "eval_runtime": 22.248, "eval_samples_per_second": 118.752, "eval_steps_per_second": 1.888, "eval_wer": 0.11590638635462118, "step": 2100 }, { "epoch": 18.76, "grad_norm": 0.06634752452373505, "learning_rate": 0.00020594117647058824, "loss": 0.0276, "step": 2101 }, { "epoch": 18.77, "grad_norm": 0.06490685045719147, "learning_rate": 0.00020588235294117645, "loss": 0.0318, "step": 2102 }, { "epoch": 18.78, "grad_norm": 0.07782765477895737, "learning_rate": 0.00020582352941176467, "loss": 0.0306, "step": 2103 }, { "epoch": 18.79, "grad_norm": 0.09754545241594315, "learning_rate": 0.00020576470588235294, "loss": 0.0403, "step": 2104 }, { "epoch": 18.79, "grad_norm": 0.0597204752266407, "learning_rate": 0.00020570588235294116, "loss": 0.0276, "step": 2105 }, { "epoch": 18.8, "grad_norm": 0.06698883324861526, "learning_rate": 0.0002056470588235294, "loss": 0.0318, "step": 2106 }, { "epoch": 18.81, "grad_norm": 0.06619041413068771, "learning_rate": 0.00020558823529411764, "loss": 0.0334, "step": 2107 }, { "epoch": 18.82, "grad_norm": 0.05624597892165184, "learning_rate": 0.00020552941176470586, "loss": 0.0262, "step": 2108 }, { "epoch": 18.83, "grad_norm": 0.0718187689781189, "learning_rate": 0.0002054705882352941, "loss": 0.0314, "step": 2109 }, { "epoch": 18.84, "grad_norm": 0.11686796694993973, "learning_rate": 0.00020541176470588232, "loss": 0.0407, "step": 2110 }, { "epoch": 18.85, "grad_norm": 0.05923014134168625, "learning_rate": 0.0002053529411764706, "loss": 0.0247, "step": 2111 }, { "epoch": 18.86, "grad_norm": 0.06864576041698456, "learning_rate": 0.0002052941176470588, "loss": 0.0346, "step": 2112 }, { "epoch": 18.87, "grad_norm": 
0.06370431184768677, "learning_rate": 0.00020523529411764702, "loss": 0.0285, "step": 2113 }, { "epoch": 18.88, "grad_norm": 0.062278784811496735, "learning_rate": 0.0002051764705882353, "loss": 0.0273, "step": 2114 }, { "epoch": 18.88, "grad_norm": 0.07636075466871262, "learning_rate": 0.0002051176470588235, "loss": 0.0324, "step": 2115 }, { "epoch": 18.89, "grad_norm": 0.12746940553188324, "learning_rate": 0.00020505882352941173, "loss": 0.0415, "step": 2116 }, { "epoch": 18.9, "grad_norm": 0.05222547799348831, "learning_rate": 0.000205, "loss": 0.0291, "step": 2117 }, { "epoch": 18.91, "grad_norm": 0.060084518045186996, "learning_rate": 0.0002049411764705882, "loss": 0.0288, "step": 2118 }, { "epoch": 18.92, "grad_norm": 0.06343322992324829, "learning_rate": 0.00020488235294117646, "loss": 0.0294, "step": 2119 }, { "epoch": 18.93, "grad_norm": 0.06317029148340225, "learning_rate": 0.0002048235294117647, "loss": 0.0296, "step": 2120 }, { "epoch": 18.94, "grad_norm": 0.07388322055339813, "learning_rate": 0.00020476470588235292, "loss": 0.0288, "step": 2121 }, { "epoch": 18.95, "grad_norm": 0.09308870881795883, "learning_rate": 0.00020470588235294116, "loss": 0.0365, "step": 2122 }, { "epoch": 18.96, "grad_norm": 0.09080292284488678, "learning_rate": 0.00020464705882352937, "loss": 0.0431, "step": 2123 }, { "epoch": 18.96, "grad_norm": 0.06618691235780716, "learning_rate": 0.00020458823529411765, "loss": 0.0355, "step": 2124 }, { "epoch": 18.97, "grad_norm": 0.06465675681829453, "learning_rate": 0.00020452941176470586, "loss": 0.0332, "step": 2125 }, { "epoch": 18.98, "grad_norm": 0.06030743569135666, "learning_rate": 0.00020447058823529408, "loss": 0.0273, "step": 2126 }, { "epoch": 18.99, "grad_norm": 0.08355317264795303, "learning_rate": 0.00020441176470588235, "loss": 0.0343, "step": 2127 }, { "epoch": 19.0, "grad_norm": 0.09227706491947174, "learning_rate": 0.00020435294117647056, "loss": 0.0341, "step": 2128 }, { "epoch": 19.01, "grad_norm": 
0.0566699281334877, "learning_rate": 0.0002042941176470588, "loss": 0.0255, "step": 2129 }, { "epoch": 19.02, "grad_norm": 0.05342443659901619, "learning_rate": 0.00020423529411764705, "loss": 0.0287, "step": 2130 }, { "epoch": 19.03, "grad_norm": 0.05447328835725784, "learning_rate": 0.00020417647058823527, "loss": 0.0291, "step": 2131 }, { "epoch": 19.04, "grad_norm": 0.054897114634513855, "learning_rate": 0.0002041176470588235, "loss": 0.026, "step": 2132 }, { "epoch": 19.04, "grad_norm": 0.06621937453746796, "learning_rate": 0.00020405882352941173, "loss": 0.0251, "step": 2133 }, { "epoch": 19.05, "grad_norm": 0.08687276393175125, "learning_rate": 0.000204, "loss": 0.033, "step": 2134 }, { "epoch": 19.06, "grad_norm": 0.1125141829252243, "learning_rate": 0.00020394117647058821, "loss": 0.04, "step": 2135 }, { "epoch": 19.07, "grad_norm": 0.06082363426685333, "learning_rate": 0.00020388235294117643, "loss": 0.0315, "step": 2136 }, { "epoch": 19.08, "grad_norm": 0.0553433857858181, "learning_rate": 0.0002038235294117647, "loss": 0.0268, "step": 2137 }, { "epoch": 19.09, "grad_norm": 0.06114237755537033, "learning_rate": 0.00020376470588235292, "loss": 0.0279, "step": 2138 }, { "epoch": 19.1, "grad_norm": 0.05804065987467766, "learning_rate": 0.00020370588235294116, "loss": 0.0272, "step": 2139 }, { "epoch": 19.11, "grad_norm": 0.09285765141248703, "learning_rate": 0.0002036470588235294, "loss": 0.0291, "step": 2140 }, { "epoch": 19.12, "grad_norm": 0.1081705391407013, "learning_rate": 0.00020358823529411762, "loss": 0.038, "step": 2141 }, { "epoch": 19.12, "grad_norm": 0.05302184820175171, "learning_rate": 0.00020352941176470586, "loss": 0.028, "step": 2142 }, { "epoch": 19.13, "grad_norm": 0.06427904963493347, "learning_rate": 0.0002034705882352941, "loss": 0.0266, "step": 2143 }, { "epoch": 19.14, "grad_norm": 0.05788515880703926, "learning_rate": 0.00020341176470588232, "loss": 0.025, "step": 2144 }, { "epoch": 19.15, "grad_norm": 0.0727313756942749, 
"learning_rate": 0.00020335294117647057, "loss": 0.0305, "step": 2145 }, { "epoch": 19.16, "grad_norm": 0.06664348393678665, "learning_rate": 0.00020329411764705878, "loss": 0.026, "step": 2146 }, { "epoch": 19.17, "grad_norm": 0.11840501427650452, "learning_rate": 0.00020323529411764705, "loss": 0.0388, "step": 2147 }, { "epoch": 19.18, "grad_norm": 0.0626482218503952, "learning_rate": 0.00020317647058823527, "loss": 0.0313, "step": 2148 }, { "epoch": 19.19, "grad_norm": 0.06950737535953522, "learning_rate": 0.0002031176470588235, "loss": 0.0351, "step": 2149 }, { "epoch": 19.2, "grad_norm": 0.061929699033498764, "learning_rate": 0.00020305882352941176, "loss": 0.0329, "step": 2150 }, { "epoch": 19.21, "grad_norm": 0.05309795215725899, "learning_rate": 0.00020299999999999997, "loss": 0.0233, "step": 2151 }, { "epoch": 19.21, "grad_norm": 0.062375832349061966, "learning_rate": 0.00020294117647058822, "loss": 0.0257, "step": 2152 }, { "epoch": 19.22, "grad_norm": 0.13026969134807587, "learning_rate": 0.00020288235294117646, "loss": 0.0431, "step": 2153 }, { "epoch": 19.23, "grad_norm": 0.06232600286602974, "learning_rate": 0.00020282352941176468, "loss": 0.0308, "step": 2154 }, { "epoch": 19.24, "grad_norm": 0.0655936449766159, "learning_rate": 0.00020276470588235292, "loss": 0.0299, "step": 2155 }, { "epoch": 19.25, "grad_norm": 0.06494201719760895, "learning_rate": 0.00020270588235294116, "loss": 0.0309, "step": 2156 }, { "epoch": 19.26, "grad_norm": 0.06123069301247597, "learning_rate": 0.0002026470588235294, "loss": 0.0267, "step": 2157 }, { "epoch": 19.27, "grad_norm": 0.06938119232654572, "learning_rate": 0.00020258823529411762, "loss": 0.0304, "step": 2158 }, { "epoch": 19.28, "grad_norm": 0.10094217211008072, "learning_rate": 0.00020252941176470584, "loss": 0.035, "step": 2159 }, { "epoch": 19.29, "grad_norm": 0.08669798076152802, "learning_rate": 0.0002024705882352941, "loss": 0.035, "step": 2160 }, { "epoch": 19.29, "grad_norm": 0.0575607568025589, 
"learning_rate": 0.00020241176470588233, "loss": 0.0307, "step": 2161 }, { "epoch": 19.3, "grad_norm": 0.061026837676763535, "learning_rate": 0.0002023529411764706, "loss": 0.0301, "step": 2162 }, { "epoch": 19.31, "grad_norm": 0.05477208271622658, "learning_rate": 0.00020229411764705881, "loss": 0.0278, "step": 2163 }, { "epoch": 19.32, "grad_norm": 0.05956774577498436, "learning_rate": 0.00020223529411764703, "loss": 0.0275, "step": 2164 }, { "epoch": 19.33, "grad_norm": 0.06744513660669327, "learning_rate": 0.00020217647058823527, "loss": 0.0251, "step": 2165 }, { "epoch": 19.34, "grad_norm": 0.10411574691534042, "learning_rate": 0.00020211764705882352, "loss": 0.0408, "step": 2166 }, { "epoch": 19.35, "grad_norm": 0.05916345492005348, "learning_rate": 0.00020205882352941176, "loss": 0.0285, "step": 2167 }, { "epoch": 19.36, "grad_norm": 0.07293649762868881, "learning_rate": 0.00020199999999999998, "loss": 0.0284, "step": 2168 }, { "epoch": 19.37, "grad_norm": 0.06648380309343338, "learning_rate": 0.0002019411764705882, "loss": 0.0273, "step": 2169 }, { "epoch": 19.38, "grad_norm": 0.06677570194005966, "learning_rate": 0.00020188235294117646, "loss": 0.03, "step": 2170 }, { "epoch": 19.38, "grad_norm": 0.06720144301652908, "learning_rate": 0.00020182352941176468, "loss": 0.0263, "step": 2171 }, { "epoch": 19.39, "grad_norm": 0.1155744194984436, "learning_rate": 0.00020176470588235295, "loss": 0.039, "step": 2172 }, { "epoch": 19.4, "grad_norm": 0.051371246576309204, "learning_rate": 0.00020170588235294117, "loss": 0.0254, "step": 2173 }, { "epoch": 19.41, "grad_norm": 0.05222965031862259, "learning_rate": 0.00020164705882352938, "loss": 0.0283, "step": 2174 }, { "epoch": 19.42, "grad_norm": 0.05005746707320213, "learning_rate": 0.00020158823529411763, "loss": 0.0248, "step": 2175 }, { "epoch": 19.43, "grad_norm": 0.06565649062395096, "learning_rate": 0.00020152941176470587, "loss": 0.026, "step": 2176 }, { "epoch": 19.44, "grad_norm": 0.06322000920772552, 
"learning_rate": 0.00020147058823529409, "loss": 0.0277, "step": 2177 }, { "epoch": 19.45, "grad_norm": 0.12081991881132126, "learning_rate": 0.00020141176470588233, "loss": 0.0389, "step": 2178 }, { "epoch": 19.46, "grad_norm": 0.06125609204173088, "learning_rate": 0.00020135294117647057, "loss": 0.0308, "step": 2179 }, { "epoch": 19.46, "grad_norm": 0.07139113545417786, "learning_rate": 0.00020129411764705882, "loss": 0.0323, "step": 2180 }, { "epoch": 19.47, "grad_norm": 0.06434722244739532, "learning_rate": 0.00020123529411764703, "loss": 0.0322, "step": 2181 }, { "epoch": 19.48, "grad_norm": 0.0564647801220417, "learning_rate": 0.00020117647058823525, "loss": 0.0222, "step": 2182 }, { "epoch": 19.49, "grad_norm": 0.062009118497371674, "learning_rate": 0.00020111764705882352, "loss": 0.0251, "step": 2183 }, { "epoch": 19.5, "grad_norm": 0.09379425644874573, "learning_rate": 0.00020105882352941174, "loss": 0.0345, "step": 2184 }, { "epoch": 19.51, "grad_norm": 0.11802651733160019, "learning_rate": 0.000201, "loss": 0.0434, "step": 2185 }, { "epoch": 19.52, "grad_norm": 0.059047456830739975, "learning_rate": 0.00020094117647058822, "loss": 0.0288, "step": 2186 }, { "epoch": 19.53, "grad_norm": 0.05848240479826927, "learning_rate": 0.00020088235294117644, "loss": 0.0303, "step": 2187 }, { "epoch": 19.54, "grad_norm": 0.0600242018699646, "learning_rate": 0.00020082352941176468, "loss": 0.0277, "step": 2188 }, { "epoch": 19.54, "grad_norm": 0.06942985206842422, "learning_rate": 0.00020076470588235293, "loss": 0.0295, "step": 2189 }, { "epoch": 19.55, "grad_norm": 0.07458772510290146, "learning_rate": 0.00020070588235294117, "loss": 0.03, "step": 2190 }, { "epoch": 19.56, "grad_norm": 0.11516823619604111, "learning_rate": 0.00020064705882352939, "loss": 0.0408, "step": 2191 }, { "epoch": 19.57, "grad_norm": 0.060609959065914154, "learning_rate": 0.00020058823529411763, "loss": 0.0283, "step": 2192 }, { "epoch": 19.58, "grad_norm": 0.06131531670689583, 
"learning_rate": 0.00020052941176470587, "loss": 0.0283, "step": 2193 }, { "epoch": 19.59, "grad_norm": 0.05881686136126518, "learning_rate": 0.0002004705882352941, "loss": 0.0298, "step": 2194 }, { "epoch": 19.6, "grad_norm": 0.06249463930726051, "learning_rate": 0.00020041176470588236, "loss": 0.0294, "step": 2195 }, { "epoch": 19.61, "grad_norm": 0.08682270348072052, "learning_rate": 0.00020035294117647058, "loss": 0.0338, "step": 2196 }, { "epoch": 19.62, "grad_norm": 0.12249086797237396, "learning_rate": 0.0002002941176470588, "loss": 0.042, "step": 2197 }, { "epoch": 19.62, "grad_norm": 0.05887043476104736, "learning_rate": 0.00020023529411764704, "loss": 0.0289, "step": 2198 }, { "epoch": 19.63, "grad_norm": 0.05861738324165344, "learning_rate": 0.00020017647058823528, "loss": 0.0292, "step": 2199 }, { "epoch": 19.64, "grad_norm": 0.059787504374980927, "learning_rate": 0.00020011764705882352, "loss": 0.0271, "step": 2200 }, { "epoch": 19.64, "eval_cer": 0.03406208070717435, "eval_loss": 0.1861434280872345, "eval_runtime": 22.5624, "eval_samples_per_second": 117.098, "eval_steps_per_second": 1.862, "eval_wer": 0.1149940499801666, "step": 2200 }, { "epoch": 19.65, "grad_norm": 0.05699005350470543, "learning_rate": 0.00020005882352941174, "loss": 0.0298, "step": 2201 }, { "epoch": 19.66, "grad_norm": 0.0681491419672966, "learning_rate": 0.00019999999999999998, "loss": 0.0313, "step": 2202 }, { "epoch": 19.67, "grad_norm": 0.1209702342748642, "learning_rate": 0.00019994117647058823, "loss": 0.0457, "step": 2203 }, { "epoch": 19.68, "grad_norm": 0.06152784079313278, "learning_rate": 0.00019988235294117644, "loss": 0.0297, "step": 2204 }, { "epoch": 19.69, "grad_norm": 0.06058143824338913, "learning_rate": 0.0001998235294117647, "loss": 0.0318, "step": 2205 }, { "epoch": 19.7, "grad_norm": 0.06092606484889984, "learning_rate": 0.00019976470588235293, "loss": 0.0283, "step": 2206 }, { "epoch": 19.71, "grad_norm": 0.0646490603685379, "learning_rate": 
0.00019970588235294115, "loss": 0.0325, "step": 2207 }, { "epoch": 19.71, "grad_norm": 0.067710280418396, "learning_rate": 0.00019964705882352942, "loss": 0.0323, "step": 2208 }, { "epoch": 19.72, "grad_norm": 0.08690015226602554, "learning_rate": 0.00019958823529411763, "loss": 0.0349, "step": 2209 }, { "epoch": 19.73, "grad_norm": 0.07700332999229431, "learning_rate": 0.00019952941176470585, "loss": 0.0357, "step": 2210 }, { "epoch": 19.74, "grad_norm": 0.056973446160554886, "learning_rate": 0.0001994705882352941, "loss": 0.0298, "step": 2211 }, { "epoch": 19.75, "grad_norm": 0.06385958194732666, "learning_rate": 0.00019941176470588234, "loss": 0.0312, "step": 2212 }, { "epoch": 19.76, "grad_norm": 0.06139913946390152, "learning_rate": 0.00019935294117647058, "loss": 0.0285, "step": 2213 }, { "epoch": 19.77, "grad_norm": 0.06697844713926315, "learning_rate": 0.0001992941176470588, "loss": 0.0312, "step": 2214 }, { "epoch": 19.78, "grad_norm": 0.07495854794979095, "learning_rate": 0.00019923529411764704, "loss": 0.026, "step": 2215 }, { "epoch": 19.79, "grad_norm": 0.1093396246433258, "learning_rate": 0.00019917647058823528, "loss": 0.0416, "step": 2216 }, { "epoch": 19.79, "grad_norm": 0.059817247092723846, "learning_rate": 0.0001991176470588235, "loss": 0.0301, "step": 2217 }, { "epoch": 19.8, "grad_norm": 0.06090405583381653, "learning_rate": 0.00019905882352941177, "loss": 0.0303, "step": 2218 }, { "epoch": 19.81, "grad_norm": 0.053513459861278534, "learning_rate": 0.00019899999999999999, "loss": 0.0275, "step": 2219 }, { "epoch": 19.82, "grad_norm": 0.0619007870554924, "learning_rate": 0.0001989411764705882, "loss": 0.0246, "step": 2220 }, { "epoch": 19.83, "grad_norm": 0.07230164110660553, "learning_rate": 0.00019888235294117647, "loss": 0.0303, "step": 2221 }, { "epoch": 19.84, "grad_norm": 0.11419408023357391, "learning_rate": 0.0001988235294117647, "loss": 0.0474, "step": 2222 }, { "epoch": 19.85, "grad_norm": 0.05775037035346031, "learning_rate": 
0.00019876470588235293, "loss": 0.0241, "step": 2223 }, { "epoch": 19.86, "grad_norm": 0.06071503460407257, "learning_rate": 0.00019870588235294115, "loss": 0.0293, "step": 2224 }, { "epoch": 19.87, "grad_norm": 0.060517024248838425, "learning_rate": 0.0001986470588235294, "loss": 0.0305, "step": 2225 }, { "epoch": 19.88, "grad_norm": 0.05888737365603447, "learning_rate": 0.00019858823529411764, "loss": 0.0321, "step": 2226 }, { "epoch": 19.88, "grad_norm": 0.06835459172725677, "learning_rate": 0.00019852941176470585, "loss": 0.0297, "step": 2227 }, { "epoch": 19.89, "grad_norm": 0.1049518883228302, "learning_rate": 0.00019847058823529412, "loss": 0.0396, "step": 2228 }, { "epoch": 19.9, "grad_norm": 0.05264618247747421, "learning_rate": 0.00019841176470588234, "loss": 0.027, "step": 2229 }, { "epoch": 19.91, "grad_norm": 0.0566607229411602, "learning_rate": 0.00019835294117647055, "loss": 0.028, "step": 2230 }, { "epoch": 19.92, "grad_norm": 0.0578635036945343, "learning_rate": 0.00019829411764705883, "loss": 0.0277, "step": 2231 }, { "epoch": 19.93, "grad_norm": 0.055492985993623734, "learning_rate": 0.00019823529411764704, "loss": 0.0258, "step": 2232 }, { "epoch": 19.94, "grad_norm": 0.057883501052856445, "learning_rate": 0.00019817647058823528, "loss": 0.0267, "step": 2233 }, { "epoch": 19.95, "grad_norm": 0.11310465633869171, "learning_rate": 0.0001981176470588235, "loss": 0.0353, "step": 2234 }, { "epoch": 19.96, "grad_norm": 0.11274304986000061, "learning_rate": 0.00019805882352941174, "loss": 0.0385, "step": 2235 }, { "epoch": 19.96, "grad_norm": 0.05661230534315109, "learning_rate": 0.000198, "loss": 0.0258, "step": 2236 }, { "epoch": 19.97, "grad_norm": 0.07197310775518417, "learning_rate": 0.0001979411764705882, "loss": 0.0298, "step": 2237 }, { "epoch": 19.98, "grad_norm": 0.0674099549651146, "learning_rate": 0.00019788235294117647, "loss": 0.029, "step": 2238 }, { "epoch": 19.99, "grad_norm": 0.07113310694694519, "learning_rate": 
0.0001978235294117647, "loss": 0.0283, "step": 2239 }, { "epoch": 20.0, "grad_norm": 0.08611607551574707, "learning_rate": 0.0001977647058823529, "loss": 0.0375, "step": 2240 }, { "epoch": 20.01, "grad_norm": 0.051717352122068405, "learning_rate": 0.00019770588235294118, "loss": 0.0291, "step": 2241 }, { "epoch": 20.02, "grad_norm": 0.055984143167734146, "learning_rate": 0.0001976470588235294, "loss": 0.0277, "step": 2242 }, { "epoch": 20.03, "grad_norm": 0.060987718403339386, "learning_rate": 0.0001975882352941176, "loss": 0.0313, "step": 2243 }, { "epoch": 20.04, "grad_norm": 0.05700723081827164, "learning_rate": 0.00019752941176470588, "loss": 0.0244, "step": 2244 }, { "epoch": 20.04, "grad_norm": 0.05862624570727348, "learning_rate": 0.0001974705882352941, "loss": 0.0255, "step": 2245 }, { "epoch": 20.05, "grad_norm": 0.07748149335384369, "learning_rate": 0.00019741176470588234, "loss": 0.0266, "step": 2246 }, { "epoch": 20.06, "grad_norm": 0.08739695698022842, "learning_rate": 0.00019735294117647056, "loss": 0.0287, "step": 2247 }, { "epoch": 20.07, "grad_norm": 0.05724698305130005, "learning_rate": 0.0001972941176470588, "loss": 0.0273, "step": 2248 }, { "epoch": 20.08, "grad_norm": 0.06167592108249664, "learning_rate": 0.00019723529411764704, "loss": 0.0297, "step": 2249 }, { "epoch": 20.09, "grad_norm": 0.0602608323097229, "learning_rate": 0.00019717647058823526, "loss": 0.0298, "step": 2250 }, { "epoch": 20.1, "grad_norm": 0.06602107733488083, "learning_rate": 0.00019711764705882353, "loss": 0.0276, "step": 2251 }, { "epoch": 20.11, "grad_norm": 0.06687210500240326, "learning_rate": 0.00019705882352941175, "loss": 0.0302, "step": 2252 }, { "epoch": 20.12, "grad_norm": 0.09839525073766708, "learning_rate": 0.00019699999999999996, "loss": 0.0394, "step": 2253 }, { "epoch": 20.12, "grad_norm": 0.049129754304885864, "learning_rate": 0.00019694117647058823, "loss": 0.0267, "step": 2254 }, { "epoch": 20.13, "grad_norm": 0.053939610719680786, "learning_rate": 
0.00019688235294117645, "loss": 0.0288, "step": 2255 }, { "epoch": 20.14, "grad_norm": 0.05686270073056221, "learning_rate": 0.0001968235294117647, "loss": 0.0293, "step": 2256 }, { "epoch": 20.15, "grad_norm": 0.05865943804383278, "learning_rate": 0.00019676470588235294, "loss": 0.0247, "step": 2257 }, { "epoch": 20.16, "grad_norm": 0.06347531825304031, "learning_rate": 0.00019670588235294115, "loss": 0.0242, "step": 2258 }, { "epoch": 20.17, "grad_norm": 0.1037096381187439, "learning_rate": 0.0001966470588235294, "loss": 0.0299, "step": 2259 }, { "epoch": 20.18, "grad_norm": 0.05274982750415802, "learning_rate": 0.00019658823529411761, "loss": 0.0272, "step": 2260 }, { "epoch": 20.19, "grad_norm": 0.0705869272351265, "learning_rate": 0.00019652941176470588, "loss": 0.0325, "step": 2261 }, { "epoch": 20.2, "grad_norm": 0.06054059416055679, "learning_rate": 0.0001964705882352941, "loss": 0.0269, "step": 2262 }, { "epoch": 20.21, "grad_norm": 0.060479119420051575, "learning_rate": 0.00019641176470588232, "loss": 0.0269, "step": 2263 }, { "epoch": 20.21, "grad_norm": 0.06396442651748657, "learning_rate": 0.0001963529411764706, "loss": 0.0232, "step": 2264 }, { "epoch": 20.22, "grad_norm": 0.0979032889008522, "learning_rate": 0.0001962941176470588, "loss": 0.0325, "step": 2265 }, { "epoch": 20.23, "grad_norm": 0.059267591685056686, "learning_rate": 0.00019623529411764705, "loss": 0.0236, "step": 2266 }, { "epoch": 20.24, "grad_norm": 0.05923108384013176, "learning_rate": 0.0001961764705882353, "loss": 0.0271, "step": 2267 }, { "epoch": 20.25, "grad_norm": 0.06391739845275879, "learning_rate": 0.0001961176470588235, "loss": 0.0288, "step": 2268 }, { "epoch": 20.26, "grad_norm": 0.06083698943257332, "learning_rate": 0.00019605882352941175, "loss": 0.0285, "step": 2269 }, { "epoch": 20.27, "grad_norm": 0.05461670830845833, "learning_rate": 0.00019599999999999997, "loss": 0.0268, "step": 2270 }, { "epoch": 20.28, "grad_norm": 0.08133354783058167, "learning_rate": 
0.00019594117647058824, "loss": 0.031, "step": 2271 }, { "epoch": 20.29, "grad_norm": 0.06891635805368423, "learning_rate": 0.00019588235294117645, "loss": 0.0288, "step": 2272 }, { "epoch": 20.29, "grad_norm": 0.054957352578639984, "learning_rate": 0.00019582352941176467, "loss": 0.0266, "step": 2273 }, { "epoch": 20.3, "grad_norm": 0.06640379130840302, "learning_rate": 0.00019576470588235294, "loss": 0.0284, "step": 2274 }, { "epoch": 20.31, "grad_norm": 0.056157227605581284, "learning_rate": 0.00019570588235294116, "loss": 0.0291, "step": 2275 }, { "epoch": 20.32, "grad_norm": 0.06232301518321037, "learning_rate": 0.00019564705882352937, "loss": 0.0251, "step": 2276 }, { "epoch": 20.33, "grad_norm": 0.06556544452905655, "learning_rate": 0.00019558823529411764, "loss": 0.0208, "step": 2277 }, { "epoch": 20.34, "grad_norm": 0.08982174098491669, "learning_rate": 0.00019552941176470586, "loss": 0.0355, "step": 2278 }, { "epoch": 20.35, "grad_norm": 0.05531982332468033, "learning_rate": 0.0001954705882352941, "loss": 0.0288, "step": 2279 }, { "epoch": 20.36, "grad_norm": 0.057537741959095, "learning_rate": 0.00019541176470588235, "loss": 0.0292, "step": 2280 }, { "epoch": 20.37, "grad_norm": 0.06393026560544968, "learning_rate": 0.00019535294117647056, "loss": 0.0259, "step": 2281 }, { "epoch": 20.38, "grad_norm": 0.06747432053089142, "learning_rate": 0.0001952941176470588, "loss": 0.0285, "step": 2282 }, { "epoch": 20.38, "grad_norm": 0.06794365495443344, "learning_rate": 0.00019523529411764702, "loss": 0.0258, "step": 2283 }, { "epoch": 20.39, "grad_norm": 0.10379933565855026, "learning_rate": 0.0001951764705882353, "loss": 0.0363, "step": 2284 }, { "epoch": 20.4, "grad_norm": 0.058310963213443756, "learning_rate": 0.0001951176470588235, "loss": 0.0255, "step": 2285 }, { "epoch": 20.41, "grad_norm": 0.059759609401226044, "learning_rate": 0.00019505882352941173, "loss": 0.0244, "step": 2286 }, { "epoch": 20.42, "grad_norm": 0.05604662001132965, "learning_rate": 
0.000195, "loss": 0.0276, "step": 2287 }, { "epoch": 20.43, "grad_norm": 0.05281359329819679, "learning_rate": 0.0001949411764705882, "loss": 0.0277, "step": 2288 }, { "epoch": 20.44, "grad_norm": 0.062450770288705826, "learning_rate": 0.00019488235294117646, "loss": 0.026, "step": 2289 }, { "epoch": 20.45, "grad_norm": 0.1067199781537056, "learning_rate": 0.0001948235294117647, "loss": 0.0391, "step": 2290 }, { "epoch": 20.46, "grad_norm": 0.0533805713057518, "learning_rate": 0.00019476470588235292, "loss": 0.0297, "step": 2291 }, { "epoch": 20.46, "grad_norm": 0.05415339767932892, "learning_rate": 0.00019470588235294116, "loss": 0.0284, "step": 2292 }, { "epoch": 20.47, "grad_norm": 0.059836555272340775, "learning_rate": 0.0001946470588235294, "loss": 0.0284, "step": 2293 }, { "epoch": 20.48, "grad_norm": 0.0606420524418354, "learning_rate": 0.00019458823529411765, "loss": 0.0243, "step": 2294 }, { "epoch": 20.49, "grad_norm": 0.060408059507608414, "learning_rate": 0.00019452941176470586, "loss": 0.0242, "step": 2295 }, { "epoch": 20.5, "grad_norm": 0.09023467451334, "learning_rate": 0.00019447058823529408, "loss": 0.031, "step": 2296 }, { "epoch": 20.51, "grad_norm": 0.1027788519859314, "learning_rate": 0.00019441176470588235, "loss": 0.0351, "step": 2297 }, { "epoch": 20.52, "grad_norm": 0.05779183283448219, "learning_rate": 0.00019435294117647057, "loss": 0.0261, "step": 2298 }, { "epoch": 20.53, "grad_norm": 0.05863555893301964, "learning_rate": 0.0001942941176470588, "loss": 0.0275, "step": 2299 }, { "epoch": 20.54, "grad_norm": 0.058221373707056046, "learning_rate": 0.00019423529411764705, "loss": 0.0272, "step": 2300 }, { "epoch": 20.54, "eval_cer": 0.03385537051861707, "eval_loss": 0.19447565078735352, "eval_runtime": 22.2173, "eval_samples_per_second": 118.916, "eval_steps_per_second": 1.89, "eval_wer": 0.1129115430384768, "step": 2300 }, { "epoch": 20.54, "grad_norm": 0.06473752856254578, "learning_rate": 0.00019417647058823527, "loss": 0.0288, "step": 
2301 }, { "epoch": 20.55, "grad_norm": 0.0694224163889885, "learning_rate": 0.0001941176470588235, "loss": 0.0316, "step": 2302 }, { "epoch": 20.56, "grad_norm": 0.11026883870363235, "learning_rate": 0.00019405882352941176, "loss": 0.0375, "step": 2303 }, { "epoch": 20.57, "grad_norm": 0.06142175942659378, "learning_rate": 0.00019399999999999997, "loss": 0.0287, "step": 2304 }, { "epoch": 20.58, "grad_norm": 0.0559944249689579, "learning_rate": 0.00019394117647058822, "loss": 0.0257, "step": 2305 }, { "epoch": 20.59, "grad_norm": 0.061539456248283386, "learning_rate": 0.00019388235294117643, "loss": 0.0283, "step": 2306 }, { "epoch": 20.6, "grad_norm": 0.059694308787584305, "learning_rate": 0.0001938235294117647, "loss": 0.0289, "step": 2307 }, { "epoch": 20.61, "grad_norm": 0.0647382065653801, "learning_rate": 0.00019376470588235292, "loss": 0.027, "step": 2308 }, { "epoch": 20.62, "grad_norm": 0.11706908047199249, "learning_rate": 0.00019370588235294114, "loss": 0.04, "step": 2309 }, { "epoch": 20.62, "grad_norm": 0.06547684967517853, "learning_rate": 0.0001936470588235294, "loss": 0.0301, "step": 2310 }, { "epoch": 20.63, "grad_norm": 0.056935232132673264, "learning_rate": 0.00019358823529411762, "loss": 0.0267, "step": 2311 }, { "epoch": 20.64, "grad_norm": 0.062037091702222824, "learning_rate": 0.00019352941176470587, "loss": 0.0267, "step": 2312 }, { "epoch": 20.65, "grad_norm": 0.06144816055893898, "learning_rate": 0.0001934705882352941, "loss": 0.0294, "step": 2313 }, { "epoch": 20.66, "grad_norm": 0.0712016299366951, "learning_rate": 0.00019341176470588233, "loss": 0.0307, "step": 2314 }, { "epoch": 20.67, "grad_norm": 0.1184035986661911, "learning_rate": 0.00019335294117647057, "loss": 0.0404, "step": 2315 }, { "epoch": 20.68, "grad_norm": 0.057029321789741516, "learning_rate": 0.0001932941176470588, "loss": 0.0293, "step": 2316 }, { "epoch": 20.69, "grad_norm": 0.05398004129528999, "learning_rate": 0.00019323529411764706, "loss": 0.0237, "step": 2317 }, 
{ "epoch": 20.7, "grad_norm": 0.0657753124833107, "learning_rate": 0.00019317647058823527, "loss": 0.0307, "step": 2318 }, { "epoch": 20.71, "grad_norm": 0.06391260772943497, "learning_rate": 0.0001931176470588235, "loss": 0.031, "step": 2319 }, { "epoch": 20.71, "grad_norm": 0.07627767324447632, "learning_rate": 0.00019305882352941176, "loss": 0.0304, "step": 2320 }, { "epoch": 20.72, "grad_norm": 0.07448402047157288, "learning_rate": 0.00019299999999999997, "loss": 0.028, "step": 2321 }, { "epoch": 20.73, "grad_norm": 0.0848042294383049, "learning_rate": 0.00019294117647058825, "loss": 0.0386, "step": 2322 }, { "epoch": 20.74, "grad_norm": 0.04866953194141388, "learning_rate": 0.00019288235294117646, "loss": 0.0235, "step": 2323 }, { "epoch": 20.75, "grad_norm": 0.05988512188196182, "learning_rate": 0.00019282352941176468, "loss": 0.0285, "step": 2324 }, { "epoch": 20.76, "grad_norm": 0.06434763222932816, "learning_rate": 0.00019276470588235292, "loss": 0.029, "step": 2325 }, { "epoch": 20.77, "grad_norm": 0.06563811749219894, "learning_rate": 0.00019270588235294117, "loss": 0.0282, "step": 2326 }, { "epoch": 20.78, "grad_norm": 0.07576802372932434, "learning_rate": 0.0001926470588235294, "loss": 0.0271, "step": 2327 }, { "epoch": 20.79, "grad_norm": 0.0955667719244957, "learning_rate": 0.00019258823529411762, "loss": 0.0369, "step": 2328 }, { "epoch": 20.79, "grad_norm": 0.05960403382778168, "learning_rate": 0.00019252941176470584, "loss": 0.0289, "step": 2329 }, { "epoch": 20.8, "grad_norm": 0.05784255638718605, "learning_rate": 0.0001924705882352941, "loss": 0.0296, "step": 2330 }, { "epoch": 20.81, "grad_norm": 0.05947304144501686, "learning_rate": 0.00019241176470588233, "loss": 0.0263, "step": 2331 }, { "epoch": 20.82, "grad_norm": 0.06548023223876953, "learning_rate": 0.0001923529411764706, "loss": 0.0295, "step": 2332 }, { "epoch": 20.83, "grad_norm": 0.06398402154445648, "learning_rate": 0.00019229411764705881, "loss": 0.0259, "step": 2333 }, { "epoch": 
20.84, "grad_norm": 0.1204400360584259, "learning_rate": 0.00019223529411764703, "loss": 0.0365, "step": 2334 }, { "epoch": 20.85, "grad_norm": 0.06722953915596008, "learning_rate": 0.00019217647058823527, "loss": 0.0322, "step": 2335 }, { "epoch": 20.86, "grad_norm": 0.05971822142601013, "learning_rate": 0.00019211764705882352, "loss": 0.0296, "step": 2336 }, { "epoch": 20.87, "grad_norm": 0.06084239110350609, "learning_rate": 0.00019205882352941173, "loss": 0.0247, "step": 2337 }, { "epoch": 20.88, "grad_norm": 0.0638883039355278, "learning_rate": 0.00019199999999999998, "loss": 0.0297, "step": 2338 }, { "epoch": 20.88, "grad_norm": 0.06581132858991623, "learning_rate": 0.00019194117647058822, "loss": 0.0311, "step": 2339 }, { "epoch": 20.89, "grad_norm": 0.09877829253673553, "learning_rate": 0.00019188235294117646, "loss": 0.0391, "step": 2340 }, { "epoch": 20.9, "grad_norm": 0.05891522020101547, "learning_rate": 0.00019182352941176468, "loss": 0.0281, "step": 2341 }, { "epoch": 20.91, "grad_norm": 0.05553098022937775, "learning_rate": 0.0001917647058823529, "loss": 0.0274, "step": 2342 }, { "epoch": 20.92, "grad_norm": 0.060920946300029755, "learning_rate": 0.00019170588235294117, "loss": 0.0282, "step": 2343 }, { "epoch": 20.93, "grad_norm": 0.05553551763296127, "learning_rate": 0.00019164705882352938, "loss": 0.0272, "step": 2344 }, { "epoch": 20.94, "grad_norm": 0.05533089116215706, "learning_rate": 0.00019158823529411765, "loss": 0.029, "step": 2345 }, { "epoch": 20.95, "grad_norm": 0.09240368008613586, "learning_rate": 0.00019152941176470587, "loss": 0.0299, "step": 2346 }, { "epoch": 20.96, "grad_norm": 0.11492682993412018, "learning_rate": 0.0001914705882352941, "loss": 0.0426, "step": 2347 }, { "epoch": 20.96, "grad_norm": 0.05383117124438286, "learning_rate": 0.00019141176470588233, "loss": 0.0236, "step": 2348 }, { "epoch": 20.97, "grad_norm": 0.06221175566315651, "learning_rate": 0.00019135294117647057, "loss": 0.0287, "step": 2349 }, { "epoch": 
20.98, "grad_norm": 0.06276100873947144, "learning_rate": 0.00019129411764705882, "loss": 0.0283, "step": 2350 }, { "epoch": 20.99, "grad_norm": 0.07971462607383728, "learning_rate": 0.00019123529411764703, "loss": 0.0297, "step": 2351 }, { "epoch": 21.0, "grad_norm": 0.09926093369722366, "learning_rate": 0.00019117647058823528, "loss": 0.0368, "step": 2352 }, { "epoch": 21.01, "grad_norm": 0.05817103013396263, "learning_rate": 0.00019111764705882352, "loss": 0.0291, "step": 2353 }, { "epoch": 21.02, "grad_norm": 0.05710224062204361, "learning_rate": 0.00019105882352941174, "loss": 0.0237, "step": 2354 }, { "epoch": 21.03, "grad_norm": 0.05489571392536163, "learning_rate": 0.000191, "loss": 0.0268, "step": 2355 }, { "epoch": 21.04, "grad_norm": 0.05414852127432823, "learning_rate": 0.00019094117647058822, "loss": 0.0247, "step": 2356 }, { "epoch": 21.04, "grad_norm": 0.05747884884476662, "learning_rate": 0.00019088235294117644, "loss": 0.0255, "step": 2357 }, { "epoch": 21.05, "grad_norm": 0.07731017470359802, "learning_rate": 0.0001908235294117647, "loss": 0.0293, "step": 2358 }, { "epoch": 21.06, "grad_norm": 0.06619216501712799, "learning_rate": 0.00019076470588235293, "loss": 0.0303, "step": 2359 }, { "epoch": 21.07, "grad_norm": 0.0528254434466362, "learning_rate": 0.00019070588235294117, "loss": 0.0267, "step": 2360 }, { "epoch": 21.08, "grad_norm": 0.054829806089401245, "learning_rate": 0.0001906470588235294, "loss": 0.023, "step": 2361 }, { "epoch": 21.09, "grad_norm": 0.05584556236863136, "learning_rate": 0.00019058823529411763, "loss": 0.026, "step": 2362 }, { "epoch": 21.1, "grad_norm": 0.06575556844472885, "learning_rate": 0.00019052941176470587, "loss": 0.0289, "step": 2363 }, { "epoch": 21.11, "grad_norm": 0.06698581576347351, "learning_rate": 0.0001904705882352941, "loss": 0.025, "step": 2364 }, { "epoch": 21.12, "grad_norm": 0.08821806311607361, "learning_rate": 0.00019041176470588236, "loss": 0.0381, "step": 2365 }, { "epoch": 21.12, "grad_norm": 
0.053715500980615616, "learning_rate": 0.00019035294117647058, "loss": 0.0295, "step": 2366 }, { "epoch": 21.13, "grad_norm": 0.061022866517305374, "learning_rate": 0.0001902941176470588, "loss": 0.0332, "step": 2367 }, { "epoch": 21.14, "grad_norm": 0.05637509003281593, "learning_rate": 0.00019023529411764706, "loss": 0.0253, "step": 2368 }, { "epoch": 21.15, "grad_norm": 0.058392640203237534, "learning_rate": 0.00019017647058823528, "loss": 0.0247, "step": 2369 }, { "epoch": 21.16, "grad_norm": 0.0698927491903305, "learning_rate": 0.0001901176470588235, "loss": 0.0277, "step": 2370 }, { "epoch": 21.17, "grad_norm": 0.09259064495563507, "learning_rate": 0.00019005882352941174, "loss": 0.0378, "step": 2371 }, { "epoch": 21.18, "grad_norm": 0.05133192986249924, "learning_rate": 0.00018999999999999998, "loss": 0.0241, "step": 2372 }, { "epoch": 21.19, "grad_norm": 0.05441851168870926, "learning_rate": 0.00018994117647058823, "loss": 0.0266, "step": 2373 }, { "epoch": 21.2, "grad_norm": 0.0640125423669815, "learning_rate": 0.00018988235294117644, "loss": 0.0279, "step": 2374 }, { "epoch": 21.21, "grad_norm": 0.05552051216363907, "learning_rate": 0.0001898235294117647, "loss": 0.0247, "step": 2375 }, { "epoch": 21.21, "grad_norm": 0.06344220042228699, "learning_rate": 0.00018976470588235293, "loss": 0.0257, "step": 2376 }, { "epoch": 21.22, "grad_norm": 0.13118135929107666, "learning_rate": 0.00018970588235294115, "loss": 0.0351, "step": 2377 }, { "epoch": 21.23, "grad_norm": 0.06830672919750214, "learning_rate": 0.00018964705882352942, "loss": 0.0229, "step": 2378 }, { "epoch": 21.24, "grad_norm": 0.050936296582221985, "learning_rate": 0.00018958823529411763, "loss": 0.0218, "step": 2379 }, { "epoch": 21.25, "grad_norm": 0.05940894037485123, "learning_rate": 0.00018952941176470585, "loss": 0.0279, "step": 2380 }, { "epoch": 21.26, "grad_norm": 0.06089415401220322, "learning_rate": 0.00018947058823529412, "loss": 0.0273, "step": 2381 }, { "epoch": 21.27, "grad_norm": 
0.06544557958841324, "learning_rate": 0.00018941176470588234, "loss": 0.0261, "step": 2382 }, { "epoch": 21.28, "grad_norm": 0.0979183092713356, "learning_rate": 0.00018935294117647058, "loss": 0.0313, "step": 2383 }, { "epoch": 21.29, "grad_norm": 0.11826761811971664, "learning_rate": 0.0001892941176470588, "loss": 0.0387, "step": 2384 }, { "epoch": 21.29, "grad_norm": 0.0608263723552227, "learning_rate": 0.00018923529411764704, "loss": 0.0316, "step": 2385 }, { "epoch": 21.3, "grad_norm": 0.05365098640322685, "learning_rate": 0.00018917647058823528, "loss": 0.0222, "step": 2386 }, { "epoch": 21.31, "grad_norm": 0.058039531111717224, "learning_rate": 0.0001891176470588235, "loss": 0.0278, "step": 2387 }, { "epoch": 21.32, "grad_norm": 0.06796740740537643, "learning_rate": 0.00018905882352941177, "loss": 0.0217, "step": 2388 }, { "epoch": 21.33, "grad_norm": 0.06995701044797897, "learning_rate": 0.00018899999999999999, "loss": 0.028, "step": 2389 }, { "epoch": 21.34, "grad_norm": 0.09670662879943848, "learning_rate": 0.0001889411764705882, "loss": 0.0353, "step": 2390 }, { "epoch": 21.35, "grad_norm": 0.04985029995441437, "learning_rate": 0.00018888235294117647, "loss": 0.0257, "step": 2391 }, { "epoch": 21.36, "grad_norm": 0.057722825556993484, "learning_rate": 0.0001888235294117647, "loss": 0.0241, "step": 2392 }, { "epoch": 21.37, "grad_norm": 0.06418773531913757, "learning_rate": 0.00018876470588235293, "loss": 0.0269, "step": 2393 }, { "epoch": 21.38, "grad_norm": 0.0579729825258255, "learning_rate": 0.00018870588235294115, "loss": 0.0238, "step": 2394 }, { "epoch": 21.38, "grad_norm": 0.08441362529993057, "learning_rate": 0.0001886470588235294, "loss": 0.0322, "step": 2395 }, { "epoch": 21.39, "grad_norm": 0.1134045273065567, "learning_rate": 0.00018858823529411764, "loss": 0.0317, "step": 2396 }, { "epoch": 21.4, "grad_norm": 0.054769083857536316, "learning_rate": 0.00018852941176470585, "loss": 0.026, "step": 2397 }, { "epoch": 21.41, "grad_norm": 
0.06260394304990768, "learning_rate": 0.00018847058823529412, "loss": 0.0256, "step": 2398 }, { "epoch": 21.42, "grad_norm": 0.060272328555583954, "learning_rate": 0.00018841176470588234, "loss": 0.0268, "step": 2399 }, { "epoch": 21.43, "grad_norm": 0.06387102603912354, "learning_rate": 0.00018835294117647056, "loss": 0.0278, "step": 2400 }, { "epoch": 21.43, "eval_cer": 0.03428006599692566, "eval_loss": 0.19497886300086975, "eval_runtime": 22.2455, "eval_samples_per_second": 118.766, "eval_steps_per_second": 1.888, "eval_wer": 0.11310987703292344, "step": 2400 }, { "epoch": 21.44, "grad_norm": 0.056825462728738785, "learning_rate": 0.00018829411764705883, "loss": 0.0226, "step": 2401 }, { "epoch": 21.45, "grad_norm": 0.1038694679737091, "learning_rate": 0.00018823529411764704, "loss": 0.0443, "step": 2402 }, { "epoch": 21.46, "grad_norm": 0.06339828670024872, "learning_rate": 0.00018817647058823526, "loss": 0.0286, "step": 2403 }, { "epoch": 21.46, "grad_norm": 0.05528872832655907, "learning_rate": 0.00018811764705882353, "loss": 0.0278, "step": 2404 }, { "epoch": 21.47, "grad_norm": 0.05746517330408096, "learning_rate": 0.00018805882352941175, "loss": 0.0255, "step": 2405 }, { "epoch": 21.48, "grad_norm": 0.05627716705203056, "learning_rate": 0.000188, "loss": 0.0273, "step": 2406 }, { "epoch": 21.49, "grad_norm": 0.05431535094976425, "learning_rate": 0.0001879411764705882, "loss": 0.0223, "step": 2407 }, { "epoch": 21.5, "grad_norm": 0.0860472247004509, "learning_rate": 0.00018788235294117645, "loss": 0.0303, "step": 2408 }, { "epoch": 21.51, "grad_norm": 0.09086859971284866, "learning_rate": 0.0001878235294117647, "loss": 0.0292, "step": 2409 }, { "epoch": 21.52, "grad_norm": 0.06806120276451111, "learning_rate": 0.0001877647058823529, "loss": 0.0304, "step": 2410 }, { "epoch": 21.53, "grad_norm": 0.05934057757258415, "learning_rate": 0.00018770588235294118, "loss": 0.0285, "step": 2411 }, { "epoch": 21.54, "grad_norm": 0.058098290115594864, "learning_rate": 
0.0001876470588235294, "loss": 0.0246, "step": 2412 }, { "epoch": 21.54, "grad_norm": 0.059182148426771164, "learning_rate": 0.0001875882352941176, "loss": 0.0243, "step": 2413 }, { "epoch": 21.55, "grad_norm": 0.08074504882097244, "learning_rate": 0.00018752941176470588, "loss": 0.0274, "step": 2414 }, { "epoch": 21.56, "grad_norm": 0.10785527527332306, "learning_rate": 0.0001874705882352941, "loss": 0.0407, "step": 2415 }, { "epoch": 21.57, "grad_norm": 0.06091910973191261, "learning_rate": 0.00018741176470588234, "loss": 0.0262, "step": 2416 }, { "epoch": 21.58, "grad_norm": 0.05796665698289871, "learning_rate": 0.00018735294117647059, "loss": 0.0263, "step": 2417 }, { "epoch": 21.59, "grad_norm": 0.06581158936023712, "learning_rate": 0.0001872941176470588, "loss": 0.0297, "step": 2418 }, { "epoch": 21.6, "grad_norm": 0.07226616144180298, "learning_rate": 0.00018723529411764705, "loss": 0.0342, "step": 2419 }, { "epoch": 21.61, "grad_norm": 0.06742393225431442, "learning_rate": 0.00018717647058823526, "loss": 0.0268, "step": 2420 }, { "epoch": 21.62, "grad_norm": 0.11267576366662979, "learning_rate": 0.00018711764705882353, "loss": 0.0432, "step": 2421 }, { "epoch": 21.62, "grad_norm": 0.06181510165333748, "learning_rate": 0.00018705882352941175, "loss": 0.0245, "step": 2422 }, { "epoch": 21.63, "grad_norm": 0.058838147670030594, "learning_rate": 0.00018699999999999996, "loss": 0.0275, "step": 2423 }, { "epoch": 21.64, "grad_norm": 0.05783597007393837, "learning_rate": 0.00018694117647058824, "loss": 0.0237, "step": 2424 }, { "epoch": 21.65, "grad_norm": 0.0620993971824646, "learning_rate": 0.00018688235294117645, "loss": 0.0276, "step": 2425 }, { "epoch": 21.66, "grad_norm": 0.07318071275949478, "learning_rate": 0.0001868235294117647, "loss": 0.0297, "step": 2426 }, { "epoch": 21.67, "grad_norm": 0.12050062417984009, "learning_rate": 0.00018676470588235294, "loss": 0.0359, "step": 2427 }, { "epoch": 21.68, "grad_norm": 0.05566311627626419, "learning_rate": 
0.00018670588235294115, "loss": 0.0258, "step": 2428 }, { "epoch": 21.69, "grad_norm": 0.059560324996709824, "learning_rate": 0.0001866470588235294, "loss": 0.0287, "step": 2429 }, { "epoch": 21.7, "grad_norm": 0.049338553100824356, "learning_rate": 0.00018658823529411761, "loss": 0.021, "step": 2430 }, { "epoch": 21.71, "grad_norm": 0.06321321427822113, "learning_rate": 0.00018652941176470586, "loss": 0.0249, "step": 2431 }, { "epoch": 21.71, "grad_norm": 0.05936066433787346, "learning_rate": 0.0001864705882352941, "loss": 0.0247, "step": 2432 }, { "epoch": 21.72, "grad_norm": 0.08597270399332047, "learning_rate": 0.00018641176470588232, "loss": 0.0308, "step": 2433 }, { "epoch": 21.73, "grad_norm": 0.07818442583084106, "learning_rate": 0.0001863529411764706, "loss": 0.0315, "step": 2434 }, { "epoch": 21.74, "grad_norm": 0.06305693835020065, "learning_rate": 0.0001862941176470588, "loss": 0.0296, "step": 2435 }, { "epoch": 21.75, "grad_norm": 0.06259511411190033, "learning_rate": 0.00018623529411764702, "loss": 0.0287, "step": 2436 }, { "epoch": 21.76, "grad_norm": 0.06238440424203873, "learning_rate": 0.0001861764705882353, "loss": 0.0247, "step": 2437 }, { "epoch": 21.77, "grad_norm": 0.05943160131573677, "learning_rate": 0.0001861176470588235, "loss": 0.0282, "step": 2438 }, { "epoch": 21.78, "grad_norm": 0.0842471495270729, "learning_rate": 0.00018605882352941175, "loss": 0.035, "step": 2439 }, { "epoch": 21.79, "grad_norm": 0.19098611176013947, "learning_rate": 0.000186, "loss": 0.0354, "step": 2440 }, { "epoch": 21.79, "grad_norm": 0.05623243749141693, "learning_rate": 0.0001859411764705882, "loss": 0.029, "step": 2441 }, { "epoch": 21.8, "grad_norm": 0.05806316062808037, "learning_rate": 0.00018588235294117645, "loss": 0.0259, "step": 2442 }, { "epoch": 21.81, "grad_norm": 0.0664072260260582, "learning_rate": 0.00018582352941176467, "loss": 0.0312, "step": 2443 }, { "epoch": 21.82, "grad_norm": 0.058459002524614334, "learning_rate": 0.00018576470588235294, 
"loss": 0.028, "step": 2444 }, { "epoch": 21.83, "grad_norm": 0.07315807044506073, "learning_rate": 0.00018570588235294116, "loss": 0.0258, "step": 2445 }, { "epoch": 21.84, "grad_norm": 0.10811853408813477, "learning_rate": 0.00018564705882352937, "loss": 0.0451, "step": 2446 }, { "epoch": 21.85, "grad_norm": 0.04934457316994667, "learning_rate": 0.00018558823529411764, "loss": 0.0248, "step": 2447 }, { "epoch": 21.86, "grad_norm": 0.0537499338388443, "learning_rate": 0.00018552941176470586, "loss": 0.0274, "step": 2448 }, { "epoch": 21.87, "grad_norm": 0.06229821965098381, "learning_rate": 0.0001854705882352941, "loss": 0.0293, "step": 2449 }, { "epoch": 21.88, "grad_norm": 0.07059778273105621, "learning_rate": 0.00018541176470588235, "loss": 0.03, "step": 2450 }, { "epoch": 21.88, "grad_norm": 0.061576519161462784, "learning_rate": 0.00018535294117647056, "loss": 0.0275, "step": 2451 }, { "epoch": 21.89, "grad_norm": 0.09695862233638763, "learning_rate": 0.0001852941176470588, "loss": 0.0382, "step": 2452 }, { "epoch": 21.9, "grad_norm": 0.054285623133182526, "learning_rate": 0.00018523529411764705, "loss": 0.0254, "step": 2453 }, { "epoch": 21.91, "grad_norm": 0.0540817454457283, "learning_rate": 0.0001851764705882353, "loss": 0.0267, "step": 2454 }, { "epoch": 21.92, "grad_norm": 0.061084263026714325, "learning_rate": 0.0001851176470588235, "loss": 0.0281, "step": 2455 }, { "epoch": 21.93, "grad_norm": 0.056585874408483505, "learning_rate": 0.00018505882352941173, "loss": 0.0264, "step": 2456 }, { "epoch": 21.94, "grad_norm": 0.06149120256304741, "learning_rate": 0.000185, "loss": 0.0251, "step": 2457 }, { "epoch": 21.95, "grad_norm": 0.08614380657672882, "learning_rate": 0.00018494117647058821, "loss": 0.0251, "step": 2458 }, { "epoch": 21.96, "grad_norm": 0.1088634803891182, "learning_rate": 0.00018488235294117646, "loss": 0.0406, "step": 2459 }, { "epoch": 21.96, "grad_norm": 0.0585268959403038, "learning_rate": 0.0001848235294117647, "loss": 0.0262, 
"step": 2460 }, { "epoch": 21.97, "grad_norm": 0.061667658388614655, "learning_rate": 0.00018476470588235292, "loss": 0.0314, "step": 2461 }, { "epoch": 21.98, "grad_norm": 0.06478738784790039, "learning_rate": 0.00018470588235294116, "loss": 0.0292, "step": 2462 }, { "epoch": 21.99, "grad_norm": 0.06945609301328659, "learning_rate": 0.0001846470588235294, "loss": 0.0259, "step": 2463 }, { "epoch": 22.0, "grad_norm": 0.09599378705024719, "learning_rate": 0.00018458823529411762, "loss": 0.0357, "step": 2464 }, { "epoch": 22.01, "grad_norm": 0.045506224036216736, "learning_rate": 0.00018452941176470586, "loss": 0.0249, "step": 2465 }, { "epoch": 22.02, "grad_norm": 0.057261284440755844, "learning_rate": 0.00018447058823529408, "loss": 0.0231, "step": 2466 }, { "epoch": 22.03, "grad_norm": 0.056278035044670105, "learning_rate": 0.00018441176470588235, "loss": 0.027, "step": 2467 }, { "epoch": 22.04, "grad_norm": 0.060056935995817184, "learning_rate": 0.00018435294117647057, "loss": 0.0322, "step": 2468 }, { "epoch": 22.04, "grad_norm": 0.05936797335743904, "learning_rate": 0.00018429411764705878, "loss": 0.0253, "step": 2469 }, { "epoch": 22.05, "grad_norm": 0.10536514967679977, "learning_rate": 0.00018423529411764705, "loss": 0.0284, "step": 2470 }, { "epoch": 22.06, "grad_norm": 0.09906871616840363, "learning_rate": 0.00018417647058823527, "loss": 0.0334, "step": 2471 }, { "epoch": 22.07, "grad_norm": 0.05583423376083374, "learning_rate": 0.0001841176470588235, "loss": 0.0257, "step": 2472 }, { "epoch": 22.08, "grad_norm": 0.05540235713124275, "learning_rate": 0.00018405882352941176, "loss": 0.0242, "step": 2473 }, { "epoch": 22.09, "grad_norm": 0.055019501596689224, "learning_rate": 0.00018399999999999997, "loss": 0.0264, "step": 2474 }, { "epoch": 22.1, "grad_norm": 0.06339872628450394, "learning_rate": 0.00018394117647058822, "loss": 0.0285, "step": 2475 }, { "epoch": 22.11, "grad_norm": 0.07679237425327301, "learning_rate": 0.00018388235294117646, "loss": 0.029, 
"step": 2476 }, { "epoch": 22.12, "grad_norm": 0.09308271110057831, "learning_rate": 0.0001838235294117647, "loss": 0.035, "step": 2477 }, { "epoch": 22.12, "grad_norm": 0.05326671153306961, "learning_rate": 0.00018376470588235292, "loss": 0.0282, "step": 2478 }, { "epoch": 22.13, "grad_norm": 0.053068701177835464, "learning_rate": 0.00018370588235294114, "loss": 0.0256, "step": 2479 }, { "epoch": 22.14, "grad_norm": 0.057122208178043365, "learning_rate": 0.0001836470588235294, "loss": 0.0272, "step": 2480 }, { "epoch": 22.15, "grad_norm": 0.06364129483699799, "learning_rate": 0.00018358823529411762, "loss": 0.0278, "step": 2481 }, { "epoch": 22.16, "grad_norm": 0.06380900740623474, "learning_rate": 0.0001835294117647059, "loss": 0.0276, "step": 2482 }, { "epoch": 22.17, "grad_norm": 0.11803562939167023, "learning_rate": 0.0001834705882352941, "loss": 0.0366, "step": 2483 }, { "epoch": 22.18, "grad_norm": 0.058370329439640045, "learning_rate": 0.00018341176470588233, "loss": 0.0266, "step": 2484 }, { "epoch": 22.19, "grad_norm": 0.054435014724731445, "learning_rate": 0.00018335294117647057, "loss": 0.0246, "step": 2485 }, { "epoch": 22.2, "grad_norm": 0.057081181555986404, "learning_rate": 0.0001832941176470588, "loss": 0.0262, "step": 2486 }, { "epoch": 22.21, "grad_norm": 0.05508088320493698, "learning_rate": 0.00018323529411764706, "loss": 0.0256, "step": 2487 }, { "epoch": 22.21, "grad_norm": 0.06385696679353714, "learning_rate": 0.00018317647058823527, "loss": 0.0292, "step": 2488 }, { "epoch": 22.22, "grad_norm": 0.13734489679336548, "learning_rate": 0.0001831176470588235, "loss": 0.0457, "step": 2489 }, { "epoch": 22.23, "grad_norm": 0.05183258280158043, "learning_rate": 0.00018305882352941176, "loss": 0.0255, "step": 2490 }, { "epoch": 22.24, "grad_norm": 0.05746317282319069, "learning_rate": 0.00018299999999999998, "loss": 0.0322, "step": 2491 }, { "epoch": 22.25, "grad_norm": 0.05643812194466591, "learning_rate": 0.00018294117647058825, "loss": 0.0242, 
"step": 2492 }, { "epoch": 22.26, "grad_norm": 0.053931787610054016, "learning_rate": 0.00018288235294117646, "loss": 0.0261, "step": 2493 }, { "epoch": 22.27, "grad_norm": 0.07110514491796494, "learning_rate": 0.00018282352941176468, "loss": 0.0338, "step": 2494 }, { "epoch": 22.28, "grad_norm": 0.08726546913385391, "learning_rate": 0.00018276470588235292, "loss": 0.0325, "step": 2495 }, { "epoch": 22.29, "grad_norm": 0.06899622082710266, "learning_rate": 0.00018270588235294117, "loss": 0.0313, "step": 2496 }, { "epoch": 22.29, "grad_norm": 0.05990142747759819, "learning_rate": 0.00018264705882352938, "loss": 0.0287, "step": 2497 }, { "epoch": 22.3, "grad_norm": 0.055539678782224655, "learning_rate": 0.00018258823529411763, "loss": 0.0233, "step": 2498 }, { "epoch": 22.31, "grad_norm": 0.06318056583404541, "learning_rate": 0.00018252941176470587, "loss": 0.0279, "step": 2499 }, { "epoch": 22.32, "grad_norm": 0.06352417171001434, "learning_rate": 0.0001824705882352941, "loss": 0.0254, "step": 2500 }, { "epoch": 22.32, "eval_cer": 0.03299846282787055, "eval_loss": 0.20145094394683838, "eval_runtime": 23.1799, "eval_samples_per_second": 113.978, "eval_steps_per_second": 1.812, "eval_wer": 0.10973819912733042, "step": 2500 }, { "epoch": 22.33, "grad_norm": 0.08036191761493683, "learning_rate": 0.00018241176470588233, "loss": 0.0242, "step": 2501 }, { "epoch": 22.34, "grad_norm": 0.11871591210365295, "learning_rate": 0.00018235294117647055, "loss": 0.0309, "step": 2502 }, { "epoch": 22.35, "grad_norm": 0.055190663784742355, "learning_rate": 0.00018229411764705882, "loss": 0.0282, "step": 2503 }, { "epoch": 22.36, "grad_norm": 0.06247410550713539, "learning_rate": 0.00018223529411764703, "loss": 0.0249, "step": 2504 }, { "epoch": 22.37, "grad_norm": 0.05551055073738098, "learning_rate": 0.0001821764705882353, "loss": 0.0262, "step": 2505 }, { "epoch": 22.38, "grad_norm": 0.05253708362579346, "learning_rate": 0.00018211764705882352, "loss": 0.0263, "step": 2506 }, { 
"epoch": 22.38, "grad_norm": 0.06559304893016815, "learning_rate": 0.00018205882352941174, "loss": 0.0286, "step": 2507 }, { "epoch": 22.39, "grad_norm": 0.10570012778043747, "learning_rate": 0.00018199999999999998, "loss": 0.0368, "step": 2508 }, { "epoch": 22.4, "grad_norm": 0.06190425157546997, "learning_rate": 0.00018194117647058822, "loss": 0.027, "step": 2509 }, { "epoch": 22.41, "grad_norm": 0.050871044397354126, "learning_rate": 0.00018188235294117647, "loss": 0.0248, "step": 2510 }, { "epoch": 22.42, "grad_norm": 0.05783969163894653, "learning_rate": 0.00018182352941176468, "loss": 0.0253, "step": 2511 }, { "epoch": 22.43, "grad_norm": 0.057936739176511765, "learning_rate": 0.00018176470588235293, "loss": 0.025, "step": 2512 }, { "epoch": 22.44, "grad_norm": 0.05643759295344353, "learning_rate": 0.00018170588235294117, "loss": 0.0255, "step": 2513 }, { "epoch": 22.45, "grad_norm": 0.10881562530994415, "learning_rate": 0.00018164705882352939, "loss": 0.0335, "step": 2514 }, { "epoch": 22.46, "grad_norm": 0.060932427644729614, "learning_rate": 0.00018158823529411766, "loss": 0.0291, "step": 2515 }, { "epoch": 22.46, "grad_norm": 0.054830025881528854, "learning_rate": 0.00018152941176470587, "loss": 0.0255, "step": 2516 }, { "epoch": 22.47, "grad_norm": 0.05401693284511566, "learning_rate": 0.0001814705882352941, "loss": 0.0223, "step": 2517 }, { "epoch": 22.48, "grad_norm": 0.06534389406442642, "learning_rate": 0.00018141176470588236, "loss": 0.0272, "step": 2518 }, { "epoch": 22.49, "grad_norm": 0.05824942886829376, "learning_rate": 0.00018135294117647058, "loss": 0.026, "step": 2519 }, { "epoch": 22.5, "grad_norm": 0.0738939642906189, "learning_rate": 0.00018129411764705882, "loss": 0.0286, "step": 2520 }, { "epoch": 22.51, "grad_norm": 0.11110525578260422, "learning_rate": 0.00018123529411764703, "loss": 0.0299, "step": 2521 }, { "epoch": 22.52, "grad_norm": 0.0617167092859745, "learning_rate": 0.00018117647058823528, "loss": 0.0286, "step": 2522 }, { 
"epoch": 22.53, "grad_norm": 0.05880562588572502, "learning_rate": 0.00018111764705882352, "loss": 0.03, "step": 2523 }, { "epoch": 22.54, "grad_norm": 0.05205706134438515, "learning_rate": 0.00018105882352941174, "loss": 0.0253, "step": 2524 }, { "epoch": 22.54, "grad_norm": 0.05533972755074501, "learning_rate": 0.000181, "loss": 0.0282, "step": 2525 }, { "epoch": 22.55, "grad_norm": 0.06595009565353394, "learning_rate": 0.00018094117647058822, "loss": 0.0265, "step": 2526 }, { "epoch": 22.56, "grad_norm": 0.09634100645780563, "learning_rate": 0.00018088235294117644, "loss": 0.0436, "step": 2527 }, { "epoch": 22.57, "grad_norm": 0.045013271272182465, "learning_rate": 0.0001808235294117647, "loss": 0.0234, "step": 2528 }, { "epoch": 22.58, "grad_norm": 0.05480833351612091, "learning_rate": 0.00018076470588235293, "loss": 0.03, "step": 2529 }, { "epoch": 22.59, "grad_norm": 0.058044854551553726, "learning_rate": 0.00018070588235294114, "loss": 0.0253, "step": 2530 }, { "epoch": 22.6, "grad_norm": 0.061460185796022415, "learning_rate": 0.0001806470588235294, "loss": 0.0275, "step": 2531 }, { "epoch": 22.61, "grad_norm": 0.06623505055904388, "learning_rate": 0.00018058823529411763, "loss": 0.0252, "step": 2532 }, { "epoch": 22.62, "grad_norm": 0.11571113765239716, "learning_rate": 0.00018052941176470587, "loss": 0.043, "step": 2533 }, { "epoch": 22.62, "grad_norm": 0.048881854861974716, "learning_rate": 0.0001804705882352941, "loss": 0.0262, "step": 2534 }, { "epoch": 22.63, "grad_norm": 0.0761791467666626, "learning_rate": 0.00018041176470588233, "loss": 0.0238, "step": 2535 }, { "epoch": 22.64, "grad_norm": 0.06024662405252457, "learning_rate": 0.00018035294117647058, "loss": 0.0212, "step": 2536 }, { "epoch": 22.65, "grad_norm": 0.06886886805295944, "learning_rate": 0.0001802941176470588, "loss": 0.0257, "step": 2537 }, { "epoch": 22.66, "grad_norm": 0.06255342811346054, "learning_rate": 0.00018023529411764706, "loss": 0.0252, "step": 2538 }, { "epoch": 22.67, 
"grad_norm": 0.10939238965511322, "learning_rate": 0.00018017647058823528, "loss": 0.0438, "step": 2539 }, { "epoch": 22.68, "grad_norm": 0.05792367085814476, "learning_rate": 0.0001801176470588235, "loss": 0.0261, "step": 2540 }, { "epoch": 22.69, "grad_norm": 0.05639879032969475, "learning_rate": 0.00018005882352941177, "loss": 0.0262, "step": 2541 }, { "epoch": 22.7, "grad_norm": 0.06334155797958374, "learning_rate": 0.00017999999999999998, "loss": 0.0247, "step": 2542 }, { "epoch": 22.71, "grad_norm": 0.0545668862760067, "learning_rate": 0.00017994117647058823, "loss": 0.023, "step": 2543 }, { "epoch": 22.71, "grad_norm": 0.05709097906947136, "learning_rate": 0.00017988235294117644, "loss": 0.0208, "step": 2544 }, { "epoch": 22.72, "grad_norm": 0.08601279556751251, "learning_rate": 0.0001798235294117647, "loss": 0.0334, "step": 2545 }, { "epoch": 22.73, "grad_norm": 0.15349067747592926, "learning_rate": 0.00017976470588235293, "loss": 0.0343, "step": 2546 }, { "epoch": 22.74, "grad_norm": 0.065592460334301, "learning_rate": 0.00017970588235294115, "loss": 0.0257, "step": 2547 }, { "epoch": 22.75, "grad_norm": 0.05844647437334061, "learning_rate": 0.00017964705882352942, "loss": 0.0256, "step": 2548 }, { "epoch": 22.76, "grad_norm": 0.05912429094314575, "learning_rate": 0.00017958823529411763, "loss": 0.0249, "step": 2549 }, { "epoch": 22.77, "grad_norm": 0.05476592481136322, "learning_rate": 0.00017952941176470585, "loss": 0.0238, "step": 2550 }, { "epoch": 22.78, "grad_norm": 0.06735950708389282, "learning_rate": 0.00017947058823529412, "loss": 0.0245, "step": 2551 }, { "epoch": 22.79, "grad_norm": 0.09796684235334396, "learning_rate": 0.00017941176470588234, "loss": 0.0369, "step": 2552 }, { "epoch": 22.79, "grad_norm": 0.05760080739855766, "learning_rate": 0.00017935294117647058, "loss": 0.0253, "step": 2553 }, { "epoch": 22.8, "grad_norm": 0.05072222277522087, "learning_rate": 0.0001792941176470588, "loss": 0.0271, "step": 2554 }, { "epoch": 22.81, 
"grad_norm": 0.05979233235120773, "learning_rate": 0.00017923529411764704, "loss": 0.0307, "step": 2555 }, { "epoch": 22.82, "grad_norm": 0.0697891116142273, "learning_rate": 0.00017917647058823528, "loss": 0.0258, "step": 2556 }, { "epoch": 22.83, "grad_norm": 0.06077735498547554, "learning_rate": 0.0001791176470588235, "loss": 0.0271, "step": 2557 }, { "epoch": 22.84, "grad_norm": 0.11549992859363556, "learning_rate": 0.00017905882352941174, "loss": 0.0383, "step": 2558 }, { "epoch": 22.85, "grad_norm": 0.054385095834732056, "learning_rate": 0.000179, "loss": 0.026, "step": 2559 }, { "epoch": 22.86, "grad_norm": 0.06153104826807976, "learning_rate": 0.0001789411764705882, "loss": 0.0284, "step": 2560 }, { "epoch": 22.87, "grad_norm": 0.05704614892601967, "learning_rate": 0.00017888235294117647, "loss": 0.0236, "step": 2561 }, { "epoch": 22.88, "grad_norm": 0.06553328037261963, "learning_rate": 0.0001788235294117647, "loss": 0.0254, "step": 2562 }, { "epoch": 22.88, "grad_norm": 0.06245945021510124, "learning_rate": 0.0001787647058823529, "loss": 0.0269, "step": 2563 }, { "epoch": 22.89, "grad_norm": 0.12217217683792114, "learning_rate": 0.00017870588235294118, "loss": 0.0344, "step": 2564 }, { "epoch": 22.9, "grad_norm": 0.0552246980369091, "learning_rate": 0.0001786470588235294, "loss": 0.025, "step": 2565 }, { "epoch": 22.91, "grad_norm": 0.05491182580590248, "learning_rate": 0.00017858823529411764, "loss": 0.0272, "step": 2566 }, { "epoch": 22.92, "grad_norm": 0.056860268115997314, "learning_rate": 0.00017852941176470585, "loss": 0.0282, "step": 2567 }, { "epoch": 22.93, "grad_norm": 0.05380900576710701, "learning_rate": 0.0001784705882352941, "loss": 0.0226, "step": 2568 }, { "epoch": 22.94, "grad_norm": 0.06300035864114761, "learning_rate": 0.00017841176470588234, "loss": 0.0272, "step": 2569 }, { "epoch": 22.95, "grad_norm": 0.0868477001786232, "learning_rate": 0.00017835294117647056, "loss": 0.026, "step": 2570 }, { "epoch": 22.96, "grad_norm": 
0.09526658058166504, "learning_rate": 0.00017829411764705883, "loss": 0.0296, "step": 2571 }, { "epoch": 22.96, "grad_norm": 0.05786629021167755, "learning_rate": 0.00017823529411764704, "loss": 0.0281, "step": 2572 }, { "epoch": 22.97, "grad_norm": 0.06393415480852127, "learning_rate": 0.00017817647058823526, "loss": 0.0285, "step": 2573 }, { "epoch": 22.98, "grad_norm": 0.059029340744018555, "learning_rate": 0.00017811764705882353, "loss": 0.0214, "step": 2574 }, { "epoch": 22.99, "grad_norm": 0.060377780348062515, "learning_rate": 0.00017805882352941175, "loss": 0.0245, "step": 2575 }, { "epoch": 23.0, "grad_norm": 0.07968320697546005, "learning_rate": 0.000178, "loss": 0.0305, "step": 2576 }, { "epoch": 23.01, "grad_norm": 0.055544592440128326, "learning_rate": 0.00017794117647058823, "loss": 0.0254, "step": 2577 }, { "epoch": 23.02, "grad_norm": 0.05564003810286522, "learning_rate": 0.00017788235294117645, "loss": 0.0275, "step": 2578 }, { "epoch": 23.03, "grad_norm": 0.05317571386694908, "learning_rate": 0.0001778235294117647, "loss": 0.0241, "step": 2579 }, { "epoch": 23.04, "grad_norm": 0.05589379370212555, "learning_rate": 0.0001777647058823529, "loss": 0.0249, "step": 2580 }, { "epoch": 23.04, "grad_norm": 0.06784486025571823, "learning_rate": 0.00017770588235294118, "loss": 0.029, "step": 2581 }, { "epoch": 23.05, "grad_norm": 0.07419680804014206, "learning_rate": 0.0001776470588235294, "loss": 0.0248, "step": 2582 }, { "epoch": 23.06, "grad_norm": 0.07389215379953384, "learning_rate": 0.0001775882352941176, "loss": 0.0284, "step": 2583 }, { "epoch": 23.07, "grad_norm": 0.05012824758887291, "learning_rate": 0.00017752941176470588, "loss": 0.0268, "step": 2584 }, { "epoch": 23.08, "grad_norm": 0.05442946404218674, "learning_rate": 0.0001774705882352941, "loss": 0.0227, "step": 2585 }, { "epoch": 23.09, "grad_norm": 0.055399347096681595, "learning_rate": 0.00017741176470588234, "loss": 0.0294, "step": 2586 }, { "epoch": 23.1, "grad_norm": 
0.05383411794900894, "learning_rate": 0.00017735294117647059, "loss": 0.023, "step": 2587 }, { "epoch": 23.11, "grad_norm": 0.06131847947835922, "learning_rate": 0.0001772941176470588, "loss": 0.0241, "step": 2588 }, { "epoch": 23.12, "grad_norm": 0.10607028007507324, "learning_rate": 0.00017723529411764705, "loss": 0.0326, "step": 2589 }, { "epoch": 23.12, "grad_norm": 0.05809440463781357, "learning_rate": 0.00017717647058823526, "loss": 0.0255, "step": 2590 }, { "epoch": 23.13, "grad_norm": 0.05491780862212181, "learning_rate": 0.0001771176470588235, "loss": 0.0238, "step": 2591 }, { "epoch": 23.14, "grad_norm": 0.0558369942009449, "learning_rate": 0.00017705882352941175, "loss": 0.0262, "step": 2592 }, { "epoch": 23.15, "grad_norm": 0.05765916779637337, "learning_rate": 0.00017699999999999997, "loss": 0.0234, "step": 2593 }, { "epoch": 23.16, "grad_norm": 0.05445518344640732, "learning_rate": 0.00017694117647058824, "loss": 0.0236, "step": 2594 }, { "epoch": 23.17, "grad_norm": 0.1029948964715004, "learning_rate": 0.00017688235294117645, "loss": 0.0375, "step": 2595 }, { "epoch": 23.18, "grad_norm": 0.05391351133584976, "learning_rate": 0.00017682352941176467, "loss": 0.0261, "step": 2596 }, { "epoch": 23.19, "grad_norm": 0.05429863557219505, "learning_rate": 0.00017676470588235294, "loss": 0.0277, "step": 2597 }, { "epoch": 23.2, "grad_norm": 0.05892942100763321, "learning_rate": 0.00017670588235294116, "loss": 0.0261, "step": 2598 }, { "epoch": 23.21, "grad_norm": 0.05917773395776749, "learning_rate": 0.0001766470588235294, "loss": 0.026, "step": 2599 }, { "epoch": 23.21, "grad_norm": 0.057894766330718994, "learning_rate": 0.00017658823529411764, "loss": 0.0204, "step": 2600 }, { "epoch": 23.21, "eval_cer": 0.032618867754338095, "eval_loss": 0.19516734778881073, "eval_runtime": 22.3281, "eval_samples_per_second": 118.326, "eval_steps_per_second": 1.881, "eval_wer": 0.10694168980563269, "step": 2600 }, { "epoch": 23.22, "grad_norm": 0.1185203269124031, 
"learning_rate": 0.00017652941176470586, "loss": 0.0386, "step": 2601 }, { "epoch": 23.23, "grad_norm": 0.05277341604232788, "learning_rate": 0.0001764705882352941, "loss": 0.0236, "step": 2602 }, { "epoch": 23.24, "grad_norm": 0.049617115408182144, "learning_rate": 0.00017641176470588232, "loss": 0.0259, "step": 2603 }, { "epoch": 23.25, "grad_norm": 0.05507615953683853, "learning_rate": 0.0001763529411764706, "loss": 0.0277, "step": 2604 }, { "epoch": 23.26, "grad_norm": 0.05926371365785599, "learning_rate": 0.0001762941176470588, "loss": 0.0277, "step": 2605 }, { "epoch": 23.27, "grad_norm": 0.06230008229613304, "learning_rate": 0.00017623529411764702, "loss": 0.028, "step": 2606 }, { "epoch": 23.28, "grad_norm": 0.08549349755048752, "learning_rate": 0.0001761764705882353, "loss": 0.0283, "step": 2607 }, { "epoch": 23.29, "grad_norm": 0.10650275647640228, "learning_rate": 0.0001761176470588235, "loss": 0.0343, "step": 2608 }, { "epoch": 23.29, "grad_norm": 0.05561896041035652, "learning_rate": 0.00017605882352941175, "loss": 0.0278, "step": 2609 }, { "epoch": 23.3, "grad_norm": 0.05234317108988762, "learning_rate": 0.000176, "loss": 0.0284, "step": 2610 }, { "epoch": 23.31, "grad_norm": 0.05738748237490654, "learning_rate": 0.0001759411764705882, "loss": 0.0246, "step": 2611 }, { "epoch": 23.32, "grad_norm": 0.054971832782030106, "learning_rate": 0.00017588235294117646, "loss": 0.0228, "step": 2612 }, { "epoch": 23.33, "grad_norm": 0.06708146631717682, "learning_rate": 0.0001758235294117647, "loss": 0.0276, "step": 2613 }, { "epoch": 23.34, "grad_norm": 0.09368041157722473, "learning_rate": 0.00017576470588235294, "loss": 0.0365, "step": 2614 }, { "epoch": 23.35, "grad_norm": 0.051021311432123184, "learning_rate": 0.00017570588235294116, "loss": 0.0237, "step": 2615 }, { "epoch": 23.36, "grad_norm": 0.05173582211136818, "learning_rate": 0.00017564705882352937, "loss": 0.0253, "step": 2616 }, { "epoch": 23.37, "grad_norm": 0.05599309131503105, "learning_rate": 
0.00017558823529411765, "loss": 0.0244, "step": 2617 }, { "epoch": 23.38, "grad_norm": 0.05602375045418739, "learning_rate": 0.00017552941176470586, "loss": 0.0268, "step": 2618 }, { "epoch": 23.38, "grad_norm": 0.06255139410495758, "learning_rate": 0.0001754705882352941, "loss": 0.0245, "step": 2619 }, { "epoch": 23.39, "grad_norm": 0.12780724465847015, "learning_rate": 0.00017541176470588235, "loss": 0.0375, "step": 2620 }, { "epoch": 23.4, "grad_norm": 0.0565602108836174, "learning_rate": 0.00017535294117647056, "loss": 0.0251, "step": 2621 }, { "epoch": 23.41, "grad_norm": 0.05471047759056091, "learning_rate": 0.0001752941176470588, "loss": 0.0221, "step": 2622 }, { "epoch": 23.42, "grad_norm": 0.05079091712832451, "learning_rate": 0.00017523529411764705, "loss": 0.0248, "step": 2623 }, { "epoch": 23.43, "grad_norm": 0.05325259268283844, "learning_rate": 0.00017517647058823527, "loss": 0.0267, "step": 2624 }, { "epoch": 23.44, "grad_norm": 0.0550168938934803, "learning_rate": 0.0001751176470588235, "loss": 0.0236, "step": 2625 }, { "epoch": 23.45, "grad_norm": 0.09860525280237198, "learning_rate": 0.00017505882352941173, "loss": 0.0362, "step": 2626 }, { "epoch": 23.46, "grad_norm": 0.04859638586640358, "learning_rate": 0.000175, "loss": 0.0257, "step": 2627 }, { "epoch": 23.46, "grad_norm": 0.05800432711839676, "learning_rate": 0.00017494117647058821, "loss": 0.0281, "step": 2628 }, { "epoch": 23.47, "grad_norm": 0.05190037563443184, "learning_rate": 0.00017488235294117643, "loss": 0.0254, "step": 2629 }, { "epoch": 23.48, "grad_norm": 0.05299176275730133, "learning_rate": 0.0001748235294117647, "loss": 0.0249, "step": 2630 }, { "epoch": 23.49, "grad_norm": 0.053858134895563126, "learning_rate": 0.00017476470588235292, "loss": 0.0246, "step": 2631 }, { "epoch": 23.5, "grad_norm": 0.08895876258611679, "learning_rate": 0.00017470588235294116, "loss": 0.0304, "step": 2632 }, { "epoch": 23.51, "grad_norm": 0.08598772436380386, "learning_rate": 
0.0001746470588235294, "loss": 0.0268, "step": 2633 }, { "epoch": 23.52, "grad_norm": 0.05937615782022476, "learning_rate": 0.00017458823529411762, "loss": 0.0239, "step": 2634 }, { "epoch": 23.53, "grad_norm": 0.057922814041376114, "learning_rate": 0.00017452941176470586, "loss": 0.0261, "step": 2635 }, { "epoch": 23.54, "grad_norm": 0.056914910674095154, "learning_rate": 0.0001744705882352941, "loss": 0.0247, "step": 2636 }, { "epoch": 23.54, "grad_norm": 0.06148446351289749, "learning_rate": 0.00017441176470588235, "loss": 0.0233, "step": 2637 }, { "epoch": 23.55, "grad_norm": 0.06675130128860474, "learning_rate": 0.00017435294117647057, "loss": 0.0264, "step": 2638 }, { "epoch": 23.56, "grad_norm": 0.11668359488248825, "learning_rate": 0.00017429411764705878, "loss": 0.0378, "step": 2639 }, { "epoch": 23.57, "grad_norm": 0.051918674260377884, "learning_rate": 0.00017423529411764705, "loss": 0.0249, "step": 2640 }, { "epoch": 23.58, "grad_norm": 0.053308840841054916, "learning_rate": 0.00017417647058823527, "loss": 0.023, "step": 2641 }, { "epoch": 23.59, "grad_norm": 0.06300992518663406, "learning_rate": 0.00017411764705882354, "loss": 0.0296, "step": 2642 }, { "epoch": 23.6, "grad_norm": 0.05485334247350693, "learning_rate": 0.00017405882352941176, "loss": 0.0222, "step": 2643 }, { "epoch": 23.61, "grad_norm": 0.061029527336359024, "learning_rate": 0.00017399999999999997, "loss": 0.0218, "step": 2644 }, { "epoch": 23.62, "grad_norm": 0.12146846950054169, "learning_rate": 0.00017394117647058822, "loss": 0.0323, "step": 2645 }, { "epoch": 23.62, "grad_norm": 0.05135010927915573, "learning_rate": 0.00017388235294117646, "loss": 0.0234, "step": 2646 }, { "epoch": 23.63, "grad_norm": 0.0580328069627285, "learning_rate": 0.0001738235294117647, "loss": 0.0282, "step": 2647 }, { "epoch": 23.64, "grad_norm": 0.05992249771952629, "learning_rate": 0.00017376470588235292, "loss": 0.0273, "step": 2648 }, { "epoch": 23.65, "grad_norm": 0.05653415247797966, "learning_rate": 
0.00017370588235294114, "loss": 0.0242, "step": 2649 }, { "epoch": 23.66, "grad_norm": 0.05682414025068283, "learning_rate": 0.0001736470588235294, "loss": 0.0262, "step": 2650 }, { "epoch": 23.67, "grad_norm": 0.10287006944417953, "learning_rate": 0.00017358823529411762, "loss": 0.032, "step": 2651 }, { "epoch": 23.68, "grad_norm": 0.060233984142541885, "learning_rate": 0.0001735294117647059, "loss": 0.0282, "step": 2652 }, { "epoch": 23.69, "grad_norm": 0.059896353632211685, "learning_rate": 0.0001734705882352941, "loss": 0.0272, "step": 2653 }, { "epoch": 23.7, "grad_norm": 0.05700116604566574, "learning_rate": 0.00017341176470588233, "loss": 0.0274, "step": 2654 }, { "epoch": 23.71, "grad_norm": 0.05264915153384209, "learning_rate": 0.00017335294117647057, "loss": 0.0199, "step": 2655 }, { "epoch": 23.71, "grad_norm": 0.06769115477800369, "learning_rate": 0.00017329411764705881, "loss": 0.027, "step": 2656 }, { "epoch": 23.72, "grad_norm": 0.07319138199090958, "learning_rate": 0.00017323529411764703, "loss": 0.0248, "step": 2657 }, { "epoch": 23.73, "grad_norm": 0.12389098107814789, "learning_rate": 0.00017317647058823527, "loss": 0.0416, "step": 2658 }, { "epoch": 23.74, "grad_norm": 0.05109129846096039, "learning_rate": 0.00017311764705882352, "loss": 0.0264, "step": 2659 }, { "epoch": 23.75, "grad_norm": 0.05779051408171654, "learning_rate": 0.00017305882352941176, "loss": 0.0271, "step": 2660 }, { "epoch": 23.76, "grad_norm": 0.05919601768255234, "learning_rate": 0.00017299999999999998, "loss": 0.0274, "step": 2661 }, { "epoch": 23.77, "grad_norm": 0.05351078137755394, "learning_rate": 0.0001729411764705882, "loss": 0.0236, "step": 2662 }, { "epoch": 23.78, "grad_norm": 0.06535013765096664, "learning_rate": 0.00017288235294117646, "loss": 0.0235, "step": 2663 }, { "epoch": 23.79, "grad_norm": 0.23863983154296875, "learning_rate": 0.00017282352941176468, "loss": 0.0435, "step": 2664 }, { "epoch": 23.79, "grad_norm": 0.05540243908762932, "learning_rate": 
0.00017276470588235295, "loss": 0.0268, "step": 2665 }, { "epoch": 23.8, "grad_norm": 0.08166693896055222, "learning_rate": 0.00017270588235294117, "loss": 0.0284, "step": 2666 }, { "epoch": 23.81, "grad_norm": 0.09516628831624985, "learning_rate": 0.00017264705882352938, "loss": 0.0332, "step": 2667 }, { "epoch": 23.82, "grad_norm": 0.061793334782123566, "learning_rate": 0.00017258823529411763, "loss": 0.0272, "step": 2668 }, { "epoch": 23.83, "grad_norm": 0.06748347729444504, "learning_rate": 0.00017252941176470587, "loss": 0.0236, "step": 2669 }, { "epoch": 23.84, "grad_norm": 0.10607940703630447, "learning_rate": 0.0001724705882352941, "loss": 0.0381, "step": 2670 }, { "epoch": 23.85, "grad_norm": 0.06220061331987381, "learning_rate": 0.00017241176470588233, "loss": 0.0288, "step": 2671 }, { "epoch": 23.86, "grad_norm": 0.05508628487586975, "learning_rate": 0.00017235294117647057, "loss": 0.0244, "step": 2672 }, { "epoch": 23.87, "grad_norm": 0.05792519077658653, "learning_rate": 0.00017229411764705882, "loss": 0.0268, "step": 2673 }, { "epoch": 23.88, "grad_norm": 0.06214618682861328, "learning_rate": 0.00017223529411764703, "loss": 0.0289, "step": 2674 }, { "epoch": 23.88, "grad_norm": 0.06868556886911392, "learning_rate": 0.0001721764705882353, "loss": 0.026, "step": 2675 }, { "epoch": 23.89, "grad_norm": 0.1072574034333229, "learning_rate": 0.00017211764705882352, "loss": 0.0369, "step": 2676 }, { "epoch": 23.9, "grad_norm": 0.05920523777604103, "learning_rate": 0.00017205882352941174, "loss": 0.0287, "step": 2677 }, { "epoch": 23.91, "grad_norm": 0.057916879653930664, "learning_rate": 0.000172, "loss": 0.0277, "step": 2678 }, { "epoch": 23.92, "grad_norm": 0.06307437270879745, "learning_rate": 0.00017194117647058822, "loss": 0.0253, "step": 2679 }, { "epoch": 23.93, "grad_norm": 0.06734710931777954, "learning_rate": 0.00017188235294117647, "loss": 0.0253, "step": 2680 }, { "epoch": 23.94, "grad_norm": 0.06740694493055344, "learning_rate": 
0.00017182352941176468, "loss": 0.0249, "step": 2681 }, { "epoch": 23.95, "grad_norm": 0.09946932643651962, "learning_rate": 0.00017176470588235293, "loss": 0.0312, "step": 2682 }, { "epoch": 23.96, "grad_norm": 0.10713358968496323, "learning_rate": 0.00017170588235294117, "loss": 0.0309, "step": 2683 }, { "epoch": 23.96, "grad_norm": 0.057803675532341, "learning_rate": 0.00017164705882352939, "loss": 0.0273, "step": 2684 }, { "epoch": 23.97, "grad_norm": 0.06359840929508209, "learning_rate": 0.00017158823529411766, "loss": 0.0229, "step": 2685 }, { "epoch": 23.98, "grad_norm": 0.064893439412117, "learning_rate": 0.00017152941176470587, "loss": 0.0269, "step": 2686 }, { "epoch": 23.99, "grad_norm": 0.06777261942625046, "learning_rate": 0.0001714705882352941, "loss": 0.0276, "step": 2687 }, { "epoch": 24.0, "grad_norm": 0.06847578287124634, "learning_rate": 0.00017141176470588236, "loss": 0.0293, "step": 2688 }, { "epoch": 24.01, "grad_norm": 0.05095682293176651, "learning_rate": 0.00017135294117647058, "loss": 0.0235, "step": 2689 }, { "epoch": 24.02, "grad_norm": 0.047058165073394775, "learning_rate": 0.0001712941176470588, "loss": 0.0228, "step": 2690 }, { "epoch": 24.03, "grad_norm": 0.049135103821754456, "learning_rate": 0.00017123529411764704, "loss": 0.0221, "step": 2691 }, { "epoch": 24.04, "grad_norm": 0.053765006363391876, "learning_rate": 0.00017117647058823528, "loss": 0.0236, "step": 2692 }, { "epoch": 24.04, "grad_norm": 0.05885041877627373, "learning_rate": 0.00017111764705882352, "loss": 0.0269, "step": 2693 }, { "epoch": 24.05, "grad_norm": 0.09634266793727875, "learning_rate": 0.00017105882352941174, "loss": 0.0309, "step": 2694 }, { "epoch": 24.06, "grad_norm": 0.1012498065829277, "learning_rate": 0.00017099999999999998, "loss": 0.0309, "step": 2695 }, { "epoch": 24.07, "grad_norm": 0.046449918299913406, "learning_rate": 0.00017094117647058823, "loss": 0.0222, "step": 2696 }, { "epoch": 24.08, "grad_norm": 0.0512268990278244, "learning_rate": 
0.00017088235294117644, "loss": 0.026, "step": 2697 }, { "epoch": 24.09, "grad_norm": 0.05378634110093117, "learning_rate": 0.0001708235294117647, "loss": 0.0234, "step": 2698 }, { "epoch": 24.1, "grad_norm": 0.053176265209913254, "learning_rate": 0.00017076470588235293, "loss": 0.0225, "step": 2699 }, { "epoch": 24.11, "grad_norm": 0.06803590804338455, "learning_rate": 0.00017070588235294115, "loss": 0.0259, "step": 2700 }, { "epoch": 24.11, "eval_cer": 0.033002221194935226, "eval_loss": 0.19758771359920502, "eval_runtime": 22.4242, "eval_samples_per_second": 117.819, "eval_steps_per_second": 1.873, "eval_wer": 0.1102538675128917, "step": 2700 }, { "epoch": 24.12, "grad_norm": 0.08501246571540833, "learning_rate": 0.00017064705882352942, "loss": 0.0257, "step": 2701 }, { "epoch": 24.12, "grad_norm": 0.04914569482207298, "learning_rate": 0.00017058823529411763, "loss": 0.0237, "step": 2702 }, { "epoch": 24.13, "grad_norm": 0.05821319669485092, "learning_rate": 0.00017052941176470588, "loss": 0.0281, "step": 2703 }, { "epoch": 24.14, "grad_norm": 0.05908571183681488, "learning_rate": 0.0001704705882352941, "loss": 0.0267, "step": 2704 }, { "epoch": 24.15, "grad_norm": 0.06436707079410553, "learning_rate": 0.00017041176470588234, "loss": 0.0244, "step": 2705 }, { "epoch": 24.16, "grad_norm": 0.07469526678323746, "learning_rate": 0.00017035294117647058, "loss": 0.0264, "step": 2706 }, { "epoch": 24.17, "grad_norm": 0.12090101093053818, "learning_rate": 0.0001702941176470588, "loss": 0.0335, "step": 2707 }, { "epoch": 24.18, "grad_norm": 0.051356274634599686, "learning_rate": 0.00017023529411764707, "loss": 0.0258, "step": 2708 }, { "epoch": 24.19, "grad_norm": 0.05642694979906082, "learning_rate": 0.00017017647058823528, "loss": 0.0249, "step": 2709 }, { "epoch": 24.2, "grad_norm": 0.051083944737911224, "learning_rate": 0.0001701176470588235, "loss": 0.0246, "step": 2710 }, { "epoch": 24.21, "grad_norm": 0.057927340269088745, "learning_rate": 0.00017005882352941177, 
"loss": 0.0266, "step": 2711 }, { "epoch": 24.21, "grad_norm": 0.059637777507305145, "learning_rate": 0.00016999999999999999, "loss": 0.0256, "step": 2712 }, { "epoch": 24.22, "grad_norm": 0.09623510390520096, "learning_rate": 0.00016994117647058823, "loss": 0.0354, "step": 2713 }, { "epoch": 24.23, "grad_norm": 0.05712822824716568, "learning_rate": 0.00016988235294117644, "loss": 0.0275, "step": 2714 }, { "epoch": 24.24, "grad_norm": 0.055225055664777756, "learning_rate": 0.0001698235294117647, "loss": 0.0231, "step": 2715 }, { "epoch": 24.25, "grad_norm": 0.06384652107954025, "learning_rate": 0.00016976470588235293, "loss": 0.0246, "step": 2716 }, { "epoch": 24.26, "grad_norm": 0.060066428035497665, "learning_rate": 0.00016970588235294115, "loss": 0.0257, "step": 2717 }, { "epoch": 24.27, "grad_norm": 0.05861593782901764, "learning_rate": 0.0001696470588235294, "loss": 0.0242, "step": 2718 }, { "epoch": 24.28, "grad_norm": 0.10007983446121216, "learning_rate": 0.00016958823529411763, "loss": 0.0283, "step": 2719 }, { "epoch": 24.29, "grad_norm": 0.07153783738613129, "learning_rate": 0.00016952941176470585, "loss": 0.0279, "step": 2720 }, { "epoch": 24.29, "grad_norm": 0.061844177544116974, "learning_rate": 0.00016947058823529412, "loss": 0.0253, "step": 2721 }, { "epoch": 24.3, "grad_norm": 0.05117771402001381, "learning_rate": 0.00016941176470588234, "loss": 0.0232, "step": 2722 }, { "epoch": 24.31, "grad_norm": 0.05482063069939613, "learning_rate": 0.00016935294117647055, "loss": 0.026, "step": 2723 }, { "epoch": 24.32, "grad_norm": 0.06420817226171494, "learning_rate": 0.00016929411764705882, "loss": 0.0297, "step": 2724 }, { "epoch": 24.33, "grad_norm": 0.06492055952548981, "learning_rate": 0.00016923529411764704, "loss": 0.0222, "step": 2725 }, { "epoch": 24.34, "grad_norm": 0.10581710934638977, "learning_rate": 0.00016917647058823528, "loss": 0.0418, "step": 2726 }, { "epoch": 24.35, "grad_norm": 0.05373619124293327, "learning_rate": 0.0001691176470588235, 
"loss": 0.0264, "step": 2727 }, { "epoch": 24.36, "grad_norm": 0.057972416281700134, "learning_rate": 0.00016905882352941174, "loss": 0.0324, "step": 2728 }, { "epoch": 24.37, "grad_norm": 0.052475400269031525, "learning_rate": 0.000169, "loss": 0.0233, "step": 2729 }, { "epoch": 24.38, "grad_norm": 0.057983409613370895, "learning_rate": 0.0001689411764705882, "loss": 0.0274, "step": 2730 }, { "epoch": 24.38, "grad_norm": 0.06758495420217514, "learning_rate": 0.00016888235294117647, "loss": 0.0233, "step": 2731 }, { "epoch": 24.39, "grad_norm": 0.09746670722961426, "learning_rate": 0.0001688235294117647, "loss": 0.0322, "step": 2732 }, { "epoch": 24.4, "grad_norm": 0.04812254011631012, "learning_rate": 0.0001687647058823529, "loss": 0.0223, "step": 2733 }, { "epoch": 24.41, "grad_norm": 0.06194176897406578, "learning_rate": 0.00016870588235294118, "loss": 0.0234, "step": 2734 }, { "epoch": 24.42, "grad_norm": 0.05478568375110626, "learning_rate": 0.0001686470588235294, "loss": 0.0213, "step": 2735 }, { "epoch": 24.43, "grad_norm": 0.06427139043807983, "learning_rate": 0.00016858823529411764, "loss": 0.0263, "step": 2736 }, { "epoch": 24.44, "grad_norm": 0.06361524015665054, "learning_rate": 0.00016852941176470588, "loss": 0.0219, "step": 2737 }, { "epoch": 24.45, "grad_norm": 0.17025306820869446, "learning_rate": 0.0001684705882352941, "loss": 0.0242, "step": 2738 }, { "epoch": 24.46, "grad_norm": 0.05920528247952461, "learning_rate": 0.00016841176470588234, "loss": 0.0252, "step": 2739 }, { "epoch": 24.46, "grad_norm": 0.06601275503635406, "learning_rate": 0.00016835294117647056, "loss": 0.0239, "step": 2740 }, { "epoch": 24.47, "grad_norm": 0.0645885318517685, "learning_rate": 0.00016829411764705883, "loss": 0.0269, "step": 2741 }, { "epoch": 24.48, "grad_norm": 0.07101695984601974, "learning_rate": 0.00016823529411764704, "loss": 0.0239, "step": 2742 }, { "epoch": 24.49, "grad_norm": 0.05991099402308464, "learning_rate": 0.00016817647058823526, "loss": 0.0227, 
"step": 2743 }, { "epoch": 24.5, "grad_norm": 0.08743740618228912, "learning_rate": 0.00016811764705882353, "loss": 0.0278, "step": 2744 }, { "epoch": 24.51, "grad_norm": 0.12182283401489258, "learning_rate": 0.00016805882352941175, "loss": 0.0379, "step": 2745 }, { "epoch": 24.52, "grad_norm": 0.05626633018255234, "learning_rate": 0.000168, "loss": 0.029, "step": 2746 }, { "epoch": 24.53, "grad_norm": 0.054795052856206894, "learning_rate": 0.00016794117647058823, "loss": 0.0267, "step": 2747 }, { "epoch": 24.54, "grad_norm": 0.05787048116326332, "learning_rate": 0.00016788235294117645, "loss": 0.022, "step": 2748 }, { "epoch": 24.54, "grad_norm": 0.05009082704782486, "learning_rate": 0.0001678235294117647, "loss": 0.0199, "step": 2749 }, { "epoch": 24.55, "grad_norm": 0.06413988769054413, "learning_rate": 0.0001677647058823529, "loss": 0.0235, "step": 2750 }, { "epoch": 24.56, "grad_norm": 0.09867819398641586, "learning_rate": 0.00016770588235294115, "loss": 0.0312, "step": 2751 }, { "epoch": 24.57, "grad_norm": 0.057203102856874466, "learning_rate": 0.0001676470588235294, "loss": 0.0283, "step": 2752 }, { "epoch": 24.58, "grad_norm": 0.05727151408791542, "learning_rate": 0.0001675882352941176, "loss": 0.0255, "step": 2753 }, { "epoch": 24.59, "grad_norm": 0.05368828773498535, "learning_rate": 0.00016752941176470588, "loss": 0.0246, "step": 2754 }, { "epoch": 24.6, "grad_norm": 0.0596047081053257, "learning_rate": 0.0001674705882352941, "loss": 0.0254, "step": 2755 }, { "epoch": 24.61, "grad_norm": 0.07500813901424408, "learning_rate": 0.00016741176470588232, "loss": 0.0296, "step": 2756 }, { "epoch": 24.62, "grad_norm": 0.10808512568473816, "learning_rate": 0.0001673529411764706, "loss": 0.0389, "step": 2757 }, { "epoch": 24.62, "grad_norm": 0.057034000754356384, "learning_rate": 0.0001672941176470588, "loss": 0.0234, "step": 2758 }, { "epoch": 24.63, "grad_norm": 0.0573807917535305, "learning_rate": 0.00016723529411764705, "loss": 0.0279, "step": 2759 }, { 
"epoch": 24.64, "grad_norm": 0.05286450684070587, "learning_rate": 0.0001671764705882353, "loss": 0.0233, "step": 2760 }, { "epoch": 24.65, "grad_norm": 0.05330032855272293, "learning_rate": 0.0001671176470588235, "loss": 0.024, "step": 2761 }, { "epoch": 24.66, "grad_norm": 0.06454110890626907, "learning_rate": 0.00016705882352941175, "loss": 0.0288, "step": 2762 }, { "epoch": 24.67, "grad_norm": 0.08926012367010117, "learning_rate": 0.00016699999999999997, "loss": 0.0324, "step": 2763 }, { "epoch": 24.68, "grad_norm": 0.04690074920654297, "learning_rate": 0.00016694117647058824, "loss": 0.0212, "step": 2764 }, { "epoch": 24.69, "grad_norm": 0.051096923649311066, "learning_rate": 0.00016688235294117645, "loss": 0.0253, "step": 2765 }, { "epoch": 24.7, "grad_norm": 0.057431526482105255, "learning_rate": 0.00016682352941176467, "loss": 0.026, "step": 2766 }, { "epoch": 24.71, "grad_norm": 0.057798273861408234, "learning_rate": 0.00016676470588235294, "loss": 0.0226, "step": 2767 }, { "epoch": 24.71, "grad_norm": 0.05295524746179581, "learning_rate": 0.00016670588235294116, "loss": 0.0195, "step": 2768 }, { "epoch": 24.72, "grad_norm": 0.08081305027008057, "learning_rate": 0.0001666470588235294, "loss": 0.0277, "step": 2769 }, { "epoch": 24.73, "grad_norm": 0.10082969069480896, "learning_rate": 0.00016658823529411764, "loss": 0.0301, "step": 2770 }, { "epoch": 24.74, "grad_norm": 0.05353362858295441, "learning_rate": 0.00016652941176470586, "loss": 0.0237, "step": 2771 }, { "epoch": 24.75, "grad_norm": 0.04614794999361038, "learning_rate": 0.0001664705882352941, "loss": 0.0209, "step": 2772 }, { "epoch": 24.76, "grad_norm": 0.05549345910549164, "learning_rate": 0.00016641176470588235, "loss": 0.0271, "step": 2773 }, { "epoch": 24.77, "grad_norm": 0.05876650661230087, "learning_rate": 0.0001663529411764706, "loss": 0.026, "step": 2774 }, { "epoch": 24.78, "grad_norm": 0.06546223908662796, "learning_rate": 0.0001662941176470588, "loss": 0.0188, "step": 2775 }, { 
"epoch": 24.79, "grad_norm": 0.13608670234680176, "learning_rate": 0.00016623529411764702, "loss": 0.0448, "step": 2776 }, { "epoch": 24.79, "grad_norm": 0.04852099344134331, "learning_rate": 0.0001661764705882353, "loss": 0.0251, "step": 2777 }, { "epoch": 24.8, "grad_norm": 0.05532992631196976, "learning_rate": 0.0001661176470588235, "loss": 0.0266, "step": 2778 }, { "epoch": 24.81, "grad_norm": 0.05641378462314606, "learning_rate": 0.00016605882352941175, "loss": 0.0217, "step": 2779 }, { "epoch": 24.82, "grad_norm": 0.06152420490980148, "learning_rate": 0.000166, "loss": 0.0263, "step": 2780 }, { "epoch": 24.83, "grad_norm": 0.05759526416659355, "learning_rate": 0.0001659411764705882, "loss": 0.0251, "step": 2781 }, { "epoch": 24.84, "grad_norm": 0.09831923246383667, "learning_rate": 0.00016588235294117646, "loss": 0.0348, "step": 2782 }, { "epoch": 24.85, "grad_norm": 0.05034704878926277, "learning_rate": 0.0001658235294117647, "loss": 0.0229, "step": 2783 }, { "epoch": 24.86, "grad_norm": 0.05134845897555351, "learning_rate": 0.00016576470588235292, "loss": 0.0245, "step": 2784 }, { "epoch": 24.87, "grad_norm": 0.05441165342926979, "learning_rate": 0.00016570588235294116, "loss": 0.0233, "step": 2785 }, { "epoch": 24.88, "grad_norm": 0.05475650727748871, "learning_rate": 0.00016564705882352938, "loss": 0.0255, "step": 2786 }, { "epoch": 24.88, "grad_norm": 0.06395895779132843, "learning_rate": 0.00016558823529411765, "loss": 0.0269, "step": 2787 }, { "epoch": 24.89, "grad_norm": 0.11297452449798584, "learning_rate": 0.00016552941176470586, "loss": 0.0419, "step": 2788 }, { "epoch": 24.9, "grad_norm": 0.05215073376893997, "learning_rate": 0.00016547058823529408, "loss": 0.0244, "step": 2789 }, { "epoch": 24.91, "grad_norm": 0.05535958334803581, "learning_rate": 0.00016541176470588235, "loss": 0.0225, "step": 2790 }, { "epoch": 24.92, "grad_norm": 0.05166322737932205, "learning_rate": 0.00016535294117647057, "loss": 0.0244, "step": 2791 }, { "epoch": 24.93, 
"grad_norm": 0.060888104140758514, "learning_rate": 0.0001652941176470588, "loss": 0.0324, "step": 2792 }, { "epoch": 24.94, "grad_norm": 0.058065325021743774, "learning_rate": 0.00016523529411764705, "loss": 0.0266, "step": 2793 }, { "epoch": 24.95, "grad_norm": 0.07291809469461441, "learning_rate": 0.00016517647058823527, "loss": 0.0256, "step": 2794 }, { "epoch": 24.96, "grad_norm": 0.13164353370666504, "learning_rate": 0.0001651176470588235, "loss": 0.0385, "step": 2795 }, { "epoch": 24.96, "grad_norm": 0.04778966307640076, "learning_rate": 0.00016505882352941176, "loss": 0.0257, "step": 2796 }, { "epoch": 24.97, "grad_norm": 0.05566604807972908, "learning_rate": 0.000165, "loss": 0.0243, "step": 2797 }, { "epoch": 24.98, "grad_norm": 0.05550533905625343, "learning_rate": 0.00016494117647058822, "loss": 0.0285, "step": 2798 }, { "epoch": 24.99, "grad_norm": 0.06879201531410217, "learning_rate": 0.00016488235294117643, "loss": 0.0257, "step": 2799 }, { "epoch": 25.0, "grad_norm": 0.09967371076345444, "learning_rate": 0.0001648235294117647, "loss": 0.0325, "step": 2800 }, { "epoch": 25.0, "eval_cer": 0.03283309467702473, "eval_loss": 0.19575656950473785, "eval_runtime": 22.3096, "eval_samples_per_second": 118.425, "eval_steps_per_second": 1.883, "eval_wer": 0.10876636255454185, "step": 2800 }, { "epoch": 25.01, "grad_norm": 0.05569753795862198, "learning_rate": 0.00016476470588235292, "loss": 0.025, "step": 2801 }, { "epoch": 25.02, "grad_norm": 0.058367833495140076, "learning_rate": 0.0001647058823529412, "loss": 0.0292, "step": 2802 }, { "epoch": 25.03, "grad_norm": 0.0486668162047863, "learning_rate": 0.0001646470588235294, "loss": 0.0241, "step": 2803 }, { "epoch": 25.04, "grad_norm": 0.05004143714904785, "learning_rate": 0.00016458823529411762, "loss": 0.0217, "step": 2804 }, { "epoch": 25.04, "grad_norm": 0.056677915155887604, "learning_rate": 0.00016452941176470587, "loss": 0.0256, "step": 2805 }, { "epoch": 25.05, "grad_norm": 0.06901763379573822, 
"learning_rate": 0.0001644705882352941, "loss": 0.0215, "step": 2806 }, { "epoch": 25.06, "grad_norm": 0.09663349390029907, "learning_rate": 0.00016441176470588235, "loss": 0.0296, "step": 2807 }, { "epoch": 25.07, "grad_norm": 0.05198526382446289, "learning_rate": 0.00016435294117647057, "loss": 0.024, "step": 2808 }, { "epoch": 25.08, "grad_norm": 0.05520065873861313, "learning_rate": 0.0001642941176470588, "loss": 0.0251, "step": 2809 }, { "epoch": 25.09, "grad_norm": 0.04921083524823189, "learning_rate": 0.00016423529411764706, "loss": 0.0196, "step": 2810 }, { "epoch": 25.1, "grad_norm": 0.05463823676109314, "learning_rate": 0.00016417647058823527, "loss": 0.0222, "step": 2811 }, { "epoch": 25.11, "grad_norm": 0.07825464755296707, "learning_rate": 0.00016411764705882354, "loss": 0.0275, "step": 2812 }, { "epoch": 25.12, "grad_norm": 0.08404690027236938, "learning_rate": 0.00016405882352941176, "loss": 0.0316, "step": 2813 }, { "epoch": 25.12, "grad_norm": 0.05047415941953659, "learning_rate": 0.00016399999999999997, "loss": 0.0223, "step": 2814 }, { "epoch": 25.13, "grad_norm": 0.05363975092768669, "learning_rate": 0.00016394117647058822, "loss": 0.0271, "step": 2815 }, { "epoch": 25.14, "grad_norm": 0.0585324689745903, "learning_rate": 0.00016388235294117646, "loss": 0.0265, "step": 2816 }, { "epoch": 25.15, "grad_norm": 0.05032774433493614, "learning_rate": 0.00016382352941176468, "loss": 0.0206, "step": 2817 }, { "epoch": 25.16, "grad_norm": 0.06968959420919418, "learning_rate": 0.00016376470588235292, "loss": 0.023, "step": 2818 }, { "epoch": 25.17, "grad_norm": 0.08379457145929337, "learning_rate": 0.00016370588235294116, "loss": 0.0266, "step": 2819 }, { "epoch": 25.18, "grad_norm": 0.05781732127070427, "learning_rate": 0.0001636470588235294, "loss": 0.0286, "step": 2820 }, { "epoch": 25.19, "grad_norm": 0.04706644266843796, "learning_rate": 0.00016358823529411762, "loss": 0.0217, "step": 2821 }, { "epoch": 25.2, "grad_norm": 0.0497005395591259, 
"learning_rate": 0.00016352941176470584, "loss": 0.0233, "step": 2822 }, { "epoch": 25.21, "grad_norm": 0.0531892329454422, "learning_rate": 0.0001634705882352941, "loss": 0.0236, "step": 2823 }, { "epoch": 25.21, "grad_norm": 0.05514576658606529, "learning_rate": 0.00016341176470588233, "loss": 0.0217, "step": 2824 }, { "epoch": 25.22, "grad_norm": 0.10693211853504181, "learning_rate": 0.0001633529411764706, "loss": 0.0331, "step": 2825 }, { "epoch": 25.23, "grad_norm": 0.049358803778886795, "learning_rate": 0.00016329411764705881, "loss": 0.024, "step": 2826 }, { "epoch": 25.24, "grad_norm": 0.050118591636419296, "learning_rate": 0.00016323529411764703, "loss": 0.0248, "step": 2827 }, { "epoch": 25.25, "grad_norm": 0.055307239294052124, "learning_rate": 0.00016317647058823527, "loss": 0.028, "step": 2828 }, { "epoch": 25.26, "grad_norm": 0.05096099153161049, "learning_rate": 0.00016311764705882352, "loss": 0.0224, "step": 2829 }, { "epoch": 25.27, "grad_norm": 0.05580078065395355, "learning_rate": 0.00016305882352941176, "loss": 0.026, "step": 2830 }, { "epoch": 25.28, "grad_norm": 0.0734834298491478, "learning_rate": 0.00016299999999999998, "loss": 0.0258, "step": 2831 }, { "epoch": 25.29, "grad_norm": 0.13371030986309052, "learning_rate": 0.00016294117647058822, "loss": 0.0291, "step": 2832 }, { "epoch": 25.29, "grad_norm": 0.04842384159564972, "learning_rate": 0.00016288235294117646, "loss": 0.0228, "step": 2833 }, { "epoch": 25.3, "grad_norm": 0.0555318146944046, "learning_rate": 0.00016282352941176468, "loss": 0.0236, "step": 2834 }, { "epoch": 25.31, "grad_norm": 0.056293439120054245, "learning_rate": 0.00016276470588235295, "loss": 0.0225, "step": 2835 }, { "epoch": 25.32, "grad_norm": 0.05625167116522789, "learning_rate": 0.00016270588235294117, "loss": 0.0266, "step": 2836 }, { "epoch": 25.33, "grad_norm": 0.08097604662179947, "learning_rate": 0.00016264705882352938, "loss": 0.0291, "step": 2837 }, { "epoch": 25.34, "grad_norm": 0.0822027325630188, 
"learning_rate": 0.00016258823529411765, "loss": 0.0229, "step": 2838 }, { "epoch": 25.35, "grad_norm": 0.05596937611699104, "learning_rate": 0.00016252941176470587, "loss": 0.0277, "step": 2839 }, { "epoch": 25.36, "grad_norm": 0.05915316939353943, "learning_rate": 0.00016247058823529411, "loss": 0.0259, "step": 2840 }, { "epoch": 25.37, "grad_norm": 0.058485615998506546, "learning_rate": 0.00016241176470588233, "loss": 0.0241, "step": 2841 }, { "epoch": 25.38, "grad_norm": 0.05064799636602402, "learning_rate": 0.00016235294117647057, "loss": 0.0195, "step": 2842 }, { "epoch": 25.38, "grad_norm": 0.06672133505344391, "learning_rate": 0.00016229411764705882, "loss": 0.0208, "step": 2843 }, { "epoch": 25.39, "grad_norm": 0.09813190251588821, "learning_rate": 0.00016223529411764703, "loss": 0.033, "step": 2844 }, { "epoch": 25.4, "grad_norm": 0.05445200577378273, "learning_rate": 0.00016217647058823525, "loss": 0.0254, "step": 2845 }, { "epoch": 25.41, "grad_norm": 0.058646202087402344, "learning_rate": 0.00016211764705882352, "loss": 0.022, "step": 2846 }, { "epoch": 25.42, "grad_norm": 0.053171828389167786, "learning_rate": 0.00016205882352941174, "loss": 0.0232, "step": 2847 }, { "epoch": 25.43, "grad_norm": 0.052805688232183456, "learning_rate": 0.000162, "loss": 0.0237, "step": 2848 }, { "epoch": 25.44, "grad_norm": 0.054522205144166946, "learning_rate": 0.00016194117647058822, "loss": 0.0237, "step": 2849 }, { "epoch": 25.45, "grad_norm": 0.09734851866960526, "learning_rate": 0.00016188235294117644, "loss": 0.0265, "step": 2850 }, { "epoch": 25.46, "grad_norm": 0.05089952424168587, "learning_rate": 0.00016182352941176468, "loss": 0.023, "step": 2851 }, { "epoch": 25.46, "grad_norm": 0.05102134123444557, "learning_rate": 0.00016176470588235293, "loss": 0.0241, "step": 2852 }, { "epoch": 25.47, "grad_norm": 0.05429181456565857, "learning_rate": 0.00016170588235294117, "loss": 0.0271, "step": 2853 }, { "epoch": 25.48, "grad_norm": 0.052763838320970535, 
"learning_rate": 0.0001616470588235294, "loss": 0.0244, "step": 2854 }, { "epoch": 25.49, "grad_norm": 0.049629341810941696, "learning_rate": 0.00016158823529411763, "loss": 0.0203, "step": 2855 }, { "epoch": 25.5, "grad_norm": 0.07441843301057816, "learning_rate": 0.00016152941176470587, "loss": 0.0246, "step": 2856 }, { "epoch": 25.51, "grad_norm": 0.0944853201508522, "learning_rate": 0.0001614705882352941, "loss": 0.0338, "step": 2857 }, { "epoch": 25.52, "grad_norm": 0.05425774306058884, "learning_rate": 0.00016141176470588236, "loss": 0.0256, "step": 2858 }, { "epoch": 25.53, "grad_norm": 0.0528445765376091, "learning_rate": 0.00016135294117647058, "loss": 0.0254, "step": 2859 }, { "epoch": 25.54, "grad_norm": 0.061194173991680145, "learning_rate": 0.0001612941176470588, "loss": 0.0232, "step": 2860 }, { "epoch": 25.54, "grad_norm": 0.058666814118623734, "learning_rate": 0.00016123529411764706, "loss": 0.0254, "step": 2861 }, { "epoch": 25.55, "grad_norm": 0.06553830951452255, "learning_rate": 0.00016117647058823528, "loss": 0.0187, "step": 2862 }, { "epoch": 25.56, "grad_norm": 0.10020612180233002, "learning_rate": 0.00016111764705882352, "loss": 0.026, "step": 2863 }, { "epoch": 25.57, "grad_norm": 0.07953240722417831, "learning_rate": 0.00016105882352941174, "loss": 0.0249, "step": 2864 }, { "epoch": 25.58, "grad_norm": 0.058467797935009, "learning_rate": 0.00016099999999999998, "loss": 0.0281, "step": 2865 }, { "epoch": 25.59, "grad_norm": 0.05778868496417999, "learning_rate": 0.00016094117647058823, "loss": 0.0224, "step": 2866 }, { "epoch": 25.6, "grad_norm": 0.05610688775777817, "learning_rate": 0.00016088235294117644, "loss": 0.0235, "step": 2867 }, { "epoch": 25.61, "grad_norm": 0.06687482446432114, "learning_rate": 0.0001608235294117647, "loss": 0.0221, "step": 2868 }, { "epoch": 25.62, "grad_norm": 0.08267761021852493, "learning_rate": 0.00016076470588235293, "loss": 0.0293, "step": 2869 }, { "epoch": 25.62, "grad_norm": 0.0476447232067585, 
"learning_rate": 0.00016070588235294115, "loss": 0.0216, "step": 2870 }, { "epoch": 25.63, "grad_norm": 0.057498421519994736, "learning_rate": 0.00016064705882352942, "loss": 0.0248, "step": 2871 }, { "epoch": 25.64, "grad_norm": 0.06706904619932175, "learning_rate": 0.00016058823529411763, "loss": 0.0251, "step": 2872 }, { "epoch": 25.65, "grad_norm": 0.05606892332434654, "learning_rate": 0.00016052941176470588, "loss": 0.0217, "step": 2873 }, { "epoch": 25.66, "grad_norm": 0.054642338305711746, "learning_rate": 0.00016047058823529412, "loss": 0.0197, "step": 2874 }, { "epoch": 25.67, "grad_norm": 0.12919902801513672, "learning_rate": 0.00016041176470588234, "loss": 0.0412, "step": 2875 }, { "epoch": 25.68, "grad_norm": 0.06097327917814255, "learning_rate": 0.00016035294117647058, "loss": 0.0192, "step": 2876 }, { "epoch": 25.69, "grad_norm": 0.05964032560586929, "learning_rate": 0.0001602941176470588, "loss": 0.0244, "step": 2877 }, { "epoch": 25.7, "grad_norm": 0.060374390333890915, "learning_rate": 0.00016023529411764704, "loss": 0.0248, "step": 2878 }, { "epoch": 25.71, "grad_norm": 0.056740935891866684, "learning_rate": 0.00016017647058823528, "loss": 0.0256, "step": 2879 }, { "epoch": 25.71, "grad_norm": 0.05796075239777565, "learning_rate": 0.0001601176470588235, "loss": 0.0242, "step": 2880 }, { "epoch": 25.72, "grad_norm": 0.07502967864274979, "learning_rate": 0.00016005882352941177, "loss": 0.0216, "step": 2881 }, { "epoch": 25.73, "grad_norm": 0.09179262071847916, "learning_rate": 0.00015999999999999999, "loss": 0.0313, "step": 2882 }, { "epoch": 25.74, "grad_norm": 0.04778487607836723, "learning_rate": 0.0001599411764705882, "loss": 0.0231, "step": 2883 }, { "epoch": 25.75, "grad_norm": 0.05621251463890076, "learning_rate": 0.00015988235294117647, "loss": 0.0307, "step": 2884 }, { "epoch": 25.76, "grad_norm": 0.05809913948178291, "learning_rate": 0.0001598235294117647, "loss": 0.0268, "step": 2885 }, { "epoch": 25.77, "grad_norm": 0.05506697669625282, 
"learning_rate": 0.00015976470588235293, "loss": 0.0228, "step": 2886 }, { "epoch": 25.78, "grad_norm": 0.09386373311281204, "learning_rate": 0.00015970588235294115, "loss": 0.0301, "step": 2887 }, { "epoch": 25.79, "grad_norm": 0.11229165643453598, "learning_rate": 0.0001596470588235294, "loss": 0.0364, "step": 2888 }, { "epoch": 25.79, "grad_norm": 0.04829331114888191, "learning_rate": 0.00015958823529411764, "loss": 0.0205, "step": 2889 }, { "epoch": 25.8, "grad_norm": 0.04852853715419769, "learning_rate": 0.00015952941176470585, "loss": 0.0219, "step": 2890 }, { "epoch": 25.81, "grad_norm": 0.04522327333688736, "learning_rate": 0.00015947058823529412, "loss": 0.0181, "step": 2891 }, { "epoch": 25.82, "grad_norm": 0.05684347823262215, "learning_rate": 0.00015941176470588234, "loss": 0.0242, "step": 2892 }, { "epoch": 25.83, "grad_norm": 0.07527941465377808, "learning_rate": 0.00015935294117647056, "loss": 0.024, "step": 2893 }, { "epoch": 25.84, "grad_norm": 0.12193810939788818, "learning_rate": 0.00015929411764705883, "loss": 0.0382, "step": 2894 }, { "epoch": 25.85, "grad_norm": 0.05096936970949173, "learning_rate": 0.00015923529411764704, "loss": 0.0245, "step": 2895 }, { "epoch": 25.86, "grad_norm": 0.05811227858066559, "learning_rate": 0.00015917647058823529, "loss": 0.0289, "step": 2896 }, { "epoch": 25.87, "grad_norm": 0.05780656635761261, "learning_rate": 0.00015911764705882353, "loss": 0.0258, "step": 2897 }, { "epoch": 25.88, "grad_norm": 0.05754803493618965, "learning_rate": 0.00015905882352941175, "loss": 0.0201, "step": 2898 }, { "epoch": 25.88, "grad_norm": 0.0601537711918354, "learning_rate": 0.000159, "loss": 0.0238, "step": 2899 }, { "epoch": 25.89, "grad_norm": 0.11887750029563904, "learning_rate": 0.0001589411764705882, "loss": 0.0359, "step": 2900 }, { "epoch": 25.89, "eval_cer": 0.034576976995035195, "eval_loss": 0.1907992660999298, "eval_runtime": 22.3409, "eval_samples_per_second": 118.258, "eval_steps_per_second": 1.88, "eval_wer": 
0.11047203490678302, "step": 2900 }, { "epoch": 25.9, "grad_norm": 0.05362885817885399, "learning_rate": 0.00015888235294117648, "loss": 0.0264, "step": 2901 }, { "epoch": 25.91, "grad_norm": 0.05292373523116112, "learning_rate": 0.0001588235294117647, "loss": 0.0269, "step": 2902 }, { "epoch": 25.92, "grad_norm": 0.05303650349378586, "learning_rate": 0.0001587647058823529, "loss": 0.0225, "step": 2903 }, { "epoch": 25.93, "grad_norm": 0.06052716076374054, "learning_rate": 0.00015870588235294118, "loss": 0.0275, "step": 2904 }, { "epoch": 25.94, "grad_norm": 0.055717941373586655, "learning_rate": 0.0001586470588235294, "loss": 0.0218, "step": 2905 }, { "epoch": 25.95, "grad_norm": 0.0733630508184433, "learning_rate": 0.00015858823529411764, "loss": 0.0241, "step": 2906 }, { "epoch": 25.96, "grad_norm": 0.08755376935005188, "learning_rate": 0.00015852941176470588, "loss": 0.027, "step": 2907 }, { "epoch": 25.96, "grad_norm": 0.052491169422864914, "learning_rate": 0.0001584705882352941, "loss": 0.019, "step": 2908 }, { "epoch": 25.97, "grad_norm": 0.0682724192738533, "learning_rate": 0.00015841176470588234, "loss": 0.0254, "step": 2909 }, { "epoch": 25.98, "grad_norm": 0.071016825735569, "learning_rate": 0.00015835294117647056, "loss": 0.0254, "step": 2910 }, { "epoch": 25.99, "grad_norm": 0.06912308186292648, "learning_rate": 0.0001582941176470588, "loss": 0.0227, "step": 2911 }, { "epoch": 26.0, "grad_norm": 0.08627953380346298, "learning_rate": 0.00015823529411764704, "loss": 0.0278, "step": 2912 }, { "epoch": 26.01, "grad_norm": 0.05893798917531967, "learning_rate": 0.00015817647058823526, "loss": 0.0263, "step": 2913 }, { "epoch": 26.02, "grad_norm": 0.05467715486884117, "learning_rate": 0.00015811764705882353, "loss": 0.0274, "step": 2914 }, { "epoch": 26.03, "grad_norm": 0.04888711869716644, "learning_rate": 0.00015805882352941175, "loss": 0.0245, "step": 2915 }, { "epoch": 26.04, "grad_norm": 0.0500195249915123, "learning_rate": 0.00015799999999999996, 
"loss": 0.0232, "step": 2916 }, { "epoch": 26.04, "grad_norm": 0.056167636066675186, "learning_rate": 0.00015794117647058824, "loss": 0.0249, "step": 2917 }, { "epoch": 26.05, "grad_norm": 0.08539999276399612, "learning_rate": 0.00015788235294117645, "loss": 0.029, "step": 2918 }, { "epoch": 26.06, "grad_norm": 0.07519900053739548, "learning_rate": 0.0001578235294117647, "loss": 0.0282, "step": 2919 }, { "epoch": 26.07, "grad_norm": 0.04647299647331238, "learning_rate": 0.00015776470588235294, "loss": 0.0249, "step": 2920 }, { "epoch": 26.08, "grad_norm": 0.05957910418510437, "learning_rate": 0.00015770588235294115, "loss": 0.0271, "step": 2921 }, { "epoch": 26.09, "grad_norm": 0.053026359528303146, "learning_rate": 0.0001576470588235294, "loss": 0.0243, "step": 2922 }, { "epoch": 26.1, "grad_norm": 0.046042006462812424, "learning_rate": 0.00015758823529411761, "loss": 0.0214, "step": 2923 }, { "epoch": 26.11, "grad_norm": 0.08956961333751678, "learning_rate": 0.00015752941176470588, "loss": 0.0269, "step": 2924 }, { "epoch": 26.12, "grad_norm": 0.11800762265920639, "learning_rate": 0.0001574705882352941, "loss": 0.0406, "step": 2925 }, { "epoch": 26.12, "grad_norm": 0.04754096269607544, "learning_rate": 0.00015741176470588232, "loss": 0.024, "step": 2926 }, { "epoch": 26.13, "grad_norm": 0.052196651697158813, "learning_rate": 0.0001573529411764706, "loss": 0.027, "step": 2927 }, { "epoch": 26.14, "grad_norm": 0.05159645155072212, "learning_rate": 0.0001572941176470588, "loss": 0.0231, "step": 2928 }, { "epoch": 26.15, "grad_norm": 0.0545731857419014, "learning_rate": 0.00015723529411764705, "loss": 0.0234, "step": 2929 }, { "epoch": 26.16, "grad_norm": 0.062391672283411026, "learning_rate": 0.0001571764705882353, "loss": 0.0246, "step": 2930 }, { "epoch": 26.17, "grad_norm": 0.0956978127360344, "learning_rate": 0.0001571176470588235, "loss": 0.0269, "step": 2931 }, { "epoch": 26.18, "grad_norm": 0.05004408210515976, "learning_rate": 0.00015705882352941175, "loss": 
0.0225, "step": 2932 }, { "epoch": 26.19, "grad_norm": 0.05430254712700844, "learning_rate": 0.000157, "loss": 0.0269, "step": 2933 }, { "epoch": 26.2, "grad_norm": 0.051544129848480225, "learning_rate": 0.00015694117647058824, "loss": 0.0221, "step": 2934 }, { "epoch": 26.21, "grad_norm": 0.057763636112213135, "learning_rate": 0.00015688235294117645, "loss": 0.0231, "step": 2935 }, { "epoch": 26.21, "grad_norm": 0.060375794768333435, "learning_rate": 0.00015682352941176467, "loss": 0.0277, "step": 2936 }, { "epoch": 26.22, "grad_norm": 0.09702575951814651, "learning_rate": 0.00015676470588235294, "loss": 0.0319, "step": 2937 }, { "epoch": 26.23, "grad_norm": 0.04802504926919937, "learning_rate": 0.00015670588235294116, "loss": 0.0219, "step": 2938 }, { "epoch": 26.24, "grad_norm": 0.048738472163677216, "learning_rate": 0.00015664705882352943, "loss": 0.0211, "step": 2939 }, { "epoch": 26.25, "grad_norm": 0.046131592243909836, "learning_rate": 0.00015658823529411764, "loss": 0.0224, "step": 2940 }, { "epoch": 26.26, "grad_norm": 0.0489065982401371, "learning_rate": 0.00015652941176470586, "loss": 0.0211, "step": 2941 }, { "epoch": 26.27, "grad_norm": 0.05561669170856476, "learning_rate": 0.0001564705882352941, "loss": 0.0214, "step": 2942 }, { "epoch": 26.28, "grad_norm": 0.08418484032154083, "learning_rate": 0.00015641176470588235, "loss": 0.031, "step": 2943 }, { "epoch": 26.29, "grad_norm": 0.08891662210226059, "learning_rate": 0.00015635294117647056, "loss": 0.0304, "step": 2944 }, { "epoch": 26.29, "grad_norm": 0.05543001741170883, "learning_rate": 0.0001562941176470588, "loss": 0.0257, "step": 2945 }, { "epoch": 26.3, "grad_norm": 0.05482387915253639, "learning_rate": 0.00015623529411764702, "loss": 0.0251, "step": 2946 }, { "epoch": 26.31, "grad_norm": 0.05174694210290909, "learning_rate": 0.0001561764705882353, "loss": 0.0234, "step": 2947 }, { "epoch": 26.32, "grad_norm": 0.05397618189454079, "learning_rate": 0.0001561176470588235, "loss": 0.0237, "step": 
2948 }, { "epoch": 26.33, "grad_norm": 0.06483880430459976, "learning_rate": 0.00015605882352941173, "loss": 0.024, "step": 2949 }, { "epoch": 26.34, "grad_norm": 0.12109798938035965, "learning_rate": 0.000156, "loss": 0.0289, "step": 2950 }, { "epoch": 26.35, "grad_norm": 0.050347570329904556, "learning_rate": 0.0001559411764705882, "loss": 0.0257, "step": 2951 }, { "epoch": 26.36, "grad_norm": 0.057522788643836975, "learning_rate": 0.00015588235294117646, "loss": 0.0237, "step": 2952 }, { "epoch": 26.37, "grad_norm": 0.049146901816129684, "learning_rate": 0.0001558235294117647, "loss": 0.0197, "step": 2953 }, { "epoch": 26.38, "grad_norm": 0.04698359966278076, "learning_rate": 0.00015576470588235292, "loss": 0.018, "step": 2954 }, { "epoch": 26.38, "grad_norm": 0.07178395241498947, "learning_rate": 0.00015570588235294116, "loss": 0.024, "step": 2955 }, { "epoch": 26.39, "grad_norm": 0.10159935057163239, "learning_rate": 0.0001556470588235294, "loss": 0.0315, "step": 2956 }, { "epoch": 26.4, "grad_norm": 0.05663328990340233, "learning_rate": 0.00015558823529411765, "loss": 0.0235, "step": 2957 }, { "epoch": 26.41, "grad_norm": 0.05715930089354515, "learning_rate": 0.00015552941176470586, "loss": 0.0254, "step": 2958 }, { "epoch": 26.42, "grad_norm": 0.06861431896686554, "learning_rate": 0.00015547058823529408, "loss": 0.0208, "step": 2959 }, { "epoch": 26.43, "grad_norm": 0.05948348343372345, "learning_rate": 0.00015541176470588235, "loss": 0.0209, "step": 2960 }, { "epoch": 26.44, "grad_norm": 0.0590101033449173, "learning_rate": 0.00015535294117647057, "loss": 0.0242, "step": 2961 }, { "epoch": 26.45, "grad_norm": 0.09696076065301895, "learning_rate": 0.00015529411764705884, "loss": 0.0324, "step": 2962 }, { "epoch": 26.46, "grad_norm": 0.04854610934853554, "learning_rate": 0.00015523529411764705, "loss": 0.024, "step": 2963 }, { "epoch": 26.46, "grad_norm": 0.04646662622690201, "learning_rate": 0.00015517647058823527, "loss": 0.0225, "step": 2964 }, { "epoch": 
26.47, "grad_norm": 0.05486273020505905, "learning_rate": 0.0001551176470588235, "loss": 0.0248, "step": 2965 }, { "epoch": 26.48, "grad_norm": 0.049627941101789474, "learning_rate": 0.00015505882352941176, "loss": 0.0208, "step": 2966 }, { "epoch": 26.49, "grad_norm": 0.05518784373998642, "learning_rate": 0.000155, "loss": 0.0248, "step": 2967 }, { "epoch": 26.5, "grad_norm": 0.07837014645338058, "learning_rate": 0.00015494117647058822, "loss": 0.0265, "step": 2968 }, { "epoch": 26.51, "grad_norm": 0.11061987280845642, "learning_rate": 0.00015488235294117646, "loss": 0.0301, "step": 2969 }, { "epoch": 26.52, "grad_norm": 0.050842780619859695, "learning_rate": 0.0001548235294117647, "loss": 0.0212, "step": 2970 }, { "epoch": 26.53, "grad_norm": 0.05200947821140289, "learning_rate": 0.00015476470588235292, "loss": 0.0231, "step": 2971 }, { "epoch": 26.54, "grad_norm": 0.0542827807366848, "learning_rate": 0.0001547058823529412, "loss": 0.0251, "step": 2972 }, { "epoch": 26.54, "grad_norm": 0.05743546783924103, "learning_rate": 0.0001546470588235294, "loss": 0.0236, "step": 2973 }, { "epoch": 26.55, "grad_norm": 0.06169158220291138, "learning_rate": 0.00015458823529411762, "loss": 0.0196, "step": 2974 }, { "epoch": 26.56, "grad_norm": 0.08541537076234818, "learning_rate": 0.00015452941176470587, "loss": 0.03, "step": 2975 }, { "epoch": 26.57, "grad_norm": 0.053783103823661804, "learning_rate": 0.0001544705882352941, "loss": 0.0236, "step": 2976 }, { "epoch": 26.58, "grad_norm": 0.05454250052571297, "learning_rate": 0.00015441176470588233, "loss": 0.023, "step": 2977 }, { "epoch": 26.59, "grad_norm": 0.06030486896634102, "learning_rate": 0.00015435294117647057, "loss": 0.0263, "step": 2978 }, { "epoch": 26.6, "grad_norm": 0.05425736680626869, "learning_rate": 0.0001542941176470588, "loss": 0.024, "step": 2979 }, { "epoch": 26.61, "grad_norm": 0.05609467625617981, "learning_rate": 0.00015423529411764706, "loss": 0.0179, "step": 2980 }, { "epoch": 26.62, "grad_norm": 
0.19363701343536377, "learning_rate": 0.00015417647058823527, "loss": 0.031, "step": 2981 }, { "epoch": 26.62, "grad_norm": 0.050824277102947235, "learning_rate": 0.0001541176470588235, "loss": 0.0233, "step": 2982 }, { "epoch": 26.63, "grad_norm": 0.05721789225935936, "learning_rate": 0.00015405882352941176, "loss": 0.0257, "step": 2983 }, { "epoch": 26.64, "grad_norm": 0.05407975986599922, "learning_rate": 0.00015399999999999998, "loss": 0.0224, "step": 2984 }, { "epoch": 26.65, "grad_norm": 0.05091584846377373, "learning_rate": 0.00015394117647058825, "loss": 0.0229, "step": 2985 }, { "epoch": 26.66, "grad_norm": 0.057925041764974594, "learning_rate": 0.00015388235294117646, "loss": 0.0265, "step": 2986 }, { "epoch": 26.67, "grad_norm": 0.10425586253404617, "learning_rate": 0.00015382352941176468, "loss": 0.0312, "step": 2987 }, { "epoch": 26.68, "grad_norm": 0.054229021072387695, "learning_rate": 0.00015376470588235292, "loss": 0.0257, "step": 2988 }, { "epoch": 26.69, "grad_norm": 0.05078696832060814, "learning_rate": 0.00015370588235294117, "loss": 0.0218, "step": 2989 }, { "epoch": 26.7, "grad_norm": 0.05847737938165665, "learning_rate": 0.0001536470588235294, "loss": 0.0263, "step": 2990 }, { "epoch": 26.71, "grad_norm": 0.047788508236408234, "learning_rate": 0.00015358823529411763, "loss": 0.021, "step": 2991 }, { "epoch": 26.71, "grad_norm": 0.059393640607595444, "learning_rate": 0.00015352941176470587, "loss": 0.0226, "step": 2992 }, { "epoch": 26.72, "grad_norm": 0.08014336973428726, "learning_rate": 0.0001534705882352941, "loss": 0.0284, "step": 2993 }, { "epoch": 26.73, "grad_norm": 0.07928035408258438, "learning_rate": 0.00015341176470588233, "loss": 0.026, "step": 2994 }, { "epoch": 26.74, "grad_norm": 0.05100613459944725, "learning_rate": 0.0001533529411764706, "loss": 0.0237, "step": 2995 }, { "epoch": 26.75, "grad_norm": 0.05011659488081932, "learning_rate": 0.00015329411764705882, "loss": 0.0225, "step": 2996 }, { "epoch": 26.76, "grad_norm": 
0.06029354780912399, "learning_rate": 0.00015323529411764703, "loss": 0.024, "step": 2997 }, { "epoch": 26.77, "grad_norm": 0.05785534158349037, "learning_rate": 0.0001531764705882353, "loss": 0.023, "step": 2998 }, { "epoch": 26.78, "grad_norm": 0.07849323004484177, "learning_rate": 0.00015311764705882352, "loss": 0.0296, "step": 2999 }, { "epoch": 26.79, "grad_norm": 0.08631402999162674, "learning_rate": 0.00015305882352941176, "loss": 0.0265, "step": 3000 }, { "epoch": 26.79, "eval_cer": 0.033731344405482706, "eval_loss": 0.19908082485198975, "eval_runtime": 22.4013, "eval_samples_per_second": 117.94, "eval_steps_per_second": 1.875, "eval_wer": 0.10959936533121777, "step": 3000 }, { "epoch": 26.79, "grad_norm": 0.054585572332143784, "learning_rate": 0.00015299999999999998, "loss": 0.0245, "step": 3001 }, { "epoch": 26.8, "grad_norm": 0.05038483068346977, "learning_rate": 0.00015294117647058822, "loss": 0.0283, "step": 3002 }, { "epoch": 26.81, "grad_norm": 0.05247160419821739, "learning_rate": 0.00015288235294117647, "loss": 0.0241, "step": 3003 }, { "epoch": 26.82, "grad_norm": 0.052994318306446075, "learning_rate": 0.00015282352941176468, "loss": 0.0229, "step": 3004 }, { "epoch": 26.83, "grad_norm": 0.05985628813505173, "learning_rate": 0.0001527647058823529, "loss": 0.0246, "step": 3005 }, { "epoch": 26.84, "grad_norm": 0.11439044028520584, "learning_rate": 0.00015270588235294117, "loss": 0.0347, "step": 3006 }, { "epoch": 26.85, "grad_norm": 0.046517860144376755, "learning_rate": 0.00015264705882352938, "loss": 0.0224, "step": 3007 }, { "epoch": 26.86, "grad_norm": 0.04747984558343887, "learning_rate": 0.00015258823529411766, "loss": 0.0192, "step": 3008 }, { "epoch": 26.87, "grad_norm": 0.05847001448273659, "learning_rate": 0.00015252941176470587, "loss": 0.0269, "step": 3009 }, { "epoch": 26.88, "grad_norm": 0.06655603647232056, "learning_rate": 0.0001524705882352941, "loss": 0.0245, "step": 3010 }, { "epoch": 26.88, "grad_norm": 0.05660974979400635, 
"learning_rate": 0.00015241176470588233, "loss": 0.0215, "step": 3011 }, { "epoch": 26.89, "grad_norm": 0.1280333697795868, "learning_rate": 0.00015235294117647057, "loss": 0.0354, "step": 3012 }, { "epoch": 26.9, "grad_norm": 0.052807681262493134, "learning_rate": 0.00015229411764705882, "loss": 0.0254, "step": 3013 }, { "epoch": 26.91, "grad_norm": 0.05705934017896652, "learning_rate": 0.00015223529411764703, "loss": 0.0215, "step": 3014 }, { "epoch": 26.92, "grad_norm": 0.05490132048726082, "learning_rate": 0.00015217647058823528, "loss": 0.0203, "step": 3015 }, { "epoch": 26.93, "grad_norm": 0.05354372784495354, "learning_rate": 0.00015211764705882352, "loss": 0.025, "step": 3016 }, { "epoch": 26.94, "grad_norm": 0.06647264212369919, "learning_rate": 0.00015205882352941174, "loss": 0.0225, "step": 3017 }, { "epoch": 26.95, "grad_norm": 0.06453800201416016, "learning_rate": 0.000152, "loss": 0.0228, "step": 3018 }, { "epoch": 26.96, "grad_norm": 0.07514040917158127, "learning_rate": 0.00015194117647058822, "loss": 0.0257, "step": 3019 }, { "epoch": 26.96, "grad_norm": 0.061309799551963806, "learning_rate": 0.00015188235294117644, "loss": 0.0229, "step": 3020 }, { "epoch": 26.97, "grad_norm": 0.05893779918551445, "learning_rate": 0.0001518235294117647, "loss": 0.0254, "step": 3021 }, { "epoch": 26.98, "grad_norm": 0.059200119227170944, "learning_rate": 0.00015176470588235293, "loss": 0.0198, "step": 3022 }, { "epoch": 26.99, "grad_norm": 0.06384547054767609, "learning_rate": 0.00015170588235294117, "loss": 0.0237, "step": 3023 }, { "epoch": 27.0, "grad_norm": 0.08574508130550385, "learning_rate": 0.0001516470588235294, "loss": 0.0257, "step": 3024 }, { "epoch": 27.01, "grad_norm": 0.050044167786836624, "learning_rate": 0.00015158823529411763, "loss": 0.023, "step": 3025 }, { "epoch": 27.02, "grad_norm": 0.05156770721077919, "learning_rate": 0.00015152941176470587, "loss": 0.0239, "step": 3026 }, { "epoch": 27.03, "grad_norm": 0.04528946802020073, "learning_rate": 
0.0001514705882352941, "loss": 0.0198, "step": 3027 }, { "epoch": 27.04, "grad_norm": 0.055816467851400375, "learning_rate": 0.00015141176470588236, "loss": 0.0214, "step": 3028 }, { "epoch": 27.04, "grad_norm": 0.04916231334209442, "learning_rate": 0.00015135294117647058, "loss": 0.0208, "step": 3029 }, { "epoch": 27.05, "grad_norm": 0.08857579529285431, "learning_rate": 0.0001512941176470588, "loss": 0.0247, "step": 3030 }, { "epoch": 27.06, "grad_norm": 0.06720104813575745, "learning_rate": 0.00015123529411764706, "loss": 0.0219, "step": 3031 }, { "epoch": 27.07, "grad_norm": 0.055760834366083145, "learning_rate": 0.00015117647058823528, "loss": 0.0224, "step": 3032 }, { "epoch": 27.08, "grad_norm": 0.05123697221279144, "learning_rate": 0.00015111764705882352, "loss": 0.0233, "step": 3033 }, { "epoch": 27.09, "grad_norm": 0.050634220242500305, "learning_rate": 0.00015105882352941177, "loss": 0.0198, "step": 3034 }, { "epoch": 27.1, "grad_norm": 0.058983735740184784, "learning_rate": 0.00015099999999999998, "loss": 0.023, "step": 3035 }, { "epoch": 27.11, "grad_norm": 0.057488929480314255, "learning_rate": 0.00015094117647058823, "loss": 0.0201, "step": 3036 }, { "epoch": 27.12, "grad_norm": 0.09951991587877274, "learning_rate": 0.00015088235294117644, "loss": 0.0283, "step": 3037 }, { "epoch": 27.12, "grad_norm": 0.048263560980558395, "learning_rate": 0.0001508235294117647, "loss": 0.0226, "step": 3038 }, { "epoch": 27.13, "grad_norm": 0.055034998804330826, "learning_rate": 0.00015076470588235293, "loss": 0.0269, "step": 3039 }, { "epoch": 27.14, "grad_norm": 0.05377781391143799, "learning_rate": 0.00015070588235294115, "loss": 0.0228, "step": 3040 }, { "epoch": 27.15, "grad_norm": 0.05008714646100998, "learning_rate": 0.00015064705882352942, "loss": 0.0217, "step": 3041 }, { "epoch": 27.16, "grad_norm": 0.06198955699801445, "learning_rate": 0.00015058823529411763, "loss": 0.0228, "step": 3042 }, { "epoch": 27.17, "grad_norm": 0.09308627992868423, 
"learning_rate": 0.00015052941176470585, "loss": 0.0342, "step": 3043 }, { "epoch": 27.18, "grad_norm": 0.05396142601966858, "learning_rate": 0.00015047058823529412, "loss": 0.0231, "step": 3044 }, { "epoch": 27.19, "grad_norm": 0.04787048324942589, "learning_rate": 0.00015041176470588234, "loss": 0.0231, "step": 3045 }, { "epoch": 27.2, "grad_norm": 0.05582379549741745, "learning_rate": 0.00015035294117647058, "loss": 0.0259, "step": 3046 }, { "epoch": 27.21, "grad_norm": 0.04990950971841812, "learning_rate": 0.0001502941176470588, "loss": 0.0218, "step": 3047 }, { "epoch": 27.21, "grad_norm": 0.05668466538190842, "learning_rate": 0.00015023529411764704, "loss": 0.0234, "step": 3048 }, { "epoch": 27.22, "grad_norm": 0.1383943408727646, "learning_rate": 0.00015017647058823528, "loss": 0.037, "step": 3049 }, { "epoch": 27.23, "grad_norm": 0.05292258784174919, "learning_rate": 0.0001501176470588235, "loss": 0.0249, "step": 3050 }, { "epoch": 27.24, "grad_norm": 0.050082866102457047, "learning_rate": 0.00015005882352941177, "loss": 0.025, "step": 3051 }, { "epoch": 27.25, "grad_norm": 0.05611185356974602, "learning_rate": 0.00015, "loss": 0.0263, "step": 3052 }, { "epoch": 27.26, "grad_norm": 0.047544948756694794, "learning_rate": 0.00014994117647058823, "loss": 0.0213, "step": 3053 }, { "epoch": 27.27, "grad_norm": 0.054379239678382874, "learning_rate": 0.00014988235294117647, "loss": 0.0194, "step": 3054 }, { "epoch": 27.28, "grad_norm": 0.12211582809686661, "learning_rate": 0.0001498235294117647, "loss": 0.0293, "step": 3055 }, { "epoch": 27.29, "grad_norm": 0.07340998947620392, "learning_rate": 0.00014976470588235293, "loss": 0.0277, "step": 3056 }, { "epoch": 27.29, "grad_norm": 0.04934728890657425, "learning_rate": 0.00014970588235294118, "loss": 0.0217, "step": 3057 }, { "epoch": 27.3, "grad_norm": 0.06107524037361145, "learning_rate": 0.0001496470588235294, "loss": 0.0266, "step": 3058 }, { "epoch": 27.31, "grad_norm": 0.046629469841718674, "learning_rate": 
0.00014958823529411764, "loss": 0.0175, "step": 3059 }, { "epoch": 27.32, "grad_norm": 0.05639394000172615, "learning_rate": 0.00014952941176470585, "loss": 0.0219, "step": 3060 }, { "epoch": 27.33, "grad_norm": 0.07006697356700897, "learning_rate": 0.0001494705882352941, "loss": 0.0248, "step": 3061 }, { "epoch": 27.34, "grad_norm": 0.06783292442560196, "learning_rate": 0.00014941176470588234, "loss": 0.0253, "step": 3062 }, { "epoch": 27.35, "grad_norm": 0.059967514127492905, "learning_rate": 0.00014935294117647058, "loss": 0.023, "step": 3063 }, { "epoch": 27.36, "grad_norm": 0.050735682249069214, "learning_rate": 0.0001492941176470588, "loss": 0.0219, "step": 3064 }, { "epoch": 27.37, "grad_norm": 0.056490566581487656, "learning_rate": 0.00014923529411764704, "loss": 0.023, "step": 3065 }, { "epoch": 27.38, "grad_norm": 0.06102607026696205, "learning_rate": 0.0001491764705882353, "loss": 0.0252, "step": 3066 }, { "epoch": 27.38, "grad_norm": 0.0579913966357708, "learning_rate": 0.00014911764705882353, "loss": 0.0167, "step": 3067 }, { "epoch": 27.39, "grad_norm": 0.10275489836931229, "learning_rate": 0.00014905882352941175, "loss": 0.0318, "step": 3068 }, { "epoch": 27.4, "grad_norm": 0.04819761961698532, "learning_rate": 0.000149, "loss": 0.0224, "step": 3069 }, { "epoch": 27.41, "grad_norm": 0.04517188295722008, "learning_rate": 0.0001489411764705882, "loss": 0.0229, "step": 3070 }, { "epoch": 27.42, "grad_norm": 0.051060132682323456, "learning_rate": 0.00014888235294117645, "loss": 0.02, "step": 3071 }, { "epoch": 27.43, "grad_norm": 0.05070464685559273, "learning_rate": 0.0001488235294117647, "loss": 0.0226, "step": 3072 }, { "epoch": 27.44, "grad_norm": 0.05855333060026169, "learning_rate": 0.00014876470588235294, "loss": 0.0204, "step": 3073 }, { "epoch": 27.45, "grad_norm": 0.09937647730112076, "learning_rate": 0.00014870588235294115, "loss": 0.031, "step": 3074 }, { "epoch": 27.46, "grad_norm": 0.047324638813734055, "learning_rate": 
0.0001486470588235294, "loss": 0.0216, "step": 3075 }, { "epoch": 27.46, "grad_norm": 0.05366073548793793, "learning_rate": 0.00014858823529411764, "loss": 0.0219, "step": 3076 }, { "epoch": 27.47, "grad_norm": 0.05626094341278076, "learning_rate": 0.00014852941176470588, "loss": 0.0242, "step": 3077 }, { "epoch": 27.48, "grad_norm": 0.06329621374607086, "learning_rate": 0.0001484705882352941, "loss": 0.0213, "step": 3078 }, { "epoch": 27.49, "grad_norm": 0.056740060448646545, "learning_rate": 0.00014841176470588234, "loss": 0.0208, "step": 3079 }, { "epoch": 27.5, "grad_norm": 0.07845351099967957, "learning_rate": 0.00014835294117647059, "loss": 0.0248, "step": 3080 }, { "epoch": 27.51, "grad_norm": 0.07826192677021027, "learning_rate": 0.0001482941176470588, "loss": 0.026, "step": 3081 }, { "epoch": 27.52, "grad_norm": 0.05459087714552879, "learning_rate": 0.00014823529411764705, "loss": 0.0255, "step": 3082 }, { "epoch": 27.53, "grad_norm": 0.05910929664969444, "learning_rate": 0.00014817647058823526, "loss": 0.0248, "step": 3083 }, { "epoch": 27.54, "grad_norm": 0.04926055669784546, "learning_rate": 0.0001481176470588235, "loss": 0.0221, "step": 3084 }, { "epoch": 27.54, "grad_norm": 0.052828963845968246, "learning_rate": 0.00014805882352941175, "loss": 0.0197, "step": 3085 }, { "epoch": 27.55, "grad_norm": 0.06274675577878952, "learning_rate": 0.000148, "loss": 0.0247, "step": 3086 }, { "epoch": 27.56, "grad_norm": 0.06828172504901886, "learning_rate": 0.00014794117647058824, "loss": 0.028, "step": 3087 }, { "epoch": 27.57, "grad_norm": 0.05155690386891365, "learning_rate": 0.00014788235294117645, "loss": 0.0256, "step": 3088 }, { "epoch": 27.58, "grad_norm": 0.047592706978321075, "learning_rate": 0.0001478235294117647, "loss": 0.021, "step": 3089 }, { "epoch": 27.59, "grad_norm": 0.056468334048986435, "learning_rate": 0.00014776470588235294, "loss": 0.0212, "step": 3090 }, { "epoch": 27.6, "grad_norm": 0.05021779611706734, "learning_rate": 
0.00014770588235294116, "loss": 0.0197, "step": 3091 }, { "epoch": 27.61, "grad_norm": 0.06249059736728668, "learning_rate": 0.0001476470588235294, "loss": 0.0225, "step": 3092 }, { "epoch": 27.62, "grad_norm": 0.11793142557144165, "learning_rate": 0.00014758823529411764, "loss": 0.0356, "step": 3093 }, { "epoch": 27.62, "grad_norm": 0.053746987134218216, "learning_rate": 0.00014752941176470586, "loss": 0.021, "step": 3094 }, { "epoch": 27.63, "grad_norm": 0.0484970398247242, "learning_rate": 0.0001474705882352941, "loss": 0.0222, "step": 3095 }, { "epoch": 27.64, "grad_norm": 0.05492851883172989, "learning_rate": 0.00014741176470588235, "loss": 0.0237, "step": 3096 }, { "epoch": 27.65, "grad_norm": 0.06060340628027916, "learning_rate": 0.00014735294117647056, "loss": 0.024, "step": 3097 }, { "epoch": 27.66, "grad_norm": 0.06306329369544983, "learning_rate": 0.0001472941176470588, "loss": 0.0231, "step": 3098 }, { "epoch": 27.67, "grad_norm": 0.0970316007733345, "learning_rate": 0.00014723529411764705, "loss": 0.0279, "step": 3099 }, { "epoch": 27.68, "grad_norm": 0.04738640785217285, "learning_rate": 0.0001471764705882353, "loss": 0.0223, "step": 3100 }, { "epoch": 27.68, "eval_cer": 0.034483017818418255, "eval_loss": 0.1947711706161499, "eval_runtime": 22.3359, "eval_samples_per_second": 118.285, "eval_steps_per_second": 1.88, "eval_wer": 0.11069020230067433, "step": 3100 }, { "epoch": 27.69, "grad_norm": 0.050134338438510895, "learning_rate": 0.00014711764705882354, "loss": 0.0232, "step": 3101 }, { "epoch": 27.7, "grad_norm": 0.046567123383283615, "learning_rate": 0.00014705882352941175, "loss": 0.0198, "step": 3102 }, { "epoch": 27.71, "grad_norm": 0.04978331923484802, "learning_rate": 0.000147, "loss": 0.0177, "step": 3103 }, { "epoch": 27.71, "grad_norm": 0.05636028200387955, "learning_rate": 0.0001469411764705882, "loss": 0.0236, "step": 3104 }, { "epoch": 27.72, "grad_norm": 0.09316873550415039, "learning_rate": 0.00014688235294117646, "loss": 0.0271, 
"step": 3105 }, { "epoch": 27.73, "grad_norm": 0.07818813621997833, "learning_rate": 0.0001468235294117647, "loss": 0.0263, "step": 3106 }, { "epoch": 27.74, "grad_norm": 0.04544926434755325, "learning_rate": 0.00014676470588235291, "loss": 0.0207, "step": 3107 }, { "epoch": 27.75, "grad_norm": 0.0495959110558033, "learning_rate": 0.00014670588235294116, "loss": 0.0201, "step": 3108 }, { "epoch": 27.76, "grad_norm": 0.05512333661317825, "learning_rate": 0.0001466470588235294, "loss": 0.0243, "step": 3109 }, { "epoch": 27.77, "grad_norm": 0.04825921729207039, "learning_rate": 0.00014658823529411765, "loss": 0.0197, "step": 3110 }, { "epoch": 27.78, "grad_norm": 0.0721101313829422, "learning_rate": 0.00014652941176470586, "loss": 0.026, "step": 3111 }, { "epoch": 27.79, "grad_norm": 0.08447795361280441, "learning_rate": 0.0001464705882352941, "loss": 0.0264, "step": 3112 }, { "epoch": 27.79, "grad_norm": 0.054003193974494934, "learning_rate": 0.00014641176470588235, "loss": 0.0225, "step": 3113 }, { "epoch": 27.8, "grad_norm": 0.05464816093444824, "learning_rate": 0.0001463529411764706, "loss": 0.0245, "step": 3114 }, { "epoch": 27.81, "grad_norm": 0.05618251860141754, "learning_rate": 0.0001462941176470588, "loss": 0.0225, "step": 3115 }, { "epoch": 27.82, "grad_norm": 0.06628713756799698, "learning_rate": 0.00014623529411764705, "loss": 0.0249, "step": 3116 }, { "epoch": 27.83, "grad_norm": 0.06224177032709122, "learning_rate": 0.00014617647058823527, "loss": 0.0215, "step": 3117 }, { "epoch": 27.84, "grad_norm": 0.11041702330112457, "learning_rate": 0.0001461176470588235, "loss": 0.0311, "step": 3118 }, { "epoch": 27.85, "grad_norm": 0.04507511481642723, "learning_rate": 0.00014605882352941175, "loss": 0.0221, "step": 3119 }, { "epoch": 27.86, "grad_norm": 0.05189411714673042, "learning_rate": 0.000146, "loss": 0.0228, "step": 3120 }, { "epoch": 27.87, "grad_norm": 0.05701563507318497, "learning_rate": 0.00014594117647058821, "loss": 0.0246, "step": 3121 }, { 
"epoch": 27.88, "grad_norm": 0.04980058595538139, "learning_rate": 0.00014588235294117646, "loss": 0.0204, "step": 3122 }, { "epoch": 27.88, "grad_norm": 0.06099824979901314, "learning_rate": 0.0001458235294117647, "loss": 0.0223, "step": 3123 }, { "epoch": 27.89, "grad_norm": 0.1298723965883255, "learning_rate": 0.00014576470588235294, "loss": 0.0336, "step": 3124 }, { "epoch": 27.9, "grad_norm": 0.04897035285830498, "learning_rate": 0.00014570588235294116, "loss": 0.0217, "step": 3125 }, { "epoch": 27.91, "grad_norm": 0.04782100021839142, "learning_rate": 0.0001456470588235294, "loss": 0.0235, "step": 3126 }, { "epoch": 27.92, "grad_norm": 0.06138615682721138, "learning_rate": 0.00014558823529411762, "loss": 0.0274, "step": 3127 }, { "epoch": 27.93, "grad_norm": 0.06044183671474457, "learning_rate": 0.00014552941176470586, "loss": 0.0235, "step": 3128 }, { "epoch": 27.94, "grad_norm": 0.05670559033751488, "learning_rate": 0.0001454705882352941, "loss": 0.0184, "step": 3129 }, { "epoch": 27.95, "grad_norm": 0.07986319810152054, "learning_rate": 0.00014541176470588232, "loss": 0.0225, "step": 3130 }, { "epoch": 27.96, "grad_norm": 0.1061500832438469, "learning_rate": 0.00014535294117647057, "loss": 0.0319, "step": 3131 }, { "epoch": 27.96, "grad_norm": 0.049021266400814056, "learning_rate": 0.0001452941176470588, "loss": 0.0226, "step": 3132 }, { "epoch": 27.97, "grad_norm": 0.05631643906235695, "learning_rate": 0.00014523529411764705, "loss": 0.0264, "step": 3133 }, { "epoch": 27.98, "grad_norm": 0.051798731088638306, "learning_rate": 0.0001451764705882353, "loss": 0.0207, "step": 3134 }, { "epoch": 27.99, "grad_norm": 0.06304339319467545, "learning_rate": 0.00014511764705882351, "loss": 0.0206, "step": 3135 }, { "epoch": 28.0, "grad_norm": 0.07847974449396133, "learning_rate": 0.00014505882352941176, "loss": 0.0228, "step": 3136 }, { "epoch": 28.01, "grad_norm": 0.0499185211956501, "learning_rate": 0.000145, "loss": 0.0236, "step": 3137 }, { "epoch": 28.02, 
"grad_norm": 0.049133725464344025, "learning_rate": 0.00014494117647058822, "loss": 0.021, "step": 3138 }, { "epoch": 28.03, "grad_norm": 0.05714169517159462, "learning_rate": 0.00014488235294117646, "loss": 0.0273, "step": 3139 }, { "epoch": 28.04, "grad_norm": 0.0510253980755806, "learning_rate": 0.00014482352941176468, "loss": 0.0211, "step": 3140 }, { "epoch": 28.04, "grad_norm": 0.05600978434085846, "learning_rate": 0.00014476470588235292, "loss": 0.0237, "step": 3141 }, { "epoch": 28.05, "grad_norm": 0.08738350123167038, "learning_rate": 0.00014470588235294116, "loss": 0.0289, "step": 3142 }, { "epoch": 28.06, "grad_norm": 0.08066991716623306, "learning_rate": 0.0001446470588235294, "loss": 0.0273, "step": 3143 }, { "epoch": 28.07, "grad_norm": 0.04763602092862129, "learning_rate": 0.00014458823529411762, "loss": 0.0246, "step": 3144 }, { "epoch": 28.08, "grad_norm": 0.047240495681762695, "learning_rate": 0.00014452941176470587, "loss": 0.0214, "step": 3145 }, { "epoch": 28.09, "grad_norm": 0.04805517569184303, "learning_rate": 0.0001444705882352941, "loss": 0.0225, "step": 3146 }, { "epoch": 28.1, "grad_norm": 0.049226801842451096, "learning_rate": 0.00014441176470588235, "loss": 0.0204, "step": 3147 }, { "epoch": 28.11, "grad_norm": 0.06287840008735657, "learning_rate": 0.00014435294117647057, "loss": 0.0222, "step": 3148 }, { "epoch": 28.12, "grad_norm": 0.06693531572818756, "learning_rate": 0.00014429411764705881, "loss": 0.0222, "step": 3149 }, { "epoch": 28.12, "grad_norm": 0.04513375088572502, "learning_rate": 0.00014423529411764703, "loss": 0.0194, "step": 3150 }, { "epoch": 28.13, "grad_norm": 0.04871902987360954, "learning_rate": 0.00014417647058823527, "loss": 0.0219, "step": 3151 }, { "epoch": 28.14, "grad_norm": 0.05776379629969597, "learning_rate": 0.00014411764705882352, "loss": 0.0233, "step": 3152 }, { "epoch": 28.15, "grad_norm": 0.055099520832300186, "learning_rate": 0.00014405882352941176, "loss": 0.0243, "step": 3153 }, { "epoch": 28.16, 
"grad_norm": 0.055144671350717545, "learning_rate": 0.00014399999999999998, "loss": 0.0174, "step": 3154 }, { "epoch": 28.17, "grad_norm": 0.08644551038742065, "learning_rate": 0.00014394117647058822, "loss": 0.0229, "step": 3155 }, { "epoch": 28.18, "grad_norm": 0.05233171582221985, "learning_rate": 0.00014388235294117646, "loss": 0.0221, "step": 3156 }, { "epoch": 28.19, "grad_norm": 0.05046974867582321, "learning_rate": 0.0001438235294117647, "loss": 0.021, "step": 3157 }, { "epoch": 28.2, "grad_norm": 0.051787909120321274, "learning_rate": 0.00014376470588235292, "loss": 0.0208, "step": 3158 }, { "epoch": 28.21, "grad_norm": 0.052158426493406296, "learning_rate": 0.00014370588235294117, "loss": 0.0182, "step": 3159 }, { "epoch": 28.21, "grad_norm": 0.05279925838112831, "learning_rate": 0.0001436470588235294, "loss": 0.0183, "step": 3160 }, { "epoch": 28.22, "grad_norm": 0.1067124456167221, "learning_rate": 0.00014358823529411763, "loss": 0.0253, "step": 3161 }, { "epoch": 28.23, "grad_norm": 0.05293858423829079, "learning_rate": 0.00014352941176470587, "loss": 0.023, "step": 3162 }, { "epoch": 28.24, "grad_norm": 0.0508616678416729, "learning_rate": 0.00014347058823529409, "loss": 0.0244, "step": 3163 }, { "epoch": 28.25, "grad_norm": 0.050752900540828705, "learning_rate": 0.00014341176470588233, "loss": 0.0204, "step": 3164 }, { "epoch": 28.26, "grad_norm": 0.04990452527999878, "learning_rate": 0.00014335294117647057, "loss": 0.0204, "step": 3165 }, { "epoch": 28.27, "grad_norm": 0.053398337215185165, "learning_rate": 0.00014329411764705882, "loss": 0.0208, "step": 3166 }, { "epoch": 28.28, "grad_norm": 0.07779485732316971, "learning_rate": 0.00014323529411764706, "loss": 0.0235, "step": 3167 }, { "epoch": 28.29, "grad_norm": 0.08986517041921616, "learning_rate": 0.00014317647058823528, "loss": 0.0256, "step": 3168 }, { "epoch": 28.29, "grad_norm": 0.044942453503608704, "learning_rate": 0.00014311764705882352, "loss": 0.0213, "step": 3169 }, { "epoch": 28.3, 
"grad_norm": 0.05150241032242775, "learning_rate": 0.00014305882352941176, "loss": 0.0204, "step": 3170 }, { "epoch": 28.31, "grad_norm": 0.0570935495197773, "learning_rate": 0.00014299999999999998, "loss": 0.022, "step": 3171 }, { "epoch": 28.32, "grad_norm": 0.05327785015106201, "learning_rate": 0.00014294117647058822, "loss": 0.0188, "step": 3172 }, { "epoch": 28.33, "grad_norm": 0.07646356523036957, "learning_rate": 0.00014288235294117647, "loss": 0.0222, "step": 3173 }, { "epoch": 28.34, "grad_norm": 0.09917483478784561, "learning_rate": 0.00014282352941176468, "loss": 0.0308, "step": 3174 }, { "epoch": 28.35, "grad_norm": 0.04881855845451355, "learning_rate": 0.00014276470588235293, "loss": 0.0191, "step": 3175 }, { "epoch": 28.36, "grad_norm": 0.0523366816341877, "learning_rate": 0.00014270588235294117, "loss": 0.0247, "step": 3176 }, { "epoch": 28.37, "grad_norm": 0.058703478425741196, "learning_rate": 0.00014264705882352939, "loss": 0.023, "step": 3177 }, { "epoch": 28.38, "grad_norm": 0.04521499201655388, "learning_rate": 0.00014258823529411763, "loss": 0.0193, "step": 3178 }, { "epoch": 28.38, "grad_norm": 0.054483313113451004, "learning_rate": 0.00014252941176470587, "loss": 0.0209, "step": 3179 }, { "epoch": 28.39, "grad_norm": 0.08454073220491409, "learning_rate": 0.00014247058823529412, "loss": 0.0274, "step": 3180 }, { "epoch": 28.4, "grad_norm": 0.04647323489189148, "learning_rate": 0.00014241176470588236, "loss": 0.0236, "step": 3181 }, { "epoch": 28.41, "grad_norm": 0.04771995544433594, "learning_rate": 0.00014235294117647058, "loss": 0.0222, "step": 3182 }, { "epoch": 28.42, "grad_norm": 0.050001874566078186, "learning_rate": 0.00014229411764705882, "loss": 0.0243, "step": 3183 }, { "epoch": 28.43, "grad_norm": 0.051152534782886505, "learning_rate": 0.00014223529411764704, "loss": 0.0229, "step": 3184 }, { "epoch": 28.44, "grad_norm": 0.06005259230732918, "learning_rate": 0.00014217647058823528, "loss": 0.0234, "step": 3185 }, { "epoch": 28.45, 
"grad_norm": 0.11991535872220993, "learning_rate": 0.00014211764705882352, "loss": 0.029, "step": 3186 }, { "epoch": 28.46, "grad_norm": 0.0499068982899189, "learning_rate": 0.00014205882352941174, "loss": 0.0209, "step": 3187 }, { "epoch": 28.46, "grad_norm": 0.049948178231716156, "learning_rate": 0.00014199999999999998, "loss": 0.0231, "step": 3188 }, { "epoch": 28.47, "grad_norm": 0.0567111000418663, "learning_rate": 0.00014194117647058823, "loss": 0.0238, "step": 3189 }, { "epoch": 28.48, "grad_norm": 0.04997025057673454, "learning_rate": 0.00014188235294117647, "loss": 0.0232, "step": 3190 }, { "epoch": 28.49, "grad_norm": 0.062033213675022125, "learning_rate": 0.00014182352941176469, "loss": 0.0215, "step": 3191 }, { "epoch": 28.5, "grad_norm": 0.06742071360349655, "learning_rate": 0.00014176470588235293, "loss": 0.0227, "step": 3192 }, { "epoch": 28.51, "grad_norm": 0.057490911334753036, "learning_rate": 0.00014170588235294117, "loss": 0.023, "step": 3193 }, { "epoch": 28.52, "grad_norm": 0.05032145604491234, "learning_rate": 0.00014164705882352942, "loss": 0.0238, "step": 3194 }, { "epoch": 28.53, "grad_norm": 0.045561935752630234, "learning_rate": 0.00014158823529411763, "loss": 0.0233, "step": 3195 }, { "epoch": 28.54, "grad_norm": 0.05160699039697647, "learning_rate": 0.00014152941176470588, "loss": 0.0257, "step": 3196 }, { "epoch": 28.54, "grad_norm": 0.05225726217031479, "learning_rate": 0.0001414705882352941, "loss": 0.0179, "step": 3197 }, { "epoch": 28.55, "grad_norm": 0.06850314885377884, "learning_rate": 0.00014141176470588234, "loss": 0.0291, "step": 3198 }, { "epoch": 28.56, "grad_norm": 0.0695592612028122, "learning_rate": 0.00014135294117647058, "loss": 0.0255, "step": 3199 }, { "epoch": 28.57, "grad_norm": 0.046914249658584595, "learning_rate": 0.00014129411764705882, "loss": 0.025, "step": 3200 }, { "epoch": 28.57, "eval_cer": 0.03299094609374119, "eval_loss": 0.20456813275814056, "eval_runtime": 22.4984, "eval_samples_per_second": 117.431, 
"eval_steps_per_second": 1.867, "eval_wer": 0.10767552558508528, "step": 3200 }, { "epoch": 28.58, "grad_norm": 0.05197370797395706, "learning_rate": 0.00014123529411764704, "loss": 0.0214, "step": 3201 }, { "epoch": 28.59, "grad_norm": 0.04819939285516739, "learning_rate": 0.00014117647058823528, "loss": 0.0221, "step": 3202 }, { "epoch": 28.6, "grad_norm": 0.05303650721907616, "learning_rate": 0.00014111764705882353, "loss": 0.0229, "step": 3203 }, { "epoch": 28.61, "grad_norm": 0.05289100483059883, "learning_rate": 0.00014105882352941177, "loss": 0.0197, "step": 3204 }, { "epoch": 28.62, "grad_norm": 0.10235423594713211, "learning_rate": 0.00014099999999999998, "loss": 0.0314, "step": 3205 }, { "epoch": 28.62, "grad_norm": 0.0496661514043808, "learning_rate": 0.00014094117647058823, "loss": 0.0215, "step": 3206 }, { "epoch": 28.63, "grad_norm": 0.047629259526729584, "learning_rate": 0.00014088235294117644, "loss": 0.019, "step": 3207 }, { "epoch": 28.64, "grad_norm": 0.05727589130401611, "learning_rate": 0.0001408235294117647, "loss": 0.0231, "step": 3208 }, { "epoch": 28.65, "grad_norm": 0.05282297357916832, "learning_rate": 0.00014076470588235293, "loss": 0.0237, "step": 3209 }, { "epoch": 28.66, "grad_norm": 0.04988083615899086, "learning_rate": 0.00014070588235294115, "loss": 0.0178, "step": 3210 }, { "epoch": 28.67, "grad_norm": 0.09559240192174911, "learning_rate": 0.0001406470588235294, "loss": 0.0331, "step": 3211 }, { "epoch": 28.68, "grad_norm": 0.05182569473981857, "learning_rate": 0.00014058823529411763, "loss": 0.02, "step": 3212 }, { "epoch": 28.69, "grad_norm": 0.05532494932413101, "learning_rate": 0.00014052941176470588, "loss": 0.0198, "step": 3213 }, { "epoch": 28.7, "grad_norm": 0.053896158933639526, "learning_rate": 0.00014047058823529412, "loss": 0.0231, "step": 3214 }, { "epoch": 28.71, "grad_norm": 0.04777355492115021, "learning_rate": 0.00014041176470588234, "loss": 0.0188, "step": 3215 }, { "epoch": 28.71, "grad_norm": 
0.05201899632811546, "learning_rate": 0.00014035294117647058, "loss": 0.0209, "step": 3216 }, { "epoch": 28.72, "grad_norm": 0.0713396966457367, "learning_rate": 0.00014029411764705882, "loss": 0.0261, "step": 3217 }, { "epoch": 28.73, "grad_norm": 0.08003824949264526, "learning_rate": 0.00014023529411764704, "loss": 0.0246, "step": 3218 }, { "epoch": 28.74, "grad_norm": 0.04647595062851906, "learning_rate": 0.00014017647058823528, "loss": 0.0243, "step": 3219 }, { "epoch": 28.75, "grad_norm": 0.053717732429504395, "learning_rate": 0.0001401176470588235, "loss": 0.0256, "step": 3220 }, { "epoch": 28.76, "grad_norm": 0.05227770283818245, "learning_rate": 0.00014005882352941174, "loss": 0.0222, "step": 3221 }, { "epoch": 28.77, "grad_norm": 0.0462813600897789, "learning_rate": 0.00014, "loss": 0.0194, "step": 3222 }, { "epoch": 28.78, "grad_norm": 0.060716137290000916, "learning_rate": 0.00013994117647058823, "loss": 0.0261, "step": 3223 }, { "epoch": 28.79, "grad_norm": 0.07485483586788177, "learning_rate": 0.00013988235294117645, "loss": 0.0307, "step": 3224 }, { "epoch": 28.79, "grad_norm": 0.04654227942228317, "learning_rate": 0.0001398235294117647, "loss": 0.0225, "step": 3225 }, { "epoch": 28.8, "grad_norm": 0.053567755967378616, "learning_rate": 0.00013976470588235293, "loss": 0.0209, "step": 3226 }, { "epoch": 28.81, "grad_norm": 0.048128023743629456, "learning_rate": 0.00013970588235294118, "loss": 0.024, "step": 3227 }, { "epoch": 28.82, "grad_norm": 0.057889826595783234, "learning_rate": 0.0001396470588235294, "loss": 0.0229, "step": 3228 }, { "epoch": 28.83, "grad_norm": 0.052900783717632294, "learning_rate": 0.00013958823529411764, "loss": 0.0196, "step": 3229 }, { "epoch": 28.84, "grad_norm": 0.0898534432053566, "learning_rate": 0.00013952941176470585, "loss": 0.0254, "step": 3230 }, { "epoch": 28.85, "grad_norm": 0.05033555254340172, "learning_rate": 0.0001394705882352941, "loss": 0.0216, "step": 3231 }, { "epoch": 28.86, "grad_norm": 
0.05251815915107727, "learning_rate": 0.00013941176470588234, "loss": 0.0218, "step": 3232 }, { "epoch": 28.87, "grad_norm": 0.0519377738237381, "learning_rate": 0.00013935294117647058, "loss": 0.0218, "step": 3233 }, { "epoch": 28.88, "grad_norm": 0.0559382326900959, "learning_rate": 0.0001392941176470588, "loss": 0.0266, "step": 3234 }, { "epoch": 28.88, "grad_norm": 0.05021519958972931, "learning_rate": 0.00013923529411764704, "loss": 0.0203, "step": 3235 }, { "epoch": 28.89, "grad_norm": 0.123111791908741, "learning_rate": 0.0001391764705882353, "loss": 0.0308, "step": 3236 }, { "epoch": 28.9, "grad_norm": 0.04679188132286072, "learning_rate": 0.00013911764705882353, "loss": 0.0211, "step": 3237 }, { "epoch": 28.91, "grad_norm": 0.0480525977909565, "learning_rate": 0.00013905882352941175, "loss": 0.0225, "step": 3238 }, { "epoch": 28.92, "grad_norm": 0.04935698211193085, "learning_rate": 0.000139, "loss": 0.0196, "step": 3239 }, { "epoch": 28.93, "grad_norm": 0.051021214574575424, "learning_rate": 0.00013894117647058823, "loss": 0.0189, "step": 3240 }, { "epoch": 28.94, "grad_norm": 0.05619300529360771, "learning_rate": 0.00013888235294117645, "loss": 0.0227, "step": 3241 }, { "epoch": 28.95, "grad_norm": 0.08613027632236481, "learning_rate": 0.0001388235294117647, "loss": 0.0266, "step": 3242 }, { "epoch": 28.96, "grad_norm": 0.12794989347457886, "learning_rate": 0.0001387647058823529, "loss": 0.0283, "step": 3243 }, { "epoch": 28.96, "grad_norm": 0.05472174286842346, "learning_rate": 0.00013870588235294115, "loss": 0.024, "step": 3244 }, { "epoch": 28.97, "grad_norm": 0.05780802294611931, "learning_rate": 0.0001386470588235294, "loss": 0.0243, "step": 3245 }, { "epoch": 28.98, "grad_norm": 0.056403495371341705, "learning_rate": 0.00013858823529411764, "loss": 0.0217, "step": 3246 }, { "epoch": 28.99, "grad_norm": 0.0666576698422432, "learning_rate": 0.00013852941176470588, "loss": 0.0237, "step": 3247 }, { "epoch": 29.0, "grad_norm": 0.07459690421819687, 
"learning_rate": 0.0001384705882352941, "loss": 0.0266, "step": 3248 }, { "epoch": 29.01, "grad_norm": 0.045693084597587585, "learning_rate": 0.00013841176470588234, "loss": 0.0212, "step": 3249 }, { "epoch": 29.02, "grad_norm": 0.04682189226150513, "learning_rate": 0.0001383529411764706, "loss": 0.0211, "step": 3250 }, { "epoch": 29.03, "grad_norm": 0.047736797481775284, "learning_rate": 0.0001382941176470588, "loss": 0.0214, "step": 3251 }, { "epoch": 29.04, "grad_norm": 0.0597694106400013, "learning_rate": 0.00013823529411764705, "loss": 0.0249, "step": 3252 }, { "epoch": 29.04, "grad_norm": 0.047346778213977814, "learning_rate": 0.0001381764705882353, "loss": 0.0183, "step": 3253 }, { "epoch": 29.05, "grad_norm": 0.0749417245388031, "learning_rate": 0.0001381176470588235, "loss": 0.0281, "step": 3254 }, { "epoch": 29.06, "grad_norm": 0.06579748541116714, "learning_rate": 0.00013805882352941175, "loss": 0.0239, "step": 3255 }, { "epoch": 29.07, "grad_norm": 0.043980877846479416, "learning_rate": 0.000138, "loss": 0.0192, "step": 3256 }, { "epoch": 29.08, "grad_norm": 0.045786090195178986, "learning_rate": 0.0001379411764705882, "loss": 0.0203, "step": 3257 }, { "epoch": 29.09, "grad_norm": 0.051101308315992355, "learning_rate": 0.00013788235294117645, "loss": 0.0213, "step": 3258 }, { "epoch": 29.1, "grad_norm": 0.05994032323360443, "learning_rate": 0.0001378235294117647, "loss": 0.0212, "step": 3259 }, { "epoch": 29.11, "grad_norm": 0.0744439885020256, "learning_rate": 0.00013776470588235294, "loss": 0.0262, "step": 3260 }, { "epoch": 29.12, "grad_norm": 0.09793171286582947, "learning_rate": 0.00013770588235294118, "loss": 0.0345, "step": 3261 }, { "epoch": 29.12, "grad_norm": 0.04691844806075096, "learning_rate": 0.0001376470588235294, "loss": 0.0216, "step": 3262 }, { "epoch": 29.13, "grad_norm": 0.0566234216094017, "learning_rate": 0.00013758823529411764, "loss": 0.0238, "step": 3263 }, { "epoch": 29.14, "grad_norm": 0.04758448526263237, "learning_rate": 
0.00013752941176470586, "loss": 0.0194, "step": 3264 }, { "epoch": 29.15, "grad_norm": 0.04558186233043671, "learning_rate": 0.0001374705882352941, "loss": 0.0187, "step": 3265 }, { "epoch": 29.16, "grad_norm": 0.053277499973773956, "learning_rate": 0.00013741176470588235, "loss": 0.0185, "step": 3266 }, { "epoch": 29.17, "grad_norm": 0.09081804752349854, "learning_rate": 0.00013735294117647056, "loss": 0.0283, "step": 3267 }, { "epoch": 29.18, "grad_norm": 0.04838769510388374, "learning_rate": 0.0001372941176470588, "loss": 0.0222, "step": 3268 }, { "epoch": 29.19, "grad_norm": 0.056981537491083145, "learning_rate": 0.00013723529411764705, "loss": 0.0234, "step": 3269 }, { "epoch": 29.2, "grad_norm": 0.05015343055129051, "learning_rate": 0.0001371764705882353, "loss": 0.0222, "step": 3270 }, { "epoch": 29.21, "grad_norm": 0.053255997598171234, "learning_rate": 0.0001371176470588235, "loss": 0.0241, "step": 3271 }, { "epoch": 29.21, "grad_norm": 0.057104263454675674, "learning_rate": 0.00013705882352941175, "loss": 0.0201, "step": 3272 }, { "epoch": 29.22, "grad_norm": 0.07775372266769409, "learning_rate": 0.000137, "loss": 0.0229, "step": 3273 }, { "epoch": 29.23, "grad_norm": 0.05271748825907707, "learning_rate": 0.00013694117647058824, "loss": 0.0217, "step": 3274 }, { "epoch": 29.24, "grad_norm": 0.04710380360484123, "learning_rate": 0.00013688235294117646, "loss": 0.0215, "step": 3275 }, { "epoch": 29.25, "grad_norm": 0.05564838647842407, "learning_rate": 0.0001368235294117647, "loss": 0.02, "step": 3276 }, { "epoch": 29.26, "grad_norm": 0.05574845150113106, "learning_rate": 0.00013676470588235292, "loss": 0.0219, "step": 3277 }, { "epoch": 29.27, "grad_norm": 0.06458115577697754, "learning_rate": 0.00013670588235294116, "loss": 0.0229, "step": 3278 }, { "epoch": 29.28, "grad_norm": 0.08612285554409027, "learning_rate": 0.0001366470588235294, "loss": 0.0282, "step": 3279 }, { "epoch": 29.29, "grad_norm": 0.08460797369480133, "learning_rate": 
0.00013658823529411765, "loss": 0.029, "step": 3280 }, { "epoch": 29.29, "grad_norm": 0.05518344044685364, "learning_rate": 0.00013652941176470586, "loss": 0.024, "step": 3281 }, { "epoch": 29.3, "grad_norm": 0.048608794808387756, "learning_rate": 0.0001364705882352941, "loss": 0.0203, "step": 3282 }, { "epoch": 29.31, "grad_norm": 0.049384672194719315, "learning_rate": 0.00013641176470588235, "loss": 0.0214, "step": 3283 }, { "epoch": 29.32, "grad_norm": 0.050028521567583084, "learning_rate": 0.0001363529411764706, "loss": 0.022, "step": 3284 }, { "epoch": 29.33, "grad_norm": 0.06495417654514313, "learning_rate": 0.0001362941176470588, "loss": 0.022, "step": 3285 }, { "epoch": 29.34, "grad_norm": 0.08429013937711716, "learning_rate": 0.00013623529411764705, "loss": 0.0259, "step": 3286 }, { "epoch": 29.35, "grad_norm": 0.046002425253391266, "learning_rate": 0.00013617647058823527, "loss": 0.0206, "step": 3287 }, { "epoch": 29.36, "grad_norm": 0.04369529336690903, "learning_rate": 0.0001361176470588235, "loss": 0.0181, "step": 3288 }, { "epoch": 29.37, "grad_norm": 0.05745384097099304, "learning_rate": 0.00013605882352941176, "loss": 0.0219, "step": 3289 }, { "epoch": 29.38, "grad_norm": 0.05233566835522652, "learning_rate": 0.00013599999999999997, "loss": 0.0225, "step": 3290 }, { "epoch": 29.38, "grad_norm": 0.0635610893368721, "learning_rate": 0.00013594117647058822, "loss": 0.0234, "step": 3291 }, { "epoch": 29.39, "grad_norm": 0.10268981754779816, "learning_rate": 0.00013588235294117646, "loss": 0.0322, "step": 3292 }, { "epoch": 29.4, "grad_norm": 0.055386073887348175, "learning_rate": 0.0001358235294117647, "loss": 0.0246, "step": 3293 }, { "epoch": 29.41, "grad_norm": 0.04810195416212082, "learning_rate": 0.00013576470588235295, "loss": 0.0222, "step": 3294 }, { "epoch": 29.42, "grad_norm": 0.04568944871425629, "learning_rate": 0.00013570588235294116, "loss": 0.0213, "step": 3295 }, { "epoch": 29.43, "grad_norm": 0.05103462189435959, "learning_rate": 
0.0001356470588235294, "loss": 0.0198, "step": 3296 }, { "epoch": 29.44, "grad_norm": 0.059654537588357925, "learning_rate": 0.00013558823529411765, "loss": 0.0252, "step": 3297 }, { "epoch": 29.45, "grad_norm": 0.09684297442436218, "learning_rate": 0.00013552941176470587, "loss": 0.026, "step": 3298 }, { "epoch": 29.46, "grad_norm": 0.046199869364500046, "learning_rate": 0.0001354705882352941, "loss": 0.0195, "step": 3299 }, { "epoch": 29.46, "grad_norm": 0.05403624847531319, "learning_rate": 0.00013541176470588232, "loss": 0.0242, "step": 3300 }, { "epoch": 29.46, "eval_cer": 0.03347577544508462, "eval_loss": 0.20549054443836212, "eval_runtime": 22.6841, "eval_samples_per_second": 116.47, "eval_steps_per_second": 1.852, "eval_wer": 0.10715985719952399, "step": 3300 }, { "epoch": 29.47, "grad_norm": 0.04708437621593475, "learning_rate": 0.00013535294117647057, "loss": 0.0222, "step": 3301 }, { "epoch": 29.48, "grad_norm": 0.060763850808143616, "learning_rate": 0.0001352941176470588, "loss": 0.0237, "step": 3302 }, { "epoch": 29.49, "grad_norm": 0.05629892274737358, "learning_rate": 0.00013523529411764706, "loss": 0.0214, "step": 3303 }, { "epoch": 29.5, "grad_norm": 0.07087591290473938, "learning_rate": 0.00013517647058823527, "loss": 0.0256, "step": 3304 }, { "epoch": 29.51, "grad_norm": 0.06636977940797806, "learning_rate": 0.00013511764705882351, "loss": 0.0255, "step": 3305 }, { "epoch": 29.52, "grad_norm": 0.0395834743976593, "learning_rate": 0.00013505882352941176, "loss": 0.0185, "step": 3306 }, { "epoch": 29.53, "grad_norm": 0.05122927576303482, "learning_rate": 0.000135, "loss": 0.0215, "step": 3307 }, { "epoch": 29.54, "grad_norm": 0.04181566834449768, "learning_rate": 0.00013494117647058822, "loss": 0.0185, "step": 3308 }, { "epoch": 29.54, "grad_norm": 0.04806053638458252, "learning_rate": 0.00013488235294117646, "loss": 0.017, "step": 3309 }, { "epoch": 29.55, "grad_norm": 0.0685345008969307, "learning_rate": 0.0001348235294117647, "loss": 0.0258, 
"step": 3310 }, { "epoch": 29.56, "grad_norm": 0.08832594007253647, "learning_rate": 0.00013476470588235292, "loss": 0.0248, "step": 3311 }, { "epoch": 29.57, "grad_norm": 0.04239046201109886, "learning_rate": 0.00013470588235294116, "loss": 0.0213, "step": 3312 }, { "epoch": 29.58, "grad_norm": 0.04903341084718704, "learning_rate": 0.0001346470588235294, "loss": 0.0207, "step": 3313 }, { "epoch": 29.59, "grad_norm": 0.05598336458206177, "learning_rate": 0.00013458823529411762, "loss": 0.0221, "step": 3314 }, { "epoch": 29.6, "grad_norm": 0.055826783180236816, "learning_rate": 0.00013452941176470587, "loss": 0.0191, "step": 3315 }, { "epoch": 29.61, "grad_norm": 0.06717198342084885, "learning_rate": 0.0001344705882352941, "loss": 0.0222, "step": 3316 }, { "epoch": 29.62, "grad_norm": 0.10662803053855896, "learning_rate": 0.00013441176470588235, "loss": 0.0336, "step": 3317 }, { "epoch": 29.62, "grad_norm": 0.04396955668926239, "learning_rate": 0.00013435294117647057, "loss": 0.0221, "step": 3318 }, { "epoch": 29.63, "grad_norm": 0.0503300242125988, "learning_rate": 0.00013429411764705881, "loss": 0.023, "step": 3319 }, { "epoch": 29.64, "grad_norm": 0.05000848323106766, "learning_rate": 0.00013423529411764706, "loss": 0.0206, "step": 3320 }, { "epoch": 29.65, "grad_norm": 0.046584829688072205, "learning_rate": 0.00013417647058823527, "loss": 0.021, "step": 3321 }, { "epoch": 29.66, "grad_norm": 0.05395794287323952, "learning_rate": 0.00013411764705882352, "loss": 0.0231, "step": 3322 }, { "epoch": 29.67, "grad_norm": 0.134524866938591, "learning_rate": 0.00013405882352941173, "loss": 0.0255, "step": 3323 }, { "epoch": 29.68, "grad_norm": 0.04994124546647072, "learning_rate": 0.00013399999999999998, "loss": 0.0194, "step": 3324 }, { "epoch": 29.69, "grad_norm": 0.04826788231730461, "learning_rate": 0.00013394117647058822, "loss": 0.0222, "step": 3325 }, { "epoch": 29.7, "grad_norm": 0.04609817638993263, "learning_rate": 0.00013388235294117646, "loss": 0.021, "step": 
3326 }, { "epoch": 29.71, "grad_norm": 0.05121495947241783, "learning_rate": 0.0001338235294117647, "loss": 0.0178, "step": 3327 }, { "epoch": 29.71, "grad_norm": 0.057989224791526794, "learning_rate": 0.00013376470588235292, "loss": 0.0205, "step": 3328 }, { "epoch": 29.72, "grad_norm": 0.08133722841739655, "learning_rate": 0.00013370588235294117, "loss": 0.0216, "step": 3329 }, { "epoch": 29.73, "grad_norm": 0.09465450793504715, "learning_rate": 0.0001336470588235294, "loss": 0.0279, "step": 3330 }, { "epoch": 29.74, "grad_norm": 0.05890243127942085, "learning_rate": 0.00013358823529411763, "loss": 0.0235, "step": 3331 }, { "epoch": 29.75, "grad_norm": 0.04863334447145462, "learning_rate": 0.00013352941176470587, "loss": 0.0187, "step": 3332 }, { "epoch": 29.76, "grad_norm": 0.04999236762523651, "learning_rate": 0.00013347058823529411, "loss": 0.0226, "step": 3333 }, { "epoch": 29.77, "grad_norm": 0.055948127061128616, "learning_rate": 0.00013341176470588233, "loss": 0.0221, "step": 3334 }, { "epoch": 29.78, "grad_norm": 0.06907929480075836, "learning_rate": 0.00013335294117647057, "loss": 0.0208, "step": 3335 }, { "epoch": 29.79, "grad_norm": 0.0889093205332756, "learning_rate": 0.00013329411764705882, "loss": 0.0271, "step": 3336 }, { "epoch": 29.79, "grad_norm": 0.04269901290535927, "learning_rate": 0.00013323529411764703, "loss": 0.0189, "step": 3337 }, { "epoch": 29.8, "grad_norm": 0.050411589443683624, "learning_rate": 0.00013317647058823528, "loss": 0.0205, "step": 3338 }, { "epoch": 29.81, "grad_norm": 0.05397701635956764, "learning_rate": 0.00013311764705882352, "loss": 0.0206, "step": 3339 }, { "epoch": 29.82, "grad_norm": 0.057393450289964676, "learning_rate": 0.00013305882352941176, "loss": 0.0192, "step": 3340 }, { "epoch": 29.83, "grad_norm": 0.06307059526443481, "learning_rate": 0.000133, "loss": 0.0225, "step": 3341 }, { "epoch": 29.84, "grad_norm": 0.08708269149065018, "learning_rate": 0.00013294117647058822, "loss": 0.0258, "step": 3342 }, { 
"epoch": 29.85, "grad_norm": 0.049405332654714584, "learning_rate": 0.00013288235294117647, "loss": 0.0235, "step": 3343 }, { "epoch": 29.86, "grad_norm": 0.05103577673435211, "learning_rate": 0.00013282352941176468, "loss": 0.0215, "step": 3344 }, { "epoch": 29.87, "grad_norm": 0.05492822080850601, "learning_rate": 0.00013276470588235293, "loss": 0.0238, "step": 3345 }, { "epoch": 29.88, "grad_norm": 0.06334534287452698, "learning_rate": 0.00013270588235294117, "loss": 0.0228, "step": 3346 }, { "epoch": 29.88, "grad_norm": 0.05494748055934906, "learning_rate": 0.0001326470588235294, "loss": 0.0226, "step": 3347 }, { "epoch": 29.89, "grad_norm": 0.10256792604923248, "learning_rate": 0.00013258823529411763, "loss": 0.0306, "step": 3348 }, { "epoch": 29.9, "grad_norm": 0.046946603804826736, "learning_rate": 0.00013252941176470587, "loss": 0.0224, "step": 3349 }, { "epoch": 29.91, "grad_norm": 0.04373834282159805, "learning_rate": 0.00013247058823529412, "loss": 0.0189, "step": 3350 }, { "epoch": 29.92, "grad_norm": 0.052029527723789215, "learning_rate": 0.00013241176470588233, "loss": 0.0203, "step": 3351 }, { "epoch": 29.93, "grad_norm": 0.04216480255126953, "learning_rate": 0.00013235294117647058, "loss": 0.016, "step": 3352 }, { "epoch": 29.94, "grad_norm": 0.06439980864524841, "learning_rate": 0.00013229411764705882, "loss": 0.0214, "step": 3353 }, { "epoch": 29.95, "grad_norm": 0.062002141028642654, "learning_rate": 0.00013223529411764706, "loss": 0.0211, "step": 3354 }, { "epoch": 29.96, "grad_norm": 0.0867651030421257, "learning_rate": 0.00013217647058823528, "loss": 0.0248, "step": 3355 }, { "epoch": 29.96, "grad_norm": 0.05184529349207878, "learning_rate": 0.00013211764705882352, "loss": 0.0219, "step": 3356 }, { "epoch": 29.97, "grad_norm": 0.051039062440395355, "learning_rate": 0.00013205882352941174, "loss": 0.0204, "step": 3357 }, { "epoch": 29.98, "grad_norm": 0.05570162087678909, "learning_rate": 0.00013199999999999998, "loss": 0.0189, "step": 3358 }, 
{ "epoch": 29.99, "grad_norm": 0.05899661406874657, "learning_rate": 0.00013194117647058823, "loss": 0.0224, "step": 3359 }, { "epoch": 30.0, "grad_norm": 0.084539495408535, "learning_rate": 0.00013188235294117647, "loss": 0.0236, "step": 3360 }, { "epoch": 30.01, "grad_norm": 0.0488189272582531, "learning_rate": 0.00013182352941176469, "loss": 0.0214, "step": 3361 }, { "epoch": 30.02, "grad_norm": 0.05479676276445389, "learning_rate": 0.00013176470588235293, "loss": 0.0225, "step": 3362 }, { "epoch": 30.03, "grad_norm": 0.042999375611543655, "learning_rate": 0.00013170588235294117, "loss": 0.0192, "step": 3363 }, { "epoch": 30.04, "grad_norm": 0.045781463384628296, "learning_rate": 0.00013164705882352942, "loss": 0.0194, "step": 3364 }, { "epoch": 30.04, "grad_norm": 0.1375766098499298, "learning_rate": 0.00013158823529411763, "loss": 0.0191, "step": 3365 }, { "epoch": 30.05, "grad_norm": 0.06471619009971619, "learning_rate": 0.00013152941176470588, "loss": 0.0212, "step": 3366 }, { "epoch": 30.06, "grad_norm": 0.10753370076417923, "learning_rate": 0.0001314705882352941, "loss": 0.0238, "step": 3367 }, { "epoch": 30.07, "grad_norm": 0.053320981562137604, "learning_rate": 0.00013141176470588234, "loss": 0.0221, "step": 3368 }, { "epoch": 30.08, "grad_norm": 0.051087040454149246, "learning_rate": 0.00013135294117647058, "loss": 0.0255, "step": 3369 }, { "epoch": 30.09, "grad_norm": 0.056379638612270355, "learning_rate": 0.0001312941176470588, "loss": 0.0229, "step": 3370 }, { "epoch": 30.1, "grad_norm": 0.05336599051952362, "learning_rate": 0.00013123529411764704, "loss": 0.0183, "step": 3371 }, { "epoch": 30.11, "grad_norm": 0.0666266679763794, "learning_rate": 0.00013117647058823528, "loss": 0.0199, "step": 3372 }, { "epoch": 30.12, "grad_norm": 0.10086492449045181, "learning_rate": 0.00013111764705882353, "loss": 0.0284, "step": 3373 }, { "epoch": 30.12, "grad_norm": 0.05312999337911606, "learning_rate": 0.00013105882352941177, "loss": 0.0225, "step": 3374 }, { 
"epoch": 30.13, "grad_norm": 0.04641437530517578, "learning_rate": 0.00013099999999999999, "loss": 0.0217, "step": 3375 }, { "epoch": 30.14, "grad_norm": 0.051450230181217194, "learning_rate": 0.00013094117647058823, "loss": 0.0248, "step": 3376 }, { "epoch": 30.15, "grad_norm": 0.04565277323126793, "learning_rate": 0.00013088235294117647, "loss": 0.0158, "step": 3377 }, { "epoch": 30.16, "grad_norm": 0.05888838320970535, "learning_rate": 0.0001308235294117647, "loss": 0.023, "step": 3378 }, { "epoch": 30.17, "grad_norm": 0.08209196478128433, "learning_rate": 0.00013076470588235293, "loss": 0.0239, "step": 3379 }, { "epoch": 30.18, "grad_norm": 0.050883445888757706, "learning_rate": 0.00013070588235294115, "loss": 0.023, "step": 3380 }, { "epoch": 30.19, "grad_norm": 0.04774251580238342, "learning_rate": 0.0001306470588235294, "loss": 0.0212, "step": 3381 }, { "epoch": 30.2, "grad_norm": 0.044998809695243835, "learning_rate": 0.00013058823529411764, "loss": 0.018, "step": 3382 }, { "epoch": 30.21, "grad_norm": 0.052356570959091187, "learning_rate": 0.00013052941176470588, "loss": 0.02, "step": 3383 }, { "epoch": 30.21, "grad_norm": 0.05536366254091263, "learning_rate": 0.0001304705882352941, "loss": 0.0182, "step": 3384 }, { "epoch": 30.22, "grad_norm": 0.09938780218362808, "learning_rate": 0.00013041176470588234, "loss": 0.0274, "step": 3385 }, { "epoch": 30.23, "grad_norm": 0.05490725114941597, "learning_rate": 0.00013035294117647058, "loss": 0.0213, "step": 3386 }, { "epoch": 30.24, "grad_norm": 0.056850604712963104, "learning_rate": 0.00013029411764705883, "loss": 0.0238, "step": 3387 }, { "epoch": 30.25, "grad_norm": 0.05505095049738884, "learning_rate": 0.00013023529411764704, "loss": 0.0226, "step": 3388 }, { "epoch": 30.26, "grad_norm": 0.05054643005132675, "learning_rate": 0.00013017647058823529, "loss": 0.0202, "step": 3389 }, { "epoch": 30.27, "grad_norm": 0.05233348160982132, "learning_rate": 0.00013011764705882353, "loss": 0.0194, "step": 3390 }, { 
"epoch": 30.28, "grad_norm": 0.07340871542692184, "learning_rate": 0.00013005882352941175, "loss": 0.0244, "step": 3391 }, { "epoch": 30.29, "grad_norm": 0.08080914616584778, "learning_rate": 0.00013, "loss": 0.0257, "step": 3392 }, { "epoch": 30.29, "grad_norm": 0.04434896633028984, "learning_rate": 0.00012994117647058823, "loss": 0.0215, "step": 3393 }, { "epoch": 30.3, "grad_norm": 0.0490557886660099, "learning_rate": 0.00012988235294117645, "loss": 0.0214, "step": 3394 }, { "epoch": 30.31, "grad_norm": 0.05217267945408821, "learning_rate": 0.0001298235294117647, "loss": 0.0225, "step": 3395 }, { "epoch": 30.32, "grad_norm": 0.04906807467341423, "learning_rate": 0.00012976470588235294, "loss": 0.02, "step": 3396 }, { "epoch": 30.33, "grad_norm": 0.06618456542491913, "learning_rate": 0.00012970588235294118, "loss": 0.0227, "step": 3397 }, { "epoch": 30.34, "grad_norm": 0.07943788915872574, "learning_rate": 0.0001296470588235294, "loss": 0.0219, "step": 3398 }, { "epoch": 30.35, "grad_norm": 0.044673431664705276, "learning_rate": 0.00012958823529411764, "loss": 0.0216, "step": 3399 }, { "epoch": 30.36, "grad_norm": 0.046142809092998505, "learning_rate": 0.00012952941176470588, "loss": 0.0187, "step": 3400 }, { "epoch": 30.36, "eval_cer": 0.030735925854934548, "eval_loss": 0.19797120988368988, "eval_runtime": 22.4052, "eval_samples_per_second": 117.919, "eval_steps_per_second": 1.875, "eval_wer": 0.10212217374057914, "step": 3400 }, { "epoch": 30.37, "grad_norm": 0.047959551215171814, "learning_rate": 0.0001294705882352941, "loss": 0.0207, "step": 3401 }, { "epoch": 30.38, "grad_norm": 0.05231958255171776, "learning_rate": 0.00012941176470588234, "loss": 0.0206, "step": 3402 }, { "epoch": 30.38, "grad_norm": 0.053719572722911835, "learning_rate": 0.00012935294117647056, "loss": 0.0198, "step": 3403 }, { "epoch": 30.39, "grad_norm": 0.08503369241952896, "learning_rate": 0.0001292941176470588, "loss": 0.0281, "step": 3404 }, { "epoch": 30.4, "grad_norm": 
0.04950686916708946, "learning_rate": 0.00012923529411764704, "loss": 0.0206, "step": 3405 }, { "epoch": 30.41, "grad_norm": 0.045385610312223434, "learning_rate": 0.0001291764705882353, "loss": 0.0218, "step": 3406 }, { "epoch": 30.42, "grad_norm": 0.04846157878637314, "learning_rate": 0.00012911764705882353, "loss": 0.0219, "step": 3407 }, { "epoch": 30.43, "grad_norm": 0.04513081908226013, "learning_rate": 0.00012905882352941175, "loss": 0.0192, "step": 3408 }, { "epoch": 30.44, "grad_norm": 0.056301578879356384, "learning_rate": 0.000129, "loss": 0.0208, "step": 3409 }, { "epoch": 30.45, "grad_norm": 0.11125416308641434, "learning_rate": 0.00012894117647058823, "loss": 0.0276, "step": 3410 }, { "epoch": 30.46, "grad_norm": 0.041852883994579315, "learning_rate": 0.00012888235294117645, "loss": 0.0199, "step": 3411 }, { "epoch": 30.46, "grad_norm": 0.044221457093954086, "learning_rate": 0.0001288235294117647, "loss": 0.0198, "step": 3412 }, { "epoch": 30.47, "grad_norm": 0.050722718238830566, "learning_rate": 0.00012876470588235294, "loss": 0.0213, "step": 3413 }, { "epoch": 30.48, "grad_norm": 0.054453521966934204, "learning_rate": 0.00012870588235294115, "loss": 0.0222, "step": 3414 }, { "epoch": 30.49, "grad_norm": 0.05060184746980667, "learning_rate": 0.0001286470588235294, "loss": 0.0189, "step": 3415 }, { "epoch": 30.5, "grad_norm": 0.06100284308195114, "learning_rate": 0.00012858823529411764, "loss": 0.0255, "step": 3416 }, { "epoch": 30.51, "grad_norm": 0.07308360189199448, "learning_rate": 0.00012852941176470586, "loss": 0.0293, "step": 3417 }, { "epoch": 30.52, "grad_norm": 0.04617452993988991, "learning_rate": 0.0001284705882352941, "loss": 0.0208, "step": 3418 }, { "epoch": 30.53, "grad_norm": 0.04911469668149948, "learning_rate": 0.00012841176470588234, "loss": 0.0226, "step": 3419 }, { "epoch": 30.54, "grad_norm": 0.0460105799138546, "learning_rate": 0.0001283529411764706, "loss": 0.0191, "step": 3420 }, { "epoch": 30.54, "grad_norm": 
0.047294821590185165, "learning_rate": 0.00012829411764705883, "loss": 0.0182, "step": 3421 }, { "epoch": 30.55, "grad_norm": 0.07154631614685059, "learning_rate": 0.00012823529411764705, "loss": 0.0246, "step": 3422 }, { "epoch": 30.56, "grad_norm": 0.08403802663087845, "learning_rate": 0.0001281764705882353, "loss": 0.0279, "step": 3423 }, { "epoch": 30.57, "grad_norm": 0.046277470886707306, "learning_rate": 0.0001281176470588235, "loss": 0.0184, "step": 3424 }, { "epoch": 30.58, "grad_norm": 0.04642440378665924, "learning_rate": 0.00012805882352941175, "loss": 0.0199, "step": 3425 }, { "epoch": 30.59, "grad_norm": 0.055028121918439865, "learning_rate": 0.000128, "loss": 0.0212, "step": 3426 }, { "epoch": 30.6, "grad_norm": 0.05246156454086304, "learning_rate": 0.0001279411764705882, "loss": 0.0198, "step": 3427 }, { "epoch": 30.61, "grad_norm": 0.05042044073343277, "learning_rate": 0.00012788235294117645, "loss": 0.0172, "step": 3428 }, { "epoch": 30.62, "grad_norm": 0.08460158109664917, "learning_rate": 0.0001278235294117647, "loss": 0.0302, "step": 3429 }, { "epoch": 30.62, "grad_norm": 0.05066191777586937, "learning_rate": 0.00012776470588235294, "loss": 0.0195, "step": 3430 }, { "epoch": 30.63, "grad_norm": 0.054355815052986145, "learning_rate": 0.00012770588235294116, "loss": 0.0215, "step": 3431 }, { "epoch": 30.64, "grad_norm": 0.050329890102148056, "learning_rate": 0.0001276470588235294, "loss": 0.0176, "step": 3432 }, { "epoch": 30.65, "grad_norm": 0.05507971718907356, "learning_rate": 0.00012758823529411764, "loss": 0.0204, "step": 3433 }, { "epoch": 30.66, "grad_norm": 0.056048277765512466, "learning_rate": 0.0001275294117647059, "loss": 0.0214, "step": 3434 }, { "epoch": 30.67, "grad_norm": 0.09453506767749786, "learning_rate": 0.0001274705882352941, "loss": 0.0278, "step": 3435 }, { "epoch": 30.68, "grad_norm": 0.04392372444272041, "learning_rate": 0.00012741176470588235, "loss": 0.0206, "step": 3436 }, { "epoch": 30.69, "grad_norm": 
0.04168267548084259, "learning_rate": 0.00012735294117647056, "loss": 0.0207, "step": 3437 }, { "epoch": 30.7, "grad_norm": 0.04299396649003029, "learning_rate": 0.0001272941176470588, "loss": 0.019, "step": 3438 }, { "epoch": 30.71, "grad_norm": 0.04762311652302742, "learning_rate": 0.00012723529411764705, "loss": 0.02, "step": 3439 }, { "epoch": 30.71, "grad_norm": 0.052612196654081345, "learning_rate": 0.0001271764705882353, "loss": 0.0212, "step": 3440 }, { "epoch": 30.72, "grad_norm": 0.06647343188524246, "learning_rate": 0.0001271176470588235, "loss": 0.0225, "step": 3441 }, { "epoch": 30.73, "grad_norm": 0.12204380333423615, "learning_rate": 0.00012705882352941175, "loss": 0.0249, "step": 3442 }, { "epoch": 30.74, "grad_norm": 0.04954591393470764, "learning_rate": 0.000127, "loss": 0.0179, "step": 3443 }, { "epoch": 30.75, "grad_norm": 0.05146835371851921, "learning_rate": 0.00012694117647058824, "loss": 0.02, "step": 3444 }, { "epoch": 30.76, "grad_norm": 0.053591907024383545, "learning_rate": 0.00012688235294117646, "loss": 0.021, "step": 3445 }, { "epoch": 30.77, "grad_norm": 0.060286153107881546, "learning_rate": 0.0001268235294117647, "loss": 0.0202, "step": 3446 }, { "epoch": 30.78, "grad_norm": 0.055687978863716125, "learning_rate": 0.00012676470588235292, "loss": 0.0178, "step": 3447 }, { "epoch": 30.79, "grad_norm": 0.09846653789281845, "learning_rate": 0.00012670588235294116, "loss": 0.0261, "step": 3448 }, { "epoch": 30.79, "grad_norm": 0.057401638478040695, "learning_rate": 0.0001266470588235294, "loss": 0.0251, "step": 3449 }, { "epoch": 30.8, "grad_norm": 0.050923582166433334, "learning_rate": 0.00012658823529411762, "loss": 0.0235, "step": 3450 }, { "epoch": 30.81, "grad_norm": 0.05702921375632286, "learning_rate": 0.00012652941176470586, "loss": 0.0206, "step": 3451 }, { "epoch": 30.82, "grad_norm": 0.05089607834815979, "learning_rate": 0.0001264705882352941, "loss": 0.021, "step": 3452 }, { "epoch": 30.83, "grad_norm": 0.058155208826065063, 
"learning_rate": 0.00012641176470588235, "loss": 0.0216, "step": 3453 }, { "epoch": 30.84, "grad_norm": 0.08319725096225739, "learning_rate": 0.0001263529411764706, "loss": 0.028, "step": 3454 }, { "epoch": 30.85, "grad_norm": 0.048014167696237564, "learning_rate": 0.0001262941176470588, "loss": 0.0204, "step": 3455 }, { "epoch": 30.86, "grad_norm": 0.04385518655180931, "learning_rate": 0.00012623529411764705, "loss": 0.0192, "step": 3456 }, { "epoch": 30.87, "grad_norm": 0.04214124009013176, "learning_rate": 0.0001261764705882353, "loss": 0.0205, "step": 3457 }, { "epoch": 30.88, "grad_norm": 0.05014554038643837, "learning_rate": 0.0001261176470588235, "loss": 0.0219, "step": 3458 }, { "epoch": 30.88, "grad_norm": 0.06376246362924576, "learning_rate": 0.00012605882352941176, "loss": 0.0248, "step": 3459 }, { "epoch": 30.89, "grad_norm": 0.09171823412179947, "learning_rate": 0.00012599999999999997, "loss": 0.0252, "step": 3460 }, { "epoch": 30.9, "grad_norm": 0.0505376011133194, "learning_rate": 0.00012594117647058822, "loss": 0.0229, "step": 3461 }, { "epoch": 30.91, "grad_norm": 0.05193379148840904, "learning_rate": 0.00012588235294117646, "loss": 0.0214, "step": 3462 }, { "epoch": 30.92, "grad_norm": 0.054054778069257736, "learning_rate": 0.0001258235294117647, "loss": 0.0243, "step": 3463 }, { "epoch": 30.93, "grad_norm": 0.04865433648228645, "learning_rate": 0.00012576470588235292, "loss": 0.0196, "step": 3464 }, { "epoch": 30.94, "grad_norm": 0.04885263368487358, "learning_rate": 0.00012570588235294116, "loss": 0.0196, "step": 3465 }, { "epoch": 30.95, "grad_norm": 0.07468660920858383, "learning_rate": 0.0001256470588235294, "loss": 0.0243, "step": 3466 }, { "epoch": 30.96, "grad_norm": 0.13972200453281403, "learning_rate": 0.00012558823529411765, "loss": 0.0323, "step": 3467 }, { "epoch": 30.96, "grad_norm": 0.04695039987564087, "learning_rate": 0.00012552941176470587, "loss": 0.0213, "step": 3468 }, { "epoch": 30.97, "grad_norm": 0.04661361500620842, 
"learning_rate": 0.0001254705882352941, "loss": 0.0214, "step": 3469 }, { "epoch": 30.98, "grad_norm": 0.04745853319764137, "learning_rate": 0.00012541176470588235, "loss": 0.0195, "step": 3470 }, { "epoch": 30.99, "grad_norm": 0.06471362709999084, "learning_rate": 0.00012535294117647057, "loss": 0.0188, "step": 3471 }, { "epoch": 31.0, "grad_norm": 0.06648192554712296, "learning_rate": 0.0001252941176470588, "loss": 0.0236, "step": 3472 }, { "epoch": 31.01, "grad_norm": 0.047431010752916336, "learning_rate": 0.00012523529411764706, "loss": 0.0181, "step": 3473 }, { "epoch": 31.02, "grad_norm": 0.052671611309051514, "learning_rate": 0.00012517647058823527, "loss": 0.017, "step": 3474 }, { "epoch": 31.03, "grad_norm": 0.04441322386264801, "learning_rate": 0.00012511764705882352, "loss": 0.019, "step": 3475 }, { "epoch": 31.04, "grad_norm": 0.05547754466533661, "learning_rate": 0.00012505882352941176, "loss": 0.0246, "step": 3476 }, { "epoch": 31.04, "grad_norm": 0.0603085495531559, "learning_rate": 0.000125, "loss": 0.0224, "step": 3477 }, { "epoch": 31.05, "grad_norm": 0.09202206134796143, "learning_rate": 0.00012494117647058822, "loss": 0.0293, "step": 3478 }, { "epoch": 31.06, "grad_norm": 0.09541846811771393, "learning_rate": 0.00012488235294117646, "loss": 0.0292, "step": 3479 }, { "epoch": 31.07, "grad_norm": 0.04697448015213013, "learning_rate": 0.0001248235294117647, "loss": 0.0213, "step": 3480 }, { "epoch": 31.08, "grad_norm": 0.04767817258834839, "learning_rate": 0.00012476470588235292, "loss": 0.0182, "step": 3481 }, { "epoch": 31.09, "grad_norm": 0.05568642169237137, "learning_rate": 0.00012470588235294117, "loss": 0.0232, "step": 3482 }, { "epoch": 31.1, "grad_norm": 0.052640173584222794, "learning_rate": 0.00012464705882352938, "loss": 0.0214, "step": 3483 }, { "epoch": 31.11, "grad_norm": 0.06075693294405937, "learning_rate": 0.00012458823529411763, "loss": 0.0234, "step": 3484 }, { "epoch": 31.12, "grad_norm": 0.1130075678229332, "learning_rate": 
0.00012452941176470587, "loss": 0.0285, "step": 3485 }, { "epoch": 31.12, "grad_norm": 0.04859291762113571, "learning_rate": 0.0001244705882352941, "loss": 0.0224, "step": 3486 }, { "epoch": 31.13, "grad_norm": 0.044511616230010986, "learning_rate": 0.00012441176470588236, "loss": 0.023, "step": 3487 }, { "epoch": 31.14, "grad_norm": 0.04058460891246796, "learning_rate": 0.00012435294117647057, "loss": 0.0185, "step": 3488 }, { "epoch": 31.15, "grad_norm": 0.08004245907068253, "learning_rate": 0.00012429411764705882, "loss": 0.0174, "step": 3489 }, { "epoch": 31.16, "grad_norm": 0.05423317849636078, "learning_rate": 0.00012423529411764706, "loss": 0.0202, "step": 3490 }, { "epoch": 31.17, "grad_norm": 0.09387530386447906, "learning_rate": 0.00012417647058823528, "loss": 0.0255, "step": 3491 }, { "epoch": 31.18, "grad_norm": 0.044507239013910294, "learning_rate": 0.00012411764705882352, "loss": 0.0204, "step": 3492 }, { "epoch": 31.19, "grad_norm": 0.04497398063540459, "learning_rate": 0.00012405882352941176, "loss": 0.0199, "step": 3493 }, { "epoch": 31.2, "grad_norm": 0.05612332001328468, "learning_rate": 0.00012399999999999998, "loss": 0.0233, "step": 3494 }, { "epoch": 31.21, "grad_norm": 0.05282190069556236, "learning_rate": 0.00012394117647058822, "loss": 0.0203, "step": 3495 }, { "epoch": 31.21, "grad_norm": 0.05601921305060387, "learning_rate": 0.00012388235294117647, "loss": 0.0236, "step": 3496 }, { "epoch": 31.22, "grad_norm": 0.12544254958629608, "learning_rate": 0.00012382352941176468, "loss": 0.0308, "step": 3497 }, { "epoch": 31.23, "grad_norm": 0.04651530086994171, "learning_rate": 0.00012376470588235292, "loss": 0.0221, "step": 3498 }, { "epoch": 31.24, "grad_norm": 0.049244217574596405, "learning_rate": 0.00012370588235294117, "loss": 0.0207, "step": 3499 }, { "epoch": 31.25, "grad_norm": 0.049539964646101, "learning_rate": 0.0001236470588235294, "loss": 0.0219, "step": 3500 }, { "epoch": 31.25, "eval_cer": 0.03221672247841758, "eval_loss": 
0.1998356431722641, "eval_runtime": 23.0411, "eval_samples_per_second": 114.665, "eval_steps_per_second": 1.823, "eval_wer": 0.1053550178500595, "step": 3500 }, { "epoch": 31.26, "grad_norm": 0.04684394225478172, "learning_rate": 0.00012358823529411766, "loss": 0.0191, "step": 3501 }, { "epoch": 31.27, "grad_norm": 0.058877211064100266, "learning_rate": 0.00012352941176470587, "loss": 0.0239, "step": 3502 }, { "epoch": 31.28, "grad_norm": 0.07442399859428406, "learning_rate": 0.00012347058823529411, "loss": 0.024, "step": 3503 }, { "epoch": 31.29, "grad_norm": 0.07524219155311584, "learning_rate": 0.00012341176470588233, "loss": 0.0264, "step": 3504 }, { "epoch": 31.29, "grad_norm": 0.04997357353568077, "learning_rate": 0.00012335294117647057, "loss": 0.0218, "step": 3505 }, { "epoch": 31.3, "grad_norm": 0.04596414044499397, "learning_rate": 0.00012329411764705882, "loss": 0.0203, "step": 3506 }, { "epoch": 31.31, "grad_norm": 0.04907511547207832, "learning_rate": 0.00012323529411764703, "loss": 0.0222, "step": 3507 }, { "epoch": 31.32, "grad_norm": 0.048574049025774, "learning_rate": 0.00012317647058823528, "loss": 0.022, "step": 3508 }, { "epoch": 31.33, "grad_norm": 0.0650213211774826, "learning_rate": 0.00012311764705882352, "loss": 0.0206, "step": 3509 }, { "epoch": 31.34, "grad_norm": 0.09144242107868195, "learning_rate": 0.00012305882352941176, "loss": 0.0297, "step": 3510 }, { "epoch": 31.35, "grad_norm": 0.04303023964166641, "learning_rate": 0.00012299999999999998, "loss": 0.02, "step": 3511 }, { "epoch": 31.36, "grad_norm": 0.05274583771824837, "learning_rate": 0.00012294117647058822, "loss": 0.0229, "step": 3512 }, { "epoch": 31.37, "grad_norm": 0.047474268823862076, "learning_rate": 0.00012288235294117647, "loss": 0.0208, "step": 3513 }, { "epoch": 31.38, "grad_norm": 0.04687033221125603, "learning_rate": 0.0001228235294117647, "loss": 0.0179, "step": 3514 }, { "epoch": 31.38, "grad_norm": 0.047554392367601395, "learning_rate": 0.00012276470588235293, 
"loss": 0.0174, "step": 3515 }, { "epoch": 31.39, "grad_norm": 0.09634137898683548, "learning_rate": 0.00012270588235294117, "loss": 0.0285, "step": 3516 }, { "epoch": 31.4, "grad_norm": 0.04787968844175339, "learning_rate": 0.0001226470588235294, "loss": 0.0226, "step": 3517 }, { "epoch": 31.41, "grad_norm": 0.047528766095638275, "learning_rate": 0.00012258823529411763, "loss": 0.0198, "step": 3518 }, { "epoch": 31.42, "grad_norm": 0.0573684461414814, "learning_rate": 0.00012252941176470587, "loss": 0.0225, "step": 3519 }, { "epoch": 31.43, "grad_norm": 0.04796380549669266, "learning_rate": 0.00012247058823529412, "loss": 0.0185, "step": 3520 }, { "epoch": 31.44, "grad_norm": 0.06380965560674667, "learning_rate": 0.00012241176470588233, "loss": 0.0253, "step": 3521 }, { "epoch": 31.45, "grad_norm": 0.09760231524705887, "learning_rate": 0.00012235294117647058, "loss": 0.0237, "step": 3522 }, { "epoch": 31.46, "grad_norm": 0.04646872729063034, "learning_rate": 0.00012229411764705882, "loss": 0.0227, "step": 3523 }, { "epoch": 31.46, "grad_norm": 0.041683658957481384, "learning_rate": 0.00012223529411764706, "loss": 0.0187, "step": 3524 }, { "epoch": 31.47, "grad_norm": 0.04618990048766136, "learning_rate": 0.00012217647058823528, "loss": 0.0227, "step": 3525 }, { "epoch": 31.48, "grad_norm": 0.045315660536289215, "learning_rate": 0.00012211764705882352, "loss": 0.0187, "step": 3526 }, { "epoch": 31.49, "grad_norm": 0.04802365228533745, "learning_rate": 0.00012205882352941175, "loss": 0.021, "step": 3527 }, { "epoch": 31.5, "grad_norm": 0.07292218506336212, "learning_rate": 0.000122, "loss": 0.0179, "step": 3528 }, { "epoch": 31.51, "grad_norm": 0.0829540491104126, "learning_rate": 0.00012194117647058823, "loss": 0.0248, "step": 3529 }, { "epoch": 31.52, "grad_norm": 0.05062733218073845, "learning_rate": 0.00012188235294117646, "loss": 0.0223, "step": 3530 }, { "epoch": 31.53, "grad_norm": 0.056058090180158615, "learning_rate": 0.00012182352941176469, "loss": 0.023, 
"step": 3531 }, { "epoch": 31.54, "grad_norm": 0.05849380046129227, "learning_rate": 0.00012176470588235293, "loss": 0.0224, "step": 3532 }, { "epoch": 31.54, "grad_norm": 0.04975719004869461, "learning_rate": 0.00012170588235294117, "loss": 0.0169, "step": 3533 }, { "epoch": 31.55, "grad_norm": 0.07418406009674072, "learning_rate": 0.0001216470588235294, "loss": 0.0262, "step": 3534 }, { "epoch": 31.56, "grad_norm": 0.07387860119342804, "learning_rate": 0.00012158823529411763, "loss": 0.0241, "step": 3535 }, { "epoch": 31.57, "grad_norm": 0.05272188410162926, "learning_rate": 0.00012152941176470586, "loss": 0.0236, "step": 3536 }, { "epoch": 31.58, "grad_norm": 0.04087989777326584, "learning_rate": 0.00012147058823529411, "loss": 0.0172, "step": 3537 }, { "epoch": 31.59, "grad_norm": 0.04647759720683098, "learning_rate": 0.00012141176470588235, "loss": 0.0199, "step": 3538 }, { "epoch": 31.6, "grad_norm": 0.045854777097702026, "learning_rate": 0.00012135294117647057, "loss": 0.0177, "step": 3539 }, { "epoch": 31.61, "grad_norm": 0.06473439931869507, "learning_rate": 0.00012129411764705881, "loss": 0.0232, "step": 3540 }, { "epoch": 31.62, "grad_norm": 0.0961967185139656, "learning_rate": 0.00012123529411764705, "loss": 0.0339, "step": 3541 }, { "epoch": 31.62, "grad_norm": 0.04991842806339264, "learning_rate": 0.00012117647058823528, "loss": 0.0232, "step": 3542 }, { "epoch": 31.63, "grad_norm": 0.038326505571603775, "learning_rate": 0.00012111764705882353, "loss": 0.0155, "step": 3543 }, { "epoch": 31.64, "grad_norm": 0.0446661151945591, "learning_rate": 0.00012105882352941174, "loss": 0.0186, "step": 3544 }, { "epoch": 31.65, "grad_norm": 0.04957854002714157, "learning_rate": 0.00012099999999999999, "loss": 0.0205, "step": 3545 }, { "epoch": 31.66, "grad_norm": 0.05040052533149719, "learning_rate": 0.00012094117647058823, "loss": 0.0198, "step": 3546 }, { "epoch": 31.67, "grad_norm": 0.0885852724313736, "learning_rate": 0.00012088235294117646, "loss": 0.0229, 
"step": 3547 }, { "epoch": 31.68, "grad_norm": 0.04862269014120102, "learning_rate": 0.0001208235294117647, "loss": 0.0227, "step": 3548 }, { "epoch": 31.69, "grad_norm": 0.045693445950746536, "learning_rate": 0.00012076470588235292, "loss": 0.0176, "step": 3549 }, { "epoch": 31.7, "grad_norm": 0.04793444648385048, "learning_rate": 0.00012070588235294116, "loss": 0.0207, "step": 3550 }, { "epoch": 31.71, "grad_norm": 0.054520826786756516, "learning_rate": 0.0001206470588235294, "loss": 0.0224, "step": 3551 }, { "epoch": 31.71, "grad_norm": 0.06703490763902664, "learning_rate": 0.00012058823529411764, "loss": 0.0221, "step": 3552 }, { "epoch": 31.72, "grad_norm": 0.08838488906621933, "learning_rate": 0.00012052941176470588, "loss": 0.0262, "step": 3553 }, { "epoch": 31.73, "grad_norm": 0.07164445519447327, "learning_rate": 0.0001204705882352941, "loss": 0.0194, "step": 3554 }, { "epoch": 31.74, "grad_norm": 0.05277720466256142, "learning_rate": 0.00012041176470588234, "loss": 0.0208, "step": 3555 }, { "epoch": 31.75, "grad_norm": 0.05103946849703789, "learning_rate": 0.00012035294117647058, "loss": 0.0208, "step": 3556 }, { "epoch": 31.76, "grad_norm": 0.0512225367128849, "learning_rate": 0.00012029411764705881, "loss": 0.0189, "step": 3557 }, { "epoch": 31.77, "grad_norm": 0.05225786566734314, "learning_rate": 0.00012023529411764704, "loss": 0.0194, "step": 3558 }, { "epoch": 31.78, "grad_norm": 0.0629500150680542, "learning_rate": 0.00012017647058823529, "loss": 0.0233, "step": 3559 }, { "epoch": 31.79, "grad_norm": 0.06870309263467789, "learning_rate": 0.00012011764705882352, "loss": 0.0245, "step": 3560 }, { "epoch": 31.79, "grad_norm": 0.04595329612493515, "learning_rate": 0.00012005882352941176, "loss": 0.0209, "step": 3561 }, { "epoch": 31.8, "grad_norm": 0.043259911239147186, "learning_rate": 0.00011999999999999999, "loss": 0.0175, "step": 3562 }, { "epoch": 31.81, "grad_norm": 0.04623683542013168, "learning_rate": 0.00011994117647058822, "loss": 0.0205, 
"step": 3563 }, { "epoch": 31.82, "grad_norm": 0.046441450715065, "learning_rate": 0.00011988235294117646, "loss": 0.0174, "step": 3564 }, { "epoch": 31.83, "grad_norm": 0.08617646992206573, "learning_rate": 0.00011982352941176469, "loss": 0.02, "step": 3565 }, { "epoch": 31.84, "grad_norm": 0.11688149720430374, "learning_rate": 0.00011976470588235294, "loss": 0.0322, "step": 3566 }, { "epoch": 31.85, "grad_norm": 0.049706801772117615, "learning_rate": 0.00011970588235294118, "loss": 0.0194, "step": 3567 }, { "epoch": 31.86, "grad_norm": 0.046902839094400406, "learning_rate": 0.0001196470588235294, "loss": 0.0201, "step": 3568 }, { "epoch": 31.87, "grad_norm": 0.0508330799639225, "learning_rate": 0.00011958823529411764, "loss": 0.0187, "step": 3569 }, { "epoch": 31.88, "grad_norm": 0.04902074113488197, "learning_rate": 0.00011952941176470587, "loss": 0.0191, "step": 3570 }, { "epoch": 31.88, "grad_norm": 0.050239987671375275, "learning_rate": 0.00011947058823529411, "loss": 0.0184, "step": 3571 }, { "epoch": 31.89, "grad_norm": 0.11308218538761139, "learning_rate": 0.00011941176470588233, "loss": 0.0264, "step": 3572 }, { "epoch": 31.9, "grad_norm": 0.039108309894800186, "learning_rate": 0.00011935294117647057, "loss": 0.0181, "step": 3573 }, { "epoch": 31.91, "grad_norm": 0.05109729990363121, "learning_rate": 0.00011929411764705882, "loss": 0.0226, "step": 3574 }, { "epoch": 31.92, "grad_norm": 0.04487459361553192, "learning_rate": 0.00011923529411764705, "loss": 0.0201, "step": 3575 }, { "epoch": 31.93, "grad_norm": 0.04437851533293724, "learning_rate": 0.00011917647058823529, "loss": 0.0212, "step": 3576 }, { "epoch": 31.94, "grad_norm": 0.052766717970371246, "learning_rate": 0.0001191176470588235, "loss": 0.0175, "step": 3577 }, { "epoch": 31.95, "grad_norm": 0.08078915625810623, "learning_rate": 0.00011905882352941175, "loss": 0.0271, "step": 3578 }, { "epoch": 31.96, "grad_norm": 0.07316518574953079, "learning_rate": 0.00011899999999999999, "loss": 0.0249, 
"step": 3579 }, { "epoch": 31.96, "grad_norm": 0.0467817448079586, "learning_rate": 0.00011894117647058822, "loss": 0.0198, "step": 3580 }, { "epoch": 31.97, "grad_norm": 0.052814096212387085, "learning_rate": 0.00011888235294117647, "loss": 0.0258, "step": 3581 }, { "epoch": 31.98, "grad_norm": 0.06177174299955368, "learning_rate": 0.0001188235294117647, "loss": 0.0246, "step": 3582 }, { "epoch": 31.99, "grad_norm": 0.05956928804516792, "learning_rate": 0.00011876470588235293, "loss": 0.0201, "step": 3583 }, { "epoch": 32.0, "grad_norm": 0.08306241035461426, "learning_rate": 0.00011870588235294117, "loss": 0.0259, "step": 3584 }, { "epoch": 32.01, "grad_norm": 0.04788234084844589, "learning_rate": 0.00011864705882352941, "loss": 0.0238, "step": 3585 }, { "epoch": 32.02, "grad_norm": 0.04594253748655319, "learning_rate": 0.00011858823529411763, "loss": 0.0175, "step": 3586 }, { "epoch": 32.03, "grad_norm": 0.046111781150102615, "learning_rate": 0.00011852941176470587, "loss": 0.0187, "step": 3587 }, { "epoch": 32.04, "grad_norm": 0.051978856325149536, "learning_rate": 0.0001184705882352941, "loss": 0.0181, "step": 3588 }, { "epoch": 32.04, "grad_norm": 0.05756892263889313, "learning_rate": 0.00011841176470588235, "loss": 0.0231, "step": 3589 }, { "epoch": 32.05, "grad_norm": 0.08719819784164429, "learning_rate": 0.00011835294117647059, "loss": 0.0274, "step": 3590 }, { "epoch": 32.06, "grad_norm": 0.09158667922019958, "learning_rate": 0.0001182941176470588, "loss": 0.0258, "step": 3591 }, { "epoch": 32.07, "grad_norm": 0.04844530671834946, "learning_rate": 0.00011823529411764705, "loss": 0.0208, "step": 3592 }, { "epoch": 32.08, "grad_norm": 0.053919319063425064, "learning_rate": 0.00011817647058823528, "loss": 0.0245, "step": 3593 }, { "epoch": 32.09, "grad_norm": 0.04653266444802284, "learning_rate": 0.00011811764705882352, "loss": 0.0195, "step": 3594 }, { "epoch": 32.1, "grad_norm": 0.05394500866532326, "learning_rate": 0.00011805882352941177, "loss": 0.0184, 
"step": 3595 }, { "epoch": 32.11, "grad_norm": 0.07050443440675735, "learning_rate": 0.00011799999999999998, "loss": 0.024, "step": 3596 }, { "epoch": 32.12, "grad_norm": 0.0826314389705658, "learning_rate": 0.00011794117647058822, "loss": 0.0269, "step": 3597 }, { "epoch": 32.12, "grad_norm": 0.04215487465262413, "learning_rate": 0.00011788235294117645, "loss": 0.0198, "step": 3598 }, { "epoch": 32.13, "grad_norm": 0.041640754789114, "learning_rate": 0.0001178235294117647, "loss": 0.0176, "step": 3599 }, { "epoch": 32.14, "grad_norm": 0.044302526861429214, "learning_rate": 0.00011776470588235294, "loss": 0.0198, "step": 3600 }, { "epoch": 32.14, "eval_cer": 0.032235514313740964, "eval_loss": 0.21042084693908691, "eval_runtime": 22.9332, "eval_samples_per_second": 115.204, "eval_steps_per_second": 1.831, "eval_wer": 0.10479968266560888, "step": 3600 }, { "epoch": 32.15, "grad_norm": 0.05428871512413025, "learning_rate": 0.00011770588235294116, "loss": 0.0215, "step": 3601 }, { "epoch": 32.16, "grad_norm": 0.06890869140625, "learning_rate": 0.0001176470588235294, "loss": 0.0216, "step": 3602 }, { "epoch": 32.17, "grad_norm": 0.0953051894903183, "learning_rate": 0.00011758823529411764, "loss": 0.0205, "step": 3603 }, { "epoch": 32.18, "grad_norm": 0.047257766127586365, "learning_rate": 0.00011752941176470587, "loss": 0.0216, "step": 3604 }, { "epoch": 32.19, "grad_norm": 0.04303435608744621, "learning_rate": 0.0001174705882352941, "loss": 0.0203, "step": 3605 }, { "epoch": 32.2, "grad_norm": 0.05104793608188629, "learning_rate": 0.00011741176470588233, "loss": 0.0211, "step": 3606 }, { "epoch": 32.21, "grad_norm": 0.05943785235285759, "learning_rate": 0.00011735294117647058, "loss": 0.0183, "step": 3607 }, { "epoch": 32.21, "grad_norm": 0.06392104923725128, "learning_rate": 0.00011729411764705882, "loss": 0.0198, "step": 3608 }, { "epoch": 32.22, "grad_norm": 0.11663123220205307, "learning_rate": 0.00011723529411764705, "loss": 0.0274, "step": 3609 }, { "epoch": 
32.23, "grad_norm": 0.047014717012643814, "learning_rate": 0.00011717647058823528, "loss": 0.0175, "step": 3610 }, { "epoch": 32.24, "grad_norm": 0.047937631607055664, "learning_rate": 0.00011711764705882351, "loss": 0.0195, "step": 3611 }, { "epoch": 32.25, "grad_norm": 0.046994805335998535, "learning_rate": 0.00011705882352941175, "loss": 0.0183, "step": 3612 }, { "epoch": 32.26, "grad_norm": 0.05170201137661934, "learning_rate": 0.000117, "loss": 0.0189, "step": 3613 }, { "epoch": 32.27, "grad_norm": 0.05205686017870903, "learning_rate": 0.00011694117647058823, "loss": 0.021, "step": 3614 }, { "epoch": 32.28, "grad_norm": 0.057531144469976425, "learning_rate": 0.00011688235294117646, "loss": 0.0179, "step": 3615 }, { "epoch": 32.29, "grad_norm": 0.10487011820077896, "learning_rate": 0.00011682352941176469, "loss": 0.0239, "step": 3616 }, { "epoch": 32.29, "grad_norm": 0.054197829216718674, "learning_rate": 0.00011676470588235293, "loss": 0.0221, "step": 3617 }, { "epoch": 32.3, "grad_norm": 0.05336236581206322, "learning_rate": 0.00011670588235294117, "loss": 0.0248, "step": 3618 }, { "epoch": 32.31, "grad_norm": 0.054558366537094116, "learning_rate": 0.00011664705882352939, "loss": 0.0186, "step": 3619 }, { "epoch": 32.32, "grad_norm": 0.05783456563949585, "learning_rate": 0.00011658823529411763, "loss": 0.0174, "step": 3620 }, { "epoch": 32.33, "grad_norm": 0.06637098640203476, "learning_rate": 0.00011652941176470588, "loss": 0.0192, "step": 3621 }, { "epoch": 32.34, "grad_norm": 0.12480707466602325, "learning_rate": 0.00011647058823529411, "loss": 0.0258, "step": 3622 }, { "epoch": 32.35, "grad_norm": 0.04600154235959053, "learning_rate": 0.00011641176470588235, "loss": 0.018, "step": 3623 }, { "epoch": 32.36, "grad_norm": 0.05718235671520233, "learning_rate": 0.00011635294117647057, "loss": 0.022, "step": 3624 }, { "epoch": 32.37, "grad_norm": 0.056042853742837906, "learning_rate": 0.00011629411764705881, "loss": 0.0222, "step": 3625 }, { "epoch": 32.38, 
"grad_norm": 0.04886164516210556, "learning_rate": 0.00011623529411764705, "loss": 0.0194, "step": 3626 }, { "epoch": 32.38, "grad_norm": 0.05563600733876228, "learning_rate": 0.00011617647058823528, "loss": 0.0184, "step": 3627 }, { "epoch": 32.39, "grad_norm": 0.09011834114789963, "learning_rate": 0.00011611764705882353, "loss": 0.0307, "step": 3628 }, { "epoch": 32.4, "grad_norm": 0.04582998901605606, "learning_rate": 0.00011605882352941174, "loss": 0.0195, "step": 3629 }, { "epoch": 32.41, "grad_norm": 0.05276224762201309, "learning_rate": 0.00011599999999999999, "loss": 0.0231, "step": 3630 }, { "epoch": 32.42, "grad_norm": 0.04544449225068092, "learning_rate": 0.00011594117647058823, "loss": 0.0172, "step": 3631 }, { "epoch": 32.43, "grad_norm": 0.04735076054930687, "learning_rate": 0.00011588235294117646, "loss": 0.02, "step": 3632 }, { "epoch": 32.44, "grad_norm": 0.05830598250031471, "learning_rate": 0.0001158235294117647, "loss": 0.0202, "step": 3633 }, { "epoch": 32.45, "grad_norm": 0.12295148521661758, "learning_rate": 0.00011576470588235292, "loss": 0.0314, "step": 3634 }, { "epoch": 32.46, "grad_norm": 0.047054532915353775, "learning_rate": 0.00011570588235294116, "loss": 0.0237, "step": 3635 }, { "epoch": 32.46, "grad_norm": 0.050387535244226456, "learning_rate": 0.00011564705882352941, "loss": 0.0203, "step": 3636 }, { "epoch": 32.47, "grad_norm": 0.04612543806433678, "learning_rate": 0.00011558823529411764, "loss": 0.0199, "step": 3637 }, { "epoch": 32.48, "grad_norm": 0.04046722501516342, "learning_rate": 0.00011552941176470587, "loss": 0.015, "step": 3638 }, { "epoch": 32.49, "grad_norm": 0.06123023107647896, "learning_rate": 0.00011547058823529411, "loss": 0.0256, "step": 3639 }, { "epoch": 32.5, "grad_norm": 0.06880581378936768, "learning_rate": 0.00011541176470588234, "loss": 0.0196, "step": 3640 }, { "epoch": 32.51, "grad_norm": 0.08232057094573975, "learning_rate": 0.00011535294117647058, "loss": 0.0253, "step": 3641 }, { "epoch": 32.52, 
"grad_norm": 0.045623622834682465, "learning_rate": 0.00011529411764705881, "loss": 0.0201, "step": 3642 }, { "epoch": 32.53, "grad_norm": 0.05098617449402809, "learning_rate": 0.00011523529411764704, "loss": 0.0225, "step": 3643 }, { "epoch": 32.54, "grad_norm": 0.04750572890043259, "learning_rate": 0.00011517647058823529, "loss": 0.0186, "step": 3644 }, { "epoch": 32.54, "grad_norm": 0.047422174364328384, "learning_rate": 0.00011511764705882352, "loss": 0.0178, "step": 3645 }, { "epoch": 32.55, "grad_norm": 0.06636884808540344, "learning_rate": 0.00011505882352941176, "loss": 0.0215, "step": 3646 }, { "epoch": 32.56, "grad_norm": 0.0959538146853447, "learning_rate": 0.000115, "loss": 0.0243, "step": 3647 }, { "epoch": 32.57, "grad_norm": 0.044255390763282776, "learning_rate": 0.00011494117647058822, "loss": 0.0195, "step": 3648 }, { "epoch": 32.58, "grad_norm": 0.04697493091225624, "learning_rate": 0.00011488235294117646, "loss": 0.0229, "step": 3649 }, { "epoch": 32.59, "grad_norm": 0.055186036974191666, "learning_rate": 0.00011482352941176469, "loss": 0.0214, "step": 3650 }, { "epoch": 32.6, "grad_norm": 0.048233091831207275, "learning_rate": 0.00011476470588235294, "loss": 0.0182, "step": 3651 }, { "epoch": 32.61, "grad_norm": 0.058868657797575, "learning_rate": 0.00011470588235294115, "loss": 0.0211, "step": 3652 }, { "epoch": 32.62, "grad_norm": 0.07956228405237198, "learning_rate": 0.0001146470588235294, "loss": 0.0212, "step": 3653 }, { "epoch": 32.62, "grad_norm": 0.04887668043375015, "learning_rate": 0.00011458823529411764, "loss": 0.0198, "step": 3654 }, { "epoch": 32.63, "grad_norm": 0.048192281275987625, "learning_rate": 0.00011452941176470587, "loss": 0.0227, "step": 3655 }, { "epoch": 32.64, "grad_norm": 0.04844574257731438, "learning_rate": 0.00011447058823529411, "loss": 0.0201, "step": 3656 }, { "epoch": 32.65, "grad_norm": 0.04375741258263588, "learning_rate": 0.00011441176470588233, "loss": 0.0205, "step": 3657 }, { "epoch": 32.66, "grad_norm": 
0.051589082926511765, "learning_rate": 0.00011435294117647057, "loss": 0.0212, "step": 3658 }, { "epoch": 32.67, "grad_norm": 0.09967084974050522, "learning_rate": 0.00011429411764705882, "loss": 0.0285, "step": 3659 }, { "epoch": 32.68, "grad_norm": 0.05101574957370758, "learning_rate": 0.00011423529411764705, "loss": 0.0214, "step": 3660 }, { "epoch": 32.69, "grad_norm": 0.04472147300839424, "learning_rate": 0.00011417647058823529, "loss": 0.0194, "step": 3661 }, { "epoch": 32.7, "grad_norm": 0.04980763792991638, "learning_rate": 0.00011411764705882352, "loss": 0.0203, "step": 3662 }, { "epoch": 32.71, "grad_norm": 0.04720604419708252, "learning_rate": 0.00011405882352941175, "loss": 0.0166, "step": 3663 }, { "epoch": 32.71, "grad_norm": 0.061146367341279984, "learning_rate": 0.00011399999999999999, "loss": 0.019, "step": 3664 }, { "epoch": 32.72, "grad_norm": 0.0784432664513588, "learning_rate": 0.00011394117647058824, "loss": 0.0185, "step": 3665 }, { "epoch": 32.73, "grad_norm": 0.07377848029136658, "learning_rate": 0.00011388235294117645, "loss": 0.0245, "step": 3666 }, { "epoch": 32.74, "grad_norm": 0.054255399852991104, "learning_rate": 0.0001138235294117647, "loss": 0.0206, "step": 3667 }, { "epoch": 32.75, "grad_norm": 0.05981878191232681, "learning_rate": 0.00011376470588235293, "loss": 0.0236, "step": 3668 }, { "epoch": 32.76, "grad_norm": 0.04551875963807106, "learning_rate": 0.00011370588235294117, "loss": 0.0215, "step": 3669 }, { "epoch": 32.77, "grad_norm": 0.055024564266204834, "learning_rate": 0.00011364705882352941, "loss": 0.0208, "step": 3670 }, { "epoch": 32.78, "grad_norm": 0.06826770305633545, "learning_rate": 0.00011358823529411763, "loss": 0.0225, "step": 3671 }, { "epoch": 32.79, "grad_norm": 0.09451206028461456, "learning_rate": 0.00011352941176470587, "loss": 0.021, "step": 3672 }, { "epoch": 32.79, "grad_norm": 0.043758291751146317, "learning_rate": 0.0001134705882352941, "loss": 0.0175, "step": 3673 }, { "epoch": 32.8, "grad_norm": 
0.04687466844916344, "learning_rate": 0.00011341176470588235, "loss": 0.0198, "step": 3674 }, { "epoch": 32.81, "grad_norm": 0.046850625425577164, "learning_rate": 0.00011335294117647059, "loss": 0.0226, "step": 3675 }, { "epoch": 32.82, "grad_norm": 0.045804087072610855, "learning_rate": 0.0001132941176470588, "loss": 0.0196, "step": 3676 }, { "epoch": 32.83, "grad_norm": 0.057633958756923676, "learning_rate": 0.00011323529411764705, "loss": 0.0183, "step": 3677 }, { "epoch": 32.84, "grad_norm": 0.08373904228210449, "learning_rate": 0.00011317647058823528, "loss": 0.0265, "step": 3678 }, { "epoch": 32.85, "grad_norm": 0.049049582332372665, "learning_rate": 0.00011311764705882352, "loss": 0.0224, "step": 3679 }, { "epoch": 32.86, "grad_norm": 0.043412089347839355, "learning_rate": 0.00011305882352941177, "loss": 0.019, "step": 3680 }, { "epoch": 32.87, "grad_norm": 0.041254326701164246, "learning_rate": 0.00011299999999999998, "loss": 0.0185, "step": 3681 }, { "epoch": 32.88, "grad_norm": 0.05116904899477959, "learning_rate": 0.00011294117647058823, "loss": 0.0217, "step": 3682 }, { "epoch": 32.88, "grad_norm": 0.05248168110847473, "learning_rate": 0.00011288235294117647, "loss": 0.02, "step": 3683 }, { "epoch": 32.89, "grad_norm": 0.09650642424821854, "learning_rate": 0.0001128235294117647, "loss": 0.0219, "step": 3684 }, { "epoch": 32.9, "grad_norm": 0.05646537244319916, "learning_rate": 0.00011276470588235293, "loss": 0.0218, "step": 3685 }, { "epoch": 32.91, "grad_norm": 0.04979201406240463, "learning_rate": 0.00011270588235294116, "loss": 0.021, "step": 3686 }, { "epoch": 32.92, "grad_norm": 0.04833272099494934, "learning_rate": 0.0001126470588235294, "loss": 0.0226, "step": 3687 }, { "epoch": 32.93, "grad_norm": 0.04990481212735176, "learning_rate": 0.00011258823529411765, "loss": 0.0193, "step": 3688 }, { "epoch": 32.94, "grad_norm": 0.057368963956832886, "learning_rate": 0.00011252941176470588, "loss": 0.0204, "step": 3689 }, { "epoch": 32.95, "grad_norm": 
0.06582289934158325, "learning_rate": 0.0001124705882352941, "loss": 0.0221, "step": 3690 }, { "epoch": 32.96, "grad_norm": 0.06527213007211685, "learning_rate": 0.00011241176470588233, "loss": 0.0229, "step": 3691 }, { "epoch": 32.96, "grad_norm": 0.04593219608068466, "learning_rate": 0.00011235294117647058, "loss": 0.0216, "step": 3692 }, { "epoch": 32.97, "grad_norm": 0.048174042254686356, "learning_rate": 0.00011229411764705882, "loss": 0.0196, "step": 3693 }, { "epoch": 32.98, "grad_norm": 0.04866315796971321, "learning_rate": 0.00011223529411764705, "loss": 0.0179, "step": 3694 }, { "epoch": 32.99, "grad_norm": 0.05889221280813217, "learning_rate": 0.00011217647058823528, "loss": 0.0179, "step": 3695 }, { "epoch": 33.0, "grad_norm": 0.07265275716781616, "learning_rate": 0.00011211764705882351, "loss": 0.021, "step": 3696 }, { "epoch": 33.01, "grad_norm": 0.043408121913671494, "learning_rate": 0.00011205882352941175, "loss": 0.018, "step": 3697 }, { "epoch": 33.02, "grad_norm": 0.041275300085544586, "learning_rate": 0.000112, "loss": 0.0167, "step": 3698 }, { "epoch": 33.03, "grad_norm": 0.04630756005644798, "learning_rate": 0.00011194117647058821, "loss": 0.0185, "step": 3699 }, { "epoch": 33.04, "grad_norm": 0.04922295734286308, "learning_rate": 0.00011188235294117646, "loss": 0.0181, "step": 3700 }, { "epoch": 33.04, "eval_cer": 0.032483566540009695, "eval_loss": 0.20927734673023224, "eval_runtime": 22.7958, "eval_samples_per_second": 115.898, "eval_steps_per_second": 1.842, "eval_wer": 0.10497818326061087, "step": 3700 }, { "epoch": 33.04, "grad_norm": 0.059649765491485596, "learning_rate": 0.0001118235294117647, "loss": 0.0217, "step": 3701 }, { "epoch": 33.05, "grad_norm": 0.07166648656129837, "learning_rate": 0.00011176470588235293, "loss": 0.0202, "step": 3702 }, { "epoch": 33.06, "grad_norm": 0.056485336273908615, "learning_rate": 0.00011170588235294117, "loss": 0.0214, "step": 3703 }, { "epoch": 33.07, "grad_norm": 0.04659261554479599, 
"learning_rate": 0.00011164705882352939, "loss": 0.0187, "step": 3704 }, { "epoch": 33.08, "grad_norm": 0.054145973175764084, "learning_rate": 0.00011158823529411763, "loss": 0.0195, "step": 3705 }, { "epoch": 33.09, "grad_norm": 0.05096295475959778, "learning_rate": 0.00011152941176470588, "loss": 0.0189, "step": 3706 }, { "epoch": 33.1, "grad_norm": 0.04888540133833885, "learning_rate": 0.00011147058823529411, "loss": 0.0213, "step": 3707 }, { "epoch": 33.11, "grad_norm": 0.06510414928197861, "learning_rate": 0.00011141176470588235, "loss": 0.024, "step": 3708 }, { "epoch": 33.12, "grad_norm": 0.2117670178413391, "learning_rate": 0.00011135294117647057, "loss": 0.0211, "step": 3709 }, { "epoch": 33.12, "grad_norm": 0.04437718167901039, "learning_rate": 0.00011129411764705881, "loss": 0.0211, "step": 3710 }, { "epoch": 33.13, "grad_norm": 0.0447576679289341, "learning_rate": 0.00011123529411764705, "loss": 0.0191, "step": 3711 }, { "epoch": 33.14, "grad_norm": 0.05013945698738098, "learning_rate": 0.00011117647058823528, "loss": 0.0211, "step": 3712 }, { "epoch": 33.15, "grad_norm": 0.04286734759807587, "learning_rate": 0.00011111764705882353, "loss": 0.0164, "step": 3713 }, { "epoch": 33.16, "grad_norm": 0.06261122226715088, "learning_rate": 0.00011105882352941174, "loss": 0.0235, "step": 3714 }, { "epoch": 33.17, "grad_norm": 0.08340524137020111, "learning_rate": 0.00011099999999999999, "loss": 0.0245, "step": 3715 }, { "epoch": 33.18, "grad_norm": 0.04745287075638771, "learning_rate": 0.00011094117647058823, "loss": 0.0204, "step": 3716 }, { "epoch": 33.19, "grad_norm": 0.04093869775533676, "learning_rate": 0.00011088235294117646, "loss": 0.0189, "step": 3717 }, { "epoch": 33.2, "grad_norm": 0.046972230076789856, "learning_rate": 0.00011082352941176469, "loss": 0.0189, "step": 3718 }, { "epoch": 33.21, "grad_norm": 0.053276680409908295, "learning_rate": 0.00011076470588235293, "loss": 0.0177, "step": 3719 }, { "epoch": 33.21, "grad_norm": 0.05650439113378525, 
"learning_rate": 0.00011070588235294116, "loss": 0.0213, "step": 3720 }, { "epoch": 33.22, "grad_norm": 0.13323943316936493, "learning_rate": 0.00011064705882352941, "loss": 0.0245, "step": 3721 }, { "epoch": 33.23, "grad_norm": 0.04782477766275406, "learning_rate": 0.00011058823529411765, "loss": 0.0189, "step": 3722 }, { "epoch": 33.24, "grad_norm": 0.05005009099841118, "learning_rate": 0.00011052941176470587, "loss": 0.021, "step": 3723 }, { "epoch": 33.25, "grad_norm": 0.06174638122320175, "learning_rate": 0.00011047058823529411, "loss": 0.0226, "step": 3724 }, { "epoch": 33.26, "grad_norm": 0.054218094795942307, "learning_rate": 0.00011041176470588234, "loss": 0.0191, "step": 3725 }, { "epoch": 33.27, "grad_norm": 0.047325361520051956, "learning_rate": 0.00011035294117647058, "loss": 0.0144, "step": 3726 }, { "epoch": 33.28, "grad_norm": 0.07584651559591293, "learning_rate": 0.00011029411764705883, "loss": 0.0182, "step": 3727 }, { "epoch": 33.29, "grad_norm": 0.06795705109834671, "learning_rate": 0.00011023529411764704, "loss": 0.0236, "step": 3728 }, { "epoch": 33.29, "grad_norm": 0.0456150621175766, "learning_rate": 0.00011017647058823529, "loss": 0.0222, "step": 3729 }, { "epoch": 33.3, "grad_norm": 0.04702172428369522, "learning_rate": 0.00011011764705882352, "loss": 0.0219, "step": 3730 }, { "epoch": 33.31, "grad_norm": 0.05131182819604874, "learning_rate": 0.00011005882352941176, "loss": 0.0214, "step": 3731 }, { "epoch": 33.32, "grad_norm": 0.06191159412264824, "learning_rate": 0.00010999999999999998, "loss": 0.0216, "step": 3732 }, { "epoch": 33.33, "grad_norm": 0.053905926644802094, "learning_rate": 0.00010994117647058822, "loss": 0.0185, "step": 3733 }, { "epoch": 33.34, "grad_norm": 0.08019814640283585, "learning_rate": 0.00010988235294117646, "loss": 0.0232, "step": 3734 }, { "epoch": 33.35, "grad_norm": 0.04236734285950661, "learning_rate": 0.0001098235294117647, "loss": 0.0223, "step": 3735 }, { "epoch": 33.36, "grad_norm": 0.0380890928208828, 
"learning_rate": 0.00010976470588235294, "loss": 0.0195, "step": 3736 }, { "epoch": 33.37, "grad_norm": 0.05268734320998192, "learning_rate": 0.00010970588235294117, "loss": 0.0206, "step": 3737 }, { "epoch": 33.38, "grad_norm": 0.049979280680418015, "learning_rate": 0.0001096470588235294, "loss": 0.018, "step": 3738 }, { "epoch": 33.38, "grad_norm": 0.05195252224802971, "learning_rate": 0.00010958823529411764, "loss": 0.0166, "step": 3739 }, { "epoch": 33.39, "grad_norm": 0.12251890450716019, "learning_rate": 0.00010952941176470587, "loss": 0.0271, "step": 3740 }, { "epoch": 33.4, "grad_norm": 0.04267783463001251, "learning_rate": 0.00010947058823529411, "loss": 0.0182, "step": 3741 }, { "epoch": 33.41, "grad_norm": 0.0516081228852272, "learning_rate": 0.00010941176470588234, "loss": 0.0212, "step": 3742 }, { "epoch": 33.42, "grad_norm": 0.05559803172945976, "learning_rate": 0.00010935294117647057, "loss": 0.0176, "step": 3743 }, { "epoch": 33.43, "grad_norm": 0.059197813272476196, "learning_rate": 0.00010929411764705882, "loss": 0.0228, "step": 3744 }, { "epoch": 33.44, "grad_norm": 0.05572134256362915, "learning_rate": 0.00010923529411764706, "loss": 0.0187, "step": 3745 }, { "epoch": 33.45, "grad_norm": 0.10062196105718613, "learning_rate": 0.00010917647058823528, "loss": 0.0305, "step": 3746 }, { "epoch": 33.46, "grad_norm": 0.04885195568203926, "learning_rate": 0.00010911764705882352, "loss": 0.0215, "step": 3747 }, { "epoch": 33.46, "grad_norm": 0.04990047216415405, "learning_rate": 0.00010905882352941175, "loss": 0.0212, "step": 3748 }, { "epoch": 33.47, "grad_norm": 0.055074553936719894, "learning_rate": 0.00010899999999999999, "loss": 0.0226, "step": 3749 }, { "epoch": 33.48, "grad_norm": 0.05209925398230553, "learning_rate": 0.00010894117647058824, "loss": 0.0224, "step": 3750 }, { "epoch": 33.49, "grad_norm": 0.04867013543844223, "learning_rate": 0.00010888235294117645, "loss": 0.0178, "step": 3751 }, { "epoch": 33.5, "grad_norm": 0.07351130992174149, 
"learning_rate": 0.0001088235294117647, "loss": 0.0243, "step": 3752 }, { "epoch": 33.51, "grad_norm": 0.07198871672153473, "learning_rate": 0.00010876470588235293, "loss": 0.0216, "step": 3753 }, { "epoch": 33.52, "grad_norm": 0.04132643714547157, "learning_rate": 0.00010870588235294117, "loss": 0.0178, "step": 3754 }, { "epoch": 33.53, "grad_norm": 0.04665254428982735, "learning_rate": 0.00010864705882352941, "loss": 0.0202, "step": 3755 }, { "epoch": 33.54, "grad_norm": 0.05033242329955101, "learning_rate": 0.00010858823529411763, "loss": 0.0192, "step": 3756 }, { "epoch": 33.54, "grad_norm": 0.04282531514763832, "learning_rate": 0.00010852941176470587, "loss": 0.0175, "step": 3757 }, { "epoch": 33.55, "grad_norm": 0.08175268769264221, "learning_rate": 0.0001084705882352941, "loss": 0.0231, "step": 3758 }, { "epoch": 33.56, "grad_norm": 0.10714887082576752, "learning_rate": 0.00010841176470588235, "loss": 0.0389, "step": 3759 }, { "epoch": 33.57, "grad_norm": 0.04657446965575218, "learning_rate": 0.00010835294117647059, "loss": 0.022, "step": 3760 }, { "epoch": 33.58, "grad_norm": 0.04799662530422211, "learning_rate": 0.0001082941176470588, "loss": 0.0202, "step": 3761 }, { "epoch": 33.59, "grad_norm": 0.05010412260890007, "learning_rate": 0.00010823529411764705, "loss": 0.0202, "step": 3762 }, { "epoch": 33.6, "grad_norm": 0.04735333099961281, "learning_rate": 0.00010817647058823529, "loss": 0.0205, "step": 3763 }, { "epoch": 33.61, "grad_norm": 0.04919218271970749, "learning_rate": 0.00010811764705882352, "loss": 0.0154, "step": 3764 }, { "epoch": 33.62, "grad_norm": 0.09689608961343765, "learning_rate": 0.00010805882352941175, "loss": 0.0239, "step": 3765 }, { "epoch": 33.62, "grad_norm": 0.04646177217364311, "learning_rate": 0.00010799999999999998, "loss": 0.0204, "step": 3766 }, { "epoch": 33.63, "grad_norm": 0.04333686828613281, "learning_rate": 0.00010794117647058823, "loss": 0.0203, "step": 3767 }, { "epoch": 33.64, "grad_norm": 0.047065041959285736, 
"learning_rate": 0.00010788235294117647, "loss": 0.0178, "step": 3768 }, { "epoch": 33.65, "grad_norm": 0.04462426155805588, "learning_rate": 0.0001078235294117647, "loss": 0.017, "step": 3769 }, { "epoch": 33.66, "grad_norm": 0.052010223269462585, "learning_rate": 0.00010776470588235293, "loss": 0.0178, "step": 3770 }, { "epoch": 33.67, "grad_norm": 0.10874468088150024, "learning_rate": 0.00010770588235294116, "loss": 0.0263, "step": 3771 }, { "epoch": 33.68, "grad_norm": 0.040056925266981125, "learning_rate": 0.0001076470588235294, "loss": 0.0153, "step": 3772 }, { "epoch": 33.69, "grad_norm": 0.044297847896814346, "learning_rate": 0.00010758823529411765, "loss": 0.0181, "step": 3773 }, { "epoch": 33.7, "grad_norm": 0.04572479799389839, "learning_rate": 0.00010752941176470588, "loss": 0.0183, "step": 3774 }, { "epoch": 33.71, "grad_norm": 0.04739643633365631, "learning_rate": 0.0001074705882352941, "loss": 0.021, "step": 3775 }, { "epoch": 33.71, "grad_norm": 0.0541643463075161, "learning_rate": 0.00010741176470588234, "loss": 0.0211, "step": 3776 }, { "epoch": 33.72, "grad_norm": 0.08154944330453873, "learning_rate": 0.00010735294117647058, "loss": 0.0209, "step": 3777 }, { "epoch": 33.73, "grad_norm": 0.12991441786289215, "learning_rate": 0.00010729411764705882, "loss": 0.0265, "step": 3778 }, { "epoch": 33.74, "grad_norm": 0.046380940824747086, "learning_rate": 0.00010723529411764704, "loss": 0.0186, "step": 3779 }, { "epoch": 33.75, "grad_norm": 0.049973901361227036, "learning_rate": 0.00010717647058823528, "loss": 0.0189, "step": 3780 }, { "epoch": 33.76, "grad_norm": 0.04776526615023613, "learning_rate": 0.00010711764705882353, "loss": 0.019, "step": 3781 }, { "epoch": 33.77, "grad_norm": 0.0652221217751503, "learning_rate": 0.00010705882352941176, "loss": 0.0206, "step": 3782 }, { "epoch": 33.78, "grad_norm": 0.04913077875971794, "learning_rate": 0.000107, "loss": 0.018, "step": 3783 }, { "epoch": 33.79, "grad_norm": 0.07298099994659424, "learning_rate": 
0.00010694117647058822, "loss": 0.0242, "step": 3784 }, { "epoch": 33.79, "grad_norm": 0.044130854308605194, "learning_rate": 0.00010688235294117646, "loss": 0.0181, "step": 3785 }, { "epoch": 33.8, "grad_norm": 0.04705604165792465, "learning_rate": 0.0001068235294117647, "loss": 0.0195, "step": 3786 }, { "epoch": 33.81, "grad_norm": 0.055822499096393585, "learning_rate": 0.00010676470588235293, "loss": 0.0199, "step": 3787 }, { "epoch": 33.82, "grad_norm": 0.06081099435687065, "learning_rate": 0.00010670588235294118, "loss": 0.0249, "step": 3788 }, { "epoch": 33.83, "grad_norm": 0.0651383101940155, "learning_rate": 0.00010664705882352939, "loss": 0.0208, "step": 3789 }, { "epoch": 33.84, "grad_norm": 0.09618881344795227, "learning_rate": 0.00010658823529411764, "loss": 0.0292, "step": 3790 }, { "epoch": 33.85, "grad_norm": 0.04474983736872673, "learning_rate": 0.00010652941176470588, "loss": 0.0187, "step": 3791 }, { "epoch": 33.86, "grad_norm": 0.04621360823512077, "learning_rate": 0.00010647058823529411, "loss": 0.0181, "step": 3792 }, { "epoch": 33.87, "grad_norm": 0.05100925266742706, "learning_rate": 0.00010641176470588235, "loss": 0.0178, "step": 3793 }, { "epoch": 33.88, "grad_norm": 0.04568636044859886, "learning_rate": 0.00010635294117647057, "loss": 0.0188, "step": 3794 }, { "epoch": 33.88, "grad_norm": 0.05396410822868347, "learning_rate": 0.00010629411764705881, "loss": 0.0188, "step": 3795 }, { "epoch": 33.89, "grad_norm": 0.07931025326251984, "learning_rate": 0.00010623529411764705, "loss": 0.0261, "step": 3796 }, { "epoch": 33.9, "grad_norm": 0.04671921953558922, "learning_rate": 0.00010617647058823528, "loss": 0.019, "step": 3797 }, { "epoch": 33.91, "grad_norm": 0.049630068242549896, "learning_rate": 0.00010611764705882351, "loss": 0.0197, "step": 3798 }, { "epoch": 33.92, "grad_norm": 0.05084071308374405, "learning_rate": 0.00010605882352941176, "loss": 0.0215, "step": 3799 }, { "epoch": 33.93, "grad_norm": 0.050024282187223434, "learning_rate": 
0.00010599999999999999, "loss": 0.0166, "step": 3800 }, { "epoch": 33.93, "eval_cer": 0.03149887436906413, "eval_loss": 0.211999773979187, "eval_runtime": 22.198, "eval_samples_per_second": 119.02, "eval_steps_per_second": 1.892, "eval_wer": 0.1032130107100357, "step": 3800 }, { "epoch": 33.94, "grad_norm": 0.054959606379270554, "learning_rate": 0.00010594117647058823, "loss": 0.0214, "step": 3801 }, { "epoch": 33.95, "grad_norm": 0.08315333724021912, "learning_rate": 0.00010588235294117647, "loss": 0.0215, "step": 3802 }, { "epoch": 33.96, "grad_norm": 0.06588681787252426, "learning_rate": 0.00010582352941176469, "loss": 0.0231, "step": 3803 }, { "epoch": 33.96, "grad_norm": 0.055088456720113754, "learning_rate": 0.00010576470588235293, "loss": 0.0239, "step": 3804 }, { "epoch": 33.97, "grad_norm": 0.04421170800924301, "learning_rate": 0.00010570588235294116, "loss": 0.0185, "step": 3805 }, { "epoch": 33.98, "grad_norm": 0.051479555666446686, "learning_rate": 0.00010564705882352941, "loss": 0.0186, "step": 3806 }, { "epoch": 33.99, "grad_norm": 0.0651811733841896, "learning_rate": 0.00010558823529411765, "loss": 0.0169, "step": 3807 }, { "epoch": 34.0, "grad_norm": 0.08760152012109756, "learning_rate": 0.00010552941176470587, "loss": 0.0221, "step": 3808 }, { "epoch": 34.01, "grad_norm": 0.04792863503098488, "learning_rate": 0.00010547058823529411, "loss": 0.0193, "step": 3809 }, { "epoch": 34.02, "grad_norm": 0.04409262537956238, "learning_rate": 0.00010541176470588234, "loss": 0.0213, "step": 3810 }, { "epoch": 34.03, "grad_norm": 0.04361652955412865, "learning_rate": 0.00010535294117647058, "loss": 0.0193, "step": 3811 }, { "epoch": 34.04, "grad_norm": 0.05131509155035019, "learning_rate": 0.0001052941176470588, "loss": 0.0193, "step": 3812 }, { "epoch": 34.04, "grad_norm": 0.04921355098485947, "learning_rate": 0.00010523529411764704, "loss": 0.0188, "step": 3813 }, { "epoch": 34.05, "grad_norm": 0.060709960758686066, "learning_rate": 0.00010517647058823529, 
"loss": 0.0201, "step": 3814 }, { "epoch": 34.06, "grad_norm": 0.07292447984218597, "learning_rate": 0.00010511764705882352, "loss": 0.0225, "step": 3815 }, { "epoch": 34.07, "grad_norm": 0.05245755612850189, "learning_rate": 0.00010505882352941176, "loss": 0.0214, "step": 3816 }, { "epoch": 34.08, "grad_norm": 0.05187506601214409, "learning_rate": 0.00010499999999999999, "loss": 0.0223, "step": 3817 }, { "epoch": 34.09, "grad_norm": 0.04697016626596451, "learning_rate": 0.00010494117647058822, "loss": 0.0153, "step": 3818 }, { "epoch": 34.1, "grad_norm": 0.05046670138835907, "learning_rate": 0.00010488235294117646, "loss": 0.0169, "step": 3819 }, { "epoch": 34.11, "grad_norm": 0.06887195259332657, "learning_rate": 0.0001048235294117647, "loss": 0.0198, "step": 3820 }, { "epoch": 34.12, "grad_norm": 0.10050436854362488, "learning_rate": 0.00010476470588235294, "loss": 0.03, "step": 3821 }, { "epoch": 34.12, "grad_norm": 0.04660368338227272, "learning_rate": 0.00010470588235294117, "loss": 0.016, "step": 3822 }, { "epoch": 34.13, "grad_norm": 0.050970591604709625, "learning_rate": 0.0001046470588235294, "loss": 0.0194, "step": 3823 }, { "epoch": 34.14, "grad_norm": 0.0501236766576767, "learning_rate": 0.00010458823529411764, "loss": 0.0207, "step": 3824 }, { "epoch": 34.15, "grad_norm": 0.0496857650578022, "learning_rate": 0.00010452941176470588, "loss": 0.0169, "step": 3825 }, { "epoch": 34.16, "grad_norm": 0.04786838963627815, "learning_rate": 0.0001044705882352941, "loss": 0.0179, "step": 3826 }, { "epoch": 34.17, "grad_norm": 0.07660852372646332, "learning_rate": 0.00010441176470588234, "loss": 0.0208, "step": 3827 }, { "epoch": 34.18, "grad_norm": 0.04219606891274452, "learning_rate": 0.00010435294117647057, "loss": 0.0193, "step": 3828 }, { "epoch": 34.19, "grad_norm": 0.0388142429292202, "learning_rate": 0.00010429411764705882, "loss": 0.0164, "step": 3829 }, { "epoch": 34.2, "grad_norm": 0.04581207409501076, "learning_rate": 0.00010423529411764706, "loss": 
0.0192, "step": 3830 }, { "epoch": 34.21, "grad_norm": 0.045865558087825775, "learning_rate": 0.00010417647058823528, "loss": 0.0194, "step": 3831 }, { "epoch": 34.21, "grad_norm": 0.052915655076503754, "learning_rate": 0.00010411764705882352, "loss": 0.0181, "step": 3832 }, { "epoch": 34.22, "grad_norm": 0.09456345438957214, "learning_rate": 0.00010405882352941175, "loss": 0.0266, "step": 3833 }, { "epoch": 34.23, "grad_norm": 0.04623633995652199, "learning_rate": 0.000104, "loss": 0.0183, "step": 3834 }, { "epoch": 34.24, "grad_norm": 0.05617140978574753, "learning_rate": 0.00010394117647058824, "loss": 0.0208, "step": 3835 }, { "epoch": 34.25, "grad_norm": 0.044178467243909836, "learning_rate": 0.00010388235294117645, "loss": 0.0167, "step": 3836 }, { "epoch": 34.26, "grad_norm": 0.055569615215063095, "learning_rate": 0.0001038235294117647, "loss": 0.0201, "step": 3837 }, { "epoch": 34.27, "grad_norm": 0.04524802044034004, "learning_rate": 0.00010376470588235293, "loss": 0.0171, "step": 3838 }, { "epoch": 34.28, "grad_norm": 0.06489437073469162, "learning_rate": 0.00010370588235294117, "loss": 0.0204, "step": 3839 }, { "epoch": 34.29, "grad_norm": 0.0554768443107605, "learning_rate": 0.00010364705882352941, "loss": 0.0189, "step": 3840 }, { "epoch": 34.29, "grad_norm": 0.046934958547353745, "learning_rate": 0.00010358823529411763, "loss": 0.0205, "step": 3841 }, { "epoch": 34.3, "grad_norm": 0.057572390884160995, "learning_rate": 0.00010352941176470587, "loss": 0.0181, "step": 3842 }, { "epoch": 34.31, "grad_norm": 0.05205719172954559, "learning_rate": 0.00010347058823529412, "loss": 0.0206, "step": 3843 }, { "epoch": 34.32, "grad_norm": 0.04701286554336548, "learning_rate": 0.00010341176470588235, "loss": 0.016, "step": 3844 }, { "epoch": 34.33, "grad_norm": 0.06379012763500214, "learning_rate": 0.00010335294117647058, "loss": 0.0227, "step": 3845 }, { "epoch": 34.34, "grad_norm": 0.07218367606401443, "learning_rate": 0.0001032941176470588, "loss": 0.0257, 
"step": 3846 }, { "epoch": 34.35, "grad_norm": 0.04608406499028206, "learning_rate": 0.00010323529411764705, "loss": 0.0177, "step": 3847 }, { "epoch": 34.36, "grad_norm": 0.04197123646736145, "learning_rate": 0.00010317647058823529, "loss": 0.0174, "step": 3848 }, { "epoch": 34.37, "grad_norm": 0.04382241144776344, "learning_rate": 0.00010311764705882352, "loss": 0.0203, "step": 3849 }, { "epoch": 34.38, "grad_norm": 0.03994964063167572, "learning_rate": 0.00010305882352941175, "loss": 0.0171, "step": 3850 }, { "epoch": 34.38, "grad_norm": 0.0624520443379879, "learning_rate": 0.00010299999999999998, "loss": 0.0244, "step": 3851 }, { "epoch": 34.39, "grad_norm": 0.07916943728923798, "learning_rate": 0.00010294117647058823, "loss": 0.0209, "step": 3852 }, { "epoch": 34.4, "grad_norm": 0.04561072960495949, "learning_rate": 0.00010288235294117647, "loss": 0.0223, "step": 3853 }, { "epoch": 34.41, "grad_norm": 0.04165709391236305, "learning_rate": 0.0001028235294117647, "loss": 0.0186, "step": 3854 }, { "epoch": 34.42, "grad_norm": 0.04192725569009781, "learning_rate": 0.00010276470588235293, "loss": 0.0205, "step": 3855 }, { "epoch": 34.43, "grad_norm": 0.06156204640865326, "learning_rate": 0.00010270588235294116, "loss": 0.0216, "step": 3856 }, { "epoch": 34.44, "grad_norm": 0.043788302689790726, "learning_rate": 0.0001026470588235294, "loss": 0.0158, "step": 3857 }, { "epoch": 34.45, "grad_norm": 0.08256421983242035, "learning_rate": 0.00010258823529411765, "loss": 0.0236, "step": 3858 }, { "epoch": 34.46, "grad_norm": 0.04196475073695183, "learning_rate": 0.00010252941176470586, "loss": 0.0194, "step": 3859 }, { "epoch": 34.46, "grad_norm": 0.050266288220882416, "learning_rate": 0.0001024705882352941, "loss": 0.0224, "step": 3860 }, { "epoch": 34.47, "grad_norm": 0.04009345546364784, "learning_rate": 0.00010241176470588235, "loss": 0.0143, "step": 3861 }, { "epoch": 34.48, "grad_norm": 0.05010367929935455, "learning_rate": 0.00010235294117647058, "loss": 0.0196, 
"step": 3862 }, { "epoch": 34.49, "grad_norm": 0.04665965214371681, "learning_rate": 0.00010229411764705882, "loss": 0.016, "step": 3863 }, { "epoch": 34.5, "grad_norm": 0.06397382915019989, "learning_rate": 0.00010223529411764704, "loss": 0.0222, "step": 3864 }, { "epoch": 34.51, "grad_norm": 0.07012230902910233, "learning_rate": 0.00010217647058823528, "loss": 0.0215, "step": 3865 }, { "epoch": 34.52, "grad_norm": 0.05256851390004158, "learning_rate": 0.00010211764705882353, "loss": 0.0208, "step": 3866 }, { "epoch": 34.53, "grad_norm": 0.051373377442359924, "learning_rate": 0.00010205882352941176, "loss": 0.0203, "step": 3867 }, { "epoch": 34.54, "grad_norm": 0.049756649881601334, "learning_rate": 0.000102, "loss": 0.0197, "step": 3868 }, { "epoch": 34.54, "grad_norm": 0.04557337984442711, "learning_rate": 0.00010194117647058822, "loss": 0.0194, "step": 3869 }, { "epoch": 34.55, "grad_norm": 0.05897529423236847, "learning_rate": 0.00010188235294117646, "loss": 0.0236, "step": 3870 }, { "epoch": 34.56, "grad_norm": 0.12045527994632721, "learning_rate": 0.0001018235294117647, "loss": 0.0208, "step": 3871 }, { "epoch": 34.57, "grad_norm": 0.041995007544755936, "learning_rate": 0.00010176470588235293, "loss": 0.0209, "step": 3872 }, { "epoch": 34.58, "grad_norm": 0.03895152360200882, "learning_rate": 0.00010170588235294116, "loss": 0.0174, "step": 3873 }, { "epoch": 34.59, "grad_norm": 0.048664640635252, "learning_rate": 0.00010164705882352939, "loss": 0.0196, "step": 3874 }, { "epoch": 34.6, "grad_norm": 0.04239026457071304, "learning_rate": 0.00010158823529411764, "loss": 0.016, "step": 3875 }, { "epoch": 34.61, "grad_norm": 0.054239142686128616, "learning_rate": 0.00010152941176470588, "loss": 0.0188, "step": 3876 }, { "epoch": 34.62, "grad_norm": 0.09040377289056778, "learning_rate": 0.00010147058823529411, "loss": 0.0234, "step": 3877 }, { "epoch": 34.62, "grad_norm": 0.0430770069360733, "learning_rate": 0.00010141176470588234, "loss": 0.0187, "step": 3878 }, { 
"epoch": 34.63, "grad_norm": 0.04614727944135666, "learning_rate": 0.00010135294117647058, "loss": 0.019, "step": 3879 }, { "epoch": 34.64, "grad_norm": 0.05060048773884773, "learning_rate": 0.00010129411764705881, "loss": 0.0212, "step": 3880 }, { "epoch": 34.65, "grad_norm": 0.047570060938596725, "learning_rate": 0.00010123529411764706, "loss": 0.0186, "step": 3881 }, { "epoch": 34.66, "grad_norm": 0.05347687751054764, "learning_rate": 0.0001011764705882353, "loss": 0.0183, "step": 3882 }, { "epoch": 34.67, "grad_norm": 0.11090794950723648, "learning_rate": 0.00010111764705882352, "loss": 0.0206, "step": 3883 }, { "epoch": 34.68, "grad_norm": 0.03908548131585121, "learning_rate": 0.00010105882352941176, "loss": 0.019, "step": 3884 }, { "epoch": 34.69, "grad_norm": 0.05485350266098976, "learning_rate": 0.00010099999999999999, "loss": 0.0204, "step": 3885 }, { "epoch": 34.7, "grad_norm": 0.04888375476002693, "learning_rate": 0.00010094117647058823, "loss": 0.0162, "step": 3886 }, { "epoch": 34.71, "grad_norm": 0.054330479353666306, "learning_rate": 0.00010088235294117648, "loss": 0.0217, "step": 3887 }, { "epoch": 34.71, "grad_norm": 0.04673108458518982, "learning_rate": 0.00010082352941176469, "loss": 0.0184, "step": 3888 }, { "epoch": 34.72, "grad_norm": 0.0918075367808342, "learning_rate": 0.00010076470588235294, "loss": 0.0294, "step": 3889 }, { "epoch": 34.73, "grad_norm": 0.10332155972719193, "learning_rate": 0.00010070588235294116, "loss": 0.0216, "step": 3890 }, { "epoch": 34.74, "grad_norm": 0.043055880814790726, "learning_rate": 0.00010064705882352941, "loss": 0.0203, "step": 3891 }, { "epoch": 34.75, "grad_norm": 0.04952540248632431, "learning_rate": 0.00010058823529411762, "loss": 0.0176, "step": 3892 }, { "epoch": 34.76, "grad_norm": 0.040237873792648315, "learning_rate": 0.00010052941176470587, "loss": 0.0179, "step": 3893 }, { "epoch": 34.77, "grad_norm": 0.052950188517570496, "learning_rate": 0.00010047058823529411, "loss": 0.0197, "step": 3894 }, { 
"epoch": 34.78, "grad_norm": 0.05970821902155876, "learning_rate": 0.00010041176470588234, "loss": 0.0201, "step": 3895 }, { "epoch": 34.79, "grad_norm": 0.06343962252140045, "learning_rate": 0.00010035294117647058, "loss": 0.0189, "step": 3896 }, { "epoch": 34.79, "grad_norm": 0.04404140263795853, "learning_rate": 0.00010029411764705881, "loss": 0.0193, "step": 3897 }, { "epoch": 34.8, "grad_norm": 0.04577597603201866, "learning_rate": 0.00010023529411764704, "loss": 0.0213, "step": 3898 }, { "epoch": 34.81, "grad_norm": 0.04664938896894455, "learning_rate": 0.00010017647058823529, "loss": 0.0199, "step": 3899 }, { "epoch": 34.82, "grad_norm": 0.05409785360097885, "learning_rate": 0.00010011764705882352, "loss": 0.0212, "step": 3900 }, { "epoch": 34.82, "eval_cer": 0.030025594479710455, "eval_loss": 0.2020832896232605, "eval_runtime": 22.9436, "eval_samples_per_second": 115.152, "eval_steps_per_second": 1.831, "eval_wer": 0.10029750099166997, "step": 3900 }, { "epoch": 34.83, "grad_norm": 0.0607311986386776, "learning_rate": 0.00010005882352941176, "loss": 0.019, "step": 3901 }, { "epoch": 34.84, "grad_norm": 0.1514507532119751, "learning_rate": 9.999999999999999e-05, "loss": 0.0278, "step": 3902 }, { "epoch": 34.85, "grad_norm": 0.047490138560533524, "learning_rate": 9.994117647058822e-05, "loss": 0.0189, "step": 3903 }, { "epoch": 34.86, "grad_norm": 0.04280892014503479, "learning_rate": 9.988235294117646e-05, "loss": 0.0183, "step": 3904 }, { "epoch": 34.87, "grad_norm": 0.04812157154083252, "learning_rate": 9.982352941176471e-05, "loss": 0.0237, "step": 3905 }, { "epoch": 34.88, "grad_norm": 0.052132874727249146, "learning_rate": 9.976470588235292e-05, "loss": 0.02, "step": 3906 }, { "epoch": 34.88, "grad_norm": 0.05362263694405556, "learning_rate": 9.970588235294117e-05, "loss": 0.0208, "step": 3907 }, { "epoch": 34.89, "grad_norm": 0.1373939961194992, "learning_rate": 9.96470588235294e-05, "loss": 0.0339, "step": 3908 }, { "epoch": 34.9, "grad_norm": 
0.04172702878713608, "learning_rate": 9.958823529411764e-05, "loss": 0.0224, "step": 3909 }, { "epoch": 34.91, "grad_norm": 0.04621425271034241, "learning_rate": 9.952941176470588e-05, "loss": 0.0185, "step": 3910 }, { "epoch": 34.92, "grad_norm": 0.04608256742358208, "learning_rate": 9.94705882352941e-05, "loss": 0.0202, "step": 3911 }, { "epoch": 34.93, "grad_norm": 0.05011044070124626, "learning_rate": 9.941176470588234e-05, "loss": 0.0204, "step": 3912 }, { "epoch": 34.94, "grad_norm": 0.04857317730784416, "learning_rate": 9.935294117647057e-05, "loss": 0.0207, "step": 3913 }, { "epoch": 34.95, "grad_norm": 0.07684504985809326, "learning_rate": 9.929411764705882e-05, "loss": 0.0235, "step": 3914 }, { "epoch": 34.96, "grad_norm": 0.04883652925491333, "learning_rate": 9.923529411764706e-05, "loss": 0.0185, "step": 3915 }, { "epoch": 34.96, "grad_norm": 0.04158344864845276, "learning_rate": 9.917647058823528e-05, "loss": 0.0207, "step": 3916 }, { "epoch": 34.97, "grad_norm": 0.04643884673714638, "learning_rate": 9.911764705882352e-05, "loss": 0.0191, "step": 3917 }, { "epoch": 34.98, "grad_norm": 0.046996667981147766, "learning_rate": 9.905882352941175e-05, "loss": 0.0158, "step": 3918 }, { "epoch": 34.99, "grad_norm": 0.058684635907411575, "learning_rate": 9.9e-05, "loss": 0.0222, "step": 3919 }, { "epoch": 35.0, "grad_norm": 0.21057476103305817, "learning_rate": 9.894117647058824e-05, "loss": 0.0291, "step": 3920 }, { "epoch": 35.01, "grad_norm": 0.04154033213853836, "learning_rate": 9.888235294117645e-05, "loss": 0.0188, "step": 3921 }, { "epoch": 35.02, "grad_norm": 0.04205253720283508, "learning_rate": 9.88235294117647e-05, "loss": 0.016, "step": 3922 }, { "epoch": 35.03, "grad_norm": 0.05432457849383354, "learning_rate": 9.876470588235294e-05, "loss": 0.0207, "step": 3923 }, { "epoch": 35.04, "grad_norm": 0.0533113107085228, "learning_rate": 9.870588235294117e-05, "loss": 0.0192, "step": 3924 }, { "epoch": 35.04, "grad_norm": 0.051685433834791183, 
"learning_rate": 9.86470588235294e-05, "loss": 0.0204, "step": 3925 }, { "epoch": 35.05, "grad_norm": 0.06034713238477707, "learning_rate": 9.858823529411763e-05, "loss": 0.0193, "step": 3926 }, { "epoch": 35.06, "grad_norm": 0.0914221778512001, "learning_rate": 9.852941176470587e-05, "loss": 0.027, "step": 3927 }, { "epoch": 35.07, "grad_norm": 0.04747364670038223, "learning_rate": 9.847058823529412e-05, "loss": 0.0191, "step": 3928 }, { "epoch": 35.08, "grad_norm": 0.0516696535050869, "learning_rate": 9.841176470588235e-05, "loss": 0.0226, "step": 3929 }, { "epoch": 35.09, "grad_norm": 0.051890451461076736, "learning_rate": 9.835294117647058e-05, "loss": 0.0227, "step": 3930 }, { "epoch": 35.1, "grad_norm": 0.06238153949379921, "learning_rate": 9.829411764705881e-05, "loss": 0.024, "step": 3931 }, { "epoch": 35.11, "grad_norm": 0.06373263150453568, "learning_rate": 9.823529411764705e-05, "loss": 0.0175, "step": 3932 }, { "epoch": 35.12, "grad_norm": 0.08236797899007797, "learning_rate": 9.81764705882353e-05, "loss": 0.0239, "step": 3933 }, { "epoch": 35.12, "grad_norm": 0.04389422759413719, "learning_rate": 9.811764705882352e-05, "loss": 0.0193, "step": 3934 }, { "epoch": 35.13, "grad_norm": 0.04222368448972702, "learning_rate": 9.805882352941175e-05, "loss": 0.0163, "step": 3935 }, { "epoch": 35.14, "grad_norm": 0.04606059566140175, "learning_rate": 9.799999999999998e-05, "loss": 0.0179, "step": 3936 }, { "epoch": 35.15, "grad_norm": 0.04678035527467728, "learning_rate": 9.794117647058823e-05, "loss": 0.0196, "step": 3937 }, { "epoch": 35.16, "grad_norm": 0.05267263203859329, "learning_rate": 9.788235294117647e-05, "loss": 0.0206, "step": 3938 }, { "epoch": 35.17, "grad_norm": 0.08604593575000763, "learning_rate": 9.782352941176469e-05, "loss": 0.0193, "step": 3939 }, { "epoch": 35.18, "grad_norm": 0.040144361555576324, "learning_rate": 9.776470588235293e-05, "loss": 0.0189, "step": 3940 }, { "epoch": 35.19, "grad_norm": 0.050802409648895264, "learning_rate": 
9.770588235294117e-05, "loss": 0.0204, "step": 3941 }, { "epoch": 35.2, "grad_norm": 0.04380539059638977, "learning_rate": 9.76470588235294e-05, "loss": 0.0205, "step": 3942 }, { "epoch": 35.21, "grad_norm": 0.049333445727825165, "learning_rate": 9.758823529411765e-05, "loss": 0.0171, "step": 3943 }, { "epoch": 35.21, "grad_norm": 0.0534956157207489, "learning_rate": 9.752941176470586e-05, "loss": 0.0193, "step": 3944 }, { "epoch": 35.22, "grad_norm": 0.0936095118522644, "learning_rate": 9.74705882352941e-05, "loss": 0.0238, "step": 3945 }, { "epoch": 35.23, "grad_norm": 0.043501611799001694, "learning_rate": 9.741176470588235e-05, "loss": 0.0171, "step": 3946 }, { "epoch": 35.24, "grad_norm": 0.049899764358997345, "learning_rate": 9.735294117647058e-05, "loss": 0.0176, "step": 3947 }, { "epoch": 35.25, "grad_norm": 0.047519441694021225, "learning_rate": 9.729411764705882e-05, "loss": 0.0189, "step": 3948 }, { "epoch": 35.26, "grad_norm": 0.05049138143658638, "learning_rate": 9.723529411764704e-05, "loss": 0.0159, "step": 3949 }, { "epoch": 35.27, "grad_norm": 0.04400688037276268, "learning_rate": 9.717647058823528e-05, "loss": 0.0168, "step": 3950 }, { "epoch": 35.28, "grad_norm": 0.06818078458309174, "learning_rate": 9.711764705882353e-05, "loss": 0.0175, "step": 3951 }, { "epoch": 35.29, "grad_norm": 0.0692988783121109, "learning_rate": 9.705882352941176e-05, "loss": 0.0189, "step": 3952 }, { "epoch": 35.29, "grad_norm": 0.04943229258060455, "learning_rate": 9.699999999999999e-05, "loss": 0.0195, "step": 3953 }, { "epoch": 35.3, "grad_norm": 0.042735274881124496, "learning_rate": 9.694117647058822e-05, "loss": 0.0172, "step": 3954 }, { "epoch": 35.31, "grad_norm": 0.051739778369665146, "learning_rate": 9.688235294117646e-05, "loss": 0.0185, "step": 3955 }, { "epoch": 35.32, "grad_norm": 0.04766315221786499, "learning_rate": 9.68235294117647e-05, "loss": 0.0196, "step": 3956 }, { "epoch": 35.33, "grad_norm": 0.0675685852766037, "learning_rate": 
9.676470588235293e-05, "loss": 0.0204, "step": 3957 }, { "epoch": 35.34, "grad_norm": 0.07023131102323532, "learning_rate": 9.670588235294116e-05, "loss": 0.0256, "step": 3958 }, { "epoch": 35.35, "grad_norm": 0.06399403512477875, "learning_rate": 9.66470588235294e-05, "loss": 0.0192, "step": 3959 }, { "epoch": 35.36, "grad_norm": 0.04383014887571335, "learning_rate": 9.658823529411764e-05, "loss": 0.0191, "step": 3960 }, { "epoch": 35.37, "grad_norm": 0.04324600473046303, "learning_rate": 9.652941176470588e-05, "loss": 0.0193, "step": 3961 }, { "epoch": 35.38, "grad_norm": 0.04773641377687454, "learning_rate": 9.647058823529412e-05, "loss": 0.0186, "step": 3962 }, { "epoch": 35.38, "grad_norm": 0.05815904587507248, "learning_rate": 9.641176470588234e-05, "loss": 0.0205, "step": 3963 }, { "epoch": 35.39, "grad_norm": 0.10423627495765686, "learning_rate": 9.635294117647058e-05, "loss": 0.0216, "step": 3964 }, { "epoch": 35.4, "grad_norm": 0.04527971148490906, "learning_rate": 9.629411764705881e-05, "loss": 0.0215, "step": 3965 }, { "epoch": 35.41, "grad_norm": 0.04167898744344711, "learning_rate": 9.623529411764706e-05, "loss": 0.0152, "step": 3966 }, { "epoch": 35.42, "grad_norm": 0.041512541472911835, "learning_rate": 9.61764705882353e-05, "loss": 0.0191, "step": 3967 }, { "epoch": 35.43, "grad_norm": 0.04319898039102554, "learning_rate": 9.611764705882352e-05, "loss": 0.016, "step": 3968 }, { "epoch": 35.44, "grad_norm": 0.052551429718732834, "learning_rate": 9.605882352941176e-05, "loss": 0.0218, "step": 3969 }, { "epoch": 35.45, "grad_norm": 0.11623974144458771, "learning_rate": 9.599999999999999e-05, "loss": 0.0239, "step": 3970 }, { "epoch": 35.46, "grad_norm": 0.049464885145425797, "learning_rate": 9.594117647058823e-05, "loss": 0.0214, "step": 3971 }, { "epoch": 35.46, "grad_norm": 0.04722049832344055, "learning_rate": 9.588235294117645e-05, "loss": 0.0225, "step": 3972 }, { "epoch": 35.47, "grad_norm": 0.044510941952466965, "learning_rate": 
9.582352941176469e-05, "loss": 0.0177, "step": 3973 }, { "epoch": 35.48, "grad_norm": 0.054078925400972366, "learning_rate": 9.576470588235294e-05, "loss": 0.0177, "step": 3974 }, { "epoch": 35.49, "grad_norm": 0.06436042487621307, "learning_rate": 9.570588235294117e-05, "loss": 0.0201, "step": 3975 }, { "epoch": 35.5, "grad_norm": 0.07849033921957016, "learning_rate": 9.564705882352941e-05, "loss": 0.0247, "step": 3976 }, { "epoch": 35.51, "grad_norm": 0.07635417580604553, "learning_rate": 9.558823529411764e-05, "loss": 0.0217, "step": 3977 }, { "epoch": 35.52, "grad_norm": 0.04666660726070404, "learning_rate": 9.552941176470587e-05, "loss": 0.0213, "step": 3978 }, { "epoch": 35.53, "grad_norm": 0.04402756690979004, "learning_rate": 9.547058823529411e-05, "loss": 0.0153, "step": 3979 }, { "epoch": 35.54, "grad_norm": 0.05166502296924591, "learning_rate": 9.541176470588236e-05, "loss": 0.0213, "step": 3980 }, { "epoch": 35.54, "grad_norm": 0.046839211136102676, "learning_rate": 9.535294117647059e-05, "loss": 0.02, "step": 3981 }, { "epoch": 35.55, "grad_norm": 0.07163360714912415, "learning_rate": 9.529411764705882e-05, "loss": 0.0209, "step": 3982 }, { "epoch": 35.56, "grad_norm": 0.1001317948102951, "learning_rate": 9.523529411764705e-05, "loss": 0.0246, "step": 3983 }, { "epoch": 35.57, "grad_norm": 0.04498804733157158, "learning_rate": 9.517647058823529e-05, "loss": 0.0207, "step": 3984 }, { "epoch": 35.58, "grad_norm": 0.04487980902194977, "learning_rate": 9.511764705882353e-05, "loss": 0.0208, "step": 3985 }, { "epoch": 35.59, "grad_norm": 0.04715997725725174, "learning_rate": 9.505882352941175e-05, "loss": 0.0182, "step": 3986 }, { "epoch": 35.6, "grad_norm": 0.04480253905057907, "learning_rate": 9.499999999999999e-05, "loss": 0.0154, "step": 3987 }, { "epoch": 35.61, "grad_norm": 0.058956392109394073, "learning_rate": 9.494117647058822e-05, "loss": 0.0189, "step": 3988 }, { "epoch": 35.62, "grad_norm": 0.09902913868427277, "learning_rate": 
9.488235294117646e-05, "loss": 0.0249, "step": 3989 }, { "epoch": 35.62, "grad_norm": 0.04270798712968826, "learning_rate": 9.482352941176471e-05, "loss": 0.0196, "step": 3990 }, { "epoch": 35.63, "grad_norm": 0.05272712558507919, "learning_rate": 9.476470588235292e-05, "loss": 0.0192, "step": 3991 }, { "epoch": 35.64, "grad_norm": 0.04505373165011406, "learning_rate": 9.470588235294117e-05, "loss": 0.0176, "step": 3992 }, { "epoch": 35.65, "grad_norm": 0.04484843462705612, "learning_rate": 9.46470588235294e-05, "loss": 0.0152, "step": 3993 }, { "epoch": 35.66, "grad_norm": 0.054936349391937256, "learning_rate": 9.458823529411764e-05, "loss": 0.0197, "step": 3994 }, { "epoch": 35.67, "grad_norm": 0.10348665714263916, "learning_rate": 9.452941176470588e-05, "loss": 0.0269, "step": 3995 }, { "epoch": 35.68, "grad_norm": 0.05079462379217148, "learning_rate": 9.44705882352941e-05, "loss": 0.0222, "step": 3996 }, { "epoch": 35.69, "grad_norm": 0.04353007301688194, "learning_rate": 9.441176470588234e-05, "loss": 0.0185, "step": 3997 }, { "epoch": 35.7, "grad_norm": 0.04913564771413803, "learning_rate": 9.435294117647057e-05, "loss": 0.0201, "step": 3998 }, { "epoch": 35.71, "grad_norm": 0.0476599857211113, "learning_rate": 9.429411764705882e-05, "loss": 0.0162, "step": 3999 }, { "epoch": 35.71, "grad_norm": 0.05431988462805748, "learning_rate": 9.423529411764706e-05, "loss": 0.0214, "step": 4000 }, { "epoch": 35.71, "eval_cer": 0.031649209051651236, "eval_loss": 0.20453354716300964, "eval_runtime": 23.7481, "eval_samples_per_second": 111.251, "eval_steps_per_second": 1.769, "eval_wer": 0.10329234430781437, "step": 4000 }, { "epoch": 35.72, "grad_norm": 0.06919653713703156, "learning_rate": 9.417647058823528e-05, "loss": 0.0186, "step": 4001 }, { "epoch": 35.73, "grad_norm": 0.10540597140789032, "learning_rate": 9.411764705882352e-05, "loss": 0.0253, "step": 4002 }, { "epoch": 35.74, "grad_norm": 0.04335903748869896, "learning_rate": 9.405882352941176e-05, "loss": 0.0153, 
"step": 4003 }, { "epoch": 35.75, "grad_norm": 0.0410657674074173, "learning_rate": 9.4e-05, "loss": 0.0193, "step": 4004 }, { "epoch": 35.76, "grad_norm": 0.045593492686748505, "learning_rate": 9.394117647058822e-05, "loss": 0.0172, "step": 4005 }, { "epoch": 35.77, "grad_norm": 0.04321907460689545, "learning_rate": 9.388235294117645e-05, "loss": 0.0174, "step": 4006 }, { "epoch": 35.78, "grad_norm": 0.06603457778692245, "learning_rate": 9.38235294117647e-05, "loss": 0.0215, "step": 4007 }, { "epoch": 35.79, "grad_norm": 0.06926041096448898, "learning_rate": 9.376470588235294e-05, "loss": 0.0234, "step": 4008 }, { "epoch": 35.79, "grad_norm": 0.045095477253198624, "learning_rate": 9.370588235294117e-05, "loss": 0.0222, "step": 4009 }, { "epoch": 35.8, "grad_norm": 0.035017479211091995, "learning_rate": 9.36470588235294e-05, "loss": 0.0143, "step": 4010 }, { "epoch": 35.81, "grad_norm": 0.04700927063822746, "learning_rate": 9.358823529411763e-05, "loss": 0.0204, "step": 4011 }, { "epoch": 35.82, "grad_norm": 0.05358951911330223, "learning_rate": 9.352941176470587e-05, "loss": 0.0207, "step": 4012 }, { "epoch": 35.83, "grad_norm": 0.058761801570653915, "learning_rate": 9.347058823529412e-05, "loss": 0.0175, "step": 4013 }, { "epoch": 35.84, "grad_norm": 0.08514871448278427, "learning_rate": 9.341176470588235e-05, "loss": 0.0244, "step": 4014 }, { "epoch": 35.85, "grad_norm": 0.042977988719940186, "learning_rate": 9.335294117647058e-05, "loss": 0.0203, "step": 4015 }, { "epoch": 35.86, "grad_norm": 0.03963282331824303, "learning_rate": 9.329411764705881e-05, "loss": 0.0152, "step": 4016 }, { "epoch": 35.87, "grad_norm": 0.04526777192950249, "learning_rate": 9.323529411764705e-05, "loss": 0.0185, "step": 4017 }, { "epoch": 35.88, "grad_norm": 0.04637821763753891, "learning_rate": 9.31764705882353e-05, "loss": 0.0175, "step": 4018 }, { "epoch": 35.88, "grad_norm": 0.052102718502283096, "learning_rate": 9.311764705882351e-05, "loss": 0.0221, "step": 4019 }, { "epoch": 
35.89, "grad_norm": 0.0893474668264389, "learning_rate": 9.305882352941175e-05, "loss": 0.0294, "step": 4020 }, { "epoch": 35.9, "grad_norm": 0.04137646406888962, "learning_rate": 9.3e-05, "loss": 0.0186, "step": 4021 }, { "epoch": 35.91, "grad_norm": 0.049010612070560455, "learning_rate": 9.294117647058823e-05, "loss": 0.0213, "step": 4022 }, { "epoch": 35.92, "grad_norm": 0.039913684129714966, "learning_rate": 9.288235294117647e-05, "loss": 0.0164, "step": 4023 }, { "epoch": 35.93, "grad_norm": 0.04661644250154495, "learning_rate": 9.282352941176469e-05, "loss": 0.017, "step": 4024 }, { "epoch": 35.94, "grad_norm": 0.05610841512680054, "learning_rate": 9.276470588235293e-05, "loss": 0.0206, "step": 4025 }, { "epoch": 35.95, "grad_norm": 0.07188771665096283, "learning_rate": 9.270588235294117e-05, "loss": 0.0212, "step": 4026 }, { "epoch": 35.96, "grad_norm": 0.07807855308055878, "learning_rate": 9.26470588235294e-05, "loss": 0.0211, "step": 4027 }, { "epoch": 35.96, "grad_norm": 0.042308419942855835, "learning_rate": 9.258823529411765e-05, "loss": 0.0179, "step": 4028 }, { "epoch": 35.97, "grad_norm": 0.052682895213365555, "learning_rate": 9.252941176470586e-05, "loss": 0.0162, "step": 4029 }, { "epoch": 35.98, "grad_norm": 0.0560329295694828, "learning_rate": 9.247058823529411e-05, "loss": 0.0214, "step": 4030 }, { "epoch": 35.99, "grad_norm": 0.06603354215621948, "learning_rate": 9.241176470588235e-05, "loss": 0.0203, "step": 4031 }, { "epoch": 36.0, "grad_norm": 0.0614035464823246, "learning_rate": 9.235294117647058e-05, "loss": 0.0215, "step": 4032 }, { "epoch": 36.01, "grad_norm": 0.0384756401181221, "learning_rate": 9.229411764705881e-05, "loss": 0.0172, "step": 4033 }, { "epoch": 36.02, "grad_norm": 0.045768752694129944, "learning_rate": 9.223529411764704e-05, "loss": 0.0173, "step": 4034 }, { "epoch": 36.03, "grad_norm": 0.04661966860294342, "learning_rate": 9.217647058823528e-05, "loss": 0.0182, "step": 4035 }, { "epoch": 36.04, "grad_norm": 
0.04212106764316559, "learning_rate": 9.211764705882353e-05, "loss": 0.0162, "step": 4036 }, { "epoch": 36.04, "grad_norm": 0.050992123782634735, "learning_rate": 9.205882352941176e-05, "loss": 0.0176, "step": 4037 }, { "epoch": 36.05, "grad_norm": 0.07657718658447266, "learning_rate": 9.199999999999999e-05, "loss": 0.0215, "step": 4038 }, { "epoch": 36.06, "grad_norm": 0.06231314316391945, "learning_rate": 9.194117647058823e-05, "loss": 0.0195, "step": 4039 }, { "epoch": 36.07, "grad_norm": 0.044922493398189545, "learning_rate": 9.188235294117646e-05, "loss": 0.0204, "step": 4040 }, { "epoch": 36.08, "grad_norm": 0.046494387090206146, "learning_rate": 9.18235294117647e-05, "loss": 0.0175, "step": 4041 }, { "epoch": 36.09, "grad_norm": 0.051921382546424866, "learning_rate": 9.176470588235295e-05, "loss": 0.024, "step": 4042 }, { "epoch": 36.1, "grad_norm": 0.04883875325322151, "learning_rate": 9.170588235294116e-05, "loss": 0.0187, "step": 4043 }, { "epoch": 36.11, "grad_norm": 0.05999523401260376, "learning_rate": 9.16470588235294e-05, "loss": 0.0209, "step": 4044 }, { "epoch": 36.12, "grad_norm": 0.08448347449302673, "learning_rate": 9.158823529411764e-05, "loss": 0.022, "step": 4045 }, { "epoch": 36.12, "grad_norm": 0.04675189405679703, "learning_rate": 9.152941176470588e-05, "loss": 0.0223, "step": 4046 }, { "epoch": 36.13, "grad_norm": 0.04032502323389053, "learning_rate": 9.147058823529412e-05, "loss": 0.0186, "step": 4047 }, { "epoch": 36.14, "grad_norm": 0.048110414296388626, "learning_rate": 9.141176470588234e-05, "loss": 0.0186, "step": 4048 }, { "epoch": 36.15, "grad_norm": 0.04452439397573471, "learning_rate": 9.135294117647058e-05, "loss": 0.0164, "step": 4049 }, { "epoch": 36.16, "grad_norm": 0.057770442217588425, "learning_rate": 9.129411764705881e-05, "loss": 0.0219, "step": 4050 }, { "epoch": 36.17, "grad_norm": 0.0856478363275528, "learning_rate": 9.123529411764706e-05, "loss": 0.0207, "step": 4051 }, { "epoch": 36.18, "grad_norm": 
0.04736426845192909, "learning_rate": 9.117647058823527e-05, "loss": 0.0182, "step": 4052 }, { "epoch": 36.19, "grad_norm": 0.03857048973441124, "learning_rate": 9.111764705882352e-05, "loss": 0.0165, "step": 4053 }, { "epoch": 36.2, "grad_norm": 0.04223356395959854, "learning_rate": 9.105882352941176e-05, "loss": 0.0176, "step": 4054 }, { "epoch": 36.21, "grad_norm": 0.046761445701122284, "learning_rate": 9.099999999999999e-05, "loss": 0.0172, "step": 4055 }, { "epoch": 36.21, "grad_norm": 0.045401718467473984, "learning_rate": 9.094117647058823e-05, "loss": 0.0161, "step": 4056 }, { "epoch": 36.22, "grad_norm": 0.0933755412697792, "learning_rate": 9.088235294117646e-05, "loss": 0.0208, "step": 4057 }, { "epoch": 36.23, "grad_norm": 0.042552392929792404, "learning_rate": 9.082352941176469e-05, "loss": 0.0172, "step": 4058 }, { "epoch": 36.24, "grad_norm": 0.04614667221903801, "learning_rate": 9.076470588235294e-05, "loss": 0.0172, "step": 4059 }, { "epoch": 36.25, "grad_norm": 0.044079042971134186, "learning_rate": 9.070588235294118e-05, "loss": 0.0191, "step": 4060 }, { "epoch": 36.26, "grad_norm": 0.0437043160200119, "learning_rate": 9.064705882352941e-05, "loss": 0.0185, "step": 4061 }, { "epoch": 36.27, "grad_norm": 0.046947527676820755, "learning_rate": 9.058823529411764e-05, "loss": 0.0161, "step": 4062 }, { "epoch": 36.28, "grad_norm": 0.06822053343057632, "learning_rate": 9.052941176470587e-05, "loss": 0.0201, "step": 4063 }, { "epoch": 36.29, "grad_norm": 0.06094614788889885, "learning_rate": 9.047058823529411e-05, "loss": 0.0169, "step": 4064 }, { "epoch": 36.29, "grad_norm": 0.04314233362674713, "learning_rate": 9.041176470588236e-05, "loss": 0.0161, "step": 4065 }, { "epoch": 36.3, "grad_norm": 0.03811066225171089, "learning_rate": 9.035294117647057e-05, "loss": 0.0159, "step": 4066 }, { "epoch": 36.31, "grad_norm": 0.05600721389055252, "learning_rate": 9.029411764705882e-05, "loss": 0.0204, "step": 4067 }, { "epoch": 36.32, "grad_norm": 
0.05661261826753616, "learning_rate": 9.023529411764705e-05, "loss": 0.0191, "step": 4068 }, { "epoch": 36.33, "grad_norm": 0.06936700642108917, "learning_rate": 9.017647058823529e-05, "loss": 0.0241, "step": 4069 }, { "epoch": 36.34, "grad_norm": 0.09189575910568237, "learning_rate": 9.011764705882353e-05, "loss": 0.0262, "step": 4070 }, { "epoch": 36.35, "grad_norm": 0.046048182994127274, "learning_rate": 9.005882352941175e-05, "loss": 0.0212, "step": 4071 }, { "epoch": 36.36, "grad_norm": 0.04294374957680702, "learning_rate": 8.999999999999999e-05, "loss": 0.0173, "step": 4072 }, { "epoch": 36.37, "grad_norm": 0.04520189017057419, "learning_rate": 8.994117647058822e-05, "loss": 0.0199, "step": 4073 }, { "epoch": 36.38, "grad_norm": 0.043684907257556915, "learning_rate": 8.988235294117647e-05, "loss": 0.0172, "step": 4074 }, { "epoch": 36.38, "grad_norm": 0.05723635107278824, "learning_rate": 8.982352941176471e-05, "loss": 0.0235, "step": 4075 }, { "epoch": 36.39, "grad_norm": 0.0909346342086792, "learning_rate": 8.976470588235293e-05, "loss": 0.0252, "step": 4076 }, { "epoch": 36.4, "grad_norm": 0.042179208248853683, "learning_rate": 8.970588235294117e-05, "loss": 0.0169, "step": 4077 }, { "epoch": 36.41, "grad_norm": 0.04011724889278412, "learning_rate": 8.96470588235294e-05, "loss": 0.0195, "step": 4078 }, { "epoch": 36.42, "grad_norm": 0.042223189026117325, "learning_rate": 8.958823529411764e-05, "loss": 0.0153, "step": 4079 }, { "epoch": 36.43, "grad_norm": 0.0425608791410923, "learning_rate": 8.952941176470587e-05, "loss": 0.0162, "step": 4080 }, { "epoch": 36.44, "grad_norm": 0.06180000677704811, "learning_rate": 8.94705882352941e-05, "loss": 0.018, "step": 4081 }, { "epoch": 36.45, "grad_norm": 0.08952069282531738, "learning_rate": 8.941176470588235e-05, "loss": 0.0241, "step": 4082 }, { "epoch": 36.46, "grad_norm": 0.05010952427983284, "learning_rate": 8.935294117647059e-05, "loss": 0.0201, "step": 4083 }, { "epoch": 36.46, "grad_norm": 
0.04697617143392563, "learning_rate": 8.929411764705882e-05, "loss": 0.0205, "step": 4084 }, { "epoch": 36.47, "grad_norm": 0.04716704785823822, "learning_rate": 8.923529411764705e-05, "loss": 0.0161, "step": 4085 }, { "epoch": 36.48, "grad_norm": 0.04796275869011879, "learning_rate": 8.917647058823528e-05, "loss": 0.0177, "step": 4086 }, { "epoch": 36.49, "grad_norm": 0.05288788303732872, "learning_rate": 8.911764705882352e-05, "loss": 0.0215, "step": 4087 }, { "epoch": 36.5, "grad_norm": 0.07674020528793335, "learning_rate": 8.905882352941177e-05, "loss": 0.0205, "step": 4088 }, { "epoch": 36.51, "grad_norm": 0.06745994836091995, "learning_rate": 8.9e-05, "loss": 0.0198, "step": 4089 }, { "epoch": 36.52, "grad_norm": 0.04017637297511101, "learning_rate": 8.894117647058822e-05, "loss": 0.0172, "step": 4090 }, { "epoch": 36.53, "grad_norm": 0.0458281934261322, "learning_rate": 8.888235294117645e-05, "loss": 0.0196, "step": 4091 }, { "epoch": 36.54, "grad_norm": 0.04622480645775795, "learning_rate": 8.88235294117647e-05, "loss": 0.0187, "step": 4092 }, { "epoch": 36.54, "grad_norm": 0.04795752465724945, "learning_rate": 8.876470588235294e-05, "loss": 0.0191, "step": 4093 }, { "epoch": 36.55, "grad_norm": 0.05634784698486328, "learning_rate": 8.870588235294117e-05, "loss": 0.0209, "step": 4094 }, { "epoch": 36.56, "grad_norm": 0.09274792671203613, "learning_rate": 8.86470588235294e-05, "loss": 0.0268, "step": 4095 }, { "epoch": 36.57, "grad_norm": 0.04380248114466667, "learning_rate": 8.858823529411763e-05, "loss": 0.0178, "step": 4096 }, { "epoch": 36.58, "grad_norm": 0.04211769253015518, "learning_rate": 8.852941176470587e-05, "loss": 0.0188, "step": 4097 }, { "epoch": 36.59, "grad_norm": 0.04355277493596077, "learning_rate": 8.847058823529412e-05, "loss": 0.0181, "step": 4098 }, { "epoch": 36.6, "grad_norm": 0.05291120707988739, "learning_rate": 8.841176470588233e-05, "loss": 0.0204, "step": 4099 }, { "epoch": 36.61, "grad_norm": 0.04758208990097046, 
"learning_rate": 8.835294117647058e-05, "loss": 0.016, "step": 4100 }, { "epoch": 36.61, "eval_cer": 0.030153378959909497, "eval_loss": 0.20217667520046234, "eval_runtime": 22.4099, "eval_samples_per_second": 117.894, "eval_steps_per_second": 1.874, "eval_wer": 0.09998016660055534, "step": 4100 }, { "epoch": 36.62, "grad_norm": 0.0827101469039917, "learning_rate": 8.829411764705882e-05, "loss": 0.0198, "step": 4101 }, { "epoch": 36.62, "grad_norm": 0.04562339186668396, "learning_rate": 8.823529411764705e-05, "loss": 0.0183, "step": 4102 }, { "epoch": 36.63, "grad_norm": 0.04250386729836464, "learning_rate": 8.81764705882353e-05, "loss": 0.0182, "step": 4103 }, { "epoch": 36.64, "grad_norm": 0.04504351317882538, "learning_rate": 8.811764705882351e-05, "loss": 0.019, "step": 4104 }, { "epoch": 36.65, "grad_norm": 0.062047552317380905, "learning_rate": 8.805882352941175e-05, "loss": 0.0209, "step": 4105 }, { "epoch": 36.66, "grad_norm": 0.06217120215296745, "learning_rate": 8.8e-05, "loss": 0.0194, "step": 4106 }, { "epoch": 36.67, "grad_norm": 0.09967740625143051, "learning_rate": 8.794117647058823e-05, "loss": 0.0274, "step": 4107 }, { "epoch": 36.68, "grad_norm": 0.043474357575178146, "learning_rate": 8.788235294117647e-05, "loss": 0.0198, "step": 4108 }, { "epoch": 36.69, "grad_norm": 0.04572237655520439, "learning_rate": 8.782352941176469e-05, "loss": 0.0202, "step": 4109 }, { "epoch": 36.7, "grad_norm": 0.04573224112391472, "learning_rate": 8.776470588235293e-05, "loss": 0.0173, "step": 4110 }, { "epoch": 36.71, "grad_norm": 0.05247281864285469, "learning_rate": 8.770588235294117e-05, "loss": 0.0183, "step": 4111 }, { "epoch": 36.71, "grad_norm": 0.047982342541217804, "learning_rate": 8.76470588235294e-05, "loss": 0.018, "step": 4112 }, { "epoch": 36.72, "grad_norm": 0.06717761605978012, "learning_rate": 8.758823529411763e-05, "loss": 0.0192, "step": 4113 }, { "epoch": 36.73, "grad_norm": 0.06690295040607452, "learning_rate": 8.752941176470586e-05, "loss": 
0.0215, "step": 4114 }, { "epoch": 36.74, "grad_norm": 0.045360762625932693, "learning_rate": 8.747058823529411e-05, "loss": 0.0167, "step": 4115 }, { "epoch": 36.75, "grad_norm": 0.04655643552541733, "learning_rate": 8.741176470588235e-05, "loss": 0.0211, "step": 4116 }, { "epoch": 36.76, "grad_norm": 0.04601249471306801, "learning_rate": 8.735294117647058e-05, "loss": 0.0187, "step": 4117 }, { "epoch": 36.77, "grad_norm": 0.04942779242992401, "learning_rate": 8.729411764705881e-05, "loss": 0.0191, "step": 4118 }, { "epoch": 36.78, "grad_norm": 0.05830894038081169, "learning_rate": 8.723529411764705e-05, "loss": 0.0152, "step": 4119 }, { "epoch": 36.79, "grad_norm": 0.09998707473278046, "learning_rate": 8.717647058823528e-05, "loss": 0.0249, "step": 4120 }, { "epoch": 36.79, "grad_norm": 0.049037862569093704, "learning_rate": 8.711764705882353e-05, "loss": 0.0196, "step": 4121 }, { "epoch": 36.8, "grad_norm": 0.05310535803437233, "learning_rate": 8.705882352941177e-05, "loss": 0.0181, "step": 4122 }, { "epoch": 36.81, "grad_norm": 0.051805902272462845, "learning_rate": 8.699999999999999e-05, "loss": 0.021, "step": 4123 }, { "epoch": 36.82, "grad_norm": 0.04846580699086189, "learning_rate": 8.694117647058823e-05, "loss": 0.0151, "step": 4124 }, { "epoch": 36.83, "grad_norm": 0.05232789367437363, "learning_rate": 8.688235294117646e-05, "loss": 0.0183, "step": 4125 }, { "epoch": 36.84, "grad_norm": 0.07918441295623779, "learning_rate": 8.68235294117647e-05, "loss": 0.0209, "step": 4126 }, { "epoch": 36.85, "grad_norm": 0.04547763615846634, "learning_rate": 8.676470588235295e-05, "loss": 0.0195, "step": 4127 }, { "epoch": 36.86, "grad_norm": 0.04671810194849968, "learning_rate": 8.670588235294116e-05, "loss": 0.0198, "step": 4128 }, { "epoch": 36.87, "grad_norm": 0.045081958174705505, "learning_rate": 8.664705882352941e-05, "loss": 0.0202, "step": 4129 }, { "epoch": 36.88, "grad_norm": 0.05202661454677582, "learning_rate": 8.658823529411764e-05, "loss": 0.0196, 
"step": 4130 }, { "epoch": 36.88, "grad_norm": 0.04858111962676048, "learning_rate": 8.652941176470588e-05, "loss": 0.017, "step": 4131 }, { "epoch": 36.89, "grad_norm": 0.09790980815887451, "learning_rate": 8.64705882352941e-05, "loss": 0.022, "step": 4132 }, { "epoch": 36.9, "grad_norm": 0.045605581253767014, "learning_rate": 8.641176470588234e-05, "loss": 0.0202, "step": 4133 }, { "epoch": 36.91, "grad_norm": 0.041663676500320435, "learning_rate": 8.635294117647058e-05, "loss": 0.017, "step": 4134 }, { "epoch": 36.92, "grad_norm": 0.04285924509167671, "learning_rate": 8.629411764705881e-05, "loss": 0.0177, "step": 4135 }, { "epoch": 36.93, "grad_norm": 0.05176946520805359, "learning_rate": 8.623529411764706e-05, "loss": 0.0205, "step": 4136 }, { "epoch": 36.94, "grad_norm": 0.04874508082866669, "learning_rate": 8.617647058823529e-05, "loss": 0.0184, "step": 4137 }, { "epoch": 36.95, "grad_norm": 0.06821689754724503, "learning_rate": 8.611764705882352e-05, "loss": 0.0228, "step": 4138 }, { "epoch": 36.96, "grad_norm": 0.11184251308441162, "learning_rate": 8.605882352941176e-05, "loss": 0.0263, "step": 4139 }, { "epoch": 36.96, "grad_norm": 0.04449713975191116, "learning_rate": 8.6e-05, "loss": 0.0196, "step": 4140 }, { "epoch": 36.97, "grad_norm": 0.04029373079538345, "learning_rate": 8.594117647058823e-05, "loss": 0.0177, "step": 4141 }, { "epoch": 36.98, "grad_norm": 0.04268214851617813, "learning_rate": 8.588235294117646e-05, "loss": 0.018, "step": 4142 }, { "epoch": 36.99, "grad_norm": 0.04401444271206856, "learning_rate": 8.582352941176469e-05, "loss": 0.0158, "step": 4143 }, { "epoch": 37.0, "grad_norm": 0.06233417987823486, "learning_rate": 8.576470588235294e-05, "loss": 0.0222, "step": 4144 }, { "epoch": 37.01, "grad_norm": 0.03744405508041382, "learning_rate": 8.570588235294118e-05, "loss": 0.0207, "step": 4145 }, { "epoch": 37.02, "grad_norm": 0.04306573420763016, "learning_rate": 8.56470588235294e-05, "loss": 0.0197, "step": 4146 }, { "epoch": 37.03, 
"grad_norm": 0.044871244579553604, "learning_rate": 8.558823529411764e-05, "loss": 0.0178, "step": 4147 }, { "epoch": 37.04, "grad_norm": 0.042695172131061554, "learning_rate": 8.552941176470587e-05, "loss": 0.016, "step": 4148 }, { "epoch": 37.04, "grad_norm": 0.051238398998975754, "learning_rate": 8.547058823529411e-05, "loss": 0.0196, "step": 4149 }, { "epoch": 37.05, "grad_norm": 0.09372657537460327, "learning_rate": 8.541176470588236e-05, "loss": 0.0251, "step": 4150 }, { "epoch": 37.06, "grad_norm": 0.08845796436071396, "learning_rate": 8.535294117647057e-05, "loss": 0.0253, "step": 4151 }, { "epoch": 37.07, "grad_norm": 0.041753947734832764, "learning_rate": 8.529411764705882e-05, "loss": 0.0161, "step": 4152 }, { "epoch": 37.08, "grad_norm": 0.04683902859687805, "learning_rate": 8.523529411764705e-05, "loss": 0.0199, "step": 4153 }, { "epoch": 37.09, "grad_norm": 0.04193998500704765, "learning_rate": 8.517647058823529e-05, "loss": 0.0163, "step": 4154 }, { "epoch": 37.1, "grad_norm": 0.04733918607234955, "learning_rate": 8.511764705882353e-05, "loss": 0.0159, "step": 4155 }, { "epoch": 37.11, "grad_norm": 0.05876201391220093, "learning_rate": 8.505882352941175e-05, "loss": 0.0163, "step": 4156 }, { "epoch": 37.12, "grad_norm": 0.09874025732278824, "learning_rate": 8.499999999999999e-05, "loss": 0.02, "step": 4157 }, { "epoch": 37.12, "grad_norm": 0.04608433321118355, "learning_rate": 8.494117647058822e-05, "loss": 0.0182, "step": 4158 }, { "epoch": 37.13, "grad_norm": 0.04740406572818756, "learning_rate": 8.488235294117647e-05, "loss": 0.0185, "step": 4159 }, { "epoch": 37.14, "grad_norm": 0.04387908801436424, "learning_rate": 8.48235294117647e-05, "loss": 0.016, "step": 4160 }, { "epoch": 37.15, "grad_norm": 0.05817878246307373, "learning_rate": 8.476470588235293e-05, "loss": 0.0212, "step": 4161 }, { "epoch": 37.16, "grad_norm": 0.055451344698667526, "learning_rate": 8.470588235294117e-05, "loss": 0.0166, "step": 4162 }, { "epoch": 37.17, "grad_norm": 
0.0807330384850502, "learning_rate": 8.464705882352941e-05, "loss": 0.0222, "step": 4163 }, { "epoch": 37.18, "grad_norm": 0.04307100921869278, "learning_rate": 8.458823529411764e-05, "loss": 0.018, "step": 4164 }, { "epoch": 37.19, "grad_norm": 0.04464540630578995, "learning_rate": 8.452941176470587e-05, "loss": 0.0148, "step": 4165 }, { "epoch": 37.2, "grad_norm": 0.04523914307355881, "learning_rate": 8.44705882352941e-05, "loss": 0.017, "step": 4166 }, { "epoch": 37.21, "grad_norm": 0.04177824407815933, "learning_rate": 8.441176470588235e-05, "loss": 0.0193, "step": 4167 }, { "epoch": 37.21, "grad_norm": 0.057433731853961945, "learning_rate": 8.435294117647059e-05, "loss": 0.0192, "step": 4168 }, { "epoch": 37.22, "grad_norm": 0.13495777547359467, "learning_rate": 8.429411764705882e-05, "loss": 0.0221, "step": 4169 }, { "epoch": 37.23, "grad_norm": 0.04041381552815437, "learning_rate": 8.423529411764705e-05, "loss": 0.0198, "step": 4170 }, { "epoch": 37.24, "grad_norm": 0.04346789792180061, "learning_rate": 8.417647058823528e-05, "loss": 0.0191, "step": 4171 }, { "epoch": 37.25, "grad_norm": 0.035024456679821014, "learning_rate": 8.411764705882352e-05, "loss": 0.0143, "step": 4172 }, { "epoch": 37.26, "grad_norm": 0.044780805706977844, "learning_rate": 8.405882352941177e-05, "loss": 0.0169, "step": 4173 }, { "epoch": 37.27, "grad_norm": 0.049140334129333496, "learning_rate": 8.4e-05, "loss": 0.0215, "step": 4174 }, { "epoch": 37.28, "grad_norm": 0.07246115058660507, "learning_rate": 8.394117647058823e-05, "loss": 0.0228, "step": 4175 }, { "epoch": 37.29, "grad_norm": 0.06579442322254181, "learning_rate": 8.388235294117646e-05, "loss": 0.022, "step": 4176 }, { "epoch": 37.29, "grad_norm": 0.043875981122255325, "learning_rate": 8.38235294117647e-05, "loss": 0.0197, "step": 4177 }, { "epoch": 37.3, "grad_norm": 0.04688160493969917, "learning_rate": 8.376470588235294e-05, "loss": 0.019, "step": 4178 }, { "epoch": 37.31, "grad_norm": 0.04777594655752182, 
"learning_rate": 8.370588235294116e-05, "loss": 0.0202, "step": 4179 }, { "epoch": 37.32, "grad_norm": 0.044735386967659, "learning_rate": 8.36470588235294e-05, "loss": 0.0152, "step": 4180 }, { "epoch": 37.33, "grad_norm": 0.06980415433645248, "learning_rate": 8.358823529411765e-05, "loss": 0.019, "step": 4181 }, { "epoch": 37.34, "grad_norm": 0.07926139235496521, "learning_rate": 8.352941176470588e-05, "loss": 0.0247, "step": 4182 }, { "epoch": 37.35, "grad_norm": 0.04676423966884613, "learning_rate": 8.347058823529412e-05, "loss": 0.0181, "step": 4183 }, { "epoch": 37.36, "grad_norm": 0.04603610187768936, "learning_rate": 8.341176470588233e-05, "loss": 0.017, "step": 4184 }, { "epoch": 37.37, "grad_norm": 0.04675346612930298, "learning_rate": 8.335294117647058e-05, "loss": 0.0179, "step": 4185 }, { "epoch": 37.38, "grad_norm": 0.053286049515008926, "learning_rate": 8.329411764705882e-05, "loss": 0.0162, "step": 4186 }, { "epoch": 37.38, "grad_norm": 0.06341850012540817, "learning_rate": 8.323529411764705e-05, "loss": 0.0162, "step": 4187 }, { "epoch": 37.39, "grad_norm": 0.09439020603895187, "learning_rate": 8.31764705882353e-05, "loss": 0.0233, "step": 4188 }, { "epoch": 37.4, "grad_norm": 0.045090291649103165, "learning_rate": 8.311764705882351e-05, "loss": 0.017, "step": 4189 }, { "epoch": 37.41, "grad_norm": 0.04638542979955673, "learning_rate": 8.305882352941175e-05, "loss": 0.0214, "step": 4190 }, { "epoch": 37.42, "grad_norm": 0.051234204322099686, "learning_rate": 8.3e-05, "loss": 0.0184, "step": 4191 }, { "epoch": 37.43, "grad_norm": 0.04307663068175316, "learning_rate": 8.294117647058823e-05, "loss": 0.0177, "step": 4192 }, { "epoch": 37.44, "grad_norm": 0.045090969651937485, "learning_rate": 8.288235294117646e-05, "loss": 0.0123, "step": 4193 }, { "epoch": 37.45, "grad_norm": 0.07967652380466461, "learning_rate": 8.282352941176469e-05, "loss": 0.0204, "step": 4194 }, { "epoch": 37.46, "grad_norm": 0.04527030885219574, "learning_rate": 
8.276470588235293e-05, "loss": 0.0201, "step": 4195 }, { "epoch": 37.46, "grad_norm": 0.04721197485923767, "learning_rate": 8.270588235294117e-05, "loss": 0.0227, "step": 4196 }, { "epoch": 37.47, "grad_norm": 0.05305152386426926, "learning_rate": 8.26470588235294e-05, "loss": 0.0184, "step": 4197 }, { "epoch": 37.48, "grad_norm": 0.04736615717411041, "learning_rate": 8.258823529411763e-05, "loss": 0.0155, "step": 4198 }, { "epoch": 37.49, "grad_norm": 0.046044182032346725, "learning_rate": 8.252941176470588e-05, "loss": 0.0201, "step": 4199 }, { "epoch": 37.5, "grad_norm": 0.06805248558521271, "learning_rate": 8.247058823529411e-05, "loss": 0.0169, "step": 4200 }, { "epoch": 37.5, "eval_cer": 0.02986774306299399, "eval_loss": 0.20600314438343048, "eval_runtime": 22.4441, "eval_samples_per_second": 117.715, "eval_steps_per_second": 1.871, "eval_wer": 0.09956366521221738, "step": 4200 }, { "epoch": 37.51, "grad_norm": 0.060498278588056564, "learning_rate": 8.241176470588235e-05, "loss": 0.0182, "step": 4201 }, { "epoch": 37.52, "grad_norm": 0.043822020292282104, "learning_rate": 8.23529411764706e-05, "loss": 0.0185, "step": 4202 }, { "epoch": 37.53, "grad_norm": 0.045863326638936996, "learning_rate": 8.229411764705881e-05, "loss": 0.0185, "step": 4203 }, { "epoch": 37.54, "grad_norm": 0.04996486380696297, "learning_rate": 8.223529411764705e-05, "loss": 0.0194, "step": 4204 }, { "epoch": 37.54, "grad_norm": 0.04862746596336365, "learning_rate": 8.217647058823528e-05, "loss": 0.0155, "step": 4205 }, { "epoch": 37.55, "grad_norm": 0.060789793729782104, "learning_rate": 8.211764705882353e-05, "loss": 0.0184, "step": 4206 }, { "epoch": 37.56, "grad_norm": 0.10485976934432983, "learning_rate": 8.205882352941177e-05, "loss": 0.0249, "step": 4207 }, { "epoch": 37.57, "grad_norm": 0.04610714688897133, "learning_rate": 8.199999999999999e-05, "loss": 0.0178, "step": 4208 }, { "epoch": 37.58, "grad_norm": 0.04796239733695984, "learning_rate": 8.194117647058823e-05, "loss": 
0.0184, "step": 4209 }, { "epoch": 37.59, "grad_norm": 0.04679669439792633, "learning_rate": 8.188235294117646e-05, "loss": 0.0207, "step": 4210 }, { "epoch": 37.6, "grad_norm": 0.05065898597240448, "learning_rate": 8.18235294117647e-05, "loss": 0.0157, "step": 4211 }, { "epoch": 37.61, "grad_norm": 0.051082100719213486, "learning_rate": 8.176470588235292e-05, "loss": 0.0177, "step": 4212 }, { "epoch": 37.62, "grad_norm": 0.06115908920764923, "learning_rate": 8.170588235294116e-05, "loss": 0.0177, "step": 4213 }, { "epoch": 37.62, "grad_norm": 0.04532017186284065, "learning_rate": 8.164705882352941e-05, "loss": 0.0177, "step": 4214 }, { "epoch": 37.63, "grad_norm": 0.04716581851243973, "learning_rate": 8.158823529411764e-05, "loss": 0.0189, "step": 4215 }, { "epoch": 37.64, "grad_norm": 0.05453303083777428, "learning_rate": 8.152941176470588e-05, "loss": 0.0187, "step": 4216 }, { "epoch": 37.65, "grad_norm": 0.04072986915707588, "learning_rate": 8.147058823529411e-05, "loss": 0.0147, "step": 4217 }, { "epoch": 37.66, "grad_norm": 0.0515437088906765, "learning_rate": 8.141176470588234e-05, "loss": 0.0187, "step": 4218 }, { "epoch": 37.67, "grad_norm": 0.0794481411576271, "learning_rate": 8.135294117647058e-05, "loss": 0.0175, "step": 4219 }, { "epoch": 37.68, "grad_norm": 0.038771871477365494, "learning_rate": 8.129411764705883e-05, "loss": 0.0176, "step": 4220 }, { "epoch": 37.69, "grad_norm": 0.04678460955619812, "learning_rate": 8.123529411764706e-05, "loss": 0.0206, "step": 4221 }, { "epoch": 37.7, "grad_norm": 0.04655611515045166, "learning_rate": 8.117647058823529e-05, "loss": 0.0186, "step": 4222 }, { "epoch": 37.71, "grad_norm": 0.04548626393079758, "learning_rate": 8.111764705882352e-05, "loss": 0.0167, "step": 4223 }, { "epoch": 37.71, "grad_norm": 0.05780279263854027, "learning_rate": 8.105882352941176e-05, "loss": 0.0199, "step": 4224 }, { "epoch": 37.72, "grad_norm": 0.06784381717443466, "learning_rate": 8.1e-05, "loss": 0.0198, "step": 4225 }, { 
"epoch": 37.73, "grad_norm": 0.07435184717178345, "learning_rate": 8.094117647058822e-05, "loss": 0.0204, "step": 4226 }, { "epoch": 37.74, "grad_norm": 0.039560895413160324, "learning_rate": 8.088235294117646e-05, "loss": 0.0158, "step": 4227 }, { "epoch": 37.75, "grad_norm": 0.042115263640880585, "learning_rate": 8.08235294117647e-05, "loss": 0.0168, "step": 4228 }, { "epoch": 37.76, "grad_norm": 0.041196148842573166, "learning_rate": 8.076470588235294e-05, "loss": 0.0169, "step": 4229 }, { "epoch": 37.77, "grad_norm": 0.04904539883136749, "learning_rate": 8.070588235294118e-05, "loss": 0.0166, "step": 4230 }, { "epoch": 37.78, "grad_norm": 0.0626995861530304, "learning_rate": 8.06470588235294e-05, "loss": 0.0196, "step": 4231 }, { "epoch": 37.79, "grad_norm": 0.07392898947000504, "learning_rate": 8.058823529411764e-05, "loss": 0.0184, "step": 4232 }, { "epoch": 37.79, "grad_norm": 0.04533496871590614, "learning_rate": 8.052941176470587e-05, "loss": 0.0181, "step": 4233 }, { "epoch": 37.8, "grad_norm": 0.04209979623556137, "learning_rate": 8.047058823529411e-05, "loss": 0.0177, "step": 4234 }, { "epoch": 37.81, "grad_norm": 0.037162382155656815, "learning_rate": 8.041176470588236e-05, "loss": 0.0146, "step": 4235 }, { "epoch": 37.82, "grad_norm": 0.04646997153759003, "learning_rate": 8.035294117647057e-05, "loss": 0.0168, "step": 4236 }, { "epoch": 37.83, "grad_norm": 0.05494370311498642, "learning_rate": 8.029411764705882e-05, "loss": 0.0172, "step": 4237 }, { "epoch": 37.84, "grad_norm": 0.08447180688381195, "learning_rate": 8.023529411764706e-05, "loss": 0.0212, "step": 4238 }, { "epoch": 37.85, "grad_norm": 0.04605470597743988, "learning_rate": 8.017647058823529e-05, "loss": 0.0175, "step": 4239 }, { "epoch": 37.86, "grad_norm": 0.041734080761671066, "learning_rate": 8.011764705882352e-05, "loss": 0.0133, "step": 4240 }, { "epoch": 37.87, "grad_norm": 0.04305846244096756, "learning_rate": 8.005882352941175e-05, "loss": 0.0156, "step": 4241 }, { "epoch": 
37.88, "grad_norm": 0.049476027488708496, "learning_rate": 7.999999999999999e-05, "loss": 0.0164, "step": 4242 }, { "epoch": 37.88, "grad_norm": 0.047128137201070786, "learning_rate": 7.994117647058824e-05, "loss": 0.0155, "step": 4243 }, { "epoch": 37.89, "grad_norm": 0.10725882649421692, "learning_rate": 7.988235294117647e-05, "loss": 0.0232, "step": 4244 }, { "epoch": 37.9, "grad_norm": 0.04166736453771591, "learning_rate": 7.98235294117647e-05, "loss": 0.0171, "step": 4245 }, { "epoch": 37.91, "grad_norm": 0.04831862077116966, "learning_rate": 7.976470588235293e-05, "loss": 0.0203, "step": 4246 }, { "epoch": 37.92, "grad_norm": 0.051550157368183136, "learning_rate": 7.970588235294117e-05, "loss": 0.0189, "step": 4247 }, { "epoch": 37.93, "grad_norm": 0.045276615768671036, "learning_rate": 7.964705882352941e-05, "loss": 0.0187, "step": 4248 }, { "epoch": 37.94, "grad_norm": 0.05321819707751274, "learning_rate": 7.958823529411764e-05, "loss": 0.0161, "step": 4249 }, { "epoch": 37.95, "grad_norm": 0.06772742420434952, "learning_rate": 7.952941176470587e-05, "loss": 0.02, "step": 4250 }, { "epoch": 37.96, "grad_norm": 0.057556118816137314, "learning_rate": 7.94705882352941e-05, "loss": 0.02, "step": 4251 }, { "epoch": 37.96, "grad_norm": 0.039384014904499054, "learning_rate": 7.941176470588235e-05, "loss": 0.0175, "step": 4252 }, { "epoch": 37.97, "grad_norm": 0.04523039981722832, "learning_rate": 7.935294117647059e-05, "loss": 0.019, "step": 4253 }, { "epoch": 37.98, "grad_norm": 0.040083229541778564, "learning_rate": 7.929411764705882e-05, "loss": 0.0147, "step": 4254 }, { "epoch": 37.99, "grad_norm": 0.04562624543905258, "learning_rate": 7.923529411764705e-05, "loss": 0.0167, "step": 4255 }, { "epoch": 38.0, "grad_norm": 0.06696755439043045, "learning_rate": 7.917647058823528e-05, "loss": 0.0192, "step": 4256 }, { "epoch": 38.01, "grad_norm": 0.04006008058786392, "learning_rate": 7.911764705882352e-05, "loss": 0.0157, "step": 4257 }, { "epoch": 38.02, 
"grad_norm": 0.04034001752734184, "learning_rate": 7.905882352941177e-05, "loss": 0.0196, "step": 4258 }, { "epoch": 38.03, "grad_norm": 0.04585159569978714, "learning_rate": 7.899999999999998e-05, "loss": 0.0194, "step": 4259 }, { "epoch": 38.04, "grad_norm": 0.04633148014545441, "learning_rate": 7.894117647058823e-05, "loss": 0.0168, "step": 4260 }, { "epoch": 38.04, "grad_norm": 0.0424240380525589, "learning_rate": 7.888235294117647e-05, "loss": 0.0163, "step": 4261 }, { "epoch": 38.05, "grad_norm": 0.07280178368091583, "learning_rate": 7.88235294117647e-05, "loss": 0.0228, "step": 4262 }, { "epoch": 38.06, "grad_norm": 0.09230028092861176, "learning_rate": 7.876470588235294e-05, "loss": 0.0218, "step": 4263 }, { "epoch": 38.07, "grad_norm": 0.047616370022296906, "learning_rate": 7.870588235294116e-05, "loss": 0.0151, "step": 4264 }, { "epoch": 38.08, "grad_norm": 0.04466402158141136, "learning_rate": 7.86470588235294e-05, "loss": 0.017, "step": 4265 }, { "epoch": 38.09, "grad_norm": 0.04777903854846954, "learning_rate": 7.858823529411765e-05, "loss": 0.0198, "step": 4266 }, { "epoch": 38.1, "grad_norm": 0.04484889283776283, "learning_rate": 7.852941176470588e-05, "loss": 0.0179, "step": 4267 }, { "epoch": 38.11, "grad_norm": 0.06533332914113998, "learning_rate": 7.847058823529412e-05, "loss": 0.0221, "step": 4268 }, { "epoch": 38.12, "grad_norm": 0.10680500417947769, "learning_rate": 7.841176470588234e-05, "loss": 0.0221, "step": 4269 }, { "epoch": 38.12, "grad_norm": 0.03996514156460762, "learning_rate": 7.835294117647058e-05, "loss": 0.02, "step": 4270 }, { "epoch": 38.13, "grad_norm": 0.039795588701963425, "learning_rate": 7.829411764705882e-05, "loss": 0.0156, "step": 4271 }, { "epoch": 38.14, "grad_norm": 0.037465307861566544, "learning_rate": 7.823529411764705e-05, "loss": 0.0166, "step": 4272 }, { "epoch": 38.15, "grad_norm": 0.048377782106399536, "learning_rate": 7.817647058823528e-05, "loss": 0.0178, "step": 4273 }, { "epoch": 38.16, "grad_norm": 
0.04678390920162201, "learning_rate": 7.811764705882351e-05, "loss": 0.0201, "step": 4274 }, { "epoch": 38.17, "grad_norm": 0.08644243329763412, "learning_rate": 7.805882352941176e-05, "loss": 0.0216, "step": 4275 }, { "epoch": 38.18, "grad_norm": 0.043905939906835556, "learning_rate": 7.8e-05, "loss": 0.0187, "step": 4276 }, { "epoch": 38.19, "grad_norm": 0.04375680908560753, "learning_rate": 7.794117647058823e-05, "loss": 0.021, "step": 4277 }, { "epoch": 38.2, "grad_norm": 0.04755211994051933, "learning_rate": 7.788235294117646e-05, "loss": 0.0173, "step": 4278 }, { "epoch": 38.21, "grad_norm": 0.04453584924340248, "learning_rate": 7.78235294117647e-05, "loss": 0.0154, "step": 4279 }, { "epoch": 38.21, "grad_norm": 0.04129549860954285, "learning_rate": 7.776470588235293e-05, "loss": 0.0167, "step": 4280 }, { "epoch": 38.22, "grad_norm": 0.0804203599691391, "learning_rate": 7.770588235294118e-05, "loss": 0.0187, "step": 4281 }, { "epoch": 38.23, "grad_norm": 0.04315444827079773, "learning_rate": 7.764705882352942e-05, "loss": 0.0168, "step": 4282 }, { "epoch": 38.24, "grad_norm": 0.0411263182759285, "learning_rate": 7.758823529411763e-05, "loss": 0.0186, "step": 4283 }, { "epoch": 38.25, "grad_norm": 0.05210395157337189, "learning_rate": 7.752941176470588e-05, "loss": 0.0223, "step": 4284 }, { "epoch": 38.26, "grad_norm": 0.05146675929427147, "learning_rate": 7.747058823529411e-05, "loss": 0.0195, "step": 4285 }, { "epoch": 38.27, "grad_norm": 0.05376441404223442, "learning_rate": 7.741176470588235e-05, "loss": 0.0158, "step": 4286 }, { "epoch": 38.28, "grad_norm": 0.059104304760694504, "learning_rate": 7.73529411764706e-05, "loss": 0.0161, "step": 4287 }, { "epoch": 38.29, "grad_norm": 0.05555211752653122, "learning_rate": 7.729411764705881e-05, "loss": 0.0175, "step": 4288 }, { "epoch": 38.29, "grad_norm": 0.03919730708003044, "learning_rate": 7.723529411764705e-05, "loss": 0.0167, "step": 4289 }, { "epoch": 38.3, "grad_norm": 0.05061023309826851, 
"learning_rate": 7.717647058823528e-05, "loss": 0.0172, "step": 4290 }, { "epoch": 38.31, "grad_norm": 0.04149852320551872, "learning_rate": 7.711764705882353e-05, "loss": 0.0154, "step": 4291 }, { "epoch": 38.32, "grad_norm": 0.04099132493138313, "learning_rate": 7.705882352941174e-05, "loss": 0.0136, "step": 4292 }, { "epoch": 38.33, "grad_norm": 0.0593915656208992, "learning_rate": 7.699999999999999e-05, "loss": 0.02, "step": 4293 }, { "epoch": 38.34, "grad_norm": 0.0756407380104065, "learning_rate": 7.694117647058823e-05, "loss": 0.0211, "step": 4294 }, { "epoch": 38.35, "grad_norm": 0.04416878521442413, "learning_rate": 7.688235294117646e-05, "loss": 0.0173, "step": 4295 }, { "epoch": 38.36, "grad_norm": 0.04282741993665695, "learning_rate": 7.68235294117647e-05, "loss": 0.0175, "step": 4296 }, { "epoch": 38.37, "grad_norm": 0.036725807934999466, "learning_rate": 7.676470588235293e-05, "loss": 0.0154, "step": 4297 }, { "epoch": 38.38, "grad_norm": 0.05139268562197685, "learning_rate": 7.670588235294116e-05, "loss": 0.0212, "step": 4298 }, { "epoch": 38.38, "grad_norm": 0.04303603619337082, "learning_rate": 7.664705882352941e-05, "loss": 0.0141, "step": 4299 }, { "epoch": 38.39, "grad_norm": 0.10238958895206451, "learning_rate": 7.658823529411765e-05, "loss": 0.0191, "step": 4300 }, { "epoch": 38.39, "eval_cer": 0.030653241779511637, "eval_loss": 0.2114274501800537, "eval_runtime": 22.5509, "eval_samples_per_second": 117.157, "eval_steps_per_second": 1.862, "eval_wer": 0.10063466878222928, "step": 4300 }, { "epoch": 38.4, "grad_norm": 0.04634826257824898, "learning_rate": 7.652941176470588e-05, "loss": 0.0206, "step": 4301 }, { "epoch": 38.41, "grad_norm": 0.04878462851047516, "learning_rate": 7.647058823529411e-05, "loss": 0.0211, "step": 4302 }, { "epoch": 38.42, "grad_norm": 0.041815824806690216, "learning_rate": 7.641176470588234e-05, "loss": 0.0179, "step": 4303 }, { "epoch": 38.43, "grad_norm": 0.044155798852443695, "learning_rate": 7.635294117647058e-05, 
"loss": 0.0155, "step": 4304 }, { "epoch": 38.44, "grad_norm": 0.04693682864308357, "learning_rate": 7.629411764705883e-05, "loss": 0.0159, "step": 4305 }, { "epoch": 38.45, "grad_norm": 0.07583024352788925, "learning_rate": 7.623529411764704e-05, "loss": 0.0189, "step": 4306 }, { "epoch": 38.46, "grad_norm": 0.04122854396700859, "learning_rate": 7.617647058823529e-05, "loss": 0.0151, "step": 4307 }, { "epoch": 38.46, "grad_norm": 0.0455055758357048, "learning_rate": 7.611764705882352e-05, "loss": 0.0193, "step": 4308 }, { "epoch": 38.47, "grad_norm": 0.04155740141868591, "learning_rate": 7.605882352941176e-05, "loss": 0.0153, "step": 4309 }, { "epoch": 38.48, "grad_norm": 0.04967249929904938, "learning_rate": 7.6e-05, "loss": 0.0151, "step": 4310 }, { "epoch": 38.49, "grad_norm": 0.04226543381810188, "learning_rate": 7.594117647058822e-05, "loss": 0.0155, "step": 4311 }, { "epoch": 38.5, "grad_norm": 0.06497447937726974, "learning_rate": 7.588235294117646e-05, "loss": 0.0171, "step": 4312 }, { "epoch": 38.51, "grad_norm": 0.05942431464791298, "learning_rate": 7.58235294117647e-05, "loss": 0.0176, "step": 4313 }, { "epoch": 38.52, "grad_norm": 0.05040823668241501, "learning_rate": 7.576470588235294e-05, "loss": 0.0171, "step": 4314 }, { "epoch": 38.53, "grad_norm": 0.051547449082136154, "learning_rate": 7.570588235294118e-05, "loss": 0.0191, "step": 4315 }, { "epoch": 38.54, "grad_norm": 0.04978009685873985, "learning_rate": 7.56470588235294e-05, "loss": 0.0178, "step": 4316 }, { "epoch": 38.54, "grad_norm": 0.05885303393006325, "learning_rate": 7.558823529411764e-05, "loss": 0.0193, "step": 4317 }, { "epoch": 38.55, "grad_norm": 0.0688789114356041, "learning_rate": 7.552941176470588e-05, "loss": 0.0202, "step": 4318 }, { "epoch": 38.56, "grad_norm": 0.08288246393203735, "learning_rate": 7.547058823529411e-05, "loss": 0.0181, "step": 4319 }, { "epoch": 38.57, "grad_norm": 0.04574671387672424, "learning_rate": 7.541176470588234e-05, "loss": 0.0188, "step": 4320 }, { 
"epoch": 38.58, "grad_norm": 0.040807656943798065, "learning_rate": 7.535294117647057e-05, "loss": 0.0183, "step": 4321 }, { "epoch": 38.59, "grad_norm": 0.041974350810050964, "learning_rate": 7.529411764705882e-05, "loss": 0.0168, "step": 4322 }, { "epoch": 38.6, "grad_norm": 0.047496818006038666, "learning_rate": 7.523529411764706e-05, "loss": 0.0168, "step": 4323 }, { "epoch": 38.61, "grad_norm": 0.05184216424822807, "learning_rate": 7.517647058823529e-05, "loss": 0.0192, "step": 4324 }, { "epoch": 38.62, "grad_norm": 0.07647722959518433, "learning_rate": 7.511764705882352e-05, "loss": 0.0216, "step": 4325 }, { "epoch": 38.62, "grad_norm": 0.04705112800002098, "learning_rate": 7.505882352941175e-05, "loss": 0.0194, "step": 4326 }, { "epoch": 38.63, "grad_norm": 0.05239846184849739, "learning_rate": 7.5e-05, "loss": 0.0177, "step": 4327 }, { "epoch": 38.64, "grad_norm": 0.03698711097240448, "learning_rate": 7.494117647058824e-05, "loss": 0.0128, "step": 4328 }, { "epoch": 38.65, "grad_norm": 0.04837555065751076, "learning_rate": 7.488235294117647e-05, "loss": 0.0162, "step": 4329 }, { "epoch": 38.66, "grad_norm": 0.04549936205148697, "learning_rate": 7.48235294117647e-05, "loss": 0.0151, "step": 4330 }, { "epoch": 38.67, "grad_norm": 0.08335675299167633, "learning_rate": 7.476470588235293e-05, "loss": 0.0224, "step": 4331 }, { "epoch": 38.68, "grad_norm": 0.040344540029764175, "learning_rate": 7.470588235294117e-05, "loss": 0.0165, "step": 4332 }, { "epoch": 38.69, "grad_norm": 0.04174313321709633, "learning_rate": 7.46470588235294e-05, "loss": 0.0146, "step": 4333 }, { "epoch": 38.7, "grad_norm": 0.04302332177758217, "learning_rate": 7.458823529411764e-05, "loss": 0.0148, "step": 4334 }, { "epoch": 38.71, "grad_norm": 0.049152255058288574, "learning_rate": 7.452941176470587e-05, "loss": 0.0216, "step": 4335 }, { "epoch": 38.71, "grad_norm": 0.04938128590583801, "learning_rate": 7.44705882352941e-05, "loss": 0.017, "step": 4336 }, { "epoch": 38.72, "grad_norm": 
0.07563067972660065, "learning_rate": 7.441176470588235e-05, "loss": 0.0213, "step": 4337 }, { "epoch": 38.73, "grad_norm": 0.056373558938503265, "learning_rate": 7.435294117647058e-05, "loss": 0.018, "step": 4338 }, { "epoch": 38.74, "grad_norm": 0.04104134440422058, "learning_rate": 7.429411764705882e-05, "loss": 0.018, "step": 4339 }, { "epoch": 38.75, "grad_norm": 0.03918365389108658, "learning_rate": 7.423529411764705e-05, "loss": 0.0149, "step": 4340 }, { "epoch": 38.76, "grad_norm": 0.052823957055807114, "learning_rate": 7.417647058823529e-05, "loss": 0.02, "step": 4341 }, { "epoch": 38.77, "grad_norm": 0.053039487451314926, "learning_rate": 7.411764705882352e-05, "loss": 0.0162, "step": 4342 }, { "epoch": 38.78, "grad_norm": 0.06652284413576126, "learning_rate": 7.405882352941175e-05, "loss": 0.0183, "step": 4343 }, { "epoch": 38.79, "grad_norm": 0.08099169284105301, "learning_rate": 7.4e-05, "loss": 0.022, "step": 4344 }, { "epoch": 38.79, "grad_norm": 0.038255538791418076, "learning_rate": 7.394117647058823e-05, "loss": 0.0173, "step": 4345 }, { "epoch": 38.8, "grad_norm": 0.04626018926501274, "learning_rate": 7.388235294117647e-05, "loss": 0.021, "step": 4346 }, { "epoch": 38.81, "grad_norm": 0.04810623824596405, "learning_rate": 7.38235294117647e-05, "loss": 0.0159, "step": 4347 }, { "epoch": 38.82, "grad_norm": 0.043795496225357056, "learning_rate": 7.376470588235293e-05, "loss": 0.0184, "step": 4348 }, { "epoch": 38.83, "grad_norm": 0.04710904508829117, "learning_rate": 7.370588235294117e-05, "loss": 0.0184, "step": 4349 }, { "epoch": 38.84, "grad_norm": 0.08242934197187424, "learning_rate": 7.36470588235294e-05, "loss": 0.0223, "step": 4350 }, { "epoch": 38.85, "grad_norm": 0.039028432220220566, "learning_rate": 7.358823529411765e-05, "loss": 0.0149, "step": 4351 }, { "epoch": 38.86, "grad_norm": 0.03827488794922829, "learning_rate": 7.352941176470588e-05, "loss": 0.014, "step": 4352 }, { "epoch": 38.87, "grad_norm": 0.04675621539354324, 
"learning_rate": 7.34705882352941e-05, "loss": 0.0199, "step": 4353 }, { "epoch": 38.88, "grad_norm": 0.04818340763449669, "learning_rate": 7.341176470588235e-05, "loss": 0.0174, "step": 4354 }, { "epoch": 38.88, "grad_norm": 0.04653319716453552, "learning_rate": 7.335294117647058e-05, "loss": 0.0168, "step": 4355 }, { "epoch": 38.89, "grad_norm": 0.11294801533222198, "learning_rate": 7.329411764705882e-05, "loss": 0.025, "step": 4356 }, { "epoch": 38.9, "grad_norm": 0.04957776889204979, "learning_rate": 7.323529411764705e-05, "loss": 0.0183, "step": 4357 }, { "epoch": 38.91, "grad_norm": 0.04400325566530228, "learning_rate": 7.31764705882353e-05, "loss": 0.0179, "step": 4358 }, { "epoch": 38.92, "grad_norm": 0.04249448701739311, "learning_rate": 7.311764705882353e-05, "loss": 0.016, "step": 4359 }, { "epoch": 38.93, "grad_norm": 0.041930440813302994, "learning_rate": 7.305882352941176e-05, "loss": 0.0176, "step": 4360 }, { "epoch": 38.94, "grad_norm": 0.04815790429711342, "learning_rate": 7.3e-05, "loss": 0.018, "step": 4361 }, { "epoch": 38.95, "grad_norm": 0.06433267146348953, "learning_rate": 7.294117647058823e-05, "loss": 0.0176, "step": 4362 }, { "epoch": 38.96, "grad_norm": 0.09689892828464508, "learning_rate": 7.288235294117647e-05, "loss": 0.0342, "step": 4363 }, { "epoch": 38.96, "grad_norm": 0.04072459414601326, "learning_rate": 7.28235294117647e-05, "loss": 0.017, "step": 4364 }, { "epoch": 38.97, "grad_norm": 0.04257611185312271, "learning_rate": 7.276470588235293e-05, "loss": 0.0188, "step": 4365 }, { "epoch": 38.98, "grad_norm": 0.04054246470332146, "learning_rate": 7.270588235294116e-05, "loss": 0.0182, "step": 4366 }, { "epoch": 38.99, "grad_norm": 0.05006944015622139, "learning_rate": 7.26470588235294e-05, "loss": 0.0189, "step": 4367 }, { "epoch": 39.0, "grad_norm": 0.06838885694742203, "learning_rate": 7.258823529411765e-05, "loss": 0.0183, "step": 4368 }, { "epoch": 39.01, "grad_norm": 0.03908037766814232, "learning_rate": 
7.252941176470588e-05, "loss": 0.0162, "step": 4369 }, { "epoch": 39.02, "grad_norm": 0.03869599848985672, "learning_rate": 7.247058823529411e-05, "loss": 0.0168, "step": 4370 }, { "epoch": 39.03, "grad_norm": 0.040932200849056244, "learning_rate": 7.241176470588234e-05, "loss": 0.0166, "step": 4371 }, { "epoch": 39.04, "grad_norm": 0.04948969930410385, "learning_rate": 7.235294117647058e-05, "loss": 0.0189, "step": 4372 }, { "epoch": 39.04, "grad_norm": 0.04653513804078102, "learning_rate": 7.229411764705881e-05, "loss": 0.017, "step": 4373 }, { "epoch": 39.05, "grad_norm": 0.1132410317659378, "learning_rate": 7.223529411764706e-05, "loss": 0.024, "step": 4374 }, { "epoch": 39.06, "grad_norm": 0.07041914016008377, "learning_rate": 7.217647058823529e-05, "loss": 0.0245, "step": 4375 }, { "epoch": 39.07, "grad_norm": 0.044276244938373566, "learning_rate": 7.211764705882351e-05, "loss": 0.0171, "step": 4376 }, { "epoch": 39.08, "grad_norm": 0.04436330497264862, "learning_rate": 7.205882352941176e-05, "loss": 0.0176, "step": 4377 }, { "epoch": 39.09, "grad_norm": 0.04595024138689041, "learning_rate": 7.199999999999999e-05, "loss": 0.018, "step": 4378 }, { "epoch": 39.1, "grad_norm": 0.04338918626308441, "learning_rate": 7.194117647058823e-05, "loss": 0.0146, "step": 4379 }, { "epoch": 39.11, "grad_norm": 0.05065865069627762, "learning_rate": 7.188235294117646e-05, "loss": 0.0159, "step": 4380 }, { "epoch": 39.12, "grad_norm": 0.10124563425779343, "learning_rate": 7.18235294117647e-05, "loss": 0.0232, "step": 4381 }, { "epoch": 39.12, "grad_norm": 0.04452907294034958, "learning_rate": 7.176470588235293e-05, "loss": 0.0187, "step": 4382 }, { "epoch": 39.13, "grad_norm": 0.04646245390176773, "learning_rate": 7.170588235294116e-05, "loss": 0.0181, "step": 4383 }, { "epoch": 39.14, "grad_norm": 0.045267120003700256, "learning_rate": 7.164705882352941e-05, "loss": 0.0167, "step": 4384 }, { "epoch": 39.15, "grad_norm": 0.04318848252296448, "learning_rate": 
7.158823529411764e-05, "loss": 0.0161, "step": 4385 }, { "epoch": 39.16, "grad_norm": 0.05045012757182121, "learning_rate": 7.152941176470588e-05, "loss": 0.0166, "step": 4386 }, { "epoch": 39.17, "grad_norm": 0.08273526281118393, "learning_rate": 7.147058823529411e-05, "loss": 0.0214, "step": 4387 }, { "epoch": 39.18, "grad_norm": 0.03756927698850632, "learning_rate": 7.141176470588234e-05, "loss": 0.0162, "step": 4388 }, { "epoch": 39.19, "grad_norm": 0.042943406850099564, "learning_rate": 7.135294117647058e-05, "loss": 0.018, "step": 4389 }, { "epoch": 39.2, "grad_norm": 0.03948609158396721, "learning_rate": 7.129411764705881e-05, "loss": 0.0166, "step": 4390 }, { "epoch": 39.21, "grad_norm": 0.03829723596572876, "learning_rate": 7.123529411764706e-05, "loss": 0.0154, "step": 4391 }, { "epoch": 39.21, "grad_norm": 0.052331846207380295, "learning_rate": 7.117647058823529e-05, "loss": 0.0165, "step": 4392 }, { "epoch": 39.22, "grad_norm": 0.06658195704221725, "learning_rate": 7.111764705882352e-05, "loss": 0.0143, "step": 4393 }, { "epoch": 39.23, "grad_norm": 0.04346032440662384, "learning_rate": 7.105882352941176e-05, "loss": 0.0195, "step": 4394 }, { "epoch": 39.24, "grad_norm": 0.04886326938867569, "learning_rate": 7.099999999999999e-05, "loss": 0.0224, "step": 4395 }, { "epoch": 39.25, "grad_norm": 0.04518451169133186, "learning_rate": 7.094117647058823e-05, "loss": 0.0157, "step": 4396 }, { "epoch": 39.26, "grad_norm": 0.04408060014247894, "learning_rate": 7.088235294117646e-05, "loss": 0.0166, "step": 4397 }, { "epoch": 39.27, "grad_norm": 0.06991055607795715, "learning_rate": 7.082352941176471e-05, "loss": 0.0185, "step": 4398 }, { "epoch": 39.28, "grad_norm": 0.0835832804441452, "learning_rate": 7.076470588235294e-05, "loss": 0.021, "step": 4399 }, { "epoch": 39.29, "grad_norm": 0.20428909361362457, "learning_rate": 7.070588235294117e-05, "loss": 0.0218, "step": 4400 }, { "epoch": 39.29, "eval_cer": 0.031431223761899926, "eval_loss": 0.20662860572338104, 
"eval_runtime": 22.7039, "eval_samples_per_second": 116.368, "eval_steps_per_second": 1.85, "eval_wer": 0.1014676715589052, "step": 4400 }, { "epoch": 39.29, "grad_norm": 0.044423457235097885, "learning_rate": 7.064705882352941e-05, "loss": 0.0197, "step": 4401 }, { "epoch": 39.3, "grad_norm": 0.04140207916498184, "learning_rate": 7.058823529411764e-05, "loss": 0.0176, "step": 4402 }, { "epoch": 39.31, "grad_norm": 0.04382094740867615, "learning_rate": 7.052941176470588e-05, "loss": 0.0157, "step": 4403 }, { "epoch": 39.32, "grad_norm": 0.05342730134725571, "learning_rate": 7.047058823529411e-05, "loss": 0.016, "step": 4404 }, { "epoch": 39.33, "grad_norm": 0.05287891626358032, "learning_rate": 7.041176470588234e-05, "loss": 0.0135, "step": 4405 }, { "epoch": 39.34, "grad_norm": 0.08517567068338394, "learning_rate": 7.035294117647057e-05, "loss": 0.0209, "step": 4406 }, { "epoch": 39.35, "grad_norm": 0.04383644461631775, "learning_rate": 7.029411764705882e-05, "loss": 0.0189, "step": 4407 }, { "epoch": 39.36, "grad_norm": 0.04496624693274498, "learning_rate": 7.023529411764706e-05, "loss": 0.0167, "step": 4408 }, { "epoch": 39.37, "grad_norm": 0.0436580665409565, "learning_rate": 7.017647058823529e-05, "loss": 0.017, "step": 4409 }, { "epoch": 39.38, "grad_norm": 0.04130517691373825, "learning_rate": 7.011764705882352e-05, "loss": 0.0184, "step": 4410 }, { "epoch": 39.38, "grad_norm": 0.05226563289761543, "learning_rate": 7.005882352941175e-05, "loss": 0.0139, "step": 4411 }, { "epoch": 39.39, "grad_norm": 0.10196669399738312, "learning_rate": 7e-05, "loss": 0.0243, "step": 4412 }, { "epoch": 39.4, "grad_norm": 0.04151444882154465, "learning_rate": 6.994117647058822e-05, "loss": 0.0179, "step": 4413 }, { "epoch": 39.41, "grad_norm": 0.035112831741571426, "learning_rate": 6.988235294117647e-05, "loss": 0.016, "step": 4414 }, { "epoch": 39.42, "grad_norm": 0.043129149824380875, "learning_rate": 6.98235294117647e-05, "loss": 0.0169, "step": 4415 }, { "epoch": 39.43, 
"grad_norm": 0.0493992418050766, "learning_rate": 6.976470588235293e-05, "loss": 0.0201, "step": 4416 }, { "epoch": 39.44, "grad_norm": 0.04564020782709122, "learning_rate": 6.970588235294117e-05, "loss": 0.0146, "step": 4417 }, { "epoch": 39.45, "grad_norm": 0.08126378059387207, "learning_rate": 6.96470588235294e-05, "loss": 0.0221, "step": 4418 }, { "epoch": 39.46, "grad_norm": 0.04035772755742073, "learning_rate": 6.958823529411764e-05, "loss": 0.0179, "step": 4419 }, { "epoch": 39.46, "grad_norm": 0.03929206356406212, "learning_rate": 6.952941176470587e-05, "loss": 0.0151, "step": 4420 }, { "epoch": 39.47, "grad_norm": 0.04197614639997482, "learning_rate": 6.947058823529412e-05, "loss": 0.0192, "step": 4421 }, { "epoch": 39.48, "grad_norm": 0.04631732031702995, "learning_rate": 6.941176470588235e-05, "loss": 0.0178, "step": 4422 }, { "epoch": 39.49, "grad_norm": 0.04176364466547966, "learning_rate": 6.935294117647058e-05, "loss": 0.0158, "step": 4423 }, { "epoch": 39.5, "grad_norm": 0.054150938987731934, "learning_rate": 6.929411764705882e-05, "loss": 0.0187, "step": 4424 }, { "epoch": 39.51, "grad_norm": 0.07182719558477402, "learning_rate": 6.923529411764705e-05, "loss": 0.0203, "step": 4425 }, { "epoch": 39.52, "grad_norm": 0.043137047439813614, "learning_rate": 6.91764705882353e-05, "loss": 0.0177, "step": 4426 }, { "epoch": 39.53, "grad_norm": 0.040281008929014206, "learning_rate": 6.911764705882352e-05, "loss": 0.0163, "step": 4427 }, { "epoch": 39.54, "grad_norm": 0.0436340756714344, "learning_rate": 6.905882352941175e-05, "loss": 0.0146, "step": 4428 }, { "epoch": 39.54, "grad_norm": 0.05586494505405426, "learning_rate": 6.9e-05, "loss": 0.0181, "step": 4429 }, { "epoch": 39.55, "grad_norm": 0.053915951400995255, "learning_rate": 6.894117647058823e-05, "loss": 0.0156, "step": 4430 }, { "epoch": 39.56, "grad_norm": 0.09184318780899048, "learning_rate": 6.888235294117647e-05, "loss": 0.0257, "step": 4431 }, { "epoch": 39.57, "grad_norm": 
0.04375550150871277, "learning_rate": 6.88235294117647e-05, "loss": 0.015, "step": 4432 }, { "epoch": 39.58, "grad_norm": 0.04235227778553963, "learning_rate": 6.876470588235293e-05, "loss": 0.0168, "step": 4433 }, { "epoch": 39.59, "grad_norm": 0.04491400346159935, "learning_rate": 6.870588235294117e-05, "loss": 0.0184, "step": 4434 }, { "epoch": 39.6, "grad_norm": 0.0541360005736351, "learning_rate": 6.86470588235294e-05, "loss": 0.0157, "step": 4435 }, { "epoch": 39.61, "grad_norm": 0.048810943961143494, "learning_rate": 6.858823529411765e-05, "loss": 0.0155, "step": 4436 }, { "epoch": 39.62, "grad_norm": 0.07412078976631165, "learning_rate": 6.852941176470588e-05, "loss": 0.0175, "step": 4437 }, { "epoch": 39.62, "grad_norm": 0.04780080169439316, "learning_rate": 6.847058823529412e-05, "loss": 0.0191, "step": 4438 }, { "epoch": 39.63, "grad_norm": 0.043971408158540726, "learning_rate": 6.841176470588235e-05, "loss": 0.0177, "step": 4439 }, { "epoch": 39.64, "grad_norm": 0.03924727067351341, "learning_rate": 6.835294117647058e-05, "loss": 0.0156, "step": 4440 }, { "epoch": 39.65, "grad_norm": 0.05375504493713379, "learning_rate": 6.829411764705882e-05, "loss": 0.0203, "step": 4441 }, { "epoch": 39.66, "grad_norm": 0.05465371906757355, "learning_rate": 6.823529411764705e-05, "loss": 0.0202, "step": 4442 }, { "epoch": 39.67, "grad_norm": 0.10145536810159683, "learning_rate": 6.81764705882353e-05, "loss": 0.022, "step": 4443 }, { "epoch": 39.68, "grad_norm": 0.039379965513944626, "learning_rate": 6.811764705882353e-05, "loss": 0.0159, "step": 4444 }, { "epoch": 39.69, "grad_norm": 0.04439634829759598, "learning_rate": 6.805882352941176e-05, "loss": 0.0162, "step": 4445 }, { "epoch": 39.7, "grad_norm": 0.04451480880379677, "learning_rate": 6.799999999999999e-05, "loss": 0.0201, "step": 4446 }, { "epoch": 39.71, "grad_norm": 0.04450339451432228, "learning_rate": 6.794117647058823e-05, "loss": 0.0163, "step": 4447 }, { "epoch": 39.71, "grad_norm": 
0.058022525161504745, "learning_rate": 6.788235294117647e-05, "loss": 0.0199, "step": 4448 }, { "epoch": 39.72, "grad_norm": 0.0665673092007637, "learning_rate": 6.78235294117647e-05, "loss": 0.0201, "step": 4449 }, { "epoch": 39.73, "grad_norm": 0.053832486271858215, "learning_rate": 6.776470588235293e-05, "loss": 0.018, "step": 4450 }, { "epoch": 39.74, "grad_norm": 0.03415067121386528, "learning_rate": 6.770588235294116e-05, "loss": 0.0145, "step": 4451 }, { "epoch": 39.75, "grad_norm": 0.04492098465561867, "learning_rate": 6.76470588235294e-05, "loss": 0.0179, "step": 4452 }, { "epoch": 39.76, "grad_norm": 0.03915470838546753, "learning_rate": 6.758823529411764e-05, "loss": 0.0178, "step": 4453 }, { "epoch": 39.77, "grad_norm": 0.04149465635418892, "learning_rate": 6.752941176470588e-05, "loss": 0.018, "step": 4454 }, { "epoch": 39.78, "grad_norm": 0.06206176429986954, "learning_rate": 6.747058823529411e-05, "loss": 0.0223, "step": 4455 }, { "epoch": 39.79, "grad_norm": 0.07275174558162689, "learning_rate": 6.741176470588235e-05, "loss": 0.02, "step": 4456 }, { "epoch": 39.79, "grad_norm": 0.04092457890510559, "learning_rate": 6.735294117647058e-05, "loss": 0.0172, "step": 4457 }, { "epoch": 39.8, "grad_norm": 0.046152371913194656, "learning_rate": 6.729411764705881e-05, "loss": 0.0161, "step": 4458 }, { "epoch": 39.81, "grad_norm": 0.04276079684495926, "learning_rate": 6.723529411764706e-05, "loss": 0.0133, "step": 4459 }, { "epoch": 39.82, "grad_norm": 0.0418611615896225, "learning_rate": 6.717647058823529e-05, "loss": 0.015, "step": 4460 }, { "epoch": 39.83, "grad_norm": 0.06505148112773895, "learning_rate": 6.711764705882353e-05, "loss": 0.0163, "step": 4461 }, { "epoch": 39.84, "grad_norm": 0.12159974128007889, "learning_rate": 6.705882352941176e-05, "loss": 0.0279, "step": 4462 }, { "epoch": 39.85, "grad_norm": 0.041626088321208954, "learning_rate": 6.699999999999999e-05, "loss": 0.0167, "step": 4463 }, { "epoch": 39.86, "grad_norm": 0.04645446315407753, 
"learning_rate": 6.694117647058823e-05, "loss": 0.0165, "step": 4464 }, { "epoch": 39.87, "grad_norm": 0.04155229032039642, "learning_rate": 6.688235294117646e-05, "loss": 0.015, "step": 4465 }, { "epoch": 39.88, "grad_norm": 0.05169856548309326, "learning_rate": 6.68235294117647e-05, "loss": 0.0178, "step": 4466 }, { "epoch": 39.88, "grad_norm": 0.04497070983052254, "learning_rate": 6.676470588235294e-05, "loss": 0.0154, "step": 4467 }, { "epoch": 39.89, "grad_norm": 0.11043273657560349, "learning_rate": 6.670588235294117e-05, "loss": 0.0293, "step": 4468 }, { "epoch": 39.9, "grad_norm": 0.041215650737285614, "learning_rate": 6.664705882352941e-05, "loss": 0.0193, "step": 4469 }, { "epoch": 39.91, "grad_norm": 0.04200184717774391, "learning_rate": 6.658823529411764e-05, "loss": 0.0177, "step": 4470 }, { "epoch": 39.92, "grad_norm": 0.035983189940452576, "learning_rate": 6.652941176470588e-05, "loss": 0.0131, "step": 4471 }, { "epoch": 39.93, "grad_norm": 0.042236682027578354, "learning_rate": 6.647058823529411e-05, "loss": 0.0138, "step": 4472 }, { "epoch": 39.94, "grad_norm": 0.05005483701825142, "learning_rate": 6.641176470588234e-05, "loss": 0.0147, "step": 4473 }, { "epoch": 39.95, "grad_norm": 0.06001787260174751, "learning_rate": 6.635294117647059e-05, "loss": 0.0146, "step": 4474 }, { "epoch": 39.96, "grad_norm": 0.07263220101594925, "learning_rate": 6.629411764705882e-05, "loss": 0.0212, "step": 4475 }, { "epoch": 39.96, "grad_norm": 0.04336221143603325, "learning_rate": 6.623529411764706e-05, "loss": 0.0196, "step": 4476 }, { "epoch": 39.97, "grad_norm": 0.05087766796350479, "learning_rate": 6.617647058823529e-05, "loss": 0.0202, "step": 4477 }, { "epoch": 39.98, "grad_norm": 0.04861607402563095, "learning_rate": 6.611764705882353e-05, "loss": 0.0185, "step": 4478 }, { "epoch": 39.99, "grad_norm": 0.043793126940727234, "learning_rate": 6.605882352941176e-05, "loss": 0.0137, "step": 4479 }, { "epoch": 40.0, "grad_norm": 0.05250184237957001, 
"learning_rate": 6.599999999999999e-05, "loss": 0.0179, "step": 4480 }, { "epoch": 40.01, "grad_norm": 0.04231712967157364, "learning_rate": 6.594117647058823e-05, "loss": 0.0181, "step": 4481 }, { "epoch": 40.02, "grad_norm": 0.04119037091732025, "learning_rate": 6.588235294117646e-05, "loss": 0.0162, "step": 4482 }, { "epoch": 40.03, "grad_norm": 0.04248598963022232, "learning_rate": 6.582352941176471e-05, "loss": 0.0175, "step": 4483 }, { "epoch": 40.04, "grad_norm": 0.04289815574884415, "learning_rate": 6.576470588235294e-05, "loss": 0.0173, "step": 4484 }, { "epoch": 40.04, "grad_norm": 0.04525744542479515, "learning_rate": 6.570588235294117e-05, "loss": 0.0172, "step": 4485 }, { "epoch": 40.05, "grad_norm": 0.07593417912721634, "learning_rate": 6.56470588235294e-05, "loss": 0.0168, "step": 4486 }, { "epoch": 40.06, "grad_norm": 0.07838865369558334, "learning_rate": 6.558823529411764e-05, "loss": 0.0159, "step": 4487 }, { "epoch": 40.07, "grad_norm": 0.0439491868019104, "learning_rate": 6.552941176470588e-05, "loss": 0.0165, "step": 4488 }, { "epoch": 40.08, "grad_norm": 0.0446203351020813, "learning_rate": 6.547058823529411e-05, "loss": 0.0171, "step": 4489 }, { "epoch": 40.09, "grad_norm": 0.04487207904458046, "learning_rate": 6.541176470588234e-05, "loss": 0.0168, "step": 4490 }, { "epoch": 40.1, "grad_norm": 0.046270936727523804, "learning_rate": 6.535294117647057e-05, "loss": 0.0171, "step": 4491 }, { "epoch": 40.11, "grad_norm": 0.06900790333747864, "learning_rate": 6.529411764705882e-05, "loss": 0.0198, "step": 4492 }, { "epoch": 40.12, "grad_norm": 0.0703432485461235, "learning_rate": 6.523529411764705e-05, "loss": 0.0219, "step": 4493 }, { "epoch": 40.12, "grad_norm": 0.04324033483862877, "learning_rate": 6.517647058823529e-05, "loss": 0.0188, "step": 4494 }, { "epoch": 40.13, "grad_norm": 0.044322215020656586, "learning_rate": 6.511764705882352e-05, "loss": 0.0189, "step": 4495 }, { "epoch": 40.14, "grad_norm": 0.04601391777396202, "learning_rate": 
6.505882352941176e-05, "loss": 0.0176, "step": 4496 }, { "epoch": 40.15, "grad_norm": 0.04588237777352333, "learning_rate": 6.5e-05, "loss": 0.0147, "step": 4497 }, { "epoch": 40.16, "grad_norm": 0.04775720089673996, "learning_rate": 6.494117647058822e-05, "loss": 0.0145, "step": 4498 }, { "epoch": 40.17, "grad_norm": 0.08080917596817017, "learning_rate": 6.488235294117647e-05, "loss": 0.0193, "step": 4499 }, { "epoch": 40.18, "grad_norm": 0.04132635518908501, "learning_rate": 6.48235294117647e-05, "loss": 0.0182, "step": 4500 }, { "epoch": 40.18, "eval_cer": 0.030010561011451744, "eval_loss": 0.20539185404777527, "eval_runtime": 22.3198, "eval_samples_per_second": 118.37, "eval_steps_per_second": 1.882, "eval_wer": 0.09884966283220945, "step": 4500 }, { "epoch": 40.19, "grad_norm": 0.04232960566878319, "learning_rate": 6.476470588235294e-05, "loss": 0.0183, "step": 4501 }, { "epoch": 40.2, "grad_norm": 0.04780032858252525, "learning_rate": 6.470588235294117e-05, "loss": 0.021, "step": 4502 }, { "epoch": 40.21, "grad_norm": 0.05101853981614113, "learning_rate": 6.46470588235294e-05, "loss": 0.0179, "step": 4503 }, { "epoch": 40.21, "grad_norm": 0.05485577508807182, "learning_rate": 6.458823529411764e-05, "loss": 0.0171, "step": 4504 }, { "epoch": 40.22, "grad_norm": 0.09795645624399185, "learning_rate": 6.452941176470587e-05, "loss": 0.0246, "step": 4505 }, { "epoch": 40.23, "grad_norm": 0.04066602513194084, "learning_rate": 6.447058823529412e-05, "loss": 0.0166, "step": 4506 }, { "epoch": 40.24, "grad_norm": 0.040202949196100235, "learning_rate": 6.441176470588235e-05, "loss": 0.018, "step": 4507 }, { "epoch": 40.25, "grad_norm": 0.043347131460905075, "learning_rate": 6.435294117647058e-05, "loss": 0.0188, "step": 4508 }, { "epoch": 40.26, "grad_norm": 0.04132955148816109, "learning_rate": 6.429411764705882e-05, "loss": 0.0144, "step": 4509 }, { "epoch": 40.27, "grad_norm": 0.04157760739326477, "learning_rate": 6.423529411764705e-05, "loss": 0.0145, "step": 4510 
}, { "epoch": 40.28, "grad_norm": 0.060481470078229904, "learning_rate": 6.41764705882353e-05, "loss": 0.016, "step": 4511 }, { "epoch": 40.29, "grad_norm": 0.05811312794685364, "learning_rate": 6.411764705882352e-05, "loss": 0.0212, "step": 4512 }, { "epoch": 40.29, "grad_norm": 0.04830561950802803, "learning_rate": 6.405882352941175e-05, "loss": 0.0222, "step": 4513 }, { "epoch": 40.3, "grad_norm": 0.04640211910009384, "learning_rate": 6.4e-05, "loss": 0.0158, "step": 4514 }, { "epoch": 40.31, "grad_norm": 0.04630252346396446, "learning_rate": 6.394117647058823e-05, "loss": 0.0182, "step": 4515 }, { "epoch": 40.32, "grad_norm": 0.044155918061733246, "learning_rate": 6.388235294117647e-05, "loss": 0.0167, "step": 4516 }, { "epoch": 40.33, "grad_norm": 0.07158509641885757, "learning_rate": 6.38235294117647e-05, "loss": 0.023, "step": 4517 }, { "epoch": 40.34, "grad_norm": 0.0738370344042778, "learning_rate": 6.376470588235294e-05, "loss": 0.0217, "step": 4518 }, { "epoch": 40.35, "grad_norm": 0.04584616795182228, "learning_rate": 6.370588235294117e-05, "loss": 0.0181, "step": 4519 }, { "epoch": 40.36, "grad_norm": 0.048624083399772644, "learning_rate": 6.36470588235294e-05, "loss": 0.0194, "step": 4520 }, { "epoch": 40.37, "grad_norm": 0.04459695890545845, "learning_rate": 6.358823529411765e-05, "loss": 0.0185, "step": 4521 }, { "epoch": 40.38, "grad_norm": 0.04149356484413147, "learning_rate": 6.352941176470588e-05, "loss": 0.015, "step": 4522 }, { "epoch": 40.38, "grad_norm": 0.06206259876489639, "learning_rate": 6.347058823529412e-05, "loss": 0.0196, "step": 4523 }, { "epoch": 40.39, "grad_norm": 0.08526722341775894, "learning_rate": 6.341176470588235e-05, "loss": 0.021, "step": 4524 }, { "epoch": 40.4, "grad_norm": 0.04059320315718651, "learning_rate": 6.335294117647058e-05, "loss": 0.0152, "step": 4525 }, { "epoch": 40.41, "grad_norm": 0.03885188326239586, "learning_rate": 6.329411764705881e-05, "loss": 0.0158, "step": 4526 }, { "epoch": 40.42, "grad_norm": 
0.0395585261285305, "learning_rate": 6.323529411764705e-05, "loss": 0.0139, "step": 4527 }, { "epoch": 40.43, "grad_norm": 0.04279087111353874, "learning_rate": 6.31764705882353e-05, "loss": 0.0165, "step": 4528 }, { "epoch": 40.44, "grad_norm": 0.03796738386154175, "learning_rate": 6.311764705882353e-05, "loss": 0.0114, "step": 4529 }, { "epoch": 40.45, "grad_norm": 0.0947808027267456, "learning_rate": 6.305882352941176e-05, "loss": 0.0257, "step": 4530 }, { "epoch": 40.46, "grad_norm": 0.04211699590086937, "learning_rate": 6.299999999999999e-05, "loss": 0.0173, "step": 4531 }, { "epoch": 40.46, "grad_norm": 0.04453981667757034, "learning_rate": 6.294117647058823e-05, "loss": 0.0191, "step": 4532 }, { "epoch": 40.47, "grad_norm": 0.047891560941934586, "learning_rate": 6.288235294117646e-05, "loss": 0.0171, "step": 4533 }, { "epoch": 40.48, "grad_norm": 0.055286191403865814, "learning_rate": 6.28235294117647e-05, "loss": 0.0177, "step": 4534 }, { "epoch": 40.49, "grad_norm": 0.05157831311225891, "learning_rate": 6.276470588235293e-05, "loss": 0.019, "step": 4535 }, { "epoch": 40.5, "grad_norm": 0.05864199250936508, "learning_rate": 6.270588235294118e-05, "loss": 0.0171, "step": 4536 }, { "epoch": 40.51, "grad_norm": 0.0728534385561943, "learning_rate": 6.26470588235294e-05, "loss": 0.0193, "step": 4537 }, { "epoch": 40.52, "grad_norm": 0.040822833776474, "learning_rate": 6.258823529411764e-05, "loss": 0.0199, "step": 4538 }, { "epoch": 40.53, "grad_norm": 0.04746582359075546, "learning_rate": 6.252941176470588e-05, "loss": 0.0182, "step": 4539 }, { "epoch": 40.54, "grad_norm": 0.037793178111314774, "learning_rate": 6.247058823529411e-05, "loss": 0.0151, "step": 4540 }, { "epoch": 40.54, "grad_norm": 0.044214483350515366, "learning_rate": 6.241176470588235e-05, "loss": 0.0188, "step": 4541 }, { "epoch": 40.55, "grad_norm": 0.05251981317996979, "learning_rate": 6.235294117647058e-05, "loss": 0.0166, "step": 4542 }, { "epoch": 40.56, "grad_norm": 0.0751509889960289, 
"learning_rate": 6.229411764705881e-05, "loss": 0.0212, "step": 4543 }, { "epoch": 40.57, "grad_norm": 0.038863301277160645, "learning_rate": 6.223529411764706e-05, "loss": 0.0176, "step": 4544 }, { "epoch": 40.58, "grad_norm": 0.041233357042074203, "learning_rate": 6.217647058823529e-05, "loss": 0.017, "step": 4545 }, { "epoch": 40.59, "grad_norm": 0.049184199422597885, "learning_rate": 6.211764705882353e-05, "loss": 0.0178, "step": 4546 }, { "epoch": 40.6, "grad_norm": 0.04078080132603645, "learning_rate": 6.205882352941176e-05, "loss": 0.0172, "step": 4547 }, { "epoch": 40.61, "grad_norm": 0.05991187319159508, "learning_rate": 6.199999999999999e-05, "loss": 0.0175, "step": 4548 }, { "epoch": 40.62, "grad_norm": 0.07770812511444092, "learning_rate": 6.194117647058823e-05, "loss": 0.0192, "step": 4549 }, { "epoch": 40.62, "grad_norm": 0.0437345877289772, "learning_rate": 6.188235294117646e-05, "loss": 0.017, "step": 4550 }, { "epoch": 40.63, "grad_norm": 0.04400186240673065, "learning_rate": 6.18235294117647e-05, "loss": 0.0189, "step": 4551 }, { "epoch": 40.64, "grad_norm": 0.04059171676635742, "learning_rate": 6.176470588235294e-05, "loss": 0.0177, "step": 4552 }, { "epoch": 40.65, "grad_norm": 0.04959942772984505, "learning_rate": 6.170588235294117e-05, "loss": 0.0204, "step": 4553 }, { "epoch": 40.66, "grad_norm": 0.04438275843858719, "learning_rate": 6.164705882352941e-05, "loss": 0.0168, "step": 4554 }, { "epoch": 40.67, "grad_norm": 0.09111711382865906, "learning_rate": 6.158823529411764e-05, "loss": 0.0272, "step": 4555 }, { "epoch": 40.68, "grad_norm": 0.03727611526846886, "learning_rate": 6.152941176470588e-05, "loss": 0.0143, "step": 4556 }, { "epoch": 40.69, "grad_norm": 0.04391516000032425, "learning_rate": 6.147058823529411e-05, "loss": 0.0166, "step": 4557 }, { "epoch": 40.7, "grad_norm": 0.040411531925201416, "learning_rate": 6.141176470588236e-05, "loss": 0.0186, "step": 4558 }, { "epoch": 40.71, "grad_norm": 0.042106371372938156, "learning_rate": 
6.135294117647059e-05, "loss": 0.0184, "step": 4559 }, { "epoch": 40.71, "grad_norm": 0.051107071340084076, "learning_rate": 6.129411764705882e-05, "loss": 0.0194, "step": 4560 }, { "epoch": 40.72, "grad_norm": 0.07036192715167999, "learning_rate": 6.123529411764706e-05, "loss": 0.0188, "step": 4561 }, { "epoch": 40.73, "grad_norm": 0.07041957974433899, "learning_rate": 6.117647058823529e-05, "loss": 0.0173, "step": 4562 }, { "epoch": 40.74, "grad_norm": 0.04342490807175636, "learning_rate": 6.111764705882353e-05, "loss": 0.0176, "step": 4563 }, { "epoch": 40.75, "grad_norm": 0.04865897819399834, "learning_rate": 6.105882352941176e-05, "loss": 0.0189, "step": 4564 }, { "epoch": 40.76, "grad_norm": 0.04451196268200874, "learning_rate": 6.1e-05, "loss": 0.0192, "step": 4565 }, { "epoch": 40.77, "grad_norm": 0.045886747539043427, "learning_rate": 6.094117647058823e-05, "loss": 0.0163, "step": 4566 }, { "epoch": 40.78, "grad_norm": 0.060928717255592346, "learning_rate": 6.0882352941176465e-05, "loss": 0.0176, "step": 4567 }, { "epoch": 40.79, "grad_norm": 0.07810866087675095, "learning_rate": 6.08235294117647e-05, "loss": 0.0216, "step": 4568 }, { "epoch": 40.79, "grad_norm": 0.03726855292916298, "learning_rate": 6.076470588235293e-05, "loss": 0.0154, "step": 4569 }, { "epoch": 40.8, "grad_norm": 0.038348231464624405, "learning_rate": 6.0705882352941175e-05, "loss": 0.0153, "step": 4570 }, { "epoch": 40.81, "grad_norm": 0.044328585267066956, "learning_rate": 6.0647058823529405e-05, "loss": 0.0191, "step": 4571 }, { "epoch": 40.82, "grad_norm": 0.043415606021881104, "learning_rate": 6.058823529411764e-05, "loss": 0.0159, "step": 4572 }, { "epoch": 40.83, "grad_norm": 0.047394152730703354, "learning_rate": 6.052941176470587e-05, "loss": 0.0154, "step": 4573 }, { "epoch": 40.84, "grad_norm": 0.06495626270771027, "learning_rate": 6.0470588235294115e-05, "loss": 0.0192, "step": 4574 }, { "epoch": 40.85, "grad_norm": 0.03826696053147316, "learning_rate": 
6.041176470588235e-05, "loss": 0.0171, "step": 4575 }, { "epoch": 40.86, "grad_norm": 0.04659932851791382, "learning_rate": 6.035294117647058e-05, "loss": 0.0205, "step": 4576 }, { "epoch": 40.87, "grad_norm": 0.04046553745865822, "learning_rate": 6.029411764705882e-05, "loss": 0.0156, "step": 4577 }, { "epoch": 40.88, "grad_norm": 0.05137075111269951, "learning_rate": 6.023529411764705e-05, "loss": 0.0165, "step": 4578 }, { "epoch": 40.88, "grad_norm": 0.04387735575437546, "learning_rate": 6.017647058823529e-05, "loss": 0.0146, "step": 4579 }, { "epoch": 40.89, "grad_norm": 0.06885524094104767, "learning_rate": 6.011764705882352e-05, "loss": 0.0142, "step": 4580 }, { "epoch": 40.9, "grad_norm": 0.034240756183862686, "learning_rate": 6.005882352941176e-05, "loss": 0.0133, "step": 4581 }, { "epoch": 40.91, "grad_norm": 0.04060880467295647, "learning_rate": 5.9999999999999995e-05, "loss": 0.0158, "step": 4582 }, { "epoch": 40.92, "grad_norm": 0.044692594558000565, "learning_rate": 5.994117647058823e-05, "loss": 0.0177, "step": 4583 }, { "epoch": 40.93, "grad_norm": 0.04203140363097191, "learning_rate": 5.988235294117647e-05, "loss": 0.0162, "step": 4584 }, { "epoch": 40.94, "grad_norm": 0.05605940520763397, "learning_rate": 5.98235294117647e-05, "loss": 0.0184, "step": 4585 }, { "epoch": 40.95, "grad_norm": 0.07269809395074844, "learning_rate": 5.9764705882352935e-05, "loss": 0.0237, "step": 4586 }, { "epoch": 40.96, "grad_norm": 0.05527862161397934, "learning_rate": 5.9705882352941164e-05, "loss": 0.0173, "step": 4587 }, { "epoch": 40.96, "grad_norm": 0.046791303902864456, "learning_rate": 5.964705882352941e-05, "loss": 0.0165, "step": 4588 }, { "epoch": 40.97, "grad_norm": 0.040417153388261795, "learning_rate": 5.9588235294117645e-05, "loss": 0.0167, "step": 4589 }, { "epoch": 40.98, "grad_norm": 0.04512058570981026, "learning_rate": 5.9529411764705874e-05, "loss": 0.0165, "step": 4590 }, { "epoch": 40.99, "grad_norm": 0.05198723450303078, "learning_rate": 
5.947058823529411e-05, "loss": 0.0179, "step": 4591 }, { "epoch": 41.0, "grad_norm": 0.060485243797302246, "learning_rate": 5.941176470588235e-05, "loss": 0.0163, "step": 4592 }, { "epoch": 41.01, "grad_norm": 0.04254607483744621, "learning_rate": 5.9352941176470584e-05, "loss": 0.0164, "step": 4593 }, { "epoch": 41.02, "grad_norm": 0.04230436310172081, "learning_rate": 5.9294117647058814e-05, "loss": 0.0186, "step": 4594 }, { "epoch": 41.03, "grad_norm": 0.04634520038962364, "learning_rate": 5.923529411764705e-05, "loss": 0.0172, "step": 4595 }, { "epoch": 41.04, "grad_norm": 0.04482303559780121, "learning_rate": 5.9176470588235294e-05, "loss": 0.0193, "step": 4596 }, { "epoch": 41.04, "grad_norm": 0.04185933992266655, "learning_rate": 5.9117647058823524e-05, "loss": 0.0156, "step": 4597 }, { "epoch": 41.05, "grad_norm": 0.058788761496543884, "learning_rate": 5.905882352941176e-05, "loss": 0.0199, "step": 4598 }, { "epoch": 41.06, "grad_norm": 0.08305046707391739, "learning_rate": 5.899999999999999e-05, "loss": 0.0215, "step": 4599 }, { "epoch": 41.07, "grad_norm": 0.045421600341796875, "learning_rate": 5.894117647058823e-05, "loss": 0.0185, "step": 4600 }, { "epoch": 41.07, "eval_cer": 0.030345055680208063, "eval_loss": 0.20500260591506958, "eval_runtime": 22.2766, "eval_samples_per_second": 118.6, "eval_steps_per_second": 1.885, "eval_wer": 0.09938516461721539, "step": 4600 }, { "epoch": 41.08, "grad_norm": 0.038410548120737076, "learning_rate": 5.888235294117647e-05, "loss": 0.0172, "step": 4601 }, { "epoch": 41.09, "grad_norm": 0.038646068423986435, "learning_rate": 5.88235294117647e-05, "loss": 0.016, "step": 4602 }, { "epoch": 41.1, "grad_norm": 0.04110243543982506, "learning_rate": 5.876470588235294e-05, "loss": 0.0152, "step": 4603 }, { "epoch": 41.11, "grad_norm": 0.052151646465063095, "learning_rate": 5.870588235294117e-05, "loss": 0.0159, "step": 4604 }, { "epoch": 41.12, "grad_norm": 0.06517789512872696, "learning_rate": 5.864705882352941e-05, "loss": 
0.0204, "step": 4605 }, { "epoch": 41.12, "grad_norm": 0.0377584770321846, "learning_rate": 5.858823529411764e-05, "loss": 0.0155, "step": 4606 }, { "epoch": 41.13, "grad_norm": 0.03711734712123871, "learning_rate": 5.852941176470588e-05, "loss": 0.0177, "step": 4607 }, { "epoch": 41.14, "grad_norm": 0.04377947002649307, "learning_rate": 5.8470588235294114e-05, "loss": 0.0149, "step": 4608 }, { "epoch": 41.15, "grad_norm": 0.04752575606107712, "learning_rate": 5.8411764705882344e-05, "loss": 0.0178, "step": 4609 }, { "epoch": 41.16, "grad_norm": 0.04799149930477142, "learning_rate": 5.835294117647059e-05, "loss": 0.0173, "step": 4610 }, { "epoch": 41.17, "grad_norm": 0.1113613173365593, "learning_rate": 5.829411764705882e-05, "loss": 0.0243, "step": 4611 }, { "epoch": 41.18, "grad_norm": 0.04745407775044441, "learning_rate": 5.8235294117647054e-05, "loss": 0.0199, "step": 4612 }, { "epoch": 41.19, "grad_norm": 0.03987278416752815, "learning_rate": 5.8176470588235284e-05, "loss": 0.0171, "step": 4613 }, { "epoch": 41.2, "grad_norm": 0.04175101965665817, "learning_rate": 5.811764705882353e-05, "loss": 0.0176, "step": 4614 }, { "epoch": 41.21, "grad_norm": 0.04692227765917778, "learning_rate": 5.8058823529411764e-05, "loss": 0.0168, "step": 4615 }, { "epoch": 41.21, "grad_norm": 0.04507969692349434, "learning_rate": 5.7999999999999994e-05, "loss": 0.0167, "step": 4616 }, { "epoch": 41.22, "grad_norm": 0.08686289191246033, "learning_rate": 5.794117647058823e-05, "loss": 0.0244, "step": 4617 }, { "epoch": 41.23, "grad_norm": 0.03982887789607048, "learning_rate": 5.788235294117646e-05, "loss": 0.0179, "step": 4618 }, { "epoch": 41.24, "grad_norm": 0.04830826818943024, "learning_rate": 5.7823529411764704e-05, "loss": 0.0195, "step": 4619 }, { "epoch": 41.25, "grad_norm": 0.0378747284412384, "learning_rate": 5.7764705882352933e-05, "loss": 0.0152, "step": 4620 }, { "epoch": 41.26, "grad_norm": 0.03851475194096565, "learning_rate": 5.770588235294117e-05, "loss": 0.0168, 
"step": 4621 }, { "epoch": 41.27, "grad_norm": 0.04425816982984543, "learning_rate": 5.764705882352941e-05, "loss": 0.0167, "step": 4622 }, { "epoch": 41.28, "grad_norm": 0.06909024715423584, "learning_rate": 5.758823529411764e-05, "loss": 0.0153, "step": 4623 }, { "epoch": 41.29, "grad_norm": 0.0706043615937233, "learning_rate": 5.752941176470588e-05, "loss": 0.0238, "step": 4624 }, { "epoch": 41.29, "grad_norm": 0.0388813391327858, "learning_rate": 5.747058823529411e-05, "loss": 0.0165, "step": 4625 }, { "epoch": 41.3, "grad_norm": 0.03613726422190666, "learning_rate": 5.7411764705882347e-05, "loss": 0.0146, "step": 4626 }, { "epoch": 41.31, "grad_norm": 0.044658176600933075, "learning_rate": 5.7352941176470576e-05, "loss": 0.0158, "step": 4627 }, { "epoch": 41.32, "grad_norm": 0.04512956365942955, "learning_rate": 5.729411764705882e-05, "loss": 0.0182, "step": 4628 }, { "epoch": 41.33, "grad_norm": 0.06834746897220612, "learning_rate": 5.7235294117647057e-05, "loss": 0.0176, "step": 4629 }, { "epoch": 41.34, "grad_norm": 0.09034556895494461, "learning_rate": 5.7176470588235286e-05, "loss": 0.0212, "step": 4630 }, { "epoch": 41.35, "grad_norm": 0.0401027612388134, "learning_rate": 5.711764705882352e-05, "loss": 0.0152, "step": 4631 }, { "epoch": 41.36, "grad_norm": 0.04389679804444313, "learning_rate": 5.705882352941176e-05, "loss": 0.0165, "step": 4632 }, { "epoch": 41.37, "grad_norm": 0.04307018220424652, "learning_rate": 5.6999999999999996e-05, "loss": 0.0157, "step": 4633 }, { "epoch": 41.38, "grad_norm": 0.041806433349847794, "learning_rate": 5.6941176470588226e-05, "loss": 0.0142, "step": 4634 }, { "epoch": 41.38, "grad_norm": 0.05418125540018082, "learning_rate": 5.688235294117646e-05, "loss": 0.0184, "step": 4635 }, { "epoch": 41.39, "grad_norm": 0.07835173606872559, "learning_rate": 5.6823529411764706e-05, "loss": 0.0188, "step": 4636 }, { "epoch": 41.4, "grad_norm": 0.034479010850191116, "learning_rate": 5.6764705882352936e-05, "loss": 0.014, "step": 
4637 }, { "epoch": 41.41, "grad_norm": 0.04069066047668457, "learning_rate": 5.670588235294117e-05, "loss": 0.0154, "step": 4638 }, { "epoch": 41.42, "grad_norm": 0.041832029819488525, "learning_rate": 5.66470588235294e-05, "loss": 0.0197, "step": 4639 }, { "epoch": 41.43, "grad_norm": 0.04355741664767265, "learning_rate": 5.658823529411764e-05, "loss": 0.0151, "step": 4640 }, { "epoch": 41.44, "grad_norm": 0.049824878573417664, "learning_rate": 5.652941176470588e-05, "loss": 0.0162, "step": 4641 }, { "epoch": 41.45, "grad_norm": 0.09460069239139557, "learning_rate": 5.647058823529411e-05, "loss": 0.0189, "step": 4642 }, { "epoch": 41.46, "grad_norm": 0.037275031208992004, "learning_rate": 5.641176470588235e-05, "loss": 0.0147, "step": 4643 }, { "epoch": 41.46, "grad_norm": 0.04388081654906273, "learning_rate": 5.635294117647058e-05, "loss": 0.0167, "step": 4644 }, { "epoch": 41.47, "grad_norm": 0.03630803897976875, "learning_rate": 5.629411764705882e-05, "loss": 0.0149, "step": 4645 }, { "epoch": 41.48, "grad_norm": 0.034449201077222824, "learning_rate": 5.623529411764705e-05, "loss": 0.0132, "step": 4646 }, { "epoch": 41.49, "grad_norm": 0.046683307737112045, "learning_rate": 5.617647058823529e-05, "loss": 0.0167, "step": 4647 }, { "epoch": 41.5, "grad_norm": 0.050131574273109436, "learning_rate": 5.6117647058823526e-05, "loss": 0.0146, "step": 4648 }, { "epoch": 41.51, "grad_norm": 0.0743521898984909, "learning_rate": 5.6058823529411756e-05, "loss": 0.0172, "step": 4649 }, { "epoch": 41.52, "grad_norm": 0.04171568527817726, "learning_rate": 5.6e-05, "loss": 0.0157, "step": 4650 }, { "epoch": 41.53, "grad_norm": 0.043973859399557114, "learning_rate": 5.594117647058823e-05, "loss": 0.0181, "step": 4651 }, { "epoch": 41.54, "grad_norm": 0.039228856563568115, "learning_rate": 5.5882352941176466e-05, "loss": 0.0133, "step": 4652 }, { "epoch": 41.54, "grad_norm": 0.045872762799263, "learning_rate": 5.5823529411764696e-05, "loss": 0.0153, "step": 4653 }, { "epoch": 
41.55, "grad_norm": 0.06259597837924957, "learning_rate": 5.576470588235294e-05, "loss": 0.0204, "step": 4654 }, { "epoch": 41.56, "grad_norm": 0.0728481337428093, "learning_rate": 5.5705882352941176e-05, "loss": 0.022, "step": 4655 }, { "epoch": 41.57, "grad_norm": 0.039760757237672806, "learning_rate": 5.5647058823529406e-05, "loss": 0.0164, "step": 4656 }, { "epoch": 41.58, "grad_norm": 0.03884727507829666, "learning_rate": 5.558823529411764e-05, "loss": 0.0154, "step": 4657 }, { "epoch": 41.59, "grad_norm": 0.03832199051976204, "learning_rate": 5.552941176470587e-05, "loss": 0.0162, "step": 4658 }, { "epoch": 41.6, "grad_norm": 0.04435676708817482, "learning_rate": 5.5470588235294115e-05, "loss": 0.0166, "step": 4659 }, { "epoch": 41.61, "grad_norm": 0.05417013168334961, "learning_rate": 5.5411764705882345e-05, "loss": 0.0163, "step": 4660 }, { "epoch": 41.62, "grad_norm": 0.07437200099229813, "learning_rate": 5.535294117647058e-05, "loss": 0.0224, "step": 4661 }, { "epoch": 41.62, "grad_norm": 0.0526440367102623, "learning_rate": 5.5294117647058825e-05, "loss": 0.0207, "step": 4662 }, { "epoch": 41.63, "grad_norm": 0.040949463844299316, "learning_rate": 5.5235294117647055e-05, "loss": 0.0166, "step": 4663 }, { "epoch": 41.64, "grad_norm": 0.04179446026682854, "learning_rate": 5.517647058823529e-05, "loss": 0.0157, "step": 4664 }, { "epoch": 41.65, "grad_norm": 0.04304821044206619, "learning_rate": 5.511764705882352e-05, "loss": 0.0174, "step": 4665 }, { "epoch": 41.66, "grad_norm": 0.05936549976468086, "learning_rate": 5.505882352941176e-05, "loss": 0.0179, "step": 4666 }, { "epoch": 41.67, "grad_norm": 0.08342947065830231, "learning_rate": 5.499999999999999e-05, "loss": 0.0192, "step": 4667 }, { "epoch": 41.68, "grad_norm": 0.04317031800746918, "learning_rate": 5.494117647058823e-05, "loss": 0.0168, "step": 4668 }, { "epoch": 41.69, "grad_norm": 0.045395709574222565, "learning_rate": 5.488235294117647e-05, "loss": 0.0166, "step": 4669 }, { "epoch": 41.7, 
"grad_norm": 0.0382905974984169, "learning_rate": 5.48235294117647e-05, "loss": 0.0167, "step": 4670 }, { "epoch": 41.71, "grad_norm": 0.04265590384602547, "learning_rate": 5.4764705882352935e-05, "loss": 0.0167, "step": 4671 }, { "epoch": 41.71, "grad_norm": 0.04800283536314964, "learning_rate": 5.470588235294117e-05, "loss": 0.0176, "step": 4672 }, { "epoch": 41.72, "grad_norm": 0.08824150264263153, "learning_rate": 5.464705882352941e-05, "loss": 0.0185, "step": 4673 }, { "epoch": 41.73, "grad_norm": 0.04878758266568184, "learning_rate": 5.458823529411764e-05, "loss": 0.0187, "step": 4674 }, { "epoch": 41.74, "grad_norm": 0.0359526127576828, "learning_rate": 5.4529411764705875e-05, "loss": 0.0134, "step": 4675 }, { "epoch": 41.75, "grad_norm": 0.039270978420972824, "learning_rate": 5.447058823529412e-05, "loss": 0.0139, "step": 4676 }, { "epoch": 41.76, "grad_norm": 0.043132711201906204, "learning_rate": 5.441176470588235e-05, "loss": 0.0179, "step": 4677 }, { "epoch": 41.77, "grad_norm": 0.04192988947033882, "learning_rate": 5.4352941176470585e-05, "loss": 0.017, "step": 4678 }, { "epoch": 41.78, "grad_norm": 0.05601268634200096, "learning_rate": 5.4294117647058815e-05, "loss": 0.0139, "step": 4679 }, { "epoch": 41.79, "grad_norm": 0.11920702457427979, "learning_rate": 5.423529411764705e-05, "loss": 0.0288, "step": 4680 }, { "epoch": 41.79, "grad_norm": 0.0415286086499691, "learning_rate": 5.4176470588235295e-05, "loss": 0.0161, "step": 4681 }, { "epoch": 41.8, "grad_norm": 0.043536920100450516, "learning_rate": 5.4117647058823525e-05, "loss": 0.0141, "step": 4682 }, { "epoch": 41.81, "grad_norm": 0.04488612338900566, "learning_rate": 5.405882352941176e-05, "loss": 0.0142, "step": 4683 }, { "epoch": 41.82, "grad_norm": 0.05310561880469322, "learning_rate": 5.399999999999999e-05, "loss": 0.0129, "step": 4684 }, { "epoch": 41.83, "grad_norm": 0.05201854184269905, "learning_rate": 5.3941176470588235e-05, "loss": 0.018, "step": 4685 }, { "epoch": 41.84, "grad_norm": 
0.09678861498832703, "learning_rate": 5.3882352941176465e-05, "loss": 0.0227, "step": 4686 }, { "epoch": 41.85, "grad_norm": 0.04128541052341461, "learning_rate": 5.38235294117647e-05, "loss": 0.0182, "step": 4687 }, { "epoch": 41.86, "grad_norm": 0.039448902010917664, "learning_rate": 5.376470588235294e-05, "loss": 0.0148, "step": 4688 }, { "epoch": 41.87, "grad_norm": 0.04196583852171898, "learning_rate": 5.370588235294117e-05, "loss": 0.0159, "step": 4689 }, { "epoch": 41.88, "grad_norm": 0.04726090282201767, "learning_rate": 5.364705882352941e-05, "loss": 0.0152, "step": 4690 }, { "epoch": 41.88, "grad_norm": 0.04414398968219757, "learning_rate": 5.358823529411764e-05, "loss": 0.0143, "step": 4691 }, { "epoch": 41.89, "grad_norm": 0.09072451293468475, "learning_rate": 5.352941176470588e-05, "loss": 0.0204, "step": 4692 }, { "epoch": 41.9, "grad_norm": 0.0364651121199131, "learning_rate": 5.347058823529411e-05, "loss": 0.0171, "step": 4693 }, { "epoch": 41.91, "grad_norm": 0.03598048537969589, "learning_rate": 5.341176470588235e-05, "loss": 0.016, "step": 4694 }, { "epoch": 41.92, "grad_norm": 0.039510637521743774, "learning_rate": 5.335294117647059e-05, "loss": 0.0166, "step": 4695 }, { "epoch": 41.93, "grad_norm": 0.04128700867295265, "learning_rate": 5.329411764705882e-05, "loss": 0.0155, "step": 4696 }, { "epoch": 41.94, "grad_norm": 0.04875123128294945, "learning_rate": 5.3235294117647054e-05, "loss": 0.016, "step": 4697 }, { "epoch": 41.95, "grad_norm": 0.058702390640974045, "learning_rate": 5.3176470588235284e-05, "loss": 0.0152, "step": 4698 }, { "epoch": 41.96, "grad_norm": 0.07510247826576233, "learning_rate": 5.311764705882353e-05, "loss": 0.0222, "step": 4699 }, { "epoch": 41.96, "grad_norm": 0.04359116405248642, "learning_rate": 5.305882352941176e-05, "loss": 0.0171, "step": 4700 }, { "epoch": 41.96, "eval_cer": 0.030570557704088726, "eval_loss": 0.21361805498600006, "eval_runtime": 22.1312, "eval_samples_per_second": 119.379, 
"eval_steps_per_second": 1.898, "eval_wer": 0.09936533121777072, "step": 4700 }, { "epoch": 41.97, "grad_norm": 0.04909026250243187, "learning_rate": 5.2999999999999994e-05, "loss": 0.0162, "step": 4701 }, { "epoch": 41.98, "grad_norm": 0.05668247863650322, "learning_rate": 5.294117647058824e-05, "loss": 0.0208, "step": 4702 }, { "epoch": 41.99, "grad_norm": 0.06715834885835648, "learning_rate": 5.288235294117647e-05, "loss": 0.0181, "step": 4703 }, { "epoch": 42.0, "grad_norm": 0.06368158757686615, "learning_rate": 5.2823529411764704e-05, "loss": 0.0174, "step": 4704 }, { "epoch": 42.01, "grad_norm": 0.04115816578269005, "learning_rate": 5.2764705882352934e-05, "loss": 0.0174, "step": 4705 }, { "epoch": 42.02, "grad_norm": 0.04423500970005989, "learning_rate": 5.270588235294117e-05, "loss": 0.0163, "step": 4706 }, { "epoch": 42.03, "grad_norm": 0.038058824837207794, "learning_rate": 5.26470588235294e-05, "loss": 0.0152, "step": 4707 }, { "epoch": 42.04, "grad_norm": 0.04965026304125786, "learning_rate": 5.2588235294117644e-05, "loss": 0.0188, "step": 4708 }, { "epoch": 42.04, "grad_norm": 0.05564122274518013, "learning_rate": 5.252941176470588e-05, "loss": 0.0169, "step": 4709 }, { "epoch": 42.05, "grad_norm": 0.0768323689699173, "learning_rate": 5.247058823529411e-05, "loss": 0.0192, "step": 4710 }, { "epoch": 42.06, "grad_norm": 0.048551928251981735, "learning_rate": 5.241176470588235e-05, "loss": 0.0168, "step": 4711 }, { "epoch": 42.07, "grad_norm": 0.032121993601322174, "learning_rate": 5.2352941176470584e-05, "loss": 0.0131, "step": 4712 }, { "epoch": 42.08, "grad_norm": 0.03721311315894127, "learning_rate": 5.229411764705882e-05, "loss": 0.0173, "step": 4713 }, { "epoch": 42.09, "grad_norm": 0.041603635996580124, "learning_rate": 5.223529411764705e-05, "loss": 0.0176, "step": 4714 }, { "epoch": 42.1, "grad_norm": 0.04141776263713837, "learning_rate": 5.217647058823529e-05, "loss": 0.0174, "step": 4715 }, { "epoch": 42.11, "grad_norm": 0.04728950187563896, 
"learning_rate": 5.211764705882353e-05, "loss": 0.0156, "step": 4716 }, { "epoch": 42.12, "grad_norm": 0.09710820019245148, "learning_rate": 5.205882352941176e-05, "loss": 0.0243, "step": 4717 }, { "epoch": 42.12, "grad_norm": 0.04254890978336334, "learning_rate": 5.2e-05, "loss": 0.0172, "step": 4718 }, { "epoch": 42.13, "grad_norm": 0.04165995493531227, "learning_rate": 5.194117647058823e-05, "loss": 0.0182, "step": 4719 }, { "epoch": 42.14, "grad_norm": 0.03956056386232376, "learning_rate": 5.188235294117646e-05, "loss": 0.0165, "step": 4720 }, { "epoch": 42.15, "grad_norm": 0.041155051440000534, "learning_rate": 5.182352941176471e-05, "loss": 0.0187, "step": 4721 }, { "epoch": 42.16, "grad_norm": 0.06585805863142014, "learning_rate": 5.176470588235294e-05, "loss": 0.0187, "step": 4722 }, { "epoch": 42.17, "grad_norm": 0.07872984558343887, "learning_rate": 5.170588235294117e-05, "loss": 0.0183, "step": 4723 }, { "epoch": 42.18, "grad_norm": 0.03979578614234924, "learning_rate": 5.16470588235294e-05, "loss": 0.0163, "step": 4724 }, { "epoch": 42.19, "grad_norm": 0.04740254208445549, "learning_rate": 5.1588235294117647e-05, "loss": 0.0172, "step": 4725 }, { "epoch": 42.2, "grad_norm": 0.03644009679555893, "learning_rate": 5.1529411764705876e-05, "loss": 0.0157, "step": 4726 }, { "epoch": 42.21, "grad_norm": 0.044104836881160736, "learning_rate": 5.147058823529411e-05, "loss": 0.0174, "step": 4727 }, { "epoch": 42.21, "grad_norm": 0.05358245223760605, "learning_rate": 5.141176470588235e-05, "loss": 0.0183, "step": 4728 }, { "epoch": 42.22, "grad_norm": 0.08560299128293991, "learning_rate": 5.135294117647058e-05, "loss": 0.0216, "step": 4729 }, { "epoch": 42.23, "grad_norm": 0.04160348325967789, "learning_rate": 5.129411764705882e-05, "loss": 0.016, "step": 4730 }, { "epoch": 42.24, "grad_norm": 0.04579866677522659, "learning_rate": 5.123529411764705e-05, "loss": 0.0182, "step": 4731 }, { "epoch": 42.25, "grad_norm": 0.05497223883867264, "learning_rate": 
5.117647058823529e-05, "loss": 0.02, "step": 4732 }, { "epoch": 42.26, "grad_norm": 0.04410240799188614, "learning_rate": 5.111764705882352e-05, "loss": 0.0156, "step": 4733 }, { "epoch": 42.27, "grad_norm": 0.05195372551679611, "learning_rate": 5.105882352941176e-05, "loss": 0.0162, "step": 4734 }, { "epoch": 42.28, "grad_norm": 0.07460621744394302, "learning_rate": 5.1e-05, "loss": 0.0166, "step": 4735 }, { "epoch": 42.29, "grad_norm": 0.08454734832048416, "learning_rate": 5.094117647058823e-05, "loss": 0.0207, "step": 4736 }, { "epoch": 42.29, "grad_norm": 0.039156123995780945, "learning_rate": 5.0882352941176466e-05, "loss": 0.0172, "step": 4737 }, { "epoch": 42.3, "grad_norm": 0.049701571464538574, "learning_rate": 5.0823529411764696e-05, "loss": 0.019, "step": 4738 }, { "epoch": 42.31, "grad_norm": 0.04055621474981308, "learning_rate": 5.076470588235294e-05, "loss": 0.0166, "step": 4739 }, { "epoch": 42.32, "grad_norm": 0.04590931907296181, "learning_rate": 5.070588235294117e-05, "loss": 0.0172, "step": 4740 }, { "epoch": 42.33, "grad_norm": 0.06520258635282516, "learning_rate": 5.0647058823529406e-05, "loss": 0.0182, "step": 4741 }, { "epoch": 42.34, "grad_norm": 0.15398338437080383, "learning_rate": 5.058823529411765e-05, "loss": 0.0212, "step": 4742 }, { "epoch": 42.35, "grad_norm": 0.03619791939854622, "learning_rate": 5.052941176470588e-05, "loss": 0.0162, "step": 4743 }, { "epoch": 42.36, "grad_norm": 0.04270081967115402, "learning_rate": 5.0470588235294116e-05, "loss": 0.0184, "step": 4744 }, { "epoch": 42.37, "grad_norm": 0.03663381189107895, "learning_rate": 5.0411764705882346e-05, "loss": 0.0147, "step": 4745 }, { "epoch": 42.38, "grad_norm": 0.04784776642918587, "learning_rate": 5.035294117647058e-05, "loss": 0.0182, "step": 4746 }, { "epoch": 42.38, "grad_norm": 0.0638587549328804, "learning_rate": 5.029411764705881e-05, "loss": 0.0204, "step": 4747 }, { "epoch": 42.39, "grad_norm": 0.10195545107126236, "learning_rate": 5.0235294117647056e-05, 
"loss": 0.0248, "step": 4748 }, { "epoch": 42.4, "grad_norm": 0.035146091133356094, "learning_rate": 5.017647058823529e-05, "loss": 0.0155, "step": 4749 }, { "epoch": 42.41, "grad_norm": 0.04046854376792908, "learning_rate": 5.011764705882352e-05, "loss": 0.0182, "step": 4750 }, { "epoch": 42.42, "grad_norm": 0.03912757709622383, "learning_rate": 5.005882352941176e-05, "loss": 0.0129, "step": 4751 }, { "epoch": 42.43, "grad_norm": 0.04403378441929817, "learning_rate": 4.9999999999999996e-05, "loss": 0.0156, "step": 4752 }, { "epoch": 42.44, "grad_norm": 0.04925810173153877, "learning_rate": 4.994117647058823e-05, "loss": 0.0173, "step": 4753 }, { "epoch": 42.45, "grad_norm": 0.10861742496490479, "learning_rate": 4.988235294117646e-05, "loss": 0.0251, "step": 4754 }, { "epoch": 42.46, "grad_norm": 0.04123973101377487, "learning_rate": 4.98235294117647e-05, "loss": 0.0147, "step": 4755 }, { "epoch": 42.46, "grad_norm": 0.03818226233124733, "learning_rate": 4.976470588235294e-05, "loss": 0.0146, "step": 4756 }, { "epoch": 42.47, "grad_norm": 0.03866466134786606, "learning_rate": 4.970588235294117e-05, "loss": 0.0151, "step": 4757 }, { "epoch": 42.48, "grad_norm": 0.042279865592718124, "learning_rate": 4.964705882352941e-05, "loss": 0.0154, "step": 4758 }, { "epoch": 42.49, "grad_norm": 0.04880718141794205, "learning_rate": 4.958823529411764e-05, "loss": 0.0151, "step": 4759 }, { "epoch": 42.5, "grad_norm": 0.05486907437443733, "learning_rate": 4.9529411764705875e-05, "loss": 0.0171, "step": 4760 }, { "epoch": 42.51, "grad_norm": 0.06372758746147156, "learning_rate": 4.947058823529412e-05, "loss": 0.0196, "step": 4761 }, { "epoch": 42.52, "grad_norm": 0.04401450976729393, "learning_rate": 4.941176470588235e-05, "loss": 0.0183, "step": 4762 }, { "epoch": 42.53, "grad_norm": 0.042200129479169846, "learning_rate": 4.9352941176470585e-05, "loss": 0.0159, "step": 4763 }, { "epoch": 42.54, "grad_norm": 0.04643147811293602, "learning_rate": 4.9294117647058815e-05, "loss": 
0.015, "step": 4764 }, { "epoch": 42.54, "grad_norm": 0.042301055043935776, "learning_rate": 4.923529411764706e-05, "loss": 0.0163, "step": 4765 }, { "epoch": 42.55, "grad_norm": 0.05764590576291084, "learning_rate": 4.917647058823529e-05, "loss": 0.0191, "step": 4766 }, { "epoch": 42.56, "grad_norm": 0.07639754563570023, "learning_rate": 4.9117647058823525e-05, "loss": 0.0232, "step": 4767 }, { "epoch": 42.57, "grad_norm": 0.050897639244794846, "learning_rate": 4.905882352941176e-05, "loss": 0.0154, "step": 4768 }, { "epoch": 42.58, "grad_norm": 0.04394242540001869, "learning_rate": 4.899999999999999e-05, "loss": 0.0177, "step": 4769 }, { "epoch": 42.59, "grad_norm": 0.03962036967277527, "learning_rate": 4.8941176470588235e-05, "loss": 0.0166, "step": 4770 }, { "epoch": 42.6, "grad_norm": 0.04944916069507599, "learning_rate": 4.8882352941176465e-05, "loss": 0.0206, "step": 4771 }, { "epoch": 42.61, "grad_norm": 0.04640146717429161, "learning_rate": 4.88235294117647e-05, "loss": 0.0132, "step": 4772 }, { "epoch": 42.62, "grad_norm": 0.08091548085212708, "learning_rate": 4.876470588235293e-05, "loss": 0.0202, "step": 4773 }, { "epoch": 42.62, "grad_norm": 0.04228280484676361, "learning_rate": 4.8705882352941175e-05, "loss": 0.0171, "step": 4774 }, { "epoch": 42.63, "grad_norm": 0.04076916724443436, "learning_rate": 4.864705882352941e-05, "loss": 0.016, "step": 4775 }, { "epoch": 42.64, "grad_norm": 0.047374263405799866, "learning_rate": 4.858823529411764e-05, "loss": 0.0169, "step": 4776 }, { "epoch": 42.65, "grad_norm": 0.047457072883844376, "learning_rate": 4.852941176470588e-05, "loss": 0.0149, "step": 4777 }, { "epoch": 42.66, "grad_norm": 0.04585172235965729, "learning_rate": 4.847058823529411e-05, "loss": 0.019, "step": 4778 }, { "epoch": 42.67, "grad_norm": 0.09318045526742935, "learning_rate": 4.841176470588235e-05, "loss": 0.0262, "step": 4779 }, { "epoch": 42.68, "grad_norm": 0.03627597540616989, "learning_rate": 4.835294117647058e-05, "loss": 0.0147, 
"step": 4780 }, { "epoch": 42.69, "grad_norm": 0.04001631587743759, "learning_rate": 4.829411764705882e-05, "loss": 0.0181, "step": 4781 }, { "epoch": 42.7, "grad_norm": 0.04767382889986038, "learning_rate": 4.823529411764706e-05, "loss": 0.0166, "step": 4782 }, { "epoch": 42.71, "grad_norm": 0.04053715243935585, "learning_rate": 4.817647058823529e-05, "loss": 0.0164, "step": 4783 }, { "epoch": 42.71, "grad_norm": 0.04655947536230087, "learning_rate": 4.811764705882353e-05, "loss": 0.0163, "step": 4784 }, { "epoch": 42.72, "grad_norm": 0.07540638744831085, "learning_rate": 4.805882352941176e-05, "loss": 0.0162, "step": 4785 }, { "epoch": 42.73, "grad_norm": 0.04849407076835632, "learning_rate": 4.7999999999999994e-05, "loss": 0.016, "step": 4786 }, { "epoch": 42.74, "grad_norm": 0.04162999987602234, "learning_rate": 4.7941176470588224e-05, "loss": 0.0163, "step": 4787 }, { "epoch": 42.75, "grad_norm": 0.04699215292930603, "learning_rate": 4.788235294117647e-05, "loss": 0.018, "step": 4788 }, { "epoch": 42.76, "grad_norm": 0.03915581852197647, "learning_rate": 4.7823529411764704e-05, "loss": 0.0142, "step": 4789 }, { "epoch": 42.77, "grad_norm": 0.051829997450113297, "learning_rate": 4.7764705882352934e-05, "loss": 0.0181, "step": 4790 }, { "epoch": 42.78, "grad_norm": 0.047189947217702866, "learning_rate": 4.770588235294118e-05, "loss": 0.0171, "step": 4791 }, { "epoch": 42.79, "grad_norm": 0.05920446664094925, "learning_rate": 4.764705882352941e-05, "loss": 0.0159, "step": 4792 }, { "epoch": 42.79, "grad_norm": 0.03865901008248329, "learning_rate": 4.7588235294117644e-05, "loss": 0.0179, "step": 4793 }, { "epoch": 42.8, "grad_norm": 0.043928008526563644, "learning_rate": 4.7529411764705874e-05, "loss": 0.0186, "step": 4794 }, { "epoch": 42.81, "grad_norm": 0.04029054567217827, "learning_rate": 4.747058823529411e-05, "loss": 0.0181, "step": 4795 }, { "epoch": 42.82, "grad_norm": 0.04039912298321724, "learning_rate": 4.7411764705882354e-05, "loss": 0.0148, "step": 
4796 }, { "epoch": 42.83, "grad_norm": 0.05792859196662903, "learning_rate": 4.7352941176470584e-05, "loss": 0.0168, "step": 4797 }, { "epoch": 42.84, "grad_norm": 0.08704151958227158, "learning_rate": 4.729411764705882e-05, "loss": 0.0213, "step": 4798 }, { "epoch": 42.85, "grad_norm": 0.03789398819208145, "learning_rate": 4.723529411764705e-05, "loss": 0.0172, "step": 4799 }, { "epoch": 42.86, "grad_norm": 0.04100920259952545, "learning_rate": 4.717647058823529e-05, "loss": 0.0171, "step": 4800 }, { "epoch": 42.86, "eval_cer": 0.031784510265979636, "eval_loss": 0.20623566210269928, "eval_runtime": 22.326, "eval_samples_per_second": 118.337, "eval_steps_per_second": 1.881, "eval_wer": 0.1006743355811186, "step": 4800 }, { "epoch": 42.87, "grad_norm": 0.043528687208890915, "learning_rate": 4.711764705882353e-05, "loss": 0.0186, "step": 4801 }, { "epoch": 42.88, "grad_norm": 0.03754991665482521, "learning_rate": 4.705882352941176e-05, "loss": 0.0147, "step": 4802 }, { "epoch": 42.88, "grad_norm": 0.038673486560583115, "learning_rate": 4.7e-05, "loss": 0.0156, "step": 4803 }, { "epoch": 42.89, "grad_norm": 0.11040922999382019, "learning_rate": 4.694117647058823e-05, "loss": 0.027, "step": 4804 }, { "epoch": 42.9, "grad_norm": 0.03630939498543739, "learning_rate": 4.688235294117647e-05, "loss": 0.0167, "step": 4805 }, { "epoch": 42.91, "grad_norm": 0.042319733649492264, "learning_rate": 4.68235294117647e-05, "loss": 0.0184, "step": 4806 }, { "epoch": 42.92, "grad_norm": 0.03956311196088791, "learning_rate": 4.676470588235294e-05, "loss": 0.017, "step": 4807 }, { "epoch": 42.93, "grad_norm": 0.03828151151537895, "learning_rate": 4.6705882352941174e-05, "loss": 0.0143, "step": 4808 }, { "epoch": 42.94, "grad_norm": 0.048234179615974426, "learning_rate": 4.6647058823529404e-05, "loss": 0.0169, "step": 4809 }, { "epoch": 42.95, "grad_norm": 0.06010056287050247, "learning_rate": 4.658823529411765e-05, "loss": 0.0213, "step": 4810 }, { "epoch": 42.96, "grad_norm": 
0.06197648495435715, "learning_rate": 4.652941176470588e-05, "loss": 0.018, "step": 4811 }, { "epoch": 42.96, "grad_norm": 0.04620685800909996, "learning_rate": 4.6470588235294114e-05, "loss": 0.0183, "step": 4812 }, { "epoch": 42.97, "grad_norm": 0.035234078764915466, "learning_rate": 4.6411764705882343e-05, "loss": 0.0158, "step": 4813 }, { "epoch": 42.98, "grad_norm": 0.04321025311946869, "learning_rate": 4.635294117647059e-05, "loss": 0.0147, "step": 4814 }, { "epoch": 42.99, "grad_norm": 0.04962904006242752, "learning_rate": 4.6294117647058824e-05, "loss": 0.0139, "step": 4815 }, { "epoch": 43.0, "grad_norm": 0.08016182482242584, "learning_rate": 4.6235294117647053e-05, "loss": 0.0199, "step": 4816 }, { "epoch": 43.01, "grad_norm": 0.03688027709722519, "learning_rate": 4.617647058823529e-05, "loss": 0.015, "step": 4817 }, { "epoch": 43.02, "grad_norm": 0.04421921446919441, "learning_rate": 4.611764705882352e-05, "loss": 0.0165, "step": 4818 }, { "epoch": 43.03, "grad_norm": 0.03546473756432533, "learning_rate": 4.605882352941176e-05, "loss": 0.0121, "step": 4819 }, { "epoch": 43.04, "grad_norm": 0.03673641011118889, "learning_rate": 4.599999999999999e-05, "loss": 0.0129, "step": 4820 }, { "epoch": 43.04, "grad_norm": 0.04233020171523094, "learning_rate": 4.594117647058823e-05, "loss": 0.0147, "step": 4821 }, { "epoch": 43.05, "grad_norm": 0.08312328159809113, "learning_rate": 4.588235294117647e-05, "loss": 0.0176, "step": 4822 }, { "epoch": 43.06, "grad_norm": 0.06797804683446884, "learning_rate": 4.58235294117647e-05, "loss": 0.0193, "step": 4823 }, { "epoch": 43.07, "grad_norm": 0.03830239549279213, "learning_rate": 4.576470588235294e-05, "loss": 0.0154, "step": 4824 }, { "epoch": 43.08, "grad_norm": 0.04687207564711571, "learning_rate": 4.570588235294117e-05, "loss": 0.0163, "step": 4825 }, { "epoch": 43.09, "grad_norm": 0.041979048401117325, "learning_rate": 4.5647058823529406e-05, "loss": 0.0149, "step": 4826 }, { "epoch": 43.1, "grad_norm": 
0.04596380516886711, "learning_rate": 4.5588235294117636e-05, "loss": 0.017, "step": 4827 }, { "epoch": 43.11, "grad_norm": 0.06350573152303696, "learning_rate": 4.552941176470588e-05, "loss": 0.0195, "step": 4828 }, { "epoch": 43.12, "grad_norm": 0.06950381398200989, "learning_rate": 4.5470588235294116e-05, "loss": 0.0202, "step": 4829 }, { "epoch": 43.12, "grad_norm": 0.04189309477806091, "learning_rate": 4.5411764705882346e-05, "loss": 0.0155, "step": 4830 }, { "epoch": 43.13, "grad_norm": 0.039666492491960526, "learning_rate": 4.535294117647059e-05, "loss": 0.0162, "step": 4831 }, { "epoch": 43.14, "grad_norm": 0.03722996264696121, "learning_rate": 4.529411764705882e-05, "loss": 0.0145, "step": 4832 }, { "epoch": 43.15, "grad_norm": 0.04492158815264702, "learning_rate": 4.5235294117647056e-05, "loss": 0.0173, "step": 4833 }, { "epoch": 43.16, "grad_norm": 0.05497761815786362, "learning_rate": 4.5176470588235286e-05, "loss": 0.0156, "step": 4834 }, { "epoch": 43.17, "grad_norm": 0.10796255618333817, "learning_rate": 4.511764705882352e-05, "loss": 0.0221, "step": 4835 }, { "epoch": 43.18, "grad_norm": 0.046445827931165695, "learning_rate": 4.5058823529411766e-05, "loss": 0.0186, "step": 4836 }, { "epoch": 43.19, "grad_norm": 0.03872071951627731, "learning_rate": 4.4999999999999996e-05, "loss": 0.0165, "step": 4837 }, { "epoch": 43.2, "grad_norm": 0.04532414674758911, "learning_rate": 4.494117647058823e-05, "loss": 0.016, "step": 4838 }, { "epoch": 43.21, "grad_norm": 0.04898528382182121, "learning_rate": 4.488235294117646e-05, "loss": 0.0188, "step": 4839 }, { "epoch": 43.21, "grad_norm": 0.0423404835164547, "learning_rate": 4.48235294117647e-05, "loss": 0.0133, "step": 4840 }, { "epoch": 43.22, "grad_norm": 0.09090810269117355, "learning_rate": 4.4764705882352936e-05, "loss": 0.0183, "step": 4841 }, { "epoch": 43.23, "grad_norm": 0.03748342767357826, "learning_rate": 4.470588235294117e-05, "loss": 0.0158, "step": 4842 }, { "epoch": 43.24, "grad_norm": 
0.037210963666439056, "learning_rate": 4.464705882352941e-05, "loss": 0.0177, "step": 4843 }, { "epoch": 43.25, "grad_norm": 0.039508573710918427, "learning_rate": 4.458823529411764e-05, "loss": 0.0154, "step": 4844 }, { "epoch": 43.26, "grad_norm": 0.042392343282699585, "learning_rate": 4.452941176470588e-05, "loss": 0.0134, "step": 4845 }, { "epoch": 43.27, "grad_norm": 0.046576011925935745, "learning_rate": 4.447058823529411e-05, "loss": 0.0158, "step": 4846 }, { "epoch": 43.28, "grad_norm": 0.07849511504173279, "learning_rate": 4.441176470588235e-05, "loss": 0.0201, "step": 4847 }, { "epoch": 43.29, "grad_norm": 0.06206928938627243, "learning_rate": 4.4352941176470586e-05, "loss": 0.0216, "step": 4848 }, { "epoch": 43.29, "grad_norm": 0.03816213831305504, "learning_rate": 4.4294117647058816e-05, "loss": 0.0157, "step": 4849 }, { "epoch": 43.3, "grad_norm": 0.036603085696697235, "learning_rate": 4.423529411764706e-05, "loss": 0.0136, "step": 4850 }, { "epoch": 43.31, "grad_norm": 0.039472538977861404, "learning_rate": 4.417647058823529e-05, "loss": 0.0156, "step": 4851 }, { "epoch": 43.32, "grad_norm": 0.04416007548570633, "learning_rate": 4.4117647058823526e-05, "loss": 0.0174, "step": 4852 }, { "epoch": 43.33, "grad_norm": 0.045166872441768646, "learning_rate": 4.4058823529411755e-05, "loss": 0.0154, "step": 4853 }, { "epoch": 43.34, "grad_norm": 0.06845077872276306, "learning_rate": 4.4e-05, "loss": 0.0179, "step": 4854 }, { "epoch": 43.35, "grad_norm": 0.04094138368964195, "learning_rate": 4.3941176470588236e-05, "loss": 0.0167, "step": 4855 }, { "epoch": 43.36, "grad_norm": 0.041409075260162354, "learning_rate": 4.3882352941176465e-05, "loss": 0.0166, "step": 4856 }, { "epoch": 43.37, "grad_norm": 0.038814477622509, "learning_rate": 4.38235294117647e-05, "loss": 0.0158, "step": 4857 }, { "epoch": 43.38, "grad_norm": 0.04638247936964035, "learning_rate": 4.376470588235293e-05, "loss": 0.0167, "step": 4858 }, { "epoch": 43.38, "grad_norm": 
0.05674579739570618, "learning_rate": 4.3705882352941175e-05, "loss": 0.0154, "step": 4859 }, { "epoch": 43.39, "grad_norm": 0.09118795394897461, "learning_rate": 4.3647058823529405e-05, "loss": 0.0194, "step": 4860 }, { "epoch": 43.4, "grad_norm": 0.04603675752878189, "learning_rate": 4.358823529411764e-05, "loss": 0.0185, "step": 4861 }, { "epoch": 43.41, "grad_norm": 0.043492477387189865, "learning_rate": 4.3529411764705885e-05, "loss": 0.0182, "step": 4862 }, { "epoch": 43.42, "grad_norm": 0.03807063400745392, "learning_rate": 4.3470588235294115e-05, "loss": 0.0146, "step": 4863 }, { "epoch": 43.43, "grad_norm": 0.04093649610877037, "learning_rate": 4.341176470588235e-05, "loss": 0.0172, "step": 4864 }, { "epoch": 43.44, "grad_norm": 0.04611308127641678, "learning_rate": 4.335294117647058e-05, "loss": 0.0156, "step": 4865 }, { "epoch": 43.45, "grad_norm": 0.09458320587873459, "learning_rate": 4.329411764705882e-05, "loss": 0.0227, "step": 4866 }, { "epoch": 43.46, "grad_norm": 0.043307170271873474, "learning_rate": 4.323529411764705e-05, "loss": 0.0149, "step": 4867 }, { "epoch": 43.46, "grad_norm": 0.04051913321018219, "learning_rate": 4.317647058823529e-05, "loss": 0.0143, "step": 4868 }, { "epoch": 43.47, "grad_norm": 0.04021545872092247, "learning_rate": 4.311764705882353e-05, "loss": 0.0156, "step": 4869 }, { "epoch": 43.48, "grad_norm": 0.046592261642217636, "learning_rate": 4.305882352941176e-05, "loss": 0.0179, "step": 4870 }, { "epoch": 43.49, "grad_norm": 0.049437109380960464, "learning_rate": 4.3e-05, "loss": 0.0164, "step": 4871 }, { "epoch": 43.5, "grad_norm": 0.05881152302026749, "learning_rate": 4.294117647058823e-05, "loss": 0.0174, "step": 4872 }, { "epoch": 43.51, "grad_norm": 0.09323722869157791, "learning_rate": 4.288235294117647e-05, "loss": 0.019, "step": 4873 }, { "epoch": 43.52, "grad_norm": 0.03924582153558731, "learning_rate": 4.28235294117647e-05, "loss": 0.0141, "step": 4874 }, { "epoch": 43.53, "grad_norm": 0.03648946061730385, 
"learning_rate": 4.2764705882352935e-05, "loss": 0.0137, "step": 4875 }, { "epoch": 43.54, "grad_norm": 0.04003776237368584, "learning_rate": 4.270588235294118e-05, "loss": 0.0168, "step": 4876 }, { "epoch": 43.54, "grad_norm": 0.05596655234694481, "learning_rate": 4.264705882352941e-05, "loss": 0.014, "step": 4877 }, { "epoch": 43.55, "grad_norm": 0.047849416732788086, "learning_rate": 4.2588235294117645e-05, "loss": 0.0113, "step": 4878 }, { "epoch": 43.56, "grad_norm": 0.08403313905000687, "learning_rate": 4.2529411764705875e-05, "loss": 0.0186, "step": 4879 }, { "epoch": 43.57, "grad_norm": 0.042822834104299545, "learning_rate": 4.247058823529411e-05, "loss": 0.0182, "step": 4880 }, { "epoch": 43.58, "grad_norm": 0.04393649473786354, "learning_rate": 4.241176470588235e-05, "loss": 0.0157, "step": 4881 }, { "epoch": 43.59, "grad_norm": 0.03740609437227249, "learning_rate": 4.2352941176470585e-05, "loss": 0.0148, "step": 4882 }, { "epoch": 43.6, "grad_norm": 0.049269191920757294, "learning_rate": 4.229411764705882e-05, "loss": 0.0185, "step": 4883 }, { "epoch": 43.61, "grad_norm": 0.054355815052986145, "learning_rate": 4.223529411764705e-05, "loss": 0.0173, "step": 4884 }, { "epoch": 43.62, "grad_norm": 0.09597691148519516, "learning_rate": 4.2176470588235294e-05, "loss": 0.0192, "step": 4885 }, { "epoch": 43.62, "grad_norm": 0.04877206310629845, "learning_rate": 4.2117647058823524e-05, "loss": 0.02, "step": 4886 }, { "epoch": 43.63, "grad_norm": 0.03655581921339035, "learning_rate": 4.205882352941176e-05, "loss": 0.0148, "step": 4887 }, { "epoch": 43.64, "grad_norm": 0.043534889817237854, "learning_rate": 4.2e-05, "loss": 0.0144, "step": 4888 }, { "epoch": 43.65, "grad_norm": 0.044560547918081284, "learning_rate": 4.194117647058823e-05, "loss": 0.0157, "step": 4889 }, { "epoch": 43.66, "grad_norm": 0.04925515875220299, "learning_rate": 4.188235294117647e-05, "loss": 0.0144, "step": 4890 }, { "epoch": 43.67, "grad_norm": 0.08285937458276749, "learning_rate": 
4.18235294117647e-05, "loss": 0.0222, "step": 4891 }, { "epoch": 43.68, "grad_norm": 0.0362589992582798, "learning_rate": 4.176470588235294e-05, "loss": 0.0131, "step": 4892 }, { "epoch": 43.69, "grad_norm": 0.032939568161964417, "learning_rate": 4.170588235294117e-05, "loss": 0.0131, "step": 4893 }, { "epoch": 43.7, "grad_norm": 0.043853238224983215, "learning_rate": 4.164705882352941e-05, "loss": 0.0155, "step": 4894 }, { "epoch": 43.71, "grad_norm": 0.04488749802112579, "learning_rate": 4.158823529411765e-05, "loss": 0.0143, "step": 4895 }, { "epoch": 43.71, "grad_norm": 0.04842665046453476, "learning_rate": 4.152941176470588e-05, "loss": 0.0161, "step": 4896 }, { "epoch": 43.72, "grad_norm": 0.05583030730485916, "learning_rate": 4.1470588235294114e-05, "loss": 0.0146, "step": 4897 }, { "epoch": 43.73, "grad_norm": 0.07999975234270096, "learning_rate": 4.1411764705882344e-05, "loss": 0.0202, "step": 4898 }, { "epoch": 43.74, "grad_norm": 0.044483449310064316, "learning_rate": 4.135294117647059e-05, "loss": 0.0181, "step": 4899 }, { "epoch": 43.75, "grad_norm": 0.04633469134569168, "learning_rate": 4.129411764705882e-05, "loss": 0.0161, "step": 4900 }, { "epoch": 43.75, "eval_cer": 0.03188222780966126, "eval_loss": 0.21013225615024567, "eval_runtime": 22.254, "eval_samples_per_second": 118.72, "eval_steps_per_second": 1.887, "eval_wer": 0.10134867116223721, "step": 4900 }, { "epoch": 43.76, "grad_norm": 0.05003685876727104, "learning_rate": 4.1235294117647054e-05, "loss": 0.0185, "step": 4901 }, { "epoch": 43.77, "grad_norm": 0.04347279667854309, "learning_rate": 4.11764705882353e-05, "loss": 0.0161, "step": 4902 }, { "epoch": 43.78, "grad_norm": 0.054564401507377625, "learning_rate": 4.111764705882353e-05, "loss": 0.0145, "step": 4903 }, { "epoch": 43.79, "grad_norm": 0.09958654642105103, "learning_rate": 4.1058823529411764e-05, "loss": 0.0222, "step": 4904 }, { "epoch": 43.79, "grad_norm": 0.03584211319684982, "learning_rate": 4.0999999999999994e-05, "loss": 
0.0154, "step": 4905 }, { "epoch": 43.8, "grad_norm": 0.043052252382040024, "learning_rate": 4.094117647058823e-05, "loss": 0.0149, "step": 4906 }, { "epoch": 43.81, "grad_norm": 0.045404739677906036, "learning_rate": 4.088235294117646e-05, "loss": 0.0194, "step": 4907 }, { "epoch": 43.82, "grad_norm": 0.042859796434640884, "learning_rate": 4.0823529411764704e-05, "loss": 0.0147, "step": 4908 }, { "epoch": 43.83, "grad_norm": 0.05667326971888542, "learning_rate": 4.076470588235294e-05, "loss": 0.0144, "step": 4909 }, { "epoch": 43.84, "grad_norm": 0.0632072240114212, "learning_rate": 4.070588235294117e-05, "loss": 0.0172, "step": 4910 }, { "epoch": 43.85, "grad_norm": 0.041090261191129684, "learning_rate": 4.0647058823529414e-05, "loss": 0.0151, "step": 4911 }, { "epoch": 43.86, "grad_norm": 0.04376598820090294, "learning_rate": 4.0588235294117644e-05, "loss": 0.0171, "step": 4912 }, { "epoch": 43.87, "grad_norm": 0.039289943873882294, "learning_rate": 4.052941176470588e-05, "loss": 0.016, "step": 4913 }, { "epoch": 43.88, "grad_norm": 0.047562599182128906, "learning_rate": 4.047058823529411e-05, "loss": 0.0181, "step": 4914 }, { "epoch": 43.88, "grad_norm": 0.04658946394920349, "learning_rate": 4.041176470588235e-05, "loss": 0.0156, "step": 4915 }, { "epoch": 43.89, "grad_norm": 0.09022916853427887, "learning_rate": 4.035294117647059e-05, "loss": 0.0194, "step": 4916 }, { "epoch": 43.9, "grad_norm": 0.03939886763691902, "learning_rate": 4.029411764705882e-05, "loss": 0.0153, "step": 4917 }, { "epoch": 43.91, "grad_norm": 0.04202469810843468, "learning_rate": 4.023529411764706e-05, "loss": 0.0193, "step": 4918 }, { "epoch": 43.92, "grad_norm": 0.04549553617835045, "learning_rate": 4.0176470588235287e-05, "loss": 0.0179, "step": 4919 }, { "epoch": 43.93, "grad_norm": 0.04405608028173447, "learning_rate": 4.011764705882353e-05, "loss": 0.0174, "step": 4920 }, { "epoch": 43.94, "grad_norm": 0.03873012959957123, "learning_rate": 4.005882352941176e-05, "loss": 0.0119, 
"step": 4921 }, { "epoch": 43.95, "grad_norm": 0.06394640356302261, "learning_rate": 3.9999999999999996e-05, "loss": 0.0202, "step": 4922 }, { "epoch": 43.96, "grad_norm": 0.07013455033302307, "learning_rate": 3.994117647058823e-05, "loss": 0.0199, "step": 4923 }, { "epoch": 43.96, "grad_norm": 0.04394387826323509, "learning_rate": 3.988235294117646e-05, "loss": 0.0177, "step": 4924 }, { "epoch": 43.97, "grad_norm": 0.04156498610973358, "learning_rate": 3.9823529411764706e-05, "loss": 0.016, "step": 4925 }, { "epoch": 43.98, "grad_norm": 0.042225249111652374, "learning_rate": 3.9764705882352936e-05, "loss": 0.0147, "step": 4926 }, { "epoch": 43.99, "grad_norm": 0.049209680408239365, "learning_rate": 3.970588235294117e-05, "loss": 0.0159, "step": 4927 }, { "epoch": 44.0, "grad_norm": 0.0688699260354042, "learning_rate": 3.964705882352941e-05, "loss": 0.0183, "step": 4928 }, { "epoch": 44.01, "grad_norm": 0.04197148606181145, "learning_rate": 3.958823529411764e-05, "loss": 0.0165, "step": 4929 }, { "epoch": 44.02, "grad_norm": 0.03934997692704201, "learning_rate": 3.952941176470588e-05, "loss": 0.015, "step": 4930 }, { "epoch": 44.03, "grad_norm": 0.04739455133676529, "learning_rate": 3.947058823529411e-05, "loss": 0.0151, "step": 4931 }, { "epoch": 44.04, "grad_norm": 0.04214252158999443, "learning_rate": 3.941176470588235e-05, "loss": 0.0153, "step": 4932 }, { "epoch": 44.04, "grad_norm": 0.046974487602710724, "learning_rate": 3.935294117647058e-05, "loss": 0.0163, "step": 4933 }, { "epoch": 44.05, "grad_norm": 0.0665048211812973, "learning_rate": 3.929411764705882e-05, "loss": 0.0185, "step": 4934 }, { "epoch": 44.06, "grad_norm": 0.06398850679397583, "learning_rate": 3.923529411764706e-05, "loss": 0.019, "step": 4935 }, { "epoch": 44.07, "grad_norm": 0.040308427065610886, "learning_rate": 3.917647058823529e-05, "loss": 0.0188, "step": 4936 }, { "epoch": 44.08, "grad_norm": 0.0501236766576767, "learning_rate": 3.9117647058823526e-05, "loss": 0.0199, "step": 4937 
}, { "epoch": 44.09, "grad_norm": 0.03701817989349365, "learning_rate": 3.9058823529411756e-05, "loss": 0.0127, "step": 4938 }, { "epoch": 44.1, "grad_norm": 0.04599235579371452, "learning_rate": 3.9e-05, "loss": 0.0167, "step": 4939 }, { "epoch": 44.11, "grad_norm": 0.0533258430659771, "learning_rate": 3.894117647058823e-05, "loss": 0.0146, "step": 4940 }, { "epoch": 44.12, "grad_norm": 0.06163892522454262, "learning_rate": 3.8882352941176466e-05, "loss": 0.0141, "step": 4941 }, { "epoch": 44.12, "grad_norm": 0.042363330721855164, "learning_rate": 3.882352941176471e-05, "loss": 0.0184, "step": 4942 }, { "epoch": 44.13, "grad_norm": 0.04036133736371994, "learning_rate": 3.876470588235294e-05, "loss": 0.0148, "step": 4943 }, { "epoch": 44.14, "grad_norm": 0.04974912479519844, "learning_rate": 3.8705882352941176e-05, "loss": 0.0186, "step": 4944 }, { "epoch": 44.15, "grad_norm": 0.04702618345618248, "learning_rate": 3.8647058823529406e-05, "loss": 0.0175, "step": 4945 }, { "epoch": 44.16, "grad_norm": 0.04483968764543533, "learning_rate": 3.858823529411764e-05, "loss": 0.0134, "step": 4946 }, { "epoch": 44.17, "grad_norm": 0.06495136022567749, "learning_rate": 3.852941176470587e-05, "loss": 0.0179, "step": 4947 }, { "epoch": 44.18, "grad_norm": 0.042255327105522156, "learning_rate": 3.8470588235294116e-05, "loss": 0.0188, "step": 4948 }, { "epoch": 44.19, "grad_norm": 0.04956101253628731, "learning_rate": 3.841176470588235e-05, "loss": 0.0183, "step": 4949 }, { "epoch": 44.2, "grad_norm": 0.04366575926542282, "learning_rate": 3.835294117647058e-05, "loss": 0.0163, "step": 4950 }, { "epoch": 44.21, "grad_norm": 0.047807127237319946, "learning_rate": 3.8294117647058826e-05, "loss": 0.0176, "step": 4951 }, { "epoch": 44.21, "grad_norm": 0.04627014324069023, "learning_rate": 3.8235294117647055e-05, "loss": 0.0122, "step": 4952 }, { "epoch": 44.22, "grad_norm": 0.07453977316617966, "learning_rate": 3.817647058823529e-05, "loss": 0.0172, "step": 4953 }, { "epoch": 44.23, 
"grad_norm": 0.03574393689632416, "learning_rate": 3.811764705882352e-05, "loss": 0.0135, "step": 4954 }, { "epoch": 44.24, "grad_norm": 0.04239301383495331, "learning_rate": 3.805882352941176e-05, "loss": 0.0141, "step": 4955 }, { "epoch": 44.25, "grad_norm": 0.04692602902650833, "learning_rate": 3.8e-05, "loss": 0.0164, "step": 4956 }, { "epoch": 44.26, "grad_norm": 0.04097558930516243, "learning_rate": 3.794117647058823e-05, "loss": 0.0155, "step": 4957 }, { "epoch": 44.27, "grad_norm": 0.04169277474284172, "learning_rate": 3.788235294117647e-05, "loss": 0.0166, "step": 4958 }, { "epoch": 44.28, "grad_norm": 0.07089413702487946, "learning_rate": 3.78235294117647e-05, "loss": 0.0184, "step": 4959 }, { "epoch": 44.29, "grad_norm": 0.05112564191222191, "learning_rate": 3.776470588235294e-05, "loss": 0.0184, "step": 4960 }, { "epoch": 44.29, "grad_norm": 0.037595827132463455, "learning_rate": 3.770588235294117e-05, "loss": 0.0152, "step": 4961 }, { "epoch": 44.3, "grad_norm": 0.03754653409123421, "learning_rate": 3.764705882352941e-05, "loss": 0.0143, "step": 4962 }, { "epoch": 44.31, "grad_norm": 0.03680499270558357, "learning_rate": 3.7588235294117645e-05, "loss": 0.0146, "step": 4963 }, { "epoch": 44.32, "grad_norm": 0.046565115451812744, "learning_rate": 3.7529411764705875e-05, "loss": 0.0142, "step": 4964 }, { "epoch": 44.33, "grad_norm": 0.06004823371767998, "learning_rate": 3.747058823529412e-05, "loss": 0.0163, "step": 4965 }, { "epoch": 44.34, "grad_norm": 0.08733204752206802, "learning_rate": 3.741176470588235e-05, "loss": 0.022, "step": 4966 }, { "epoch": 44.35, "grad_norm": 0.041879232972860336, "learning_rate": 3.7352941176470585e-05, "loss": 0.0191, "step": 4967 }, { "epoch": 44.36, "grad_norm": 0.037341900169849396, "learning_rate": 3.729411764705882e-05, "loss": 0.0152, "step": 4968 }, { "epoch": 44.37, "grad_norm": 0.04649944603443146, "learning_rate": 3.723529411764705e-05, "loss": 0.0177, "step": 4969 }, { "epoch": 44.38, "grad_norm": 
0.04623737558722496, "learning_rate": 3.717647058823529e-05, "loss": 0.0156, "step": 4970 }, { "epoch": 44.38, "grad_norm": 0.046323150396347046, "learning_rate": 3.7117647058823525e-05, "loss": 0.0126, "step": 4971 }, { "epoch": 44.39, "grad_norm": 0.06873354315757751, "learning_rate": 3.705882352941176e-05, "loss": 0.0219, "step": 4972 }, { "epoch": 44.4, "grad_norm": 0.03552534431219101, "learning_rate": 3.7e-05, "loss": 0.0153, "step": 4973 }, { "epoch": 44.41, "grad_norm": 0.04047302156686783, "learning_rate": 3.6941176470588235e-05, "loss": 0.0158, "step": 4974 }, { "epoch": 44.42, "grad_norm": 0.03792682662606239, "learning_rate": 3.6882352941176465e-05, "loss": 0.0132, "step": 4975 }, { "epoch": 44.43, "grad_norm": 0.03600406274199486, "learning_rate": 3.68235294117647e-05, "loss": 0.0142, "step": 4976 }, { "epoch": 44.44, "grad_norm": 0.0528290793299675, "learning_rate": 3.676470588235294e-05, "loss": 0.0205, "step": 4977 }, { "epoch": 44.45, "grad_norm": 0.07228340953588486, "learning_rate": 3.6705882352941175e-05, "loss": 0.0177, "step": 4978 }, { "epoch": 44.46, "grad_norm": 0.04562455415725708, "learning_rate": 3.664705882352941e-05, "loss": 0.0182, "step": 4979 }, { "epoch": 44.46, "grad_norm": 0.04094189032912254, "learning_rate": 3.658823529411765e-05, "loss": 0.0172, "step": 4980 }, { "epoch": 44.47, "grad_norm": 0.04234864190220833, "learning_rate": 3.652941176470588e-05, "loss": 0.0167, "step": 4981 }, { "epoch": 44.48, "grad_norm": 0.04225002974271774, "learning_rate": 3.6470588235294114e-05, "loss": 0.0163, "step": 4982 }, { "epoch": 44.49, "grad_norm": 0.04998965933918953, "learning_rate": 3.641176470588235e-05, "loss": 0.0146, "step": 4983 }, { "epoch": 44.5, "grad_norm": 0.09114740043878555, "learning_rate": 3.635294117647058e-05, "loss": 0.0218, "step": 4984 }, { "epoch": 44.51, "grad_norm": 0.054906509816646576, "learning_rate": 3.6294117647058824e-05, "loss": 0.0141, "step": 4985 }, { "epoch": 44.52, "grad_norm": 0.04177793860435486, 
"learning_rate": 3.6235294117647054e-05, "loss": 0.0172, "step": 4986 }, { "epoch": 44.53, "grad_norm": 0.05029189586639404, "learning_rate": 3.617647058823529e-05, "loss": 0.0178, "step": 4987 }, { "epoch": 44.54, "grad_norm": 0.04333079233765602, "learning_rate": 3.611764705882353e-05, "loss": 0.0151, "step": 4988 }, { "epoch": 44.54, "grad_norm": 0.042823683470487595, "learning_rate": 3.605882352941176e-05, "loss": 0.015, "step": 4989 }, { "epoch": 44.55, "grad_norm": 0.03932007774710655, "learning_rate": 3.5999999999999994e-05, "loss": 0.0102, "step": 4990 }, { "epoch": 44.56, "grad_norm": 0.07560725510120392, "learning_rate": 3.594117647058823e-05, "loss": 0.0208, "step": 4991 }, { "epoch": 44.57, "grad_norm": 0.03645060583949089, "learning_rate": 3.588235294117647e-05, "loss": 0.015, "step": 4992 }, { "epoch": 44.58, "grad_norm": 0.048673465847969055, "learning_rate": 3.5823529411764704e-05, "loss": 0.0171, "step": 4993 }, { "epoch": 44.59, "grad_norm": 0.04412667080760002, "learning_rate": 3.576470588235294e-05, "loss": 0.0187, "step": 4994 }, { "epoch": 44.6, "grad_norm": 0.04287470504641533, "learning_rate": 3.570588235294117e-05, "loss": 0.0162, "step": 4995 }, { "epoch": 44.61, "grad_norm": 0.03945418447256088, "learning_rate": 3.564705882352941e-05, "loss": 0.0128, "step": 4996 }, { "epoch": 44.62, "grad_norm": 0.13640552759170532, "learning_rate": 3.5588235294117644e-05, "loss": 0.0217, "step": 4997 }, { "epoch": 44.62, "grad_norm": 0.04495558887720108, "learning_rate": 3.552941176470588e-05, "loss": 0.0153, "step": 4998 }, { "epoch": 44.63, "grad_norm": 0.037331100553274155, "learning_rate": 3.547058823529412e-05, "loss": 0.0161, "step": 4999 }, { "epoch": 44.64, "grad_norm": 0.03802134841680527, "learning_rate": 3.5411764705882354e-05, "loss": 0.0168, "step": 5000 }, { "epoch": 44.64, "eval_cer": 0.03064948341244696, "eval_loss": 0.2111145257949829, "eval_runtime": 22.1584, "eval_samples_per_second": 119.232, "eval_steps_per_second": 1.895, 
"eval_wer": 0.09847282824276081, "step": 5000 }, { "epoch": 44.65, "grad_norm": 0.03596673160791397, "learning_rate": 3.5352941176470584e-05, "loss": 0.0124, "step": 5001 }, { "epoch": 44.66, "grad_norm": 0.039762191474437714, "learning_rate": 3.529411764705882e-05, "loss": 0.013, "step": 5002 }, { "epoch": 44.67, "grad_norm": 0.08294077217578888, "learning_rate": 3.523529411764706e-05, "loss": 0.0176, "step": 5003 }, { "epoch": 44.68, "grad_norm": 0.04089253768324852, "learning_rate": 3.517647058823529e-05, "loss": 0.016, "step": 5004 }, { "epoch": 44.69, "grad_norm": 0.04697546362876892, "learning_rate": 3.511764705882353e-05, "loss": 0.0185, "step": 5005 }, { "epoch": 44.7, "grad_norm": 0.039550378918647766, "learning_rate": 3.505882352941176e-05, "loss": 0.0156, "step": 5006 }, { "epoch": 44.71, "grad_norm": 0.056655146181583405, "learning_rate": 3.5e-05, "loss": 0.0157, "step": 5007 }, { "epoch": 44.71, "grad_norm": 0.05132512003183365, "learning_rate": 3.4941176470588234e-05, "loss": 0.0177, "step": 5008 }, { "epoch": 44.72, "grad_norm": 0.0632912665605545, "learning_rate": 3.4882352941176463e-05, "loss": 0.0153, "step": 5009 }, { "epoch": 44.73, "grad_norm": 0.11135727167129517, "learning_rate": 3.48235294117647e-05, "loss": 0.0232, "step": 5010 }, { "epoch": 44.74, "grad_norm": 0.03697282448410988, "learning_rate": 3.476470588235294e-05, "loss": 0.0155, "step": 5011 }, { "epoch": 44.75, "grad_norm": 0.03992462903261185, "learning_rate": 3.4705882352941173e-05, "loss": 0.0153, "step": 5012 }, { "epoch": 44.76, "grad_norm": 0.04411754757165909, "learning_rate": 3.464705882352941e-05, "loss": 0.018, "step": 5013 }, { "epoch": 44.77, "grad_norm": 0.04339717701077461, "learning_rate": 3.458823529411765e-05, "loss": 0.0156, "step": 5014 }, { "epoch": 44.78, "grad_norm": 0.04332556948065758, "learning_rate": 3.452941176470588e-05, "loss": 0.0116, "step": 5015 }, { "epoch": 44.79, "grad_norm": 0.061066947877407074, "learning_rate": 3.447058823529411e-05, "loss": 
0.0168, "step": 5016 }, { "epoch": 44.79, "grad_norm": 0.037729501724243164, "learning_rate": 3.441176470588235e-05, "loss": 0.0144, "step": 5017 }, { "epoch": 44.8, "grad_norm": 0.0411374568939209, "learning_rate": 3.4352941176470587e-05, "loss": 0.0174, "step": 5018 }, { "epoch": 44.81, "grad_norm": 0.04277394711971283, "learning_rate": 3.429411764705882e-05, "loss": 0.0155, "step": 5019 }, { "epoch": 44.82, "grad_norm": 0.03835034370422363, "learning_rate": 3.423529411764706e-05, "loss": 0.0134, "step": 5020 }, { "epoch": 44.83, "grad_norm": 0.0481446236371994, "learning_rate": 3.417647058823529e-05, "loss": 0.016, "step": 5021 }, { "epoch": 44.84, "grad_norm": 0.07615407556295395, "learning_rate": 3.4117647058823526e-05, "loss": 0.0232, "step": 5022 }, { "epoch": 44.85, "grad_norm": 0.040802374482154846, "learning_rate": 3.405882352941176e-05, "loss": 0.0155, "step": 5023 }, { "epoch": 44.86, "grad_norm": 0.04113549739122391, "learning_rate": 3.399999999999999e-05, "loss": 0.0163, "step": 5024 }, { "epoch": 44.87, "grad_norm": 0.039817169308662415, "learning_rate": 3.3941176470588236e-05, "loss": 0.0143, "step": 5025 }, { "epoch": 44.88, "grad_norm": 0.04265448823571205, "learning_rate": 3.3882352941176466e-05, "loss": 0.0161, "step": 5026 }, { "epoch": 44.88, "grad_norm": 0.05202582851052284, "learning_rate": 3.38235294117647e-05, "loss": 0.017, "step": 5027 }, { "epoch": 44.89, "grad_norm": 0.09392998367547989, "learning_rate": 3.376470588235294e-05, "loss": 0.0193, "step": 5028 }, { "epoch": 44.9, "grad_norm": 0.037095922976732254, "learning_rate": 3.3705882352941176e-05, "loss": 0.0149, "step": 5029 }, { "epoch": 44.91, "grad_norm": 0.04058606177568436, "learning_rate": 3.3647058823529406e-05, "loss": 0.0171, "step": 5030 }, { "epoch": 44.92, "grad_norm": 0.045299794524908066, "learning_rate": 3.358823529411764e-05, "loss": 0.0166, "step": 5031 }, { "epoch": 44.93, "grad_norm": 0.04051448032259941, "learning_rate": 3.352941176470588e-05, "loss": 0.0155, 
"step": 5032 }, { "epoch": 44.94, "grad_norm": 0.048243798315525055, "learning_rate": 3.3470588235294116e-05, "loss": 0.0169, "step": 5033 }, { "epoch": 44.95, "grad_norm": 0.07924217730760574, "learning_rate": 3.341176470588235e-05, "loss": 0.0157, "step": 5034 }, { "epoch": 44.96, "grad_norm": 0.06391122937202454, "learning_rate": 3.335294117647058e-05, "loss": 0.0173, "step": 5035 }, { "epoch": 44.96, "grad_norm": 0.039239589124917984, "learning_rate": 3.329411764705882e-05, "loss": 0.0164, "step": 5036 }, { "epoch": 44.97, "grad_norm": 0.04166821390390396, "learning_rate": 3.3235294117647056e-05, "loss": 0.0129, "step": 5037 }, { "epoch": 44.98, "grad_norm": 0.04653739556670189, "learning_rate": 3.317647058823529e-05, "loss": 0.0143, "step": 5038 }, { "epoch": 44.99, "grad_norm": 0.04925594851374626, "learning_rate": 3.311764705882353e-05, "loss": 0.0145, "step": 5039 }, { "epoch": 45.0, "grad_norm": 0.05927234888076782, "learning_rate": 3.3058823529411766e-05, "loss": 0.0199, "step": 5040 }, { "epoch": 45.01, "grad_norm": 0.037993013858795166, "learning_rate": 3.2999999999999996e-05, "loss": 0.0177, "step": 5041 }, { "epoch": 45.02, "grad_norm": 0.03294490650296211, "learning_rate": 3.294117647058823e-05, "loss": 0.0122, "step": 5042 }, { "epoch": 45.03, "grad_norm": 0.035598304122686386, "learning_rate": 3.288235294117647e-05, "loss": 0.0148, "step": 5043 }, { "epoch": 45.04, "grad_norm": 0.04196874424815178, "learning_rate": 3.28235294117647e-05, "loss": 0.0148, "step": 5044 }, { "epoch": 45.04, "grad_norm": 0.04705050215125084, "learning_rate": 3.276470588235294e-05, "loss": 0.0162, "step": 5045 }, { "epoch": 45.05, "grad_norm": 0.06958391517400742, "learning_rate": 3.270588235294117e-05, "loss": 0.0206, "step": 5046 }, { "epoch": 45.06, "grad_norm": 0.054922085255384445, "learning_rate": 3.264705882352941e-05, "loss": 0.0195, "step": 5047 }, { "epoch": 45.07, "grad_norm": 0.03778674080967903, "learning_rate": 3.2588235294117646e-05, "loss": 0.0147, "step": 
5048 }, { "epoch": 45.08, "grad_norm": 0.042429905384778976, "learning_rate": 3.252941176470588e-05, "loss": 0.0152, "step": 5049 }, { "epoch": 45.09, "grad_norm": 0.03815051540732384, "learning_rate": 3.247058823529411e-05, "loss": 0.0131, "step": 5050 }, { "epoch": 45.1, "grad_norm": 0.0463109090924263, "learning_rate": 3.241176470588235e-05, "loss": 0.0158, "step": 5051 }, { "epoch": 45.11, "grad_norm": 0.062121860682964325, "learning_rate": 3.2352941176470585e-05, "loss": 0.0204, "step": 5052 }, { "epoch": 45.12, "grad_norm": 0.07599413394927979, "learning_rate": 3.229411764705882e-05, "loss": 0.0181, "step": 5053 }, { "epoch": 45.12, "grad_norm": 0.0417756550014019, "learning_rate": 3.223529411764706e-05, "loss": 0.0146, "step": 5054 }, { "epoch": 45.13, "grad_norm": 0.039690159261226654, "learning_rate": 3.217647058823529e-05, "loss": 0.016, "step": 5055 }, { "epoch": 45.14, "grad_norm": 0.03887281194329262, "learning_rate": 3.2117647058823525e-05, "loss": 0.0154, "step": 5056 }, { "epoch": 45.15, "grad_norm": 0.050522856414318085, "learning_rate": 3.205882352941176e-05, "loss": 0.0173, "step": 5057 }, { "epoch": 45.16, "grad_norm": 0.04674147441983223, "learning_rate": 3.2e-05, "loss": 0.0145, "step": 5058 }, { "epoch": 45.17, "grad_norm": 0.0707877054810524, "learning_rate": 3.1941176470588235e-05, "loss": 0.0157, "step": 5059 }, { "epoch": 45.18, "grad_norm": 0.03815138339996338, "learning_rate": 3.188235294117647e-05, "loss": 0.0171, "step": 5060 }, { "epoch": 45.19, "grad_norm": 0.038219183683395386, "learning_rate": 3.18235294117647e-05, "loss": 0.016, "step": 5061 }, { "epoch": 45.2, "grad_norm": 0.045984767377376556, "learning_rate": 3.176470588235294e-05, "loss": 0.0177, "step": 5062 }, { "epoch": 45.21, "grad_norm": 0.043422240763902664, "learning_rate": 3.1705882352941175e-05, "loss": 0.0139, "step": 5063 }, { "epoch": 45.21, "grad_norm": 0.04400533810257912, "learning_rate": 3.1647058823529405e-05, "loss": 0.014, "step": 5064 }, { "epoch": 45.22, 
"grad_norm": 0.07776004076004028, "learning_rate": 3.158823529411765e-05, "loss": 0.0199, "step": 5065 }, { "epoch": 45.23, "grad_norm": 0.04204792156815529, "learning_rate": 3.152941176470588e-05, "loss": 0.0164, "step": 5066 }, { "epoch": 45.24, "grad_norm": 0.0436769463121891, "learning_rate": 3.1470588235294115e-05, "loss": 0.0174, "step": 5067 }, { "epoch": 45.25, "grad_norm": 0.04520263895392418, "learning_rate": 3.141176470588235e-05, "loss": 0.0156, "step": 5068 }, { "epoch": 45.26, "grad_norm": 0.037524010986089706, "learning_rate": 3.135294117647059e-05, "loss": 0.0144, "step": 5069 }, { "epoch": 45.27, "grad_norm": 0.04929890111088753, "learning_rate": 3.129411764705882e-05, "loss": 0.0156, "step": 5070 }, { "epoch": 45.28, "grad_norm": 0.06272881478071213, "learning_rate": 3.1235294117647055e-05, "loss": 0.0171, "step": 5071 }, { "epoch": 45.29, "grad_norm": 0.04566546902060509, "learning_rate": 3.117647058823529e-05, "loss": 0.0154, "step": 5072 }, { "epoch": 45.29, "grad_norm": 0.03950928524136543, "learning_rate": 3.111764705882353e-05, "loss": 0.0153, "step": 5073 }, { "epoch": 45.3, "grad_norm": 0.03667450323700905, "learning_rate": 3.1058823529411765e-05, "loss": 0.0128, "step": 5074 }, { "epoch": 45.31, "grad_norm": 0.046285588294267654, "learning_rate": 3.0999999999999995e-05, "loss": 0.0217, "step": 5075 }, { "epoch": 45.32, "grad_norm": 0.04641619697213173, "learning_rate": 3.094117647058823e-05, "loss": 0.0164, "step": 5076 }, { "epoch": 45.33, "grad_norm": 0.0505821518599987, "learning_rate": 3.088235294117647e-05, "loss": 0.0163, "step": 5077 }, { "epoch": 45.34, "grad_norm": 0.06749798357486725, "learning_rate": 3.0823529411764705e-05, "loss": 0.016, "step": 5078 }, { "epoch": 45.35, "grad_norm": 0.04133579507470131, "learning_rate": 3.076470588235294e-05, "loss": 0.0134, "step": 5079 }, { "epoch": 45.36, "grad_norm": 0.04400995746254921, "learning_rate": 3.070588235294118e-05, "loss": 0.0161, "step": 5080 }, { "epoch": 45.37, "grad_norm": 
0.04433593526482582, "learning_rate": 3.064705882352941e-05, "loss": 0.0146, "step": 5081 }, { "epoch": 45.38, "grad_norm": 0.04578491300344467, "learning_rate": 3.0588235294117644e-05, "loss": 0.0167, "step": 5082 }, { "epoch": 45.38, "grad_norm": 0.04872238263487816, "learning_rate": 3.052941176470588e-05, "loss": 0.017, "step": 5083 }, { "epoch": 45.39, "grad_norm": 0.08894068002700806, "learning_rate": 3.0470588235294114e-05, "loss": 0.021, "step": 5084 }, { "epoch": 45.4, "grad_norm": 0.037841398268938065, "learning_rate": 3.041176470588235e-05, "loss": 0.0151, "step": 5085 }, { "epoch": 45.41, "grad_norm": 0.041993726044893265, "learning_rate": 3.0352941176470588e-05, "loss": 0.0161, "step": 5086 }, { "epoch": 45.42, "grad_norm": 0.04302545636892319, "learning_rate": 3.029411764705882e-05, "loss": 0.0161, "step": 5087 }, { "epoch": 45.43, "grad_norm": 0.045309703797101974, "learning_rate": 3.0235294117647058e-05, "loss": 0.0129, "step": 5088 }, { "epoch": 45.44, "grad_norm": 0.060696911066770554, "learning_rate": 3.017647058823529e-05, "loss": 0.0213, "step": 5089 }, { "epoch": 45.45, "grad_norm": 0.07425782829523087, "learning_rate": 3.0117647058823524e-05, "loss": 0.0193, "step": 5090 }, { "epoch": 45.46, "grad_norm": 0.03925832360982895, "learning_rate": 3.005882352941176e-05, "loss": 0.0155, "step": 5091 }, { "epoch": 45.46, "grad_norm": 0.039438169449567795, "learning_rate": 2.9999999999999997e-05, "loss": 0.0146, "step": 5092 }, { "epoch": 45.47, "grad_norm": 0.03728676959872246, "learning_rate": 2.9941176470588234e-05, "loss": 0.0121, "step": 5093 }, { "epoch": 45.48, "grad_norm": 0.043770913034677505, "learning_rate": 2.9882352941176467e-05, "loss": 0.0164, "step": 5094 }, { "epoch": 45.49, "grad_norm": 0.06378931552171707, "learning_rate": 2.9823529411764704e-05, "loss": 0.017, "step": 5095 }, { "epoch": 45.5, "grad_norm": 0.06921689212322235, "learning_rate": 2.9764705882352937e-05, "loss": 0.0192, "step": 5096 }, { "epoch": 45.51, "grad_norm": 
0.06097017973661423, "learning_rate": 2.9705882352941174e-05, "loss": 0.0173, "step": 5097 }, { "epoch": 45.52, "grad_norm": 0.036359891295433044, "learning_rate": 2.9647058823529407e-05, "loss": 0.0157, "step": 5098 }, { "epoch": 45.53, "grad_norm": 0.04372749477624893, "learning_rate": 2.9588235294117647e-05, "loss": 0.0145, "step": 5099 }, { "epoch": 45.54, "grad_norm": 0.039443690329790115, "learning_rate": 2.952941176470588e-05, "loss": 0.015, "step": 5100 }, { "epoch": 45.54, "eval_cer": 0.03175068496239754, "eval_loss": 0.21100349724292755, "eval_runtime": 22.3023, "eval_samples_per_second": 118.463, "eval_steps_per_second": 1.883, "eval_wer": 0.10027766759222531, "step": 5100 }, { "epoch": 45.54, "grad_norm": 0.04158908501267433, "learning_rate": 2.9470588235294114e-05, "loss": 0.0124, "step": 5101 }, { "epoch": 45.55, "grad_norm": 0.049603141844272614, "learning_rate": 2.941176470588235e-05, "loss": 0.0155, "step": 5102 }, { "epoch": 45.56, "grad_norm": 0.06320662796497345, "learning_rate": 2.9352941176470584e-05, "loss": 0.0202, "step": 5103 }, { "epoch": 45.57, "grad_norm": 0.04063093662261963, "learning_rate": 2.929411764705882e-05, "loss": 0.0139, "step": 5104 }, { "epoch": 45.58, "grad_norm": 0.038668982684612274, "learning_rate": 2.9235294117647057e-05, "loss": 0.0148, "step": 5105 }, { "epoch": 45.59, "grad_norm": 0.03724049776792526, "learning_rate": 2.9176470588235294e-05, "loss": 0.0127, "step": 5106 }, { "epoch": 45.6, "grad_norm": 0.04132268950343132, "learning_rate": 2.9117647058823527e-05, "loss": 0.0144, "step": 5107 }, { "epoch": 45.61, "grad_norm": 0.05094775930047035, "learning_rate": 2.9058823529411764e-05, "loss": 0.0143, "step": 5108 }, { "epoch": 45.62, "grad_norm": 0.09159460663795471, "learning_rate": 2.8999999999999997e-05, "loss": 0.0227, "step": 5109 }, { "epoch": 45.62, "grad_norm": 0.034520480781793594, "learning_rate": 2.894117647058823e-05, "loss": 0.0129, "step": 5110 }, { "epoch": 45.63, "grad_norm": 0.0377240888774395, 
"learning_rate": 2.8882352941176467e-05, "loss": 0.0165, "step": 5111 }, { "epoch": 45.64, "grad_norm": 0.040630314499139786, "learning_rate": 2.8823529411764703e-05, "loss": 0.0158, "step": 5112 }, { "epoch": 45.65, "grad_norm": 0.04070843383669853, "learning_rate": 2.876470588235294e-05, "loss": 0.0131, "step": 5113 }, { "epoch": 45.66, "grad_norm": 0.048122283071279526, "learning_rate": 2.8705882352941173e-05, "loss": 0.0172, "step": 5114 }, { "epoch": 45.67, "grad_norm": 0.08413251489400864, "learning_rate": 2.864705882352941e-05, "loss": 0.022, "step": 5115 }, { "epoch": 45.68, "grad_norm": 0.03990272432565689, "learning_rate": 2.8588235294117643e-05, "loss": 0.0168, "step": 5116 }, { "epoch": 45.69, "grad_norm": 0.04196707159280777, "learning_rate": 2.852941176470588e-05, "loss": 0.0154, "step": 5117 }, { "epoch": 45.7, "grad_norm": 0.03930666670203209, "learning_rate": 2.8470588235294113e-05, "loss": 0.0135, "step": 5118 }, { "epoch": 45.71, "grad_norm": 0.040444232523441315, "learning_rate": 2.8411764705882353e-05, "loss": 0.0146, "step": 5119 }, { "epoch": 45.71, "grad_norm": 0.04675314202904701, "learning_rate": 2.8352941176470586e-05, "loss": 0.0173, "step": 5120 }, { "epoch": 45.72, "grad_norm": 0.06005815044045448, "learning_rate": 2.829411764705882e-05, "loss": 0.0172, "step": 5121 }, { "epoch": 45.73, "grad_norm": 0.06058090925216675, "learning_rate": 2.8235294117647056e-05, "loss": 0.0159, "step": 5122 }, { "epoch": 45.74, "grad_norm": 0.04015067592263222, "learning_rate": 2.817647058823529e-05, "loss": 0.0156, "step": 5123 }, { "epoch": 45.75, "grad_norm": 0.04325965791940689, "learning_rate": 2.8117647058823526e-05, "loss": 0.0181, "step": 5124 }, { "epoch": 45.76, "grad_norm": 0.04603307694196701, "learning_rate": 2.8058823529411763e-05, "loss": 0.0144, "step": 5125 }, { "epoch": 45.77, "grad_norm": 0.04270898178219795, "learning_rate": 2.8e-05, "loss": 0.0151, "step": 5126 }, { "epoch": 45.78, "grad_norm": 0.045705217868089676, "learning_rate": 
2.7941176470588233e-05, "loss": 0.0131, "step": 5127 }, { "epoch": 45.79, "grad_norm": 0.08076942712068558, "learning_rate": 2.788235294117647e-05, "loss": 0.0212, "step": 5128 }, { "epoch": 45.79, "grad_norm": 0.03954634815454483, "learning_rate": 2.7823529411764703e-05, "loss": 0.0172, "step": 5129 }, { "epoch": 45.8, "grad_norm": 0.0347314290702343, "learning_rate": 2.7764705882352936e-05, "loss": 0.0141, "step": 5130 }, { "epoch": 45.81, "grad_norm": 0.04443887993693352, "learning_rate": 2.7705882352941173e-05, "loss": 0.0152, "step": 5131 }, { "epoch": 45.82, "grad_norm": 0.04813896119594574, "learning_rate": 2.7647058823529413e-05, "loss": 0.0147, "step": 5132 }, { "epoch": 45.83, "grad_norm": 0.04440528526902199, "learning_rate": 2.7588235294117646e-05, "loss": 0.0146, "step": 5133 }, { "epoch": 45.84, "grad_norm": 0.06592968106269836, "learning_rate": 2.752941176470588e-05, "loss": 0.0177, "step": 5134 }, { "epoch": 45.85, "grad_norm": 0.04435674101114273, "learning_rate": 2.7470588235294116e-05, "loss": 0.0153, "step": 5135 }, { "epoch": 45.86, "grad_norm": 0.039653025567531586, "learning_rate": 2.741176470588235e-05, "loss": 0.0139, "step": 5136 }, { "epoch": 45.87, "grad_norm": 0.04258428514003754, "learning_rate": 2.7352941176470586e-05, "loss": 0.0142, "step": 5137 }, { "epoch": 45.88, "grad_norm": 0.04643780365586281, "learning_rate": 2.729411764705882e-05, "loss": 0.0152, "step": 5138 }, { "epoch": 45.88, "grad_norm": 0.04483505338430405, "learning_rate": 2.723529411764706e-05, "loss": 0.0164, "step": 5139 }, { "epoch": 45.89, "grad_norm": 0.06826629489660263, "learning_rate": 2.7176470588235292e-05, "loss": 0.0158, "step": 5140 }, { "epoch": 45.9, "grad_norm": 0.033806659281253815, "learning_rate": 2.7117647058823526e-05, "loss": 0.0147, "step": 5141 }, { "epoch": 45.91, "grad_norm": 0.04065383970737457, "learning_rate": 2.7058823529411762e-05, "loss": 0.0169, "step": 5142 }, { "epoch": 45.92, "grad_norm": 0.04558153077960014, "learning_rate": 
2.6999999999999996e-05, "loss": 0.0182, "step": 5143 }, { "epoch": 45.93, "grad_norm": 0.042024046182632446, "learning_rate": 2.6941176470588232e-05, "loss": 0.0152, "step": 5144 }, { "epoch": 45.94, "grad_norm": 0.04814515262842178, "learning_rate": 2.688235294117647e-05, "loss": 0.0163, "step": 5145 }, { "epoch": 45.95, "grad_norm": 0.06419163197278976, "learning_rate": 2.6823529411764706e-05, "loss": 0.0151, "step": 5146 }, { "epoch": 45.96, "grad_norm": 0.08004485070705414, "learning_rate": 2.676470588235294e-05, "loss": 0.0211, "step": 5147 }, { "epoch": 45.96, "grad_norm": 0.0423152782022953, "learning_rate": 2.6705882352941175e-05, "loss": 0.0177, "step": 5148 }, { "epoch": 45.97, "grad_norm": 0.03929867595434189, "learning_rate": 2.664705882352941e-05, "loss": 0.014, "step": 5149 }, { "epoch": 45.98, "grad_norm": 0.04568082094192505, "learning_rate": 2.6588235294117642e-05, "loss": 0.0157, "step": 5150 }, { "epoch": 45.99, "grad_norm": 0.05054107680916786, "learning_rate": 2.652941176470588e-05, "loss": 0.0148, "step": 5151 }, { "epoch": 46.0, "grad_norm": 0.06225099042057991, "learning_rate": 2.647058823529412e-05, "loss": 0.0181, "step": 5152 }, { "epoch": 46.01, "grad_norm": 0.037778183817863464, "learning_rate": 2.6411764705882352e-05, "loss": 0.0152, "step": 5153 }, { "epoch": 46.02, "grad_norm": 0.035917848348617554, "learning_rate": 2.6352941176470585e-05, "loss": 0.0148, "step": 5154 }, { "epoch": 46.03, "grad_norm": 0.03493110090494156, "learning_rate": 2.6294117647058822e-05, "loss": 0.0131, "step": 5155 }, { "epoch": 46.04, "grad_norm": 0.04298359900712967, "learning_rate": 2.6235294117647055e-05, "loss": 0.0157, "step": 5156 }, { "epoch": 46.04, "grad_norm": 0.0488450862467289, "learning_rate": 2.6176470588235292e-05, "loss": 0.0162, "step": 5157 }, { "epoch": 46.05, "grad_norm": 0.0724613144993782, "learning_rate": 2.6117647058823525e-05, "loss": 0.0169, "step": 5158 }, { "epoch": 46.06, "grad_norm": 0.05420735850930214, "learning_rate": 
2.6058823529411765e-05, "loss": 0.0166, "step": 5159 }, { "epoch": 46.07, "grad_norm": 0.039159055799245834, "learning_rate": 2.6e-05, "loss": 0.0176, "step": 5160 }, { "epoch": 46.08, "grad_norm": 0.04124021530151367, "learning_rate": 2.594117647058823e-05, "loss": 0.0169, "step": 5161 }, { "epoch": 46.09, "grad_norm": 0.041537050157785416, "learning_rate": 2.588235294117647e-05, "loss": 0.015, "step": 5162 }, { "epoch": 46.1, "grad_norm": 0.042550474405288696, "learning_rate": 2.58235294117647e-05, "loss": 0.0154, "step": 5163 }, { "epoch": 46.11, "grad_norm": 0.05221645161509514, "learning_rate": 2.5764705882352938e-05, "loss": 0.0127, "step": 5164 }, { "epoch": 46.12, "grad_norm": 0.08014420419931412, "learning_rate": 2.5705882352941175e-05, "loss": 0.0252, "step": 5165 }, { "epoch": 46.12, "grad_norm": 0.03902066871523857, "learning_rate": 2.564705882352941e-05, "loss": 0.0163, "step": 5166 }, { "epoch": 46.13, "grad_norm": 0.03881099820137024, "learning_rate": 2.5588235294117645e-05, "loss": 0.0153, "step": 5167 }, { "epoch": 46.14, "grad_norm": 0.03553776443004608, "learning_rate": 2.552941176470588e-05, "loss": 0.0132, "step": 5168 }, { "epoch": 46.15, "grad_norm": 0.042607516050338745, "learning_rate": 2.5470588235294115e-05, "loss": 0.0135, "step": 5169 }, { "epoch": 46.16, "grad_norm": 0.05033285915851593, "learning_rate": 2.5411764705882348e-05, "loss": 0.013, "step": 5170 }, { "epoch": 46.17, "grad_norm": 0.06352284550666809, "learning_rate": 2.5352941176470585e-05, "loss": 0.0146, "step": 5171 }, { "epoch": 46.18, "grad_norm": 0.035775329917669296, "learning_rate": 2.5294117647058825e-05, "loss": 0.0147, "step": 5172 }, { "epoch": 46.19, "grad_norm": 0.04284421727061272, "learning_rate": 2.5235294117647058e-05, "loss": 0.0138, "step": 5173 }, { "epoch": 46.2, "grad_norm": 0.04920913651585579, "learning_rate": 2.517647058823529e-05, "loss": 0.016, "step": 5174 }, { "epoch": 46.21, "grad_norm": 0.046024713665246964, "learning_rate": 
2.5117647058823528e-05, "loss": 0.0166, "step": 5175 }, { "epoch": 46.21, "grad_norm": 0.047369956970214844, "learning_rate": 2.505882352941176e-05, "loss": 0.0148, "step": 5176 }, { "epoch": 46.22, "grad_norm": 0.09221918135881424, "learning_rate": 2.4999999999999998e-05, "loss": 0.0161, "step": 5177 }, { "epoch": 46.23, "grad_norm": 0.048006605356931686, "learning_rate": 2.494117647058823e-05, "loss": 0.0161, "step": 5178 }, { "epoch": 46.24, "grad_norm": 0.038232386112213135, "learning_rate": 2.488235294117647e-05, "loss": 0.0141, "step": 5179 }, { "epoch": 46.25, "grad_norm": 0.046028390526771545, "learning_rate": 2.4823529411764704e-05, "loss": 0.0182, "step": 5180 }, { "epoch": 46.26, "grad_norm": 0.039989035576581955, "learning_rate": 2.4764705882352938e-05, "loss": 0.0137, "step": 5181 }, { "epoch": 46.27, "grad_norm": 0.05071651563048363, "learning_rate": 2.4705882352941174e-05, "loss": 0.0146, "step": 5182 }, { "epoch": 46.28, "grad_norm": 0.07601336389780045, "learning_rate": 2.4647058823529408e-05, "loss": 0.0183, "step": 5183 }, { "epoch": 46.29, "grad_norm": 0.10517021268606186, "learning_rate": 2.4588235294117644e-05, "loss": 0.0192, "step": 5184 }, { "epoch": 46.29, "grad_norm": 0.03758545219898224, "learning_rate": 2.452941176470588e-05, "loss": 0.0151, "step": 5185 }, { "epoch": 46.3, "grad_norm": 0.04599851742386818, "learning_rate": 2.4470588235294118e-05, "loss": 0.0161, "step": 5186 }, { "epoch": 46.31, "grad_norm": 0.048986662179231644, "learning_rate": 2.441176470588235e-05, "loss": 0.0181, "step": 5187 }, { "epoch": 46.32, "grad_norm": 0.044982846826314926, "learning_rate": 2.4352941176470587e-05, "loss": 0.0162, "step": 5188 }, { "epoch": 46.33, "grad_norm": 0.05033083260059357, "learning_rate": 2.429411764705882e-05, "loss": 0.0168, "step": 5189 }, { "epoch": 46.34, "grad_norm": 0.07366334646940231, "learning_rate": 2.4235294117647054e-05, "loss": 0.0224, "step": 5190 }, { "epoch": 46.35, "grad_norm": 0.038727469742298126, 
"learning_rate": 2.417647058823529e-05, "loss": 0.0176, "step": 5191 }, { "epoch": 46.36, "grad_norm": 0.04443450644612312, "learning_rate": 2.411764705882353e-05, "loss": 0.018, "step": 5192 }, { "epoch": 46.37, "grad_norm": 0.03861584886908531, "learning_rate": 2.4058823529411764e-05, "loss": 0.0179, "step": 5193 }, { "epoch": 46.38, "grad_norm": 0.04566491022706032, "learning_rate": 2.3999999999999997e-05, "loss": 0.0154, "step": 5194 }, { "epoch": 46.38, "grad_norm": 0.052542828023433685, "learning_rate": 2.3941176470588234e-05, "loss": 0.0164, "step": 5195 }, { "epoch": 46.39, "grad_norm": 0.07896377891302109, "learning_rate": 2.3882352941176467e-05, "loss": 0.0181, "step": 5196 }, { "epoch": 46.4, "grad_norm": 0.040415626019239426, "learning_rate": 2.3823529411764704e-05, "loss": 0.0175, "step": 5197 }, { "epoch": 46.41, "grad_norm": 0.040500037372112274, "learning_rate": 2.3764705882352937e-05, "loss": 0.0162, "step": 5198 }, { "epoch": 46.42, "grad_norm": 0.040322475135326385, "learning_rate": 2.3705882352941177e-05, "loss": 0.0212, "step": 5199 }, { "epoch": 46.43, "grad_norm": 0.04169744998216629, "learning_rate": 2.364705882352941e-05, "loss": 0.0126, "step": 5200 }, { "epoch": 46.43, "eval_cer": 0.031855919240208516, "eval_loss": 0.20860451459884644, "eval_runtime": 22.948, "eval_samples_per_second": 115.13, "eval_steps_per_second": 1.83, "eval_wer": 0.09986116620388735, "step": 5200 }, { "epoch": 46.44, "grad_norm": 0.04425807297229767, "learning_rate": 2.3588235294117644e-05, "loss": 0.0138, "step": 5201 }, { "epoch": 46.45, "grad_norm": 0.07776098698377609, "learning_rate": 2.352941176470588e-05, "loss": 0.0187, "step": 5202 }, { "epoch": 46.46, "grad_norm": 0.045044541358947754, "learning_rate": 2.3470588235294114e-05, "loss": 0.0165, "step": 5203 }, { "epoch": 46.46, "grad_norm": 0.03875579312443733, "learning_rate": 2.341176470588235e-05, "loss": 0.016, "step": 5204 }, { "epoch": 46.47, "grad_norm": 0.04249643534421921, "learning_rate": 
2.3352941176470587e-05, "loss": 0.0161, "step": 5205 }, { "epoch": 46.48, "grad_norm": 0.044398561120033264, "learning_rate": 2.3294117647058824e-05, "loss": 0.0141, "step": 5206 }, { "epoch": 46.49, "grad_norm": 0.04499765485525131, "learning_rate": 2.3235294117647057e-05, "loss": 0.0168, "step": 5207 }, { "epoch": 46.5, "grad_norm": 0.054214634001255035, "learning_rate": 2.3176470588235293e-05, "loss": 0.0179, "step": 5208 }, { "epoch": 46.51, "grad_norm": 0.06382989138364792, "learning_rate": 2.3117647058823527e-05, "loss": 0.0171, "step": 5209 }, { "epoch": 46.52, "grad_norm": 0.041569650173187256, "learning_rate": 2.305882352941176e-05, "loss": 0.0162, "step": 5210 }, { "epoch": 46.53, "grad_norm": 0.04001566022634506, "learning_rate": 2.2999999999999997e-05, "loss": 0.0159, "step": 5211 }, { "epoch": 46.54, "grad_norm": 0.03527286276221275, "learning_rate": 2.2941176470588237e-05, "loss": 0.0127, "step": 5212 }, { "epoch": 46.54, "grad_norm": 0.04458502307534218, "learning_rate": 2.288235294117647e-05, "loss": 0.0135, "step": 5213 }, { "epoch": 46.55, "grad_norm": 0.04917190596461296, "learning_rate": 2.2823529411764703e-05, "loss": 0.0151, "step": 5214 }, { "epoch": 46.56, "grad_norm": 0.0713915154337883, "learning_rate": 2.276470588235294e-05, "loss": 0.0164, "step": 5215 }, { "epoch": 46.57, "grad_norm": 0.038980573415756226, "learning_rate": 2.2705882352941173e-05, "loss": 0.016, "step": 5216 }, { "epoch": 46.58, "grad_norm": 0.03902507573366165, "learning_rate": 2.264705882352941e-05, "loss": 0.0142, "step": 5217 }, { "epoch": 46.59, "grad_norm": 0.04390474408864975, "learning_rate": 2.2588235294117643e-05, "loss": 0.016, "step": 5218 }, { "epoch": 46.6, "grad_norm": 0.04564650356769562, "learning_rate": 2.2529411764705883e-05, "loss": 0.0168, "step": 5219 }, { "epoch": 46.61, "grad_norm": 0.06496992707252502, "learning_rate": 2.2470588235294116e-05, "loss": 0.0173, "step": 5220 }, { "epoch": 46.62, "grad_norm": 0.13928504288196564, "learning_rate": 
2.241176470588235e-05, "loss": 0.0202, "step": 5221 }, { "epoch": 46.62, "grad_norm": 0.03780483081936836, "learning_rate": 2.2352941176470586e-05, "loss": 0.0163, "step": 5222 }, { "epoch": 46.63, "grad_norm": 0.03962361067533493, "learning_rate": 2.229411764705882e-05, "loss": 0.0153, "step": 5223 }, { "epoch": 46.64, "grad_norm": 0.04358210042119026, "learning_rate": 2.2235294117647056e-05, "loss": 0.0145, "step": 5224 }, { "epoch": 46.65, "grad_norm": 0.04540077596902847, "learning_rate": 2.2176470588235293e-05, "loss": 0.016, "step": 5225 }, { "epoch": 46.66, "grad_norm": 0.04471196234226227, "learning_rate": 2.211764705882353e-05, "loss": 0.0133, "step": 5226 }, { "epoch": 46.67, "grad_norm": 0.10236772894859314, "learning_rate": 2.2058823529411763e-05, "loss": 0.0218, "step": 5227 }, { "epoch": 46.68, "grad_norm": 0.039916884154081345, "learning_rate": 2.2e-05, "loss": 0.0169, "step": 5228 }, { "epoch": 46.69, "grad_norm": 0.034644100815057755, "learning_rate": 2.1941176470588233e-05, "loss": 0.0129, "step": 5229 }, { "epoch": 46.7, "grad_norm": 0.044551361352205276, "learning_rate": 2.1882352941176466e-05, "loss": 0.0164, "step": 5230 }, { "epoch": 46.71, "grad_norm": 0.040567588061094284, "learning_rate": 2.1823529411764703e-05, "loss": 0.0146, "step": 5231 }, { "epoch": 46.71, "grad_norm": 0.046880677342414856, "learning_rate": 2.1764705882352943e-05, "loss": 0.017, "step": 5232 }, { "epoch": 46.72, "grad_norm": 0.08010052144527435, "learning_rate": 2.1705882352941176e-05, "loss": 0.0192, "step": 5233 }, { "epoch": 46.73, "grad_norm": 0.13500761985778809, "learning_rate": 2.164705882352941e-05, "loss": 0.0183, "step": 5234 }, { "epoch": 46.74, "grad_norm": 0.03708220645785332, "learning_rate": 2.1588235294117646e-05, "loss": 0.0165, "step": 5235 }, { "epoch": 46.75, "grad_norm": 0.03563365340232849, "learning_rate": 2.152941176470588e-05, "loss": 0.0154, "step": 5236 }, { "epoch": 46.76, "grad_norm": 0.04525330662727356, "learning_rate": 
2.1470588235294116e-05, "loss": 0.0159, "step": 5237 }, { "epoch": 46.77, "grad_norm": 0.0355866402387619, "learning_rate": 2.141176470588235e-05, "loss": 0.0126, "step": 5238 }, { "epoch": 46.78, "grad_norm": 0.046234723180532455, "learning_rate": 2.135294117647059e-05, "loss": 0.0143, "step": 5239 }, { "epoch": 46.79, "grad_norm": 0.06242381036281586, "learning_rate": 2.1294117647058822e-05, "loss": 0.0146, "step": 5240 }, { "epoch": 46.79, "grad_norm": 0.037977006286382675, "learning_rate": 2.1235294117647056e-05, "loss": 0.016, "step": 5241 }, { "epoch": 46.8, "grad_norm": 0.03535516560077667, "learning_rate": 2.1176470588235292e-05, "loss": 0.0137, "step": 5242 }, { "epoch": 46.81, "grad_norm": 0.039125002920627594, "learning_rate": 2.1117647058823526e-05, "loss": 0.0144, "step": 5243 }, { "epoch": 46.82, "grad_norm": 0.04807785898447037, "learning_rate": 2.1058823529411762e-05, "loss": 0.0156, "step": 5244 }, { "epoch": 46.83, "grad_norm": 0.06447841972112656, "learning_rate": 2.1e-05, "loss": 0.0191, "step": 5245 }, { "epoch": 46.84, "grad_norm": 0.0857747420668602, "learning_rate": 2.0941176470588235e-05, "loss": 0.0244, "step": 5246 }, { "epoch": 46.85, "grad_norm": 0.038232676684856415, "learning_rate": 2.088235294117647e-05, "loss": 0.0158, "step": 5247 }, { "epoch": 46.86, "grad_norm": 0.040532663464546204, "learning_rate": 2.0823529411764705e-05, "loss": 0.0155, "step": 5248 }, { "epoch": 46.87, "grad_norm": 0.04061966761946678, "learning_rate": 2.076470588235294e-05, "loss": 0.0139, "step": 5249 }, { "epoch": 46.88, "grad_norm": 0.037793710827827454, "learning_rate": 2.0705882352941172e-05, "loss": 0.0134, "step": 5250 }, { "epoch": 46.88, "grad_norm": 0.037497226148843765, "learning_rate": 2.064705882352941e-05, "loss": 0.0113, "step": 5251 }, { "epoch": 46.89, "grad_norm": 0.09749874472618103, "learning_rate": 2.058823529411765e-05, "loss": 0.0211, "step": 5252 }, { "epoch": 46.9, "grad_norm": 0.03333595395088196, "learning_rate": 
2.0529411764705882e-05, "loss": 0.0137, "step": 5253 }, { "epoch": 46.91, "grad_norm": 0.0389777310192585, "learning_rate": 2.0470588235294115e-05, "loss": 0.0166, "step": 5254 }, { "epoch": 46.92, "grad_norm": 0.038943417370319366, "learning_rate": 2.0411764705882352e-05, "loss": 0.0163, "step": 5255 }, { "epoch": 46.93, "grad_norm": 0.04082312434911728, "learning_rate": 2.0352941176470585e-05, "loss": 0.0156, "step": 5256 }, { "epoch": 46.94, "grad_norm": 0.03889601305127144, "learning_rate": 2.0294117647058822e-05, "loss": 0.0164, "step": 5257 }, { "epoch": 46.95, "grad_norm": 0.048415396362543106, "learning_rate": 2.0235294117647055e-05, "loss": 0.0168, "step": 5258 }, { "epoch": 46.96, "grad_norm": 0.05437806248664856, "learning_rate": 2.0176470588235295e-05, "loss": 0.0176, "step": 5259 }, { "epoch": 46.96, "grad_norm": 0.03630838543176651, "learning_rate": 2.011764705882353e-05, "loss": 0.0155, "step": 5260 }, { "epoch": 46.97, "grad_norm": 0.04185402765870094, "learning_rate": 2.0058823529411765e-05, "loss": 0.015, "step": 5261 }, { "epoch": 46.98, "grad_norm": 0.03746276721358299, "learning_rate": 1.9999999999999998e-05, "loss": 0.0154, "step": 5262 }, { "epoch": 46.99, "grad_norm": 0.04353839159011841, "learning_rate": 1.994117647058823e-05, "loss": 0.013, "step": 5263 }, { "epoch": 47.0, "grad_norm": 0.07891372591257095, "learning_rate": 1.9882352941176468e-05, "loss": 0.0154, "step": 5264 }, { "epoch": 47.01, "grad_norm": 0.0363650768995285, "learning_rate": 1.9823529411764705e-05, "loss": 0.0166, "step": 5265 }, { "epoch": 47.02, "grad_norm": 0.03396986052393913, "learning_rate": 1.976470588235294e-05, "loss": 0.0143, "step": 5266 }, { "epoch": 47.03, "grad_norm": 0.038241930305957794, "learning_rate": 1.9705882352941175e-05, "loss": 0.0159, "step": 5267 }, { "epoch": 47.04, "grad_norm": 0.03874950110912323, "learning_rate": 1.964705882352941e-05, "loss": 0.0147, "step": 5268 }, { "epoch": 47.04, "grad_norm": 0.04823707416653633, "learning_rate": 
1.9588235294117645e-05, "loss": 0.0169, "step": 5269 }, { "epoch": 47.05, "grad_norm": 0.05711193010210991, "learning_rate": 1.9529411764705878e-05, "loss": 0.0155, "step": 5270 }, { "epoch": 47.06, "grad_norm": 0.04007866233587265, "learning_rate": 1.9470588235294115e-05, "loss": 0.0119, "step": 5271 }, { "epoch": 47.07, "grad_norm": 0.03977508470416069, "learning_rate": 1.9411764705882355e-05, "loss": 0.0154, "step": 5272 }, { "epoch": 47.08, "grad_norm": 0.038793738931417465, "learning_rate": 1.9352941176470588e-05, "loss": 0.0157, "step": 5273 }, { "epoch": 47.09, "grad_norm": 0.04206732288002968, "learning_rate": 1.929411764705882e-05, "loss": 0.0124, "step": 5274 }, { "epoch": 47.1, "grad_norm": 0.041772741824388504, "learning_rate": 1.9235294117647058e-05, "loss": 0.0145, "step": 5275 }, { "epoch": 47.11, "grad_norm": 0.05594871938228607, "learning_rate": 1.917647058823529e-05, "loss": 0.0165, "step": 5276 }, { "epoch": 47.12, "grad_norm": 0.08875638246536255, "learning_rate": 1.9117647058823528e-05, "loss": 0.0207, "step": 5277 }, { "epoch": 47.12, "grad_norm": 0.03953978419303894, "learning_rate": 1.905882352941176e-05, "loss": 0.0151, "step": 5278 }, { "epoch": 47.13, "grad_norm": 0.049001071602106094, "learning_rate": 1.9e-05, "loss": 0.0145, "step": 5279 }, { "epoch": 47.14, "grad_norm": 0.04553956538438797, "learning_rate": 1.8941176470588234e-05, "loss": 0.016, "step": 5280 }, { "epoch": 47.15, "grad_norm": 0.04373745247721672, "learning_rate": 1.888235294117647e-05, "loss": 0.0159, "step": 5281 }, { "epoch": 47.16, "grad_norm": 0.051922187209129333, "learning_rate": 1.8823529411764704e-05, "loss": 0.0155, "step": 5282 }, { "epoch": 47.17, "grad_norm": 0.0890134647488594, "learning_rate": 1.8764705882352937e-05, "loss": 0.0161, "step": 5283 }, { "epoch": 47.18, "grad_norm": 0.040788739919662476, "learning_rate": 1.8705882352941174e-05, "loss": 0.0136, "step": 5284 }, { "epoch": 47.19, "grad_norm": 0.037018533796072006, "learning_rate": 
1.864705882352941e-05, "loss": 0.0144, "step": 5285 }, { "epoch": 47.2, "grad_norm": 0.04045609012246132, "learning_rate": 1.8588235294117644e-05, "loss": 0.0127, "step": 5286 }, { "epoch": 47.21, "grad_norm": 0.04248913750052452, "learning_rate": 1.852941176470588e-05, "loss": 0.014, "step": 5287 }, { "epoch": 47.21, "grad_norm": 0.04623138904571533, "learning_rate": 1.8470588235294117e-05, "loss": 0.0155, "step": 5288 }, { "epoch": 47.22, "grad_norm": 0.08569624274969101, "learning_rate": 1.841176470588235e-05, "loss": 0.0138, "step": 5289 }, { "epoch": 47.23, "grad_norm": 0.04352473095059395, "learning_rate": 1.8352941176470587e-05, "loss": 0.0181, "step": 5290 }, { "epoch": 47.24, "grad_norm": 0.03902510926127434, "learning_rate": 1.8294117647058824e-05, "loss": 0.0145, "step": 5291 }, { "epoch": 47.25, "grad_norm": 0.04916934296488762, "learning_rate": 1.8235294117647057e-05, "loss": 0.0175, "step": 5292 }, { "epoch": 47.26, "grad_norm": 0.04030926525592804, "learning_rate": 1.817647058823529e-05, "loss": 0.0138, "step": 5293 }, { "epoch": 47.27, "grad_norm": 0.03972399979829788, "learning_rate": 1.8117647058823527e-05, "loss": 0.0166, "step": 5294 }, { "epoch": 47.28, "grad_norm": 0.06011297181248665, "learning_rate": 1.8058823529411764e-05, "loss": 0.0174, "step": 5295 }, { "epoch": 47.29, "grad_norm": 0.1028251126408577, "learning_rate": 1.7999999999999997e-05, "loss": 0.0202, "step": 5296 }, { "epoch": 47.29, "grad_norm": 0.04311250522732735, "learning_rate": 1.7941176470588234e-05, "loss": 0.0158, "step": 5297 }, { "epoch": 47.3, "grad_norm": 0.03926079720258713, "learning_rate": 1.788235294117647e-05, "loss": 0.0144, "step": 5298 }, { "epoch": 47.31, "grad_norm": 0.03284962475299835, "learning_rate": 1.7823529411764704e-05, "loss": 0.0129, "step": 5299 }, { "epoch": 47.32, "grad_norm": 0.04275604709982872, "learning_rate": 1.776470588235294e-05, "loss": 0.0153, "step": 5300 }, { "epoch": 47.32, "eval_cer": 0.03095015277762118, "eval_loss": 
0.2094593048095703, "eval_runtime": 22.6317, "eval_samples_per_second": 116.739, "eval_steps_per_second": 1.856, "eval_wer": 0.09813566045220151, "step": 5300 }, { "epoch": 47.33, "grad_norm": 0.06282328814268112, "learning_rate": 1.7705882352941177e-05, "loss": 0.0175, "step": 5301 }, { "epoch": 47.34, "grad_norm": 0.055917032063007355, "learning_rate": 1.764705882352941e-05, "loss": 0.0158, "step": 5302 }, { "epoch": 47.35, "grad_norm": 0.037404872477054596, "learning_rate": 1.7588235294117643e-05, "loss": 0.0141, "step": 5303 }, { "epoch": 47.36, "grad_norm": 0.03317873552441597, "learning_rate": 1.752941176470588e-05, "loss": 0.0151, "step": 5304 }, { "epoch": 47.37, "grad_norm": 0.036500830203294754, "learning_rate": 1.7470588235294117e-05, "loss": 0.0139, "step": 5305 }, { "epoch": 47.38, "grad_norm": 0.041090648621320724, "learning_rate": 1.741176470588235e-05, "loss": 0.0165, "step": 5306 }, { "epoch": 47.38, "grad_norm": 0.06290547549724579, "learning_rate": 1.7352941176470587e-05, "loss": 0.0151, "step": 5307 }, { "epoch": 47.39, "grad_norm": 0.07896995544433594, "learning_rate": 1.7294117647058823e-05, "loss": 0.0195, "step": 5308 }, { "epoch": 47.4, "grad_norm": 0.03444727882742882, "learning_rate": 1.7235294117647057e-05, "loss": 0.0147, "step": 5309 }, { "epoch": 47.41, "grad_norm": 0.03905628249049187, "learning_rate": 1.7176470588235293e-05, "loss": 0.0149, "step": 5310 }, { "epoch": 47.42, "grad_norm": 0.035952065140008926, "learning_rate": 1.711764705882353e-05, "loss": 0.0123, "step": 5311 }, { "epoch": 47.43, "grad_norm": 0.041040096431970596, "learning_rate": 1.7058823529411763e-05, "loss": 0.0141, "step": 5312 }, { "epoch": 47.44, "grad_norm": 0.04559514299035072, "learning_rate": 1.6999999999999996e-05, "loss": 0.0163, "step": 5313 }, { "epoch": 47.45, "grad_norm": 0.07138743251562119, "learning_rate": 1.6941176470588233e-05, "loss": 0.0159, "step": 5314 }, { "epoch": 47.46, "grad_norm": 0.03392981365323067, "learning_rate": 
1.688235294117647e-05, "loss": 0.0153, "step": 5315 }, { "epoch": 47.46, "grad_norm": 0.03276912122964859, "learning_rate": 1.6823529411764703e-05, "loss": 0.013, "step": 5316 }, { "epoch": 47.47, "grad_norm": 0.041713979095220566, "learning_rate": 1.676470588235294e-05, "loss": 0.0157, "step": 5317 }, { "epoch": 47.48, "grad_norm": 0.04545548930764198, "learning_rate": 1.6705882352941176e-05, "loss": 0.0174, "step": 5318 }, { "epoch": 47.49, "grad_norm": 0.044357605278491974, "learning_rate": 1.664705882352941e-05, "loss": 0.0149, "step": 5319 }, { "epoch": 47.5, "grad_norm": 0.06216815114021301, "learning_rate": 1.6588235294117646e-05, "loss": 0.0126, "step": 5320 }, { "epoch": 47.51, "grad_norm": 0.0639604777097702, "learning_rate": 1.6529411764705883e-05, "loss": 0.0159, "step": 5321 }, { "epoch": 47.52, "grad_norm": 0.04166419431567192, "learning_rate": 1.6470588235294116e-05, "loss": 0.0158, "step": 5322 }, { "epoch": 47.53, "grad_norm": 0.040787551552057266, "learning_rate": 1.641176470588235e-05, "loss": 0.016, "step": 5323 }, { "epoch": 47.54, "grad_norm": 0.037571802735328674, "learning_rate": 1.6352941176470586e-05, "loss": 0.0143, "step": 5324 }, { "epoch": 47.54, "grad_norm": 0.03916793689131737, "learning_rate": 1.6294117647058823e-05, "loss": 0.0159, "step": 5325 }, { "epoch": 47.55, "grad_norm": 0.05931222438812256, "learning_rate": 1.6235294117647056e-05, "loss": 0.0173, "step": 5326 }, { "epoch": 47.56, "grad_norm": 0.05944420397281647, "learning_rate": 1.6176470588235293e-05, "loss": 0.0205, "step": 5327 }, { "epoch": 47.57, "grad_norm": 0.043424081057310104, "learning_rate": 1.611764705882353e-05, "loss": 0.0143, "step": 5328 }, { "epoch": 47.58, "grad_norm": 0.040597956627607346, "learning_rate": 1.6058823529411763e-05, "loss": 0.0174, "step": 5329 }, { "epoch": 47.59, "grad_norm": 0.0380982905626297, "learning_rate": 1.6e-05, "loss": 0.0129, "step": 5330 }, { "epoch": 47.6, "grad_norm": 0.04688919708132744, "learning_rate": 
1.5941176470588236e-05, "loss": 0.0174, "step": 5331 }, { "epoch": 47.61, "grad_norm": 0.04591430351138115, "learning_rate": 1.588235294117647e-05, "loss": 0.0142, "step": 5332 }, { "epoch": 47.62, "grad_norm": 0.07394150644540787, "learning_rate": 1.5823529411764702e-05, "loss": 0.0167, "step": 5333 }, { "epoch": 47.62, "grad_norm": 0.0348970890045166, "learning_rate": 1.576470588235294e-05, "loss": 0.0155, "step": 5334 }, { "epoch": 47.63, "grad_norm": 0.03675265982747078, "learning_rate": 1.5705882352941176e-05, "loss": 0.0138, "step": 5335 }, { "epoch": 47.64, "grad_norm": 0.04313450679183006, "learning_rate": 1.564705882352941e-05, "loss": 0.016, "step": 5336 }, { "epoch": 47.65, "grad_norm": 0.035124484449625015, "learning_rate": 1.5588235294117646e-05, "loss": 0.0127, "step": 5337 }, { "epoch": 47.66, "grad_norm": 0.05071822553873062, "learning_rate": 1.5529411764705882e-05, "loss": 0.0154, "step": 5338 }, { "epoch": 47.67, "grad_norm": 0.06335127353668213, "learning_rate": 1.5470588235294116e-05, "loss": 0.0128, "step": 5339 }, { "epoch": 47.68, "grad_norm": 0.04092599079012871, "learning_rate": 1.5411764705882352e-05, "loss": 0.0171, "step": 5340 }, { "epoch": 47.69, "grad_norm": 0.049333348870277405, "learning_rate": 1.535294117647059e-05, "loss": 0.0187, "step": 5341 }, { "epoch": 47.7, "grad_norm": 0.0430619940161705, "learning_rate": 1.5294117647058822e-05, "loss": 0.0155, "step": 5342 }, { "epoch": 47.71, "grad_norm": 0.03829503059387207, "learning_rate": 1.5235294117647057e-05, "loss": 0.014, "step": 5343 }, { "epoch": 47.71, "grad_norm": 0.05448585003614426, "learning_rate": 1.5176470588235294e-05, "loss": 0.0169, "step": 5344 }, { "epoch": 47.72, "grad_norm": 0.07088486850261688, "learning_rate": 1.5117647058823529e-05, "loss": 0.0209, "step": 5345 }, { "epoch": 47.73, "grad_norm": 0.06297363340854645, "learning_rate": 1.5058823529411762e-05, "loss": 0.0175, "step": 5346 }, { "epoch": 47.74, "grad_norm": 0.04363665357232094, "learning_rate": 
1.4999999999999999e-05, "loss": 0.0179, "step": 5347 }, { "epoch": 47.75, "grad_norm": 0.04440702870488167, "learning_rate": 1.4941176470588234e-05, "loss": 0.0176, "step": 5348 }, { "epoch": 47.76, "grad_norm": 0.03349072486162186, "learning_rate": 1.4882352941176469e-05, "loss": 0.0134, "step": 5349 }, { "epoch": 47.77, "grad_norm": 0.04793049022555351, "learning_rate": 1.4823529411764704e-05, "loss": 0.0153, "step": 5350 }, { "epoch": 47.78, "grad_norm": 0.05858717858791351, "learning_rate": 1.476470588235294e-05, "loss": 0.0132, "step": 5351 }, { "epoch": 47.79, "grad_norm": 0.05767468735575676, "learning_rate": 1.4705882352941175e-05, "loss": 0.014, "step": 5352 }, { "epoch": 47.79, "grad_norm": 0.03719104826450348, "learning_rate": 1.464705882352941e-05, "loss": 0.0159, "step": 5353 }, { "epoch": 47.8, "grad_norm": 0.0334928072988987, "learning_rate": 1.4588235294117647e-05, "loss": 0.0145, "step": 5354 }, { "epoch": 47.81, "grad_norm": 0.0341482050716877, "learning_rate": 1.4529411764705882e-05, "loss": 0.0143, "step": 5355 }, { "epoch": 47.82, "grad_norm": 0.037394795566797256, "learning_rate": 1.4470588235294115e-05, "loss": 0.0141, "step": 5356 }, { "epoch": 47.83, "grad_norm": 0.0508609376847744, "learning_rate": 1.4411764705882352e-05, "loss": 0.0129, "step": 5357 }, { "epoch": 47.84, "grad_norm": 0.12816482782363892, "learning_rate": 1.4352941176470587e-05, "loss": 0.0277, "step": 5358 }, { "epoch": 47.85, "grad_norm": 0.045090626925230026, "learning_rate": 1.4294117647058822e-05, "loss": 0.0171, "step": 5359 }, { "epoch": 47.86, "grad_norm": 0.03483239561319351, "learning_rate": 1.4235294117647057e-05, "loss": 0.0129, "step": 5360 }, { "epoch": 47.87, "grad_norm": 0.039795368909835815, "learning_rate": 1.4176470588235293e-05, "loss": 0.0159, "step": 5361 }, { "epoch": 47.88, "grad_norm": 0.035538945347070694, "learning_rate": 1.4117647058823528e-05, "loss": 0.0132, "step": 5362 }, { "epoch": 47.88, "grad_norm": 0.047424495220184326, "learning_rate": 
1.4058823529411763e-05, "loss": 0.0154, "step": 5363 }, { "epoch": 47.89, "grad_norm": 0.0885155200958252, "learning_rate": 1.4e-05, "loss": 0.0154, "step": 5364 }, { "epoch": 47.9, "grad_norm": 0.03710082918405533, "learning_rate": 1.3941176470588235e-05, "loss": 0.0155, "step": 5365 }, { "epoch": 47.91, "grad_norm": 0.03797599673271179, "learning_rate": 1.3882352941176468e-05, "loss": 0.0148, "step": 5366 }, { "epoch": 47.92, "grad_norm": 0.03618619963526726, "learning_rate": 1.3823529411764706e-05, "loss": 0.0116, "step": 5367 }, { "epoch": 47.93, "grad_norm": 0.04147461801767349, "learning_rate": 1.376470588235294e-05, "loss": 0.0126, "step": 5368 }, { "epoch": 47.94, "grad_norm": 0.053551096469163895, "learning_rate": 1.3705882352941175e-05, "loss": 0.0166, "step": 5369 }, { "epoch": 47.95, "grad_norm": 0.059746768325567245, "learning_rate": 1.364705882352941e-05, "loss": 0.0179, "step": 5370 }, { "epoch": 47.96, "grad_norm": 0.13540853559970856, "learning_rate": 1.3588235294117646e-05, "loss": 0.0184, "step": 5371 }, { "epoch": 47.96, "grad_norm": 0.04125310480594635, "learning_rate": 1.3529411764705881e-05, "loss": 0.0155, "step": 5372 }, { "epoch": 47.97, "grad_norm": 0.035127729177474976, "learning_rate": 1.3470588235294116e-05, "loss": 0.0135, "step": 5373 }, { "epoch": 47.98, "grad_norm": 0.043600305914878845, "learning_rate": 1.3411764705882353e-05, "loss": 0.0155, "step": 5374 }, { "epoch": 47.99, "grad_norm": 0.05032142624258995, "learning_rate": 1.3352941176470588e-05, "loss": 0.0171, "step": 5375 }, { "epoch": 48.0, "grad_norm": 0.06508412957191467, "learning_rate": 1.3294117647058821e-05, "loss": 0.0156, "step": 5376 }, { "epoch": 48.01, "grad_norm": 0.036650948226451874, "learning_rate": 1.323529411764706e-05, "loss": 0.0155, "step": 5377 }, { "epoch": 48.02, "grad_norm": 0.04516515135765076, "learning_rate": 1.3176470588235293e-05, "loss": 0.0149, "step": 5378 }, { "epoch": 48.03, "grad_norm": 0.035986579954624176, "learning_rate": 
1.3117647058823528e-05, "loss": 0.0134, "step": 5379 }, { "epoch": 48.04, "grad_norm": 0.038538608700037, "learning_rate": 1.3058823529411763e-05, "loss": 0.0133, "step": 5380 }, { "epoch": 48.04, "grad_norm": 0.04843611642718315, "learning_rate": 1.3e-05, "loss": 0.0194, "step": 5381 }, { "epoch": 48.05, "grad_norm": 0.05646837130188942, "learning_rate": 1.2941176470588234e-05, "loss": 0.0204, "step": 5382 }, { "epoch": 48.06, "grad_norm": 0.07528936862945557, "learning_rate": 1.2882352941176469e-05, "loss": 0.0188, "step": 5383 }, { "epoch": 48.07, "grad_norm": 0.04549802467226982, "learning_rate": 1.2823529411764706e-05, "loss": 0.0173, "step": 5384 }, { "epoch": 48.08, "grad_norm": 0.03472273051738739, "learning_rate": 1.276470588235294e-05, "loss": 0.011, "step": 5385 }, { "epoch": 48.09, "grad_norm": 0.039434779435396194, "learning_rate": 1.2705882352941174e-05, "loss": 0.0144, "step": 5386 }, { "epoch": 48.1, "grad_norm": 0.054497748613357544, "learning_rate": 1.2647058823529412e-05, "loss": 0.0176, "step": 5387 }, { "epoch": 48.11, "grad_norm": 0.04958281293511391, "learning_rate": 1.2588235294117646e-05, "loss": 0.012, "step": 5388 }, { "epoch": 48.12, "grad_norm": 0.06760144978761673, "learning_rate": 1.252941176470588e-05, "loss": 0.0181, "step": 5389 }, { "epoch": 48.12, "grad_norm": 0.03802026808261871, "learning_rate": 1.2470588235294116e-05, "loss": 0.0144, "step": 5390 }, { "epoch": 48.13, "grad_norm": 0.04341566562652588, "learning_rate": 1.2411764705882352e-05, "loss": 0.0143, "step": 5391 }, { "epoch": 48.14, "grad_norm": 0.03979827091097832, "learning_rate": 1.2352941176470587e-05, "loss": 0.0138, "step": 5392 }, { "epoch": 48.15, "grad_norm": 0.045508190989494324, "learning_rate": 1.2294117647058822e-05, "loss": 0.0161, "step": 5393 }, { "epoch": 48.16, "grad_norm": 0.05848633125424385, "learning_rate": 1.2235294117647059e-05, "loss": 0.0162, "step": 5394 }, { "epoch": 48.17, "grad_norm": 0.07386845350265503, "learning_rate": 
1.2176470588235294e-05, "loss": 0.0189, "step": 5395 }, { "epoch": 48.18, "grad_norm": 0.034790199249982834, "learning_rate": 1.2117647058823527e-05, "loss": 0.0146, "step": 5396 }, { "epoch": 48.19, "grad_norm": 0.041191063821315765, "learning_rate": 1.2058823529411765e-05, "loss": 0.013, "step": 5397 }, { "epoch": 48.2, "grad_norm": 0.03899366036057472, "learning_rate": 1.1999999999999999e-05, "loss": 0.0143, "step": 5398 }, { "epoch": 48.21, "grad_norm": 0.043636348098516464, "learning_rate": 1.1941176470588234e-05, "loss": 0.0156, "step": 5399 }, { "epoch": 48.21, "grad_norm": 0.0491810105741024, "learning_rate": 1.1882352941176469e-05, "loss": 0.0172, "step": 5400 }, { "epoch": 48.21, "eval_cer": 0.031036595220108768, "eval_loss": 0.21303808689117432, "eval_runtime": 22.4132, "eval_samples_per_second": 117.877, "eval_steps_per_second": 1.874, "eval_wer": 0.09851249504165013, "step": 5400 }, { "epoch": 48.22, "grad_norm": 0.09034627676010132, "learning_rate": 1.1823529411764705e-05, "loss": 0.0156, "step": 5401 }, { "epoch": 48.23, "grad_norm": 0.04086814075708389, "learning_rate": 1.176470588235294e-05, "loss": 0.0154, "step": 5402 }, { "epoch": 48.24, "grad_norm": 0.03886063024401665, "learning_rate": 1.1705882352941175e-05, "loss": 0.0164, "step": 5403 }, { "epoch": 48.25, "grad_norm": 0.038670048117637634, "learning_rate": 1.1647058823529412e-05, "loss": 0.015, "step": 5404 }, { "epoch": 48.26, "grad_norm": 0.045868970453739166, "learning_rate": 1.1588235294117647e-05, "loss": 0.0155, "step": 5405 }, { "epoch": 48.27, "grad_norm": 0.04485901817679405, "learning_rate": 1.152941176470588e-05, "loss": 0.0164, "step": 5406 }, { "epoch": 48.28, "grad_norm": 0.0518074557185173, "learning_rate": 1.1470588235294118e-05, "loss": 0.0124, "step": 5407 }, { "epoch": 48.29, "grad_norm": 0.06739792227745056, "learning_rate": 1.1411764705882352e-05, "loss": 0.018, "step": 5408 }, { "epoch": 48.29, "grad_norm": 0.035138729959726334, "learning_rate": 1.1352941176470587e-05, 
"loss": 0.0161, "step": 5409 }, { "epoch": 48.3, "grad_norm": 0.03395545855164528, "learning_rate": 1.1294117647058822e-05, "loss": 0.0137, "step": 5410 }, { "epoch": 48.31, "grad_norm": 0.036876097321510315, "learning_rate": 1.1235294117647058e-05, "loss": 0.0132, "step": 5411 }, { "epoch": 48.32, "grad_norm": 0.049984682351350784, "learning_rate": 1.1176470588235293e-05, "loss": 0.0188, "step": 5412 }, { "epoch": 48.33, "grad_norm": 0.04078980162739754, "learning_rate": 1.1117647058823528e-05, "loss": 0.0126, "step": 5413 }, { "epoch": 48.34, "grad_norm": 0.07755071669816971, "learning_rate": 1.1058823529411765e-05, "loss": 0.0151, "step": 5414 }, { "epoch": 48.35, "grad_norm": 0.03706435486674309, "learning_rate": 1.1e-05, "loss": 0.015, "step": 5415 }, { "epoch": 48.36, "grad_norm": 0.0402042493224144, "learning_rate": 1.0941176470588233e-05, "loss": 0.0165, "step": 5416 }, { "epoch": 48.37, "grad_norm": 0.038473259657621384, "learning_rate": 1.0882352941176471e-05, "loss": 0.0143, "step": 5417 }, { "epoch": 48.38, "grad_norm": 0.04128929600119591, "learning_rate": 1.0823529411764705e-05, "loss": 0.0173, "step": 5418 }, { "epoch": 48.38, "grad_norm": 0.04608842357993126, "learning_rate": 1.076470588235294e-05, "loss": 0.0159, "step": 5419 }, { "epoch": 48.39, "grad_norm": 0.07743281871080399, "learning_rate": 1.0705882352941175e-05, "loss": 0.0158, "step": 5420 }, { "epoch": 48.4, "grad_norm": 0.038645412772893906, "learning_rate": 1.0647058823529411e-05, "loss": 0.0138, "step": 5421 }, { "epoch": 48.41, "grad_norm": 0.040869440883398056, "learning_rate": 1.0588235294117646e-05, "loss": 0.0124, "step": 5422 }, { "epoch": 48.42, "grad_norm": 0.04238196089863777, "learning_rate": 1.0529411764705881e-05, "loss": 0.0158, "step": 5423 }, { "epoch": 48.43, "grad_norm": 0.04477444291114807, "learning_rate": 1.0470588235294118e-05, "loss": 0.0167, "step": 5424 }, { "epoch": 48.44, "grad_norm": 0.04536725580692291, "learning_rate": 1.0411764705882353e-05, "loss": 
0.0138, "step": 5425 }, { "epoch": 48.45, "grad_norm": 0.08704833686351776, "learning_rate": 1.0352941176470586e-05, "loss": 0.0165, "step": 5426 }, { "epoch": 48.46, "grad_norm": 0.04076733440160751, "learning_rate": 1.0294117647058824e-05, "loss": 0.0157, "step": 5427 }, { "epoch": 48.46, "grad_norm": 0.04137210175395012, "learning_rate": 1.0235294117647058e-05, "loss": 0.0163, "step": 5428 }, { "epoch": 48.47, "grad_norm": 0.04278163984417915, "learning_rate": 1.0176470588235293e-05, "loss": 0.0157, "step": 5429 }, { "epoch": 48.48, "grad_norm": 0.04365207254886627, "learning_rate": 1.0117647058823528e-05, "loss": 0.0145, "step": 5430 }, { "epoch": 48.49, "grad_norm": 0.04416772723197937, "learning_rate": 1.0058823529411764e-05, "loss": 0.0126, "step": 5431 }, { "epoch": 48.5, "grad_norm": 0.050360389053821564, "learning_rate": 9.999999999999999e-06, "loss": 0.0146, "step": 5432 }, { "epoch": 48.51, "grad_norm": 0.04284907504916191, "learning_rate": 9.941176470588234e-06, "loss": 0.0158, "step": 5433 }, { "epoch": 48.52, "grad_norm": 0.03979944810271263, "learning_rate": 9.88235294117647e-06, "loss": 0.0151, "step": 5434 }, { "epoch": 48.53, "grad_norm": 0.03629522770643234, "learning_rate": 9.823529411764706e-06, "loss": 0.0138, "step": 5435 }, { "epoch": 48.54, "grad_norm": 0.03917134553194046, "learning_rate": 9.764705882352939e-06, "loss": 0.0155, "step": 5436 }, { "epoch": 48.54, "grad_norm": 0.04647763445973396, "learning_rate": 9.705882352941177e-06, "loss": 0.0175, "step": 5437 }, { "epoch": 48.55, "grad_norm": 0.052529849112033844, "learning_rate": 9.64705882352941e-06, "loss": 0.0131, "step": 5438 }, { "epoch": 48.56, "grad_norm": 0.07595250755548477, "learning_rate": 9.588235294117646e-06, "loss": 0.0198, "step": 5439 }, { "epoch": 48.57, "grad_norm": 0.03902169317007065, "learning_rate": 9.52941176470588e-06, "loss": 0.0163, "step": 5440 }, { "epoch": 48.58, "grad_norm": 0.03682319447398186, "learning_rate": 9.470588235294117e-06, "loss": 0.0148, 
"step": 5441 }, { "epoch": 48.59, "grad_norm": 0.042619552463293076, "learning_rate": 9.411764705882352e-06, "loss": 0.0148, "step": 5442 }, { "epoch": 48.6, "grad_norm": 0.04521515592932701, "learning_rate": 9.352941176470587e-06, "loss": 0.0148, "step": 5443 }, { "epoch": 48.61, "grad_norm": 0.05362657457590103, "learning_rate": 9.294117647058822e-06, "loss": 0.0177, "step": 5444 }, { "epoch": 48.62, "grad_norm": 0.07619816064834595, "learning_rate": 9.235294117647059e-06, "loss": 0.0204, "step": 5445 }, { "epoch": 48.62, "grad_norm": 0.05332028865814209, "learning_rate": 9.176470588235294e-06, "loss": 0.0163, "step": 5446 }, { "epoch": 48.63, "grad_norm": 0.03711302950978279, "learning_rate": 9.117647058823529e-06, "loss": 0.0136, "step": 5447 }, { "epoch": 48.64, "grad_norm": 0.039778921753168106, "learning_rate": 9.058823529411764e-06, "loss": 0.0161, "step": 5448 }, { "epoch": 48.65, "grad_norm": 0.042309898883104324, "learning_rate": 8.999999999999999e-06, "loss": 0.0144, "step": 5449 }, { "epoch": 48.66, "grad_norm": 0.04288448393344879, "learning_rate": 8.941176470588235e-06, "loss": 0.0142, "step": 5450 }, { "epoch": 48.67, "grad_norm": 0.09097269177436829, "learning_rate": 8.88235294117647e-06, "loss": 0.023, "step": 5451 }, { "epoch": 48.68, "grad_norm": 0.03806547820568085, "learning_rate": 8.823529411764705e-06, "loss": 0.0159, "step": 5452 }, { "epoch": 48.69, "grad_norm": 0.03374017775058746, "learning_rate": 8.76470588235294e-06, "loss": 0.0128, "step": 5453 }, { "epoch": 48.7, "grad_norm": 0.03565361723303795, "learning_rate": 8.705882352941175e-06, "loss": 0.0136, "step": 5454 }, { "epoch": 48.71, "grad_norm": 0.03919528052210808, "learning_rate": 8.647058823529412e-06, "loss": 0.0152, "step": 5455 }, { "epoch": 48.71, "grad_norm": 0.03529151901602745, "learning_rate": 8.588235294117647e-06, "loss": 0.0118, "step": 5456 }, { "epoch": 48.72, "grad_norm": 0.06929656863212585, "learning_rate": 8.529411764705882e-06, "loss": 0.0177, "step": 5457 }, { 
"epoch": 48.73, "grad_norm": 0.05540144443511963, "learning_rate": 8.470588235294117e-06, "loss": 0.0151, "step": 5458 }, { "epoch": 48.74, "grad_norm": 0.04252156987786293, "learning_rate": 8.411764705882352e-06, "loss": 0.0148, "step": 5459 }, { "epoch": 48.75, "grad_norm": 0.04111355170607567, "learning_rate": 8.352941176470588e-06, "loss": 0.0189, "step": 5460 }, { "epoch": 48.76, "grad_norm": 0.039277084171772, "learning_rate": 8.294117647058823e-06, "loss": 0.0132, "step": 5461 }, { "epoch": 48.77, "grad_norm": 0.03730860352516174, "learning_rate": 8.235294117647058e-06, "loss": 0.0134, "step": 5462 }, { "epoch": 48.78, "grad_norm": 0.06344401091337204, "learning_rate": 8.176470588235293e-06, "loss": 0.0195, "step": 5463 }, { "epoch": 48.79, "grad_norm": 0.060826625674963, "learning_rate": 8.117647058823528e-06, "loss": 0.0148, "step": 5464 }, { "epoch": 48.79, "grad_norm": 0.03751077502965927, "learning_rate": 8.058823529411765e-06, "loss": 0.0167, "step": 5465 }, { "epoch": 48.8, "grad_norm": 0.05045950412750244, "learning_rate": 8e-06, "loss": 0.0178, "step": 5466 }, { "epoch": 48.81, "grad_norm": 0.036700934171676636, "learning_rate": 7.941176470588235e-06, "loss": 0.0138, "step": 5467 }, { "epoch": 48.82, "grad_norm": 0.043851833790540695, "learning_rate": 7.88235294117647e-06, "loss": 0.0152, "step": 5468 }, { "epoch": 48.83, "grad_norm": 0.050441958010196686, "learning_rate": 7.823529411764705e-06, "loss": 0.0177, "step": 5469 }, { "epoch": 48.84, "grad_norm": 0.05202293023467064, "learning_rate": 7.764705882352941e-06, "loss": 0.0134, "step": 5470 }, { "epoch": 48.85, "grad_norm": 0.03672241047024727, "learning_rate": 7.705882352941176e-06, "loss": 0.0142, "step": 5471 }, { "epoch": 48.86, "grad_norm": 0.04451717808842659, "learning_rate": 7.647058823529411e-06, "loss": 0.0167, "step": 5472 }, { "epoch": 48.87, "grad_norm": 0.03132355585694313, "learning_rate": 7.588235294117647e-06, "loss": 0.0122, "step": 5473 }, { "epoch": 48.88, "grad_norm": 
0.05414542928338051, "learning_rate": 7.529411764705881e-06, "loss": 0.0133, "step": 5474 }, { "epoch": 48.88, "grad_norm": 0.051125116646289825, "learning_rate": 7.470588235294117e-06, "loss": 0.0145, "step": 5475 }, { "epoch": 48.89, "grad_norm": 0.09471362084150314, "learning_rate": 7.411764705882352e-06, "loss": 0.0168, "step": 5476 }, { "epoch": 48.9, "grad_norm": 0.03679506480693817, "learning_rate": 7.352941176470588e-06, "loss": 0.0155, "step": 5477 }, { "epoch": 48.91, "grad_norm": 0.03783881291747093, "learning_rate": 7.294117647058823e-06, "loss": 0.0129, "step": 5478 }, { "epoch": 48.92, "grad_norm": 0.047866713255643845, "learning_rate": 7.2352941176470575e-06, "loss": 0.0152, "step": 5479 }, { "epoch": 48.93, "grad_norm": 0.03788332641124725, "learning_rate": 7.176470588235293e-06, "loss": 0.0132, "step": 5480 }, { "epoch": 48.94, "grad_norm": 0.04627546668052673, "learning_rate": 7.117647058823528e-06, "loss": 0.0143, "step": 5481 }, { "epoch": 48.95, "grad_norm": 0.0775122195482254, "learning_rate": 7.058823529411764e-06, "loss": 0.0208, "step": 5482 }, { "epoch": 48.96, "grad_norm": 0.051570240408182144, "learning_rate": 7e-06, "loss": 0.0156, "step": 5483 }, { "epoch": 48.96, "grad_norm": 0.04197980836033821, "learning_rate": 6.941176470588234e-06, "loss": 0.0158, "step": 5484 }, { "epoch": 48.97, "grad_norm": 0.03953948989510536, "learning_rate": 6.88235294117647e-06, "loss": 0.0136, "step": 5485 }, { "epoch": 48.98, "grad_norm": 0.05131828784942627, "learning_rate": 6.823529411764705e-06, "loss": 0.0169, "step": 5486 }, { "epoch": 48.99, "grad_norm": 0.04814542829990387, "learning_rate": 6.764705882352941e-06, "loss": 0.0148, "step": 5487 }, { "epoch": 49.0, "grad_norm": 0.04880789667367935, "learning_rate": 6.705882352941176e-06, "loss": 0.0136, "step": 5488 }, { "epoch": 49.01, "grad_norm": 0.037186071276664734, "learning_rate": 6.6470588235294105e-06, "loss": 0.0139, "step": 5489 }, { "epoch": 49.02, "grad_norm": 0.04042769595980644, 
"learning_rate": 6.588235294117646e-06, "loss": 0.0157, "step": 5490 }, { "epoch": 49.03, "grad_norm": 0.036447346210479736, "learning_rate": 6.529411764705881e-06, "loss": 0.0163, "step": 5491 }, { "epoch": 49.04, "grad_norm": 0.045571520924568176, "learning_rate": 6.470588235294117e-06, "loss": 0.0172, "step": 5492 }, { "epoch": 49.04, "grad_norm": 0.045244552195072174, "learning_rate": 6.411764705882353e-06, "loss": 0.0148, "step": 5493 }, { "epoch": 49.05, "grad_norm": 0.04821966215968132, "learning_rate": 6.352941176470587e-06, "loss": 0.014, "step": 5494 }, { "epoch": 49.06, "grad_norm": 0.05282486602663994, "learning_rate": 6.294117647058823e-06, "loss": 0.0156, "step": 5495 }, { "epoch": 49.07, "grad_norm": 0.03506775572896004, "learning_rate": 6.235294117647058e-06, "loss": 0.0163, "step": 5496 }, { "epoch": 49.08, "grad_norm": 0.04147464781999588, "learning_rate": 6.176470588235294e-06, "loss": 0.0138, "step": 5497 }, { "epoch": 49.09, "grad_norm": 0.049196507781744, "learning_rate": 6.117647058823529e-06, "loss": 0.0141, "step": 5498 }, { "epoch": 49.1, "grad_norm": 0.044748276472091675, "learning_rate": 6.0588235294117635e-06, "loss": 0.0172, "step": 5499 }, { "epoch": 49.11, "grad_norm": 0.0551871620118618, "learning_rate": 5.999999999999999e-06, "loss": 0.017, "step": 5500 }, { "epoch": 49.11, "eval_cer": 0.03161538374806914, "eval_loss": 0.2137099951505661, "eval_runtime": 22.2933, "eval_samples_per_second": 118.511, "eval_steps_per_second": 1.884, "eval_wer": 0.09944466481554938, "step": 5500 }, { "epoch": 49.12, "grad_norm": 0.06691740453243256, "learning_rate": 5.941176470588234e-06, "loss": 0.0172, "step": 5501 }, { "epoch": 49.12, "grad_norm": 0.03986073285341263, "learning_rate": 5.88235294117647e-06, "loss": 0.0155, "step": 5502 }, { "epoch": 49.13, "grad_norm": 0.04453933984041214, "learning_rate": 5.823529411764706e-06, "loss": 0.0185, "step": 5503 }, { "epoch": 49.14, "grad_norm": 0.0417616032063961, "learning_rate": 5.76470588235294e-06, 
"loss": 0.0156, "step": 5504 }, { "epoch": 49.15, "grad_norm": 0.03996429964900017, "learning_rate": 5.705882352941176e-06, "loss": 0.015, "step": 5505 }, { "epoch": 49.16, "grad_norm": 0.051541831344366074, "learning_rate": 5.647058823529411e-06, "loss": 0.0167, "step": 5506 }, { "epoch": 49.17, "grad_norm": 0.09497403353452682, "learning_rate": 5.5882352941176466e-06, "loss": 0.0185, "step": 5507 }, { "epoch": 49.18, "grad_norm": 0.04161723703145981, "learning_rate": 5.529411764705882e-06, "loss": 0.0147, "step": 5508 }, { "epoch": 49.19, "grad_norm": 0.04817429929971695, "learning_rate": 5.4705882352941165e-06, "loss": 0.0166, "step": 5509 }, { "epoch": 49.2, "grad_norm": 0.036694012582302094, "learning_rate": 5.411764705882352e-06, "loss": 0.013, "step": 5510 }, { "epoch": 49.21, "grad_norm": 0.04692278429865837, "learning_rate": 5.352941176470587e-06, "loss": 0.0166, "step": 5511 }, { "epoch": 49.21, "grad_norm": 0.03901823237538338, "learning_rate": 5.294117647058823e-06, "loss": 0.0117, "step": 5512 }, { "epoch": 49.22, "grad_norm": 0.08639217168092728, "learning_rate": 5.235294117647059e-06, "loss": 0.0211, "step": 5513 }, { "epoch": 49.23, "grad_norm": 0.047227051109075546, "learning_rate": 5.176470588235293e-06, "loss": 0.0171, "step": 5514 }, { "epoch": 49.24, "grad_norm": 0.03695077449083328, "learning_rate": 5.117647058823529e-06, "loss": 0.0155, "step": 5515 }, { "epoch": 49.25, "grad_norm": 0.04250597581267357, "learning_rate": 5.058823529411764e-06, "loss": 0.017, "step": 5516 }, { "epoch": 49.26, "grad_norm": 0.036251239478588104, "learning_rate": 4.9999999999999996e-06, "loss": 0.0118, "step": 5517 }, { "epoch": 49.27, "grad_norm": 0.04985490441322327, "learning_rate": 4.941176470588235e-06, "loss": 0.0163, "step": 5518 }, { "epoch": 49.28, "grad_norm": 0.08453112095594406, "learning_rate": 4.8823529411764695e-06, "loss": 0.0162, "step": 5519 }, { "epoch": 49.29, "grad_norm": 0.05833496153354645, "learning_rate": 4.823529411764705e-06, "loss": 
0.018, "step": 5520 }, { "epoch": 49.29, "grad_norm": 0.03763258084654808, "learning_rate": 4.76470588235294e-06, "loss": 0.0139, "step": 5521 }, { "epoch": 49.3, "grad_norm": 0.04027899354696274, "learning_rate": 4.705882352941176e-06, "loss": 0.011, "step": 5522 }, { "epoch": 49.31, "grad_norm": 0.0386640764772892, "learning_rate": 4.647058823529411e-06, "loss": 0.0143, "step": 5523 }, { "epoch": 49.32, "grad_norm": 0.039674367755651474, "learning_rate": 4.588235294117647e-06, "loss": 0.0109, "step": 5524 }, { "epoch": 49.33, "grad_norm": 0.07444358617067337, "learning_rate": 4.529411764705882e-06, "loss": 0.0123, "step": 5525 }, { "epoch": 49.34, "grad_norm": 0.07372348755598068, "learning_rate": 4.470588235294118e-06, "loss": 0.0173, "step": 5526 }, { "epoch": 49.35, "grad_norm": 0.033435601741075516, "learning_rate": 4.4117647058823526e-06, "loss": 0.0132, "step": 5527 }, { "epoch": 49.36, "grad_norm": 0.0519268698990345, "learning_rate": 4.3529411764705875e-06, "loss": 0.0178, "step": 5528 }, { "epoch": 49.37, "grad_norm": 0.03948422893881798, "learning_rate": 4.294117647058823e-06, "loss": 0.0136, "step": 5529 }, { "epoch": 49.38, "grad_norm": 0.04186204820871353, "learning_rate": 4.235294117647058e-06, "loss": 0.0162, "step": 5530 }, { "epoch": 49.38, "grad_norm": 0.052189480513334274, "learning_rate": 4.176470588235294e-06, "loss": 0.0148, "step": 5531 }, { "epoch": 49.39, "grad_norm": 0.06453348696231842, "learning_rate": 4.117647058823529e-06, "loss": 0.0138, "step": 5532 }, { "epoch": 49.4, "grad_norm": 0.03898428753018379, "learning_rate": 4.058823529411764e-06, "loss": 0.0154, "step": 5533 }, { "epoch": 49.41, "grad_norm": 0.03803344815969467, "learning_rate": 4e-06, "loss": 0.0149, "step": 5534 }, { "epoch": 49.42, "grad_norm": 0.04040852189064026, "learning_rate": 3.941176470588235e-06, "loss": 0.0151, "step": 5535 }, { "epoch": 49.43, "grad_norm": 0.05394357070326805, "learning_rate": 3.882352941176471e-06, "loss": 0.0164, "step": 5536 }, { 
"epoch": 49.44, "grad_norm": 0.040444180369377136, "learning_rate": 3.8235294117647055e-06, "loss": 0.0141, "step": 5537 }, { "epoch": 49.45, "grad_norm": 0.08052466809749603, "learning_rate": 3.7647058823529405e-06, "loss": 0.0185, "step": 5538 }, { "epoch": 49.46, "grad_norm": 0.035009950399398804, "learning_rate": 3.705882352941176e-06, "loss": 0.0148, "step": 5539 }, { "epoch": 49.46, "grad_norm": 0.040605854243040085, "learning_rate": 3.6470588235294117e-06, "loss": 0.0155, "step": 5540 }, { "epoch": 49.47, "grad_norm": 0.03829166293144226, "learning_rate": 3.5882352941176467e-06, "loss": 0.013, "step": 5541 }, { "epoch": 49.48, "grad_norm": 0.04586075246334076, "learning_rate": 3.529411764705882e-06, "loss": 0.0157, "step": 5542 }, { "epoch": 49.49, "grad_norm": 0.04065732657909393, "learning_rate": 3.470588235294117e-06, "loss": 0.0151, "step": 5543 }, { "epoch": 49.5, "grad_norm": 0.0751880407333374, "learning_rate": 3.4117647058823524e-06, "loss": 0.0153, "step": 5544 }, { "epoch": 49.51, "grad_norm": 0.06613259017467499, "learning_rate": 3.352941176470588e-06, "loss": 0.0157, "step": 5545 }, { "epoch": 49.52, "grad_norm": 0.04640515521168709, "learning_rate": 3.294117647058823e-06, "loss": 0.0181, "step": 5546 }, { "epoch": 49.53, "grad_norm": 0.03859524428844452, "learning_rate": 3.2352941176470585e-06, "loss": 0.017, "step": 5547 }, { "epoch": 49.54, "grad_norm": 0.04536804184317589, "learning_rate": 3.1764705882352935e-06, "loss": 0.0149, "step": 5548 }, { "epoch": 49.54, "grad_norm": 0.04511251300573349, "learning_rate": 3.117647058823529e-06, "loss": 0.0152, "step": 5549 }, { "epoch": 49.55, "grad_norm": 0.06233420968055725, "learning_rate": 3.0588235294117647e-06, "loss": 0.0181, "step": 5550 }, { "epoch": 49.56, "grad_norm": 0.09452083706855774, "learning_rate": 2.9999999999999997e-06, "loss": 0.017, "step": 5551 }, { "epoch": 49.57, "grad_norm": 0.036748792976140976, "learning_rate": 2.941176470588235e-06, "loss": 0.0136, "step": 5552 }, { 
"epoch": 49.58, "grad_norm": 0.037301577627658844, "learning_rate": 2.88235294117647e-06, "loss": 0.0155, "step": 5553 }, { "epoch": 49.59, "grad_norm": 0.03743209317326546, "learning_rate": 2.8235294117647054e-06, "loss": 0.0131, "step": 5554 }, { "epoch": 49.6, "grad_norm": 0.0437840111553669, "learning_rate": 2.764705882352941e-06, "loss": 0.0169, "step": 5555 }, { "epoch": 49.61, "grad_norm": 0.05424346774816513, "learning_rate": 2.705882352941176e-06, "loss": 0.0155, "step": 5556 }, { "epoch": 49.62, "grad_norm": 0.06325390934944153, "learning_rate": 2.6470588235294115e-06, "loss": 0.0136, "step": 5557 }, { "epoch": 49.62, "grad_norm": 0.03527723625302315, "learning_rate": 2.5882352941176465e-06, "loss": 0.015, "step": 5558 }, { "epoch": 49.63, "grad_norm": 0.03985361009836197, "learning_rate": 2.529411764705882e-06, "loss": 0.0164, "step": 5559 }, { "epoch": 49.64, "grad_norm": 0.038023244589567184, "learning_rate": 2.4705882352941177e-06, "loss": 0.014, "step": 5560 }, { "epoch": 49.65, "grad_norm": 0.046528175473213196, "learning_rate": 2.4117647058823526e-06, "loss": 0.0168, "step": 5561 }, { "epoch": 49.66, "grad_norm": 0.048520345240831375, "learning_rate": 2.352941176470588e-06, "loss": 0.0132, "step": 5562 }, { "epoch": 49.67, "grad_norm": 0.06942006200551987, "learning_rate": 2.2941176470588234e-06, "loss": 0.0169, "step": 5563 }, { "epoch": 49.68, "grad_norm": 0.03572802245616913, "learning_rate": 2.235294117647059e-06, "loss": 0.0133, "step": 5564 }, { "epoch": 49.69, "grad_norm": 0.03914961963891983, "learning_rate": 2.1764705882352938e-06, "loss": 0.0148, "step": 5565 }, { "epoch": 49.7, "grad_norm": 0.04333449900150299, "learning_rate": 2.117647058823529e-06, "loss": 0.018, "step": 5566 }, { "epoch": 49.71, "grad_norm": 0.044780388474464417, "learning_rate": 2.0588235294117645e-06, "loss": 0.0123, "step": 5567 }, { "epoch": 49.71, "grad_norm": 0.03866629675030708, "learning_rate": 2e-06, "loss": 0.0125, "step": 5568 }, { "epoch": 49.72, 
"grad_norm": 0.07407289743423462, "learning_rate": 1.9411764705882353e-06, "loss": 0.0159, "step": 5569 }, { "epoch": 49.73, "grad_norm": 0.11904079467058182, "learning_rate": 1.8823529411764703e-06, "loss": 0.0155, "step": 5570 }, { "epoch": 49.74, "grad_norm": 0.034318406134843826, "learning_rate": 1.8235294117647058e-06, "loss": 0.0148, "step": 5571 }, { "epoch": 49.75, "grad_norm": 0.0394885390996933, "learning_rate": 1.764705882352941e-06, "loss": 0.016, "step": 5572 }, { "epoch": 49.76, "grad_norm": 0.04434511810541153, "learning_rate": 1.7058823529411762e-06, "loss": 0.0148, "step": 5573 }, { "epoch": 49.77, "grad_norm": 0.050256311893463135, "learning_rate": 1.6470588235294116e-06, "loss": 0.0153, "step": 5574 }, { "epoch": 49.78, "grad_norm": 0.04231452941894531, "learning_rate": 1.5882352941176468e-06, "loss": 0.009, "step": 5575 }, { "epoch": 49.79, "grad_norm": 0.0795440748333931, "learning_rate": 1.5294117647058823e-06, "loss": 0.0191, "step": 5576 }, { "epoch": 49.79, "grad_norm": 0.04174872487783432, "learning_rate": 1.4705882352941175e-06, "loss": 0.0166, "step": 5577 }, { "epoch": 49.8, "grad_norm": 0.03474007174372673, "learning_rate": 1.4117647058823527e-06, "loss": 0.0134, "step": 5578 }, { "epoch": 49.81, "grad_norm": 0.046486325562000275, "learning_rate": 1.352941176470588e-06, "loss": 0.0167, "step": 5579 }, { "epoch": 49.82, "grad_norm": 0.04521731659770012, "learning_rate": 1.2941176470588232e-06, "loss": 0.0144, "step": 5580 }, { "epoch": 49.83, "grad_norm": 0.052535295486450195, "learning_rate": 1.2352941176470588e-06, "loss": 0.0163, "step": 5581 }, { "epoch": 49.84, "grad_norm": 0.06909803301095963, "learning_rate": 1.176470588235294e-06, "loss": 0.017, "step": 5582 }, { "epoch": 49.85, "grad_norm": 0.04130563884973526, "learning_rate": 1.1176470588235294e-06, "loss": 0.017, "step": 5583 }, { "epoch": 49.86, "grad_norm": 0.04639488086104393, "learning_rate": 1.0588235294117646e-06, "loss": 0.0143, "step": 5584 }, { "epoch": 49.87, 
"grad_norm": 0.03253469243645668, "learning_rate": 1e-06, "loss": 0.0119, "step": 5585 }, { "epoch": 49.88, "grad_norm": 0.041558682918548584, "learning_rate": 9.411764705882351e-07, "loss": 0.0136, "step": 5586 }, { "epoch": 49.88, "grad_norm": 0.04843180626630783, "learning_rate": 8.823529411764705e-07, "loss": 0.0151, "step": 5587 }, { "epoch": 49.89, "grad_norm": 0.08238411694765091, "learning_rate": 8.235294117647058e-07, "loss": 0.0178, "step": 5588 }, { "epoch": 49.9, "grad_norm": 0.05005532503128052, "learning_rate": 7.647058823529412e-07, "loss": 0.0148, "step": 5589 }, { "epoch": 49.91, "grad_norm": 0.043235499411821365, "learning_rate": 7.058823529411763e-07, "loss": 0.0148, "step": 5590 }, { "epoch": 49.92, "grad_norm": 0.04127110540866852, "learning_rate": 6.470588235294116e-07, "loss": 0.0182, "step": 5591 }, { "epoch": 49.93, "grad_norm": 0.04067129269242287, "learning_rate": 5.88235294117647e-07, "loss": 0.015, "step": 5592 }, { "epoch": 49.94, "grad_norm": 0.04593358933925629, "learning_rate": 5.294117647058823e-07, "loss": 0.0161, "step": 5593 }, { "epoch": 49.95, "grad_norm": 0.07899615168571472, "learning_rate": 4.7058823529411756e-07, "loss": 0.0185, "step": 5594 }, { "epoch": 49.96, "grad_norm": 0.052048083394765854, "learning_rate": 4.117647058823529e-07, "loss": 0.0154, "step": 5595 }, { "epoch": 49.96, "grad_norm": 0.03803560510277748, "learning_rate": 3.5294117647058817e-07, "loss": 0.0144, "step": 5596 }, { "epoch": 49.97, "grad_norm": 0.032355159521102905, "learning_rate": 2.941176470588235e-07, "loss": 0.0124, "step": 5597 }, { "epoch": 49.98, "grad_norm": 0.03637293353676796, "learning_rate": 2.3529411764705878e-07, "loss": 0.0133, "step": 5598 }, { "epoch": 49.99, "grad_norm": 0.05197904258966446, "learning_rate": 1.7647058823529409e-07, "loss": 0.0155, "step": 5599 }, { "epoch": 50.0, "grad_norm": 0.060445237904787064, "learning_rate": 1.1764705882352939e-07, "loss": 0.0152, "step": 5600 }, { "epoch": 50.0, "eval_cer": 
0.031607867013939785, "eval_loss": 0.21432273089885712, "eval_runtime": 22.3097, "eval_samples_per_second": 118.424, "eval_steps_per_second": 1.883, "eval_wer": 0.09948433161443872, "step": 5600 }, { "epoch": 50.0, "step": 5600, "total_flos": 5.590980477274324e+20, "train_loss": 0.20686535484689686, "train_runtime": 19804.4548, "train_samples_per_second": 72.049, "train_steps_per_second": 0.283 }, { "epoch": 50.0, "step": 5600, "total_flos": 5.590980477274324e+20, "train_loss": 0.0, "train_runtime": 249.9893, "train_samples_per_second": 5707.844, "train_steps_per_second": 22.401 } ], "logging_steps": 1.0, "max_steps": 5600, "num_input_tokens_seen": 0, "num_train_epochs": 50, "save_steps": 400, "total_flos": 5.590980477274324e+20, "train_batch_size": 32, "trial_name": null, "trial_params": null }