{ "best_metric": 1.143676996231079, "best_model_checkpoint": "./SOLAR_NEW_DATASET_SLEEP_fc_out/checkpoint-1800", "epoch": 1.8828451882845187, "eval_steps": 300, "global_step": 1800, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "grad_norm": 5.1021552085876465, "learning_rate": 1e-06, "loss": 2.2775, "step": 1 }, { "epoch": 0.0, "grad_norm": null, "learning_rate": 1e-06, "loss": 2.2251, "step": 2 }, { "epoch": 0.0, "grad_norm": 6.894376277923584, "learning_rate": 2e-06, "loss": 2.5432, "step": 3 }, { "epoch": 0.0, "grad_norm": 5.441768646240234, "learning_rate": 2.9999999999999997e-06, "loss": 2.4134, "step": 4 }, { "epoch": 0.01, "grad_norm": 2.577650785446167, "learning_rate": 4e-06, "loss": 1.7783, "step": 5 }, { "epoch": 0.01, "grad_norm": 2.6100335121154785, "learning_rate": 4.9999999999999996e-06, "loss": 1.9091, "step": 6 }, { "epoch": 0.01, "grad_norm": 3.566389560699463, "learning_rate": 5.999999999999999e-06, "loss": 1.6825, "step": 7 }, { "epoch": 0.01, "grad_norm": 5.842864513397217, "learning_rate": 7e-06, "loss": 2.0507, "step": 8 }, { "epoch": 0.01, "grad_norm": 3.9386281967163086, "learning_rate": 8e-06, "loss": 1.743, "step": 9 }, { "epoch": 0.01, "grad_norm": 3.832657814025879, "learning_rate": 8.999999999999999e-06, "loss": 2.1379, "step": 10 }, { "epoch": 0.01, "grad_norm": 2.7397637367248535, "learning_rate": 9.999999999999999e-06, "loss": 1.1938, "step": 11 }, { "epoch": 0.01, "grad_norm": 2.1158039569854736, "learning_rate": 1.1e-05, "loss": 1.6535, "step": 12 }, { "epoch": 0.01, "grad_norm": 1.9051222801208496, "learning_rate": 1.1999999999999999e-05, "loss": 1.5113, "step": 13 }, { "epoch": 0.01, "grad_norm": 2.472515106201172, "learning_rate": 1.3e-05, "loss": 1.7566, "step": 14 }, { "epoch": 0.02, "grad_norm": 3.4902000427246094, "learning_rate": 1.4e-05, "loss": 2.5336, "step": 15 }, { "epoch": 0.02, "grad_norm": 3.6271560192108154, "learning_rate": 
1.4999999999999999e-05, "loss": 1.9697, "step": 16 }, { "epoch": 0.02, "grad_norm": 5.09641695022583, "learning_rate": 1.6e-05, "loss": 2.0609, "step": 17 }, { "epoch": 0.02, "grad_norm": 3.0167582035064697, "learning_rate": 1.6999999999999996e-05, "loss": 1.8016, "step": 18 }, { "epoch": 0.02, "grad_norm": 5.549201011657715, "learning_rate": 1.7999999999999997e-05, "loss": 2.3704, "step": 19 }, { "epoch": 0.02, "grad_norm": 3.9748847484588623, "learning_rate": 1.9e-05, "loss": 1.811, "step": 20 }, { "epoch": 0.02, "grad_norm": 3.768181324005127, "learning_rate": 1.9999999999999998e-05, "loss": 1.5366, "step": 21 }, { "epoch": 0.02, "grad_norm": 5.290782928466797, "learning_rate": 2.1e-05, "loss": 2.4972, "step": 22 }, { "epoch": 0.02, "grad_norm": 2.7193872928619385, "learning_rate": 2.2e-05, "loss": 1.7859, "step": 23 }, { "epoch": 0.03, "grad_norm": 3.833155632019043, "learning_rate": 2.2999999999999997e-05, "loss": 2.0601, "step": 24 }, { "epoch": 0.03, "grad_norm": 2.297412157058716, "learning_rate": 2.3999999999999997e-05, "loss": 1.3346, "step": 25 }, { "epoch": 0.03, "grad_norm": 3.314594268798828, "learning_rate": 2.4999999999999998e-05, "loss": 1.9146, "step": 26 }, { "epoch": 0.03, "grad_norm": 2.7510643005371094, "learning_rate": 2.6e-05, "loss": 1.8501, "step": 27 }, { "epoch": 0.03, "grad_norm": 2.2888293266296387, "learning_rate": 2.6999999999999996e-05, "loss": 1.8301, "step": 28 }, { "epoch": 0.03, "grad_norm": 4.5635504722595215, "learning_rate": 2.8e-05, "loss": 2.1873, "step": 29 }, { "epoch": 0.03, "grad_norm": 7.103892803192139, "learning_rate": 2.8999999999999997e-05, "loss": 1.9607, "step": 30 }, { "epoch": 0.03, "grad_norm": 6.598682880401611, "learning_rate": 2.9999999999999997e-05, "loss": 1.7272, "step": 31 }, { "epoch": 0.03, "grad_norm": 1.7731786966323853, "learning_rate": 3.0999999999999995e-05, "loss": 1.8001, "step": 32 }, { "epoch": 0.03, "grad_norm": 3.651439905166626, "learning_rate": 3.2e-05, "loss": 1.6389, "step": 33 }, { 
"epoch": 0.04, "grad_norm": 3.4542276859283447, "learning_rate": 3.2999999999999996e-05, "loss": 1.7928, "step": 34 }, { "epoch": 0.04, "grad_norm": 2.9039721488952637, "learning_rate": 3.399999999999999e-05, "loss": 1.9144, "step": 35 }, { "epoch": 0.04, "grad_norm": 3.4144136905670166, "learning_rate": 3.5e-05, "loss": 1.7467, "step": 36 }, { "epoch": 0.04, "grad_norm": 6.434959411621094, "learning_rate": 3.5999999999999994e-05, "loss": 2.1261, "step": 37 }, { "epoch": 0.04, "grad_norm": 3.9881653785705566, "learning_rate": 3.7e-05, "loss": 1.7411, "step": 38 }, { "epoch": 0.04, "grad_norm": 4.064974308013916, "learning_rate": 3.8e-05, "loss": 1.6445, "step": 39 }, { "epoch": 0.04, "grad_norm": 2.88478946685791, "learning_rate": 3.9e-05, "loss": 1.7376, "step": 40 }, { "epoch": 0.04, "grad_norm": 2.7643675804138184, "learning_rate": 3.9999999999999996e-05, "loss": 1.6438, "step": 41 }, { "epoch": 0.04, "grad_norm": 2.1387925148010254, "learning_rate": 4.0999999999999994e-05, "loss": 1.4881, "step": 42 }, { "epoch": 0.04, "grad_norm": 2.5704689025878906, "learning_rate": 4.2e-05, "loss": 1.4022, "step": 43 }, { "epoch": 0.05, "grad_norm": 3.6913387775421143, "learning_rate": 4.3e-05, "loss": 1.6571, "step": 44 }, { "epoch": 0.05, "grad_norm": 3.2858211994171143, "learning_rate": 4.4e-05, "loss": 1.7759, "step": 45 }, { "epoch": 0.05, "grad_norm": 4.481564998626709, "learning_rate": 4.4999999999999996e-05, "loss": 1.6854, "step": 46 }, { "epoch": 0.05, "grad_norm": 3.004473924636841, "learning_rate": 4.599999999999999e-05, "loss": 1.5408, "step": 47 }, { "epoch": 0.05, "grad_norm": 2.49001145362854, "learning_rate": 4.7e-05, "loss": 1.6673, "step": 48 }, { "epoch": 0.05, "grad_norm": 2.1351754665374756, "learning_rate": 4.7999999999999994e-05, "loss": 1.3816, "step": 49 }, { "epoch": 0.05, "grad_norm": 3.705871820449829, "learning_rate": 4.899999999999999e-05, "loss": 1.8553, "step": 50 }, { "epoch": 0.05, "grad_norm": 3.0498299598693848, "learning_rate": 
4.9999999999999996e-05, "loss": 1.4696, "step": 51 }, { "epoch": 0.05, "grad_norm": 2.111211061477661, "learning_rate": 5.1e-05, "loss": 1.6067, "step": 52 }, { "epoch": 0.06, "grad_norm": 2.0597739219665527, "learning_rate": 5.2e-05, "loss": 1.334, "step": 53 }, { "epoch": 0.06, "grad_norm": 2.0325064659118652, "learning_rate": 5.2999999999999994e-05, "loss": 1.2886, "step": 54 }, { "epoch": 0.06, "grad_norm": 3.371424436569214, "learning_rate": 5.399999999999999e-05, "loss": 1.5398, "step": 55 }, { "epoch": 0.06, "grad_norm": 4.390995025634766, "learning_rate": 5.499999999999999e-05, "loss": 1.9422, "step": 56 }, { "epoch": 0.06, "grad_norm": 4.701133728027344, "learning_rate": 5.6e-05, "loss": 1.9618, "step": 57 }, { "epoch": 0.06, "grad_norm": 3.4953861236572266, "learning_rate": 5.6999999999999996e-05, "loss": 1.6602, "step": 58 }, { "epoch": 0.06, "grad_norm": 2.652651071548462, "learning_rate": 5.7999999999999994e-05, "loss": 1.5143, "step": 59 }, { "epoch": 0.06, "grad_norm": 6.5872697830200195, "learning_rate": 5.899999999999999e-05, "loss": 1.2444, "step": 60 }, { "epoch": 0.06, "grad_norm": 3.623898506164551, "learning_rate": 5.9999999999999995e-05, "loss": 1.7179, "step": 61 }, { "epoch": 0.06, "grad_norm": 3.985961437225342, "learning_rate": 6.1e-05, "loss": 1.4591, "step": 62 }, { "epoch": 0.07, "grad_norm": 2.351639986038208, "learning_rate": 6.199999999999999e-05, "loss": 1.4416, "step": 63 }, { "epoch": 0.07, "grad_norm": 4.620721817016602, "learning_rate": 6.299999999999999e-05, "loss": 1.258, "step": 64 }, { "epoch": 0.07, "grad_norm": 4.6299662590026855, "learning_rate": 6.4e-05, "loss": 1.2641, "step": 65 }, { "epoch": 0.07, "grad_norm": 2.620664596557617, "learning_rate": 6.5e-05, "loss": 1.469, "step": 66 }, { "epoch": 0.07, "grad_norm": 3.9535703659057617, "learning_rate": 6.599999999999999e-05, "loss": 1.4236, "step": 67 }, { "epoch": 0.07, "grad_norm": 3.4887490272521973, "learning_rate": 6.699999999999999e-05, "loss": 1.322, "step": 68 }, 
{ "epoch": 0.07, "grad_norm": 5.64335823059082, "learning_rate": 6.799999999999999e-05, "loss": 1.438, "step": 69 }, { "epoch": 0.07, "grad_norm": 3.370468854904175, "learning_rate": 6.9e-05, "loss": 1.2633, "step": 70 }, { "epoch": 0.07, "grad_norm": 3.171403408050537, "learning_rate": 7e-05, "loss": 1.6355, "step": 71 }, { "epoch": 0.08, "grad_norm": 3.2457258701324463, "learning_rate": 7.099999999999999e-05, "loss": 1.5638, "step": 72 }, { "epoch": 0.08, "grad_norm": 2.745772361755371, "learning_rate": 7.199999999999999e-05, "loss": 1.3061, "step": 73 }, { "epoch": 0.08, "grad_norm": 2.8581624031066895, "learning_rate": 7.3e-05, "loss": 1.0935, "step": 74 }, { "epoch": 0.08, "grad_norm": 7.144169807434082, "learning_rate": 7.4e-05, "loss": 1.4205, "step": 75 }, { "epoch": 0.08, "grad_norm": 4.5456461906433105, "learning_rate": 7.5e-05, "loss": 1.2544, "step": 76 }, { "epoch": 0.08, "grad_norm": 4.343386650085449, "learning_rate": 7.6e-05, "loss": 1.7345, "step": 77 }, { "epoch": 0.08, "grad_norm": 5.298736095428467, "learning_rate": 7.699999999999999e-05, "loss": 1.4268, "step": 78 }, { "epoch": 0.08, "grad_norm": 2.657853126525879, "learning_rate": 7.8e-05, "loss": 1.1615, "step": 79 }, { "epoch": 0.08, "grad_norm": 4.102127552032471, "learning_rate": 7.899999999999998e-05, "loss": 1.3367, "step": 80 }, { "epoch": 0.08, "grad_norm": 3.519240140914917, "learning_rate": 7.999999999999999e-05, "loss": 1.2521, "step": 81 }, { "epoch": 0.09, "grad_norm": 2.7445943355560303, "learning_rate": 8.1e-05, "loss": 1.0568, "step": 82 }, { "epoch": 0.09, "grad_norm": 2.702052116394043, "learning_rate": 8.199999999999999e-05, "loss": 1.2667, "step": 83 }, { "epoch": 0.09, "grad_norm": 6.428670883178711, "learning_rate": 8.3e-05, "loss": 1.1848, "step": 84 }, { "epoch": 0.09, "grad_norm": 3.7245421409606934, "learning_rate": 8.4e-05, "loss": 1.5863, "step": 85 }, { "epoch": 0.09, "grad_norm": 5.550326824188232, "learning_rate": 8.499999999999999e-05, "loss": 1.0869, "step": 86 
}, { "epoch": 0.09, "grad_norm": 5.588808059692383, "learning_rate": 8.6e-05, "loss": 0.9643, "step": 87 }, { "epoch": 0.09, "grad_norm": 4.284501552581787, "learning_rate": 8.699999999999999e-05, "loss": 1.1781, "step": 88 }, { "epoch": 0.09, "grad_norm": 5.207043647766113, "learning_rate": 8.8e-05, "loss": 1.3532, "step": 89 }, { "epoch": 0.09, "grad_norm": 3.409078359603882, "learning_rate": 8.9e-05, "loss": 1.1546, "step": 90 }, { "epoch": 0.1, "grad_norm": 4.021597385406494, "learning_rate": 8.999999999999999e-05, "loss": 0.8727, "step": 91 }, { "epoch": 0.1, "grad_norm": 4.004073619842529, "learning_rate": 9.099999999999999e-05, "loss": 0.8301, "step": 92 }, { "epoch": 0.1, "grad_norm": 5.221350193023682, "learning_rate": 9.199999999999999e-05, "loss": 1.1282, "step": 93 }, { "epoch": 0.1, "grad_norm": 6.712344646453857, "learning_rate": 9.3e-05, "loss": 1.5645, "step": 94 }, { "epoch": 0.1, "grad_norm": 4.151594638824463, "learning_rate": 9.4e-05, "loss": 1.1636, "step": 95 }, { "epoch": 0.1, "grad_norm": 4.9005351066589355, "learning_rate": 9.499999999999999e-05, "loss": 0.9978, "step": 96 }, { "epoch": 0.1, "grad_norm": 8.770527839660645, "learning_rate": 9.599999999999999e-05, "loss": 0.7176, "step": 97 }, { "epoch": 0.1, "grad_norm": 4.691393852233887, "learning_rate": 9.699999999999999e-05, "loss": 0.9424, "step": 98 }, { "epoch": 0.1, "grad_norm": 4.191307544708252, "learning_rate": 9.799999999999998e-05, "loss": 0.8837, "step": 99 }, { "epoch": 0.1, "grad_norm": 7.723273277282715, "learning_rate": 9.9e-05, "loss": 1.2063, "step": 100 }, { "epoch": 0.11, "grad_norm": 3.1191282272338867, "learning_rate": 9.999999999999999e-05, "loss": 0.8371, "step": 101 }, { "epoch": 0.11, "grad_norm": 4.945955276489258, "learning_rate": 0.00010099999999999999, "loss": 0.9069, "step": 102 }, { "epoch": 0.11, "grad_norm": 8.733388900756836, "learning_rate": 0.000102, "loss": 1.319, "step": 103 }, { "epoch": 0.11, "grad_norm": 6.735355854034424, "learning_rate": 
0.00010299999999999998, "loss": 1.3797, "step": 104 }, { "epoch": 0.11, "grad_norm": 4.036331653594971, "learning_rate": 0.000104, "loss": 0.5183, "step": 105 }, { "epoch": 0.11, "grad_norm": 7.886319637298584, "learning_rate": 0.00010499999999999999, "loss": 1.5606, "step": 106 }, { "epoch": 0.11, "grad_norm": 4.50234317779541, "learning_rate": 0.00010599999999999999, "loss": 1.3502, "step": 107 }, { "epoch": 0.11, "grad_norm": 4.697140216827393, "learning_rate": 0.000107, "loss": 0.9047, "step": 108 }, { "epoch": 0.11, "grad_norm": 7.2634806632995605, "learning_rate": 0.00010799999999999998, "loss": 1.7172, "step": 109 }, { "epoch": 0.12, "grad_norm": 6.540546894073486, "learning_rate": 0.00010899999999999999, "loss": 1.4783, "step": 110 }, { "epoch": 0.12, "grad_norm": 8.533005714416504, "learning_rate": 0.00010999999999999998, "loss": 1.3668, "step": 111 }, { "epoch": 0.12, "grad_norm": 5.5528564453125, "learning_rate": 0.00011099999999999999, "loss": 1.6343, "step": 112 }, { "epoch": 0.12, "grad_norm": 3.9506545066833496, "learning_rate": 0.000112, "loss": 0.5561, "step": 113 }, { "epoch": 0.12, "grad_norm": 4.126518726348877, "learning_rate": 0.00011299999999999998, "loss": 1.574, "step": 114 }, { "epoch": 0.12, "grad_norm": 5.16034460067749, "learning_rate": 0.00011399999999999999, "loss": 1.6017, "step": 115 }, { "epoch": 0.12, "grad_norm": 2.636258602142334, "learning_rate": 0.000115, "loss": 0.4447, "step": 116 }, { "epoch": 0.12, "grad_norm": 4.657654762268066, "learning_rate": 0.00011599999999999999, "loss": 1.683, "step": 117 }, { "epoch": 0.12, "grad_norm": 2.899839401245117, "learning_rate": 0.000117, "loss": 1.5159, "step": 118 }, { "epoch": 0.12, "grad_norm": 7.1795125007629395, "learning_rate": 0.00011799999999999998, "loss": 0.984, "step": 119 }, { "epoch": 0.13, "grad_norm": 3.968451976776123, "learning_rate": 0.00011899999999999999, "loss": 1.6149, "step": 120 }, { "epoch": 0.13, "grad_norm": 7.363707542419434, "learning_rate": 
0.00011999999999999999, "loss": 1.68, "step": 121 }, { "epoch": 0.13, "grad_norm": 4.613768577575684, "learning_rate": 0.00012099999999999999, "loss": 0.9822, "step": 122 }, { "epoch": 0.13, "grad_norm": 2.9544668197631836, "learning_rate": 0.000122, "loss": 1.1721, "step": 123 }, { "epoch": 0.13, "grad_norm": 5.6496357917785645, "learning_rate": 0.00012299999999999998, "loss": 1.0635, "step": 124 }, { "epoch": 0.13, "grad_norm": 3.220294952392578, "learning_rate": 0.00012399999999999998, "loss": 0.3894, "step": 125 }, { "epoch": 0.13, "grad_norm": 5.641209602355957, "learning_rate": 0.000125, "loss": 1.2102, "step": 126 }, { "epoch": 0.13, "grad_norm": 2.1986618041992188, "learning_rate": 0.00012599999999999997, "loss": 0.32, "step": 127 }, { "epoch": 0.13, "grad_norm": 3.084183931350708, "learning_rate": 0.000127, "loss": 1.5444, "step": 128 }, { "epoch": 0.13, "grad_norm": 3.7426083087921143, "learning_rate": 0.000128, "loss": 1.2618, "step": 129 }, { "epoch": 0.14, "grad_norm": 4.764894008636475, "learning_rate": 0.000129, "loss": 1.2171, "step": 130 }, { "epoch": 0.14, "grad_norm": 2.4803237915039062, "learning_rate": 0.00013, "loss": 0.3114, "step": 131 }, { "epoch": 0.14, "grad_norm": 1.9172024726867676, "learning_rate": 0.00013099999999999999, "loss": 0.4047, "step": 132 }, { "epoch": 0.14, "grad_norm": 3.119025468826294, "learning_rate": 0.00013199999999999998, "loss": 1.1797, "step": 133 }, { "epoch": 0.14, "grad_norm": 5.320390701293945, "learning_rate": 0.000133, "loss": 1.2692, "step": 134 }, { "epoch": 0.14, "grad_norm": 4.118578910827637, "learning_rate": 0.00013399999999999998, "loss": 1.579, "step": 135 }, { "epoch": 0.14, "grad_norm": 3.933561086654663, "learning_rate": 0.000135, "loss": 1.1655, "step": 136 }, { "epoch": 0.14, "grad_norm": 3.4326107501983643, "learning_rate": 0.00013599999999999997, "loss": 1.3859, "step": 137 }, { "epoch": 0.14, "grad_norm": 5.371403217315674, "learning_rate": 0.000137, "loss": 1.2995, "step": 138 }, { "epoch": 
0.15, "grad_norm": 5.300549507141113, "learning_rate": 0.000138, "loss": 1.6417, "step": 139 }, { "epoch": 0.15, "grad_norm": 4.252517223358154, "learning_rate": 0.000139, "loss": 1.1399, "step": 140 }, { "epoch": 0.15, "grad_norm": 4.720141410827637, "learning_rate": 0.00014, "loss": 1.935, "step": 141 }, { "epoch": 0.15, "grad_norm": 4.964791297912598, "learning_rate": 0.00014099999999999998, "loss": 0.9864, "step": 142 }, { "epoch": 0.15, "grad_norm": 4.073893070220947, "learning_rate": 0.00014199999999999998, "loss": 0.3407, "step": 143 }, { "epoch": 0.15, "grad_norm": 3.5245461463928223, "learning_rate": 0.00014299999999999998, "loss": 1.1783, "step": 144 }, { "epoch": 0.15, "grad_norm": 5.986094951629639, "learning_rate": 0.00014399999999999998, "loss": 1.0759, "step": 145 }, { "epoch": 0.15, "grad_norm": 3.874466896057129, "learning_rate": 0.000145, "loss": 1.5019, "step": 146 }, { "epoch": 0.15, "grad_norm": 3.829094409942627, "learning_rate": 0.000146, "loss": 1.8141, "step": 147 }, { "epoch": 0.15, "grad_norm": 5.950973033905029, "learning_rate": 0.000147, "loss": 1.6488, "step": 148 }, { "epoch": 0.16, "grad_norm": 2.917398691177368, "learning_rate": 0.000148, "loss": 0.2216, "step": 149 }, { "epoch": 0.16, "grad_norm": 2.3399293422698975, "learning_rate": 0.000149, "loss": 1.1146, "step": 150 }, { "epoch": 0.16, "grad_norm": 3.7433464527130127, "learning_rate": 0.00015, "loss": 1.1838, "step": 151 }, { "epoch": 0.16, "grad_norm": 2.8234732151031494, "learning_rate": 0.00015099999999999998, "loss": 1.7287, "step": 152 }, { "epoch": 0.16, "grad_norm": 4.2081990242004395, "learning_rate": 0.000152, "loss": 1.5875, "step": 153 }, { "epoch": 0.16, "grad_norm": 3.479569673538208, "learning_rate": 0.00015299999999999998, "loss": 1.3674, "step": 154 }, { "epoch": 0.16, "grad_norm": 4.263027191162109, "learning_rate": 0.00015399999999999998, "loss": 1.0861, "step": 155 }, { "epoch": 0.16, "grad_norm": 4.20069694519043, "learning_rate": 0.000155, "loss": 0.8604, 
"step": 156 }, { "epoch": 0.16, "grad_norm": 2.7267985343933105, "learning_rate": 0.000156, "loss": 1.6038, "step": 157 }, { "epoch": 0.17, "grad_norm": 3.7871580123901367, "learning_rate": 0.000157, "loss": 1.0676, "step": 158 }, { "epoch": 0.17, "grad_norm": 3.6624743938446045, "learning_rate": 0.00015799999999999996, "loss": 0.9559, "step": 159 }, { "epoch": 0.17, "grad_norm": 4.26881742477417, "learning_rate": 0.000159, "loss": 1.6486, "step": 160 }, { "epoch": 0.17, "grad_norm": 4.106972694396973, "learning_rate": 0.00015999999999999999, "loss": 1.0407, "step": 161 }, { "epoch": 0.17, "grad_norm": 12.234991073608398, "learning_rate": 0.00016099999999999998, "loss": 0.788, "step": 162 }, { "epoch": 0.17, "grad_norm": 7.54979133605957, "learning_rate": 0.000162, "loss": 1.2009, "step": 163 }, { "epoch": 0.17, "grad_norm": 8.871726036071777, "learning_rate": 0.00016299999999999998, "loss": 1.3877, "step": 164 }, { "epoch": 0.17, "grad_norm": 6.019041538238525, "learning_rate": 0.00016399999999999997, "loss": 1.5193, "step": 165 }, { "epoch": 0.17, "grad_norm": 7.0723676681518555, "learning_rate": 0.000165, "loss": 1.2119, "step": 166 }, { "epoch": 0.17, "grad_norm": 5.5908708572387695, "learning_rate": 0.000166, "loss": 0.9027, "step": 167 }, { "epoch": 0.18, "grad_norm": 4.6804304122924805, "learning_rate": 0.00016699999999999997, "loss": 0.7778, "step": 168 }, { "epoch": 0.18, "grad_norm": 4.805787563323975, "learning_rate": 0.000168, "loss": 0.2573, "step": 169 }, { "epoch": 0.18, "grad_norm": 2.424121141433716, "learning_rate": 0.000169, "loss": 0.9506, "step": 170 }, { "epoch": 0.18, "grad_norm": 4.17886209487915, "learning_rate": 0.00016999999999999999, "loss": 1.0797, "step": 171 }, { "epoch": 0.18, "grad_norm": 4.611898899078369, "learning_rate": 0.00017099999999999998, "loss": 0.7596, "step": 172 }, { "epoch": 0.18, "grad_norm": 2.827244281768799, "learning_rate": 0.000172, "loss": 1.5111, "step": 173 }, { "epoch": 0.18, "grad_norm": 3.5306131839752197, 
"learning_rate": 0.00017299999999999998, "loss": 1.7616, "step": 174 }, { "epoch": 0.18, "grad_norm": 4.013223171234131, "learning_rate": 0.00017399999999999997, "loss": 1.2749, "step": 175 }, { "epoch": 0.18, "grad_norm": 3.9415531158447266, "learning_rate": 0.000175, "loss": 1.3843, "step": 176 }, { "epoch": 0.19, "grad_norm": 5.582239151000977, "learning_rate": 0.000176, "loss": 1.2561, "step": 177 }, { "epoch": 0.19, "grad_norm": 3.296367645263672, "learning_rate": 0.00017699999999999997, "loss": 1.1076, "step": 178 }, { "epoch": 0.19, "grad_norm": 4.854572296142578, "learning_rate": 0.000178, "loss": 1.3671, "step": 179 }, { "epoch": 0.19, "grad_norm": 3.11190128326416, "learning_rate": 0.000179, "loss": 1.1844, "step": 180 }, { "epoch": 0.19, "grad_norm": 3.326961040496826, "learning_rate": 0.00017999999999999998, "loss": 0.9527, "step": 181 }, { "epoch": 0.19, "grad_norm": 3.8623571395874023, "learning_rate": 0.000181, "loss": 0.8668, "step": 182 }, { "epoch": 0.19, "grad_norm": 3.3031466007232666, "learning_rate": 0.00018199999999999998, "loss": 1.6397, "step": 183 }, { "epoch": 0.19, "grad_norm": 3.3300487995147705, "learning_rate": 0.00018299999999999998, "loss": 1.5656, "step": 184 }, { "epoch": 0.19, "grad_norm": 4.852734088897705, "learning_rate": 0.00018399999999999997, "loss": 1.0274, "step": 185 }, { "epoch": 0.19, "grad_norm": 3.535395860671997, "learning_rate": 0.000185, "loss": 0.9264, "step": 186 }, { "epoch": 0.2, "grad_norm": 4.4967265129089355, "learning_rate": 0.000186, "loss": 0.7599, "step": 187 }, { "epoch": 0.2, "grad_norm": 3.382897138595581, "learning_rate": 0.00018699999999999996, "loss": 0.7655, "step": 188 }, { "epoch": 0.2, "grad_norm": 3.327773094177246, "learning_rate": 0.000188, "loss": 0.2743, "step": 189 }, { "epoch": 0.2, "grad_norm": 3.7694613933563232, "learning_rate": 0.00018899999999999999, "loss": 1.0972, "step": 190 }, { "epoch": 0.2, "grad_norm": 1.895078182220459, "learning_rate": 0.00018999999999999998, "loss": 
0.5177, "step": 191 }, { "epoch": 0.2, "grad_norm": 8.051892280578613, "learning_rate": 0.000191, "loss": 1.4296, "step": 192 }, { "epoch": 0.2, "grad_norm": 2.4651572704315186, "learning_rate": 0.00019199999999999998, "loss": 0.2123, "step": 193 }, { "epoch": 0.2, "grad_norm": 5.485482692718506, "learning_rate": 0.00019299999999999997, "loss": 1.8226, "step": 194 }, { "epoch": 0.2, "grad_norm": 7.438916206359863, "learning_rate": 0.00019399999999999997, "loss": 0.7194, "step": 195 }, { "epoch": 0.21, "grad_norm": 4.396063804626465, "learning_rate": 0.000195, "loss": 1.6922, "step": 196 }, { "epoch": 0.21, "grad_norm": 8.508432388305664, "learning_rate": 0.00019599999999999997, "loss": 1.6448, "step": 197 }, { "epoch": 0.21, "grad_norm": 3.487405776977539, "learning_rate": 0.00019699999999999996, "loss": 1.1225, "step": 198 }, { "epoch": 0.21, "grad_norm": 2.8554043769836426, "learning_rate": 0.000198, "loss": 1.4758, "step": 199 }, { "epoch": 0.21, "grad_norm": 2.9215590953826904, "learning_rate": 0.00019899999999999999, "loss": 0.2644, "step": 200 }, { "epoch": 0.21, "grad_norm": 1.809658408164978, "learning_rate": 0.00019999999999999998, "loss": 0.6342, "step": 201 }, { "epoch": 0.21, "grad_norm": 2.948272705078125, "learning_rate": 0.000201, "loss": 1.3171, "step": 202 }, { "epoch": 0.21, "grad_norm": 4.659675598144531, "learning_rate": 0.00020199999999999998, "loss": 1.3682, "step": 203 }, { "epoch": 0.21, "grad_norm": 3.1061782836914062, "learning_rate": 0.00020299999999999997, "loss": 0.6586, "step": 204 }, { "epoch": 0.21, "grad_norm": 4.067229270935059, "learning_rate": 0.000204, "loss": 1.4402, "step": 205 }, { "epoch": 0.22, "grad_norm": 2.9608683586120605, "learning_rate": 0.000205, "loss": 1.5721, "step": 206 }, { "epoch": 0.22, "grad_norm": 3.231966972351074, "learning_rate": 0.00020599999999999997, "loss": 1.4125, "step": 207 }, { "epoch": 0.22, "grad_norm": 3.415351629257202, "learning_rate": 0.00020699999999999996, "loss": 1.234, "step": 208 }, { 
"epoch": 0.22, "grad_norm": 3.1476757526397705, "learning_rate": 0.000208, "loss": 0.8833, "step": 209 }, { "epoch": 0.22, "grad_norm": 5.153148651123047, "learning_rate": 0.00020899999999999998, "loss": 1.5821, "step": 210 }, { "epoch": 0.22, "grad_norm": 4.2117390632629395, "learning_rate": 0.00020999999999999998, "loss": 1.1968, "step": 211 }, { "epoch": 0.22, "grad_norm": 4.106515884399414, "learning_rate": 0.00021099999999999998, "loss": 1.1919, "step": 212 }, { "epoch": 0.22, "grad_norm": 2.5760037899017334, "learning_rate": 0.00021199999999999998, "loss": 1.6354, "step": 213 }, { "epoch": 0.22, "grad_norm": 2.4257736206054688, "learning_rate": 0.00021299999999999997, "loss": 0.6762, "step": 214 }, { "epoch": 0.22, "grad_norm": 2.381577968597412, "learning_rate": 0.000214, "loss": 1.3986, "step": 215 }, { "epoch": 0.23, "grad_norm": 3.177124500274658, "learning_rate": 0.000215, "loss": 0.9631, "step": 216 }, { "epoch": 0.23, "grad_norm": 3.468573570251465, "learning_rate": 0.00021599999999999996, "loss": 1.4267, "step": 217 }, { "epoch": 0.23, "grad_norm": 3.499484062194824, "learning_rate": 0.000217, "loss": 1.0607, "step": 218 }, { "epoch": 0.23, "grad_norm": 1.8307218551635742, "learning_rate": 0.00021799999999999999, "loss": 0.5877, "step": 219 }, { "epoch": 0.23, "grad_norm": 3.825871229171753, "learning_rate": 0.00021899999999999998, "loss": 0.6927, "step": 220 }, { "epoch": 0.23, "grad_norm": 1.552651047706604, "learning_rate": 0.00021999999999999995, "loss": 0.2026, "step": 221 }, { "epoch": 0.23, "grad_norm": 2.360166311264038, "learning_rate": 0.00022099999999999998, "loss": 1.4072, "step": 222 }, { "epoch": 0.23, "grad_norm": 3.88201642036438, "learning_rate": 0.00022199999999999998, "loss": 0.6932, "step": 223 }, { "epoch": 0.23, "grad_norm": 4.5754194259643555, "learning_rate": 0.00022299999999999997, "loss": 1.1474, "step": 224 }, { "epoch": 0.24, "grad_norm": 5.174070835113525, "learning_rate": 0.000224, "loss": 1.6259, "step": 225 }, { 
"epoch": 0.24, "grad_norm": 6.317915916442871, "learning_rate": 0.000225, "loss": 1.5691, "step": 226 }, { "epoch": 0.24, "grad_norm": 2.2685751914978027, "learning_rate": 0.00022599999999999996, "loss": 0.7069, "step": 227 }, { "epoch": 0.24, "grad_norm": 2.4414174556732178, "learning_rate": 0.000227, "loss": 1.4426, "step": 228 }, { "epoch": 0.24, "grad_norm": 3.9098880290985107, "learning_rate": 0.00022799999999999999, "loss": 1.4848, "step": 229 }, { "epoch": 0.24, "grad_norm": 2.94317364692688, "learning_rate": 0.00022899999999999998, "loss": 1.5398, "step": 230 }, { "epoch": 0.24, "grad_norm": 3.696044445037842, "learning_rate": 0.00023, "loss": 1.0508, "step": 231 }, { "epoch": 0.24, "grad_norm": 3.5637283325195312, "learning_rate": 0.00023099999999999998, "loss": 1.293, "step": 232 }, { "epoch": 0.24, "grad_norm": 5.252113342285156, "learning_rate": 0.00023199999999999997, "loss": 1.0238, "step": 233 }, { "epoch": 0.24, "grad_norm": 3.8909826278686523, "learning_rate": 0.00023299999999999997, "loss": 0.8286, "step": 234 }, { "epoch": 0.25, "grad_norm": 3.0664350986480713, "learning_rate": 0.000234, "loss": 1.049, "step": 235 }, { "epoch": 0.25, "grad_norm": 2.636462926864624, "learning_rate": 0.00023499999999999997, "loss": 1.3311, "step": 236 }, { "epoch": 0.25, "grad_norm": 3.8265254497528076, "learning_rate": 0.00023599999999999996, "loss": 1.0761, "step": 237 }, { "epoch": 0.25, "grad_norm": 3.1792984008789062, "learning_rate": 0.000237, "loss": 1.143, "step": 238 }, { "epoch": 0.25, "grad_norm": 3.2147650718688965, "learning_rate": 0.00023799999999999998, "loss": 1.0399, "step": 239 }, { "epoch": 0.25, "grad_norm": 1.9508872032165527, "learning_rate": 0.00023899999999999998, "loss": 1.5802, "step": 240 }, { "epoch": 0.25, "grad_norm": 2.6398966312408447, "learning_rate": 0.00023999999999999998, "loss": 1.2396, "step": 241 }, { "epoch": 0.25, "grad_norm": 3.227956771850586, "learning_rate": 0.00024099999999999998, "loss": 0.2776, "step": 242 }, { 
"epoch": 0.25, "grad_norm": 3.4774301052093506, "learning_rate": 0.00024199999999999997, "loss": 0.9155, "step": 243 }, { "epoch": 0.26, "grad_norm": 2.6463623046875, "learning_rate": 0.000243, "loss": 1.5446, "step": 244 }, { "epoch": 0.26, "grad_norm": 1.357964277267456, "learning_rate": 0.000244, "loss": 0.4428, "step": 245 }, { "epoch": 0.26, "grad_norm": 3.01023268699646, "learning_rate": 0.000245, "loss": 1.0256, "step": 246 }, { "epoch": 0.26, "grad_norm": 4.485054016113281, "learning_rate": 0.00024599999999999996, "loss": 0.9325, "step": 247 }, { "epoch": 0.26, "grad_norm": 4.258424758911133, "learning_rate": 0.000247, "loss": 1.1518, "step": 248 }, { "epoch": 0.26, "grad_norm": 3.5742106437683105, "learning_rate": 0.00024799999999999996, "loss": 0.9365, "step": 249 }, { "epoch": 0.26, "grad_norm": 3.131269931793213, "learning_rate": 0.000249, "loss": 1.006, "step": 250 }, { "epoch": 0.26, "grad_norm": 2.8377223014831543, "learning_rate": 0.00025, "loss": 0.2142, "step": 251 }, { "epoch": 0.26, "grad_norm": 6.699740886688232, "learning_rate": 0.000251, "loss": 1.2012, "step": 252 }, { "epoch": 0.26, "grad_norm": 2.748816967010498, "learning_rate": 0.00025199999999999995, "loss": 1.2162, "step": 253 }, { "epoch": 0.27, "grad_norm": 1.5976569652557373, "learning_rate": 0.00025299999999999997, "loss": 0.7342, "step": 254 }, { "epoch": 0.27, "grad_norm": 3.2964205741882324, "learning_rate": 0.000254, "loss": 0.6944, "step": 255 }, { "epoch": 0.27, "grad_norm": 1.2986418008804321, "learning_rate": 0.00025499999999999996, "loss": 0.1366, "step": 256 }, { "epoch": 0.27, "grad_norm": 3.5206665992736816, "learning_rate": 0.000256, "loss": 0.8447, "step": 257 }, { "epoch": 0.27, "grad_norm": 2.548757791519165, "learning_rate": 0.00025699999999999996, "loss": 0.9743, "step": 258 }, { "epoch": 0.27, "grad_norm": 2.173283576965332, "learning_rate": 0.000258, "loss": 1.0704, "step": 259 }, { "epoch": 0.27, "grad_norm": 2.844398260116577, "learning_rate": 
0.00025899999999999995, "loss": 0.9054, "step": 260 }, { "epoch": 0.27, "grad_norm": 3.233757495880127, "learning_rate": 0.00026, "loss": 1.0035, "step": 261 }, { "epoch": 0.27, "grad_norm": 2.585799217224121, "learning_rate": 0.000261, "loss": 1.0935, "step": 262 }, { "epoch": 0.28, "grad_norm": 3.3614985942840576, "learning_rate": 0.00026199999999999997, "loss": 1.1227, "step": 263 }, { "epoch": 0.28, "grad_norm": 3.9490151405334473, "learning_rate": 0.000263, "loss": 1.0003, "step": 264 }, { "epoch": 0.28, "grad_norm": 4.104151725769043, "learning_rate": 0.00026399999999999997, "loss": 0.8303, "step": 265 }, { "epoch": 0.28, "grad_norm": 1.2483747005462646, "learning_rate": 0.000265, "loss": 0.2044, "step": 266 }, { "epoch": 0.28, "grad_norm": 2.9833176136016846, "learning_rate": 0.000266, "loss": 0.6864, "step": 267 }, { "epoch": 0.28, "grad_norm": 3.925781011581421, "learning_rate": 0.000267, "loss": 1.1738, "step": 268 }, { "epoch": 0.28, "grad_norm": 3.0927608013153076, "learning_rate": 0.00026799999999999995, "loss": 0.7891, "step": 269 }, { "epoch": 0.28, "grad_norm": 2.4451119899749756, "learning_rate": 0.000269, "loss": 0.7228, "step": 270 }, { "epoch": 0.28, "grad_norm": 3.7132692337036133, "learning_rate": 0.00027, "loss": 0.8993, "step": 271 }, { "epoch": 0.28, "grad_norm": 2.241142988204956, "learning_rate": 0.000271, "loss": 0.1454, "step": 272 }, { "epoch": 0.29, "grad_norm": 1.2361118793487549, "learning_rate": 0.00027199999999999994, "loss": 0.2268, "step": 273 }, { "epoch": 0.29, "grad_norm": 2.1823840141296387, "learning_rate": 0.00027299999999999997, "loss": 1.3618, "step": 274 }, { "epoch": 0.29, "grad_norm": 1.52080237865448, "learning_rate": 0.000274, "loss": 0.191, "step": 275 }, { "epoch": 0.29, "grad_norm": 4.636223316192627, "learning_rate": 0.00027499999999999996, "loss": 1.4811, "step": 276 }, { "epoch": 0.29, "grad_norm": 3.443178176879883, "learning_rate": 0.000276, "loss": 1.7303, "step": 277 }, { "epoch": 0.29, "grad_norm": 
1.1942827701568604, "learning_rate": 0.00027699999999999996, "loss": 0.4678, "step": 278 }, { "epoch": 0.29, "grad_norm": 3.1649677753448486, "learning_rate": 0.000278, "loss": 1.8159, "step": 279 }, { "epoch": 0.29, "grad_norm": 2.49360728263855, "learning_rate": 0.000279, "loss": 1.3272, "step": 280 }, { "epoch": 0.29, "grad_norm": 2.574930429458618, "learning_rate": 0.00028, "loss": 1.1373, "step": 281 }, { "epoch": 0.29, "grad_norm": 3.4578397274017334, "learning_rate": 0.00028099999999999995, "loss": 1.3062, "step": 282 }, { "epoch": 0.3, "grad_norm": 3.2991280555725098, "learning_rate": 0.00028199999999999997, "loss": 1.0804, "step": 283 }, { "epoch": 0.3, "grad_norm": 5.0849409103393555, "learning_rate": 0.000283, "loss": 1.0897, "step": 284 }, { "epoch": 0.3, "grad_norm": 2.3282253742218018, "learning_rate": 0.00028399999999999996, "loss": 0.8288, "step": 285 }, { "epoch": 0.3, "grad_norm": 5.600067615509033, "learning_rate": 0.000285, "loss": 0.8487, "step": 286 }, { "epoch": 0.3, "grad_norm": 3.722736120223999, "learning_rate": 0.00028599999999999996, "loss": 1.7637, "step": 287 }, { "epoch": 0.3, "grad_norm": 3.682899236679077, "learning_rate": 0.000287, "loss": 1.1277, "step": 288 }, { "epoch": 0.3, "grad_norm": 2.891709804534912, "learning_rate": 0.00028799999999999995, "loss": 1.5311, "step": 289 }, { "epoch": 0.3, "grad_norm": 2.917855978012085, "learning_rate": 0.000289, "loss": 0.9244, "step": 290 }, { "epoch": 0.3, "grad_norm": 3.527740240097046, "learning_rate": 0.00029, "loss": 0.8896, "step": 291 }, { "epoch": 0.31, "grad_norm": 4.798820972442627, "learning_rate": 0.00029099999999999997, "loss": 1.4213, "step": 292 }, { "epoch": 0.31, "grad_norm": 3.3912644386291504, "learning_rate": 0.000292, "loss": 0.8839, "step": 293 }, { "epoch": 0.31, "grad_norm": 2.7786941528320312, "learning_rate": 0.00029299999999999997, "loss": 1.4677, "step": 294 }, { "epoch": 0.31, "grad_norm": 2.2656240463256836, "learning_rate": 0.000294, "loss": 0.8155, "step": 
295 }, { "epoch": 0.31, "grad_norm": 3.2138984203338623, "learning_rate": 0.00029499999999999996, "loss": 0.453, "step": 296 }, { "epoch": 0.31, "grad_norm": 5.567823886871338, "learning_rate": 0.000296, "loss": 1.3948, "step": 297 }, { "epoch": 0.31, "grad_norm": 2.424060106277466, "learning_rate": 0.00029699999999999996, "loss": 1.2355, "step": 298 }, { "epoch": 0.31, "grad_norm": 3.3072586059570312, "learning_rate": 0.000298, "loss": 1.5173, "step": 299 }, { "epoch": 0.31, "grad_norm": 3.51914381980896, "learning_rate": 0.000299, "loss": 1.565, "step": 300 }, { "epoch": 0.31, "eval_loss": 1.2794907093048096, "eval_runtime": 1.0128, "eval_samples_per_second": 4.937, "eval_steps_per_second": 0.987, "step": 300 }, { "epoch": 0.31, "grad_norm": 2.966632843017578, "learning_rate": 0.0003, "loss": 1.4345, "step": 301 }, { "epoch": 0.32, "grad_norm": 3.790022850036621, "learning_rate": 0.0002998831775700934, "loss": 1.4484, "step": 302 }, { "epoch": 0.32, "grad_norm": 3.3552589416503906, "learning_rate": 0.0002997663551401869, "loss": 2.0556, "step": 303 }, { "epoch": 0.32, "grad_norm": 3.0574560165405273, "learning_rate": 0.0002996495327102804, "loss": 0.523, "step": 304 }, { "epoch": 0.32, "grad_norm": 2.8360860347747803, "learning_rate": 0.00029953271028037383, "loss": 1.4312, "step": 305 }, { "epoch": 0.32, "grad_norm": 2.2051467895507812, "learning_rate": 0.0002994158878504672, "loss": 1.1874, "step": 306 }, { "epoch": 0.32, "grad_norm": 4.811757564544678, "learning_rate": 0.00029929906542056073, "loss": 1.2609, "step": 307 }, { "epoch": 0.32, "grad_norm": 1.9196655750274658, "learning_rate": 0.0002991822429906542, "loss": 0.3526, "step": 308 }, { "epoch": 0.32, "grad_norm": 2.7059614658355713, "learning_rate": 0.00029906542056074763, "loss": 1.0694, "step": 309 }, { "epoch": 0.32, "grad_norm": 2.548222780227661, "learning_rate": 0.0002989485981308411, "loss": 1.4761, "step": 310 }, { "epoch": 0.33, "grad_norm": 3.1029458045959473, "learning_rate": 
0.0002988317757009346, "loss": 1.0938, "step": 311 }, { "epoch": 0.33, "grad_norm": 2.750783920288086, "learning_rate": 0.00029871495327102803, "loss": 1.5409, "step": 312 }, { "epoch": 0.33, "grad_norm": 2.425215244293213, "learning_rate": 0.0002985981308411215, "loss": 1.2011, "step": 313 }, { "epoch": 0.33, "grad_norm": 2.5072877407073975, "learning_rate": 0.00029848130841121493, "loss": 0.8516, "step": 314 }, { "epoch": 0.33, "grad_norm": 2.0297932624816895, "learning_rate": 0.0002983644859813084, "loss": 1.428, "step": 315 }, { "epoch": 0.33, "grad_norm": 2.4369113445281982, "learning_rate": 0.00029824766355140183, "loss": 1.5548, "step": 316 }, { "epoch": 0.33, "grad_norm": 3.450639247894287, "learning_rate": 0.0002981308411214953, "loss": 0.7257, "step": 317 }, { "epoch": 0.33, "grad_norm": 1.8958454132080078, "learning_rate": 0.0002980140186915888, "loss": 1.743, "step": 318 }, { "epoch": 0.33, "grad_norm": 2.61169695854187, "learning_rate": 0.00029789719626168224, "loss": 1.2043, "step": 319 }, { "epoch": 0.33, "grad_norm": 2.2103683948516846, "learning_rate": 0.0002977803738317757, "loss": 0.2433, "step": 320 }, { "epoch": 0.34, "grad_norm": 2.215791940689087, "learning_rate": 0.00029766355140186914, "loss": 0.5894, "step": 321 }, { "epoch": 0.34, "grad_norm": 3.2916409969329834, "learning_rate": 0.0002975467289719626, "loss": 0.7389, "step": 322 }, { "epoch": 0.34, "grad_norm": 2.915350914001465, "learning_rate": 0.00029742990654205604, "loss": 1.776, "step": 323 }, { "epoch": 0.34, "grad_norm": 1.9768015146255493, "learning_rate": 0.0002973130841121495, "loss": 0.5903, "step": 324 }, { "epoch": 0.34, "grad_norm": 1.9784126281738281, "learning_rate": 0.00029719626168224294, "loss": 1.3178, "step": 325 }, { "epoch": 0.34, "grad_norm": 2.2313179969787598, "learning_rate": 0.00029707943925233644, "loss": 1.4696, "step": 326 }, { "epoch": 0.34, "grad_norm": 2.7534449100494385, "learning_rate": 0.0002969626168224299, "loss": 1.0521, "step": 327 }, { "epoch": 
0.34, "grad_norm": 2.8486061096191406, "learning_rate": 0.00029684579439252334, "loss": 1.1824, "step": 328 }, { "epoch": 0.34, "grad_norm": 3.505977153778076, "learning_rate": 0.0002967289719626168, "loss": 0.6442, "step": 329 }, { "epoch": 0.35, "grad_norm": 2.27866268157959, "learning_rate": 0.00029661214953271024, "loss": 1.5632, "step": 330 }, { "epoch": 0.35, "grad_norm": 3.004122495651245, "learning_rate": 0.0002964953271028037, "loss": 0.8333, "step": 331 }, { "epoch": 0.35, "grad_norm": 2.0221564769744873, "learning_rate": 0.00029637850467289714, "loss": 0.2884, "step": 332 }, { "epoch": 0.35, "grad_norm": 3.4967293739318848, "learning_rate": 0.00029626168224299065, "loss": 1.805, "step": 333 }, { "epoch": 0.35, "grad_norm": 5.704538822174072, "learning_rate": 0.0002961448598130841, "loss": 1.2926, "step": 334 }, { "epoch": 0.35, "grad_norm": 2.2548699378967285, "learning_rate": 0.00029602803738317755, "loss": 0.6661, "step": 335 }, { "epoch": 0.35, "grad_norm": 0.8257749676704407, "learning_rate": 0.000295911214953271, "loss": 0.1868, "step": 336 }, { "epoch": 0.35, "grad_norm": 4.1990132331848145, "learning_rate": 0.0002957943925233645, "loss": 1.4626, "step": 337 }, { "epoch": 0.35, "grad_norm": 3.2955174446105957, "learning_rate": 0.0002956775700934579, "loss": 1.1332, "step": 338 }, { "epoch": 0.35, "grad_norm": 1.0604300498962402, "learning_rate": 0.00029556074766355135, "loss": 0.3651, "step": 339 }, { "epoch": 0.36, "grad_norm": 2.6107492446899414, "learning_rate": 0.00029544392523364485, "loss": 0.8476, "step": 340 }, { "epoch": 0.36, "grad_norm": 2.7882015705108643, "learning_rate": 0.0002953271028037383, "loss": 1.1006, "step": 341 }, { "epoch": 0.36, "grad_norm": 2.7657339572906494, "learning_rate": 0.00029521028037383175, "loss": 1.2806, "step": 342 }, { "epoch": 0.36, "grad_norm": 0.8367657661437988, "learning_rate": 0.0002950934579439252, "loss": 0.2574, "step": 343 }, { "epoch": 0.36, "grad_norm": 4.316808223724365, "learning_rate": 
0.00029497663551401865, "loss": 0.7867, "step": 344 }, { "epoch": 0.36, "grad_norm": 1.0288803577423096, "learning_rate": 0.00029485981308411215, "loss": 0.4766, "step": 345 }, { "epoch": 0.36, "grad_norm": 5.54671049118042, "learning_rate": 0.00029474299065420555, "loss": 0.824, "step": 346 }, { "epoch": 0.36, "grad_norm": 6.718745231628418, "learning_rate": 0.00029462616822429905, "loss": 1.3466, "step": 347 }, { "epoch": 0.36, "grad_norm": 3.292029857635498, "learning_rate": 0.0002945093457943925, "loss": 0.7201, "step": 348 }, { "epoch": 0.37, "grad_norm": 2.1415319442749023, "learning_rate": 0.00029439252336448596, "loss": 1.7006, "step": 349 }, { "epoch": 0.37, "grad_norm": 3.0505154132843018, "learning_rate": 0.0002942757009345794, "loss": 1.4469, "step": 350 }, { "epoch": 0.37, "grad_norm": 2.2195465564727783, "learning_rate": 0.00029415887850467286, "loss": 1.4332, "step": 351 }, { "epoch": 0.37, "grad_norm": 2.9841485023498535, "learning_rate": 0.00029404205607476636, "loss": 0.7682, "step": 352 }, { "epoch": 0.37, "grad_norm": 2.8377418518066406, "learning_rate": 0.0002939252336448598, "loss": 1.0434, "step": 353 }, { "epoch": 0.37, "grad_norm": 2.081266403198242, "learning_rate": 0.0002938084112149532, "loss": 0.5904, "step": 354 }, { "epoch": 0.37, "grad_norm": 2.4761319160461426, "learning_rate": 0.0002936915887850467, "loss": 1.1598, "step": 355 }, { "epoch": 0.37, "grad_norm": 2.700859308242798, "learning_rate": 0.00029357476635514016, "loss": 0.523, "step": 356 }, { "epoch": 0.37, "grad_norm": 2.601006031036377, "learning_rate": 0.0002934579439252336, "loss": 1.567, "step": 357 }, { "epoch": 0.37, "grad_norm": 2.2836368083953857, "learning_rate": 0.00029334112149532706, "loss": 0.1971, "step": 358 }, { "epoch": 0.38, "grad_norm": 3.212724208831787, "learning_rate": 0.00029322429906542056, "loss": 0.5909, "step": 359 }, { "epoch": 0.38, "grad_norm": 3.0591113567352295, "learning_rate": 0.000293107476635514, "loss": 0.939, "step": 360 }, { "epoch": 
0.38, "grad_norm": 0.9243104457855225, "learning_rate": 0.00029299065420560746, "loss": 0.3335, "step": 361 }, { "epoch": 0.38, "grad_norm": 0.6790910363197327, "learning_rate": 0.0002928738317757009, "loss": 0.2015, "step": 362 }, { "epoch": 0.38, "grad_norm": 5.168013095855713, "learning_rate": 0.00029275700934579436, "loss": 0.404, "step": 363 }, { "epoch": 0.38, "grad_norm": 3.2125356197357178, "learning_rate": 0.0002926401869158878, "loss": 1.6901, "step": 364 }, { "epoch": 0.38, "grad_norm": 0.8769881725311279, "learning_rate": 0.00029252336448598126, "loss": 0.4032, "step": 365 }, { "epoch": 0.38, "grad_norm": 2.276057243347168, "learning_rate": 0.00029240654205607477, "loss": 0.9828, "step": 366 }, { "epoch": 0.38, "grad_norm": 2.7380950450897217, "learning_rate": 0.0002922897196261682, "loss": 1.5195, "step": 367 }, { "epoch": 0.38, "grad_norm": 0.8719393610954285, "learning_rate": 0.00029217289719626167, "loss": 0.2892, "step": 368 }, { "epoch": 0.39, "grad_norm": 1.1746935844421387, "learning_rate": 0.0002920560747663551, "loss": 0.1869, "step": 369 }, { "epoch": 0.39, "grad_norm": 2.2664616107940674, "learning_rate": 0.00029193925233644857, "loss": 0.9131, "step": 370 }, { "epoch": 0.39, "grad_norm": 3.618701934814453, "learning_rate": 0.000291822429906542, "loss": 1.2094, "step": 371 }, { "epoch": 0.39, "grad_norm": 3.9570870399475098, "learning_rate": 0.00029170560747663547, "loss": 0.722, "step": 372 }, { "epoch": 0.39, "grad_norm": 2.253892183303833, "learning_rate": 0.0002915887850467289, "loss": 1.4645, "step": 373 }, { "epoch": 0.39, "grad_norm": 2.9197115898132324, "learning_rate": 0.0002914719626168224, "loss": 0.6279, "step": 374 }, { "epoch": 0.39, "grad_norm": 2.723787546157837, "learning_rate": 0.00029135514018691587, "loss": 0.8615, "step": 375 }, { "epoch": 0.39, "grad_norm": 1.6628739833831787, "learning_rate": 0.0002912383177570093, "loss": 0.5986, "step": 376 }, { "epoch": 0.39, "grad_norm": 2.0160231590270996, "learning_rate": 
0.0002911214953271028, "loss": 1.4512, "step": 377 }, { "epoch": 0.4, "grad_norm": 3.787649631500244, "learning_rate": 0.0002910046728971962, "loss": 1.2608, "step": 378 }, { "epoch": 0.4, "grad_norm": 2.7951927185058594, "learning_rate": 0.0002908878504672897, "loss": 1.7605, "step": 379 }, { "epoch": 0.4, "grad_norm": 2.1774840354919434, "learning_rate": 0.0002907710280373831, "loss": 0.9581, "step": 380 }, { "epoch": 0.4, "grad_norm": 3.9759175777435303, "learning_rate": 0.00029065420560747663, "loss": 1.7762, "step": 381 }, { "epoch": 0.4, "grad_norm": 1.0567970275878906, "learning_rate": 0.0002905373831775701, "loss": 0.4799, "step": 382 }, { "epoch": 0.4, "grad_norm": 0.9178869128227234, "learning_rate": 0.00029042056074766353, "loss": 0.3698, "step": 383 }, { "epoch": 0.4, "grad_norm": 3.4597392082214355, "learning_rate": 0.000290303738317757, "loss": 1.5229, "step": 384 }, { "epoch": 0.4, "grad_norm": 2.4791572093963623, "learning_rate": 0.0002901869158878505, "loss": 1.6204, "step": 385 }, { "epoch": 0.4, "grad_norm": 3.3275704383850098, "learning_rate": 0.0002900700934579439, "loss": 0.9628, "step": 386 }, { "epoch": 0.4, "grad_norm": 1.1707335710525513, "learning_rate": 0.00028995327102803733, "loss": 0.7267, "step": 387 }, { "epoch": 0.41, "grad_norm": 2.8721630573272705, "learning_rate": 0.00028983644859813083, "loss": 1.1341, "step": 388 }, { "epoch": 0.41, "grad_norm": 3.7177517414093018, "learning_rate": 0.0002897196261682243, "loss": 1.4855, "step": 389 }, { "epoch": 0.41, "grad_norm": 3.316612720489502, "learning_rate": 0.00028960280373831773, "loss": 1.005, "step": 390 }, { "epoch": 0.41, "grad_norm": 1.4785094261169434, "learning_rate": 0.0002894859813084112, "loss": 0.6708, "step": 391 }, { "epoch": 0.41, "grad_norm": 2.6307642459869385, "learning_rate": 0.00028936915887850463, "loss": 0.8549, "step": 392 }, { "epoch": 0.41, "grad_norm": 2.2438905239105225, "learning_rate": 0.00028925233644859814, "loss": 1.6076, "step": 393 }, { "epoch": 0.41, 
"grad_norm": 2.3739724159240723, "learning_rate": 0.00028913551401869153, "loss": 1.12, "step": 394 }, { "epoch": 0.41, "grad_norm": 3.674501657485962, "learning_rate": 0.00028901869158878504, "loss": 0.5076, "step": 395 }, { "epoch": 0.41, "grad_norm": 2.6238901615142822, "learning_rate": 0.0002889018691588785, "loss": 0.6966, "step": 396 }, { "epoch": 0.42, "grad_norm": 4.171786785125732, "learning_rate": 0.00028878504672897194, "loss": 1.3563, "step": 397 }, { "epoch": 0.42, "grad_norm": 4.3918046951293945, "learning_rate": 0.0002886682242990654, "loss": 1.4797, "step": 398 }, { "epoch": 0.42, "grad_norm": 1.8573514223098755, "learning_rate": 0.00028855140186915884, "loss": 0.6252, "step": 399 }, { "epoch": 0.42, "grad_norm": 2.9090523719787598, "learning_rate": 0.00028843457943925234, "loss": 1.1731, "step": 400 }, { "epoch": 0.42, "grad_norm": 2.797912359237671, "learning_rate": 0.0002883177570093458, "loss": 0.5088, "step": 401 }, { "epoch": 0.42, "grad_norm": 2.8787248134613037, "learning_rate": 0.0002882009345794392, "loss": 1.8064, "step": 402 }, { "epoch": 0.42, "grad_norm": 1.8502628803253174, "learning_rate": 0.0002880841121495327, "loss": 0.4324, "step": 403 }, { "epoch": 0.42, "grad_norm": 2.2849178314208984, "learning_rate": 0.00028796728971962614, "loss": 1.2576, "step": 404 }, { "epoch": 0.42, "grad_norm": 7.455745220184326, "learning_rate": 0.0002878504672897196, "loss": 1.4212, "step": 405 }, { "epoch": 0.42, "grad_norm": 2.977170944213867, "learning_rate": 0.00028773364485981304, "loss": 0.9915, "step": 406 }, { "epoch": 0.43, "grad_norm": 4.004940032958984, "learning_rate": 0.00028761682242990655, "loss": 0.8094, "step": 407 }, { "epoch": 0.43, "grad_norm": 4.568225383758545, "learning_rate": 0.0002875, "loss": 0.8189, "step": 408 }, { "epoch": 0.43, "grad_norm": 2.1345443725585938, "learning_rate": 0.00028738317757009345, "loss": 1.0812, "step": 409 }, { "epoch": 0.43, "grad_norm": 2.8404977321624756, "learning_rate": 0.0002872663551401869, 
"loss": 0.5456, "step": 410 }, { "epoch": 0.43, "grad_norm": 1.2581323385238647, "learning_rate": 0.00028714953271028035, "loss": 0.2504, "step": 411 }, { "epoch": 0.43, "grad_norm": 2.7793455123901367, "learning_rate": 0.0002870327102803738, "loss": 1.1482, "step": 412 }, { "epoch": 0.43, "grad_norm": 1.3713812828063965, "learning_rate": 0.00028691588785046725, "loss": 1.1142, "step": 413 }, { "epoch": 0.43, "grad_norm": 2.7433762550354004, "learning_rate": 0.00028679906542056075, "loss": 1.1598, "step": 414 }, { "epoch": 0.43, "grad_norm": 2.55407977104187, "learning_rate": 0.0002866822429906542, "loss": 0.5134, "step": 415 }, { "epoch": 0.44, "grad_norm": 3.431687116622925, "learning_rate": 0.00028656542056074765, "loss": 1.0363, "step": 416 }, { "epoch": 0.44, "grad_norm": 1.9685442447662354, "learning_rate": 0.0002864485981308411, "loss": 0.1242, "step": 417 }, { "epoch": 0.44, "grad_norm": 1.535361647605896, "learning_rate": 0.00028633177570093455, "loss": 0.735, "step": 418 }, { "epoch": 0.44, "grad_norm": 4.509974002838135, "learning_rate": 0.000286214953271028, "loss": 1.194, "step": 419 }, { "epoch": 0.44, "grad_norm": 0.9291192293167114, "learning_rate": 0.00028609813084112145, "loss": 0.4654, "step": 420 }, { "epoch": 0.44, "grad_norm": 3.855079174041748, "learning_rate": 0.0002859813084112149, "loss": 1.4628, "step": 421 }, { "epoch": 0.44, "grad_norm": 2.7849321365356445, "learning_rate": 0.0002858644859813084, "loss": 0.9878, "step": 422 }, { "epoch": 0.44, "grad_norm": 0.733687162399292, "learning_rate": 0.00028574766355140185, "loss": 0.2796, "step": 423 }, { "epoch": 0.44, "grad_norm": 3.3665411472320557, "learning_rate": 0.0002856308411214953, "loss": 1.2903, "step": 424 }, { "epoch": 0.44, "grad_norm": 2.7777185440063477, "learning_rate": 0.00028551401869158875, "loss": 1.3224, "step": 425 }, { "epoch": 0.45, "grad_norm": 2.3624370098114014, "learning_rate": 0.0002853971962616822, "loss": 0.6824, "step": 426 }, { "epoch": 0.45, "grad_norm": 
2.8823182582855225, "learning_rate": 0.00028528037383177565, "loss": 1.4465, "step": 427 }, { "epoch": 0.45, "grad_norm": 2.512755870819092, "learning_rate": 0.0002851635514018691, "loss": 0.8171, "step": 428 }, { "epoch": 0.45, "grad_norm": 1.120779275894165, "learning_rate": 0.0002850467289719626, "loss": 0.6632, "step": 429 }, { "epoch": 0.45, "grad_norm": 1.4150028228759766, "learning_rate": 0.00028492990654205606, "loss": 0.7185, "step": 430 }, { "epoch": 0.45, "grad_norm": 2.5052907466888428, "learning_rate": 0.0002848130841121495, "loss": 0.9646, "step": 431 }, { "epoch": 0.45, "grad_norm": 2.989311933517456, "learning_rate": 0.00028469626168224296, "loss": 1.4964, "step": 432 }, { "epoch": 0.45, "grad_norm": 2.0057992935180664, "learning_rate": 0.00028457943925233646, "loss": 0.5575, "step": 433 }, { "epoch": 0.45, "grad_norm": 2.1718015670776367, "learning_rate": 0.00028446261682242986, "loss": 0.9122, "step": 434 }, { "epoch": 0.46, "grad_norm": 3.9890103340148926, "learning_rate": 0.0002843457943925233, "loss": 1.196, "step": 435 }, { "epoch": 0.46, "grad_norm": 5.763711452484131, "learning_rate": 0.0002842289719626168, "loss": 1.309, "step": 436 }, { "epoch": 0.46, "grad_norm": 2.5950281620025635, "learning_rate": 0.00028411214953271026, "loss": 0.5255, "step": 437 }, { "epoch": 0.46, "grad_norm": 2.1492373943328857, "learning_rate": 0.0002839953271028037, "loss": 0.7657, "step": 438 }, { "epoch": 0.46, "grad_norm": 2.520798683166504, "learning_rate": 0.00028387850467289716, "loss": 0.6183, "step": 439 }, { "epoch": 0.46, "grad_norm": null, "learning_rate": 0.00028387850467289716, "loss": 1.5529, "step": 440 }, { "epoch": 0.46, "grad_norm": 3.6735353469848633, "learning_rate": 0.0002837616822429906, "loss": 1.1269, "step": 441 }, { "epoch": 0.46, "grad_norm": 1.8713912963867188, "learning_rate": 0.0002836448598130841, "loss": 0.5722, "step": 442 }, { "epoch": 0.46, "grad_norm": 0.9943745136260986, "learning_rate": 0.0002835280373831775, "loss": 0.3957, 
"step": 443 }, { "epoch": 0.46, "grad_norm": 2.4403250217437744, "learning_rate": 0.000283411214953271, "loss": 0.9147, "step": 444 }, { "epoch": 0.47, "grad_norm": 2.170886993408203, "learning_rate": 0.00028329439252336447, "loss": 0.8155, "step": 445 }, { "epoch": 0.47, "grad_norm": 2.1296136379241943, "learning_rate": 0.0002831775700934579, "loss": 1.2639, "step": 446 }, { "epoch": 0.47, "grad_norm": 2.446403980255127, "learning_rate": 0.00028306074766355137, "loss": 0.5806, "step": 447 }, { "epoch": 0.47, "grad_norm": 2.98860764503479, "learning_rate": 0.0002829439252336448, "loss": 1.1213, "step": 448 }, { "epoch": 0.47, "grad_norm": 2.848855972290039, "learning_rate": 0.0002828271028037383, "loss": 0.7091, "step": 449 }, { "epoch": 0.47, "grad_norm": 2.713719367980957, "learning_rate": 0.00028271028037383177, "loss": 0.7886, "step": 450 }, { "epoch": 0.47, "grad_norm": 1.9307395219802856, "learning_rate": 0.00028259345794392517, "loss": 1.0976, "step": 451 }, { "epoch": 0.47, "grad_norm": 2.1166470050811768, "learning_rate": 0.00028247663551401867, "loss": 0.3523, "step": 452 }, { "epoch": 0.47, "grad_norm": 1.9235117435455322, "learning_rate": 0.0002823598130841121, "loss": 0.6344, "step": 453 }, { "epoch": 0.47, "grad_norm": 6.735736846923828, "learning_rate": 0.00028224299065420557, "loss": 1.0573, "step": 454 }, { "epoch": 0.48, "grad_norm": 3.388742446899414, "learning_rate": 0.000282126168224299, "loss": 1.2948, "step": 455 }, { "epoch": 0.48, "grad_norm": 2.0969812870025635, "learning_rate": 0.0002820093457943925, "loss": 0.8144, "step": 456 }, { "epoch": 0.48, "grad_norm": 1.877164363861084, "learning_rate": 0.000281892523364486, "loss": 0.5874, "step": 457 }, { "epoch": 0.48, "grad_norm": 4.144896984100342, "learning_rate": 0.0002817757009345794, "loss": 0.9143, "step": 458 }, { "epoch": 0.48, "grad_norm": 3.1770153045654297, "learning_rate": 0.0002816588785046729, "loss": 0.8423, "step": 459 }, { "epoch": 0.48, "grad_norm": 2.5973880290985107, 
"learning_rate": 0.0002815420560747663, "loss": 1.3463, "step": 460 }, { "epoch": 0.48, "grad_norm": 2.3985776901245117, "learning_rate": 0.0002814252336448598, "loss": 0.8316, "step": 461 }, { "epoch": 0.48, "grad_norm": 3.82443904876709, "learning_rate": 0.0002813084112149532, "loss": 0.7252, "step": 462 }, { "epoch": 0.48, "grad_norm": 2.2212815284729004, "learning_rate": 0.00028119158878504673, "loss": 0.8296, "step": 463 }, { "epoch": 0.49, "grad_norm": 3.1041300296783447, "learning_rate": 0.0002810747663551402, "loss": 1.4688, "step": 464 }, { "epoch": 0.49, "grad_norm": 2.560612916946411, "learning_rate": 0.00028095794392523363, "loss": 1.0018, "step": 465 }, { "epoch": 0.49, "grad_norm": 1.8889777660369873, "learning_rate": 0.0002808411214953271, "loss": 1.0207, "step": 466 }, { "epoch": 0.49, "grad_norm": 3.116854429244995, "learning_rate": 0.00028072429906542053, "loss": 0.8899, "step": 467 }, { "epoch": 0.49, "grad_norm": 2.6070220470428467, "learning_rate": 0.000280607476635514, "loss": 0.7297, "step": 468 }, { "epoch": 0.49, "grad_norm": 3.0625195503234863, "learning_rate": 0.00028049065420560743, "loss": 0.916, "step": 469 }, { "epoch": 0.49, "grad_norm": 5.634510517120361, "learning_rate": 0.0002803738317757009, "loss": 1.059, "step": 470 }, { "epoch": 0.49, "grad_norm": 2.602274179458618, "learning_rate": 0.0002802570093457944, "loss": 1.4006, "step": 471 }, { "epoch": 0.49, "grad_norm": 2.149019479751587, "learning_rate": 0.00028014018691588784, "loss": 0.6402, "step": 472 }, { "epoch": 0.49, "grad_norm": 1.638101577758789, "learning_rate": 0.0002800233644859813, "loss": 0.301, "step": 473 }, { "epoch": 0.5, "grad_norm": 2.0306832790374756, "learning_rate": 0.00027990654205607474, "loss": 1.4859, "step": 474 }, { "epoch": 0.5, "grad_norm": 2.660569667816162, "learning_rate": 0.0002797897196261682, "loss": 0.7307, "step": 475 }, { "epoch": 0.5, "grad_norm": 4.271596908569336, "learning_rate": 0.00027967289719626164, "loss": 1.2726, "step": 476 }, { 
"epoch": 0.5, "grad_norm": 3.8445537090301514, "learning_rate": 0.0002795560747663551, "loss": 1.0576, "step": 477 }, { "epoch": 0.5, "grad_norm": 0.8691243529319763, "learning_rate": 0.0002794392523364486, "loss": 0.2533, "step": 478 }, { "epoch": 0.5, "grad_norm": 2.495535135269165, "learning_rate": 0.00027932242990654204, "loss": 1.0136, "step": 479 }, { "epoch": 0.5, "grad_norm": 3.485879898071289, "learning_rate": 0.0002792056074766355, "loss": 0.964, "step": 480 }, { "epoch": 0.5, "grad_norm": 3.352851629257202, "learning_rate": 0.00027908878504672894, "loss": 0.9433, "step": 481 }, { "epoch": 0.5, "grad_norm": 2.3454959392547607, "learning_rate": 0.00027897196261682244, "loss": 0.9644, "step": 482 }, { "epoch": 0.51, "grad_norm": 3.8068840503692627, "learning_rate": 0.00027885514018691584, "loss": 1.4043, "step": 483 }, { "epoch": 0.51, "grad_norm": 4.597325801849365, "learning_rate": 0.0002787383177570093, "loss": 1.2679, "step": 484 }, { "epoch": 0.51, "grad_norm": 5.513976097106934, "learning_rate": 0.0002786214953271028, "loss": 1.3383, "step": 485 }, { "epoch": 0.51, "grad_norm": 2.5806922912597656, "learning_rate": 0.00027850467289719624, "loss": 1.1213, "step": 486 }, { "epoch": 0.51, "grad_norm": 2.425881862640381, "learning_rate": 0.0002783878504672897, "loss": 0.5837, "step": 487 }, { "epoch": 0.51, "grad_norm": 1.552446961402893, "learning_rate": 0.00027827102803738314, "loss": 0.7027, "step": 488 }, { "epoch": 0.51, "grad_norm": 3.1759777069091797, "learning_rate": 0.0002781542056074766, "loss": 0.8814, "step": 489 }, { "epoch": 0.51, "grad_norm": 2.7307231426239014, "learning_rate": 0.0002780373831775701, "loss": 0.2867, "step": 490 }, { "epoch": 0.51, "grad_norm": 2.0762507915496826, "learning_rate": 0.0002779205607476635, "loss": 0.4642, "step": 491 }, { "epoch": 0.51, "grad_norm": 1.5488018989562988, "learning_rate": 0.000277803738317757, "loss": 0.1932, "step": 492 }, { "epoch": 0.52, "grad_norm": 0.7389271259307861, "learning_rate": 
0.00027768691588785045, "loss": 0.206, "step": 493 }, { "epoch": 0.52, "grad_norm": 3.4336817264556885, "learning_rate": 0.0002775700934579439, "loss": 1.0873, "step": 494 }, { "epoch": 0.52, "grad_norm": 2.1345937252044678, "learning_rate": 0.00027745327102803735, "loss": 1.6094, "step": 495 }, { "epoch": 0.52, "grad_norm": 0.7259533405303955, "learning_rate": 0.0002773364485981308, "loss": 0.1039, "step": 496 }, { "epoch": 0.52, "grad_norm": 4.417773246765137, "learning_rate": 0.0002772196261682243, "loss": 1.4269, "step": 497 }, { "epoch": 0.52, "grad_norm": 1.8780008554458618, "learning_rate": 0.00027710280373831775, "loss": 0.6249, "step": 498 }, { "epoch": 0.52, "grad_norm": 1.9605576992034912, "learning_rate": 0.00027698598130841115, "loss": 0.6416, "step": 499 }, { "epoch": 0.52, "grad_norm": 0.7016693353652954, "learning_rate": 0.00027686915887850465, "loss": 0.2955, "step": 500 }, { "epoch": 0.52, "grad_norm": 0.8871127367019653, "learning_rate": 0.0002767523364485981, "loss": 0.3411, "step": 501 }, { "epoch": 0.53, "grad_norm": 0.952772319316864, "learning_rate": 0.00027663551401869155, "loss": 0.4015, "step": 502 }, { "epoch": 0.53, "grad_norm": 3.231271982192993, "learning_rate": 0.000276518691588785, "loss": 1.223, "step": 503 }, { "epoch": 0.53, "grad_norm": 2.837109088897705, "learning_rate": 0.0002764018691588785, "loss": 0.8736, "step": 504 }, { "epoch": 0.53, "grad_norm": 2.7296323776245117, "learning_rate": 0.00027628504672897196, "loss": 0.568, "step": 505 }, { "epoch": 0.53, "grad_norm": 2.1369681358337402, "learning_rate": 0.0002761682242990654, "loss": 0.3843, "step": 506 }, { "epoch": 0.53, "grad_norm": 2.714029312133789, "learning_rate": 0.00027605140186915886, "loss": 0.8895, "step": 507 }, { "epoch": 0.53, "grad_norm": 2.8727917671203613, "learning_rate": 0.0002759345794392523, "loss": 0.6678, "step": 508 }, { "epoch": 0.53, "grad_norm": 1.3814454078674316, "learning_rate": 0.00027581775700934576, "loss": 0.4698, "step": 509 }, { 
"epoch": 0.53, "grad_norm": 2.3573546409606934, "learning_rate": 0.0002757009345794392, "loss": 0.775, "step": 510 }, { "epoch": 0.53, "grad_norm": 2.1534595489501953, "learning_rate": 0.0002755841121495327, "loss": 0.788, "step": 511 }, { "epoch": 0.54, "grad_norm": 1.0166823863983154, "learning_rate": 0.00027546728971962616, "loss": 0.3544, "step": 512 }, { "epoch": 0.54, "grad_norm": 1.865272879600525, "learning_rate": 0.0002753504672897196, "loss": 0.7045, "step": 513 }, { "epoch": 0.54, "grad_norm": 3.6572630405426025, "learning_rate": 0.00027523364485981306, "loss": 1.1575, "step": 514 }, { "epoch": 0.54, "grad_norm": 3.012207269668579, "learning_rate": 0.0002751168224299065, "loss": 1.0693, "step": 515 }, { "epoch": 0.54, "grad_norm": 1.9867669343948364, "learning_rate": 0.00027499999999999996, "loss": 0.4059, "step": 516 }, { "epoch": 0.54, "grad_norm": 3.223353385925293, "learning_rate": 0.0002748831775700934, "loss": 1.9971, "step": 517 }, { "epoch": 0.54, "grad_norm": 1.2805993556976318, "learning_rate": 0.00027476635514018686, "loss": 0.508, "step": 518 }, { "epoch": 0.54, "grad_norm": 1.3479033708572388, "learning_rate": 0.00027464953271028037, "loss": 0.3285, "step": 519 }, { "epoch": 0.54, "grad_norm": 2.865412950515747, "learning_rate": 0.0002745327102803738, "loss": 1.4067, "step": 520 }, { "epoch": 0.54, "grad_norm": 2.9801173210144043, "learning_rate": 0.00027441588785046727, "loss": 1.2826, "step": 521 }, { "epoch": 0.55, "grad_norm": 0.8976773619651794, "learning_rate": 0.0002742990654205607, "loss": 0.3444, "step": 522 }, { "epoch": 0.55, "grad_norm": 3.2174181938171387, "learning_rate": 0.00027418224299065417, "loss": 0.3907, "step": 523 }, { "epoch": 0.55, "grad_norm": 2.3877856731414795, "learning_rate": 0.0002740654205607476, "loss": 1.284, "step": 524 }, { "epoch": 0.55, "grad_norm": 2.1452724933624268, "learning_rate": 0.00027394859813084107, "loss": 1.33, "step": 525 }, { "epoch": 0.55, "grad_norm": 3.878735065460205, "learning_rate": 
0.00027383177570093457, "loss": 1.112, "step": 526 }, { "epoch": 0.55, "grad_norm": 1.091610312461853, "learning_rate": 0.000273714953271028, "loss": 0.3708, "step": 527 }, { "epoch": 0.55, "grad_norm": 2.5223939418792725, "learning_rate": 0.00027359813084112147, "loss": 1.6085, "step": 528 }, { "epoch": 0.55, "grad_norm": 3.6348628997802734, "learning_rate": 0.0002734813084112149, "loss": 0.8705, "step": 529 }, { "epoch": 0.55, "grad_norm": 2.8594553470611572, "learning_rate": 0.0002733644859813084, "loss": 0.7917, "step": 530 }, { "epoch": 0.56, "grad_norm": 2.1773569583892822, "learning_rate": 0.0002732476635514018, "loss": 0.8118, "step": 531 }, { "epoch": 0.56, "grad_norm": 4.174500942230225, "learning_rate": 0.00027313084112149527, "loss": 0.5933, "step": 532 }, { "epoch": 0.56, "grad_norm": 3.555126428604126, "learning_rate": 0.0002730140186915888, "loss": 0.7971, "step": 533 }, { "epoch": 0.56, "grad_norm": 3.0811665058135986, "learning_rate": 0.0002728971962616822, "loss": 1.4148, "step": 534 }, { "epoch": 0.56, "grad_norm": 3.6521191596984863, "learning_rate": 0.0002727803738317757, "loss": 0.9921, "step": 535 }, { "epoch": 0.56, "grad_norm": 1.9518895149230957, "learning_rate": 0.0002726635514018691, "loss": 1.5573, "step": 536 }, { "epoch": 0.56, "grad_norm": 3.6719624996185303, "learning_rate": 0.0002725467289719626, "loss": 0.8119, "step": 537 }, { "epoch": 0.56, "grad_norm": 1.9216612577438354, "learning_rate": 0.0002724299065420561, "loss": 1.417, "step": 538 }, { "epoch": 0.56, "grad_norm": 2.0820119380950928, "learning_rate": 0.0002723130841121495, "loss": 0.9631, "step": 539 }, { "epoch": 0.56, "grad_norm": 2.7720649242401123, "learning_rate": 0.000272196261682243, "loss": 1.3626, "step": 540 }, { "epoch": 0.57, "grad_norm": 3.1084446907043457, "learning_rate": 0.00027207943925233643, "loss": 0.8393, "step": 541 }, { "epoch": 0.57, "grad_norm": 1.0460463762283325, "learning_rate": 0.0002719626168224299, "loss": 0.1851, "step": 542 }, { "epoch": 
0.57, "grad_norm": 2.8795809745788574, "learning_rate": 0.00027184579439252333, "loss": 1.0474, "step": 543 }, { "epoch": 0.57, "grad_norm": 2.6006479263305664, "learning_rate": 0.0002717289719626168, "loss": 1.9008, "step": 544 }, { "epoch": 0.57, "grad_norm": 2.1522631645202637, "learning_rate": 0.0002716121495327103, "loss": 1.2351, "step": 545 }, { "epoch": 0.57, "grad_norm": 1.7561458349227905, "learning_rate": 0.00027149532710280373, "loss": 0.8284, "step": 546 }, { "epoch": 0.57, "grad_norm": 1.9102766513824463, "learning_rate": 0.00027137850467289713, "loss": 0.7709, "step": 547 }, { "epoch": 0.57, "grad_norm": 2.9740517139434814, "learning_rate": 0.00027126168224299063, "loss": 0.9322, "step": 548 }, { "epoch": 0.57, "grad_norm": 2.9720640182495117, "learning_rate": 0.0002711448598130841, "loss": 0.5249, "step": 549 }, { "epoch": 0.58, "grad_norm": 1.711076259613037, "learning_rate": 0.00027102803738317753, "loss": 0.8303, "step": 550 }, { "epoch": 0.58, "grad_norm": 1.2298800945281982, "learning_rate": 0.000270911214953271, "loss": 0.1259, "step": 551 }, { "epoch": 0.58, "grad_norm": 2.824721336364746, "learning_rate": 0.0002707943925233645, "loss": 1.2054, "step": 552 }, { "epoch": 0.58, "grad_norm": 2.455997943878174, "learning_rate": 0.00027067757009345794, "loss": 0.7108, "step": 553 }, { "epoch": 0.58, "grad_norm": 1.940439224243164, "learning_rate": 0.0002705607476635514, "loss": 1.4464, "step": 554 }, { "epoch": 0.58, "grad_norm": 1.046644926071167, "learning_rate": 0.00027044392523364484, "loss": 0.3161, "step": 555 }, { "epoch": 0.58, "grad_norm": 3.7850840091705322, "learning_rate": 0.0002703271028037383, "loss": 0.4476, "step": 556 }, { "epoch": 0.58, "grad_norm": 0.6609066724777222, "learning_rate": 0.00027021028037383174, "loss": 0.186, "step": 557 }, { "epoch": 0.58, "grad_norm": 2.8339734077453613, "learning_rate": 0.0002700934579439252, "loss": 1.0351, "step": 558 }, { "epoch": 0.58, "grad_norm": 0.947851836681366, "learning_rate": 
0.0002699766355140187, "loss": 0.4941, "step": 559 }, { "epoch": 0.59, "grad_norm": 0.7194287776947021, "learning_rate": 0.00026985981308411214, "loss": 0.3142, "step": 560 }, { "epoch": 0.59, "grad_norm": 3.0309903621673584, "learning_rate": 0.0002697429906542056, "loss": 0.9159, "step": 561 }, { "epoch": 0.59, "grad_norm": 0.7473992109298706, "learning_rate": 0.00026962616822429904, "loss": 0.3141, "step": 562 }, { "epoch": 0.59, "grad_norm": 0.604888916015625, "learning_rate": 0.0002695093457943925, "loss": 0.2259, "step": 563 }, { "epoch": 0.59, "grad_norm": 1.9733377695083618, "learning_rate": 0.00026939252336448594, "loss": 0.6048, "step": 564 }, { "epoch": 0.59, "grad_norm": 2.3935818672180176, "learning_rate": 0.0002692757009345794, "loss": 1.1154, "step": 565 }, { "epoch": 0.59, "grad_norm": 2.5244064331054688, "learning_rate": 0.00026915887850467284, "loss": 1.4576, "step": 566 }, { "epoch": 0.59, "grad_norm": 2.2174429893493652, "learning_rate": 0.00026904205607476635, "loss": 1.3647, "step": 567 }, { "epoch": 0.59, "grad_norm": 1.750571370124817, "learning_rate": 0.0002689252336448598, "loss": 0.7841, "step": 568 }, { "epoch": 0.6, "grad_norm": 3.9296715259552, "learning_rate": 0.00026880841121495325, "loss": 0.8488, "step": 569 }, { "epoch": 0.6, "grad_norm": 4.912085056304932, "learning_rate": 0.0002686915887850467, "loss": 1.8655, "step": 570 }, { "epoch": 0.6, "grad_norm": 2.35219407081604, "learning_rate": 0.00026857476635514015, "loss": 0.6427, "step": 571 }, { "epoch": 0.6, "grad_norm": 3.13024640083313, "learning_rate": 0.0002684579439252336, "loss": 1.5838, "step": 572 }, { "epoch": 0.6, "grad_norm": 1.217462182044983, "learning_rate": 0.00026834112149532705, "loss": 0.3112, "step": 573 }, { "epoch": 0.6, "grad_norm": 1.472783088684082, "learning_rate": 0.00026822429906542055, "loss": 0.4344, "step": 574 }, { "epoch": 0.6, "grad_norm": 3.9528167247772217, "learning_rate": 0.000268107476635514, "loss": 0.8929, "step": 575 }, { "epoch": 0.6, 
"grad_norm": 2.462442398071289, "learning_rate": 0.00026799065420560745, "loss": 1.1492, "step": 576 }, { "epoch": 0.6, "grad_norm": 1.3671787977218628, "learning_rate": 0.0002678738317757009, "loss": 0.3359, "step": 577 }, { "epoch": 0.6, "grad_norm": 2.802870750427246, "learning_rate": 0.0002677570093457944, "loss": 1.2908, "step": 578 }, { "epoch": 0.61, "grad_norm": 2.338261127471924, "learning_rate": 0.0002676401869158878, "loss": 0.8661, "step": 579 }, { "epoch": 0.61, "grad_norm": 2.6252553462982178, "learning_rate": 0.00026752336448598125, "loss": 1.606, "step": 580 }, { "epoch": 0.61, "grad_norm": 0.6777186989784241, "learning_rate": 0.00026740654205607476, "loss": 0.2579, "step": 581 }, { "epoch": 0.61, "grad_norm": 0.9596633315086365, "learning_rate": 0.0002672897196261682, "loss": 0.462, "step": 582 }, { "epoch": 0.61, "grad_norm": 2.5880768299102783, "learning_rate": 0.00026717289719626166, "loss": 0.9047, "step": 583 }, { "epoch": 0.61, "grad_norm": 2.7881953716278076, "learning_rate": 0.0002670560747663551, "loss": 1.3014, "step": 584 }, { "epoch": 0.61, "grad_norm": 3.6334471702575684, "learning_rate": 0.00026693925233644856, "loss": 0.4504, "step": 585 }, { "epoch": 0.61, "grad_norm": 2.4941344261169434, "learning_rate": 0.00026682242990654206, "loss": 0.6579, "step": 586 }, { "epoch": 0.61, "grad_norm": 1.56283438205719, "learning_rate": 0.00026670560747663546, "loss": 1.3118, "step": 587 }, { "epoch": 0.62, "grad_norm": 3.000277280807495, "learning_rate": 0.00026658878504672896, "loss": 0.7094, "step": 588 }, { "epoch": 0.62, "grad_norm": 3.8379180431365967, "learning_rate": 0.0002664719626168224, "loss": 0.9054, "step": 589 }, { "epoch": 0.62, "grad_norm": 1.7205324172973633, "learning_rate": 0.00026635514018691586, "loss": 0.751, "step": 590 }, { "epoch": 0.62, "grad_norm": 1.9362393617630005, "learning_rate": 0.0002662383177570093, "loss": 0.4933, "step": 591 }, { "epoch": 0.62, "grad_norm": 0.944587230682373, "learning_rate": 
0.00026612149532710276, "loss": 0.3332, "step": 592 }, { "epoch": 0.62, "grad_norm": 1.3814319372177124, "learning_rate": 0.00026600467289719627, "loss": 0.4593, "step": 593 }, { "epoch": 0.62, "grad_norm": 1.5680646896362305, "learning_rate": 0.0002658878504672897, "loss": 1.4059, "step": 594 }, { "epoch": 0.62, "grad_norm": 0.6818034052848816, "learning_rate": 0.0002657710280373831, "loss": 0.1153, "step": 595 }, { "epoch": 0.62, "grad_norm": 1.9206401109695435, "learning_rate": 0.0002656542056074766, "loss": 1.141, "step": 596 }, { "epoch": 0.62, "grad_norm": 2.1888787746429443, "learning_rate": 0.00026553738317757007, "loss": 1.0324, "step": 597 }, { "epoch": 0.63, "grad_norm": 2.396777629852295, "learning_rate": 0.0002654205607476635, "loss": 0.9237, "step": 598 }, { "epoch": 0.63, "grad_norm": 3.9854557514190674, "learning_rate": 0.00026530373831775697, "loss": 1.5611, "step": 599 }, { "epoch": 0.63, "grad_norm": 1.558633804321289, "learning_rate": 0.00026518691588785047, "loss": 0.6527, "step": 600 }, { "epoch": 0.63, "eval_loss": 1.24161958694458, "eval_runtime": 1.0124, "eval_samples_per_second": 4.939, "eval_steps_per_second": 0.988, "step": 600 }, { "epoch": 0.63, "grad_norm": 0.5544018745422363, "learning_rate": 0.0002650700934579439, "loss": 0.2054, "step": 601 }, { "epoch": 0.63, "grad_norm": 1.5441477298736572, "learning_rate": 0.00026495327102803737, "loss": 0.5178, "step": 602 }, { "epoch": 0.63, "grad_norm": 1.9024848937988281, "learning_rate": 0.0002648364485981308, "loss": 1.1805, "step": 603 }, { "epoch": 0.63, "grad_norm": 1.5047481060028076, "learning_rate": 0.00026471962616822427, "loss": 0.4179, "step": 604 }, { "epoch": 0.63, "grad_norm": 2.946559190750122, "learning_rate": 0.0002646028037383177, "loss": 0.6404, "step": 605 }, { "epoch": 0.63, "grad_norm": 2.839164972305298, "learning_rate": 0.00026448598130841117, "loss": 1.4949, "step": 606 }, { "epoch": 0.63, "grad_norm": 2.933732509613037, "learning_rate": 0.0002643691588785047, 
"loss": 1.1352, "step": 607 }, { "epoch": 0.64, "grad_norm": 2.644740581512451, "learning_rate": 0.0002642523364485981, "loss": 0.9017, "step": 608 }, { "epoch": 0.64, "grad_norm": 5.920925140380859, "learning_rate": 0.0002641355140186916, "loss": 0.9778, "step": 609 }, { "epoch": 0.64, "grad_norm": 2.580047369003296, "learning_rate": 0.000264018691588785, "loss": 0.3734, "step": 610 }, { "epoch": 0.64, "grad_norm": 1.5725473165512085, "learning_rate": 0.0002639018691588785, "loss": 0.4728, "step": 611 }, { "epoch": 0.64, "grad_norm": 2.36690092086792, "learning_rate": 0.0002637850467289719, "loss": 0.8096, "step": 612 }, { "epoch": 0.64, "grad_norm": 3.609687328338623, "learning_rate": 0.0002636682242990654, "loss": 1.2741, "step": 613 }, { "epoch": 0.64, "grad_norm": 1.9025781154632568, "learning_rate": 0.0002635514018691588, "loss": 1.2263, "step": 614 }, { "epoch": 0.64, "grad_norm": 2.570065975189209, "learning_rate": 0.00026343457943925233, "loss": 1.6307, "step": 615 }, { "epoch": 0.64, "grad_norm": 0.7822004556655884, "learning_rate": 0.0002633177570093458, "loss": 0.1984, "step": 616 }, { "epoch": 0.65, "grad_norm": 3.2356181144714355, "learning_rate": 0.00026320093457943923, "loss": 0.6621, "step": 617 }, { "epoch": 0.65, "grad_norm": 3.7617156505584717, "learning_rate": 0.0002630841121495327, "loss": 1.6087, "step": 618 }, { "epoch": 0.65, "grad_norm": 1.9174844026565552, "learning_rate": 0.00026296728971962613, "loss": 0.6119, "step": 619 }, { "epoch": 0.65, "grad_norm": 1.9553015232086182, "learning_rate": 0.0002628504672897196, "loss": 0.6097, "step": 620 }, { "epoch": 0.65, "grad_norm": 2.008237838745117, "learning_rate": 0.00026273364485981303, "loss": 1.0158, "step": 621 }, { "epoch": 0.65, "grad_norm": 3.6738736629486084, "learning_rate": 0.00026261682242990653, "loss": 1.8953, "step": 622 }, { "epoch": 0.65, "grad_norm": 1.6950339078903198, "learning_rate": 0.0002625, "loss": 0.6117, "step": 623 }, { "epoch": 0.65, "grad_norm": 
2.8285133838653564, "learning_rate": 0.00026238317757009343, "loss": 1.5104, "step": 624 }, { "epoch": 0.65, "grad_norm": 1.8922878503799438, "learning_rate": 0.0002622663551401869, "loss": 1.0823, "step": 625 }, { "epoch": 0.65, "grad_norm": 2.4725558757781982, "learning_rate": 0.0002621495327102804, "loss": 1.0039, "step": 626 }, { "epoch": 0.66, "grad_norm": 0.8886311054229736, "learning_rate": 0.00026203271028037384, "loss": 0.223, "step": 627 }, { "epoch": 0.66, "grad_norm": 1.8503968715667725, "learning_rate": 0.00026191588785046723, "loss": 0.3353, "step": 628 }, { "epoch": 0.66, "grad_norm": 2.4176552295684814, "learning_rate": 0.00026179906542056074, "loss": 1.174, "step": 629 }, { "epoch": 0.66, "grad_norm": 4.0846028327941895, "learning_rate": 0.0002616822429906542, "loss": 1.167, "step": 630 }, { "epoch": 0.66, "grad_norm": 2.3127601146698, "learning_rate": 0.00026156542056074764, "loss": 1.3337, "step": 631 }, { "epoch": 0.66, "grad_norm": 2.930818796157837, "learning_rate": 0.0002614485981308411, "loss": 1.1385, "step": 632 }, { "epoch": 0.66, "grad_norm": 1.258216381072998, "learning_rate": 0.00026133177570093454, "loss": 0.2404, "step": 633 }, { "epoch": 0.66, "grad_norm": 3.8267745971679688, "learning_rate": 0.00026121495327102804, "loss": 1.6167, "step": 634 }, { "epoch": 0.66, "grad_norm": 1.8974788188934326, "learning_rate": 0.0002610981308411215, "loss": 1.0328, "step": 635 }, { "epoch": 0.67, "grad_norm": 0.77346271276474, "learning_rate": 0.00026098130841121494, "loss": 0.3266, "step": 636 }, { "epoch": 0.67, "grad_norm": 2.7831459045410156, "learning_rate": 0.0002608644859813084, "loss": 1.4077, "step": 637 }, { "epoch": 0.67, "grad_norm": 2.6431167125701904, "learning_rate": 0.00026074766355140184, "loss": 0.6833, "step": 638 }, { "epoch": 0.67, "grad_norm": 0.6087179780006409, "learning_rate": 0.0002606308411214953, "loss": 0.2766, "step": 639 }, { "epoch": 0.67, "grad_norm": 2.9367430210113525, "learning_rate": 0.00026051401869158874, 
"loss": 0.9431, "step": 640 }, { "epoch": 0.67, "grad_norm": 0.7643646001815796, "learning_rate": 0.00026039719626168225, "loss": 0.266, "step": 641 }, { "epoch": 0.67, "grad_norm": 2.9407589435577393, "learning_rate": 0.0002602803738317757, "loss": 1.8693, "step": 642 }, { "epoch": 0.67, "grad_norm": 2.003830671310425, "learning_rate": 0.00026016355140186915, "loss": 1.502, "step": 643 }, { "epoch": 0.67, "grad_norm": 5.822230339050293, "learning_rate": 0.0002600467289719626, "loss": 1.3887, "step": 644 }, { "epoch": 0.67, "grad_norm": 2.560053586959839, "learning_rate": 0.00025992990654205605, "loss": 1.0752, "step": 645 }, { "epoch": 0.68, "grad_norm": 2.014336109161377, "learning_rate": 0.0002598130841121495, "loss": 1.1494, "step": 646 }, { "epoch": 0.68, "grad_norm": 2.923731803894043, "learning_rate": 0.00025969626168224295, "loss": 1.881, "step": 647 }, { "epoch": 0.68, "grad_norm": 3.4779105186462402, "learning_rate": 0.00025957943925233645, "loss": 1.3292, "step": 648 }, { "epoch": 0.68, "grad_norm": 2.266974449157715, "learning_rate": 0.0002594626168224299, "loss": 0.8172, "step": 649 }, { "epoch": 0.68, "grad_norm": 2.011704921722412, "learning_rate": 0.00025934579439252335, "loss": 1.0187, "step": 650 }, { "epoch": 0.68, "grad_norm": 3.450458526611328, "learning_rate": 0.0002592289719626168, "loss": 0.7546, "step": 651 }, { "epoch": 0.68, "grad_norm": 2.7575178146362305, "learning_rate": 0.00025911214953271025, "loss": 0.3802, "step": 652 }, { "epoch": 0.68, "grad_norm": 2.205942392349243, "learning_rate": 0.0002589953271028037, "loss": 0.7156, "step": 653 }, { "epoch": 0.68, "grad_norm": 1.2393170595169067, "learning_rate": 0.00025887850467289715, "loss": 0.3533, "step": 654 }, { "epoch": 0.69, "grad_norm": 1.4706906080245972, "learning_rate": 0.00025876168224299066, "loss": 0.213, "step": 655 }, { "epoch": 0.69, "grad_norm": 2.9835875034332275, "learning_rate": 0.0002586448598130841, "loss": 0.9362, "step": 656 }, { "epoch": 0.69, "grad_norm": 
2.087411880493164, "learning_rate": 0.00025852803738317756, "loss": 0.9816, "step": 657 }, { "epoch": 0.69, "grad_norm": 1.7548537254333496, "learning_rate": 0.000258411214953271, "loss": 1.2555, "step": 658 }, { "epoch": 0.69, "grad_norm": 1.9126662015914917, "learning_rate": 0.00025829439252336446, "loss": 0.4827, "step": 659 }, { "epoch": 0.69, "grad_norm": 2.2377541065216064, "learning_rate": 0.0002581775700934579, "loss": 0.8133, "step": 660 }, { "epoch": 0.69, "grad_norm": 1.1898380517959595, "learning_rate": 0.00025806074766355136, "loss": 0.3985, "step": 661 }, { "epoch": 0.69, "grad_norm": 3.593562602996826, "learning_rate": 0.0002579439252336448, "loss": 1.1507, "step": 662 }, { "epoch": 0.69, "grad_norm": 1.7690136432647705, "learning_rate": 0.0002578271028037383, "loss": 0.6489, "step": 663 }, { "epoch": 0.69, "grad_norm": 3.863168478012085, "learning_rate": 0.00025771028037383176, "loss": 1.0941, "step": 664 }, { "epoch": 0.7, "grad_norm": 4.265000820159912, "learning_rate": 0.0002575934579439252, "loss": 1.8224, "step": 665 }, { "epoch": 0.7, "grad_norm": 3.0160512924194336, "learning_rate": 0.00025747663551401866, "loss": 0.3632, "step": 666 }, { "epoch": 0.7, "grad_norm": 2.9630157947540283, "learning_rate": 0.00025735981308411216, "loss": 0.8955, "step": 667 }, { "epoch": 0.7, "grad_norm": 3.4230360984802246, "learning_rate": 0.00025724299065420556, "loss": 0.9785, "step": 668 }, { "epoch": 0.7, "grad_norm": 4.195234775543213, "learning_rate": 0.000257126168224299, "loss": 1.3986, "step": 669 }, { "epoch": 0.7, "grad_norm": 1.4523710012435913, "learning_rate": 0.0002570093457943925, "loss": 0.6776, "step": 670 }, { "epoch": 0.7, "grad_norm": 1.4690325260162354, "learning_rate": 0.00025689252336448597, "loss": 0.249, "step": 671 }, { "epoch": 0.7, "grad_norm": 2.279630184173584, "learning_rate": 0.0002567757009345794, "loss": 0.7246, "step": 672 }, { "epoch": 0.7, "grad_norm": 4.060863971710205, "learning_rate": 0.00025665887850467287, "loss": 
1.2851, "step": 673 }, { "epoch": 0.71, "grad_norm": 1.2301100492477417, "learning_rate": 0.00025654205607476637, "loss": 1.0589, "step": 674 }, { "epoch": 0.71, "grad_norm": 2.657299757003784, "learning_rate": 0.0002564252336448598, "loss": 0.43, "step": 675 }, { "epoch": 0.71, "grad_norm": 2.5239768028259277, "learning_rate": 0.0002563084112149532, "loss": 0.751, "step": 676 }, { "epoch": 0.71, "grad_norm": 2.668548583984375, "learning_rate": 0.0002561915887850467, "loss": 0.828, "step": 677 }, { "epoch": 0.71, "grad_norm": 1.8801578283309937, "learning_rate": 0.00025607476635514017, "loss": 0.3485, "step": 678 }, { "epoch": 0.71, "grad_norm": 2.409930944442749, "learning_rate": 0.0002559579439252336, "loss": 1.4811, "step": 679 }, { "epoch": 0.71, "grad_norm": 1.8931938409805298, "learning_rate": 0.00025584112149532707, "loss": 1.2942, "step": 680 }, { "epoch": 0.71, "grad_norm": 1.7943670749664307, "learning_rate": 0.0002557242990654205, "loss": 0.0962, "step": 681 }, { "epoch": 0.71, "grad_norm": 3.3645403385162354, "learning_rate": 0.000255607476635514, "loss": 1.0911, "step": 682 }, { "epoch": 0.71, "grad_norm": 0.8726574778556824, "learning_rate": 0.0002554906542056075, "loss": 0.4142, "step": 683 }, { "epoch": 0.72, "grad_norm": 2.9188904762268066, "learning_rate": 0.0002553738317757009, "loss": 0.7505, "step": 684 }, { "epoch": 0.72, "grad_norm": 0.5008811950683594, "learning_rate": 0.0002552570093457944, "loss": 0.1469, "step": 685 }, { "epoch": 0.72, "grad_norm": 2.0353548526763916, "learning_rate": 0.0002551401869158878, "loss": 0.7272, "step": 686 }, { "epoch": 0.72, "grad_norm": 1.0340296030044556, "learning_rate": 0.0002550233644859813, "loss": 0.5369, "step": 687 }, { "epoch": 0.72, "grad_norm": 0.5980859398841858, "learning_rate": 0.0002549065420560747, "loss": 0.2287, "step": 688 }, { "epoch": 0.72, "grad_norm": 1.8595726490020752, "learning_rate": 0.00025478971962616823, "loss": 1.4049, "step": 689 }, { "epoch": 0.72, "grad_norm": 
3.352480173110962, "learning_rate": 0.0002546728971962617, "loss": 0.4344, "step": 690 }, { "epoch": 0.72, "grad_norm": 4.3725690841674805, "learning_rate": 0.00025455607476635513, "loss": 0.9403, "step": 691 }, { "epoch": 0.72, "grad_norm": 3.2894887924194336, "learning_rate": 0.0002544392523364486, "loss": 0.7964, "step": 692 }, { "epoch": 0.72, "grad_norm": 1.6135333776474, "learning_rate": 0.00025432242990654203, "loss": 0.7688, "step": 693 }, { "epoch": 0.73, "grad_norm": 2.0678396224975586, "learning_rate": 0.0002542056074766355, "loss": 1.6217, "step": 694 }, { "epoch": 0.73, "grad_norm": 1.9601709842681885, "learning_rate": 0.00025408878504672893, "loss": 0.994, "step": 695 }, { "epoch": 0.73, "grad_norm": 2.180920362472534, "learning_rate": 0.00025397196261682243, "loss": 0.6805, "step": 696 }, { "epoch": 0.73, "grad_norm": 0.7055142521858215, "learning_rate": 0.0002538551401869159, "loss": 0.2653, "step": 697 }, { "epoch": 0.73, "grad_norm": 1.6646941900253296, "learning_rate": 0.00025373831775700933, "loss": 0.4899, "step": 698 }, { "epoch": 0.73, "grad_norm": 0.7786800265312195, "learning_rate": 0.0002536214953271028, "loss": 0.3756, "step": 699 }, { "epoch": 0.73, "grad_norm": 1.5901905298233032, "learning_rate": 0.00025350467289719623, "loss": 0.6204, "step": 700 }, { "epoch": 0.73, "grad_norm": 1.7284669876098633, "learning_rate": 0.0002533878504672897, "loss": 0.4973, "step": 701 }, { "epoch": 0.73, "grad_norm": 3.1285464763641357, "learning_rate": 0.00025327102803738313, "loss": 0.7694, "step": 702 }, { "epoch": 0.74, "grad_norm": 2.5928149223327637, "learning_rate": 0.00025315420560747664, "loss": 1.1837, "step": 703 }, { "epoch": 0.74, "grad_norm": 2.4394235610961914, "learning_rate": 0.0002530373831775701, "loss": 0.7965, "step": 704 }, { "epoch": 0.74, "grad_norm": 2.4615914821624756, "learning_rate": 0.00025292056074766354, "loss": 1.0362, "step": 705 }, { "epoch": 0.74, "grad_norm": 0.9794842600822449, "learning_rate": 0.000252803738317757, 
"loss": 0.476, "step": 706 }, { "epoch": 0.74, "grad_norm": 2.6198465824127197, "learning_rate": 0.00025268691588785044, "loss": 0.9868, "step": 707 }, { "epoch": 0.74, "grad_norm": 1.645184874534607, "learning_rate": 0.0002525700934579439, "loss": 0.6718, "step": 708 }, { "epoch": 0.74, "grad_norm": 4.293039321899414, "learning_rate": 0.00025245327102803734, "loss": 0.8782, "step": 709 }, { "epoch": 0.74, "grad_norm": 1.1102802753448486, "learning_rate": 0.0002523364485981308, "loss": 0.5605, "step": 710 }, { "epoch": 0.74, "grad_norm": 0.9872053861618042, "learning_rate": 0.0002522196261682243, "loss": 0.2716, "step": 711 }, { "epoch": 0.74, "grad_norm": 4.878575801849365, "learning_rate": 0.00025210280373831774, "loss": 1.51, "step": 712 }, { "epoch": 0.75, "grad_norm": 1.1407004594802856, "learning_rate": 0.0002519859813084112, "loss": 0.5516, "step": 713 }, { "epoch": 0.75, "grad_norm": 2.4332990646362305, "learning_rate": 0.00025186915887850464, "loss": 0.9987, "step": 714 }, { "epoch": 0.75, "grad_norm": 3.410146713256836, "learning_rate": 0.00025175233644859815, "loss": 1.5265, "step": 715 }, { "epoch": 0.75, "grad_norm": 1.5623661279678345, "learning_rate": 0.00025163551401869154, "loss": 0.9784, "step": 716 }, { "epoch": 0.75, "grad_norm": 3.8849399089813232, "learning_rate": 0.000251518691588785, "loss": 1.1001, "step": 717 }, { "epoch": 0.75, "grad_norm": 3.7529332637786865, "learning_rate": 0.0002514018691588785, "loss": 0.7718, "step": 718 }, { "epoch": 0.75, "grad_norm": 2.7470431327819824, "learning_rate": 0.00025128504672897195, "loss": 0.7242, "step": 719 }, { "epoch": 0.75, "grad_norm": 2.720810651779175, "learning_rate": 0.0002511682242990654, "loss": 1.4097, "step": 720 }, { "epoch": 0.75, "grad_norm": 2.660722255706787, "learning_rate": 0.00025105140186915885, "loss": 1.1479, "step": 721 }, { "epoch": 0.76, "grad_norm": 3.5043423175811768, "learning_rate": 0.00025093457943925235, "loss": 0.6971, "step": 722 }, { "epoch": 0.76, "grad_norm": 
0.6985735297203064, "learning_rate": 0.0002508177570093458, "loss": 0.2465, "step": 723 }, { "epoch": 0.76, "grad_norm": 3.065187692642212, "learning_rate": 0.0002507009345794392, "loss": 1.2275, "step": 724 }, { "epoch": 0.76, "grad_norm": 1.846989631652832, "learning_rate": 0.0002505841121495327, "loss": 0.584, "step": 725 }, { "epoch": 0.76, "grad_norm": 1.5561527013778687, "learning_rate": 0.00025046728971962615, "loss": 0.8527, "step": 726 }, { "epoch": 0.76, "grad_norm": 2.3260703086853027, "learning_rate": 0.0002503504672897196, "loss": 1.0349, "step": 727 }, { "epoch": 0.76, "grad_norm": 1.7104542255401611, "learning_rate": 0.00025023364485981305, "loss": 0.5936, "step": 728 }, { "epoch": 0.76, "grad_norm": 1.5694267749786377, "learning_rate": 0.0002501168224299065, "loss": 1.0465, "step": 729 }, { "epoch": 0.76, "grad_norm": 3.052154302597046, "learning_rate": 0.00025, "loss": 1.0695, "step": 730 }, { "epoch": 0.76, "grad_norm": 2.267850160598755, "learning_rate": 0.00024988317757009346, "loss": 1.1448, "step": 731 }, { "epoch": 0.77, "grad_norm": 0.9715405702590942, "learning_rate": 0.0002497663551401869, "loss": 0.5502, "step": 732 }, { "epoch": 0.77, "grad_norm": 3.080495834350586, "learning_rate": 0.00024964953271028036, "loss": 0.8377, "step": 733 }, { "epoch": 0.77, "grad_norm": 3.0935518741607666, "learning_rate": 0.0002495327102803738, "loss": 1.665, "step": 734 }, { "epoch": 0.77, "grad_norm": 0.7347010970115662, "learning_rate": 0.00024941588785046726, "loss": 0.288, "step": 735 }, { "epoch": 0.77, "grad_norm": 1.334194302558899, "learning_rate": 0.0002492990654205607, "loss": 0.3979, "step": 736 }, { "epoch": 0.77, "grad_norm": 2.544018507003784, "learning_rate": 0.0002491822429906542, "loss": 0.8903, "step": 737 }, { "epoch": 0.77, "grad_norm": 2.9076032638549805, "learning_rate": 0.00024906542056074766, "loss": 0.7581, "step": 738 }, { "epoch": 0.77, "grad_norm": 2.0532596111297607, "learning_rate": 0.0002489485981308411, "loss": 0.5799, 
"step": 739 }, { "epoch": 0.77, "grad_norm": 1.9696563482284546, "learning_rate": 0.00024883177570093456, "loss": 1.4261, "step": 740 }, { "epoch": 0.78, "grad_norm": 0.7314748764038086, "learning_rate": 0.000248714953271028, "loss": 0.2073, "step": 741 }, { "epoch": 0.78, "grad_norm": 1.2788918018341064, "learning_rate": 0.00024859813084112146, "loss": 0.3712, "step": 742 }, { "epoch": 0.78, "grad_norm": 2.2528998851776123, "learning_rate": 0.0002484813084112149, "loss": 1.7037, "step": 743 }, { "epoch": 0.78, "grad_norm": 3.076824903488159, "learning_rate": 0.0002483644859813084, "loss": 1.1481, "step": 744 }, { "epoch": 0.78, "grad_norm": 1.8832570314407349, "learning_rate": 0.00024824766355140186, "loss": 0.6691, "step": 745 }, { "epoch": 0.78, "grad_norm": 4.096408843994141, "learning_rate": 0.0002481308411214953, "loss": 1.2458, "step": 746 }, { "epoch": 0.78, "grad_norm": 1.913481593132019, "learning_rate": 0.00024801401869158876, "loss": 0.921, "step": 747 }, { "epoch": 0.78, "grad_norm": 2.3440444469451904, "learning_rate": 0.0002478971962616822, "loss": 0.9811, "step": 748 }, { "epoch": 0.78, "grad_norm": 2.1785082817077637, "learning_rate": 0.00024778037383177566, "loss": 0.5827, "step": 749 }, { "epoch": 0.78, "grad_norm": 3.120669364929199, "learning_rate": 0.0002476635514018691, "loss": 0.992, "step": 750 }, { "epoch": 0.79, "grad_norm": 3.2538716793060303, "learning_rate": 0.0002475467289719626, "loss": 1.2138, "step": 751 }, { "epoch": 0.79, "grad_norm": 1.8802791833877563, "learning_rate": 0.00024742990654205607, "loss": 1.4769, "step": 752 }, { "epoch": 0.79, "grad_norm": 1.9248284101486206, "learning_rate": 0.0002473130841121495, "loss": 1.3245, "step": 753 }, { "epoch": 0.79, "grad_norm": 2.0179731845855713, "learning_rate": 0.00024719626168224297, "loss": 1.1622, "step": 754 }, { "epoch": 0.79, "grad_norm": 1.760973334312439, "learning_rate": 0.0002470794392523364, "loss": 0.6696, "step": 755 }, { "epoch": 0.79, "grad_norm": 2.4388394355773926, 
"learning_rate": 0.00024696261682242987, "loss": 1.0924, "step": 756 }, { "epoch": 0.79, "grad_norm": 3.125169277191162, "learning_rate": 0.0002468457943925233, "loss": 1.508, "step": 757 }, { "epoch": 0.79, "grad_norm": 1.1705702543258667, "learning_rate": 0.00024672897196261677, "loss": 0.4995, "step": 758 }, { "epoch": 0.79, "grad_norm": 1.662678837776184, "learning_rate": 0.0002466121495327103, "loss": 0.4772, "step": 759 }, { "epoch": 0.79, "grad_norm": 3.0994110107421875, "learning_rate": 0.0002464953271028037, "loss": 0.6918, "step": 760 }, { "epoch": 0.8, "grad_norm": 3.3928942680358887, "learning_rate": 0.0002463785046728972, "loss": 1.7424, "step": 761 }, { "epoch": 0.8, "grad_norm": 2.1945111751556396, "learning_rate": 0.0002462616822429906, "loss": 1.1678, "step": 762 }, { "epoch": 0.8, "grad_norm": 0.6065562963485718, "learning_rate": 0.00024614485981308413, "loss": 0.2749, "step": 763 }, { "epoch": 0.8, "grad_norm": 2.162224292755127, "learning_rate": 0.0002460280373831775, "loss": 0.9568, "step": 764 }, { "epoch": 0.8, "grad_norm": 1.2459497451782227, "learning_rate": 0.000245911214953271, "loss": 0.7103, "step": 765 }, { "epoch": 0.8, "grad_norm": 2.7798285484313965, "learning_rate": 0.0002457943925233645, "loss": 0.5845, "step": 766 }, { "epoch": 0.8, "grad_norm": 1.8737719058990479, "learning_rate": 0.00024567757009345793, "loss": 0.9293, "step": 767 }, { "epoch": 0.8, "grad_norm": 2.1505696773529053, "learning_rate": 0.0002455607476635514, "loss": 0.5254, "step": 768 }, { "epoch": 0.8, "grad_norm": 2.1278975009918213, "learning_rate": 0.00024544392523364483, "loss": 0.6887, "step": 769 }, { "epoch": 0.81, "grad_norm": 3.0058064460754395, "learning_rate": 0.00024532710280373833, "loss": 1.4686, "step": 770 }, { "epoch": 0.81, "grad_norm": 7.39130163192749, "learning_rate": 0.0002452102803738318, "loss": 0.84, "step": 771 }, { "epoch": 0.81, "grad_norm": 3.1862969398498535, "learning_rate": 0.0002450934579439252, "loss": 0.8424, "step": 772 }, { 
"epoch": 0.81, "grad_norm": 1.3539414405822754, "learning_rate": 0.0002449766355140187, "loss": 0.5938, "step": 773 }, { "epoch": 0.81, "grad_norm": 1.706716537475586, "learning_rate": 0.00024485981308411213, "loss": 0.5478, "step": 774 }, { "epoch": 0.81, "grad_norm": 3.8543996810913086, "learning_rate": 0.0002447429906542056, "loss": 0.8018, "step": 775 }, { "epoch": 0.81, "grad_norm": 2.0526278018951416, "learning_rate": 0.00024462616822429903, "loss": 0.6896, "step": 776 }, { "epoch": 0.81, "grad_norm": 3.7325849533081055, "learning_rate": 0.0002445093457943925, "loss": 1.2247, "step": 777 }, { "epoch": 0.81, "grad_norm": 4.3046183586120605, "learning_rate": 0.000244392523364486, "loss": 1.2575, "step": 778 }, { "epoch": 0.81, "grad_norm": 2.125836133956909, "learning_rate": 0.00024427570093457944, "loss": 0.7927, "step": 779 }, { "epoch": 0.82, "grad_norm": 0.7354124784469604, "learning_rate": 0.0002441588785046729, "loss": 0.317, "step": 780 }, { "epoch": 0.82, "grad_norm": 1.628983736038208, "learning_rate": 0.00024404205607476634, "loss": 0.5974, "step": 781 }, { "epoch": 0.82, "grad_norm": 2.533229351043701, "learning_rate": 0.0002439252336448598, "loss": 1.4416, "step": 782 }, { "epoch": 0.82, "grad_norm": 2.980332851409912, "learning_rate": 0.00024380841121495324, "loss": 1.5009, "step": 783 }, { "epoch": 0.82, "grad_norm": 0.8960031867027283, "learning_rate": 0.00024369158878504671, "loss": 0.2601, "step": 784 }, { "epoch": 0.82, "grad_norm": 1.8643038272857666, "learning_rate": 0.00024357476635514016, "loss": 1.589, "step": 785 }, { "epoch": 0.82, "grad_norm": 3.299959897994995, "learning_rate": 0.00024345794392523364, "loss": 1.6819, "step": 786 }, { "epoch": 0.82, "grad_norm": 1.8638948202133179, "learning_rate": 0.0002433411214953271, "loss": 1.3736, "step": 787 }, { "epoch": 0.82, "grad_norm": 1.6240087747573853, "learning_rate": 0.00024322429906542051, "loss": 0.5334, "step": 788 }, { "epoch": 0.83, "grad_norm": 3.438859224319458, "learning_rate": 
0.000243107476635514, "loss": 1.2165, "step": 789 }, { "epoch": 0.83, "grad_norm": 1.9611045122146606, "learning_rate": 0.00024299065420560744, "loss": 1.4199, "step": 790 }, { "epoch": 0.83, "grad_norm": 2.787391185760498, "learning_rate": 0.00024287383177570092, "loss": 1.1579, "step": 791 }, { "epoch": 0.83, "grad_norm": 2.4074761867523193, "learning_rate": 0.00024275700934579437, "loss": 1.5847, "step": 792 }, { "epoch": 0.83, "grad_norm": 1.7211910486221313, "learning_rate": 0.00024264018691588785, "loss": 0.6404, "step": 793 }, { "epoch": 0.83, "grad_norm": 2.036355495452881, "learning_rate": 0.0002425233644859813, "loss": 0.4518, "step": 794 }, { "epoch": 0.83, "grad_norm": 2.337662696838379, "learning_rate": 0.00024240654205607477, "loss": 0.8751, "step": 795 }, { "epoch": 0.83, "grad_norm": 3.0157761573791504, "learning_rate": 0.0002422897196261682, "loss": 0.6199, "step": 796 }, { "epoch": 0.83, "grad_norm": 2.2106847763061523, "learning_rate": 0.00024217289719626165, "loss": 1.3694, "step": 797 }, { "epoch": 0.83, "grad_norm": 1.1246968507766724, "learning_rate": 0.00024205607476635512, "loss": 0.4661, "step": 798 }, { "epoch": 0.84, "grad_norm": 2.3180928230285645, "learning_rate": 0.00024193925233644857, "loss": 1.1047, "step": 799 }, { "epoch": 0.84, "grad_norm": 4.011066913604736, "learning_rate": 0.00024182242990654205, "loss": 0.7958, "step": 800 }, { "epoch": 0.84, "grad_norm": 4.280853271484375, "learning_rate": 0.0002417056074766355, "loss": 1.0047, "step": 801 }, { "epoch": 0.84, "grad_norm": 3.310791492462158, "learning_rate": 0.00024158878504672895, "loss": 1.691, "step": 802 }, { "epoch": 0.84, "grad_norm": 2.7300541400909424, "learning_rate": 0.00024147196261682243, "loss": 1.4916, "step": 803 }, { "epoch": 0.84, "grad_norm": 4.2915143966674805, "learning_rate": 0.00024135514018691585, "loss": 0.9508, "step": 804 }, { "epoch": 0.84, "grad_norm": 2.642038583755493, "learning_rate": 0.00024123831775700933, "loss": 0.835, "step": 805 }, { 
"epoch": 0.84, "grad_norm": 2.139812707901001, "learning_rate": 0.00024112149532710278, "loss": 0.8397, "step": 806 }, { "epoch": 0.84, "grad_norm": 2.194612979888916, "learning_rate": 0.00024100467289719623, "loss": 1.2641, "step": 807 }, { "epoch": 0.85, "grad_norm": 2.043593168258667, "learning_rate": 0.0002408878504672897, "loss": 1.1955, "step": 808 }, { "epoch": 0.85, "grad_norm": 3.5014655590057373, "learning_rate": 0.00024077102803738315, "loss": 1.1901, "step": 809 }, { "epoch": 0.85, "grad_norm": 3.0221776962280273, "learning_rate": 0.00024065420560747663, "loss": 1.5222, "step": 810 }, { "epoch": 0.85, "grad_norm": 2.3791370391845703, "learning_rate": 0.00024053738317757008, "loss": 1.2012, "step": 811 }, { "epoch": 0.85, "grad_norm": 2.600904941558838, "learning_rate": 0.0002404205607476635, "loss": 1.5508, "step": 812 }, { "epoch": 0.85, "grad_norm": 1.9494956731796265, "learning_rate": 0.00024030373831775698, "loss": 0.8289, "step": 813 }, { "epoch": 0.85, "grad_norm": 2.927809715270996, "learning_rate": 0.00024018691588785043, "loss": 1.2377, "step": 814 }, { "epoch": 0.85, "grad_norm": 1.0436631441116333, "learning_rate": 0.0002400700934579439, "loss": 0.2766, "step": 815 }, { "epoch": 0.85, "grad_norm": 2.220791816711426, "learning_rate": 0.00023995327102803736, "loss": 1.1935, "step": 816 }, { "epoch": 0.85, "grad_norm": 3.4562952518463135, "learning_rate": 0.00023983644859813084, "loss": 0.9728, "step": 817 }, { "epoch": 0.86, "grad_norm": 3.344489812850952, "learning_rate": 0.00023971962616822429, "loss": 0.9271, "step": 818 }, { "epoch": 0.86, "grad_norm": 3.47554874420166, "learning_rate": 0.00023960280373831776, "loss": 0.7369, "step": 819 }, { "epoch": 0.86, "grad_norm": 0.6083333492279053, "learning_rate": 0.00023948598130841119, "loss": 0.2156, "step": 820 }, { "epoch": 0.86, "grad_norm": 2.2916417121887207, "learning_rate": 0.00023936915887850464, "loss": 0.5969, "step": 821 }, { "epoch": 0.86, "grad_norm": 2.7046704292297363, 
"learning_rate": 0.0002392523364485981, "loss": 1.4117, "step": 822 }, { "epoch": 0.86, "grad_norm": 2.682845115661621, "learning_rate": 0.00023913551401869156, "loss": 0.7092, "step": 823 }, { "epoch": 0.86, "grad_norm": 2.9161324501037598, "learning_rate": 0.00023901869158878504, "loss": 1.6658, "step": 824 }, { "epoch": 0.86, "grad_norm": 3.6389951705932617, "learning_rate": 0.0002389018691588785, "loss": 1.0424, "step": 825 }, { "epoch": 0.86, "grad_norm": 2.222813606262207, "learning_rate": 0.00023878504672897194, "loss": 1.8298, "step": 826 }, { "epoch": 0.87, "grad_norm": 0.769364058971405, "learning_rate": 0.00023866822429906542, "loss": 0.3196, "step": 827 }, { "epoch": 0.87, "grad_norm": 2.443896770477295, "learning_rate": 0.00023855140186915884, "loss": 1.2312, "step": 828 }, { "epoch": 0.87, "grad_norm": 2.5431735515594482, "learning_rate": 0.00023843457943925232, "loss": 1.0854, "step": 829 }, { "epoch": 0.87, "grad_norm": 2.4142019748687744, "learning_rate": 0.00023831775700934577, "loss": 0.7535, "step": 830 }, { "epoch": 0.87, "grad_norm": 3.1519904136657715, "learning_rate": 0.00023820093457943922, "loss": 1.633, "step": 831 }, { "epoch": 0.87, "grad_norm": 2.1021811962127686, "learning_rate": 0.0002380841121495327, "loss": 1.1532, "step": 832 }, { "epoch": 0.87, "grad_norm": 3.130340099334717, "learning_rate": 0.00023796728971962615, "loss": 0.9221, "step": 833 }, { "epoch": 0.87, "grad_norm": 3.2165324687957764, "learning_rate": 0.00023785046728971962, "loss": 1.0475, "step": 834 }, { "epoch": 0.87, "grad_norm": 3.920137882232666, "learning_rate": 0.00023773364485981307, "loss": 1.4406, "step": 835 }, { "epoch": 0.87, "grad_norm": 2.199028253555298, "learning_rate": 0.0002376168224299065, "loss": 0.945, "step": 836 }, { "epoch": 0.88, "grad_norm": 2.366101026535034, "learning_rate": 0.00023749999999999997, "loss": 1.2456, "step": 837 }, { "epoch": 0.88, "grad_norm": 2.0884835720062256, "learning_rate": 0.00023738317757009342, "loss": 1.371, 
"step": 838 }, { "epoch": 0.88, "grad_norm": 3.546804666519165, "learning_rate": 0.0002372663551401869, "loss": 1.0952, "step": 839 }, { "epoch": 0.88, "grad_norm": 1.8506627082824707, "learning_rate": 0.00023714953271028035, "loss": 1.1875, "step": 840 }, { "epoch": 0.88, "grad_norm": 3.8292133808135986, "learning_rate": 0.00023703271028037383, "loss": 1.2783, "step": 841 }, { "epoch": 0.88, "grad_norm": 4.448188781738281, "learning_rate": 0.00023691588785046728, "loss": 1.0957, "step": 842 }, { "epoch": 0.88, "grad_norm": 2.644364595413208, "learning_rate": 0.00023679906542056075, "loss": 0.4811, "step": 843 }, { "epoch": 0.88, "grad_norm": 1.5675925016403198, "learning_rate": 0.00023668224299065418, "loss": 0.4748, "step": 844 }, { "epoch": 0.88, "grad_norm": 1.530189871788025, "learning_rate": 0.00023656542056074763, "loss": 0.6145, "step": 845 }, { "epoch": 0.88, "grad_norm": 1.5131443738937378, "learning_rate": 0.0002364485981308411, "loss": 1.2528, "step": 846 }, { "epoch": 0.89, "grad_norm": 2.101269245147705, "learning_rate": 0.00023633177570093455, "loss": 0.8421, "step": 847 }, { "epoch": 0.89, "grad_norm": 2.0407848358154297, "learning_rate": 0.00023621495327102803, "loss": 0.6174, "step": 848 }, { "epoch": 0.89, "grad_norm": 1.4575300216674805, "learning_rate": 0.00023609813084112148, "loss": 0.5449, "step": 849 }, { "epoch": 0.89, "grad_norm": 2.286203145980835, "learning_rate": 0.00023598130841121493, "loss": 1.8737, "step": 850 }, { "epoch": 0.89, "grad_norm": 1.9306058883666992, "learning_rate": 0.0002358644859813084, "loss": 0.8433, "step": 851 }, { "epoch": 0.89, "grad_norm": 1.401845932006836, "learning_rate": 0.00023574766355140183, "loss": 0.1541, "step": 852 }, { "epoch": 0.89, "grad_norm": 2.6361005306243896, "learning_rate": 0.0002356308411214953, "loss": 0.7135, "step": 853 }, { "epoch": 0.89, "grad_norm": 2.040468215942383, "learning_rate": 0.00023551401869158876, "loss": 0.5627, "step": 854 }, { "epoch": 0.89, "grad_norm": 
2.8553760051727295, "learning_rate": 0.0002353971962616822, "loss": 1.019, "step": 855 }, { "epoch": 0.9, "grad_norm": 3.2895450592041016, "learning_rate": 0.00023528037383177569, "loss": 0.7987, "step": 856 }, { "epoch": 0.9, "grad_norm": 2.958900213241577, "learning_rate": 0.00023516355140186914, "loss": 1.0162, "step": 857 }, { "epoch": 0.9, "grad_norm": 2.653841972351074, "learning_rate": 0.0002350467289719626, "loss": 0.9121, "step": 858 }, { "epoch": 0.9, "grad_norm": 3.3145627975463867, "learning_rate": 0.00023492990654205606, "loss": 1.6394, "step": 859 }, { "epoch": 0.9, "grad_norm": 3.452100992202759, "learning_rate": 0.00023481308411214949, "loss": 0.9281, "step": 860 }, { "epoch": 0.9, "grad_norm": 2.180034875869751, "learning_rate": 0.00023469626168224296, "loss": 0.4314, "step": 861 }, { "epoch": 0.9, "grad_norm": 3.1082077026367188, "learning_rate": 0.0002345794392523364, "loss": 0.6923, "step": 862 }, { "epoch": 0.9, "grad_norm": 2.2554924488067627, "learning_rate": 0.0002344626168224299, "loss": 0.9983, "step": 863 }, { "epoch": 0.9, "grad_norm": 2.957667589187622, "learning_rate": 0.00023434579439252334, "loss": 1.4724, "step": 864 }, { "epoch": 0.9, "grad_norm": 3.3966641426086426, "learning_rate": 0.00023422897196261682, "loss": 0.8326, "step": 865 }, { "epoch": 0.91, "grad_norm": 2.6179311275482178, "learning_rate": 0.00023411214953271027, "loss": 1.7635, "step": 866 }, { "epoch": 0.91, "grad_norm": 1.6067564487457275, "learning_rate": 0.00023399532710280374, "loss": 0.5869, "step": 867 }, { "epoch": 0.91, "grad_norm": 2.7425529956817627, "learning_rate": 0.00023387850467289717, "loss": 0.9132, "step": 868 }, { "epoch": 0.91, "grad_norm": 2.3865866661071777, "learning_rate": 0.00023376168224299062, "loss": 1.1675, "step": 869 }, { "epoch": 0.91, "grad_norm": 1.8935320377349854, "learning_rate": 0.0002336448598130841, "loss": 0.8528, "step": 870 }, { "epoch": 0.91, "grad_norm": 1.8941283226013184, "learning_rate": 0.00023352803738317754, "loss": 
0.7109, "step": 871 }, { "epoch": 0.91, "grad_norm": 3.0906989574432373, "learning_rate": 0.00023341121495327102, "loss": 0.8982, "step": 872 }, { "epoch": 0.91, "grad_norm": 1.4004254341125488, "learning_rate": 0.00023329439252336447, "loss": 0.637, "step": 873 }, { "epoch": 0.91, "grad_norm": 1.8859999179840088, "learning_rate": 0.00023317757009345792, "loss": 0.8726, "step": 874 }, { "epoch": 0.92, "grad_norm": 2.0776727199554443, "learning_rate": 0.0002330607476635514, "loss": 0.8262, "step": 875 }, { "epoch": 0.92, "grad_norm": 1.6682260036468506, "learning_rate": 0.00023294392523364482, "loss": 0.4234, "step": 876 }, { "epoch": 0.92, "grad_norm": 1.968831181526184, "learning_rate": 0.0002328271028037383, "loss": 0.1693, "step": 877 }, { "epoch": 0.92, "grad_norm": 1.7580511569976807, "learning_rate": 0.00023271028037383175, "loss": 0.7502, "step": 878 }, { "epoch": 0.92, "grad_norm": 2.454692840576172, "learning_rate": 0.0002325934579439252, "loss": 1.7017, "step": 879 }, { "epoch": 0.92, "grad_norm": 2.112985610961914, "learning_rate": 0.00023247663551401868, "loss": 1.0773, "step": 880 }, { "epoch": 0.92, "grad_norm": 4.550245761871338, "learning_rate": 0.00023235981308411213, "loss": 1.2831, "step": 881 }, { "epoch": 0.92, "grad_norm": 2.5282704830169678, "learning_rate": 0.0002322429906542056, "loss": 0.5417, "step": 882 }, { "epoch": 0.92, "grad_norm": 3.580568552017212, "learning_rate": 0.00023212616822429905, "loss": 1.4039, "step": 883 }, { "epoch": 0.92, "grad_norm": 2.3084704875946045, "learning_rate": 0.00023200934579439248, "loss": 1.3537, "step": 884 }, { "epoch": 0.93, "grad_norm": 0.7144612073898315, "learning_rate": 0.00023189252336448595, "loss": 0.2048, "step": 885 }, { "epoch": 0.93, "grad_norm": 2.499814510345459, "learning_rate": 0.0002317757009345794, "loss": 0.9366, "step": 886 }, { "epoch": 0.93, "grad_norm": 3.09336519241333, "learning_rate": 0.00023165887850467288, "loss": 1.7808, "step": 887 }, { "epoch": 0.93, "grad_norm": 
0.5693227648735046, "learning_rate": 0.00023154205607476633, "loss": 0.1463, "step": 888 }, { "epoch": 0.93, "grad_norm": 2.217259645462036, "learning_rate": 0.0002314252336448598, "loss": 1.1722, "step": 889 }, { "epoch": 0.93, "grad_norm": 2.928795337677002, "learning_rate": 0.00023130841121495326, "loss": 0.7112, "step": 890 }, { "epoch": 0.93, "grad_norm": 1.595618486404419, "learning_rate": 0.00023119158878504674, "loss": 0.7386, "step": 891 }, { "epoch": 0.93, "grad_norm": 3.523850679397583, "learning_rate": 0.00023107476635514016, "loss": 1.5385, "step": 892 }, { "epoch": 0.93, "grad_norm": 0.7062773108482361, "learning_rate": 0.0002309579439252336, "loss": 0.3865, "step": 893 }, { "epoch": 0.94, "grad_norm": 1.406078577041626, "learning_rate": 0.00023084112149532709, "loss": 0.3898, "step": 894 }, { "epoch": 0.94, "grad_norm": 1.1150354146957397, "learning_rate": 0.00023072429906542054, "loss": 0.8224, "step": 895 }, { "epoch": 0.94, "grad_norm": 2.0604560375213623, "learning_rate": 0.000230607476635514, "loss": 0.5875, "step": 896 }, { "epoch": 0.94, "grad_norm": 4.368139743804932, "learning_rate": 0.00023049065420560746, "loss": 1.3192, "step": 897 }, { "epoch": 0.94, "grad_norm": 3.1544618606567383, "learning_rate": 0.0002303738317757009, "loss": 0.7825, "step": 898 }, { "epoch": 0.94, "grad_norm": 1.3759732246398926, "learning_rate": 0.0002302570093457944, "loss": 0.4662, "step": 899 }, { "epoch": 0.94, "grad_norm": 1.9174294471740723, "learning_rate": 0.0002301401869158878, "loss": 1.0672, "step": 900 }, { "epoch": 0.94, "eval_loss": 1.1630452871322632, "eval_runtime": 1.0017, "eval_samples_per_second": 4.992, "eval_steps_per_second": 0.998, "step": 900 }, { "epoch": 0.94, "grad_norm": 1.9549384117126465, "learning_rate": 0.0002300233644859813, "loss": 1.0137, "step": 901 }, { "epoch": 0.94, "grad_norm": 1.340488314628601, "learning_rate": 0.00022990654205607474, "loss": 0.5765, "step": 902 }, { "epoch": 0.94, "grad_norm": 2.487250804901123, 
"learning_rate": 0.0002297897196261682, "loss": 1.3398, "step": 903 }, { "epoch": 0.95, "grad_norm": 3.376892328262329, "learning_rate": 0.00022967289719626167, "loss": 1.1591, "step": 904 }, { "epoch": 0.95, "grad_norm": 3.779705286026001, "learning_rate": 0.00022955607476635512, "loss": 1.4591, "step": 905 }, { "epoch": 0.95, "grad_norm": 1.9788891077041626, "learning_rate": 0.0002294392523364486, "loss": 0.9374, "step": 906 }, { "epoch": 0.95, "grad_norm": 1.1561695337295532, "learning_rate": 0.00022932242990654204, "loss": 0.4868, "step": 907 }, { "epoch": 0.95, "grad_norm": 0.8206226825714111, "learning_rate": 0.00022920560747663547, "loss": 0.3994, "step": 908 }, { "epoch": 0.95, "grad_norm": 2.6440787315368652, "learning_rate": 0.00022908878504672894, "loss": 1.3197, "step": 909 }, { "epoch": 0.95, "grad_norm": 1.1878468990325928, "learning_rate": 0.0002289719626168224, "loss": 0.332, "step": 910 }, { "epoch": 0.95, "grad_norm": 2.2395591735839844, "learning_rate": 0.00022885514018691587, "loss": 1.1612, "step": 911 }, { "epoch": 0.95, "grad_norm": 1.8139145374298096, "learning_rate": 0.00022873831775700932, "loss": 1.7079, "step": 912 }, { "epoch": 0.96, "grad_norm": 1.3331412076950073, "learning_rate": 0.0002286214953271028, "loss": 0.67, "step": 913 }, { "epoch": 0.96, "grad_norm": 2.7752532958984375, "learning_rate": 0.00022850467289719625, "loss": 1.2638, "step": 914 }, { "epoch": 0.96, "grad_norm": 2.7987313270568848, "learning_rate": 0.00022838785046728973, "loss": 1.0048, "step": 915 }, { "epoch": 0.96, "grad_norm": 2.3547263145446777, "learning_rate": 0.00022827102803738315, "loss": 0.552, "step": 916 }, { "epoch": 0.96, "grad_norm": 1.9099464416503906, "learning_rate": 0.0002281542056074766, "loss": 0.4864, "step": 917 }, { "epoch": 0.96, "grad_norm": 5.29815149307251, "learning_rate": 0.00022803738317757008, "loss": 1.5697, "step": 918 }, { "epoch": 0.96, "grad_norm": 1.8438161611557007, "learning_rate": 0.00022792056074766353, "loss": 0.6215, 
"step": 919 }, { "epoch": 0.96, "grad_norm": 2.376704454421997, "learning_rate": 0.000227803738317757, "loss": 1.48, "step": 920 }, { "epoch": 0.96, "grad_norm": 2.3474555015563965, "learning_rate": 0.00022768691588785045, "loss": 1.1367, "step": 921 }, { "epoch": 0.96, "grad_norm": 2.982185125350952, "learning_rate": 0.0002275700934579439, "loss": 1.443, "step": 922 }, { "epoch": 0.97, "grad_norm": 4.1197896003723145, "learning_rate": 0.00022745327102803738, "loss": 1.083, "step": 923 }, { "epoch": 0.97, "grad_norm": 2.8206117153167725, "learning_rate": 0.0002273364485981308, "loss": 1.0382, "step": 924 }, { "epoch": 0.97, "grad_norm": 2.9323513507843018, "learning_rate": 0.00022721962616822425, "loss": 1.2924, "step": 925 }, { "epoch": 0.97, "grad_norm": 2.5834381580352783, "learning_rate": 0.00022710280373831773, "loss": 0.9821, "step": 926 }, { "epoch": 0.97, "grad_norm": 2.3738291263580322, "learning_rate": 0.00022698598130841118, "loss": 0.7764, "step": 927 }, { "epoch": 0.97, "grad_norm": 1.9597241878509521, "learning_rate": 0.00022686915887850466, "loss": 0.8203, "step": 928 }, { "epoch": 0.97, "grad_norm": 0.6998361349105835, "learning_rate": 0.0002267523364485981, "loss": 0.1453, "step": 929 }, { "epoch": 0.97, "grad_norm": 2.9886910915374756, "learning_rate": 0.00022663551401869158, "loss": 1.1826, "step": 930 }, { "epoch": 0.97, "grad_norm": 2.020965099334717, "learning_rate": 0.00022651869158878503, "loss": 0.8265, "step": 931 }, { "epoch": 0.97, "grad_norm": 1.5389176607131958, "learning_rate": 0.00022640186915887846, "loss": 0.5353, "step": 932 }, { "epoch": 0.98, "grad_norm": 2.712364912033081, "learning_rate": 0.00022628504672897194, "loss": 0.4676, "step": 933 }, { "epoch": 0.98, "grad_norm": 2.8216958045959473, "learning_rate": 0.00022616822429906539, "loss": 1.0984, "step": 934 }, { "epoch": 0.98, "grad_norm": 0.8422656059265137, "learning_rate": 0.00022605140186915886, "loss": 0.3279, "step": 935 }, { "epoch": 0.98, "grad_norm": 
2.1753759384155273, "learning_rate": 0.0002259345794392523, "loss": 0.5013, "step": 936 }, { "epoch": 0.98, "grad_norm": 2.3775906562805176, "learning_rate": 0.0002258177570093458, "loss": 1.9156, "step": 937 }, { "epoch": 0.98, "grad_norm": 2.1752231121063232, "learning_rate": 0.00022570093457943924, "loss": 1.2206, "step": 938 }, { "epoch": 0.98, "grad_norm": 2.4699342250823975, "learning_rate": 0.00022558411214953272, "loss": 0.9297, "step": 939 }, { "epoch": 0.98, "grad_norm": 0.877358615398407, "learning_rate": 0.00022546728971962614, "loss": 0.3107, "step": 940 }, { "epoch": 0.98, "grad_norm": 0.7622907161712646, "learning_rate": 0.0002253504672897196, "loss": 0.4124, "step": 941 }, { "epoch": 0.99, "grad_norm": 2.6599836349487305, "learning_rate": 0.00022523364485981307, "loss": 1.7029, "step": 942 }, { "epoch": 0.99, "grad_norm": 3.203469753265381, "learning_rate": 0.00022511682242990652, "loss": 1.5104, "step": 943 }, { "epoch": 0.99, "grad_norm": 3.2014012336730957, "learning_rate": 0.000225, "loss": 1.0979, "step": 944 }, { "epoch": 0.99, "grad_norm": 1.75557279586792, "learning_rate": 0.00022488317757009344, "loss": 0.5876, "step": 945 }, { "epoch": 0.99, "grad_norm": 0.8471612930297852, "learning_rate": 0.0002247663551401869, "loss": 0.2429, "step": 946 }, { "epoch": 0.99, "grad_norm": 3.0547866821289062, "learning_rate": 0.00022464953271028037, "loss": 1.34, "step": 947 }, { "epoch": 0.99, "grad_norm": 2.2609493732452393, "learning_rate": 0.00022453271028037382, "loss": 0.9612, "step": 948 }, { "epoch": 0.99, "grad_norm": 0.6535717248916626, "learning_rate": 0.00022441588785046724, "loss": 0.2885, "step": 949 }, { "epoch": 0.99, "grad_norm": 1.7238208055496216, "learning_rate": 0.00022429906542056072, "loss": 0.5047, "step": 950 }, { "epoch": 0.99, "grad_norm": 3.345454216003418, "learning_rate": 0.00022418224299065417, "loss": 0.6963, "step": 951 }, { "epoch": 1.0, "grad_norm": 0.6006823182106018, "learning_rate": 0.00022406542056074765, "loss": 
0.1972, "step": 952 }, { "epoch": 1.0, "grad_norm": 1.3403688669204712, "learning_rate": 0.0002239485981308411, "loss": 0.4257, "step": 953 }, { "epoch": 1.0, "grad_norm": 4.329044342041016, "learning_rate": 0.00022383177570093458, "loss": 1.3304, "step": 954 }, { "epoch": 1.0, "grad_norm": 1.3422555923461914, "learning_rate": 0.00022371495327102803, "loss": 0.3354, "step": 955 }, { "epoch": 1.0, "grad_norm": 3.834536075592041, "learning_rate": 0.0002235981308411215, "loss": 1.2915, "step": 956 }, { "epoch": 1.0, "grad_norm": 2.039134979248047, "learning_rate": 0.00022348130841121493, "loss": 0.7149, "step": 957 }, { "epoch": 1.0, "grad_norm": 1.1390619277954102, "learning_rate": 0.00022336448598130838, "loss": 0.3418, "step": 958 }, { "epoch": 1.0, "grad_norm": 1.4070141315460205, "learning_rate": 0.00022324766355140185, "loss": 0.3751, "step": 959 }, { "epoch": 1.0, "grad_norm": 3.0205116271972656, "learning_rate": 0.0002231308411214953, "loss": 0.9732, "step": 960 }, { "epoch": 1.01, "grad_norm": 2.3249778747558594, "learning_rate": 0.00022301401869158878, "loss": 0.3551, "step": 961 }, { "epoch": 1.01, "grad_norm": 1.534330129623413, "learning_rate": 0.00022289719626168223, "loss": 0.5207, "step": 962 }, { "epoch": 1.01, "grad_norm": 1.9354491233825684, "learning_rate": 0.0002227803738317757, "loss": 0.3616, "step": 963 }, { "epoch": 1.01, "grad_norm": 3.116346836090088, "learning_rate": 0.00022266355140186916, "loss": 1.0175, "step": 964 }, { "epoch": 1.01, "grad_norm": 3.099825859069824, "learning_rate": 0.00022254672897196258, "loss": 0.5644, "step": 965 }, { "epoch": 1.01, "grad_norm": 2.362391710281372, "learning_rate": 0.00022242990654205606, "loss": 0.9214, "step": 966 }, { "epoch": 1.01, "grad_norm": 1.5349410772323608, "learning_rate": 0.0002223130841121495, "loss": 0.4219, "step": 967 }, { "epoch": 1.01, "grad_norm": 2.8551294803619385, "learning_rate": 0.00022219626168224296, "loss": 0.9566, "step": 968 }, { "epoch": 1.01, "grad_norm": 
3.527337074279785, "learning_rate": 0.00022207943925233643, "loss": 1.4504, "step": 969 }, { "epoch": 1.01, "grad_norm": 2.6233699321746826, "learning_rate": 0.00022196261682242988, "loss": 0.5531, "step": 970 }, { "epoch": 1.02, "grad_norm": 2.0485408306121826, "learning_rate": 0.00022184579439252336, "loss": 0.7565, "step": 971 }, { "epoch": 1.02, "grad_norm": 3.941380262374878, "learning_rate": 0.0002217289719626168, "loss": 0.6388, "step": 972 }, { "epoch": 1.02, "grad_norm": 2.8376948833465576, "learning_rate": 0.00022161214953271023, "loss": 0.9422, "step": 973 }, { "epoch": 1.02, "grad_norm": 2.7060530185699463, "learning_rate": 0.0002214953271028037, "loss": 0.8072, "step": 974 }, { "epoch": 1.02, "grad_norm": 2.446294069290161, "learning_rate": 0.00022137850467289716, "loss": 0.5575, "step": 975 }, { "epoch": 1.02, "grad_norm": 1.1146581172943115, "learning_rate": 0.00022126168224299064, "loss": 0.2803, "step": 976 }, { "epoch": 1.02, "grad_norm": 0.5902735590934753, "learning_rate": 0.0002211448598130841, "loss": 0.2146, "step": 977 }, { "epoch": 1.02, "grad_norm": 0.6364985704421997, "learning_rate": 0.00022102803738317757, "loss": 0.2221, "step": 978 }, { "epoch": 1.02, "grad_norm": 2.4132468700408936, "learning_rate": 0.00022091121495327102, "loss": 0.7333, "step": 979 }, { "epoch": 1.03, "grad_norm": 2.3000097274780273, "learning_rate": 0.0002207943925233645, "loss": 1.6004, "step": 980 }, { "epoch": 1.03, "grad_norm": 2.1039211750030518, "learning_rate": 0.00022067757009345792, "loss": 0.7024, "step": 981 }, { "epoch": 1.03, "grad_norm": 1.9659446477890015, "learning_rate": 0.00022056074766355137, "loss": 1.5743, "step": 982 }, { "epoch": 1.03, "grad_norm": 3.8441948890686035, "learning_rate": 0.00022044392523364484, "loss": 0.6775, "step": 983 }, { "epoch": 1.03, "grad_norm": 2.7042272090911865, "learning_rate": 0.0002203271028037383, "loss": 0.5636, "step": 984 }, { "epoch": 1.03, "grad_norm": 3.106860637664795, "learning_rate": 
0.00022021028037383177, "loss": 1.243, "step": 985 }, { "epoch": 1.03, "grad_norm": 1.890007734298706, "learning_rate": 0.00022009345794392522, "loss": 1.2065, "step": 986 }, { "epoch": 1.03, "grad_norm": 3.0000622272491455, "learning_rate": 0.00021997663551401867, "loss": 0.8706, "step": 987 }, { "epoch": 1.03, "grad_norm": 2.43005108833313, "learning_rate": 0.00021985981308411215, "loss": 0.5945, "step": 988 }, { "epoch": 1.03, "grad_norm": 2.614928960800171, "learning_rate": 0.00021974299065420557, "loss": 0.5288, "step": 989 }, { "epoch": 1.04, "grad_norm": 1.0085580348968506, "learning_rate": 0.00021962616822429905, "loss": 0.4928, "step": 990 }, { "epoch": 1.04, "grad_norm": 0.5782190561294556, "learning_rate": 0.0002195093457943925, "loss": 0.1496, "step": 991 }, { "epoch": 1.04, "grad_norm": 0.675377607345581, "learning_rate": 0.00021939252336448595, "loss": 0.2521, "step": 992 }, { "epoch": 1.04, "grad_norm": 0.4882884621620178, "learning_rate": 0.00021927570093457943, "loss": 0.1353, "step": 993 }, { "epoch": 1.04, "grad_norm": 1.384681224822998, "learning_rate": 0.00021915887850467288, "loss": 0.6342, "step": 994 }, { "epoch": 1.04, "grad_norm": 0.8572222590446472, "learning_rate": 0.00021904205607476635, "loss": 0.117, "step": 995 }, { "epoch": 1.04, "grad_norm": 2.631561040878296, "learning_rate": 0.0002189252336448598, "loss": 1.2984, "step": 996 }, { "epoch": 1.04, "grad_norm": 2.5276076793670654, "learning_rate": 0.00021880841121495323, "loss": 0.7352, "step": 997 }, { "epoch": 1.04, "grad_norm": 3.206256151199341, "learning_rate": 0.0002186915887850467, "loss": 1.4104, "step": 998 }, { "epoch": 1.04, "grad_norm": 2.2699246406555176, "learning_rate": 0.00021857476635514015, "loss": 0.5665, "step": 999 }, { "epoch": 1.05, "grad_norm": 2.288726806640625, "learning_rate": 0.00021845794392523363, "loss": 0.696, "step": 1000 }, { "epoch": 1.05, "grad_norm": 2.2717671394348145, "learning_rate": 0.00021834112149532708, "loss": 0.9446, "step": 1001 }, { 
"epoch": 1.05, "grad_norm": 2.451627731323242, "learning_rate": 0.00021822429906542056, "loss": 1.362, "step": 1002 }, { "epoch": 1.05, "grad_norm": 1.8697036504745483, "learning_rate": 0.000218107476635514, "loss": 0.5908, "step": 1003 }, { "epoch": 1.05, "grad_norm": 2.1752212047576904, "learning_rate": 0.00021799065420560748, "loss": 1.2794, "step": 1004 }, { "epoch": 1.05, "grad_norm": 5.133718013763428, "learning_rate": 0.0002178738317757009, "loss": 0.9315, "step": 1005 }, { "epoch": 1.05, "grad_norm": 2.6146345138549805, "learning_rate": 0.00021775700934579436, "loss": 1.3513, "step": 1006 }, { "epoch": 1.05, "grad_norm": 2.957939386367798, "learning_rate": 0.00021764018691588783, "loss": 1.5546, "step": 1007 }, { "epoch": 1.05, "grad_norm": 1.8134377002716064, "learning_rate": 0.00021752336448598128, "loss": 0.6268, "step": 1008 }, { "epoch": 1.06, "grad_norm": 1.2340497970581055, "learning_rate": 0.00021740654205607476, "loss": 0.6106, "step": 1009 }, { "epoch": 1.06, "grad_norm": 2.744544744491577, "learning_rate": 0.0002172897196261682, "loss": 0.8776, "step": 1010 }, { "epoch": 1.06, "grad_norm": 2.7156779766082764, "learning_rate": 0.00021717289719626166, "loss": 1.5869, "step": 1011 }, { "epoch": 1.06, "grad_norm": 1.6292368173599243, "learning_rate": 0.00021705607476635514, "loss": 0.2781, "step": 1012 }, { "epoch": 1.06, "grad_norm": 1.877001166343689, "learning_rate": 0.00021693925233644856, "loss": 0.8583, "step": 1013 }, { "epoch": 1.06, "grad_norm": 0.631164014339447, "learning_rate": 0.00021682242990654204, "loss": 0.1627, "step": 1014 }, { "epoch": 1.06, "grad_norm": 1.1797211170196533, "learning_rate": 0.0002167056074766355, "loss": 0.259, "step": 1015 }, { "epoch": 1.06, "grad_norm": 3.3959903717041016, "learning_rate": 0.00021658878504672894, "loss": 0.4309, "step": 1016 }, { "epoch": 1.06, "grad_norm": 2.6847167015075684, "learning_rate": 0.00021647196261682242, "loss": 1.2204, "step": 1017 }, { "epoch": 1.06, "grad_norm": 
1.9416961669921875, "learning_rate": 0.00021635514018691587, "loss": 1.2186, "step": 1018 }, { "epoch": 1.07, "grad_norm": 1.53370201587677, "learning_rate": 0.00021623831775700934, "loss": 0.3904, "step": 1019 }, { "epoch": 1.07, "grad_norm": 1.584384799003601, "learning_rate": 0.0002161214953271028, "loss": 1.0917, "step": 1020 }, { "epoch": 1.07, "grad_norm": 2.6039652824401855, "learning_rate": 0.00021600467289719622, "loss": 0.7606, "step": 1021 }, { "epoch": 1.07, "grad_norm": 3.1853108406066895, "learning_rate": 0.0002158878504672897, "loss": 0.717, "step": 1022 }, { "epoch": 1.07, "grad_norm": 4.574007987976074, "learning_rate": 0.00021577102803738314, "loss": 1.2157, "step": 1023 }, { "epoch": 1.07, "grad_norm": 2.5888893604278564, "learning_rate": 0.00021565420560747662, "loss": 1.4272, "step": 1024 }, { "epoch": 1.07, "grad_norm": 3.850660800933838, "learning_rate": 0.00021553738317757007, "loss": 0.6831, "step": 1025 }, { "epoch": 1.07, "grad_norm": 2.4923202991485596, "learning_rate": 0.00021542056074766355, "loss": 0.5352, "step": 1026 }, { "epoch": 1.07, "grad_norm": 1.457641839981079, "learning_rate": 0.000215303738317757, "loss": 0.4242, "step": 1027 }, { "epoch": 1.08, "grad_norm": 1.6069073677062988, "learning_rate": 0.00021518691588785047, "loss": 0.5674, "step": 1028 }, { "epoch": 1.08, "grad_norm": 2.628026008605957, "learning_rate": 0.0002150700934579439, "loss": 0.9033, "step": 1029 }, { "epoch": 1.08, "grad_norm": 1.1619964838027954, "learning_rate": 0.00021495327102803735, "loss": 0.1613, "step": 1030 }, { "epoch": 1.08, "grad_norm": 1.593367099761963, "learning_rate": 0.00021483644859813082, "loss": 0.4673, "step": 1031 }, { "epoch": 1.08, "grad_norm": 1.325135588645935, "learning_rate": 0.00021471962616822427, "loss": 0.3284, "step": 1032 }, { "epoch": 1.08, "grad_norm": 2.5256247520446777, "learning_rate": 0.00021460280373831775, "loss": 0.9166, "step": 1033 }, { "epoch": 1.08, "grad_norm": 1.4277769327163696, "learning_rate": 
0.0002144859813084112, "loss": 0.3661, "step": 1034 }, { "epoch": 1.08, "grad_norm": 1.2940905094146729, "learning_rate": 0.00021436915887850465, "loss": 0.6463, "step": 1035 }, { "epoch": 1.08, "grad_norm": 1.9980312585830688, "learning_rate": 0.00021425233644859813, "loss": 1.0043, "step": 1036 }, { "epoch": 1.08, "grad_norm": 2.1811680793762207, "learning_rate": 0.00021413551401869155, "loss": 0.414, "step": 1037 }, { "epoch": 1.09, "grad_norm": 1.60305655002594, "learning_rate": 0.00021401869158878503, "loss": 0.3784, "step": 1038 }, { "epoch": 1.09, "grad_norm": 0.7862429618835449, "learning_rate": 0.00021390186915887848, "loss": 0.2915, "step": 1039 }, { "epoch": 1.09, "grad_norm": 2.8023910522460938, "learning_rate": 0.00021378504672897193, "loss": 0.6625, "step": 1040 }, { "epoch": 1.09, "grad_norm": 2.0663235187530518, "learning_rate": 0.0002136682242990654, "loss": 1.3877, "step": 1041 }, { "epoch": 1.09, "grad_norm": 0.6616790890693665, "learning_rate": 0.00021355140186915886, "loss": 0.226, "step": 1042 }, { "epoch": 1.09, "grad_norm": 2.5566747188568115, "learning_rate": 0.00021343457943925233, "loss": 1.1761, "step": 1043 }, { "epoch": 1.09, "grad_norm": 1.1111620664596558, "learning_rate": 0.00021331775700934578, "loss": 0.4354, "step": 1044 }, { "epoch": 1.09, "grad_norm": 0.5970983505249023, "learning_rate": 0.0002132009345794392, "loss": 0.165, "step": 1045 }, { "epoch": 1.09, "grad_norm": 2.0268211364746094, "learning_rate": 0.00021308411214953268, "loss": 0.6597, "step": 1046 }, { "epoch": 1.1, "grad_norm": 1.6138622760772705, "learning_rate": 0.00021296728971962613, "loss": 1.1535, "step": 1047 }, { "epoch": 1.1, "grad_norm": 1.9670050144195557, "learning_rate": 0.0002128504672897196, "loss": 1.0955, "step": 1048 }, { "epoch": 1.1, "grad_norm": 6.258568286895752, "learning_rate": 0.00021273364485981306, "loss": 1.0739, "step": 1049 }, { "epoch": 1.1, "grad_norm": 2.8623459339141846, "learning_rate": 0.00021261682242990654, "loss": 0.8344, 
"step": 1050 }, { "epoch": 1.1, "grad_norm": 1.135180950164795, "learning_rate": 0.0002125, "loss": 0.6828, "step": 1051 }, { "epoch": 1.1, "grad_norm": 2.476372480392456, "learning_rate": 0.00021238317757009347, "loss": 0.9149, "step": 1052 }, { "epoch": 1.1, "grad_norm": 2.1945157051086426, "learning_rate": 0.0002122663551401869, "loss": 0.701, "step": 1053 }, { "epoch": 1.1, "grad_norm": 2.106142520904541, "learning_rate": 0.00021214953271028034, "loss": 0.7747, "step": 1054 }, { "epoch": 1.1, "grad_norm": 1.4185359477996826, "learning_rate": 0.00021203271028037382, "loss": 0.8606, "step": 1055 }, { "epoch": 1.1, "grad_norm": 0.8948013782501221, "learning_rate": 0.00021191588785046727, "loss": 0.3924, "step": 1056 }, { "epoch": 1.11, "grad_norm": 4.456455230712891, "learning_rate": 0.00021179906542056074, "loss": 0.5109, "step": 1057 }, { "epoch": 1.11, "grad_norm": 2.550096273422241, "learning_rate": 0.0002116822429906542, "loss": 0.6182, "step": 1058 }, { "epoch": 1.11, "grad_norm": 2.1884052753448486, "learning_rate": 0.00021156542056074764, "loss": 1.2729, "step": 1059 }, { "epoch": 1.11, "grad_norm": 1.9526294469833374, "learning_rate": 0.00021144859813084112, "loss": 0.6765, "step": 1060 }, { "epoch": 1.11, "grad_norm": 2.172882318496704, "learning_rate": 0.00021133177570093454, "loss": 1.0866, "step": 1061 }, { "epoch": 1.11, "grad_norm": 2.258068799972534, "learning_rate": 0.00021121495327102802, "loss": 1.1084, "step": 1062 }, { "epoch": 1.11, "grad_norm": 2.720658302307129, "learning_rate": 0.00021109813084112147, "loss": 0.7547, "step": 1063 }, { "epoch": 1.11, "grad_norm": 0.6227821111679077, "learning_rate": 0.00021098130841121492, "loss": 0.2561, "step": 1064 }, { "epoch": 1.11, "grad_norm": 2.7355456352233887, "learning_rate": 0.0002108644859813084, "loss": 1.2223, "step": 1065 }, { "epoch": 1.12, "grad_norm": 0.6639083623886108, "learning_rate": 0.00021074766355140185, "loss": 0.2132, "step": 1066 }, { "epoch": 1.12, "grad_norm": 
2.391815185546875, "learning_rate": 0.00021063084112149532, "loss": 0.5128, "step": 1067 }, { "epoch": 1.12, "grad_norm": 2.4221978187561035, "learning_rate": 0.00021051401869158877, "loss": 1.4154, "step": 1068 }, { "epoch": 1.12, "grad_norm": 2.7561087608337402, "learning_rate": 0.0002103971962616822, "loss": 0.6037, "step": 1069 }, { "epoch": 1.12, "grad_norm": 2.95412540435791, "learning_rate": 0.00021028037383177567, "loss": 0.9519, "step": 1070 }, { "epoch": 1.12, "grad_norm": 2.575559139251709, "learning_rate": 0.00021016355140186912, "loss": 0.5565, "step": 1071 }, { "epoch": 1.12, "grad_norm": 2.1348252296447754, "learning_rate": 0.0002100467289719626, "loss": 1.2106, "step": 1072 }, { "epoch": 1.12, "grad_norm": 3.2099993228912354, "learning_rate": 0.00020992990654205605, "loss": 0.7627, "step": 1073 }, { "epoch": 1.12, "grad_norm": 3.261626958847046, "learning_rate": 0.00020981308411214953, "loss": 0.7699, "step": 1074 }, { "epoch": 1.12, "grad_norm": 1.9946436882019043, "learning_rate": 0.00020969626168224298, "loss": 0.4004, "step": 1075 }, { "epoch": 1.13, "grad_norm": 2.639751672744751, "learning_rate": 0.00020957943925233646, "loss": 0.8801, "step": 1076 }, { "epoch": 1.13, "grad_norm": 2.098203659057617, "learning_rate": 0.00020946261682242988, "loss": 0.6697, "step": 1077 }, { "epoch": 1.13, "grad_norm": 2.1346704959869385, "learning_rate": 0.00020934579439252333, "loss": 0.4131, "step": 1078 }, { "epoch": 1.13, "grad_norm": 2.8900938034057617, "learning_rate": 0.0002092289719626168, "loss": 0.7294, "step": 1079 }, { "epoch": 1.13, "grad_norm": 2.871983289718628, "learning_rate": 0.00020911214953271026, "loss": 1.1347, "step": 1080 }, { "epoch": 1.13, "grad_norm": 1.3414250612258911, "learning_rate": 0.00020899532710280373, "loss": 0.3847, "step": 1081 }, { "epoch": 1.13, "grad_norm": 2.448362350463867, "learning_rate": 0.00020887850467289718, "loss": 1.0003, "step": 1082 }, { "epoch": 1.13, "grad_norm": 2.3160276412963867, "learning_rate": 
0.00020876168224299063, "loss": 1.2056, "step": 1083 }, { "epoch": 1.13, "grad_norm": 0.8403595089912415, "learning_rate": 0.0002086448598130841, "loss": 0.2933, "step": 1084 }, { "epoch": 1.13, "grad_norm": 1.617903709411621, "learning_rate": 0.00020852803738317753, "loss": 0.3853, "step": 1085 }, { "epoch": 1.14, "grad_norm": 2.0691823959350586, "learning_rate": 0.000208411214953271, "loss": 0.7975, "step": 1086 }, { "epoch": 1.14, "grad_norm": 1.9060134887695312, "learning_rate": 0.00020829439252336446, "loss": 0.4843, "step": 1087 }, { "epoch": 1.14, "grad_norm": 1.8235893249511719, "learning_rate": 0.0002081775700934579, "loss": 0.8804, "step": 1088 }, { "epoch": 1.14, "grad_norm": 1.7594454288482666, "learning_rate": 0.0002080607476635514, "loss": 0.669, "step": 1089 }, { "epoch": 1.14, "grad_norm": 3.9268202781677246, "learning_rate": 0.00020794392523364484, "loss": 1.1165, "step": 1090 }, { "epoch": 1.14, "grad_norm": 2.8990890979766846, "learning_rate": 0.00020782710280373831, "loss": 0.6744, "step": 1091 }, { "epoch": 1.14, "grad_norm": 1.999157428741455, "learning_rate": 0.00020771028037383176, "loss": 1.1191, "step": 1092 }, { "epoch": 1.14, "grad_norm": 2.2413084506988525, "learning_rate": 0.0002075934579439252, "loss": 0.8147, "step": 1093 }, { "epoch": 1.14, "grad_norm": 1.5006706714630127, "learning_rate": 0.00020747663551401867, "loss": 0.351, "step": 1094 }, { "epoch": 1.15, "grad_norm": 1.8998149633407593, "learning_rate": 0.00020735981308411212, "loss": 0.5434, "step": 1095 }, { "epoch": 1.15, "grad_norm": 0.9295514822006226, "learning_rate": 0.0002072429906542056, "loss": 0.308, "step": 1096 }, { "epoch": 1.15, "grad_norm": 0.4306630790233612, "learning_rate": 0.00020712616822429904, "loss": 0.1004, "step": 1097 }, { "epoch": 1.15, "grad_norm": 1.194831132888794, "learning_rate": 0.00020700934579439252, "loss": 0.4371, "step": 1098 }, { "epoch": 1.15, "grad_norm": 2.3609731197357178, "learning_rate": 0.00020689252336448597, "loss": 1.1762, 
"step": 1099 }, { "epoch": 1.15, "grad_norm": 1.4672547578811646, "learning_rate": 0.00020677570093457945, "loss": 1.0477, "step": 1100 }, { "epoch": 1.15, "grad_norm": 1.824455976486206, "learning_rate": 0.00020665887850467287, "loss": 0.5816, "step": 1101 }, { "epoch": 1.15, "grad_norm": 2.7696213722229004, "learning_rate": 0.00020654205607476632, "loss": 1.3223, "step": 1102 }, { "epoch": 1.15, "grad_norm": 0.8341705799102783, "learning_rate": 0.0002064252336448598, "loss": 0.28, "step": 1103 }, { "epoch": 1.15, "grad_norm": 0.5999470353126526, "learning_rate": 0.00020630841121495325, "loss": 0.1999, "step": 1104 }, { "epoch": 1.16, "grad_norm": 3.682039499282837, "learning_rate": 0.00020619158878504672, "loss": 0.6262, "step": 1105 }, { "epoch": 1.16, "grad_norm": 3.5219595432281494, "learning_rate": 0.00020607476635514017, "loss": 1.2373, "step": 1106 }, { "epoch": 1.16, "grad_norm": 0.5517832040786743, "learning_rate": 0.00020595794392523362, "loss": 0.1942, "step": 1107 }, { "epoch": 1.16, "grad_norm": 2.029999017715454, "learning_rate": 0.0002058411214953271, "loss": 0.6479, "step": 1108 }, { "epoch": 1.16, "grad_norm": 3.3053765296936035, "learning_rate": 0.00020572429906542052, "loss": 0.3246, "step": 1109 }, { "epoch": 1.16, "grad_norm": 3.070949077606201, "learning_rate": 0.000205607476635514, "loss": 0.8793, "step": 1110 }, { "epoch": 1.16, "grad_norm": 0.68199223279953, "learning_rate": 0.00020549065420560745, "loss": 0.2444, "step": 1111 }, { "epoch": 1.16, "grad_norm": 0.8713790774345398, "learning_rate": 0.0002053738317757009, "loss": 0.4021, "step": 1112 }, { "epoch": 1.16, "grad_norm": 0.6102296710014343, "learning_rate": 0.00020525700934579438, "loss": 0.224, "step": 1113 }, { "epoch": 1.17, "grad_norm": 2.1383678913116455, "learning_rate": 0.00020514018691588783, "loss": 1.1279, "step": 1114 }, { "epoch": 1.17, "grad_norm": 3.3480324745178223, "learning_rate": 0.0002050233644859813, "loss": 0.7442, "step": 1115 }, { "epoch": 1.17, "grad_norm": 
2.174382209777832, "learning_rate": 0.00020490654205607476, "loss": 1.1191, "step": 1116 }, { "epoch": 1.17, "grad_norm": 3.6875200271606445, "learning_rate": 0.00020478971962616818, "loss": 1.2578, "step": 1117 }, { "epoch": 1.17, "grad_norm": 0.9592370986938477, "learning_rate": 0.00020467289719626166, "loss": 0.2917, "step": 1118 }, { "epoch": 1.17, "grad_norm": 2.211015462875366, "learning_rate": 0.0002045560747663551, "loss": 0.4775, "step": 1119 }, { "epoch": 1.17, "grad_norm": 0.5659094452857971, "learning_rate": 0.00020443925233644858, "loss": 0.241, "step": 1120 }, { "epoch": 1.17, "grad_norm": 3.4829139709472656, "learning_rate": 0.00020432242990654203, "loss": 0.7289, "step": 1121 }, { "epoch": 1.17, "grad_norm": 0.6128184199333191, "learning_rate": 0.0002042056074766355, "loss": 0.2248, "step": 1122 }, { "epoch": 1.17, "grad_norm": 3.0875678062438965, "learning_rate": 0.00020408878504672896, "loss": 0.7469, "step": 1123 }, { "epoch": 1.18, "grad_norm": 2.5821774005889893, "learning_rate": 0.00020397196261682244, "loss": 1.7178, "step": 1124 }, { "epoch": 1.18, "grad_norm": 2.0887813568115234, "learning_rate": 0.00020385514018691586, "loss": 0.5692, "step": 1125 }, { "epoch": 1.18, "grad_norm": 5.818206787109375, "learning_rate": 0.0002037383177570093, "loss": 0.7365, "step": 1126 }, { "epoch": 1.18, "grad_norm": 2.984973430633545, "learning_rate": 0.0002036214953271028, "loss": 1.0391, "step": 1127 }, { "epoch": 1.18, "grad_norm": 3.1518402099609375, "learning_rate": 0.00020350467289719624, "loss": 1.0598, "step": 1128 }, { "epoch": 1.18, "grad_norm": 1.685483694076538, "learning_rate": 0.00020338785046728971, "loss": 0.2998, "step": 1129 }, { "epoch": 1.18, "grad_norm": 3.082444429397583, "learning_rate": 0.00020327102803738316, "loss": 1.2295, "step": 1130 }, { "epoch": 1.18, "grad_norm": 2.6049225330352783, "learning_rate": 0.00020315420560747661, "loss": 0.9492, "step": 1131 }, { "epoch": 1.18, "grad_norm": 3.3788952827453613, "learning_rate": 
0.0002030373831775701, "loss": 1.1305, "step": 1132 }, { "epoch": 1.19, "grad_norm": 3.523831367492676, "learning_rate": 0.00020292056074766351, "loss": 0.8953, "step": 1133 }, { "epoch": 1.19, "grad_norm": 2.302276134490967, "learning_rate": 0.000202803738317757, "loss": 1.136, "step": 1134 }, { "epoch": 1.19, "grad_norm": 3.4932010173797607, "learning_rate": 0.00020268691588785044, "loss": 1.1149, "step": 1135 }, { "epoch": 1.19, "grad_norm": 0.7620978355407715, "learning_rate": 0.0002025700934579439, "loss": 0.2841, "step": 1136 }, { "epoch": 1.19, "grad_norm": 3.206010103225708, "learning_rate": 0.00020245327102803737, "loss": 1.2515, "step": 1137 }, { "epoch": 1.19, "grad_norm": 2.4083549976348877, "learning_rate": 0.00020233644859813082, "loss": 1.551, "step": 1138 }, { "epoch": 1.19, "grad_norm": 1.192397952079773, "learning_rate": 0.0002022196261682243, "loss": 0.2744, "step": 1139 }, { "epoch": 1.19, "grad_norm": 2.2741410732269287, "learning_rate": 0.00020210280373831775, "loss": 1.2151, "step": 1140 }, { "epoch": 1.19, "grad_norm": 1.7759387493133545, "learning_rate": 0.00020198598130841117, "loss": 0.2124, "step": 1141 }, { "epoch": 1.19, "grad_norm": 2.2041332721710205, "learning_rate": 0.00020186915887850465, "loss": 1.2591, "step": 1142 }, { "epoch": 1.2, "grad_norm": 2.2854342460632324, "learning_rate": 0.0002017523364485981, "loss": 0.6741, "step": 1143 }, { "epoch": 1.2, "grad_norm": 1.1802997589111328, "learning_rate": 0.00020163551401869157, "loss": 0.6383, "step": 1144 }, { "epoch": 1.2, "grad_norm": 2.4487531185150146, "learning_rate": 0.00020151869158878502, "loss": 1.0766, "step": 1145 }, { "epoch": 1.2, "grad_norm": 2.068978786468506, "learning_rate": 0.0002014018691588785, "loss": 1.0623, "step": 1146 }, { "epoch": 1.2, "grad_norm": 2.6643881797790527, "learning_rate": 0.00020128504672897195, "loss": 1.4822, "step": 1147 }, { "epoch": 1.2, "grad_norm": 3.279334306716919, "learning_rate": 0.00020116822429906543, "loss": 0.274, "step": 1148 
}, { "epoch": 1.2, "grad_norm": 1.5919523239135742, "learning_rate": 0.00020105140186915885, "loss": 0.4191, "step": 1149 }, { "epoch": 1.2, "grad_norm": 2.5224316120147705, "learning_rate": 0.0002009345794392523, "loss": 1.18, "step": 1150 }, { "epoch": 1.2, "grad_norm": 3.791248321533203, "learning_rate": 0.00020081775700934578, "loss": 1.0462, "step": 1151 }, { "epoch": 1.21, "grad_norm": 0.8050941824913025, "learning_rate": 0.00020070093457943923, "loss": 0.3471, "step": 1152 }, { "epoch": 1.21, "grad_norm": 2.283918619155884, "learning_rate": 0.0002005841121495327, "loss": 1.1178, "step": 1153 }, { "epoch": 1.21, "grad_norm": 3.0043535232543945, "learning_rate": 0.00020046728971962616, "loss": 0.8636, "step": 1154 }, { "epoch": 1.21, "grad_norm": 2.247918128967285, "learning_rate": 0.0002003504672897196, "loss": 1.1561, "step": 1155 }, { "epoch": 1.21, "grad_norm": 3.544013023376465, "learning_rate": 0.00020023364485981308, "loss": 0.7743, "step": 1156 }, { "epoch": 1.21, "grad_norm": 0.5524381399154663, "learning_rate": 0.0002001168224299065, "loss": 0.1143, "step": 1157 }, { "epoch": 1.21, "grad_norm": 2.6980960369110107, "learning_rate": 0.00019999999999999998, "loss": 1.0201, "step": 1158 }, { "epoch": 1.21, "grad_norm": 0.896748423576355, "learning_rate": 0.00019988317757009343, "loss": 0.4344, "step": 1159 }, { "epoch": 1.21, "grad_norm": 1.7616729736328125, "learning_rate": 0.00019976635514018688, "loss": 0.4877, "step": 1160 }, { "epoch": 1.21, "grad_norm": 2.6219723224639893, "learning_rate": 0.00019964953271028036, "loss": 1.5149, "step": 1161 }, { "epoch": 1.22, "grad_norm": 0.9278208613395691, "learning_rate": 0.0001995327102803738, "loss": 0.3392, "step": 1162 }, { "epoch": 1.22, "grad_norm": 3.1442532539367676, "learning_rate": 0.0001994158878504673, "loss": 0.8593, "step": 1163 }, { "epoch": 1.22, "grad_norm": 2.008396863937378, "learning_rate": 0.00019929906542056074, "loss": 0.3437, "step": 1164 }, { "epoch": 1.22, "grad_norm": 
0.7166587114334106, "learning_rate": 0.00019918224299065416, "loss": 0.2603, "step": 1165 }, { "epoch": 1.22, "grad_norm": 1.835529088973999, "learning_rate": 0.00019906542056074764, "loss": 0.5484, "step": 1166 }, { "epoch": 1.22, "grad_norm": 1.9410781860351562, "learning_rate": 0.0001989485981308411, "loss": 0.9887, "step": 1167 }, { "epoch": 1.22, "grad_norm": 2.6602988243103027, "learning_rate": 0.00019883177570093456, "loss": 1.1274, "step": 1168 }, { "epoch": 1.22, "grad_norm": 3.1548686027526855, "learning_rate": 0.00019871495327102801, "loss": 1.0373, "step": 1169 }, { "epoch": 1.22, "grad_norm": 2.9196207523345947, "learning_rate": 0.0001985981308411215, "loss": 1.0006, "step": 1170 }, { "epoch": 1.22, "grad_norm": 3.27177095413208, "learning_rate": 0.00019848130841121494, "loss": 0.5959, "step": 1171 }, { "epoch": 1.23, "grad_norm": 2.277813196182251, "learning_rate": 0.00019836448598130842, "loss": 0.7807, "step": 1172 }, { "epoch": 1.23, "grad_norm": 2.142241954803467, "learning_rate": 0.00019824766355140184, "loss": 0.8082, "step": 1173 }, { "epoch": 1.23, "grad_norm": 2.313734292984009, "learning_rate": 0.0001981308411214953, "loss": 1.6829, "step": 1174 }, { "epoch": 1.23, "grad_norm": 1.6561822891235352, "learning_rate": 0.00019801401869158877, "loss": 0.5527, "step": 1175 }, { "epoch": 1.23, "grad_norm": 3.4537465572357178, "learning_rate": 0.00019789719626168222, "loss": 0.3977, "step": 1176 }, { "epoch": 1.23, "grad_norm": 2.100102663040161, "learning_rate": 0.0001977803738317757, "loss": 1.1519, "step": 1177 }, { "epoch": 1.23, "grad_norm": 0.8067005276679993, "learning_rate": 0.00019766355140186915, "loss": 0.292, "step": 1178 }, { "epoch": 1.23, "grad_norm": 2.6419880390167236, "learning_rate": 0.0001975467289719626, "loss": 0.8597, "step": 1179 }, { "epoch": 1.23, "grad_norm": 2.25374174118042, "learning_rate": 0.00019742990654205607, "loss": 1.153, "step": 1180 }, { "epoch": 1.24, "grad_norm": 1.927171230316162, "learning_rate": 
0.0001973130841121495, "loss": 0.7577, "step": 1181 }, { "epoch": 1.24, "grad_norm": 1.136739730834961, "learning_rate": 0.00019719626168224297, "loss": 0.434, "step": 1182 }, { "epoch": 1.24, "grad_norm": 1.9915034770965576, "learning_rate": 0.00019707943925233642, "loss": 0.5844, "step": 1183 }, { "epoch": 1.24, "grad_norm": 0.9362226128578186, "learning_rate": 0.00019696261682242987, "loss": 0.2859, "step": 1184 }, { "epoch": 1.24, "grad_norm": 2.459658622741699, "learning_rate": 0.00019684579439252335, "loss": 0.4476, "step": 1185 }, { "epoch": 1.24, "grad_norm": 2.027477502822876, "learning_rate": 0.0001967289719626168, "loss": 0.6242, "step": 1186 }, { "epoch": 1.24, "grad_norm": 1.1415551900863647, "learning_rate": 0.00019661214953271028, "loss": 0.4766, "step": 1187 }, { "epoch": 1.24, "grad_norm": 1.7337886095046997, "learning_rate": 0.00019649532710280373, "loss": 1.1372, "step": 1188 }, { "epoch": 1.24, "grad_norm": 3.035874366760254, "learning_rate": 0.00019637850467289715, "loss": 0.9431, "step": 1189 }, { "epoch": 1.24, "grad_norm": 2.2807509899139404, "learning_rate": 0.00019626168224299063, "loss": 1.0572, "step": 1190 }, { "epoch": 1.25, "grad_norm": 2.0541138648986816, "learning_rate": 0.00019614485981308408, "loss": 0.9536, "step": 1191 }, { "epoch": 1.25, "grad_norm": 0.9543030261993408, "learning_rate": 0.00019602803738317755, "loss": 0.1971, "step": 1192 }, { "epoch": 1.25, "grad_norm": 1.7279877662658691, "learning_rate": 0.000195911214953271, "loss": 0.4557, "step": 1193 }, { "epoch": 1.25, "grad_norm": 1.7344752550125122, "learning_rate": 0.00019579439252336448, "loss": 0.2517, "step": 1194 }, { "epoch": 1.25, "grad_norm": 1.9353537559509277, "learning_rate": 0.00019567757009345793, "loss": 0.6948, "step": 1195 }, { "epoch": 1.25, "grad_norm": 2.699579954147339, "learning_rate": 0.0001955607476635514, "loss": 1.4253, "step": 1196 }, { "epoch": 1.25, "grad_norm": 3.5536420345306396, "learning_rate": 0.00019544392523364483, "loss": 0.7727, 
"step": 1197 }, { "epoch": 1.25, "grad_norm": 1.5282950401306152, "learning_rate": 0.00019532710280373828, "loss": 1.0087, "step": 1198 }, { "epoch": 1.25, "grad_norm": 1.729099154472351, "learning_rate": 0.00019521028037383176, "loss": 0.5094, "step": 1199 }, { "epoch": 1.26, "grad_norm": 3.1926229000091553, "learning_rate": 0.0001950934579439252, "loss": 1.0685, "step": 1200 }, { "epoch": 1.26, "eval_loss": 1.1658847332000732, "eval_runtime": 1.0094, "eval_samples_per_second": 4.954, "eval_steps_per_second": 0.991, "step": 1200 }, { "epoch": 1.26, "grad_norm": 0.7322415113449097, "learning_rate": 0.0001949766355140187, "loss": 0.2591, "step": 1201 }, { "epoch": 1.26, "grad_norm": 0.7986508011817932, "learning_rate": 0.00019485981308411214, "loss": 0.2158, "step": 1202 }, { "epoch": 1.26, "grad_norm": 2.1974411010742188, "learning_rate": 0.0001947429906542056, "loss": 0.5025, "step": 1203 }, { "epoch": 1.26, "grad_norm": 2.495664358139038, "learning_rate": 0.00019462616822429906, "loss": 0.6207, "step": 1204 }, { "epoch": 1.26, "grad_norm": 1.8991073369979858, "learning_rate": 0.0001945093457943925, "loss": 0.6394, "step": 1205 }, { "epoch": 1.26, "grad_norm": 1.0878992080688477, "learning_rate": 0.00019439252336448596, "loss": 0.4923, "step": 1206 }, { "epoch": 1.26, "grad_norm": 2.0958821773529053, "learning_rate": 0.00019427570093457941, "loss": 1.186, "step": 1207 }, { "epoch": 1.26, "grad_norm": 4.290122032165527, "learning_rate": 0.00019415887850467286, "loss": 1.005, "step": 1208 }, { "epoch": 1.26, "grad_norm": 3.115603446960449, "learning_rate": 0.00019404205607476634, "loss": 0.8839, "step": 1209 }, { "epoch": 1.27, "grad_norm": 2.7535462379455566, "learning_rate": 0.0001939252336448598, "loss": 0.419, "step": 1210 }, { "epoch": 1.27, "grad_norm": 4.466191291809082, "learning_rate": 0.00019380841121495327, "loss": 1.1126, "step": 1211 }, { "epoch": 1.27, "grad_norm": 1.9874646663665771, "learning_rate": 0.00019369158878504672, "loss": 0.8667, "step": 
1212 }, { "epoch": 1.27, "grad_norm": 2.7608489990234375, "learning_rate": 0.00019357476635514014, "loss": 1.2938, "step": 1213 }, { "epoch": 1.27, "grad_norm": 0.9021081328392029, "learning_rate": 0.00019345794392523362, "loss": 0.4965, "step": 1214 }, { "epoch": 1.27, "grad_norm": 1.2415839433670044, "learning_rate": 0.00019334112149532707, "loss": 0.5533, "step": 1215 }, { "epoch": 1.27, "grad_norm": 0.8637053370475769, "learning_rate": 0.00019322429906542055, "loss": 0.3898, "step": 1216 }, { "epoch": 1.27, "grad_norm": 1.9663678407669067, "learning_rate": 0.000193107476635514, "loss": 1.0766, "step": 1217 }, { "epoch": 1.27, "grad_norm": 0.6900842189788818, "learning_rate": 0.00019299065420560747, "loss": 0.2224, "step": 1218 }, { "epoch": 1.28, "grad_norm": 0.5902005434036255, "learning_rate": 0.00019287383177570092, "loss": 0.1778, "step": 1219 }, { "epoch": 1.28, "grad_norm": 2.3478646278381348, "learning_rate": 0.0001927570093457944, "loss": 1.0055, "step": 1220 }, { "epoch": 1.28, "grad_norm": 0.5951477289199829, "learning_rate": 0.00019264018691588782, "loss": 0.2086, "step": 1221 }, { "epoch": 1.28, "grad_norm": 1.806227684020996, "learning_rate": 0.00019252336448598127, "loss": 0.6624, "step": 1222 }, { "epoch": 1.28, "grad_norm": 1.736450433731079, "learning_rate": 0.00019240654205607475, "loss": 1.4586, "step": 1223 }, { "epoch": 1.28, "grad_norm": 2.0015785694122314, "learning_rate": 0.0001922897196261682, "loss": 0.7516, "step": 1224 }, { "epoch": 1.28, "grad_norm": 2.632293701171875, "learning_rate": 0.00019217289719626168, "loss": 0.6397, "step": 1225 }, { "epoch": 1.28, "grad_norm": 2.11899471282959, "learning_rate": 0.00019205607476635513, "loss": 1.7155, "step": 1226 }, { "epoch": 1.28, "grad_norm": 2.5453994274139404, "learning_rate": 0.00019193925233644858, "loss": 0.5191, "step": 1227 }, { "epoch": 1.28, "grad_norm": 2.255399703979492, "learning_rate": 0.00019182242990654205, "loss": 0.6001, "step": 1228 }, { "epoch": 1.29, "grad_norm": 
2.749321937561035, "learning_rate": 0.00019170560747663548, "loss": 0.7474, "step": 1229 }, { "epoch": 1.29, "grad_norm": 2.781606912612915, "learning_rate": 0.00019158878504672895, "loss": 0.7796, "step": 1230 }, { "epoch": 1.29, "grad_norm": 2.328237533569336, "learning_rate": 0.0001914719626168224, "loss": 1.1671, "step": 1231 }, { "epoch": 1.29, "grad_norm": 2.4334988594055176, "learning_rate": 0.00019135514018691585, "loss": 0.6105, "step": 1232 }, { "epoch": 1.29, "grad_norm": 1.9006870985031128, "learning_rate": 0.00019123831775700933, "loss": 0.4648, "step": 1233 }, { "epoch": 1.29, "grad_norm": 2.9073917865753174, "learning_rate": 0.00019112149532710278, "loss": 1.2155, "step": 1234 }, { "epoch": 1.29, "grad_norm": 2.242873430252075, "learning_rate": 0.00019100467289719626, "loss": 0.6119, "step": 1235 }, { "epoch": 1.29, "grad_norm": 1.9779205322265625, "learning_rate": 0.0001908878504672897, "loss": 0.2464, "step": 1236 }, { "epoch": 1.29, "grad_norm": 2.8669989109039307, "learning_rate": 0.00019077102803738313, "loss": 0.7974, "step": 1237 }, { "epoch": 1.29, "grad_norm": 1.161089539527893, "learning_rate": 0.0001906542056074766, "loss": 0.1048, "step": 1238 }, { "epoch": 1.3, "grad_norm": 4.102783679962158, "learning_rate": 0.00019053738317757006, "loss": 0.9183, "step": 1239 }, { "epoch": 1.3, "grad_norm": 2.924055814743042, "learning_rate": 0.00019042056074766354, "loss": 1.4965, "step": 1240 }, { "epoch": 1.3, "grad_norm": 2.143636465072632, "learning_rate": 0.00019030373831775699, "loss": 0.2837, "step": 1241 }, { "epoch": 1.3, "grad_norm": 1.9068852663040161, "learning_rate": 0.00019018691588785046, "loss": 0.9671, "step": 1242 }, { "epoch": 1.3, "grad_norm": 1.4567501544952393, "learning_rate": 0.0001900700934579439, "loss": 0.3306, "step": 1243 }, { "epoch": 1.3, "grad_norm": 2.6074962615966797, "learning_rate": 0.0001899532710280374, "loss": 1.601, "step": 1244 }, { "epoch": 1.3, "grad_norm": 2.3963537216186523, "learning_rate": 
0.0001898364485981308, "loss": 1.1746, "step": 1245 }, { "epoch": 1.3, "grad_norm": 1.6171189546585083, "learning_rate": 0.00018971962616822426, "loss": 0.349, "step": 1246 }, { "epoch": 1.3, "grad_norm": 2.6384449005126953, "learning_rate": 0.00018960280373831774, "loss": 0.8075, "step": 1247 }, { "epoch": 1.31, "grad_norm": 2.3255858421325684, "learning_rate": 0.0001894859813084112, "loss": 1.1681, "step": 1248 }, { "epoch": 1.31, "grad_norm": 0.8400274515151978, "learning_rate": 0.00018936915887850467, "loss": 0.3144, "step": 1249 }, { "epoch": 1.31, "grad_norm": 2.320280075073242, "learning_rate": 0.00018925233644859812, "loss": 0.6442, "step": 1250 }, { "epoch": 1.31, "grad_norm": 0.9876735210418701, "learning_rate": 0.00018913551401869157, "loss": 0.1542, "step": 1251 }, { "epoch": 1.31, "grad_norm": 2.2251205444335938, "learning_rate": 0.00018901869158878504, "loss": 0.9543, "step": 1252 }, { "epoch": 1.31, "grad_norm": 1.0860331058502197, "learning_rate": 0.00018890186915887847, "loss": 0.6192, "step": 1253 }, { "epoch": 1.31, "grad_norm": 4.4009881019592285, "learning_rate": 0.00018878504672897195, "loss": 0.9997, "step": 1254 }, { "epoch": 1.31, "grad_norm": 2.2344605922698975, "learning_rate": 0.0001886682242990654, "loss": 0.5521, "step": 1255 }, { "epoch": 1.31, "grad_norm": 2.8071048259735107, "learning_rate": 0.00018855140186915885, "loss": 0.3418, "step": 1256 }, { "epoch": 1.31, "grad_norm": 2.5023577213287354, "learning_rate": 0.00018843457943925232, "loss": 0.3715, "step": 1257 }, { "epoch": 1.32, "grad_norm": 2.644026517868042, "learning_rate": 0.00018831775700934577, "loss": 0.525, "step": 1258 }, { "epoch": 1.32, "grad_norm": 1.5025291442871094, "learning_rate": 0.00018820093457943925, "loss": 1.0208, "step": 1259 }, { "epoch": 1.32, "grad_norm": 1.4263161420822144, "learning_rate": 0.0001880841121495327, "loss": 0.3544, "step": 1260 }, { "epoch": 1.32, "grad_norm": 1.8181207180023193, "learning_rate": 0.00018796728971962612, "loss": 0.5881, 
"step": 1261 }, { "epoch": 1.32, "grad_norm": 2.331484079360962, "learning_rate": 0.0001878504672897196, "loss": 0.4943, "step": 1262 }, { "epoch": 1.32, "grad_norm": 1.104075312614441, "learning_rate": 0.00018773364485981305, "loss": 0.225, "step": 1263 }, { "epoch": 1.32, "grad_norm": 0.39052748680114746, "learning_rate": 0.00018761682242990653, "loss": 0.1284, "step": 1264 }, { "epoch": 1.32, "grad_norm": 1.3384759426116943, "learning_rate": 0.00018749999999999998, "loss": 0.291, "step": 1265 }, { "epoch": 1.32, "grad_norm": 1.8362799882888794, "learning_rate": 0.00018738317757009345, "loss": 0.7755, "step": 1266 }, { "epoch": 1.33, "grad_norm": 2.271969795227051, "learning_rate": 0.0001872663551401869, "loss": 0.377, "step": 1267 }, { "epoch": 1.33, "grad_norm": 1.036214828491211, "learning_rate": 0.00018714953271028038, "loss": 0.5356, "step": 1268 }, { "epoch": 1.33, "grad_norm": 4.588119029998779, "learning_rate": 0.00018703271028037383, "loss": 1.0096, "step": 1269 }, { "epoch": 1.33, "grad_norm": 1.5631695985794067, "learning_rate": 0.00018691588785046725, "loss": 0.3514, "step": 1270 }, { "epoch": 1.33, "grad_norm": 2.6622061729431152, "learning_rate": 0.00018679906542056073, "loss": 0.8358, "step": 1271 }, { "epoch": 1.33, "grad_norm": 4.314937114715576, "learning_rate": 0.00018668224299065418, "loss": 0.9017, "step": 1272 }, { "epoch": 1.33, "grad_norm": 2.548337459564209, "learning_rate": 0.00018656542056074766, "loss": 1.1176, "step": 1273 }, { "epoch": 1.33, "grad_norm": 2.879739284515381, "learning_rate": 0.0001864485981308411, "loss": 0.7511, "step": 1274 }, { "epoch": 1.33, "grad_norm": 2.590949535369873, "learning_rate": 0.00018633177570093456, "loss": 0.8095, "step": 1275 }, { "epoch": 1.33, "grad_norm": 2.957411766052246, "learning_rate": 0.00018621495327102804, "loss": 0.7659, "step": 1276 }, { "epoch": 1.34, "grad_norm": 2.2279860973358154, "learning_rate": 0.00018609813084112149, "loss": 0.9038, "step": 1277 }, { "epoch": 1.34, "grad_norm": 
0.5968736410140991, "learning_rate": 0.00018598130841121494, "loss": 0.1685, "step": 1278 }, { "epoch": 1.34, "grad_norm": 3.183213472366333, "learning_rate": 0.00018586448598130839, "loss": 1.0396, "step": 1279 }, { "epoch": 1.34, "grad_norm": 1.9355374574661255, "learning_rate": 0.00018574766355140184, "loss": 0.5406, "step": 1280 }, { "epoch": 1.34, "grad_norm": 5.673951625823975, "learning_rate": 0.0001856308411214953, "loss": 1.2144, "step": 1281 }, { "epoch": 1.34, "grad_norm": 3.6509315967559814, "learning_rate": 0.00018551401869158876, "loss": 1.406, "step": 1282 }, { "epoch": 1.34, "grad_norm": 0.6314842700958252, "learning_rate": 0.00018539719626168224, "loss": 0.2197, "step": 1283 }, { "epoch": 1.34, "grad_norm": 2.440687894821167, "learning_rate": 0.0001852803738317757, "loss": 0.7316, "step": 1284 }, { "epoch": 1.34, "grad_norm": 2.5698628425598145, "learning_rate": 0.00018516355140186917, "loss": 0.4897, "step": 1285 }, { "epoch": 1.35, "grad_norm": 2.361003875732422, "learning_rate": 0.0001850467289719626, "loss": 0.7666, "step": 1286 }, { "epoch": 1.35, "grad_norm": 3.423429489135742, "learning_rate": 0.00018492990654205604, "loss": 0.7807, "step": 1287 }, { "epoch": 1.35, "grad_norm": 3.4809789657592773, "learning_rate": 0.00018481308411214952, "loss": 0.5707, "step": 1288 }, { "epoch": 1.35, "grad_norm": 2.4859747886657715, "learning_rate": 0.00018469626168224297, "loss": 1.6028, "step": 1289 }, { "epoch": 1.35, "grad_norm": 1.108879566192627, "learning_rate": 0.00018457943925233644, "loss": 0.545, "step": 1290 }, { "epoch": 1.35, "grad_norm": 2.270004987716675, "learning_rate": 0.0001844626168224299, "loss": 0.6226, "step": 1291 }, { "epoch": 1.35, "grad_norm": 2.700712203979492, "learning_rate": 0.00018434579439252337, "loss": 0.8702, "step": 1292 }, { "epoch": 1.35, "grad_norm": 2.3957457542419434, "learning_rate": 0.00018422897196261682, "loss": 1.3282, "step": 1293 }, { "epoch": 1.35, "grad_norm": 2.5982956886291504, "learning_rate": 
0.00018411214953271024, "loss": 0.6065, "step": 1294 }, { "epoch": 1.35, "grad_norm": 1.1615874767303467, "learning_rate": 0.00018399532710280372, "loss": 0.6769, "step": 1295 }, { "epoch": 1.36, "grad_norm": 2.294102191925049, "learning_rate": 0.00018387850467289717, "loss": 0.4803, "step": 1296 }, { "epoch": 1.36, "grad_norm": 2.3875255584716797, "learning_rate": 0.00018376168224299065, "loss": 0.7764, "step": 1297 }, { "epoch": 1.36, "grad_norm": 2.732203483581543, "learning_rate": 0.0001836448598130841, "loss": 0.768, "step": 1298 }, { "epoch": 1.36, "grad_norm": 1.0977386236190796, "learning_rate": 0.00018352803738317755, "loss": 0.2692, "step": 1299 }, { "epoch": 1.36, "grad_norm": 0.6086471676826477, "learning_rate": 0.00018341121495327103, "loss": 0.1623, "step": 1300 }, { "epoch": 1.36, "grad_norm": 2.480614185333252, "learning_rate": 0.00018329439252336448, "loss": 1.3882, "step": 1301 }, { "epoch": 1.36, "grad_norm": 2.656018018722534, "learning_rate": 0.00018317757009345793, "loss": 0.8735, "step": 1302 }, { "epoch": 1.36, "grad_norm": 3.2506661415100098, "learning_rate": 0.00018306074766355138, "loss": 0.8242, "step": 1303 }, { "epoch": 1.36, "grad_norm": 1.9476683139801025, "learning_rate": 0.00018294392523364483, "loss": 0.8056, "step": 1304 }, { "epoch": 1.37, "grad_norm": 3.344555616378784, "learning_rate": 0.0001828271028037383, "loss": 0.9017, "step": 1305 }, { "epoch": 1.37, "grad_norm": 2.0389673709869385, "learning_rate": 0.00018271028037383175, "loss": 0.9627, "step": 1306 }, { "epoch": 1.37, "grad_norm": 2.904149055480957, "learning_rate": 0.00018259345794392523, "loss": 0.9032, "step": 1307 }, { "epoch": 1.37, "grad_norm": 2.444079875946045, "learning_rate": 0.00018247663551401868, "loss": 1.5189, "step": 1308 }, { "epoch": 1.37, "grad_norm": 4.824917316436768, "learning_rate": 0.00018235981308411216, "loss": 1.1401, "step": 1309 }, { "epoch": 1.37, "grad_norm": 2.3599419593811035, "learning_rate": 0.00018224299065420558, "loss": 0.3766, 
"step": 1310 }, { "epoch": 1.37, "grad_norm": 2.9183833599090576, "learning_rate": 0.00018212616822429903, "loss": 0.5002, "step": 1311 }, { "epoch": 1.37, "grad_norm": 2.920369863510132, "learning_rate": 0.0001820093457943925, "loss": 0.6568, "step": 1312 }, { "epoch": 1.37, "grad_norm": 3.70578932762146, "learning_rate": 0.00018189252336448596, "loss": 1.8708, "step": 1313 }, { "epoch": 1.37, "grad_norm": 4.370184898376465, "learning_rate": 0.00018177570093457944, "loss": 1.1221, "step": 1314 }, { "epoch": 1.38, "grad_norm": 2.540465831756592, "learning_rate": 0.00018165887850467289, "loss": 1.241, "step": 1315 }, { "epoch": 1.38, "grad_norm": 2.349395990371704, "learning_rate": 0.00018154205607476636, "loss": 0.754, "step": 1316 }, { "epoch": 1.38, "grad_norm": 1.9466333389282227, "learning_rate": 0.0001814252336448598, "loss": 0.5155, "step": 1317 }, { "epoch": 1.38, "grad_norm": 1.9178152084350586, "learning_rate": 0.00018130841121495324, "loss": 0.8991, "step": 1318 }, { "epoch": 1.38, "grad_norm": 2.0130553245544434, "learning_rate": 0.0001811915887850467, "loss": 1.0502, "step": 1319 }, { "epoch": 1.38, "grad_norm": 1.6518648862838745, "learning_rate": 0.00018107476635514016, "loss": 0.3683, "step": 1320 }, { "epoch": 1.38, "grad_norm": 1.7037773132324219, "learning_rate": 0.00018095794392523364, "loss": 0.6605, "step": 1321 }, { "epoch": 1.38, "grad_norm": 2.6347455978393555, "learning_rate": 0.0001808411214953271, "loss": 0.5213, "step": 1322 }, { "epoch": 1.38, "grad_norm": 3.909637451171875, "learning_rate": 0.00018072429906542054, "loss": 1.0715, "step": 1323 }, { "epoch": 1.38, "grad_norm": 1.7166997194290161, "learning_rate": 0.00018060747663551402, "loss": 0.4536, "step": 1324 }, { "epoch": 1.39, "grad_norm": 1.9127898216247559, "learning_rate": 0.00018049065420560747, "loss": 0.5231, "step": 1325 }, { "epoch": 1.39, "grad_norm": 2.906681537628174, "learning_rate": 0.00018037383177570092, "loss": 0.8225, "step": 1326 }, { "epoch": 1.39, "grad_norm": 
2.234952926635742, "learning_rate": 0.00018025700934579437, "loss": 0.9737, "step": 1327 }, { "epoch": 1.39, "grad_norm": 2.5723302364349365, "learning_rate": 0.00018014018691588782, "loss": 1.3425, "step": 1328 }, { "epoch": 1.39, "grad_norm": 2.9902420043945312, "learning_rate": 0.0001800233644859813, "loss": 1.6166, "step": 1329 }, { "epoch": 1.39, "grad_norm": 2.5610482692718506, "learning_rate": 0.00017990654205607474, "loss": 0.6638, "step": 1330 }, { "epoch": 1.39, "grad_norm": 2.5049209594726562, "learning_rate": 0.00017978971962616822, "loss": 0.6624, "step": 1331 }, { "epoch": 1.39, "grad_norm": 1.2319400310516357, "learning_rate": 0.00017967289719626167, "loss": 0.6184, "step": 1332 }, { "epoch": 1.39, "grad_norm": 1.0322645902633667, "learning_rate": 0.00017955607476635515, "loss": 0.3184, "step": 1333 }, { "epoch": 1.4, "grad_norm": 2.68593430519104, "learning_rate": 0.00017943925233644857, "loss": 1.3337, "step": 1334 }, { "epoch": 1.4, "grad_norm": 3.216642141342163, "learning_rate": 0.00017932242990654202, "loss": 1.3633, "step": 1335 }, { "epoch": 1.4, "grad_norm": 1.0757498741149902, "learning_rate": 0.0001792056074766355, "loss": 0.5981, "step": 1336 }, { "epoch": 1.4, "grad_norm": 2.787240743637085, "learning_rate": 0.00017908878504672895, "loss": 0.8051, "step": 1337 }, { "epoch": 1.4, "grad_norm": 4.88184928894043, "learning_rate": 0.00017897196261682243, "loss": 1.4034, "step": 1338 }, { "epoch": 1.4, "grad_norm": 0.5093235969543457, "learning_rate": 0.00017885514018691588, "loss": 0.151, "step": 1339 }, { "epoch": 1.4, "grad_norm": 0.8691405653953552, "learning_rate": 0.00017873831775700935, "loss": 0.3816, "step": 1340 }, { "epoch": 1.4, "grad_norm": 2.2686407566070557, "learning_rate": 0.0001786214953271028, "loss": 0.403, "step": 1341 }, { "epoch": 1.4, "grad_norm": 3.1247494220733643, "learning_rate": 0.00017850467289719623, "loss": 1.461, "step": 1342 }, { "epoch": 1.4, "grad_norm": 2.5943002700805664, "learning_rate": 
0.0001783878504672897, "loss": 0.8829, "step": 1343 }, { "epoch": 1.41, "grad_norm": 1.44015634059906, "learning_rate": 0.00017827102803738315, "loss": 0.2918, "step": 1344 }, { "epoch": 1.41, "grad_norm": 1.8427207469940186, "learning_rate": 0.00017815420560747663, "loss": 0.5179, "step": 1345 }, { "epoch": 1.41, "grad_norm": 2.8338239192962646, "learning_rate": 0.00017803738317757008, "loss": 0.7246, "step": 1346 }, { "epoch": 1.41, "grad_norm": 0.8842005133628845, "learning_rate": 0.00017792056074766353, "loss": 0.3522, "step": 1347 }, { "epoch": 1.41, "grad_norm": 2.3126587867736816, "learning_rate": 0.000177803738317757, "loss": 1.291, "step": 1348 }, { "epoch": 1.41, "grad_norm": 0.49202239513397217, "learning_rate": 0.00017768691588785046, "loss": 0.1394, "step": 1349 }, { "epoch": 1.41, "grad_norm": 2.2132513523101807, "learning_rate": 0.0001775700934579439, "loss": 1.1189, "step": 1350 }, { "epoch": 1.41, "grad_norm": 0.8676378726959229, "learning_rate": 0.00017745327102803736, "loss": 0.3637, "step": 1351 }, { "epoch": 1.41, "grad_norm": 2.5113818645477295, "learning_rate": 0.0001773364485981308, "loss": 0.701, "step": 1352 }, { "epoch": 1.42, "grad_norm": 0.5786195397377014, "learning_rate": 0.00017721962616822428, "loss": 0.1862, "step": 1353 }, { "epoch": 1.42, "grad_norm": 2.432063341140747, "learning_rate": 0.00017710280373831773, "loss": 0.3464, "step": 1354 }, { "epoch": 1.42, "grad_norm": 3.4058117866516113, "learning_rate": 0.0001769859813084112, "loss": 0.6988, "step": 1355 }, { "epoch": 1.42, "grad_norm": 1.8814724683761597, "learning_rate": 0.00017686915887850466, "loss": 0.5569, "step": 1356 }, { "epoch": 1.42, "grad_norm": 2.372424364089966, "learning_rate": 0.00017675233644859814, "loss": 0.8539, "step": 1357 }, { "epoch": 1.42, "grad_norm": 0.3942906856536865, "learning_rate": 0.00017663551401869156, "loss": 0.086, "step": 1358 }, { "epoch": 1.42, "grad_norm": 4.250904083251953, "learning_rate": 0.000176518691588785, "loss": 0.9614, 
"step": 1359 }, { "epoch": 1.42, "grad_norm": 1.135056972503662, "learning_rate": 0.0001764018691588785, "loss": 0.516, "step": 1360 }, { "epoch": 1.42, "grad_norm": 0.6572325825691223, "learning_rate": 0.00017628504672897194, "loss": 0.1664, "step": 1361 }, { "epoch": 1.42, "grad_norm": 2.5226712226867676, "learning_rate": 0.00017616822429906542, "loss": 0.893, "step": 1362 }, { "epoch": 1.43, "grad_norm": 2.456709623336792, "learning_rate": 0.00017605140186915887, "loss": 0.8222, "step": 1363 }, { "epoch": 1.43, "grad_norm": 0.7911829352378845, "learning_rate": 0.00017593457943925234, "loss": 0.3, "step": 1364 }, { "epoch": 1.43, "grad_norm": 3.0613296031951904, "learning_rate": 0.0001758177570093458, "loss": 1.4222, "step": 1365 }, { "epoch": 1.43, "grad_norm": 2.1822903156280518, "learning_rate": 0.00017570093457943922, "loss": 0.4439, "step": 1366 }, { "epoch": 1.43, "grad_norm": 2.6135878562927246, "learning_rate": 0.0001755841121495327, "loss": 0.2818, "step": 1367 }, { "epoch": 1.43, "grad_norm": 3.2960660457611084, "learning_rate": 0.00017546728971962614, "loss": 0.5994, "step": 1368 }, { "epoch": 1.43, "grad_norm": 0.9349135160446167, "learning_rate": 0.00017535046728971962, "loss": 0.3976, "step": 1369 }, { "epoch": 1.43, "grad_norm": 2.733487129211426, "learning_rate": 0.00017523364485981307, "loss": 0.8686, "step": 1370 }, { "epoch": 1.43, "grad_norm": 2.251721143722534, "learning_rate": 0.00017511682242990652, "loss": 1.0455, "step": 1371 }, { "epoch": 1.44, "grad_norm": 2.2353663444519043, "learning_rate": 0.000175, "loss": 0.9254, "step": 1372 }, { "epoch": 1.44, "grad_norm": 4.86463737487793, "learning_rate": 0.00017488317757009345, "loss": 1.1722, "step": 1373 }, { "epoch": 1.44, "grad_norm": 0.4062563478946686, "learning_rate": 0.0001747663551401869, "loss": 0.07, "step": 1374 }, { "epoch": 1.44, "grad_norm": 4.017411708831787, "learning_rate": 0.00017464953271028035, "loss": 0.6862, "step": 1375 }, { "epoch": 1.44, "grad_norm": 
3.2097668647766113, "learning_rate": 0.0001745327102803738, "loss": 0.6583, "step": 1376 }, { "epoch": 1.44, "grad_norm": 3.701545238494873, "learning_rate": 0.00017441588785046728, "loss": 1.1785, "step": 1377 }, { "epoch": 1.44, "grad_norm": 2.956078290939331, "learning_rate": 0.00017429906542056073, "loss": 0.7798, "step": 1378 }, { "epoch": 1.44, "grad_norm": 4.492142677307129, "learning_rate": 0.0001741822429906542, "loss": 1.1939, "step": 1379 }, { "epoch": 1.44, "grad_norm": 3.10951828956604, "learning_rate": 0.00017406542056074765, "loss": 0.6798, "step": 1380 }, { "epoch": 1.44, "grad_norm": 3.4178178310394287, "learning_rate": 0.00017394859813084113, "loss": 1.7696, "step": 1381 }, { "epoch": 1.45, "grad_norm": 2.3063645362854004, "learning_rate": 0.00017383177570093455, "loss": 0.6285, "step": 1382 }, { "epoch": 1.45, "grad_norm": 2.3473939895629883, "learning_rate": 0.000173714953271028, "loss": 0.9029, "step": 1383 }, { "epoch": 1.45, "grad_norm": 0.47164860367774963, "learning_rate": 0.00017359813084112148, "loss": 0.1249, "step": 1384 }, { "epoch": 1.45, "grad_norm": 2.301178216934204, "learning_rate": 0.00017348130841121493, "loss": 1.1899, "step": 1385 }, { "epoch": 1.45, "grad_norm": 0.9900512099266052, "learning_rate": 0.0001733644859813084, "loss": 0.4836, "step": 1386 }, { "epoch": 1.45, "grad_norm": 0.7707881927490234, "learning_rate": 0.00017324766355140186, "loss": 0.3347, "step": 1387 }, { "epoch": 1.45, "grad_norm": 2.288416862487793, "learning_rate": 0.00017313084112149533, "loss": 1.1194, "step": 1388 }, { "epoch": 1.45, "grad_norm": 1.8858298063278198, "learning_rate": 0.00017301401869158878, "loss": 0.4368, "step": 1389 }, { "epoch": 1.45, "grad_norm": 2.7885446548461914, "learning_rate": 0.0001728971962616822, "loss": 1.2908, "step": 1390 }, { "epoch": 1.46, "grad_norm": 3.9828786849975586, "learning_rate": 0.00017278037383177568, "loss": 0.9096, "step": 1391 }, { "epoch": 1.46, "grad_norm": 2.8363916873931885, "learning_rate": 
0.00017266355140186913, "loss": 1.0472, "step": 1392 }, { "epoch": 1.46, "grad_norm": 2.291325092315674, "learning_rate": 0.0001725467289719626, "loss": 0.6374, "step": 1393 }, { "epoch": 1.46, "grad_norm": 2.0536561012268066, "learning_rate": 0.00017242990654205606, "loss": 0.5722, "step": 1394 }, { "epoch": 1.46, "grad_norm": 1.9046696424484253, "learning_rate": 0.0001723130841121495, "loss": 0.6212, "step": 1395 }, { "epoch": 1.46, "grad_norm": 3.9934444427490234, "learning_rate": 0.000172196261682243, "loss": 0.7917, "step": 1396 }, { "epoch": 1.46, "grad_norm": 0.6799383163452148, "learning_rate": 0.00017207943925233644, "loss": 0.3291, "step": 1397 }, { "epoch": 1.46, "grad_norm": 0.4810992181301117, "learning_rate": 0.0001719626168224299, "loss": 0.174, "step": 1398 }, { "epoch": 1.46, "grad_norm": 2.9290904998779297, "learning_rate": 0.00017184579439252334, "loss": 1.0489, "step": 1399 }, { "epoch": 1.46, "grad_norm": 1.0876963138580322, "learning_rate": 0.0001717289719626168, "loss": 0.5794, "step": 1400 }, { "epoch": 1.47, "grad_norm": 0.6646468043327332, "learning_rate": 0.00017161214953271027, "loss": 0.3333, "step": 1401 }, { "epoch": 1.47, "grad_norm": 1.0397571325302124, "learning_rate": 0.00017149532710280372, "loss": 0.5403, "step": 1402 }, { "epoch": 1.47, "grad_norm": 3.3683550357818604, "learning_rate": 0.0001713785046728972, "loss": 0.9768, "step": 1403 }, { "epoch": 1.47, "grad_norm": 1.7085354328155518, "learning_rate": 0.00017126168224299064, "loss": 0.8301, "step": 1404 }, { "epoch": 1.47, "grad_norm": 0.9951138496398926, "learning_rate": 0.00017114485981308412, "loss": 0.5877, "step": 1405 }, { "epoch": 1.47, "grad_norm": 2.1947481632232666, "learning_rate": 0.00017102803738317754, "loss": 1.3012, "step": 1406 }, { "epoch": 1.47, "grad_norm": 2.249422311782837, "learning_rate": 0.000170911214953271, "loss": 0.5428, "step": 1407 }, { "epoch": 1.47, "grad_norm": 2.176011562347412, "learning_rate": 0.00017079439252336447, "loss": 0.6749, 
"step": 1408 }, { "epoch": 1.47, "grad_norm": 2.369123935699463, "learning_rate": 0.00017067757009345792, "loss": 0.8806, "step": 1409 }, { "epoch": 1.47, "grad_norm": 3.223184823989868, "learning_rate": 0.0001705607476635514, "loss": 0.978, "step": 1410 }, { "epoch": 1.48, "grad_norm": 1.7362747192382812, "learning_rate": 0.00017044392523364485, "loss": 0.4143, "step": 1411 }, { "epoch": 1.48, "grad_norm": 1.9589636325836182, "learning_rate": 0.00017032710280373833, "loss": 0.9767, "step": 1412 }, { "epoch": 1.48, "grad_norm": 2.106274127960205, "learning_rate": 0.00017021028037383178, "loss": 0.5638, "step": 1413 }, { "epoch": 1.48, "grad_norm": 1.9418315887451172, "learning_rate": 0.0001700934579439252, "loss": 0.9572, "step": 1414 }, { "epoch": 1.48, "grad_norm": 2.3618438243865967, "learning_rate": 0.00016997663551401868, "loss": 0.673, "step": 1415 }, { "epoch": 1.48, "grad_norm": 2.0478830337524414, "learning_rate": 0.00016985981308411213, "loss": 0.5286, "step": 1416 }, { "epoch": 1.48, "grad_norm": 3.376671314239502, "learning_rate": 0.0001697429906542056, "loss": 1.1043, "step": 1417 }, { "epoch": 1.48, "grad_norm": 1.0522353649139404, "learning_rate": 0.00016962616822429905, "loss": 0.0854, "step": 1418 }, { "epoch": 1.48, "grad_norm": 0.4326811730861664, "learning_rate": 0.0001695093457943925, "loss": 0.118, "step": 1419 }, { "epoch": 1.49, "grad_norm": 3.028013229370117, "learning_rate": 0.00016939252336448598, "loss": 0.7437, "step": 1420 }, { "epoch": 1.49, "grad_norm": 2.15781307220459, "learning_rate": 0.00016927570093457943, "loss": 0.9347, "step": 1421 }, { "epoch": 1.49, "grad_norm": 2.592618465423584, "learning_rate": 0.00016915887850467288, "loss": 0.9163, "step": 1422 }, { "epoch": 1.49, "grad_norm": 2.283153533935547, "learning_rate": 0.00016904205607476633, "loss": 1.0058, "step": 1423 }, { "epoch": 1.49, "grad_norm": 1.3824974298477173, "learning_rate": 0.00016892523364485978, "loss": 0.8266, "step": 1424 }, { "epoch": 1.49, "grad_norm": 
3.0501463413238525, "learning_rate": 0.00016880841121495326, "loss": 1.4845, "step": 1425 }, { "epoch": 1.49, "grad_norm": 1.2995612621307373, "learning_rate": 0.0001686915887850467, "loss": 0.1369, "step": 1426 }, { "epoch": 1.49, "grad_norm": 1.060412049293518, "learning_rate": 0.00016857476635514018, "loss": 0.2031, "step": 1427 }, { "epoch": 1.49, "grad_norm": 2.402827262878418, "learning_rate": 0.00016845794392523363, "loss": 0.6165, "step": 1428 }, { "epoch": 1.49, "grad_norm": 2.1554033756256104, "learning_rate": 0.0001683411214953271, "loss": 0.9243, "step": 1429 }, { "epoch": 1.5, "grad_norm": 0.8789975047111511, "learning_rate": 0.00016822429906542053, "loss": 0.3416, "step": 1430 }, { "epoch": 1.5, "grad_norm": 3.1596078872680664, "learning_rate": 0.00016810747663551398, "loss": 0.8469, "step": 1431 }, { "epoch": 1.5, "grad_norm": 1.5885155200958252, "learning_rate": 0.00016799065420560746, "loss": 0.4804, "step": 1432 }, { "epoch": 1.5, "grad_norm": 0.705204427242279, "learning_rate": 0.0001678738317757009, "loss": 0.2568, "step": 1433 }, { "epoch": 1.5, "grad_norm": 2.0723459720611572, "learning_rate": 0.0001677570093457944, "loss": 0.5643, "step": 1434 }, { "epoch": 1.5, "grad_norm": 1.1040033102035522, "learning_rate": 0.00016764018691588784, "loss": 0.4714, "step": 1435 }, { "epoch": 1.5, "grad_norm": 6.07669734954834, "learning_rate": 0.00016752336448598132, "loss": 1.2381, "step": 1436 }, { "epoch": 1.5, "grad_norm": 1.854767918586731, "learning_rate": 0.00016740654205607477, "loss": 0.5169, "step": 1437 }, { "epoch": 1.5, "grad_norm": 2.84541916847229, "learning_rate": 0.0001672897196261682, "loss": 0.7201, "step": 1438 }, { "epoch": 1.51, "grad_norm": 1.7634392976760864, "learning_rate": 0.00016717289719626167, "loss": 0.4629, "step": 1439 }, { "epoch": 1.51, "grad_norm": 2.0387461185455322, "learning_rate": 0.00016705607476635512, "loss": 0.7306, "step": 1440 }, { "epoch": 1.51, "grad_norm": 1.1351913213729858, "learning_rate": 
0.0001669392523364486, "loss": 0.1392, "step": 1441 }, { "epoch": 1.51, "grad_norm": 3.165987253189087, "learning_rate": 0.00016682242990654204, "loss": 1.0364, "step": 1442 }, { "epoch": 1.51, "grad_norm": 0.5354011654853821, "learning_rate": 0.0001667056074766355, "loss": 0.1786, "step": 1443 }, { "epoch": 1.51, "grad_norm": 1.6701676845550537, "learning_rate": 0.00016658878504672897, "loss": 0.3902, "step": 1444 }, { "epoch": 1.51, "grad_norm": 2.9532558917999268, "learning_rate": 0.00016647196261682242, "loss": 1.308, "step": 1445 }, { "epoch": 1.51, "grad_norm": 3.9400277137756348, "learning_rate": 0.00016635514018691587, "loss": 0.8879, "step": 1446 }, { "epoch": 1.51, "grad_norm": 2.938701629638672, "learning_rate": 0.00016623831775700932, "loss": 1.1334, "step": 1447 }, { "epoch": 1.51, "grad_norm": 1.7852617502212524, "learning_rate": 0.00016612149532710277, "loss": 0.5887, "step": 1448 }, { "epoch": 1.52, "grad_norm": 1.42865788936615, "learning_rate": 0.00016600467289719625, "loss": 0.276, "step": 1449 }, { "epoch": 1.52, "grad_norm": 2.0600006580352783, "learning_rate": 0.0001658878504672897, "loss": 0.6751, "step": 1450 }, { "epoch": 1.52, "grad_norm": 0.6229619383811951, "learning_rate": 0.00016577102803738317, "loss": 0.1782, "step": 1451 }, { "epoch": 1.52, "grad_norm": 0.6343135833740234, "learning_rate": 0.00016565420560747662, "loss": 0.2434, "step": 1452 }, { "epoch": 1.52, "grad_norm": 3.1584959030151367, "learning_rate": 0.0001655373831775701, "loss": 0.738, "step": 1453 }, { "epoch": 1.52, "grad_norm": 0.9337627291679382, "learning_rate": 0.00016542056074766352, "loss": 0.4132, "step": 1454 }, { "epoch": 1.52, "grad_norm": 2.4795186519622803, "learning_rate": 0.00016530373831775697, "loss": 1.1011, "step": 1455 }, { "epoch": 1.52, "grad_norm": 2.3030202388763428, "learning_rate": 0.00016518691588785045, "loss": 0.31, "step": 1456 }, { "epoch": 1.52, "grad_norm": 3.6737964153289795, "learning_rate": 0.0001650700934579439, "loss": 1.5343, 
"step": 1457 }, { "epoch": 1.53, "grad_norm": 4.254706382751465, "learning_rate": 0.00016495327102803738, "loss": 0.8686, "step": 1458 }, { "epoch": 1.53, "grad_norm": 1.095659852027893, "learning_rate": 0.00016483644859813083, "loss": 0.552, "step": 1459 }, { "epoch": 1.53, "grad_norm": 2.9776525497436523, "learning_rate": 0.0001647196261682243, "loss": 1.247, "step": 1460 }, { "epoch": 1.53, "grad_norm": 1.611816167831421, "learning_rate": 0.00016460280373831776, "loss": 1.2273, "step": 1461 }, { "epoch": 1.53, "grad_norm": 1.910477876663208, "learning_rate": 0.00016448598130841118, "loss": 1.2431, "step": 1462 }, { "epoch": 1.53, "grad_norm": 4.306861877441406, "learning_rate": 0.00016436915887850466, "loss": 1.3482, "step": 1463 }, { "epoch": 1.53, "grad_norm": 3.019909381866455, "learning_rate": 0.0001642523364485981, "loss": 1.1731, "step": 1464 }, { "epoch": 1.53, "grad_norm": 2.6072418689727783, "learning_rate": 0.00016413551401869158, "loss": 0.6615, "step": 1465 }, { "epoch": 1.53, "grad_norm": 2.3710482120513916, "learning_rate": 0.00016401869158878503, "loss": 0.5472, "step": 1466 }, { "epoch": 1.53, "grad_norm": 1.8100333213806152, "learning_rate": 0.00016390186915887848, "loss": 0.8855, "step": 1467 }, { "epoch": 1.54, "grad_norm": 2.7441534996032715, "learning_rate": 0.00016378504672897196, "loss": 1.5301, "step": 1468 }, { "epoch": 1.54, "grad_norm": 1.9324244260787964, "learning_rate": 0.0001636682242990654, "loss": 1.0325, "step": 1469 }, { "epoch": 1.54, "grad_norm": 4.5111565589904785, "learning_rate": 0.00016355140186915886, "loss": 0.9948, "step": 1470 }, { "epoch": 1.54, "grad_norm": 0.6447874903678894, "learning_rate": 0.0001634345794392523, "loss": 0.1443, "step": 1471 }, { "epoch": 1.54, "grad_norm": 2.870194673538208, "learning_rate": 0.00016331775700934576, "loss": 0.5645, "step": 1472 }, { "epoch": 1.54, "grad_norm": 0.656026303768158, "learning_rate": 0.00016320093457943924, "loss": 0.2724, "step": 1473 }, { "epoch": 1.54, "grad_norm": 
0.9519236087799072, "learning_rate": 0.0001630841121495327, "loss": 0.2745, "step": 1474 }, { "epoch": 1.54, "grad_norm": 0.730017364025116, "learning_rate": 0.00016296728971962617, "loss": 0.1684, "step": 1475 }, { "epoch": 1.54, "grad_norm": 2.9128291606903076, "learning_rate": 0.00016285046728971962, "loss": 0.7495, "step": 1476 }, { "epoch": 1.54, "grad_norm": 2.1994237899780273, "learning_rate": 0.0001627336448598131, "loss": 0.6549, "step": 1477 }, { "epoch": 1.55, "grad_norm": 2.888871908187866, "learning_rate": 0.00016261682242990652, "loss": 0.5391, "step": 1478 }, { "epoch": 1.55, "grad_norm": 2.2752833366394043, "learning_rate": 0.00016249999999999997, "loss": 1.3852, "step": 1479 }, { "epoch": 1.55, "grad_norm": 2.8205981254577637, "learning_rate": 0.00016238317757009344, "loss": 0.5354, "step": 1480 }, { "epoch": 1.55, "grad_norm": 1.2935905456542969, "learning_rate": 0.0001622663551401869, "loss": 0.6873, "step": 1481 }, { "epoch": 1.55, "grad_norm": 3.462782621383667, "learning_rate": 0.00016214953271028037, "loss": 1.3113, "step": 1482 }, { "epoch": 1.55, "grad_norm": 1.3133735656738281, "learning_rate": 0.00016203271028037382, "loss": 0.9383, "step": 1483 }, { "epoch": 1.55, "grad_norm": 1.02700936794281, "learning_rate": 0.0001619158878504673, "loss": 0.555, "step": 1484 }, { "epoch": 1.55, "grad_norm": 3.047100305557251, "learning_rate": 0.00016179906542056075, "loss": 1.227, "step": 1485 }, { "epoch": 1.55, "grad_norm": 3.2910258769989014, "learning_rate": 0.00016168224299065417, "loss": 0.7439, "step": 1486 }, { "epoch": 1.56, "grad_norm": 2.5884504318237305, "learning_rate": 0.00016156542056074765, "loss": 1.1479, "step": 1487 }, { "epoch": 1.56, "grad_norm": 2.7963638305664062, "learning_rate": 0.0001614485981308411, "loss": 1.2802, "step": 1488 }, { "epoch": 1.56, "grad_norm": 3.7087883949279785, "learning_rate": 0.00016133177570093457, "loss": 1.2185, "step": 1489 }, { "epoch": 1.56, "grad_norm": 0.849493682384491, "learning_rate": 
0.00016121495327102802, "loss": 0.3358, "step": 1490 }, { "epoch": 1.56, "grad_norm": 0.7434322834014893, "learning_rate": 0.00016109813084112147, "loss": 0.2501, "step": 1491 }, { "epoch": 1.56, "grad_norm": 0.6131064295768738, "learning_rate": 0.00016098130841121495, "loss": 0.1483, "step": 1492 }, { "epoch": 1.56, "grad_norm": 1.3495182991027832, "learning_rate": 0.0001608644859813084, "loss": 0.4082, "step": 1493 }, { "epoch": 1.56, "grad_norm": 2.7759788036346436, "learning_rate": 0.00016074766355140185, "loss": 0.9269, "step": 1494 }, { "epoch": 1.56, "grad_norm": 2.0917370319366455, "learning_rate": 0.0001606308411214953, "loss": 0.7764, "step": 1495 }, { "epoch": 1.56, "grad_norm": 3.4610509872436523, "learning_rate": 0.00016051401869158875, "loss": 1.1966, "step": 1496 }, { "epoch": 1.57, "grad_norm": 3.6502034664154053, "learning_rate": 0.00016039719626168223, "loss": 1.1802, "step": 1497 }, { "epoch": 1.57, "grad_norm": 2.2995026111602783, "learning_rate": 0.00016028037383177568, "loss": 1.0207, "step": 1498 }, { "epoch": 1.57, "grad_norm": 3.921745777130127, "learning_rate": 0.00016016355140186916, "loss": 0.9952, "step": 1499 }, { "epoch": 1.57, "grad_norm": 3.0950217247009277, "learning_rate": 0.0001600467289719626, "loss": 0.4891, "step": 1500 }, { "epoch": 1.57, "eval_loss": 1.1533477306365967, "eval_runtime": 1.0124, "eval_samples_per_second": 4.939, "eval_steps_per_second": 0.988, "step": 1500 }, { "epoch": 1.57, "grad_norm": 0.7715978026390076, "learning_rate": 0.00015992990654205608, "loss": 0.2482, "step": 1501 }, { "epoch": 1.57, "grad_norm": 3.1252341270446777, "learning_rate": 0.0001598130841121495, "loss": 0.6653, "step": 1502 }, { "epoch": 1.57, "grad_norm": 0.6810656785964966, "learning_rate": 0.00015969626168224296, "loss": 0.2498, "step": 1503 }, { "epoch": 1.57, "grad_norm": 0.776508092880249, "learning_rate": 0.00015957943925233643, "loss": 0.3936, "step": 1504 }, { "epoch": 1.57, "grad_norm": 2.2532429695129395, "learning_rate": 
0.00015946261682242988, "loss": 0.8081, "step": 1505 }, { "epoch": 1.58, "grad_norm": 1.6081033945083618, "learning_rate": 0.00015934579439252336, "loss": 0.4501, "step": 1506 }, { "epoch": 1.58, "grad_norm": 2.283398389816284, "learning_rate": 0.0001592289719626168, "loss": 1.52, "step": 1507 }, { "epoch": 1.58, "grad_norm": 2.9111759662628174, "learning_rate": 0.0001591121495327103, "loss": 1.0499, "step": 1508 }, { "epoch": 1.58, "grad_norm": 2.6676039695739746, "learning_rate": 0.00015899532710280374, "loss": 0.6595, "step": 1509 }, { "epoch": 1.58, "grad_norm": 2.395261526107788, "learning_rate": 0.00015887850467289716, "loss": 0.3202, "step": 1510 }, { "epoch": 1.58, "grad_norm": 2.177774667739868, "learning_rate": 0.00015876168224299064, "loss": 0.3961, "step": 1511 }, { "epoch": 1.58, "grad_norm": 2.0199573040008545, "learning_rate": 0.0001586448598130841, "loss": 0.5494, "step": 1512 }, { "epoch": 1.58, "grad_norm": 1.7580457925796509, "learning_rate": 0.00015852803738317756, "loss": 0.4739, "step": 1513 }, { "epoch": 1.58, "grad_norm": 2.2193167209625244, "learning_rate": 0.00015841121495327101, "loss": 1.3114, "step": 1514 }, { "epoch": 1.58, "grad_norm": 3.2167983055114746, "learning_rate": 0.00015829439252336446, "loss": 1.0337, "step": 1515 }, { "epoch": 1.59, "grad_norm": 1.9968187808990479, "learning_rate": 0.00015817757009345794, "loss": 0.6111, "step": 1516 }, { "epoch": 1.59, "grad_norm": 0.3653596341609955, "learning_rate": 0.0001580607476635514, "loss": 0.0711, "step": 1517 }, { "epoch": 1.59, "grad_norm": 1.1816760301589966, "learning_rate": 0.00015794392523364484, "loss": 0.338, "step": 1518 }, { "epoch": 1.59, "grad_norm": 0.6894358992576599, "learning_rate": 0.0001578271028037383, "loss": 0.2162, "step": 1519 }, { "epoch": 1.59, "grad_norm": 1.4301586151123047, "learning_rate": 0.00015771028037383174, "loss": 0.88, "step": 1520 }, { "epoch": 1.59, "grad_norm": 2.3182575702667236, "learning_rate": 0.00015759345794392522, "loss": 1.1359, 
"step": 1521 }, { "epoch": 1.59, "grad_norm": 2.250000238418579, "learning_rate": 0.00015747663551401867, "loss": 0.8995, "step": 1522 }, { "epoch": 1.59, "grad_norm": 2.535024404525757, "learning_rate": 0.00015735981308411215, "loss": 0.8351, "step": 1523 }, { "epoch": 1.59, "grad_norm": 3.6850876808166504, "learning_rate": 0.0001572429906542056, "loss": 1.597, "step": 1524 }, { "epoch": 1.6, "grad_norm": 2.4490113258361816, "learning_rate": 0.00015712616822429907, "loss": 0.6743, "step": 1525 }, { "epoch": 1.6, "grad_norm": 2.006627082824707, "learning_rate": 0.0001570093457943925, "loss": 0.5022, "step": 1526 }, { "epoch": 1.6, "grad_norm": 1.6286826133728027, "learning_rate": 0.00015689252336448595, "loss": 0.4875, "step": 1527 }, { "epoch": 1.6, "grad_norm": 0.4024123251438141, "learning_rate": 0.00015677570093457942, "loss": 0.0667, "step": 1528 }, { "epoch": 1.6, "grad_norm": 1.9532690048217773, "learning_rate": 0.00015665887850467287, "loss": 0.5431, "step": 1529 }, { "epoch": 1.6, "grad_norm": 3.2107231616973877, "learning_rate": 0.00015654205607476635, "loss": 0.8777, "step": 1530 }, { "epoch": 1.6, "grad_norm": 3.744612693786621, "learning_rate": 0.0001564252336448598, "loss": 1.0231, "step": 1531 }, { "epoch": 1.6, "grad_norm": 2.101804256439209, "learning_rate": 0.00015630841121495328, "loss": 0.5195, "step": 1532 }, { "epoch": 1.6, "grad_norm": 1.6645424365997314, "learning_rate": 0.00015619158878504673, "loss": 0.8324, "step": 1533 }, { "epoch": 1.6, "grad_norm": 2.112236738204956, "learning_rate": 0.00015607476635514015, "loss": 1.2638, "step": 1534 }, { "epoch": 1.61, "grad_norm": 1.9546703100204468, "learning_rate": 0.00015595794392523363, "loss": 0.6795, "step": 1535 }, { "epoch": 1.61, "grad_norm": 1.0169304609298706, "learning_rate": 0.00015584112149532708, "loss": 0.4056, "step": 1536 }, { "epoch": 1.61, "grad_norm": 1.9648607969284058, "learning_rate": 0.00015572429906542056, "loss": 0.3932, "step": 1537 }, { "epoch": 1.61, "grad_norm": 
2.3926336765289307, "learning_rate": 0.000155607476635514, "loss": 0.8817, "step": 1538 }, { "epoch": 1.61, "grad_norm": 2.6523513793945312, "learning_rate": 0.00015549065420560746, "loss": 0.861, "step": 1539 }, { "epoch": 1.61, "grad_norm": 2.9970486164093018, "learning_rate": 0.00015537383177570093, "loss": 1.0401, "step": 1540 }, { "epoch": 1.61, "grad_norm": 1.2951358556747437, "learning_rate": 0.00015525700934579438, "loss": 0.2964, "step": 1541 }, { "epoch": 1.61, "grad_norm": 1.569375991821289, "learning_rate": 0.00015514018691588783, "loss": 0.3614, "step": 1542 }, { "epoch": 1.61, "grad_norm": 2.29843807220459, "learning_rate": 0.00015502336448598128, "loss": 0.4918, "step": 1543 }, { "epoch": 1.62, "grad_norm": 3.2564470767974854, "learning_rate": 0.00015490654205607473, "loss": 0.7068, "step": 1544 }, { "epoch": 1.62, "grad_norm": 2.965322971343994, "learning_rate": 0.0001547897196261682, "loss": 0.861, "step": 1545 }, { "epoch": 1.62, "grad_norm": 3.3394079208374023, "learning_rate": 0.00015467289719626166, "loss": 1.5551, "step": 1546 }, { "epoch": 1.62, "grad_norm": 2.289705514907837, "learning_rate": 0.00015455607476635514, "loss": 0.9215, "step": 1547 }, { "epoch": 1.62, "grad_norm": 1.7418241500854492, "learning_rate": 0.0001544392523364486, "loss": 0.4123, "step": 1548 }, { "epoch": 1.62, "grad_norm": 2.0741488933563232, "learning_rate": 0.00015432242990654206, "loss": 1.2427, "step": 1549 }, { "epoch": 1.62, "grad_norm": 0.3914594054222107, "learning_rate": 0.0001542056074766355, "loss": 0.0735, "step": 1550 }, { "epoch": 1.62, "grad_norm": 3.96793532371521, "learning_rate": 0.00015408878504672894, "loss": 0.9863, "step": 1551 }, { "epoch": 1.62, "grad_norm": 1.1604739427566528, "learning_rate": 0.00015397196261682241, "loss": 0.5499, "step": 1552 }, { "epoch": 1.62, "grad_norm": 4.087449550628662, "learning_rate": 0.00015385514018691586, "loss": 1.1625, "step": 1553 }, { "epoch": 1.63, "grad_norm": 1.983540415763855, "learning_rate": 
0.00015373831775700934, "loss": 1.3978, "step": 1554 }, { "epoch": 1.63, "grad_norm": 2.174452781677246, "learning_rate": 0.0001536214953271028, "loss": 0.613, "step": 1555 }, { "epoch": 1.63, "grad_norm": 2.5901448726654053, "learning_rate": 0.00015350467289719627, "loss": 0.5222, "step": 1556 }, { "epoch": 1.63, "grad_norm": 2.7491252422332764, "learning_rate": 0.00015338785046728972, "loss": 0.5357, "step": 1557 }, { "epoch": 1.63, "grad_norm": 2.587277889251709, "learning_rate": 0.00015327102803738314, "loss": 1.4659, "step": 1558 }, { "epoch": 1.63, "grad_norm": 3.2006373405456543, "learning_rate": 0.00015315420560747662, "loss": 1.2877, "step": 1559 }, { "epoch": 1.63, "grad_norm": 2.9988112449645996, "learning_rate": 0.00015303738317757007, "loss": 0.5014, "step": 1560 }, { "epoch": 1.63, "grad_norm": 3.011751890182495, "learning_rate": 0.00015292056074766355, "loss": 0.6075, "step": 1561 }, { "epoch": 1.63, "grad_norm": 2.0071706771850586, "learning_rate": 0.000152803738317757, "loss": 1.0028, "step": 1562 }, { "epoch": 1.63, "grad_norm": 0.4963289499282837, "learning_rate": 0.00015268691588785045, "loss": 0.0778, "step": 1563 }, { "epoch": 1.64, "grad_norm": 1.5799555778503418, "learning_rate": 0.00015257009345794392, "loss": 0.2804, "step": 1564 }, { "epoch": 1.64, "grad_norm": 2.747706651687622, "learning_rate": 0.00015245327102803737, "loss": 0.3919, "step": 1565 }, { "epoch": 1.64, "grad_norm": 6.703662872314453, "learning_rate": 0.00015233644859813082, "loss": 0.723, "step": 1566 }, { "epoch": 1.64, "grad_norm": 1.7172305583953857, "learning_rate": 0.00015221962616822427, "loss": 0.4695, "step": 1567 }, { "epoch": 1.64, "grad_norm": 1.9481459856033325, "learning_rate": 0.00015210280373831772, "loss": 0.444, "step": 1568 }, { "epoch": 1.64, "grad_norm": 1.0129424333572388, "learning_rate": 0.0001519859813084112, "loss": 0.4649, "step": 1569 }, { "epoch": 1.64, "grad_norm": 1.9686312675476074, "learning_rate": 0.00015186915887850465, "loss": 0.2723, 
"step": 1570 }, { "epoch": 1.64, "grad_norm": 2.451975107192993, "learning_rate": 0.00015175233644859813, "loss": 0.7925, "step": 1571 }, { "epoch": 1.64, "grad_norm": 3.254682779312134, "learning_rate": 0.00015163551401869158, "loss": 0.9014, "step": 1572 }, { "epoch": 1.65, "grad_norm": 1.4323906898498535, "learning_rate": 0.00015151869158878506, "loss": 0.3117, "step": 1573 }, { "epoch": 1.65, "grad_norm": 1.9285516738891602, "learning_rate": 0.00015140186915887848, "loss": 1.5676, "step": 1574 }, { "epoch": 1.65, "grad_norm": 3.664214849472046, "learning_rate": 0.00015128504672897193, "loss": 1.0707, "step": 1575 }, { "epoch": 1.65, "grad_norm": 2.9163613319396973, "learning_rate": 0.0001511682242990654, "loss": 0.9862, "step": 1576 }, { "epoch": 1.65, "grad_norm": 3.5014891624450684, "learning_rate": 0.00015105140186915886, "loss": 0.8882, "step": 1577 }, { "epoch": 1.65, "grad_norm": 1.6664868593215942, "learning_rate": 0.00015093457943925233, "loss": 0.5145, "step": 1578 }, { "epoch": 1.65, "grad_norm": 2.4006149768829346, "learning_rate": 0.00015081775700934578, "loss": 0.6458, "step": 1579 }, { "epoch": 1.65, "grad_norm": 1.898476481437683, "learning_rate": 0.00015070093457943926, "loss": 0.5072, "step": 1580 }, { "epoch": 1.65, "grad_norm": 1.0624420642852783, "learning_rate": 0.0001505841121495327, "loss": 0.2644, "step": 1581 }, { "epoch": 1.65, "grad_norm": 2.577528476715088, "learning_rate": 0.00015046728971962613, "loss": 0.9336, "step": 1582 }, { "epoch": 1.66, "grad_norm": 2.376910448074341, "learning_rate": 0.0001503504672897196, "loss": 0.6101, "step": 1583 }, { "epoch": 1.66, "grad_norm": 3.466841697692871, "learning_rate": 0.00015023364485981306, "loss": 1.1847, "step": 1584 }, { "epoch": 1.66, "grad_norm": 2.308872699737549, "learning_rate": 0.00015011682242990654, "loss": 0.546, "step": 1585 }, { "epoch": 1.66, "grad_norm": 2.0582919120788574, "learning_rate": 0.00015, "loss": 0.5657, "step": 1586 }, { "epoch": 1.66, "grad_norm": 
0.6894460916519165, "learning_rate": 0.00014988317757009344, "loss": 0.1334, "step": 1587 }, { "epoch": 1.66, "grad_norm": 2.060089349746704, "learning_rate": 0.00014976635514018691, "loss": 1.3325, "step": 1588 }, { "epoch": 1.66, "grad_norm": 2.185960054397583, "learning_rate": 0.00014964953271028036, "loss": 0.609, "step": 1589 }, { "epoch": 1.66, "grad_norm": 2.608562469482422, "learning_rate": 0.00014953271028037381, "loss": 1.4962, "step": 1590 }, { "epoch": 1.66, "grad_norm": 2.8520126342773438, "learning_rate": 0.0001494158878504673, "loss": 1.1023, "step": 1591 }, { "epoch": 1.67, "grad_norm": 1.7289494276046753, "learning_rate": 0.00014929906542056074, "loss": 0.4568, "step": 1592 }, { "epoch": 1.67, "grad_norm": 5.0688090324401855, "learning_rate": 0.0001491822429906542, "loss": 1.581, "step": 1593 }, { "epoch": 1.67, "grad_norm": 1.531291127204895, "learning_rate": 0.00014906542056074764, "loss": 1.2141, "step": 1594 }, { "epoch": 1.67, "grad_norm": 1.8704463243484497, "learning_rate": 0.00014894859813084112, "loss": 0.9042, "step": 1595 }, { "epoch": 1.67, "grad_norm": 3.1523284912109375, "learning_rate": 0.00014883177570093457, "loss": 1.2137, "step": 1596 }, { "epoch": 1.67, "grad_norm": 3.0960710048675537, "learning_rate": 0.00014871495327102802, "loss": 0.6209, "step": 1597 }, { "epoch": 1.67, "grad_norm": 0.5884736776351929, "learning_rate": 0.00014859813084112147, "loss": 0.1863, "step": 1598 }, { "epoch": 1.67, "grad_norm": 1.639065146446228, "learning_rate": 0.00014848130841121495, "loss": 0.4886, "step": 1599 }, { "epoch": 1.67, "grad_norm": 2.418137311935425, "learning_rate": 0.0001483644859813084, "loss": 1.194, "step": 1600 }, { "epoch": 1.67, "grad_norm": 1.7051281929016113, "learning_rate": 0.00014824766355140185, "loss": 0.7105, "step": 1601 }, { "epoch": 1.68, "grad_norm": 2.0735154151916504, "learning_rate": 0.00014813084112149532, "loss": 0.5942, "step": 1602 }, { "epoch": 1.68, "grad_norm": 2.791166305541992, "learning_rate": 
0.00014801401869158877, "loss": 1.4937, "step": 1603 }, { "epoch": 1.68, "grad_norm": 1.4166151285171509, "learning_rate": 0.00014789719626168225, "loss": 0.5445, "step": 1604 }, { "epoch": 1.68, "grad_norm": 2.774190902709961, "learning_rate": 0.00014778037383177567, "loss": 0.5487, "step": 1605 }, { "epoch": 1.68, "grad_norm": 2.2081236839294434, "learning_rate": 0.00014766355140186915, "loss": 0.7735, "step": 1606 }, { "epoch": 1.68, "grad_norm": 0.9153463244438171, "learning_rate": 0.0001475467289719626, "loss": 0.3899, "step": 1607 }, { "epoch": 1.68, "grad_norm": 0.8370518684387207, "learning_rate": 0.00014742990654205608, "loss": 0.3468, "step": 1608 }, { "epoch": 1.68, "grad_norm": 3.461733102798462, "learning_rate": 0.00014731308411214953, "loss": 1.1843, "step": 1609 }, { "epoch": 1.68, "grad_norm": 2.4178223609924316, "learning_rate": 0.00014719626168224298, "loss": 1.168, "step": 1610 }, { "epoch": 1.69, "grad_norm": 2.963109016418457, "learning_rate": 0.00014707943925233643, "loss": 1.0568, "step": 1611 }, { "epoch": 1.69, "grad_norm": 0.694535493850708, "learning_rate": 0.0001469626168224299, "loss": 0.2683, "step": 1612 }, { "epoch": 1.69, "grad_norm": 1.9057122468948364, "learning_rate": 0.00014684579439252335, "loss": 0.3639, "step": 1613 }, { "epoch": 1.69, "grad_norm": 0.505016028881073, "learning_rate": 0.0001467289719626168, "loss": 0.1307, "step": 1614 }, { "epoch": 1.69, "grad_norm": 1.8371268510818481, "learning_rate": 0.00014661214953271028, "loss": 0.4274, "step": 1615 }, { "epoch": 1.69, "grad_norm": 2.2967214584350586, "learning_rate": 0.00014649532710280373, "loss": 1.2995, "step": 1616 }, { "epoch": 1.69, "grad_norm": 1.8484244346618652, "learning_rate": 0.00014637850467289718, "loss": 0.4558, "step": 1617 }, { "epoch": 1.69, "grad_norm": 2.2368478775024414, "learning_rate": 0.00014626168224299063, "loss": 0.7785, "step": 1618 }, { "epoch": 1.69, "grad_norm": 2.3678500652313232, "learning_rate": 0.0001461448598130841, "loss": 1.4081, 
"step": 1619 }, { "epoch": 1.69, "grad_norm": 0.6158261895179749, "learning_rate": 0.00014602803738317756, "loss": 0.1808, "step": 1620 }, { "epoch": 1.7, "grad_norm": 1.6939470767974854, "learning_rate": 0.000145911214953271, "loss": 0.509, "step": 1621 }, { "epoch": 1.7, "grad_norm": 2.4551703929901123, "learning_rate": 0.00014579439252336446, "loss": 0.473, "step": 1622 }, { "epoch": 1.7, "grad_norm": 2.0342600345611572, "learning_rate": 0.00014567757009345794, "loss": 0.4449, "step": 1623 }, { "epoch": 1.7, "grad_norm": 1.9285507202148438, "learning_rate": 0.0001455607476635514, "loss": 1.3971, "step": 1624 }, { "epoch": 1.7, "grad_norm": 0.5966708660125732, "learning_rate": 0.00014544392523364484, "loss": 0.2074, "step": 1625 }, { "epoch": 1.7, "grad_norm": 2.5656778812408447, "learning_rate": 0.00014532710280373831, "loss": 0.6084, "step": 1626 }, { "epoch": 1.7, "grad_norm": 2.896272897720337, "learning_rate": 0.00014521028037383176, "loss": 0.9998, "step": 1627 }, { "epoch": 1.7, "grad_norm": 2.119194746017456, "learning_rate": 0.00014509345794392524, "loss": 0.3859, "step": 1628 }, { "epoch": 1.7, "grad_norm": 2.5145485401153564, "learning_rate": 0.00014497663551401866, "loss": 0.6619, "step": 1629 }, { "epoch": 1.71, "grad_norm": 1.912062406539917, "learning_rate": 0.00014485981308411214, "loss": 0.8044, "step": 1630 }, { "epoch": 1.71, "grad_norm": 2.8148365020751953, "learning_rate": 0.0001447429906542056, "loss": 0.5945, "step": 1631 }, { "epoch": 1.71, "grad_norm": 2.518463373184204, "learning_rate": 0.00014462616822429907, "loss": 0.6838, "step": 1632 }, { "epoch": 1.71, "grad_norm": 3.0430805683135986, "learning_rate": 0.00014450934579439252, "loss": 0.8019, "step": 1633 }, { "epoch": 1.71, "grad_norm": 3.276461124420166, "learning_rate": 0.00014439252336448597, "loss": 0.7042, "step": 1634 }, { "epoch": 1.71, "grad_norm": 2.3962302207946777, "learning_rate": 0.00014427570093457942, "loss": 1.4162, "step": 1635 }, { "epoch": 1.71, "grad_norm": 
0.7789445519447327, "learning_rate": 0.0001441588785046729, "loss": 0.291, "step": 1636 }, { "epoch": 1.71, "grad_norm": 3.588848352432251, "learning_rate": 0.00014404205607476635, "loss": 1.125, "step": 1637 }, { "epoch": 1.71, "grad_norm": 2.0526657104492188, "learning_rate": 0.0001439252336448598, "loss": 1.168, "step": 1638 }, { "epoch": 1.71, "grad_norm": 3.957766056060791, "learning_rate": 0.00014380841121495327, "loss": 1.0162, "step": 1639 }, { "epoch": 1.72, "grad_norm": 0.8492609262466431, "learning_rate": 0.00014369158878504672, "loss": 0.4544, "step": 1640 }, { "epoch": 1.72, "grad_norm": 2.658005714416504, "learning_rate": 0.00014357476635514017, "loss": 0.6238, "step": 1641 }, { "epoch": 1.72, "grad_norm": 2.339304208755493, "learning_rate": 0.00014345794392523362, "loss": 1.2503, "step": 1642 }, { "epoch": 1.72, "grad_norm": 2.0792555809020996, "learning_rate": 0.0001433411214953271, "loss": 0.4744, "step": 1643 }, { "epoch": 1.72, "grad_norm": 2.6944262981414795, "learning_rate": 0.00014322429906542055, "loss": 1.2977, "step": 1644 }, { "epoch": 1.72, "grad_norm": 3.134782314300537, "learning_rate": 0.000143107476635514, "loss": 0.9947, "step": 1645 }, { "epoch": 1.72, "grad_norm": 4.126375198364258, "learning_rate": 0.00014299065420560745, "loss": 1.2147, "step": 1646 }, { "epoch": 1.72, "grad_norm": 2.5845067501068115, "learning_rate": 0.00014287383177570093, "loss": 1.1301, "step": 1647 }, { "epoch": 1.72, "grad_norm": 3.6140151023864746, "learning_rate": 0.00014275700934579438, "loss": 1.4641, "step": 1648 }, { "epoch": 1.72, "grad_norm": 0.5477080941200256, "learning_rate": 0.00014264018691588783, "loss": 0.1762, "step": 1649 }, { "epoch": 1.73, "grad_norm": 2.663865089416504, "learning_rate": 0.0001425233644859813, "loss": 0.7445, "step": 1650 }, { "epoch": 1.73, "grad_norm": 0.3674846887588501, "learning_rate": 0.00014240654205607475, "loss": 0.087, "step": 1651 }, { "epoch": 1.73, "grad_norm": 1.6670650243759155, "learning_rate": 
0.00014228971962616823, "loss": 1.2636, "step": 1652 }, { "epoch": 1.73, "grad_norm": 2.501187801361084, "learning_rate": 0.00014217289719626165, "loss": 0.8981, "step": 1653 }, { "epoch": 1.73, "grad_norm": 2.9647672176361084, "learning_rate": 0.00014205607476635513, "loss": 1.5088, "step": 1654 }, { "epoch": 1.73, "grad_norm": 1.6598446369171143, "learning_rate": 0.00014193925233644858, "loss": 0.3709, "step": 1655 }, { "epoch": 1.73, "grad_norm": 2.7449827194213867, "learning_rate": 0.00014182242990654206, "loss": 1.3826, "step": 1656 }, { "epoch": 1.73, "grad_norm": 2.3242647647857666, "learning_rate": 0.0001417056074766355, "loss": 0.4957, "step": 1657 }, { "epoch": 1.73, "grad_norm": 1.6587960720062256, "learning_rate": 0.00014158878504672896, "loss": 0.7291, "step": 1658 }, { "epoch": 1.74, "grad_norm": 1.4735907316207886, "learning_rate": 0.0001414719626168224, "loss": 0.48, "step": 1659 }, { "epoch": 1.74, "grad_norm": 1.680935263633728, "learning_rate": 0.00014135514018691589, "loss": 0.656, "step": 1660 }, { "epoch": 1.74, "grad_norm": 1.679986834526062, "learning_rate": 0.00014123831775700934, "loss": 0.5056, "step": 1661 }, { "epoch": 1.74, "grad_norm": 1.520255208015442, "learning_rate": 0.00014112149532710279, "loss": 0.2241, "step": 1662 }, { "epoch": 1.74, "grad_norm": 1.179740309715271, "learning_rate": 0.00014100467289719626, "loss": 0.2556, "step": 1663 }, { "epoch": 1.74, "grad_norm": 3.3339192867279053, "learning_rate": 0.0001408878504672897, "loss": 1.1833, "step": 1664 }, { "epoch": 1.74, "grad_norm": 1.6800388097763062, "learning_rate": 0.00014077102803738316, "loss": 0.4323, "step": 1665 }, { "epoch": 1.74, "grad_norm": 2.357207775115967, "learning_rate": 0.0001406542056074766, "loss": 0.7178, "step": 1666 }, { "epoch": 1.74, "grad_norm": 1.8271247148513794, "learning_rate": 0.0001405373831775701, "loss": 0.6007, "step": 1667 }, { "epoch": 1.74, "grad_norm": 0.7845667004585266, "learning_rate": 0.00014042056074766354, "loss": 0.3273, 
"step": 1668 }, { "epoch": 1.75, "grad_norm": 2.022867441177368, "learning_rate": 0.000140303738317757, "loss": 0.8424, "step": 1669 }, { "epoch": 1.75, "grad_norm": 2.6293094158172607, "learning_rate": 0.00014018691588785044, "loss": 0.7543, "step": 1670 }, { "epoch": 1.75, "grad_norm": 2.154860258102417, "learning_rate": 0.00014007009345794392, "loss": 0.8477, "step": 1671 }, { "epoch": 1.75, "grad_norm": 1.9960460662841797, "learning_rate": 0.00013995327102803737, "loss": 0.4638, "step": 1672 }, { "epoch": 1.75, "grad_norm": 0.6653493046760559, "learning_rate": 0.00013983644859813082, "loss": 0.1838, "step": 1673 }, { "epoch": 1.75, "grad_norm": 2.4807372093200684, "learning_rate": 0.0001397196261682243, "loss": 0.6451, "step": 1674 }, { "epoch": 1.75, "grad_norm": 1.979583978652954, "learning_rate": 0.00013960280373831774, "loss": 0.4743, "step": 1675 }, { "epoch": 1.75, "grad_norm": 4.964494705200195, "learning_rate": 0.00013948598130841122, "loss": 1.1769, "step": 1676 }, { "epoch": 1.75, "grad_norm": 2.702359199523926, "learning_rate": 0.00013936915887850465, "loss": 1.7705, "step": 1677 }, { "epoch": 1.76, "grad_norm": 2.9674830436706543, "learning_rate": 0.00013925233644859812, "loss": 0.8044, "step": 1678 }, { "epoch": 1.76, "grad_norm": 0.6541764736175537, "learning_rate": 0.00013913551401869157, "loss": 0.277, "step": 1679 }, { "epoch": 1.76, "grad_norm": 0.7068645358085632, "learning_rate": 0.00013901869158878505, "loss": 0.2462, "step": 1680 }, { "epoch": 1.76, "grad_norm": 0.6276235580444336, "learning_rate": 0.0001389018691588785, "loss": 0.2266, "step": 1681 }, { "epoch": 1.76, "grad_norm": 1.7366113662719727, "learning_rate": 0.00013878504672897195, "loss": 0.6867, "step": 1682 }, { "epoch": 1.76, "grad_norm": 1.0275715589523315, "learning_rate": 0.0001386682242990654, "loss": 0.3801, "step": 1683 }, { "epoch": 1.76, "grad_norm": 2.116137981414795, "learning_rate": 0.00013855140186915888, "loss": 1.5116, "step": 1684 }, { "epoch": 1.76, 
"grad_norm": 3.340062379837036, "learning_rate": 0.00013843457943925233, "loss": 0.6008, "step": 1685 }, { "epoch": 1.76, "grad_norm": 1.818067193031311, "learning_rate": 0.00013831775700934578, "loss": 0.8285, "step": 1686 }, { "epoch": 1.76, "grad_norm": 2.169006109237671, "learning_rate": 0.00013820093457943925, "loss": 1.56, "step": 1687 }, { "epoch": 1.77, "grad_norm": 2.3605668544769287, "learning_rate": 0.0001380841121495327, "loss": 0.476, "step": 1688 }, { "epoch": 1.77, "grad_norm": 1.9693185091018677, "learning_rate": 0.00013796728971962615, "loss": 0.5195, "step": 1689 }, { "epoch": 1.77, "grad_norm": 0.9456527829170227, "learning_rate": 0.0001378504672897196, "loss": 0.4079, "step": 1690 }, { "epoch": 1.77, "grad_norm": 1.855286955833435, "learning_rate": 0.00013773364485981308, "loss": 0.5983, "step": 1691 }, { "epoch": 1.77, "grad_norm": 2.4261703491210938, "learning_rate": 0.00013761682242990653, "loss": 0.3252, "step": 1692 }, { "epoch": 1.77, "grad_norm": 2.379132032394409, "learning_rate": 0.00013749999999999998, "loss": 0.861, "step": 1693 }, { "epoch": 1.77, "grad_norm": 0.2820170521736145, "learning_rate": 0.00013738317757009343, "loss": 0.0576, "step": 1694 }, { "epoch": 1.77, "grad_norm": 2.008049964904785, "learning_rate": 0.0001372663551401869, "loss": 0.4623, "step": 1695 }, { "epoch": 1.77, "grad_norm": 1.8559750318527222, "learning_rate": 0.00013714953271028036, "loss": 0.518, "step": 1696 }, { "epoch": 1.78, "grad_norm": 2.115945816040039, "learning_rate": 0.0001370327102803738, "loss": 0.5179, "step": 1697 }, { "epoch": 1.78, "grad_norm": 1.434239149093628, "learning_rate": 0.00013691588785046729, "loss": 0.5205, "step": 1698 }, { "epoch": 1.78, "grad_norm": 1.3366906642913818, "learning_rate": 0.00013679906542056074, "loss": 0.7658, "step": 1699 }, { "epoch": 1.78, "grad_norm": 3.734058141708374, "learning_rate": 0.0001366822429906542, "loss": 1.525, "step": 1700 }, { "epoch": 1.78, "grad_norm": 3.2385447025299072, "learning_rate": 
0.00013656542056074764, "loss": 0.8095, "step": 1701 }, { "epoch": 1.78, "grad_norm": 3.614528179168701, "learning_rate": 0.0001364485981308411, "loss": 1.0613, "step": 1702 }, { "epoch": 1.78, "grad_norm": 0.726099967956543, "learning_rate": 0.00013633177570093456, "loss": 0.1841, "step": 1703 }, { "epoch": 1.78, "grad_norm": 3.171700954437256, "learning_rate": 0.00013621495327102804, "loss": 1.0322, "step": 1704 }, { "epoch": 1.78, "grad_norm": 2.1440253257751465, "learning_rate": 0.0001360981308411215, "loss": 1.0489, "step": 1705 }, { "epoch": 1.78, "grad_norm": 0.680216908454895, "learning_rate": 0.00013598130841121494, "loss": 0.2887, "step": 1706 }, { "epoch": 1.79, "grad_norm": 3.5613088607788086, "learning_rate": 0.0001358644859813084, "loss": 0.8109, "step": 1707 }, { "epoch": 1.79, "grad_norm": 3.2237210273742676, "learning_rate": 0.00013574766355140187, "loss": 1.3278, "step": 1708 }, { "epoch": 1.79, "grad_norm": 3.895890951156616, "learning_rate": 0.00013563084112149532, "loss": 1.1268, "step": 1709 }, { "epoch": 1.79, "grad_norm": 1.0976706743240356, "learning_rate": 0.00013551401869158877, "loss": 0.4548, "step": 1710 }, { "epoch": 1.79, "grad_norm": 2.142307758331299, "learning_rate": 0.00013539719626168224, "loss": 0.507, "step": 1711 }, { "epoch": 1.79, "grad_norm": 2.718857765197754, "learning_rate": 0.0001352803738317757, "loss": 0.4967, "step": 1712 }, { "epoch": 1.79, "grad_norm": 0.7044395208358765, "learning_rate": 0.00013516355140186914, "loss": 0.215, "step": 1713 }, { "epoch": 1.79, "grad_norm": 2.744619607925415, "learning_rate": 0.0001350467289719626, "loss": 0.7843, "step": 1714 }, { "epoch": 1.79, "grad_norm": 1.998574137687683, "learning_rate": 0.00013492990654205607, "loss": 1.4232, "step": 1715 }, { "epoch": 1.79, "grad_norm": 3.0873868465423584, "learning_rate": 0.00013481308411214952, "loss": 0.7932, "step": 1716 }, { "epoch": 1.8, "grad_norm": 2.173910617828369, "learning_rate": 0.00013469626168224297, "loss": 0.7113, "step": 
1717 }, { "epoch": 1.8, "grad_norm": 2.539853096008301, "learning_rate": 0.00013457943925233642, "loss": 0.3913, "step": 1718 }, { "epoch": 1.8, "grad_norm": 1.858372449874878, "learning_rate": 0.0001344626168224299, "loss": 0.5278, "step": 1719 }, { "epoch": 1.8, "grad_norm": 1.7758537530899048, "learning_rate": 0.00013434579439252335, "loss": 0.384, "step": 1720 }, { "epoch": 1.8, "grad_norm": 0.768137514591217, "learning_rate": 0.0001342289719626168, "loss": 0.3117, "step": 1721 }, { "epoch": 1.8, "grad_norm": 0.7663379311561584, "learning_rate": 0.00013411214953271028, "loss": 0.3133, "step": 1722 }, { "epoch": 1.8, "grad_norm": 2.0158727169036865, "learning_rate": 0.00013399532710280373, "loss": 0.6746, "step": 1723 }, { "epoch": 1.8, "grad_norm": 2.7553088665008545, "learning_rate": 0.0001338785046728972, "loss": 0.8026, "step": 1724 }, { "epoch": 1.8, "grad_norm": 1.7455641031265259, "learning_rate": 0.00013376168224299063, "loss": 0.4179, "step": 1725 }, { "epoch": 1.81, "grad_norm": 1.652976155281067, "learning_rate": 0.0001336448598130841, "loss": 0.7564, "step": 1726 }, { "epoch": 1.81, "grad_norm": 1.452163815498352, "learning_rate": 0.00013352803738317755, "loss": 0.3617, "step": 1727 }, { "epoch": 1.81, "grad_norm": 2.5906240940093994, "learning_rate": 0.00013341121495327103, "loss": 0.7063, "step": 1728 }, { "epoch": 1.81, "grad_norm": 1.8791476488113403, "learning_rate": 0.00013329439252336448, "loss": 0.4949, "step": 1729 }, { "epoch": 1.81, "grad_norm": 3.926201820373535, "learning_rate": 0.00013317757009345793, "loss": 1.5384, "step": 1730 }, { "epoch": 1.81, "grad_norm": 3.678696870803833, "learning_rate": 0.00013306074766355138, "loss": 1.3355, "step": 1731 }, { "epoch": 1.81, "grad_norm": 4.7318501472473145, "learning_rate": 0.00013294392523364486, "loss": 0.9178, "step": 1732 }, { "epoch": 1.81, "grad_norm": 3.0158326625823975, "learning_rate": 0.0001328271028037383, "loss": 1.0379, "step": 1733 }, { "epoch": 1.81, "grad_norm": 
1.457908034324646, "learning_rate": 0.00013271028037383176, "loss": 0.3679, "step": 1734 }, { "epoch": 1.81, "grad_norm": 1.1844749450683594, "learning_rate": 0.00013259345794392524, "loss": 0.4305, "step": 1735 }, { "epoch": 1.82, "grad_norm": 2.570006847381592, "learning_rate": 0.00013247663551401869, "loss": 0.4679, "step": 1736 }, { "epoch": 1.82, "grad_norm": 3.1339075565338135, "learning_rate": 0.00013235981308411214, "loss": 0.7069, "step": 1737 }, { "epoch": 1.82, "grad_norm": 0.4061032235622406, "learning_rate": 0.00013224299065420559, "loss": 0.1061, "step": 1738 }, { "epoch": 1.82, "grad_norm": 2.7448058128356934, "learning_rate": 0.00013212616822429906, "loss": 1.1243, "step": 1739 }, { "epoch": 1.82, "grad_norm": 2.1957473754882812, "learning_rate": 0.0001320093457943925, "loss": 0.3921, "step": 1740 }, { "epoch": 1.82, "grad_norm": 0.7898138761520386, "learning_rate": 0.00013189252336448596, "loss": 0.3201, "step": 1741 }, { "epoch": 1.82, "grad_norm": 3.37489652633667, "learning_rate": 0.0001317757009345794, "loss": 1.1398, "step": 1742 }, { "epoch": 1.82, "grad_norm": 4.243715286254883, "learning_rate": 0.0001316588785046729, "loss": 0.603, "step": 1743 }, { "epoch": 1.82, "grad_norm": 2.359915256500244, "learning_rate": 0.00013154205607476634, "loss": 1.0448, "step": 1744 }, { "epoch": 1.83, "grad_norm": 1.555791974067688, "learning_rate": 0.0001314252336448598, "loss": 0.3294, "step": 1745 }, { "epoch": 1.83, "grad_norm": 1.50589120388031, "learning_rate": 0.00013130841121495327, "loss": 0.3309, "step": 1746 }, { "epoch": 1.83, "grad_norm": 2.945429801940918, "learning_rate": 0.00013119158878504672, "loss": 0.8982, "step": 1747 }, { "epoch": 1.83, "grad_norm": 3.0196831226348877, "learning_rate": 0.0001310747663551402, "loss": 0.9588, "step": 1748 }, { "epoch": 1.83, "grad_norm": 2.560878038406372, "learning_rate": 0.00013095794392523362, "loss": 0.6943, "step": 1749 }, { "epoch": 1.83, "grad_norm": 2.355393409729004, "learning_rate": 
0.0001308411214953271, "loss": 0.6189, "step": 1750 }, { "epoch": 1.83, "grad_norm": 4.520174503326416, "learning_rate": 0.00013072429906542054, "loss": 0.9338, "step": 1751 }, { "epoch": 1.83, "grad_norm": 2.112287998199463, "learning_rate": 0.00013060747663551402, "loss": 0.4086, "step": 1752 }, { "epoch": 1.83, "grad_norm": 3.6935834884643555, "learning_rate": 0.00013049065420560747, "loss": 0.4796, "step": 1753 }, { "epoch": 1.83, "grad_norm": 0.544985294342041, "learning_rate": 0.00013037383177570092, "loss": 0.1304, "step": 1754 }, { "epoch": 1.84, "grad_norm": 3.0136711597442627, "learning_rate": 0.00013025700934579437, "loss": 0.4961, "step": 1755 }, { "epoch": 1.84, "grad_norm": 4.112677097320557, "learning_rate": 0.00013014018691588785, "loss": 0.9081, "step": 1756 }, { "epoch": 1.84, "grad_norm": 2.986521005630493, "learning_rate": 0.0001300233644859813, "loss": 0.963, "step": 1757 }, { "epoch": 1.84, "grad_norm": 0.8491998910903931, "learning_rate": 0.00012990654205607475, "loss": 0.3462, "step": 1758 }, { "epoch": 1.84, "grad_norm": 2.529773712158203, "learning_rate": 0.00012978971962616823, "loss": 0.8424, "step": 1759 }, { "epoch": 1.84, "grad_norm": 1.9695286750793457, "learning_rate": 0.00012967289719626168, "loss": 1.0353, "step": 1760 }, { "epoch": 1.84, "grad_norm": 3.785512685775757, "learning_rate": 0.00012955607476635513, "loss": 1.0751, "step": 1761 }, { "epoch": 1.84, "grad_norm": 3.8414695262908936, "learning_rate": 0.00012943925233644858, "loss": 1.1467, "step": 1762 }, { "epoch": 1.84, "grad_norm": 2.077259063720703, "learning_rate": 0.00012932242990654205, "loss": 0.7422, "step": 1763 }, { "epoch": 1.85, "grad_norm": 2.666192054748535, "learning_rate": 0.0001292056074766355, "loss": 0.7893, "step": 1764 }, { "epoch": 1.85, "grad_norm": 2.1345760822296143, "learning_rate": 0.00012908878504672895, "loss": 0.527, "step": 1765 }, { "epoch": 1.85, "grad_norm": 3.850268840789795, "learning_rate": 0.0001289719626168224, "loss": 0.9478, "step": 
1766 }, { "epoch": 1.85, "grad_norm": 2.2270233631134033, "learning_rate": 0.00012885514018691588, "loss": 1.0623, "step": 1767 }, { "epoch": 1.85, "grad_norm": 0.8560389280319214, "learning_rate": 0.00012873831775700933, "loss": 0.2867, "step": 1768 }, { "epoch": 1.85, "grad_norm": 3.4770913124084473, "learning_rate": 0.00012862149532710278, "loss": 1.1433, "step": 1769 }, { "epoch": 1.85, "grad_norm": 4.113784313201904, "learning_rate": 0.00012850467289719626, "loss": 1.1744, "step": 1770 }, { "epoch": 1.85, "grad_norm": 2.4345414638519287, "learning_rate": 0.0001283878504672897, "loss": 1.0072, "step": 1771 }, { "epoch": 1.85, "grad_norm": 0.8015138506889343, "learning_rate": 0.00012827102803738318, "loss": 0.206, "step": 1772 }, { "epoch": 1.85, "grad_norm": 1.1205389499664307, "learning_rate": 0.0001281542056074766, "loss": 0.6316, "step": 1773 }, { "epoch": 1.86, "grad_norm": 2.1893746852874756, "learning_rate": 0.00012803738317757008, "loss": 0.9665, "step": 1774 }, { "epoch": 1.86, "grad_norm": 3.2439510822296143, "learning_rate": 0.00012792056074766353, "loss": 1.1231, "step": 1775 }, { "epoch": 1.86, "grad_norm": 2.1773808002471924, "learning_rate": 0.000127803738317757, "loss": 1.114, "step": 1776 }, { "epoch": 1.86, "grad_norm": 2.366384983062744, "learning_rate": 0.00012768691588785046, "loss": 0.8207, "step": 1777 }, { "epoch": 1.86, "grad_norm": 0.9399102926254272, "learning_rate": 0.0001275700934579439, "loss": 0.5248, "step": 1778 }, { "epoch": 1.86, "grad_norm": 0.47793617844581604, "learning_rate": 0.00012745327102803736, "loss": 0.1291, "step": 1779 }, { "epoch": 1.86, "grad_norm": 1.048606514930725, "learning_rate": 0.00012733644859813084, "loss": 0.5026, "step": 1780 }, { "epoch": 1.86, "grad_norm": 2.299088478088379, "learning_rate": 0.0001272196261682243, "loss": 0.7212, "step": 1781 }, { "epoch": 1.86, "grad_norm": 0.593243420124054, "learning_rate": 0.00012710280373831774, "loss": 0.164, "step": 1782 }, { "epoch": 1.87, "grad_norm": 
0.7880874276161194, "learning_rate": 0.00012698598130841122, "loss": 0.4402, "step": 1783 }, { "epoch": 1.87, "grad_norm": 3.372981309890747, "learning_rate": 0.00012686915887850467, "loss": 0.9855, "step": 1784 }, { "epoch": 1.87, "grad_norm": 1.975346326828003, "learning_rate": 0.00012675233644859812, "loss": 0.9623, "step": 1785 }, { "epoch": 1.87, "grad_norm": 2.360755205154419, "learning_rate": 0.00012663551401869157, "loss": 0.6337, "step": 1786 }, { "epoch": 1.87, "grad_norm": 4.685933589935303, "learning_rate": 0.00012651869158878504, "loss": 1.1126, "step": 1787 }, { "epoch": 1.87, "grad_norm": 2.2752182483673096, "learning_rate": 0.0001264018691588785, "loss": 1.0364, "step": 1788 }, { "epoch": 1.87, "grad_norm": 3.0285208225250244, "learning_rate": 0.00012628504672897194, "loss": 0.6778, "step": 1789 }, { "epoch": 1.87, "grad_norm": 0.5671124458312988, "learning_rate": 0.0001261682242990654, "loss": 0.2275, "step": 1790 }, { "epoch": 1.87, "grad_norm": 1.9352588653564453, "learning_rate": 0.00012605140186915887, "loss": 0.4641, "step": 1791 }, { "epoch": 1.87, "grad_norm": 4.273382663726807, "learning_rate": 0.00012593457943925232, "loss": 0.907, "step": 1792 }, { "epoch": 1.88, "grad_norm": 1.3872417211532593, "learning_rate": 0.00012581775700934577, "loss": 0.3541, "step": 1793 }, { "epoch": 1.88, "grad_norm": 2.6732919216156006, "learning_rate": 0.00012570093457943925, "loss": 1.3397, "step": 1794 }, { "epoch": 1.88, "grad_norm": 1.4396637678146362, "learning_rate": 0.0001255841121495327, "loss": 0.8367, "step": 1795 }, { "epoch": 1.88, "grad_norm": 2.2039003372192383, "learning_rate": 0.00012546728971962618, "loss": 0.8741, "step": 1796 }, { "epoch": 1.88, "grad_norm": 2.8381385803222656, "learning_rate": 0.0001253504672897196, "loss": 0.4666, "step": 1797 }, { "epoch": 1.88, "grad_norm": 1.1700317859649658, "learning_rate": 0.00012523364485981308, "loss": 0.5679, "step": 1798 }, { "epoch": 1.88, "grad_norm": 2.4915921688079834, "learning_rate": 
0.00012511682242990653, "loss": 1.4566, "step": 1799 }, { "epoch": 1.88, "grad_norm": 3.598515033721924, "learning_rate": 0.000125, "loss": 1.4513, "step": 1800 }, { "epoch": 1.88, "eval_loss": 1.143676996231079, "eval_runtime": 1.0027, "eval_samples_per_second": 4.987, "eval_steps_per_second": 0.997, "step": 1800 } ], "logging_steps": 1, "max_steps": 2868, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 300, "total_flos": 6.46034707094569e+16, "train_batch_size": 1, "trial_name": null, "trial_params": null }