{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9983193277310924,
"eval_steps": 500,
"global_step": 1338,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002240896358543417,
"grad_norm": 0.22982779145240784,
"learning_rate": 1.1235955056179775e-06,
"loss": 1.1973,
"step": 1
},
{
"epoch": 0.004481792717086834,
"grad_norm": 0.2098255604505539,
"learning_rate": 2.247191011235955e-06,
"loss": 1.2685,
"step": 2
},
{
"epoch": 0.0067226890756302525,
"grad_norm": 0.2404344230890274,
"learning_rate": 3.3707865168539327e-06,
"loss": 1.2292,
"step": 3
},
{
"epoch": 0.008963585434173669,
"grad_norm": 0.23824891448020935,
"learning_rate": 4.49438202247191e-06,
"loss": 1.1901,
"step": 4
},
{
"epoch": 0.011204481792717087,
"grad_norm": 0.22885526716709137,
"learning_rate": 5.617977528089888e-06,
"loss": 1.2695,
"step": 5
},
{
"epoch": 0.013445378151260505,
"grad_norm": 0.2576353847980499,
"learning_rate": 6.741573033707865e-06,
"loss": 1.2872,
"step": 6
},
{
"epoch": 0.01568627450980392,
"grad_norm": 0.2190636247396469,
"learning_rate": 7.865168539325843e-06,
"loss": 1.1952,
"step": 7
},
{
"epoch": 0.017927170868347338,
"grad_norm": 0.21357473731040955,
"learning_rate": 8.98876404494382e-06,
"loss": 1.2597,
"step": 8
},
{
"epoch": 0.020168067226890758,
"grad_norm": 0.24502670764923096,
"learning_rate": 1.0112359550561798e-05,
"loss": 1.279,
"step": 9
},
{
"epoch": 0.022408963585434174,
"grad_norm": 0.25532686710357666,
"learning_rate": 1.1235955056179776e-05,
"loss": 1.2163,
"step": 10
},
{
"epoch": 0.02464985994397759,
"grad_norm": 0.250683456659317,
"learning_rate": 1.2359550561797752e-05,
"loss": 1.2735,
"step": 11
},
{
"epoch": 0.02689075630252101,
"grad_norm": 0.2523595988750458,
"learning_rate": 1.348314606741573e-05,
"loss": 1.3471,
"step": 12
},
{
"epoch": 0.029131652661064426,
"grad_norm": 0.23875640332698822,
"learning_rate": 1.4606741573033709e-05,
"loss": 1.2595,
"step": 13
},
{
"epoch": 0.03137254901960784,
"grad_norm": 0.24998927116394043,
"learning_rate": 1.5730337078651687e-05,
"loss": 1.2142,
"step": 14
},
{
"epoch": 0.03361344537815126,
"grad_norm": 0.2783028483390808,
"learning_rate": 1.6853932584269665e-05,
"loss": 1.185,
"step": 15
},
{
"epoch": 0.035854341736694675,
"grad_norm": 0.2796604633331299,
"learning_rate": 1.797752808988764e-05,
"loss": 1.226,
"step": 16
},
{
"epoch": 0.0380952380952381,
"grad_norm": 0.2899184823036194,
"learning_rate": 1.9101123595505618e-05,
"loss": 1.1672,
"step": 17
},
{
"epoch": 0.040336134453781515,
"grad_norm": 0.2601150572299957,
"learning_rate": 2.0224719101123596e-05,
"loss": 1.1127,
"step": 18
},
{
"epoch": 0.04257703081232493,
"grad_norm": 0.3052348792552948,
"learning_rate": 2.1348314606741574e-05,
"loss": 1.2865,
"step": 19
},
{
"epoch": 0.04481792717086835,
"grad_norm": 0.24783451855182648,
"learning_rate": 2.2471910112359552e-05,
"loss": 1.0888,
"step": 20
},
{
"epoch": 0.047058823529411764,
"grad_norm": 0.24703693389892578,
"learning_rate": 2.359550561797753e-05,
"loss": 1.059,
"step": 21
},
{
"epoch": 0.04929971988795518,
"grad_norm": 0.2503679096698761,
"learning_rate": 2.4719101123595505e-05,
"loss": 1.1602,
"step": 22
},
{
"epoch": 0.0515406162464986,
"grad_norm": 0.2639842927455902,
"learning_rate": 2.5842696629213486e-05,
"loss": 1.0911,
"step": 23
},
{
"epoch": 0.05378151260504202,
"grad_norm": 0.2942507266998291,
"learning_rate": 2.696629213483146e-05,
"loss": 1.0777,
"step": 24
},
{
"epoch": 0.056022408963585436,
"grad_norm": 0.28088828921318054,
"learning_rate": 2.8089887640449443e-05,
"loss": 1.1111,
"step": 25
},
{
"epoch": 0.05826330532212885,
"grad_norm": 0.26791295409202576,
"learning_rate": 2.9213483146067417e-05,
"loss": 1.0016,
"step": 26
},
{
"epoch": 0.06050420168067227,
"grad_norm": 0.2685791850090027,
"learning_rate": 3.0337078651685396e-05,
"loss": 1.1085,
"step": 27
},
{
"epoch": 0.06274509803921569,
"grad_norm": 0.2627420127391815,
"learning_rate": 3.1460674157303374e-05,
"loss": 1.0704,
"step": 28
},
{
"epoch": 0.06498599439775911,
"grad_norm": 0.3000424802303314,
"learning_rate": 3.258426966292135e-05,
"loss": 1.0786,
"step": 29
},
{
"epoch": 0.06722689075630252,
"grad_norm": 0.3018706738948822,
"learning_rate": 3.370786516853933e-05,
"loss": 1.0423,
"step": 30
},
{
"epoch": 0.06946778711484594,
"grad_norm": 0.27565667033195496,
"learning_rate": 3.483146067415731e-05,
"loss": 0.9834,
"step": 31
},
{
"epoch": 0.07170868347338935,
"grad_norm": 0.30244842171669006,
"learning_rate": 3.595505617977528e-05,
"loss": 1.0927,
"step": 32
},
{
"epoch": 0.07394957983193277,
"grad_norm": 0.3654678165912628,
"learning_rate": 3.7078651685393264e-05,
"loss": 0.9812,
"step": 33
},
{
"epoch": 0.0761904761904762,
"grad_norm": 0.36483272910118103,
"learning_rate": 3.8202247191011236e-05,
"loss": 0.9693,
"step": 34
},
{
"epoch": 0.0784313725490196,
"grad_norm": 0.2949022054672241,
"learning_rate": 3.9325842696629214e-05,
"loss": 0.9647,
"step": 35
},
{
"epoch": 0.08067226890756303,
"grad_norm": 0.36239683628082275,
"learning_rate": 4.044943820224719e-05,
"loss": 0.9725,
"step": 36
},
{
"epoch": 0.08291316526610644,
"grad_norm": 0.32511067390441895,
"learning_rate": 4.157303370786517e-05,
"loss": 1.0136,
"step": 37
},
{
"epoch": 0.08515406162464986,
"grad_norm": 0.32111090421676636,
"learning_rate": 4.269662921348315e-05,
"loss": 0.9207,
"step": 38
},
{
"epoch": 0.08739495798319327,
"grad_norm": 0.3080519735813141,
"learning_rate": 4.3820224719101126e-05,
"loss": 0.9729,
"step": 39
},
{
"epoch": 0.0896358543417367,
"grad_norm": 0.2933235764503479,
"learning_rate": 4.4943820224719104e-05,
"loss": 0.9403,
"step": 40
},
{
"epoch": 0.09187675070028012,
"grad_norm": 0.3132348358631134,
"learning_rate": 4.606741573033708e-05,
"loss": 0.9587,
"step": 41
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.3013119101524353,
"learning_rate": 4.719101123595506e-05,
"loss": 0.9162,
"step": 42
},
{
"epoch": 0.09635854341736695,
"grad_norm": 0.300485223531723,
"learning_rate": 4.831460674157304e-05,
"loss": 0.8984,
"step": 43
},
{
"epoch": 0.09859943977591036,
"grad_norm": 0.2726304233074188,
"learning_rate": 4.943820224719101e-05,
"loss": 0.987,
"step": 44
},
{
"epoch": 0.10084033613445378,
"grad_norm": 0.2733825445175171,
"learning_rate": 5.0561797752808995e-05,
"loss": 0.9438,
"step": 45
},
{
"epoch": 0.1030812324929972,
"grad_norm": 0.28922319412231445,
"learning_rate": 5.168539325842697e-05,
"loss": 0.8674,
"step": 46
},
{
"epoch": 0.10532212885154062,
"grad_norm": 0.2743085026741028,
"learning_rate": 5.2808988764044944e-05,
"loss": 0.9041,
"step": 47
},
{
"epoch": 0.10756302521008404,
"grad_norm": 0.28649550676345825,
"learning_rate": 5.393258426966292e-05,
"loss": 0.9057,
"step": 48
},
{
"epoch": 0.10980392156862745,
"grad_norm": 0.2877427339553833,
"learning_rate": 5.50561797752809e-05,
"loss": 0.9139,
"step": 49
},
{
"epoch": 0.11204481792717087,
"grad_norm": 0.27738648653030396,
"learning_rate": 5.6179775280898885e-05,
"loss": 0.8518,
"step": 50
},
{
"epoch": 0.11428571428571428,
"grad_norm": 0.2839404046535492,
"learning_rate": 5.730337078651685e-05,
"loss": 0.8634,
"step": 51
},
{
"epoch": 0.1165266106442577,
"grad_norm": 0.2748688757419586,
"learning_rate": 5.8426966292134835e-05,
"loss": 0.9504,
"step": 52
},
{
"epoch": 0.11876750700280111,
"grad_norm": 0.2953556776046753,
"learning_rate": 5.955056179775281e-05,
"loss": 0.861,
"step": 53
},
{
"epoch": 0.12100840336134454,
"grad_norm": 0.2947392165660858,
"learning_rate": 6.067415730337079e-05,
"loss": 0.8578,
"step": 54
},
{
"epoch": 0.12324929971988796,
"grad_norm": 0.3029733896255493,
"learning_rate": 6.179775280898876e-05,
"loss": 0.895,
"step": 55
},
{
"epoch": 0.12549019607843137,
"grad_norm": 0.28483161330223083,
"learning_rate": 6.292134831460675e-05,
"loss": 0.9316,
"step": 56
},
{
"epoch": 0.12773109243697478,
"grad_norm": 0.280499666929245,
"learning_rate": 6.404494382022472e-05,
"loss": 0.8763,
"step": 57
},
{
"epoch": 0.12997198879551822,
"grad_norm": 0.2687634825706482,
"learning_rate": 6.51685393258427e-05,
"loss": 0.9216,
"step": 58
},
{
"epoch": 0.13221288515406163,
"grad_norm": 0.2869422435760498,
"learning_rate": 6.629213483146067e-05,
"loss": 0.8783,
"step": 59
},
{
"epoch": 0.13445378151260504,
"grad_norm": 0.29140859842300415,
"learning_rate": 6.741573033707866e-05,
"loss": 0.9613,
"step": 60
},
{
"epoch": 0.13669467787114845,
"grad_norm": 0.29342207312583923,
"learning_rate": 6.853932584269663e-05,
"loss": 0.982,
"step": 61
},
{
"epoch": 0.13893557422969188,
"grad_norm": 0.29143285751342773,
"learning_rate": 6.966292134831462e-05,
"loss": 0.8857,
"step": 62
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.2944166660308838,
"learning_rate": 7.078651685393259e-05,
"loss": 0.8668,
"step": 63
},
{
"epoch": 0.1434173669467787,
"grad_norm": 0.27430447936058044,
"learning_rate": 7.191011235955056e-05,
"loss": 0.9403,
"step": 64
},
{
"epoch": 0.14565826330532214,
"grad_norm": 0.274800568819046,
"learning_rate": 7.303370786516854e-05,
"loss": 0.8689,
"step": 65
},
{
"epoch": 0.14789915966386555,
"grad_norm": 0.3063383102416992,
"learning_rate": 7.415730337078653e-05,
"loss": 0.9217,
"step": 66
},
{
"epoch": 0.15014005602240896,
"grad_norm": 0.26958584785461426,
"learning_rate": 7.52808988764045e-05,
"loss": 0.949,
"step": 67
},
{
"epoch": 0.1523809523809524,
"grad_norm": 0.3030094504356384,
"learning_rate": 7.640449438202247e-05,
"loss": 0.8814,
"step": 68
},
{
"epoch": 0.1546218487394958,
"grad_norm": 0.26790764927864075,
"learning_rate": 7.752808988764046e-05,
"loss": 0.9814,
"step": 69
},
{
"epoch": 0.1568627450980392,
"grad_norm": 0.28143975138664246,
"learning_rate": 7.865168539325843e-05,
"loss": 0.8667,
"step": 70
},
{
"epoch": 0.15910364145658262,
"grad_norm": 0.32539746165275574,
"learning_rate": 7.97752808988764e-05,
"loss": 1.0709,
"step": 71
},
{
"epoch": 0.16134453781512606,
"grad_norm": 0.28114452958106995,
"learning_rate": 8.089887640449438e-05,
"loss": 0.9253,
"step": 72
},
{
"epoch": 0.16358543417366947,
"grad_norm": 0.2680191993713379,
"learning_rate": 8.202247191011237e-05,
"loss": 0.9345,
"step": 73
},
{
"epoch": 0.16582633053221288,
"grad_norm": 0.2928783893585205,
"learning_rate": 8.314606741573034e-05,
"loss": 0.906,
"step": 74
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.2979438900947571,
"learning_rate": 8.426966292134831e-05,
"loss": 0.9581,
"step": 75
},
{
"epoch": 0.17030812324929973,
"grad_norm": 0.27883946895599365,
"learning_rate": 8.53932584269663e-05,
"loss": 0.9203,
"step": 76
},
{
"epoch": 0.17254901960784313,
"grad_norm": 0.3945503234863281,
"learning_rate": 8.651685393258427e-05,
"loss": 0.8136,
"step": 77
},
{
"epoch": 0.17478991596638654,
"grad_norm": 0.27328184247016907,
"learning_rate": 8.764044943820225e-05,
"loss": 0.8966,
"step": 78
},
{
"epoch": 0.17703081232492998,
"grad_norm": 0.3049486577510834,
"learning_rate": 8.876404494382022e-05,
"loss": 0.9658,
"step": 79
},
{
"epoch": 0.1792717086834734,
"grad_norm": 0.3143669366836548,
"learning_rate": 8.988764044943821e-05,
"loss": 0.9867,
"step": 80
},
{
"epoch": 0.1815126050420168,
"grad_norm": 0.30048221349716187,
"learning_rate": 9.101123595505618e-05,
"loss": 0.9249,
"step": 81
},
{
"epoch": 0.18375350140056024,
"grad_norm": 0.28177568316459656,
"learning_rate": 9.213483146067416e-05,
"loss": 0.9899,
"step": 82
},
{
"epoch": 0.18599439775910365,
"grad_norm": 0.296478807926178,
"learning_rate": 9.325842696629214e-05,
"loss": 0.8476,
"step": 83
},
{
"epoch": 0.18823529411764706,
"grad_norm": 0.2855791449546814,
"learning_rate": 9.438202247191012e-05,
"loss": 0.8869,
"step": 84
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.2829147279262543,
"learning_rate": 9.550561797752809e-05,
"loss": 0.9274,
"step": 85
},
{
"epoch": 0.1927170868347339,
"grad_norm": 0.2549046277999878,
"learning_rate": 9.662921348314608e-05,
"loss": 0.8535,
"step": 86
},
{
"epoch": 0.1949579831932773,
"grad_norm": 0.2655014991760254,
"learning_rate": 9.775280898876405e-05,
"loss": 0.8905,
"step": 87
},
{
"epoch": 0.19719887955182072,
"grad_norm": 0.27607807517051697,
"learning_rate": 9.887640449438202e-05,
"loss": 0.8788,
"step": 88
},
{
"epoch": 0.19943977591036416,
"grad_norm": 0.24970707297325134,
"learning_rate": 0.0001,
"loss": 0.7632,
"step": 89
},
{
"epoch": 0.20168067226890757,
"grad_norm": 0.2821432650089264,
"learning_rate": 0.00010112359550561799,
"loss": 0.9314,
"step": 90
},
{
"epoch": 0.20392156862745098,
"grad_norm": 0.28368303179740906,
"learning_rate": 0.00010224719101123596,
"loss": 0.8424,
"step": 91
},
{
"epoch": 0.2061624649859944,
"grad_norm": 0.3087138235569,
"learning_rate": 0.00010337078651685395,
"loss": 0.9787,
"step": 92
},
{
"epoch": 0.20840336134453782,
"grad_norm": 0.2952103614807129,
"learning_rate": 0.00010449438202247193,
"loss": 0.9679,
"step": 93
},
{
"epoch": 0.21064425770308123,
"grad_norm": 0.2875281572341919,
"learning_rate": 0.00010561797752808989,
"loss": 0.9181,
"step": 94
},
{
"epoch": 0.21288515406162464,
"grad_norm": 0.3084465265274048,
"learning_rate": 0.00010674157303370786,
"loss": 0.9615,
"step": 95
},
{
"epoch": 0.21512605042016808,
"grad_norm": 0.28005871176719666,
"learning_rate": 0.00010786516853932584,
"loss": 0.8757,
"step": 96
},
{
"epoch": 0.2173669467787115,
"grad_norm": 0.2795560657978058,
"learning_rate": 0.00010898876404494383,
"loss": 0.8266,
"step": 97
},
{
"epoch": 0.2196078431372549,
"grad_norm": 0.2671497166156769,
"learning_rate": 0.0001101123595505618,
"loss": 0.8693,
"step": 98
},
{
"epoch": 0.2218487394957983,
"grad_norm": 0.24397605657577515,
"learning_rate": 0.00011123595505617979,
"loss": 0.8334,
"step": 99
},
{
"epoch": 0.22408963585434175,
"grad_norm": 0.26632094383239746,
"learning_rate": 0.00011235955056179777,
"loss": 0.966,
"step": 100
},
{
"epoch": 0.22633053221288515,
"grad_norm": 0.2785671055316925,
"learning_rate": 0.00011348314606741574,
"loss": 0.8823,
"step": 101
},
{
"epoch": 0.22857142857142856,
"grad_norm": 0.26920077204704285,
"learning_rate": 0.0001146067415730337,
"loss": 0.9726,
"step": 102
},
{
"epoch": 0.230812324929972,
"grad_norm": 0.2633483409881592,
"learning_rate": 0.00011573033707865168,
"loss": 0.8849,
"step": 103
},
{
"epoch": 0.2330532212885154,
"grad_norm": 0.26563239097595215,
"learning_rate": 0.00011685393258426967,
"loss": 0.7896,
"step": 104
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.27475470304489136,
"learning_rate": 0.00011797752808988764,
"loss": 0.9659,
"step": 105
},
{
"epoch": 0.23753501400560223,
"grad_norm": 0.2691081166267395,
"learning_rate": 0.00011910112359550563,
"loss": 0.9758,
"step": 106
},
{
"epoch": 0.23977591036414567,
"grad_norm": 0.2714226543903351,
"learning_rate": 0.00012022471910112361,
"loss": 0.8736,
"step": 107
},
{
"epoch": 0.24201680672268908,
"grad_norm": 0.2638857960700989,
"learning_rate": 0.00012134831460674158,
"loss": 0.8792,
"step": 108
},
{
"epoch": 0.24425770308123249,
"grad_norm": 0.25696009397506714,
"learning_rate": 0.00012247191011235955,
"loss": 0.8813,
"step": 109
},
{
"epoch": 0.24649859943977592,
"grad_norm": 0.27648770809173584,
"learning_rate": 0.00012359550561797752,
"loss": 0.9341,
"step": 110
},
{
"epoch": 0.24873949579831933,
"grad_norm": 0.27543121576309204,
"learning_rate": 0.0001247191011235955,
"loss": 0.8102,
"step": 111
},
{
"epoch": 0.25098039215686274,
"grad_norm": 0.2804222106933594,
"learning_rate": 0.0001258426966292135,
"loss": 0.9004,
"step": 112
},
{
"epoch": 0.25322128851540615,
"grad_norm": 0.27446436882019043,
"learning_rate": 0.00012696629213483147,
"loss": 0.8432,
"step": 113
},
{
"epoch": 0.25546218487394956,
"grad_norm": 0.27675163745880127,
"learning_rate": 0.00012808988764044944,
"loss": 0.8868,
"step": 114
},
{
"epoch": 0.25770308123249297,
"grad_norm": 0.24772736430168152,
"learning_rate": 0.00012921348314606744,
"loss": 0.8711,
"step": 115
},
{
"epoch": 0.25994397759103643,
"grad_norm": 0.2736036479473114,
"learning_rate": 0.0001303370786516854,
"loss": 0.922,
"step": 116
},
{
"epoch": 0.26218487394957984,
"grad_norm": 0.23815171420574188,
"learning_rate": 0.00013146067415730338,
"loss": 0.8469,
"step": 117
},
{
"epoch": 0.26442577030812325,
"grad_norm": 0.2564987540245056,
"learning_rate": 0.00013258426966292135,
"loss": 0.7626,
"step": 118
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.26105883717536926,
"learning_rate": 0.00013370786516853932,
"loss": 0.7911,
"step": 119
},
{
"epoch": 0.2689075630252101,
"grad_norm": 0.2915550768375397,
"learning_rate": 0.00013483146067415732,
"loss": 0.9643,
"step": 120
},
{
"epoch": 0.2711484593837535,
"grad_norm": 0.28031301498413086,
"learning_rate": 0.0001359550561797753,
"loss": 0.8797,
"step": 121
},
{
"epoch": 0.2733893557422969,
"grad_norm": 0.2468908429145813,
"learning_rate": 0.00013707865168539326,
"loss": 0.8497,
"step": 122
},
{
"epoch": 0.27563025210084036,
"grad_norm": 0.2498752921819687,
"learning_rate": 0.00013820224719101123,
"loss": 0.842,
"step": 123
},
{
"epoch": 0.27787114845938377,
"grad_norm": 0.2517074942588806,
"learning_rate": 0.00013932584269662923,
"loss": 0.8072,
"step": 124
},
{
"epoch": 0.2801120448179272,
"grad_norm": 0.24205273389816284,
"learning_rate": 0.0001404494382022472,
"loss": 0.796,
"step": 125
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.259343683719635,
"learning_rate": 0.00014157303370786517,
"loss": 0.8609,
"step": 126
},
{
"epoch": 0.284593837535014,
"grad_norm": 0.2564795911312103,
"learning_rate": 0.00014269662921348315,
"loss": 0.8976,
"step": 127
},
{
"epoch": 0.2868347338935574,
"grad_norm": 0.2650969922542572,
"learning_rate": 0.00014382022471910112,
"loss": 0.8239,
"step": 128
},
{
"epoch": 0.28907563025210087,
"grad_norm": 0.26443368196487427,
"learning_rate": 0.00014494382022471912,
"loss": 0.8479,
"step": 129
},
{
"epoch": 0.2913165266106443,
"grad_norm": 0.26897069811820984,
"learning_rate": 0.0001460674157303371,
"loss": 0.9104,
"step": 130
},
{
"epoch": 0.2935574229691877,
"grad_norm": 0.2578631341457367,
"learning_rate": 0.00014719101123595506,
"loss": 0.8151,
"step": 131
},
{
"epoch": 0.2957983193277311,
"grad_norm": 0.2454749345779419,
"learning_rate": 0.00014831460674157306,
"loss": 0.7915,
"step": 132
},
{
"epoch": 0.2980392156862745,
"grad_norm": 0.25589731335639954,
"learning_rate": 0.00014943820224719103,
"loss": 0.8952,
"step": 133
},
{
"epoch": 0.3002801120448179,
"grad_norm": 0.2591662108898163,
"learning_rate": 0.000150561797752809,
"loss": 0.8112,
"step": 134
},
{
"epoch": 0.3025210084033613,
"grad_norm": 0.26816102862358093,
"learning_rate": 0.00015168539325842697,
"loss": 0.8893,
"step": 135
},
{
"epoch": 0.3047619047619048,
"grad_norm": 0.24405767023563385,
"learning_rate": 0.00015280898876404494,
"loss": 0.877,
"step": 136
},
{
"epoch": 0.3070028011204482,
"grad_norm": 0.2588540315628052,
"learning_rate": 0.00015393258426966294,
"loss": 0.7779,
"step": 137
},
{
"epoch": 0.3092436974789916,
"grad_norm": 0.27598896622657776,
"learning_rate": 0.0001550561797752809,
"loss": 0.7752,
"step": 138
},
{
"epoch": 0.311484593837535,
"grad_norm": 0.24622836709022522,
"learning_rate": 0.00015617977528089888,
"loss": 0.8849,
"step": 139
},
{
"epoch": 0.3137254901960784,
"grad_norm": 0.2502545118331909,
"learning_rate": 0.00015730337078651685,
"loss": 0.8012,
"step": 140
},
{
"epoch": 0.31596638655462184,
"grad_norm": 0.25384724140167236,
"learning_rate": 0.00015842696629213485,
"loss": 0.8395,
"step": 141
},
{
"epoch": 0.31820728291316525,
"grad_norm": 0.2527698874473572,
"learning_rate": 0.0001595505617977528,
"loss": 0.831,
"step": 142
},
{
"epoch": 0.3204481792717087,
"grad_norm": 0.24567513167858124,
"learning_rate": 0.0001606741573033708,
"loss": 0.7959,
"step": 143
},
{
"epoch": 0.3226890756302521,
"grad_norm": 0.24283619225025177,
"learning_rate": 0.00016179775280898877,
"loss": 0.8146,
"step": 144
},
{
"epoch": 0.32492997198879553,
"grad_norm": 0.2708129286766052,
"learning_rate": 0.00016292134831460674,
"loss": 0.905,
"step": 145
},
{
"epoch": 0.32717086834733894,
"grad_norm": 0.27091729640960693,
"learning_rate": 0.00016404494382022474,
"loss": 0.8681,
"step": 146
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.2502163350582123,
"learning_rate": 0.0001651685393258427,
"loss": 0.8859,
"step": 147
},
{
"epoch": 0.33165266106442576,
"grad_norm": 0.23066553473472595,
"learning_rate": 0.00016629213483146068,
"loss": 0.9034,
"step": 148
},
{
"epoch": 0.33389355742296917,
"grad_norm": 0.2503213882446289,
"learning_rate": 0.00016741573033707868,
"loss": 0.8348,
"step": 149
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.25126519799232483,
"learning_rate": 0.00016853932584269662,
"loss": 0.8553,
"step": 150
},
{
"epoch": 0.33837535014005604,
"grad_norm": 0.241397425532341,
"learning_rate": 0.00016966292134831462,
"loss": 0.9132,
"step": 151
},
{
"epoch": 0.34061624649859945,
"grad_norm": 0.25682827830314636,
"learning_rate": 0.0001707865168539326,
"loss": 0.9481,
"step": 152
},
{
"epoch": 0.34285714285714286,
"grad_norm": 0.24024637043476105,
"learning_rate": 0.00017191011235955056,
"loss": 0.943,
"step": 153
},
{
"epoch": 0.34509803921568627,
"grad_norm": 0.2626055181026459,
"learning_rate": 0.00017303370786516853,
"loss": 0.7863,
"step": 154
},
{
"epoch": 0.3473389355742297,
"grad_norm": 0.24571183323860168,
"learning_rate": 0.00017415730337078653,
"loss": 0.926,
"step": 155
},
{
"epoch": 0.3495798319327731,
"grad_norm": 0.2499912977218628,
"learning_rate": 0.0001752808988764045,
"loss": 0.8232,
"step": 156
},
{
"epoch": 0.35182072829131655,
"grad_norm": 0.25473934412002563,
"learning_rate": 0.00017640449438202248,
"loss": 0.9021,
"step": 157
},
{
"epoch": 0.35406162464985996,
"grad_norm": 0.25837019085884094,
"learning_rate": 0.00017752808988764045,
"loss": 0.7753,
"step": 158
},
{
"epoch": 0.3563025210084034,
"grad_norm": 0.255958616733551,
"learning_rate": 0.00017865168539325842,
"loss": 0.9155,
"step": 159
},
{
"epoch": 0.3585434173669468,
"grad_norm": 0.24315786361694336,
"learning_rate": 0.00017977528089887642,
"loss": 0.8039,
"step": 160
},
{
"epoch": 0.3607843137254902,
"grad_norm": 0.24614644050598145,
"learning_rate": 0.0001808988764044944,
"loss": 0.8857,
"step": 161
},
{
"epoch": 0.3630252100840336,
"grad_norm": 0.24280671775341034,
"learning_rate": 0.00018202247191011236,
"loss": 0.8354,
"step": 162
},
{
"epoch": 0.365266106442577,
"grad_norm": 0.2597411572933197,
"learning_rate": 0.00018314606741573036,
"loss": 0.8852,
"step": 163
},
{
"epoch": 0.3675070028011205,
"grad_norm": 0.2597702145576477,
"learning_rate": 0.00018426966292134833,
"loss": 0.9029,
"step": 164
},
{
"epoch": 0.3697478991596639,
"grad_norm": 0.23551709949970245,
"learning_rate": 0.0001853932584269663,
"loss": 0.9118,
"step": 165
},
{
"epoch": 0.3719887955182073,
"grad_norm": 0.2516990303993225,
"learning_rate": 0.00018651685393258427,
"loss": 0.7997,
"step": 166
},
{
"epoch": 0.3742296918767507,
"grad_norm": 0.2297232747077942,
"learning_rate": 0.00018764044943820224,
"loss": 0.7823,
"step": 167
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.22287945449352264,
"learning_rate": 0.00018876404494382024,
"loss": 0.8609,
"step": 168
},
{
"epoch": 0.3787114845938375,
"grad_norm": 0.2428523749113083,
"learning_rate": 0.0001898876404494382,
"loss": 0.8842,
"step": 169
},
{
"epoch": 0.38095238095238093,
"grad_norm": 0.25949063897132874,
"learning_rate": 0.00019101123595505618,
"loss": 0.7737,
"step": 170
},
{
"epoch": 0.3831932773109244,
"grad_norm": 0.2532324492931366,
"learning_rate": 0.00019213483146067416,
"loss": 0.7548,
"step": 171
},
{
"epoch": 0.3854341736694678,
"grad_norm": 0.2657643258571625,
"learning_rate": 0.00019325842696629215,
"loss": 0.8375,
"step": 172
},
{
"epoch": 0.3876750700280112,
"grad_norm": 0.2578238546848297,
"learning_rate": 0.00019438202247191013,
"loss": 0.8591,
"step": 173
},
{
"epoch": 0.3899159663865546,
"grad_norm": 0.24303969740867615,
"learning_rate": 0.0001955056179775281,
"loss": 0.8131,
"step": 174
},
{
"epoch": 0.39215686274509803,
"grad_norm": 0.271139919757843,
"learning_rate": 0.00019662921348314607,
"loss": 0.8234,
"step": 175
},
{
"epoch": 0.39439775910364144,
"grad_norm": 0.2569217085838318,
"learning_rate": 0.00019775280898876404,
"loss": 0.8907,
"step": 176
},
{
"epoch": 0.39663865546218485,
"grad_norm": 0.23687879741191864,
"learning_rate": 0.00019887640449438204,
"loss": 0.8082,
"step": 177
},
{
"epoch": 0.3988795518207283,
"grad_norm": 0.24828100204467773,
"learning_rate": 0.0002,
"loss": 0.8575,
"step": 178
},
{
"epoch": 0.4011204481792717,
"grad_norm": 0.24300478398799896,
"learning_rate": 0.00019999980867200105,
"loss": 0.7963,
"step": 179
},
{
"epoch": 0.40336134453781514,
"grad_norm": 0.2662373483181,
"learning_rate": 0.00019999923468873635,
"loss": 0.8871,
"step": 180
},
{
"epoch": 0.40560224089635855,
"grad_norm": 0.2600310146808624,
"learning_rate": 0.00019999827805240226,
"loss": 0.8477,
"step": 181
},
{
"epoch": 0.40784313725490196,
"grad_norm": 0.26801761984825134,
"learning_rate": 0.00019999693876665938,
"loss": 0.8978,
"step": 182
},
{
"epoch": 0.41008403361344536,
"grad_norm": 0.26510775089263916,
"learning_rate": 0.00019999521683663262,
"loss": 0.8387,
"step": 183
},
{
"epoch": 0.4123249299719888,
"grad_norm": 0.25927454233169556,
"learning_rate": 0.00019999311226891103,
"loss": 0.884,
"step": 184
},
{
"epoch": 0.41456582633053224,
"grad_norm": 0.2489653080701828,
"learning_rate": 0.00019999062507154784,
"loss": 0.9473,
"step": 185
},
{
"epoch": 0.41680672268907565,
"grad_norm": 0.2461009919643402,
"learning_rate": 0.0001999877552540605,
"loss": 0.8609,
"step": 186
},
{
"epoch": 0.41904761904761906,
"grad_norm": 0.24591152369976044,
"learning_rate": 0.00019998450282743052,
"loss": 0.9284,
"step": 187
},
{
"epoch": 0.42128851540616247,
"grad_norm": 0.23085853457450867,
"learning_rate": 0.00019998086780410353,
"loss": 0.8898,
"step": 188
},
{
"epoch": 0.4235294117647059,
"grad_norm": 0.2512655556201935,
"learning_rate": 0.00019997685019798912,
"loss": 0.8275,
"step": 189
},
{
"epoch": 0.4257703081232493,
"grad_norm": 0.24720723927021027,
"learning_rate": 0.0001999724500244609,
"loss": 0.8809,
"step": 190
},
{
"epoch": 0.4280112044817927,
"grad_norm": 0.25972652435302734,
"learning_rate": 0.00019996766730035642,
"loss": 0.891,
"step": 191
},
{
"epoch": 0.43025210084033616,
"grad_norm": 0.26756009459495544,
"learning_rate": 0.0001999625020439771,
"loss": 0.923,
"step": 192
},
{
"epoch": 0.43249299719887957,
"grad_norm": 0.24553890526294708,
"learning_rate": 0.000199956954275088,
"loss": 0.8222,
"step": 193
},
{
"epoch": 0.434733893557423,
"grad_norm": 0.24937503039836884,
"learning_rate": 0.0001999510240149181,
"loss": 0.788,
"step": 194
},
{
"epoch": 0.4369747899159664,
"grad_norm": 0.2749722898006439,
"learning_rate": 0.00019994471128615985,
"loss": 0.9383,
"step": 195
},
{
"epoch": 0.4392156862745098,
"grad_norm": 0.25508439540863037,
"learning_rate": 0.00019993801611296923,
"loss": 0.8234,
"step": 196
},
{
"epoch": 0.4414565826330532,
"grad_norm": 0.26953256130218506,
"learning_rate": 0.00019993093852096582,
"loss": 0.9895,
"step": 197
},
{
"epoch": 0.4436974789915966,
"grad_norm": 0.26215213537216187,
"learning_rate": 0.0001999234785372324,
"loss": 0.8432,
"step": 198
},
{
"epoch": 0.4459383753501401,
"grad_norm": 0.24869990348815918,
"learning_rate": 0.00019991563619031508,
"loss": 0.8786,
"step": 199
},
{
"epoch": 0.4481792717086835,
"grad_norm": 0.23765677213668823,
"learning_rate": 0.00019990741151022301,
"loss": 0.9402,
"step": 200
},
{
"epoch": 0.4504201680672269,
"grad_norm": 0.24099862575531006,
"learning_rate": 0.00019989880452842847,
"loss": 0.9192,
"step": 201
},
{
"epoch": 0.4526610644257703,
"grad_norm": 0.2570018172264099,
"learning_rate": 0.00019988981527786654,
"loss": 0.846,
"step": 202
},
{
"epoch": 0.4549019607843137,
"grad_norm": 0.2420913279056549,
"learning_rate": 0.00019988044379293523,
"loss": 0.9021,
"step": 203
},
{
"epoch": 0.45714285714285713,
"grad_norm": 0.25546249747276306,
"learning_rate": 0.00019987069010949496,
"loss": 0.9191,
"step": 204
},
{
"epoch": 0.45938375350140054,
"grad_norm": 0.25439539551734924,
"learning_rate": 0.00019986055426486887,
"loss": 0.9046,
"step": 205
},
{
"epoch": 0.461624649859944,
"grad_norm": 0.23050928115844727,
"learning_rate": 0.00019985003629784237,
"loss": 0.7777,
"step": 206
},
{
"epoch": 0.4638655462184874,
"grad_norm": 0.25798794627189636,
"learning_rate": 0.00019983913624866304,
"loss": 0.9232,
"step": 207
},
{
"epoch": 0.4661064425770308,
"grad_norm": 0.2508363127708435,
"learning_rate": 0.00019982785415904064,
"loss": 0.9924,
"step": 208
},
{
"epoch": 0.46834733893557423,
"grad_norm": 0.25454050302505493,
"learning_rate": 0.00019981619007214673,
"loss": 0.9056,
"step": 209
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.24120093882083893,
"learning_rate": 0.0001998041440326146,
"loss": 0.8339,
"step": 210
},
{
"epoch": 0.47282913165266105,
"grad_norm": 0.2521376609802246,
"learning_rate": 0.00019979171608653924,
"loss": 0.7907,
"step": 211
},
{
"epoch": 0.47507002801120446,
"grad_norm": 0.2513487637042999,
"learning_rate": 0.00019977890628147682,
"loss": 0.9142,
"step": 212
},
{
"epoch": 0.4773109243697479,
"grad_norm": 0.2808438241481781,
"learning_rate": 0.00019976571466644492,
"loss": 0.8708,
"step": 213
},
{
"epoch": 0.47955182072829133,
"grad_norm": 0.2564331293106079,
"learning_rate": 0.00019975214129192196,
"loss": 0.8726,
"step": 214
},
{
"epoch": 0.48179271708683474,
"grad_norm": 0.2591661214828491,
"learning_rate": 0.00019973818620984738,
"loss": 0.791,
"step": 215
},
{
"epoch": 0.48403361344537815,
"grad_norm": 0.2555089592933655,
"learning_rate": 0.00019972384947362101,
"loss": 0.9129,
"step": 216
},
{
"epoch": 0.48627450980392156,
"grad_norm": 0.2542738616466522,
"learning_rate": 0.00019970913113810334,
"loss": 0.8097,
"step": 217
},
{
"epoch": 0.48851540616246497,
"grad_norm": 0.26333680748939514,
"learning_rate": 0.0001996940312596149,
"loss": 0.8342,
"step": 218
},
{
"epoch": 0.4907563025210084,
"grad_norm": 0.24918504059314728,
"learning_rate": 0.00019967854989593633,
"loss": 0.8441,
"step": 219
},
{
"epoch": 0.49299719887955185,
"grad_norm": 0.26157376170158386,
"learning_rate": 0.00019966268710630797,
"loss": 0.8891,
"step": 220
},
{
"epoch": 0.49523809523809526,
"grad_norm": 0.277658611536026,
"learning_rate": 0.00019964644295142968,
"loss": 0.8862,
"step": 221
},
{
"epoch": 0.49747899159663866,
"grad_norm": 0.2798251211643219,
"learning_rate": 0.00019962981749346078,
"loss": 0.8742,
"step": 222
},
{
"epoch": 0.4997198879551821,
"grad_norm": 0.28391703963279724,
"learning_rate": 0.00019961281079601957,
"loss": 0.8928,
"step": 223
},
{
"epoch": 0.5019607843137255,
"grad_norm": 0.2569884955883026,
"learning_rate": 0.00019959542292418317,
"loss": 0.8289,
"step": 224
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.2689199447631836,
"learning_rate": 0.00019957765394448728,
"loss": 0.8002,
"step": 225
},
{
"epoch": 0.5064425770308123,
"grad_norm": 0.2564886510372162,
"learning_rate": 0.00019955950392492604,
"loss": 0.8817,
"step": 226
},
{
"epoch": 0.5086834733893557,
"grad_norm": 0.2417922168970108,
"learning_rate": 0.00019954097293495155,
"loss": 0.8838,
"step": 227
},
{
"epoch": 0.5109243697478991,
"grad_norm": 0.22530929744243622,
"learning_rate": 0.00019952206104547376,
"loss": 0.7967,
"step": 228
},
{
"epoch": 0.5131652661064425,
"grad_norm": 0.23360571265220642,
"learning_rate": 0.00019950276832886017,
"loss": 0.8364,
"step": 229
},
{
"epoch": 0.5154061624649859,
"grad_norm": 0.2611772418022156,
"learning_rate": 0.00019948309485893549,
"loss": 0.8786,
"step": 230
},
{
"epoch": 0.5176470588235295,
"grad_norm": 0.24788020551204681,
"learning_rate": 0.00019946304071098142,
"loss": 0.86,
"step": 231
},
{
"epoch": 0.5198879551820729,
"grad_norm": 0.23259863257408142,
"learning_rate": 0.00019944260596173641,
"loss": 0.822,
"step": 232
},
{
"epoch": 0.5221288515406163,
"grad_norm": 0.2678159773349762,
"learning_rate": 0.0001994217906893952,
"loss": 0.8522,
"step": 233
},
{
"epoch": 0.5243697478991597,
"grad_norm": 0.268537312746048,
"learning_rate": 0.00019940059497360873,
"loss": 0.8638,
"step": 234
},
{
"epoch": 0.5266106442577031,
"grad_norm": 0.26122671365737915,
"learning_rate": 0.0001993790188954836,
"loss": 0.8509,
"step": 235
},
{
"epoch": 0.5288515406162465,
"grad_norm": 0.38401761651039124,
"learning_rate": 0.00019935706253758207,
"loss": 0.8184,
"step": 236
},
{
"epoch": 0.5310924369747899,
"grad_norm": 0.2459888607263565,
"learning_rate": 0.00019933472598392138,
"loss": 0.8042,
"step": 237
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.27041634917259216,
"learning_rate": 0.0001993120093199737,
"loss": 0.9426,
"step": 238
},
{
"epoch": 0.5355742296918767,
"grad_norm": 0.2503630220890045,
"learning_rate": 0.00019928891263266578,
"loss": 0.901,
"step": 239
},
{
"epoch": 0.5378151260504201,
"grad_norm": 0.2358655333518982,
"learning_rate": 0.00019926543601037842,
"loss": 0.8165,
"step": 240
},
{
"epoch": 0.5400560224089636,
"grad_norm": 0.24630171060562134,
"learning_rate": 0.00019924157954294628,
"loss": 0.8083,
"step": 241
},
{
"epoch": 0.542296918767507,
"grad_norm": 0.2504923939704895,
"learning_rate": 0.00019921734332165766,
"loss": 0.7973,
"step": 242
},
{
"epoch": 0.5445378151260504,
"grad_norm": 0.2728249132633209,
"learning_rate": 0.00019919272743925385,
"loss": 0.8817,
"step": 243
},
{
"epoch": 0.5467787114845938,
"grad_norm": 0.2694825828075409,
"learning_rate": 0.000199167731989929,
"loss": 0.8458,
"step": 244
},
{
"epoch": 0.5490196078431373,
"grad_norm": 0.31109780073165894,
"learning_rate": 0.00019914235706932972,
"loss": 0.939,
"step": 245
},
{
"epoch": 0.5512605042016807,
"grad_norm": 0.25847890973091125,
"learning_rate": 0.0001991166027745547,
"loss": 0.8704,
"step": 246
},
{
"epoch": 0.5535014005602241,
"grad_norm": 0.2678435742855072,
"learning_rate": 0.00019909046920415423,
"loss": 0.7445,
"step": 247
},
{
"epoch": 0.5557422969187675,
"grad_norm": 0.25951921939849854,
"learning_rate": 0.00019906395645812998,
"loss": 0.8869,
"step": 248
},
{
"epoch": 0.5579831932773109,
"grad_norm": 0.25794005393981934,
"learning_rate": 0.00019903706463793462,
"loss": 0.9506,
"step": 249
},
{
"epoch": 0.5602240896358543,
"grad_norm": 0.2340375930070877,
"learning_rate": 0.00019900979384647127,
"loss": 0.811,
"step": 250
},
{
"epoch": 0.5624649859943978,
"grad_norm": 0.25867462158203125,
"learning_rate": 0.0001989821441880933,
"loss": 0.8621,
"step": 251
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.2387334555387497,
"learning_rate": 0.0001989541157686037,
"loss": 0.8514,
"step": 252
},
{
"epoch": 0.5669467787114846,
"grad_norm": 0.26006007194519043,
"learning_rate": 0.00019892570869525496,
"loss": 0.8946,
"step": 253
},
{
"epoch": 0.569187675070028,
"grad_norm": 0.2735587954521179,
"learning_rate": 0.00019889692307674845,
"loss": 0.8605,
"step": 254
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.28292015194892883,
"learning_rate": 0.00019886775902323405,
"loss": 1.0231,
"step": 255
},
{
"epoch": 0.5736694677871148,
"grad_norm": 0.2500753402709961,
"learning_rate": 0.00019883821664630976,
"loss": 0.9155,
"step": 256
},
{
"epoch": 0.5759103641456582,
"grad_norm": 0.23072321712970734,
"learning_rate": 0.00019880829605902126,
"loss": 0.8268,
"step": 257
},
{
"epoch": 0.5781512605042017,
"grad_norm": 0.25353720784187317,
"learning_rate": 0.0001987779973758615,
"loss": 0.9315,
"step": 258
},
{
"epoch": 0.5803921568627451,
"grad_norm": 0.24684756994247437,
"learning_rate": 0.00019874732071277013,
"loss": 0.8652,
"step": 259
},
{
"epoch": 0.5826330532212886,
"grad_norm": 0.2583218812942505,
"learning_rate": 0.0001987162661871333,
"loss": 0.7722,
"step": 260
},
{
"epoch": 0.584873949579832,
"grad_norm": 0.23789426684379578,
"learning_rate": 0.00019868483391778302,
"loss": 0.9084,
"step": 261
},
{
"epoch": 0.5871148459383754,
"grad_norm": 0.24503661692142487,
"learning_rate": 0.00019865302402499678,
"loss": 0.8683,
"step": 262
},
{
"epoch": 0.5893557422969188,
"grad_norm": 0.2724620997905731,
"learning_rate": 0.00019862083663049694,
"loss": 0.8323,
"step": 263
},
{
"epoch": 0.5915966386554622,
"grad_norm": 0.27704504132270813,
"learning_rate": 0.0001985882718574506,
"loss": 0.9132,
"step": 264
},
{
"epoch": 0.5938375350140056,
"grad_norm": 0.2760598063468933,
"learning_rate": 0.00019855532983046876,
"loss": 0.7416,
"step": 265
},
{
"epoch": 0.596078431372549,
"grad_norm": 0.26945289969444275,
"learning_rate": 0.00019852201067560606,
"loss": 0.965,
"step": 266
},
{
"epoch": 0.5983193277310924,
"grad_norm": 0.2512185573577881,
"learning_rate": 0.0001984883145203603,
"loss": 0.8582,
"step": 267
},
{
"epoch": 0.6005602240896358,
"grad_norm": 0.24201013147830963,
"learning_rate": 0.00019845424149367177,
"loss": 0.8433,
"step": 268
},
{
"epoch": 0.6028011204481792,
"grad_norm": 0.24099834263324738,
"learning_rate": 0.000198419791725923,
"loss": 0.9501,
"step": 269
},
{
"epoch": 0.6050420168067226,
"grad_norm": 0.24521470069885254,
"learning_rate": 0.00019838496534893806,
"loss": 0.8458,
"step": 270
},
{
"epoch": 0.6072829131652661,
"grad_norm": 0.2377120554447174,
"learning_rate": 0.00019834976249598221,
"loss": 0.8934,
"step": 271
},
{
"epoch": 0.6095238095238096,
"grad_norm": 0.2444024235010147,
"learning_rate": 0.00019831418330176125,
"loss": 0.8326,
"step": 272
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.23458163440227509,
"learning_rate": 0.0001982782279024211,
"loss": 0.8743,
"step": 273
},
{
"epoch": 0.6140056022408964,
"grad_norm": 0.2571447491645813,
"learning_rate": 0.00019824189643554725,
"loss": 0.8265,
"step": 274
},
{
"epoch": 0.6162464985994398,
"grad_norm": 0.25711604952812195,
"learning_rate": 0.00019820518904016426,
"loss": 0.8418,
"step": 275
},
{
"epoch": 0.6184873949579832,
"grad_norm": 0.26878827810287476,
"learning_rate": 0.00019816810585673514,
"loss": 0.8007,
"step": 276
},
{
"epoch": 0.6207282913165266,
"grad_norm": 0.2555851638317108,
"learning_rate": 0.00019813064702716094,
"loss": 0.8536,
"step": 277
},
{
"epoch": 0.62296918767507,
"grad_norm": 0.2718588411808014,
"learning_rate": 0.00019809281269478012,
"loss": 0.8884,
"step": 278
},
{
"epoch": 0.6252100840336134,
"grad_norm": 0.25706928968429565,
"learning_rate": 0.00019805460300436803,
"loss": 0.8729,
"step": 279
},
{
"epoch": 0.6274509803921569,
"grad_norm": 0.23707084357738495,
"learning_rate": 0.00019801601810213635,
"loss": 0.8268,
"step": 280
},
{
"epoch": 0.6296918767507003,
"grad_norm": 0.24094390869140625,
"learning_rate": 0.00019797705813573245,
"loss": 0.8457,
"step": 281
},
{
"epoch": 0.6319327731092437,
"grad_norm": 0.24794656038284302,
"learning_rate": 0.00019793772325423908,
"loss": 0.8495,
"step": 282
},
{
"epoch": 0.6341736694677871,
"grad_norm": 0.2355436086654663,
"learning_rate": 0.00019789801360817346,
"loss": 0.8167,
"step": 283
},
{
"epoch": 0.6364145658263305,
"grad_norm": 0.24097253382205963,
"learning_rate": 0.00019785792934948695,
"loss": 0.8887,
"step": 284
},
{
"epoch": 0.6386554621848739,
"grad_norm": 0.23715715110301971,
"learning_rate": 0.00019781747063156435,
"loss": 0.8205,
"step": 285
},
{
"epoch": 0.6408963585434174,
"grad_norm": 0.2627374529838562,
"learning_rate": 0.00019777663760922343,
"loss": 0.9175,
"step": 286
},
{
"epoch": 0.6431372549019608,
"grad_norm": 0.25734400749206543,
"learning_rate": 0.00019773543043871412,
"loss": 0.8104,
"step": 287
},
{
"epoch": 0.6453781512605042,
"grad_norm": 0.2477787584066391,
"learning_rate": 0.0001976938492777182,
"loss": 0.9969,
"step": 288
},
{
"epoch": 0.6476190476190476,
"grad_norm": 0.2440386414527893,
"learning_rate": 0.0001976518942853484,
"loss": 0.8084,
"step": 289
},
{
"epoch": 0.6498599439775911,
"grad_norm": 0.2703987956047058,
"learning_rate": 0.00019760956562214806,
"loss": 0.7827,
"step": 290
},
{
"epoch": 0.6521008403361345,
"grad_norm": 0.29450032114982605,
"learning_rate": 0.0001975668634500904,
"loss": 0.8807,
"step": 291
},
{
"epoch": 0.6543417366946779,
"grad_norm": 0.27439677715301514,
"learning_rate": 0.00019752378793257776,
"loss": 0.8152,
"step": 292
},
{
"epoch": 0.6565826330532213,
"grad_norm": 0.2940295338630676,
"learning_rate": 0.00019748033923444122,
"loss": 0.9051,
"step": 293
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.2529405355453491,
"learning_rate": 0.00019743651752193982,
"loss": 0.7429,
"step": 294
},
{
"epoch": 0.6610644257703081,
"grad_norm": 0.26964515447616577,
"learning_rate": 0.00019739232296276003,
"loss": 0.8584,
"step": 295
},
{
"epoch": 0.6633053221288515,
"grad_norm": 0.24733929336071014,
"learning_rate": 0.00019734775572601487,
"loss": 0.8465,
"step": 296
},
{
"epoch": 0.6655462184873949,
"grad_norm": 0.2559841275215149,
"learning_rate": 0.00019730281598224364,
"loss": 0.883,
"step": 297
},
{
"epoch": 0.6677871148459383,
"grad_norm": 0.24375739693641663,
"learning_rate": 0.00019725750390341094,
"loss": 0.8042,
"step": 298
},
{
"epoch": 0.6700280112044817,
"grad_norm": 0.25603991746902466,
"learning_rate": 0.00019721181966290613,
"loss": 0.9363,
"step": 299
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.2391873151063919,
"learning_rate": 0.00019716576343554274,
"loss": 0.9364,
"step": 300
},
{
"epoch": 0.6745098039215687,
"grad_norm": 0.24241115152835846,
"learning_rate": 0.00019711933539755765,
"loss": 0.8168,
"step": 301
},
{
"epoch": 0.6767507002801121,
"grad_norm": 0.2441554218530655,
"learning_rate": 0.00019707253572661055,
"loss": 0.9179,
"step": 302
},
{
"epoch": 0.6789915966386555,
"grad_norm": 0.2645583748817444,
"learning_rate": 0.00019702536460178318,
"loss": 0.7971,
"step": 303
},
{
"epoch": 0.6812324929971989,
"grad_norm": 0.2557383179664612,
"learning_rate": 0.0001969778222035787,
"loss": 0.9112,
"step": 304
},
{
"epoch": 0.6834733893557423,
"grad_norm": 0.25000718235969543,
"learning_rate": 0.0001969299087139209,
"loss": 0.7842,
"step": 305
},
{
"epoch": 0.6857142857142857,
"grad_norm": 0.23475897312164307,
"learning_rate": 0.00019688162431615367,
"loss": 0.7806,
"step": 306
},
{
"epoch": 0.6879551820728291,
"grad_norm": 0.2505607306957245,
"learning_rate": 0.00019683296919504012,
"loss": 0.922,
"step": 307
},
{
"epoch": 0.6901960784313725,
"grad_norm": 0.2522655725479126,
"learning_rate": 0.00019678394353676203,
"loss": 0.7934,
"step": 308
},
{
"epoch": 0.692436974789916,
"grad_norm": 0.2722029387950897,
"learning_rate": 0.000196734547528919,
"loss": 0.8775,
"step": 309
},
{
"epoch": 0.6946778711484594,
"grad_norm": 0.26442402601242065,
"learning_rate": 0.00019668478136052774,
"loss": 0.8696,
"step": 310
},
{
"epoch": 0.6969187675070028,
"grad_norm": 0.26388052105903625,
"learning_rate": 0.00019663464522202162,
"loss": 0.8569,
"step": 311
},
{
"epoch": 0.6991596638655462,
"grad_norm": 0.26141613721847534,
"learning_rate": 0.00019658413930524952,
"loss": 0.8485,
"step": 312
},
{
"epoch": 0.7014005602240896,
"grad_norm": 0.23456323146820068,
"learning_rate": 0.00019653326380347533,
"loss": 0.8913,
"step": 313
},
{
"epoch": 0.7036414565826331,
"grad_norm": 0.24977454543113708,
"learning_rate": 0.00019648201891137723,
"loss": 0.8729,
"step": 314
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.22706039249897003,
"learning_rate": 0.0001964304048250469,
"loss": 0.7885,
"step": 315
},
{
"epoch": 0.7081232492997199,
"grad_norm": 0.24601607024669647,
"learning_rate": 0.00019637842174198867,
"loss": 0.8387,
"step": 316
},
{
"epoch": 0.7103641456582633,
"grad_norm": 0.2545998990535736,
"learning_rate": 0.000196326069861119,
"loss": 0.8114,
"step": 317
},
{
"epoch": 0.7126050420168067,
"grad_norm": 0.24375082552433014,
"learning_rate": 0.00019627334938276546,
"loss": 0.843,
"step": 318
},
{
"epoch": 0.7148459383753502,
"grad_norm": 0.2519029378890991,
"learning_rate": 0.00019622026050866614,
"loss": 0.9088,
"step": 319
},
{
"epoch": 0.7170868347338936,
"grad_norm": 0.27405208349227905,
"learning_rate": 0.0001961668034419688,
"loss": 0.958,
"step": 320
},
{
"epoch": 0.719327731092437,
"grad_norm": 0.25491318106651306,
"learning_rate": 0.0001961129783872301,
"loss": 0.7692,
"step": 321
},
{
"epoch": 0.7215686274509804,
"grad_norm": 0.250347375869751,
"learning_rate": 0.00019605878555041485,
"loss": 0.8321,
"step": 322
},
{
"epoch": 0.7238095238095238,
"grad_norm": 0.24250715970993042,
"learning_rate": 0.00019600422513889516,
"loss": 0.7832,
"step": 323
},
{
"epoch": 0.7260504201680672,
"grad_norm": 0.2599019706249237,
"learning_rate": 0.00019594929736144976,
"loss": 0.8736,
"step": 324
},
{
"epoch": 0.7282913165266106,
"grad_norm": 0.25766611099243164,
"learning_rate": 0.00019589400242826305,
"loss": 0.7419,
"step": 325
},
{
"epoch": 0.730532212885154,
"grad_norm": 0.254658967256546,
"learning_rate": 0.00019583834055092445,
"loss": 0.9058,
"step": 326
},
{
"epoch": 0.7327731092436974,
"grad_norm": 0.2731349468231201,
"learning_rate": 0.00019578231194242743,
"loss": 0.9683,
"step": 327
},
{
"epoch": 0.735014005602241,
"grad_norm": 0.22153563797473907,
"learning_rate": 0.00019572591681716887,
"loss": 0.8146,
"step": 328
},
{
"epoch": 0.7372549019607844,
"grad_norm": 0.24707463383674622,
"learning_rate": 0.00019566915539094803,
"loss": 0.8204,
"step": 329
},
{
"epoch": 0.7394957983193278,
"grad_norm": 0.2541884779930115,
"learning_rate": 0.00019561202788096597,
"loss": 0.7638,
"step": 330
},
{
"epoch": 0.7417366946778712,
"grad_norm": 0.2314218282699585,
"learning_rate": 0.00019555453450582452,
"loss": 0.7502,
"step": 331
},
{
"epoch": 0.7439775910364146,
"grad_norm": 0.24334260821342468,
"learning_rate": 0.00019549667548552556,
"loss": 0.912,
"step": 332
},
{
"epoch": 0.746218487394958,
"grad_norm": 0.24473878741264343,
"learning_rate": 0.00019543845104147,
"loss": 0.7558,
"step": 333
},
{
"epoch": 0.7484593837535014,
"grad_norm": 0.25403597950935364,
"learning_rate": 0.00019537986139645726,
"loss": 0.8406,
"step": 334
},
{
"epoch": 0.7507002801120448,
"grad_norm": 0.24638508260250092,
"learning_rate": 0.0001953209067746841,
"loss": 0.736,
"step": 335
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.25604501366615295,
"learning_rate": 0.00019526158740174393,
"loss": 0.867,
"step": 336
},
{
"epoch": 0.7551820728291316,
"grad_norm": 0.2617231011390686,
"learning_rate": 0.00019520190350462584,
"loss": 0.8654,
"step": 337
},
{
"epoch": 0.757422969187675,
"grad_norm": 0.2620690166950226,
"learning_rate": 0.0001951418553117139,
"loss": 0.8428,
"step": 338
},
{
"epoch": 0.7596638655462185,
"grad_norm": 0.25454917550086975,
"learning_rate": 0.0001950814430527861,
"loss": 0.8676,
"step": 339
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.26449549198150635,
"learning_rate": 0.00019502066695901358,
"loss": 0.8709,
"step": 340
},
{
"epoch": 0.7641456582633053,
"grad_norm": 0.24709953367710114,
"learning_rate": 0.0001949595272629597,
"loss": 0.8312,
"step": 341
},
{
"epoch": 0.7663865546218488,
"grad_norm": 0.2593318819999695,
"learning_rate": 0.00019489802419857917,
"loss": 0.8274,
"step": 342
},
{
"epoch": 0.7686274509803922,
"grad_norm": 0.2448015958070755,
"learning_rate": 0.00019483615800121716,
"loss": 0.8226,
"step": 343
},
{
"epoch": 0.7708683473389356,
"grad_norm": 0.26445460319519043,
"learning_rate": 0.00019477392890760839,
"loss": 0.8492,
"step": 344
},
{
"epoch": 0.773109243697479,
"grad_norm": 0.2573208808898926,
"learning_rate": 0.00019471133715587622,
"loss": 0.8294,
"step": 345
},
{
"epoch": 0.7753501400560224,
"grad_norm": 0.26753151416778564,
"learning_rate": 0.00019464838298553173,
"loss": 0.8446,
"step": 346
},
{
"epoch": 0.7775910364145658,
"grad_norm": 0.2667967975139618,
"learning_rate": 0.00019458506663747285,
"loss": 0.8388,
"step": 347
},
{
"epoch": 0.7798319327731092,
"grad_norm": 0.25934740900993347,
"learning_rate": 0.00019452138835398332,
"loss": 0.8149,
"step": 348
},
{
"epoch": 0.7820728291316527,
"grad_norm": 0.2525850832462311,
"learning_rate": 0.00019445734837873202,
"loss": 0.8325,
"step": 349
},
{
"epoch": 0.7843137254901961,
"grad_norm": 0.2837854325771332,
"learning_rate": 0.00019439294695677167,
"loss": 0.8795,
"step": 350
},
{
"epoch": 0.7865546218487395,
"grad_norm": 0.26006975769996643,
"learning_rate": 0.00019432818433453818,
"loss": 0.8267,
"step": 351
},
{
"epoch": 0.7887955182072829,
"grad_norm": 0.24872632324695587,
"learning_rate": 0.00019426306075984965,
"loss": 0.8553,
"step": 352
},
{
"epoch": 0.7910364145658263,
"grad_norm": 0.23633168637752533,
"learning_rate": 0.00019419757648190533,
"loss": 0.8544,
"step": 353
},
{
"epoch": 0.7932773109243697,
"grad_norm": 0.2425261288881302,
"learning_rate": 0.00019413173175128473,
"loss": 0.8645,
"step": 354
},
{
"epoch": 0.7955182072829131,
"grad_norm": 0.23861053586006165,
"learning_rate": 0.00019406552681994663,
"loss": 0.8363,
"step": 355
},
{
"epoch": 0.7977591036414566,
"grad_norm": 0.24729354679584503,
"learning_rate": 0.00019399896194122822,
"loss": 0.8401,
"step": 356
},
{
"epoch": 0.8,
"grad_norm": 0.23853589594364166,
"learning_rate": 0.000193932037369844,
"loss": 0.8237,
"step": 357
},
{
"epoch": 0.8022408963585435,
"grad_norm": 0.2654939293861389,
"learning_rate": 0.00019386475336188484,
"loss": 0.7491,
"step": 358
},
{
"epoch": 0.8044817927170869,
"grad_norm": 0.27639004588127136,
"learning_rate": 0.000193797110174817,
"loss": 0.8755,
"step": 359
},
{
"epoch": 0.8067226890756303,
"grad_norm": 0.23675435781478882,
"learning_rate": 0.00019372910806748125,
"loss": 0.8292,
"step": 360
},
{
"epoch": 0.8089635854341737,
"grad_norm": 0.2371446043252945,
"learning_rate": 0.0001936607473000917,
"loss": 0.809,
"step": 361
},
{
"epoch": 0.8112044817927171,
"grad_norm": 0.2529555559158325,
"learning_rate": 0.0001935920281342349,
"loss": 0.8403,
"step": 362
},
{
"epoch": 0.8134453781512605,
"grad_norm": 0.2568047344684601,
"learning_rate": 0.00019352295083286896,
"loss": 0.7848,
"step": 363
},
{
"epoch": 0.8156862745098039,
"grad_norm": 0.2613024413585663,
"learning_rate": 0.0001934535156603222,
"loss": 0.844,
"step": 364
},
{
"epoch": 0.8179271708683473,
"grad_norm": 0.24079741537570953,
"learning_rate": 0.0001933837228822925,
"loss": 0.8029,
"step": 365
},
{
"epoch": 0.8201680672268907,
"grad_norm": 0.24890519678592682,
"learning_rate": 0.0001933135727658462,
"loss": 0.8712,
"step": 366
},
{
"epoch": 0.8224089635854341,
"grad_norm": 0.25198838114738464,
"learning_rate": 0.00019324306557941682,
"loss": 0.8368,
"step": 367
},
{
"epoch": 0.8246498599439775,
"grad_norm": 0.2470208704471588,
"learning_rate": 0.0001931722015928044,
"loss": 0.8896,
"step": 368
},
{
"epoch": 0.826890756302521,
"grad_norm": 0.25046589970588684,
"learning_rate": 0.00019310098107717418,
"loss": 0.7602,
"step": 369
},
{
"epoch": 0.8291316526610645,
"grad_norm": 0.28005844354629517,
"learning_rate": 0.0001930294043050558,
"loss": 0.9443,
"step": 370
},
{
"epoch": 0.8313725490196079,
"grad_norm": 0.6102829575538635,
"learning_rate": 0.00019295747155034202,
"loss": 0.911,
"step": 371
},
{
"epoch": 0.8336134453781513,
"grad_norm": 0.2472289353609085,
"learning_rate": 0.0001928851830882879,
"loss": 0.8271,
"step": 372
},
{
"epoch": 0.8358543417366947,
"grad_norm": 0.24470177292823792,
"learning_rate": 0.0001928125391955095,
"loss": 0.7837,
"step": 373
},
{
"epoch": 0.8380952380952381,
"grad_norm": 0.2458019107580185,
"learning_rate": 0.00019273954014998308,
"loss": 0.8496,
"step": 374
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.249577596783638,
"learning_rate": 0.00019266618623104385,
"loss": 0.7729,
"step": 375
},
{
"epoch": 0.8425770308123249,
"grad_norm": 0.2461290806531906,
"learning_rate": 0.000192592477719385,
"loss": 0.7955,
"step": 376
},
{
"epoch": 0.8448179271708683,
"grad_norm": 0.27021750807762146,
"learning_rate": 0.00019251841489705655,
"loss": 0.9384,
"step": 377
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.2547335624694824,
"learning_rate": 0.00019244399804746435,
"loss": 0.8254,
"step": 378
},
{
"epoch": 0.8492997198879552,
"grad_norm": 0.2620961368083954,
"learning_rate": 0.0001923692274553689,
"loss": 0.8587,
"step": 379
},
{
"epoch": 0.8515406162464986,
"grad_norm": 0.25756919384002686,
"learning_rate": 0.0001922941034068844,
"loss": 0.7906,
"step": 380
},
{
"epoch": 0.853781512605042,
"grad_norm": 0.25694355368614197,
"learning_rate": 0.0001922186261894775,
"loss": 0.7887,
"step": 381
},
{
"epoch": 0.8560224089635854,
"grad_norm": 0.2538197636604309,
"learning_rate": 0.0001921427960919663,
"loss": 0.8861,
"step": 382
},
{
"epoch": 0.8582633053221288,
"grad_norm": 0.2463122308254242,
"learning_rate": 0.00019206661340451925,
"loss": 0.8175,
"step": 383
},
{
"epoch": 0.8605042016806723,
"grad_norm": 0.23905646800994873,
"learning_rate": 0.00019199007841865396,
"loss": 0.7914,
"step": 384
},
{
"epoch": 0.8627450980392157,
"grad_norm": 0.2590409517288208,
"learning_rate": 0.0001919131914272361,
"loss": 0.8385,
"step": 385
},
{
"epoch": 0.8649859943977591,
"grad_norm": 0.2520500719547272,
"learning_rate": 0.00019183595272447842,
"loss": 0.7961,
"step": 386
},
{
"epoch": 0.8672268907563025,
"grad_norm": 0.2568177878856659,
"learning_rate": 0.00019175836260593938,
"loss": 0.8051,
"step": 387
},
{
"epoch": 0.869467787114846,
"grad_norm": 0.27684271335601807,
"learning_rate": 0.00019168042136852228,
"loss": 0.8953,
"step": 388
},
{
"epoch": 0.8717086834733894,
"grad_norm": 0.26281246542930603,
"learning_rate": 0.0001916021293104739,
"loss": 0.8602,
"step": 389
},
{
"epoch": 0.8739495798319328,
"grad_norm": 0.29199978709220886,
"learning_rate": 0.00019152348673138353,
"loss": 0.9776,
"step": 390
},
{
"epoch": 0.8761904761904762,
"grad_norm": 0.25914543867111206,
"learning_rate": 0.0001914444939321817,
"loss": 0.8394,
"step": 391
},
{
"epoch": 0.8784313725490196,
"grad_norm": 0.2472960352897644,
"learning_rate": 0.0001913651512151391,
"loss": 0.7995,
"step": 392
},
{
"epoch": 0.880672268907563,
"grad_norm": 0.24599507451057434,
"learning_rate": 0.00019128545888386536,
"loss": 0.7801,
"step": 393
},
{
"epoch": 0.8829131652661064,
"grad_norm": 0.23776014149188995,
"learning_rate": 0.00019120541724330803,
"loss": 0.8068,
"step": 394
},
{
"epoch": 0.8851540616246498,
"grad_norm": 0.27298274636268616,
"learning_rate": 0.0001911250265997512,
"loss": 0.9139,
"step": 395
},
{
"epoch": 0.8873949579831932,
"grad_norm": 0.2653828561306,
"learning_rate": 0.0001910442872608145,
"loss": 0.8917,
"step": 396
},
{
"epoch": 0.8896358543417366,
"grad_norm": 0.25794684886932373,
"learning_rate": 0.00019096319953545185,
"loss": 0.819,
"step": 397
},
{
"epoch": 0.8918767507002802,
"grad_norm": 0.24696165323257446,
"learning_rate": 0.0001908817637339503,
"loss": 0.7535,
"step": 398
},
{
"epoch": 0.8941176470588236,
"grad_norm": 0.2529006898403168,
"learning_rate": 0.00019079998016792885,
"loss": 0.7379,
"step": 399
},
{
"epoch": 0.896358543417367,
"grad_norm": 0.24874147772789001,
"learning_rate": 0.00019071784915033717,
"loss": 0.8973,
"step": 400
},
{
"epoch": 0.8985994397759104,
"grad_norm": 0.27747422456741333,
"learning_rate": 0.00019063537099545455,
"loss": 0.8685,
"step": 401
},
{
"epoch": 0.9008403361344538,
"grad_norm": 0.25184211134910583,
"learning_rate": 0.00019055254601888866,
"loss": 0.7937,
"step": 402
},
{
"epoch": 0.9030812324929972,
"grad_norm": 0.2565883994102478,
"learning_rate": 0.00019046937453757413,
"loss": 0.8677,
"step": 403
},
{
"epoch": 0.9053221288515406,
"grad_norm": 0.24833756685256958,
"learning_rate": 0.00019038585686977167,
"loss": 0.8777,
"step": 404
},
{
"epoch": 0.907563025210084,
"grad_norm": 0.24929295480251312,
"learning_rate": 0.00019030199333506666,
"loss": 0.8167,
"step": 405
},
{
"epoch": 0.9098039215686274,
"grad_norm": 0.24500809609889984,
"learning_rate": 0.00019021778425436795,
"loss": 0.8675,
"step": 406
},
{
"epoch": 0.9120448179271708,
"grad_norm": 0.25895681977272034,
"learning_rate": 0.0001901332299499066,
"loss": 0.8718,
"step": 407
},
{
"epoch": 0.9142857142857143,
"grad_norm": 0.22836971282958984,
"learning_rate": 0.00019004833074523478,
"loss": 0.8602,
"step": 408
},
{
"epoch": 0.9165266106442577,
"grad_norm": 0.26318010687828064,
"learning_rate": 0.00018996308696522433,
"loss": 0.838,
"step": 409
},
{
"epoch": 0.9187675070028011,
"grad_norm": 0.24389663338661194,
"learning_rate": 0.00018987749893606575,
"loss": 0.7798,
"step": 410
},
{
"epoch": 0.9210084033613445,
"grad_norm": 0.26448702812194824,
"learning_rate": 0.0001897915669852667,
"loss": 0.8053,
"step": 411
},
{
"epoch": 0.923249299719888,
"grad_norm": 0.25245535373687744,
"learning_rate": 0.000189705291441651,
"loss": 0.7915,
"step": 412
},
{
"epoch": 0.9254901960784314,
"grad_norm": 0.25623124837875366,
"learning_rate": 0.00018961867263535715,
"loss": 0.9167,
"step": 413
},
{
"epoch": 0.9277310924369748,
"grad_norm": 0.25271835923194885,
"learning_rate": 0.00018953171089783723,
"loss": 0.8663,
"step": 414
},
{
"epoch": 0.9299719887955182,
"grad_norm": 0.24757803976535797,
"learning_rate": 0.00018944440656185556,
"loss": 0.8411,
"step": 415
},
{
"epoch": 0.9322128851540616,
"grad_norm": 0.23879915475845337,
"learning_rate": 0.00018935675996148738,
"loss": 0.8071,
"step": 416
},
{
"epoch": 0.934453781512605,
"grad_norm": 0.3934721350669861,
"learning_rate": 0.0001892687714321177,
"loss": 0.7911,
"step": 417
},
{
"epoch": 0.9366946778711485,
"grad_norm": 0.27968111634254456,
"learning_rate": 0.00018918044131043985,
"loss": 0.8452,
"step": 418
},
{
"epoch": 0.9389355742296919,
"grad_norm": 0.2701101303100586,
"learning_rate": 0.00018909176993445442,
"loss": 0.8723,
"step": 419
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.2606533169746399,
"learning_rate": 0.00018900275764346768,
"loss": 0.7908,
"step": 420
},
{
"epoch": 0.9434173669467787,
"grad_norm": 0.2672193944454193,
"learning_rate": 0.00018891340477809055,
"loss": 0.9491,
"step": 421
},
{
"epoch": 0.9456582633053221,
"grad_norm": 0.2676648795604706,
"learning_rate": 0.00018882371168023706,
"loss": 0.8352,
"step": 422
},
{
"epoch": 0.9478991596638655,
"grad_norm": 0.265023410320282,
"learning_rate": 0.0001887336786931233,
"loss": 0.7894,
"step": 423
},
{
"epoch": 0.9501400560224089,
"grad_norm": 0.24014434218406677,
"learning_rate": 0.00018864330616126586,
"loss": 0.8394,
"step": 424
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.24389366805553436,
"learning_rate": 0.00018855259443048067,
"loss": 0.7857,
"step": 425
},
{
"epoch": 0.9546218487394958,
"grad_norm": 0.2471270114183426,
"learning_rate": 0.00018846154384788162,
"loss": 0.8576,
"step": 426
},
{
"epoch": 0.9568627450980393,
"grad_norm": 0.2577723264694214,
"learning_rate": 0.00018837015476187916,
"loss": 0.8377,
"step": 427
},
{
"epoch": 0.9591036414565827,
"grad_norm": 0.3224787414073944,
"learning_rate": 0.00018827842752217917,
"loss": 0.8801,
"step": 428
},
{
"epoch": 0.9613445378151261,
"grad_norm": 0.25494757294654846,
"learning_rate": 0.00018818636247978145,
"loss": 0.8173,
"step": 429
},
{
"epoch": 0.9635854341736695,
"grad_norm": 0.24220331013202667,
"learning_rate": 0.00018809395998697833,
"loss": 0.7747,
"step": 430
},
{
"epoch": 0.9658263305322129,
"grad_norm": 0.2741996645927429,
"learning_rate": 0.00018800122039735358,
"loss": 0.8636,
"step": 431
},
{
"epoch": 0.9680672268907563,
"grad_norm": 0.2558667063713074,
"learning_rate": 0.0001879081440657807,
"loss": 0.8456,
"step": 432
},
{
"epoch": 0.9703081232492997,
"grad_norm": 0.25905126333236694,
"learning_rate": 0.00018781473134842197,
"loss": 0.7961,
"step": 433
},
{
"epoch": 0.9725490196078431,
"grad_norm": 0.23688547313213348,
"learning_rate": 0.00018772098260272667,
"loss": 0.7555,
"step": 434
},
{
"epoch": 0.9747899159663865,
"grad_norm": 0.2701859176158905,
"learning_rate": 0.00018762689818743007,
"loss": 0.9353,
"step": 435
},
{
"epoch": 0.9770308123249299,
"grad_norm": 0.24914319813251495,
"learning_rate": 0.00018753247846255174,
"loss": 0.7678,
"step": 436
},
{
"epoch": 0.9792717086834734,
"grad_norm": 0.26182475686073303,
"learning_rate": 0.00018743772378939448,
"loss": 0.8374,
"step": 437
},
{
"epoch": 0.9815126050420168,
"grad_norm": 0.2661891281604767,
"learning_rate": 0.00018734263453054273,
"loss": 0.8816,
"step": 438
},
{
"epoch": 0.9837535014005602,
"grad_norm": 0.24797801673412323,
"learning_rate": 0.0001872472110498612,
"loss": 0.8069,
"step": 439
},
{
"epoch": 0.9859943977591037,
"grad_norm": 0.24370808899402618,
"learning_rate": 0.0001871514537124936,
"loss": 0.7405,
"step": 440
},
{
"epoch": 0.9882352941176471,
"grad_norm": 0.2744685709476471,
"learning_rate": 0.00018705536288486118,
"loss": 0.7706,
"step": 441
},
{
"epoch": 0.9904761904761905,
"grad_norm": 0.265438437461853,
"learning_rate": 0.0001869589389346611,
"loss": 0.8649,
"step": 442
},
{
"epoch": 0.9927170868347339,
"grad_norm": 0.24661187827587128,
"learning_rate": 0.0001868621822308655,
"loss": 0.8138,
"step": 443
},
{
"epoch": 0.9949579831932773,
"grad_norm": 0.2580495774745941,
"learning_rate": 0.00018676509314371974,
"loss": 0.7765,
"step": 444
},
{
"epoch": 0.9971988795518207,
"grad_norm": 0.2546556293964386,
"learning_rate": 0.00018666767204474094,
"loss": 0.8873,
"step": 445
},
{
"epoch": 0.9994397759103641,
"grad_norm": 0.25944411754608154,
"learning_rate": 0.00018656991930671686,
"loss": 0.8651,
"step": 446
},
{
"epoch": 1.0016806722689076,
"grad_norm": 0.25578224658966064,
"learning_rate": 0.00018647183530370415,
"loss": 0.9027,
"step": 447
},
{
"epoch": 1.003921568627451,
"grad_norm": 0.24953621625900269,
"learning_rate": 0.00018637342041102718,
"loss": 0.6952,
"step": 448
},
{
"epoch": 1.0061624649859944,
"grad_norm": 0.24714386463165283,
"learning_rate": 0.0001862746750052764,
"loss": 0.6983,
"step": 449
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.2614993155002594,
"learning_rate": 0.00018617559946430706,
"loss": 0.7272,
"step": 450
},
{
"epoch": 1.0106442577030812,
"grad_norm": 0.27017897367477417,
"learning_rate": 0.00018607619416723765,
"loss": 0.7644,
"step": 451
},
{
"epoch": 1.0128851540616246,
"grad_norm": 0.28906649351119995,
"learning_rate": 0.0001859764594944485,
"loss": 0.7651,
"step": 452
},
{
"epoch": 1.015126050420168,
"grad_norm": 0.28600478172302246,
"learning_rate": 0.00018587639582758031,
"loss": 0.732,
"step": 453
},
{
"epoch": 1.0173669467787114,
"grad_norm": 0.28704679012298584,
"learning_rate": 0.0001857760035495327,
"loss": 0.713,
"step": 454
},
{
"epoch": 1.0196078431372548,
"grad_norm": 0.36615249514579773,
"learning_rate": 0.0001856752830444628,
"loss": 0.8691,
"step": 455
},
{
"epoch": 1.0218487394957982,
"grad_norm": 0.28983429074287415,
"learning_rate": 0.00018557423469778357,
"loss": 0.7513,
"step": 456
},
{
"epoch": 1.0240896358543417,
"grad_norm": 0.2740822732448578,
"learning_rate": 0.0001854728588961626,
"loss": 0.7319,
"step": 457
},
{
"epoch": 1.026330532212885,
"grad_norm": 0.26059162616729736,
"learning_rate": 0.00018537115602752053,
"loss": 0.7727,
"step": 458
},
{
"epoch": 1.0285714285714285,
"grad_norm": 0.25983962416648865,
"learning_rate": 0.00018526912648102943,
"loss": 0.7428,
"step": 459
},
{
"epoch": 1.0308123249299719,
"grad_norm": 0.2570248246192932,
"learning_rate": 0.0001851667706471115,
"loss": 0.8065,
"step": 460
},
{
"epoch": 1.0330532212885155,
"grad_norm": 0.2523268163204193,
"learning_rate": 0.0001850640889174375,
"loss": 0.729,
"step": 461
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.2655740976333618,
"learning_rate": 0.0001849610816849252,
"loss": 0.7717,
"step": 462
},
{
"epoch": 1.0375350140056023,
"grad_norm": 0.2684931755065918,
"learning_rate": 0.0001848577493437379,
"loss": 0.6862,
"step": 463
},
{
"epoch": 1.0397759103641457,
"grad_norm": 0.29195067286491394,
"learning_rate": 0.00018475409228928312,
"loss": 0.7204,
"step": 464
},
{
"epoch": 1.0420168067226891,
"grad_norm": 0.299556165933609,
"learning_rate": 0.00018465011091821072,
"loss": 0.8587,
"step": 465
},
{
"epoch": 1.0442577030812326,
"grad_norm": 0.2898513078689575,
"learning_rate": 0.00018454580562841163,
"loss": 0.7628,
"step": 466
},
{
"epoch": 1.046498599439776,
"grad_norm": 0.30156147480010986,
"learning_rate": 0.00018444117681901638,
"loss": 0.7437,
"step": 467
},
{
"epoch": 1.0487394957983194,
"grad_norm": 0.2778584361076355,
"learning_rate": 0.00018433622489039334,
"loss": 0.6211,
"step": 468
},
{
"epoch": 1.0509803921568628,
"grad_norm": 0.29723167419433594,
"learning_rate": 0.00018423095024414733,
"loss": 0.6958,
"step": 469
},
{
"epoch": 1.0532212885154062,
"grad_norm": 0.27120769023895264,
"learning_rate": 0.00018412535328311814,
"loss": 0.8491,
"step": 470
},
{
"epoch": 1.0554621848739496,
"grad_norm": 0.28341060876846313,
"learning_rate": 0.00018401943441137886,
"loss": 0.65,
"step": 471
},
{
"epoch": 1.057703081232493,
"grad_norm": 0.3047507107257843,
"learning_rate": 0.00018391319403423436,
"loss": 0.7248,
"step": 472
},
{
"epoch": 1.0599439775910364,
"grad_norm": 0.41425445675849915,
"learning_rate": 0.00018380663255821995,
"loss": 0.7587,
"step": 473
},
{
"epoch": 1.0621848739495798,
"grad_norm": 0.29975569248199463,
"learning_rate": 0.00018369975039109936,
"loss": 0.75,
"step": 474
},
{
"epoch": 1.0644257703081232,
"grad_norm": 0.2992658317089081,
"learning_rate": 0.0001835925479418637,
"loss": 0.7145,
"step": 475
},
{
"epoch": 1.0666666666666667,
"grad_norm": 0.29061266779899597,
"learning_rate": 0.00018348502562072955,
"loss": 0.7085,
"step": 476
},
{
"epoch": 1.06890756302521,
"grad_norm": 0.2972713112831116,
"learning_rate": 0.00018337718383913752,
"loss": 0.6741,
"step": 477
},
{
"epoch": 1.0711484593837535,
"grad_norm": 0.33825528621673584,
"learning_rate": 0.0001832690230097506,
"loss": 0.9284,
"step": 478
},
{
"epoch": 1.0733893557422969,
"grad_norm": 0.439996600151062,
"learning_rate": 0.00018316054354645283,
"loss": 0.7515,
"step": 479
},
{
"epoch": 1.0756302521008403,
"grad_norm": 0.28441861271858215,
"learning_rate": 0.00018305174586434725,
"loss": 0.73,
"step": 480
},
{
"epoch": 1.0778711484593837,
"grad_norm": 0.26948654651641846,
"learning_rate": 0.00018294263037975475,
"loss": 0.6871,
"step": 481
},
{
"epoch": 1.080112044817927,
"grad_norm": 0.29222843050956726,
"learning_rate": 0.00018283319751021232,
"loss": 0.6827,
"step": 482
},
{
"epoch": 1.0823529411764705,
"grad_norm": 0.31007933616638184,
"learning_rate": 0.00018272344767447134,
"loss": 0.793,
"step": 483
},
{
"epoch": 1.084593837535014,
"grad_norm": 0.29892498254776,
"learning_rate": 0.0001826133812924962,
"loss": 0.6541,
"step": 484
},
{
"epoch": 1.0868347338935573,
"grad_norm": 0.3294057250022888,
"learning_rate": 0.00018250299878546245,
"loss": 0.7719,
"step": 485
},
{
"epoch": 1.0890756302521007,
"grad_norm": 0.2897263765335083,
"learning_rate": 0.00018239230057575542,
"loss": 0.7306,
"step": 486
},
{
"epoch": 1.0913165266106442,
"grad_norm": 0.2852244973182678,
"learning_rate": 0.00018228128708696844,
"loss": 0.6512,
"step": 487
},
{
"epoch": 1.0935574229691878,
"grad_norm": 0.3132948577404022,
"learning_rate": 0.00018216995874390128,
"loss": 0.6859,
"step": 488
},
{
"epoch": 1.0957983193277312,
"grad_norm": 0.33552494645118713,
"learning_rate": 0.0001820583159725585,
"loss": 0.7039,
"step": 489
},
{
"epoch": 1.0980392156862746,
"grad_norm": 0.3188576102256775,
"learning_rate": 0.0001819463592001479,
"loss": 0.702,
"step": 490
},
{
"epoch": 1.100280112044818,
"grad_norm": 0.3157289922237396,
"learning_rate": 0.00018183408885507873,
"loss": 0.706,
"step": 491
},
{
"epoch": 1.1025210084033614,
"grad_norm": 0.27868008613586426,
"learning_rate": 0.00018172150536696025,
"loss": 0.6802,
"step": 492
},
{
"epoch": 1.1047619047619048,
"grad_norm": 0.29156798124313354,
"learning_rate": 0.0001816086091665999,
"loss": 0.7465,
"step": 493
},
{
"epoch": 1.1070028011204482,
"grad_norm": 0.2826281487941742,
"learning_rate": 0.00018149540068600182,
"loss": 0.7023,
"step": 494
},
{
"epoch": 1.1092436974789917,
"grad_norm": 0.3162992596626282,
"learning_rate": 0.00018138188035836497,
"loss": 0.7039,
"step": 495
},
{
"epoch": 1.111484593837535,
"grad_norm": 0.2732091248035431,
"learning_rate": 0.00018126804861808176,
"loss": 0.7372,
"step": 496
},
{
"epoch": 1.1137254901960785,
"grad_norm": 0.3039727807044983,
"learning_rate": 0.0001811539059007361,
"loss": 0.7148,
"step": 497
},
{
"epoch": 1.1159663865546219,
"grad_norm": 0.29023319482803345,
"learning_rate": 0.00018103945264310204,
"loss": 0.644,
"step": 498
},
{
"epoch": 1.1182072829131653,
"grad_norm": 0.2907212972640991,
"learning_rate": 0.00018092468928314172,
"loss": 0.7281,
"step": 499
},
{
"epoch": 1.1204481792717087,
"grad_norm": 0.2867254912853241,
"learning_rate": 0.0001808096162600041,
"loss": 0.7652,
"step": 500
},
{
"epoch": 1.122689075630252,
"grad_norm": 0.29929518699645996,
"learning_rate": 0.000180694234014023,
"loss": 0.713,
"step": 501
},
{
"epoch": 1.1249299719887955,
"grad_norm": 0.3078075647354126,
"learning_rate": 0.00018057854298671546,
"loss": 0.7255,
"step": 502
},
{
"epoch": 1.127170868347339,
"grad_norm": 0.3214311897754669,
"learning_rate": 0.0001804625436207802,
"loss": 0.7369,
"step": 503
},
{
"epoch": 1.1294117647058823,
"grad_norm": 0.3541346490383148,
"learning_rate": 0.00018034623636009568,
"loss": 0.8864,
"step": 504
},
{
"epoch": 1.1316526610644257,
"grad_norm": 0.31777986884117126,
"learning_rate": 0.00018022962164971867,
"loss": 0.7382,
"step": 505
},
{
"epoch": 1.1338935574229692,
"grad_norm": 0.3204372525215149,
"learning_rate": 0.00018011269993588232,
"loss": 0.7998,
"step": 506
},
{
"epoch": 1.1361344537815126,
"grad_norm": 0.30363792181015015,
"learning_rate": 0.0001799954716659946,
"loss": 0.7145,
"step": 507
},
{
"epoch": 1.138375350140056,
"grad_norm": 0.3061518371105194,
"learning_rate": 0.00017987793728863651,
"loss": 0.7217,
"step": 508
},
{
"epoch": 1.1406162464985994,
"grad_norm": 0.3020443618297577,
"learning_rate": 0.00017976009725356038,
"loss": 0.6765,
"step": 509
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.324278861284256,
"learning_rate": 0.00017964195201168817,
"loss": 0.8419,
"step": 510
},
{
"epoch": 1.1450980392156862,
"grad_norm": 0.3056583106517792,
"learning_rate": 0.00017952350201510978,
"loss": 0.8153,
"step": 511
},
{
"epoch": 1.1473389355742296,
"grad_norm": 0.3339795470237732,
"learning_rate": 0.00017940474771708115,
"loss": 0.8701,
"step": 512
},
{
"epoch": 1.149579831932773,
"grad_norm": 0.2845865488052368,
"learning_rate": 0.00017928568957202278,
"loss": 0.6865,
"step": 513
},
{
"epoch": 1.1518207282913164,
"grad_norm": 0.3049021065235138,
"learning_rate": 0.0001791663280355178,
"loss": 0.6772,
"step": 514
},
{
"epoch": 1.1540616246498598,
"grad_norm": 0.2814830541610718,
"learning_rate": 0.00017904666356431028,
"loss": 0.7092,
"step": 515
},
{
"epoch": 1.1563025210084033,
"grad_norm": 0.2870534360408783,
"learning_rate": 0.0001789266966163035,
"loss": 0.7056,
"step": 516
},
{
"epoch": 1.1585434173669467,
"grad_norm": 0.2947506606578827,
"learning_rate": 0.00017880642765055816,
"loss": 0.7208,
"step": 517
},
{
"epoch": 1.1607843137254903,
"grad_norm": 0.29607728123664856,
"learning_rate": 0.00017868585712729068,
"loss": 0.7441,
"step": 518
},
{
"epoch": 1.1630252100840337,
"grad_norm": 0.3445536494255066,
"learning_rate": 0.00017856498550787144,
"loss": 0.7116,
"step": 519
},
{
"epoch": 1.165266106442577,
"grad_norm": 0.3803198039531708,
"learning_rate": 0.0001784438132548229,
"loss": 0.7871,
"step": 520
},
{
"epoch": 1.1675070028011205,
"grad_norm": 0.37584301829338074,
"learning_rate": 0.00017832234083181795,
"loss": 0.7877,
"step": 521
},
{
"epoch": 1.169747899159664,
"grad_norm": 0.3182366192340851,
"learning_rate": 0.0001782005687036781,
"loss": 0.7112,
"step": 522
},
{
"epoch": 1.1719887955182073,
"grad_norm": 0.3022582530975342,
"learning_rate": 0.00017807849733637176,
"loss": 0.7275,
"step": 523
},
{
"epoch": 1.1742296918767507,
"grad_norm": 0.31011196970939636,
"learning_rate": 0.00017795612719701226,
"loss": 0.7439,
"step": 524
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.30964523553848267,
"learning_rate": 0.00017783345875385634,
"loss": 0.7788,
"step": 525
},
{
"epoch": 1.1787114845938376,
"grad_norm": 0.29043567180633545,
"learning_rate": 0.00017771049247630215,
"loss": 0.7925,
"step": 526
},
{
"epoch": 1.180952380952381,
"grad_norm": 0.28265780210494995,
"learning_rate": 0.00017758722883488745,
"loss": 0.8563,
"step": 527
},
{
"epoch": 1.1831932773109244,
"grad_norm": 0.2864142060279846,
"learning_rate": 0.00017746366830128803,
"loss": 0.6427,
"step": 528
},
{
"epoch": 1.1854341736694678,
"grad_norm": 0.28759056329727173,
"learning_rate": 0.00017733981134831567,
"loss": 0.6629,
"step": 529
},
{
"epoch": 1.1876750700280112,
"grad_norm": 0.3355376422405243,
"learning_rate": 0.00017721565844991643,
"loss": 0.7418,
"step": 530
},
{
"epoch": 1.1899159663865546,
"grad_norm": 0.3486466109752655,
"learning_rate": 0.0001770912100811688,
"loss": 0.7885,
"step": 531
},
{
"epoch": 1.192156862745098,
"grad_norm": 0.2857566475868225,
"learning_rate": 0.000176966466718282,
"loss": 0.6359,
"step": 532
},
{
"epoch": 1.1943977591036414,
"grad_norm": 0.3142206072807312,
"learning_rate": 0.00017684142883859388,
"loss": 0.6964,
"step": 533
},
{
"epoch": 1.1966386554621848,
"grad_norm": 0.3039001524448395,
"learning_rate": 0.00017671609692056946,
"loss": 0.6951,
"step": 534
},
{
"epoch": 1.1988795518207283,
"grad_norm": 0.31849658489227295,
"learning_rate": 0.00017659047144379878,
"loss": 0.7428,
"step": 535
},
{
"epoch": 1.2011204481792717,
"grad_norm": 0.2935810983181,
"learning_rate": 0.00017646455288899534,
"loss": 0.7668,
"step": 536
},
{
"epoch": 1.203361344537815,
"grad_norm": 0.2911589443683624,
"learning_rate": 0.00017633834173799403,
"loss": 0.7237,
"step": 537
},
{
"epoch": 1.2056022408963585,
"grad_norm": 0.28567075729370117,
"learning_rate": 0.00017621183847374935,
"loss": 0.7464,
"step": 538
},
{
"epoch": 1.2078431372549019,
"grad_norm": 0.299709677696228,
"learning_rate": 0.00017608504358033363,
"loss": 0.7176,
"step": 539
},
{
"epoch": 1.2100840336134453,
"grad_norm": 0.2959325313568115,
"learning_rate": 0.00017595795754293513,
"loss": 0.6807,
"step": 540
},
{
"epoch": 1.2123249299719887,
"grad_norm": 0.27634045481681824,
"learning_rate": 0.00017583058084785625,
"loss": 0.6773,
"step": 541
},
{
"epoch": 1.2145658263305321,
"grad_norm": 0.33680498600006104,
"learning_rate": 0.00017570291398251152,
"loss": 0.7985,
"step": 542
},
{
"epoch": 1.2168067226890757,
"grad_norm": 0.318645179271698,
"learning_rate": 0.00017557495743542585,
"loss": 0.6376,
"step": 543
},
{
"epoch": 1.2190476190476192,
"grad_norm": 0.31514203548431396,
"learning_rate": 0.0001754467116962326,
"loss": 0.7327,
"step": 544
},
{
"epoch": 1.2212885154061626,
"grad_norm": 0.32726311683654785,
"learning_rate": 0.0001753181772556719,
"loss": 0.7329,
"step": 545
},
{
"epoch": 1.223529411764706,
"grad_norm": 0.3274557292461395,
"learning_rate": 0.00017518935460558838,
"loss": 0.6818,
"step": 546
},
{
"epoch": 1.2257703081232494,
"grad_norm": 0.3218080699443817,
"learning_rate": 0.0001750602442389297,
"loss": 0.734,
"step": 547
},
{
"epoch": 1.2280112044817928,
"grad_norm": 0.3002873659133911,
"learning_rate": 0.0001749308466497444,
"loss": 0.617,
"step": 548
},
{
"epoch": 1.2302521008403362,
"grad_norm": 0.30696091055870056,
"learning_rate": 0.00017480116233318014,
"loss": 0.7417,
"step": 549
},
{
"epoch": 1.2324929971988796,
"grad_norm": 0.341802716255188,
"learning_rate": 0.0001746711917854817,
"loss": 0.7806,
"step": 550
},
{
"epoch": 1.234733893557423,
"grad_norm": 0.3180257976055145,
"learning_rate": 0.00017454093550398918,
"loss": 0.6813,
"step": 551
},
{
"epoch": 1.2369747899159664,
"grad_norm": 0.3485969007015228,
"learning_rate": 0.00017441039398713608,
"loss": 0.7385,
"step": 552
},
{
"epoch": 1.2392156862745098,
"grad_norm": 0.3271244466304779,
"learning_rate": 0.00017427956773444732,
"loss": 0.6891,
"step": 553
},
{
"epoch": 1.2414565826330533,
"grad_norm": 0.31936562061309814,
"learning_rate": 0.00017414845724653743,
"loss": 0.683,
"step": 554
},
{
"epoch": 1.2436974789915967,
"grad_norm": 0.33722570538520813,
"learning_rate": 0.0001740170630251085,
"loss": 0.8026,
"step": 555
},
{
"epoch": 1.24593837535014,
"grad_norm": 0.3108736276626587,
"learning_rate": 0.00017388538557294852,
"loss": 0.6923,
"step": 556
},
{
"epoch": 1.2481792717086835,
"grad_norm": 0.32877469062805176,
"learning_rate": 0.00017375342539392903,
"loss": 0.7007,
"step": 557
},
{
"epoch": 1.250420168067227,
"grad_norm": 0.3236118257045746,
"learning_rate": 0.00017362118299300361,
"loss": 0.6852,
"step": 558
},
{
"epoch": 1.2526610644257703,
"grad_norm": 0.30720242857933044,
"learning_rate": 0.00017348865887620573,
"loss": 0.6665,
"step": 559
},
{
"epoch": 1.2549019607843137,
"grad_norm": 0.32177719473838806,
"learning_rate": 0.00017335585355064692,
"loss": 0.7097,
"step": 560
},
{
"epoch": 1.2571428571428571,
"grad_norm": 0.31638845801353455,
"learning_rate": 0.0001732227675245147,
"loss": 0.6966,
"step": 561
},
{
"epoch": 1.2593837535014005,
"grad_norm": 0.3173747956752777,
"learning_rate": 0.00017308940130707069,
"loss": 0.6972,
"step": 562
},
{
"epoch": 1.261624649859944,
"grad_norm": 0.31755855679512024,
"learning_rate": 0.00017295575540864877,
"loss": 0.7192,
"step": 563
},
{
"epoch": 1.2638655462184873,
"grad_norm": 0.30816033482551575,
"learning_rate": 0.00017282183034065296,
"loss": 0.6723,
"step": 564
},
{
"epoch": 1.2661064425770308,
"grad_norm": 0.3106657564640045,
"learning_rate": 0.00017268762661555557,
"loss": 0.754,
"step": 565
},
{
"epoch": 1.2683473389355742,
"grad_norm": 0.3460423946380615,
"learning_rate": 0.00017255314474689523,
"loss": 0.7836,
"step": 566
},
{
"epoch": 1.2705882352941176,
"grad_norm": 0.33377137780189514,
"learning_rate": 0.00017241838524927484,
"loss": 0.8304,
"step": 567
},
{
"epoch": 1.272829131652661,
"grad_norm": 0.3150513470172882,
"learning_rate": 0.0001722833486383597,
"loss": 0.9003,
"step": 568
},
{
"epoch": 1.2750700280112044,
"grad_norm": 0.2992112934589386,
"learning_rate": 0.00017214803543087555,
"loss": 0.7655,
"step": 569
},
{
"epoch": 1.2773109243697478,
"grad_norm": 0.2952958345413208,
"learning_rate": 0.00017201244614460643,
"loss": 0.6524,
"step": 570
},
{
"epoch": 1.2795518207282912,
"grad_norm": 0.31578466296195984,
"learning_rate": 0.00017187658129839294,
"loss": 0.7415,
"step": 571
},
{
"epoch": 1.2817927170868346,
"grad_norm": 0.33429616689682007,
"learning_rate": 0.00017174044141213,
"loss": 0.8142,
"step": 572
},
{
"epoch": 1.284033613445378,
"grad_norm": 0.32365548610687256,
"learning_rate": 0.0001716040270067651,
"loss": 0.6806,
"step": 573
},
{
"epoch": 1.2862745098039214,
"grad_norm": 0.3116409182548523,
"learning_rate": 0.00017146733860429612,
"loss": 0.6899,
"step": 574
},
{
"epoch": 1.2885154061624648,
"grad_norm": 0.35744285583496094,
"learning_rate": 0.00017133037672776942,
"loss": 0.74,
"step": 575
},
{
"epoch": 1.2907563025210085,
"grad_norm": 0.2882843613624573,
"learning_rate": 0.00017119314190127788,
"loss": 0.6437,
"step": 576
},
{
"epoch": 1.292997198879552,
"grad_norm": 0.31813856959342957,
"learning_rate": 0.00017105563464995873,
"loss": 0.6738,
"step": 577
},
{
"epoch": 1.2952380952380953,
"grad_norm": 0.32429373264312744,
"learning_rate": 0.00017091785549999176,
"loss": 0.8131,
"step": 578
},
{
"epoch": 1.2974789915966387,
"grad_norm": 0.32878828048706055,
"learning_rate": 0.00017077980497859713,
"loss": 0.6904,
"step": 579
},
{
"epoch": 1.2997198879551821,
"grad_norm": 0.30764883756637573,
"learning_rate": 0.00017064148361403347,
"loss": 0.7343,
"step": 580
},
{
"epoch": 1.3019607843137255,
"grad_norm": 0.319346159696579,
"learning_rate": 0.00017050289193559578,
"loss": 0.6926,
"step": 581
},
{
"epoch": 1.304201680672269,
"grad_norm": 0.31245169043540955,
"learning_rate": 0.00017036403047361335,
"loss": 0.7876,
"step": 582
},
{
"epoch": 1.3064425770308123,
"grad_norm": 0.29874372482299805,
"learning_rate": 0.000170224899759448,
"loss": 0.6569,
"step": 583
},
{
"epoch": 1.3086834733893558,
"grad_norm": 0.3087962865829468,
"learning_rate": 0.00017008550032549167,
"loss": 0.6892,
"step": 584
},
{
"epoch": 1.3109243697478992,
"grad_norm": 0.31366726756095886,
"learning_rate": 0.0001699458327051647,
"loss": 0.7782,
"step": 585
},
{
"epoch": 1.3131652661064426,
"grad_norm": 0.304023802280426,
"learning_rate": 0.00016980589743291363,
"loss": 0.7213,
"step": 586
},
{
"epoch": 1.315406162464986,
"grad_norm": 0.2973663806915283,
"learning_rate": 0.00016966569504420914,
"loss": 0.6942,
"step": 587
},
{
"epoch": 1.3176470588235294,
"grad_norm": 0.3077528178691864,
"learning_rate": 0.0001695252260755441,
"loss": 0.7192,
"step": 588
},
{
"epoch": 1.3198879551820728,
"grad_norm": 0.3302268087863922,
"learning_rate": 0.00016938449106443138,
"loss": 0.7904,
"step": 589
},
{
"epoch": 1.3221288515406162,
"grad_norm": 0.3201766312122345,
"learning_rate": 0.00016924349054940204,
"loss": 0.6851,
"step": 590
},
{
"epoch": 1.3243697478991596,
"grad_norm": 0.31299325823783875,
"learning_rate": 0.00016910222507000294,
"loss": 0.6355,
"step": 591
},
{
"epoch": 1.326610644257703,
"grad_norm": 0.3279891610145569,
"learning_rate": 0.00016896069516679493,
"loss": 0.7587,
"step": 592
},
{
"epoch": 1.3288515406162464,
"grad_norm": 0.3423704206943512,
"learning_rate": 0.0001688189013813507,
"loss": 0.7352,
"step": 593
},
{
"epoch": 1.3310924369747898,
"grad_norm": 0.3414992690086365,
"learning_rate": 0.00016867684425625262,
"loss": 0.6362,
"step": 594
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.3365124464035034,
"learning_rate": 0.00016853452433509086,
"loss": 0.7817,
"step": 595
},
{
"epoch": 1.3355742296918767,
"grad_norm": 0.3402608633041382,
"learning_rate": 0.00016839194216246108,
"loss": 0.713,
"step": 596
},
{
"epoch": 1.3378151260504203,
"grad_norm": 0.3226512670516968,
"learning_rate": 0.00016824909828396255,
"loss": 0.6853,
"step": 597
},
{
"epoch": 1.3400560224089637,
"grad_norm": 0.3510406017303467,
"learning_rate": 0.0001681059932461959,
"loss": 0.7548,
"step": 598
},
{
"epoch": 1.3422969187675071,
"grad_norm": 0.3165980875492096,
"learning_rate": 0.00016796262759676117,
"loss": 0.6952,
"step": 599
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.3366287052631378,
"learning_rate": 0.00016781900188425562,
"loss": 0.7872,
"step": 600
},
{
"epoch": 1.346778711484594,
"grad_norm": 0.30944567918777466,
"learning_rate": 0.00016767511665827166,
"loss": 0.6944,
"step": 601
},
{
"epoch": 1.3490196078431373,
"grad_norm": 0.31927919387817383,
"learning_rate": 0.00016753097246939474,
"loss": 0.6778,
"step": 602
},
{
"epoch": 1.3512605042016808,
"grad_norm": 0.36404141783714294,
"learning_rate": 0.0001673865698692012,
"loss": 0.8198,
"step": 603
},
{
"epoch": 1.3535014005602242,
"grad_norm": 0.28067895770072937,
"learning_rate": 0.00016724190941025627,
"loss": 0.6561,
"step": 604
},
{
"epoch": 1.3557422969187676,
"grad_norm": 0.33814799785614014,
"learning_rate": 0.00016709699164611192,
"loss": 0.725,
"step": 605
},
{
"epoch": 1.357983193277311,
"grad_norm": 0.3168368935585022,
"learning_rate": 0.0001669518171313046,
"loss": 0.7295,
"step": 606
},
{
"epoch": 1.3602240896358544,
"grad_norm": 0.37248319387435913,
"learning_rate": 0.00016680638642135336,
"loss": 0.7566,
"step": 607
},
{
"epoch": 1.3624649859943978,
"grad_norm": 0.32318514585494995,
"learning_rate": 0.00016666070007275748,
"loss": 0.6949,
"step": 608
},
{
"epoch": 1.3647058823529412,
"grad_norm": 0.34030449390411377,
"learning_rate": 0.00016651475864299452,
"loss": 0.6823,
"step": 609
},
{
"epoch": 1.3669467787114846,
"grad_norm": 0.3123067319393158,
"learning_rate": 0.00016636856269051814,
"loss": 0.7236,
"step": 610
},
{
"epoch": 1.369187675070028,
"grad_norm": 0.32035380601882935,
"learning_rate": 0.0001662221127747559,
"loss": 0.7254,
"step": 611
},
{
"epoch": 1.3714285714285714,
"grad_norm": 0.3208359181880951,
"learning_rate": 0.00016607540945610722,
"loss": 0.7309,
"step": 612
},
{
"epoch": 1.3736694677871149,
"grad_norm": 0.31935855746269226,
"learning_rate": 0.00016592845329594112,
"loss": 0.8216,
"step": 613
},
{
"epoch": 1.3759103641456583,
"grad_norm": 0.2906378209590912,
"learning_rate": 0.00016578124485659414,
"loss": 0.7283,
"step": 614
},
{
"epoch": 1.3781512605042017,
"grad_norm": 0.3059033751487732,
"learning_rate": 0.00016563378470136822,
"loss": 0.7455,
"step": 615
},
{
"epoch": 1.380392156862745,
"grad_norm": 0.30803483724594116,
"learning_rate": 0.00016548607339452853,
"loss": 0.6463,
"step": 616
},
{
"epoch": 1.3826330532212885,
"grad_norm": 0.32379990816116333,
"learning_rate": 0.00016533811150130117,
"loss": 0.7454,
"step": 617
},
{
"epoch": 1.384873949579832,
"grad_norm": 0.3142244815826416,
"learning_rate": 0.00016518989958787126,
"loss": 0.7142,
"step": 618
},
{
"epoch": 1.3871148459383753,
"grad_norm": 0.32228538393974304,
"learning_rate": 0.00016504143822138056,
"loss": 0.626,
"step": 619
},
{
"epoch": 1.3893557422969187,
"grad_norm": 0.3554072082042694,
"learning_rate": 0.00016489272796992537,
"loss": 0.6818,
"step": 620
},
{
"epoch": 1.3915966386554621,
"grad_norm": 0.3763510584831238,
"learning_rate": 0.00016474376940255444,
"loss": 0.7584,
"step": 621
},
{
"epoch": 1.3938375350140055,
"grad_norm": 0.3938269317150116,
"learning_rate": 0.0001645945630892666,
"loss": 0.815,
"step": 622
},
{
"epoch": 1.396078431372549,
"grad_norm": 0.32350701093673706,
"learning_rate": 0.00016444510960100879,
"loss": 0.7073,
"step": 623
},
{
"epoch": 1.3983193277310924,
"grad_norm": 0.33908578753471375,
"learning_rate": 0.00016429540950967371,
"loss": 0.8126,
"step": 624
},
{
"epoch": 1.4005602240896358,
"grad_norm": 0.32018956542015076,
"learning_rate": 0.0001641454633880978,
"loss": 0.7489,
"step": 625
},
{
"epoch": 1.4028011204481792,
"grad_norm": 0.324883371591568,
"learning_rate": 0.0001639952718100589,
"loss": 0.7915,
"step": 626
},
{
"epoch": 1.4050420168067226,
"grad_norm": 0.3411893844604492,
"learning_rate": 0.000163844835350274,
"loss": 0.7736,
"step": 627
},
{
"epoch": 1.407282913165266,
"grad_norm": 0.3181053400039673,
"learning_rate": 0.0001636941545843973,
"loss": 0.7398,
"step": 628
},
{
"epoch": 1.4095238095238094,
"grad_norm": 0.3206365704536438,
"learning_rate": 0.00016354323008901776,
"loss": 0.7825,
"step": 629
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.30065983533859253,
"learning_rate": 0.00016339206244165704,
"loss": 0.714,
"step": 630
},
{
"epoch": 1.4140056022408962,
"grad_norm": 0.3132588565349579,
"learning_rate": 0.00016324065222076718,
"loss": 0.8118,
"step": 631
},
{
"epoch": 1.4162464985994399,
"grad_norm": 0.3143763840198517,
"learning_rate": 0.00016308900000572851,
"loss": 0.7045,
"step": 632
},
{
"epoch": 1.4184873949579833,
"grad_norm": 0.3230004608631134,
"learning_rate": 0.00016293710637684732,
"loss": 0.5901,
"step": 633
},
{
"epoch": 1.4207282913165267,
"grad_norm": 0.31268176436424255,
"learning_rate": 0.00016278497191535364,
"loss": 0.7745,
"step": 634
},
{
"epoch": 1.42296918767507,
"grad_norm": 0.33265724778175354,
"learning_rate": 0.00016263259720339916,
"loss": 0.7444,
"step": 635
},
{
"epoch": 1.4252100840336135,
"grad_norm": 0.32188957929611206,
"learning_rate": 0.00016247998282405485,
"loss": 0.7899,
"step": 636
},
{
"epoch": 1.427450980392157,
"grad_norm": 0.3007752001285553,
"learning_rate": 0.0001623271293613088,
"loss": 0.6891,
"step": 637
},
{
"epoch": 1.4296918767507003,
"grad_norm": 0.29558807611465454,
"learning_rate": 0.0001621740374000639,
"loss": 0.7253,
"step": 638
},
{
"epoch": 1.4319327731092437,
"grad_norm": 0.32872480154037476,
"learning_rate": 0.0001620207075261358,
"loss": 0.7858,
"step": 639
},
{
"epoch": 1.4341736694677871,
"grad_norm": 0.30687081813812256,
"learning_rate": 0.00016186714032625035,
"loss": 0.7395,
"step": 640
},
{
"epoch": 1.4364145658263305,
"grad_norm": 0.31080397963523865,
"learning_rate": 0.00016171333638804176,
"loss": 0.7491,
"step": 641
},
{
"epoch": 1.438655462184874,
"grad_norm": 0.3163350820541382,
"learning_rate": 0.00016155929630004996,
"loss": 0.7222,
"step": 642
},
{
"epoch": 1.4408963585434174,
"grad_norm": 0.3228285610675812,
"learning_rate": 0.00016140502065171863,
"loss": 0.722,
"step": 643
},
{
"epoch": 1.4431372549019608,
"grad_norm": 0.3150191605091095,
"learning_rate": 0.00016125051003339276,
"loss": 0.8325,
"step": 644
},
{
"epoch": 1.4453781512605042,
"grad_norm": 0.2988094687461853,
"learning_rate": 0.00016109576503631646,
"loss": 0.6268,
"step": 645
},
{
"epoch": 1.4476190476190476,
"grad_norm": 0.3043825030326843,
"learning_rate": 0.00016094078625263083,
"loss": 0.6615,
"step": 646
},
{
"epoch": 1.449859943977591,
"grad_norm": 0.3339695930480957,
"learning_rate": 0.00016078557427537144,
"loss": 0.7564,
"step": 647
},
{
"epoch": 1.4521008403361344,
"grad_norm": 0.37711572647094727,
"learning_rate": 0.00016063012969846625,
"loss": 0.8914,
"step": 648
},
{
"epoch": 1.4543417366946778,
"grad_norm": 0.3254697322845459,
"learning_rate": 0.0001604744531167332,
"loss": 0.7375,
"step": 649
},
{
"epoch": 1.4565826330532212,
"grad_norm": 0.30244678258895874,
"learning_rate": 0.0001603185451258781,
"loss": 0.649,
"step": 650
},
{
"epoch": 1.4588235294117646,
"grad_norm": 0.3273305892944336,
"learning_rate": 0.00016016240632249224,
"loss": 0.7635,
"step": 651
},
{
"epoch": 1.4610644257703083,
"grad_norm": 0.3089764714241028,
"learning_rate": 0.00016000603730405012,
"loss": 0.7321,
"step": 652
},
{
"epoch": 1.4633053221288517,
"grad_norm": 0.3246336579322815,
"learning_rate": 0.00015984943866890718,
"loss": 0.7163,
"step": 653
},
{
"epoch": 1.465546218487395,
"grad_norm": 0.29445281624794006,
"learning_rate": 0.00015969261101629742,
"loss": 0.7109,
"step": 654
},
{
"epoch": 1.4677871148459385,
"grad_norm": 0.3255884051322937,
"learning_rate": 0.00015953555494633136,
"loss": 0.7698,
"step": 655
},
{
"epoch": 1.470028011204482,
"grad_norm": 0.3040069043636322,
"learning_rate": 0.00015937827105999336,
"loss": 0.7155,
"step": 656
},
{
"epoch": 1.4722689075630253,
"grad_norm": 0.30603310465812683,
"learning_rate": 0.00015922075995913974,
"loss": 0.7512,
"step": 657
},
{
"epoch": 1.4745098039215687,
"grad_norm": 0.3210160732269287,
"learning_rate": 0.00015906302224649615,
"loss": 0.7153,
"step": 658
},
{
"epoch": 1.4767507002801121,
"grad_norm": 0.3401980996131897,
"learning_rate": 0.0001589050585256554,
"loss": 0.705,
"step": 659
},
{
"epoch": 1.4789915966386555,
"grad_norm": 0.3499704599380493,
"learning_rate": 0.00015874686940107506,
"loss": 0.7966,
"step": 660
},
{
"epoch": 1.481232492997199,
"grad_norm": 0.31771430373191833,
"learning_rate": 0.00015858845547807543,
"loss": 0.7383,
"step": 661
},
{
"epoch": 1.4834733893557424,
"grad_norm": 0.3159136474132538,
"learning_rate": 0.00015842981736283686,
"loss": 0.6415,
"step": 662
},
{
"epoch": 1.4857142857142858,
"grad_norm": 0.3231973946094513,
"learning_rate": 0.0001582709556623976,
"loss": 0.7462,
"step": 663
},
{
"epoch": 1.4879551820728292,
"grad_norm": 0.3222534954547882,
"learning_rate": 0.0001581118709846514,
"loss": 0.6833,
"step": 664
},
{
"epoch": 1.4901960784313726,
"grad_norm": 0.34087467193603516,
"learning_rate": 0.00015795256393834545,
"loss": 0.7325,
"step": 665
},
{
"epoch": 1.492436974789916,
"grad_norm": 0.33144867420196533,
"learning_rate": 0.00015779303513307764,
"loss": 0.7278,
"step": 666
},
{
"epoch": 1.4946778711484594,
"grad_norm": 0.3868750035762787,
"learning_rate": 0.0001576332851792945,
"loss": 0.9117,
"step": 667
},
{
"epoch": 1.4969187675070028,
"grad_norm": 0.3211595118045807,
"learning_rate": 0.00015747331468828887,
"loss": 0.7015,
"step": 668
},
{
"epoch": 1.4991596638655462,
"grad_norm": 0.30169111490249634,
"learning_rate": 0.00015731312427219737,
"loss": 0.8056,
"step": 669
},
{
"epoch": 1.5014005602240896,
"grad_norm": 0.30103328824043274,
"learning_rate": 0.0001571527145439983,
"loss": 0.6391,
"step": 670
},
{
"epoch": 1.503641456582633,
"grad_norm": 0.3065439462661743,
"learning_rate": 0.00015699208611750902,
"loss": 0.7279,
"step": 671
},
{
"epoch": 1.5058823529411764,
"grad_norm": 0.3444465100765228,
"learning_rate": 0.00015683123960738392,
"loss": 0.7274,
"step": 672
},
{
"epoch": 1.5081232492997199,
"grad_norm": 0.3165910542011261,
"learning_rate": 0.00015667017562911176,
"loss": 0.6784,
"step": 673
},
{
"epoch": 1.5103641456582633,
"grad_norm": 0.3377099335193634,
"learning_rate": 0.00015650889479901356,
"loss": 0.8188,
"step": 674
},
{
"epoch": 1.5126050420168067,
"grad_norm": 0.3051726818084717,
"learning_rate": 0.00015634739773424006,
"loss": 0.657,
"step": 675
},
{
"epoch": 1.51484593837535,
"grad_norm": 0.3249804675579071,
"learning_rate": 0.00015618568505276946,
"loss": 0.7148,
"step": 676
},
{
"epoch": 1.5170868347338935,
"grad_norm": 0.3395232856273651,
"learning_rate": 0.00015602375737340507,
"loss": 0.6749,
"step": 677
},
{
"epoch": 1.519327731092437,
"grad_norm": 0.33914825320243835,
"learning_rate": 0.0001558616153157728,
"loss": 0.7133,
"step": 678
},
{
"epoch": 1.5215686274509803,
"grad_norm": 0.3608812689781189,
"learning_rate": 0.00015569925950031908,
"loss": 0.7519,
"step": 679
},
{
"epoch": 1.5238095238095237,
"grad_norm": 0.37755894660949707,
"learning_rate": 0.00015553669054830805,
"loss": 0.757,
"step": 680
},
{
"epoch": 1.5260504201680671,
"grad_norm": 0.313101589679718,
"learning_rate": 0.0001553739090818196,
"loss": 0.7849,
"step": 681
},
{
"epoch": 1.5282913165266105,
"grad_norm": 0.2970316410064697,
"learning_rate": 0.0001552109157237468,
"loss": 0.6365,
"step": 682
},
{
"epoch": 1.530532212885154,
"grad_norm": 0.3263746201992035,
"learning_rate": 0.00015504771109779348,
"loss": 0.6997,
"step": 683
},
{
"epoch": 1.5327731092436974,
"grad_norm": 0.3154606521129608,
"learning_rate": 0.00015488429582847192,
"loss": 0.7298,
"step": 684
},
{
"epoch": 1.5350140056022408,
"grad_norm": 0.3260630965232849,
"learning_rate": 0.00015472067054110052,
"loss": 0.7921,
"step": 685
},
{
"epoch": 1.5372549019607842,
"grad_norm": 0.32374054193496704,
"learning_rate": 0.00015455683586180116,
"loss": 0.718,
"step": 686
},
{
"epoch": 1.5394957983193276,
"grad_norm": 0.3299919366836548,
"learning_rate": 0.00015439279241749715,
"loss": 0.7179,
"step": 687
},
{
"epoch": 1.541736694677871,
"grad_norm": 0.32276657223701477,
"learning_rate": 0.0001542285408359105,
"loss": 0.7589,
"step": 688
},
{
"epoch": 1.5439775910364144,
"grad_norm": 0.3329083323478699,
"learning_rate": 0.00015406408174555976,
"loss": 0.7549,
"step": 689
},
{
"epoch": 1.5462184873949578,
"grad_norm": 0.3457712233066559,
"learning_rate": 0.00015389941577575753,
"loss": 0.7139,
"step": 690
},
{
"epoch": 1.5484593837535015,
"grad_norm": 0.3504430949687958,
"learning_rate": 0.00015373454355660802,
"loss": 0.788,
"step": 691
},
{
"epoch": 1.5507002801120449,
"grad_norm": 0.3265605866909027,
"learning_rate": 0.00015356946571900464,
"loss": 0.7077,
"step": 692
},
{
"epoch": 1.5529411764705883,
"grad_norm": 0.3154314458370209,
"learning_rate": 0.00015340418289462764,
"loss": 0.7,
"step": 693
},
{
"epoch": 1.5551820728291317,
"grad_norm": 0.3218802511692047,
"learning_rate": 0.00015323869571594166,
"loss": 0.7735,
"step": 694
},
{
"epoch": 1.557422969187675,
"grad_norm": 0.31704089045524597,
"learning_rate": 0.00015307300481619333,
"loss": 0.7946,
"step": 695
},
{
"epoch": 1.5596638655462185,
"grad_norm": 0.3201853632926941,
"learning_rate": 0.0001529071108294088,
"loss": 0.7154,
"step": 696
},
{
"epoch": 1.561904761904762,
"grad_norm": 0.3154377341270447,
"learning_rate": 0.00015274101439039138,
"loss": 0.6905,
"step": 697
},
{
"epoch": 1.5641456582633053,
"grad_norm": 0.30675947666168213,
"learning_rate": 0.00015257471613471906,
"loss": 0.6897,
"step": 698
},
{
"epoch": 1.5663865546218487,
"grad_norm": 0.30410274863243103,
"learning_rate": 0.00015240821669874202,
"loss": 0.7181,
"step": 699
},
{
"epoch": 1.5686274509803921,
"grad_norm": 0.3099222779273987,
"learning_rate": 0.00015224151671958043,
"loss": 0.732,
"step": 700
},
{
"epoch": 1.5708683473389355,
"grad_norm": 0.32526805996894836,
"learning_rate": 0.00015207461683512175,
"loss": 0.7442,
"step": 701
},
{
"epoch": 1.573109243697479,
"grad_norm": 0.3597675859928131,
"learning_rate": 0.00015190751768401833,
"loss": 0.7059,
"step": 702
},
{
"epoch": 1.5753501400560224,
"grad_norm": 0.34002843499183655,
"learning_rate": 0.00015174021990568517,
"loss": 0.7309,
"step": 703
},
{
"epoch": 1.5775910364145658,
"grad_norm": 0.3448459804058075,
"learning_rate": 0.0001515727241402972,
"loss": 0.7543,
"step": 704
},
{
"epoch": 1.5798319327731094,
"grad_norm": 0.3388139605522156,
"learning_rate": 0.000151405031028787,
"loss": 0.6604,
"step": 705
},
{
"epoch": 1.5820728291316528,
"grad_norm": 0.33875682950019836,
"learning_rate": 0.0001512371412128424,
"loss": 0.7234,
"step": 706
},
{
"epoch": 1.5843137254901962,
"grad_norm": 0.3654973804950714,
"learning_rate": 0.00015106905533490372,
"loss": 0.8452,
"step": 707
},
{
"epoch": 1.5865546218487396,
"grad_norm": 0.31986573338508606,
"learning_rate": 0.00015090077403816178,
"loss": 0.6683,
"step": 708
},
{
"epoch": 1.588795518207283,
"grad_norm": 0.29579469561576843,
"learning_rate": 0.00015073229796655504,
"loss": 0.723,
"step": 709
},
{
"epoch": 1.5910364145658265,
"grad_norm": 0.31319934129714966,
"learning_rate": 0.0001505636277647672,
"loss": 0.7384,
"step": 710
},
{
"epoch": 1.5932773109243699,
"grad_norm": 0.30319178104400635,
"learning_rate": 0.00015039476407822502,
"loss": 0.7044,
"step": 711
},
{
"epoch": 1.5955182072829133,
"grad_norm": 0.3266353905200958,
"learning_rate": 0.0001502257075530954,
"loss": 0.7777,
"step": 712
},
{
"epoch": 1.5977591036414567,
"grad_norm": 0.3113664388656616,
"learning_rate": 0.00015005645883628342,
"loss": 0.7394,
"step": 713
},
{
"epoch": 1.6,
"grad_norm": 0.29923635721206665,
"learning_rate": 0.00014988701857542933,
"loss": 0.6867,
"step": 714
},
{
"epoch": 1.6022408963585435,
"grad_norm": 0.32305604219436646,
"learning_rate": 0.00014971738741890647,
"loss": 0.7512,
"step": 715
},
{
"epoch": 1.604481792717087,
"grad_norm": 0.30608245730400085,
"learning_rate": 0.0001495475660158187,
"loss": 0.7076,
"step": 716
},
{
"epoch": 1.6067226890756303,
"grad_norm": 0.3273196220397949,
"learning_rate": 0.00014937755501599772,
"loss": 0.7259,
"step": 717
},
{
"epoch": 1.6089635854341737,
"grad_norm": 0.3238683342933655,
"learning_rate": 0.0001492073550700009,
"loss": 0.716,
"step": 718
},
{
"epoch": 1.6112044817927171,
"grad_norm": 0.3258748948574066,
"learning_rate": 0.00014903696682910846,
"loss": 0.739,
"step": 719
},
{
"epoch": 1.6134453781512605,
"grad_norm": 0.32485276460647583,
"learning_rate": 0.00014886639094532128,
"loss": 0.681,
"step": 720
},
{
"epoch": 1.615686274509804,
"grad_norm": 0.3358374536037445,
"learning_rate": 0.0001486956280713582,
"loss": 0.7625,
"step": 721
},
{
"epoch": 1.6179271708683474,
"grad_norm": 0.30938801169395447,
"learning_rate": 0.00014852467886065357,
"loss": 0.6942,
"step": 722
},
{
"epoch": 1.6201680672268908,
"grad_norm": 0.35836121439933777,
"learning_rate": 0.00014835354396735482,
"loss": 0.7397,
"step": 723
},
{
"epoch": 1.6224089635854342,
"grad_norm": 0.3162292540073395,
"learning_rate": 0.00014818222404631992,
"loss": 0.7071,
"step": 724
},
{
"epoch": 1.6246498599439776,
"grad_norm": 0.32497039437294006,
"learning_rate": 0.0001480107197531148,
"loss": 0.7396,
"step": 725
},
{
"epoch": 1.626890756302521,
"grad_norm": 0.33408358693122864,
"learning_rate": 0.00014783903174401085,
"loss": 0.7593,
"step": 726
},
{
"epoch": 1.6291316526610644,
"grad_norm": 0.3246319591999054,
"learning_rate": 0.00014766716067598262,
"loss": 0.7302,
"step": 727
},
{
"epoch": 1.6313725490196078,
"grad_norm": 0.32565364241600037,
"learning_rate": 0.00014749510720670506,
"loss": 0.8113,
"step": 728
},
{
"epoch": 1.6336134453781512,
"grad_norm": 0.32790714502334595,
"learning_rate": 0.00014732287199455103,
"loss": 0.7884,
"step": 729
},
{
"epoch": 1.6358543417366946,
"grad_norm": 0.3148501217365265,
"learning_rate": 0.00014715045569858894,
"loss": 0.7214,
"step": 730
},
{
"epoch": 1.638095238095238,
"grad_norm": 0.30682241916656494,
"learning_rate": 0.00014697785897858012,
"loss": 0.725,
"step": 731
},
{
"epoch": 1.6403361344537815,
"grad_norm": 0.3391934037208557,
"learning_rate": 0.00014680508249497622,
"loss": 0.84,
"step": 732
},
{
"epoch": 1.6425770308123249,
"grad_norm": 0.31384411454200745,
"learning_rate": 0.0001466321269089168,
"loss": 0.6867,
"step": 733
},
{
"epoch": 1.6448179271708683,
"grad_norm": 0.33052271604537964,
"learning_rate": 0.00014645899288222687,
"loss": 0.7761,
"step": 734
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.3304360806941986,
"learning_rate": 0.00014628568107741407,
"loss": 0.8817,
"step": 735
},
{
"epoch": 1.649299719887955,
"grad_norm": 0.34404993057250977,
"learning_rate": 0.0001461121921576665,
"loss": 0.8155,
"step": 736
},
{
"epoch": 1.6515406162464985,
"grad_norm": 0.33394402265548706,
"learning_rate": 0.00014593852678684984,
"loss": 0.7656,
"step": 737
},
{
"epoch": 1.653781512605042,
"grad_norm": 0.35093650221824646,
"learning_rate": 0.0001457646856295051,
"loss": 0.8171,
"step": 738
},
{
"epoch": 1.6560224089635853,
"grad_norm": 0.31678837537765503,
"learning_rate": 0.00014559066935084588,
"loss": 0.7061,
"step": 739
},
{
"epoch": 1.6582633053221287,
"grad_norm": 0.3170653283596039,
"learning_rate": 0.00014541647861675592,
"loss": 0.7351,
"step": 740
},
{
"epoch": 1.6605042016806721,
"grad_norm": 0.34166768193244934,
"learning_rate": 0.0001452421140937865,
"loss": 0.7937,
"step": 741
},
{
"epoch": 1.6627450980392156,
"grad_norm": 0.3269054591655731,
"learning_rate": 0.00014506757644915393,
"loss": 0.8038,
"step": 742
},
{
"epoch": 1.664985994397759,
"grad_norm": 0.3237577974796295,
"learning_rate": 0.00014489286635073693,
"loss": 0.657,
"step": 743
},
{
"epoch": 1.6672268907563024,
"grad_norm": 0.3239234685897827,
"learning_rate": 0.00014471798446707426,
"loss": 0.6732,
"step": 744
},
{
"epoch": 1.6694677871148458,
"grad_norm": 0.30346667766571045,
"learning_rate": 0.00014454293146736187,
"loss": 0.669,
"step": 745
},
{
"epoch": 1.6717086834733892,
"grad_norm": 0.32579341530799866,
"learning_rate": 0.00014436770802145059,
"loss": 0.6693,
"step": 746
},
{
"epoch": 1.6739495798319328,
"grad_norm": 0.34438079595565796,
"learning_rate": 0.0001441923147998434,
"loss": 0.8594,
"step": 747
},
{
"epoch": 1.6761904761904762,
"grad_norm": 0.3287278115749359,
"learning_rate": 0.00014401675247369307,
"loss": 0.7633,
"step": 748
},
{
"epoch": 1.6784313725490196,
"grad_norm": 0.34410589933395386,
"learning_rate": 0.0001438410217147993,
"loss": 0.7815,
"step": 749
},
{
"epoch": 1.680672268907563,
"grad_norm": 0.4113364815711975,
"learning_rate": 0.0001436651231956064,
"loss": 0.7617,
"step": 750
},
{
"epoch": 1.6829131652661065,
"grad_norm": 0.3100757598876953,
"learning_rate": 0.0001434890575892006,
"loss": 0.673,
"step": 751
},
{
"epoch": 1.6851540616246499,
"grad_norm": 0.32215428352355957,
"learning_rate": 0.0001433128255693075,
"loss": 0.7694,
"step": 752
},
{
"epoch": 1.6873949579831933,
"grad_norm": 0.3526766002178192,
"learning_rate": 0.00014313642781028953,
"loss": 0.7397,
"step": 753
},
{
"epoch": 1.6896358543417367,
"grad_norm": 0.3373475968837738,
"learning_rate": 0.00014295986498714326,
"loss": 0.8275,
"step": 754
},
{
"epoch": 1.69187675070028,
"grad_norm": 0.32114461064338684,
"learning_rate": 0.0001427831377754969,
"loss": 0.7225,
"step": 755
},
{
"epoch": 1.6941176470588235,
"grad_norm": 0.320982426404953,
"learning_rate": 0.00014260624685160777,
"loss": 0.7966,
"step": 756
},
{
"epoch": 1.696358543417367,
"grad_norm": 0.28297820687294006,
"learning_rate": 0.0001424291928923596,
"loss": 0.6789,
"step": 757
},
{
"epoch": 1.6985994397759103,
"grad_norm": 0.31439271569252014,
"learning_rate": 0.00014225197657525995,
"loss": 0.7194,
"step": 758
},
{
"epoch": 1.7008403361344537,
"grad_norm": 0.33543679118156433,
"learning_rate": 0.0001420745985784377,
"loss": 0.8552,
"step": 759
},
{
"epoch": 1.7030812324929971,
"grad_norm": 0.33300474286079407,
"learning_rate": 0.0001418970595806404,
"loss": 0.867,
"step": 760
},
{
"epoch": 1.7053221288515408,
"grad_norm": 0.3134838938713074,
"learning_rate": 0.00014171936026123168,
"loss": 0.6651,
"step": 761
},
{
"epoch": 1.7075630252100842,
"grad_norm": 0.32312485575675964,
"learning_rate": 0.00014154150130018866,
"loss": 0.7978,
"step": 762
},
{
"epoch": 1.7098039215686276,
"grad_norm": 0.3270231783390045,
"learning_rate": 0.00014136348337809927,
"loss": 0.7574,
"step": 763
},
{
"epoch": 1.712044817927171,
"grad_norm": 0.32470935583114624,
"learning_rate": 0.0001411853071761598,
"loss": 0.6881,
"step": 764
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.3110044300556183,
"learning_rate": 0.00014100697337617217,
"loss": 0.6684,
"step": 765
},
{
"epoch": 1.7165266106442578,
"grad_norm": 0.344539076089859,
"learning_rate": 0.00014082848266054135,
"loss": 0.7387,
"step": 766
},
{
"epoch": 1.7187675070028012,
"grad_norm": 0.3174740672111511,
"learning_rate": 0.0001406498357122728,
"loss": 0.6459,
"step": 767
},
{
"epoch": 1.7210084033613446,
"grad_norm": 0.339348167181015,
"learning_rate": 0.00014047103321496976,
"loss": 0.658,
"step": 768
},
{
"epoch": 1.723249299719888,
"grad_norm": 0.3239974081516266,
"learning_rate": 0.0001402920758528307,
"loss": 0.6356,
"step": 769
},
{
"epoch": 1.7254901960784315,
"grad_norm": 0.33558371663093567,
"learning_rate": 0.00014011296431064676,
"loss": 0.7531,
"step": 770
},
{
"epoch": 1.7277310924369749,
"grad_norm": 0.35468053817749023,
"learning_rate": 0.0001399336992737989,
"loss": 0.783,
"step": 771
},
{
"epoch": 1.7299719887955183,
"grad_norm": 0.34820565581321716,
"learning_rate": 0.0001397542814282556,
"loss": 0.7227,
"step": 772
},
{
"epoch": 1.7322128851540617,
"grad_norm": 0.3655050992965698,
"learning_rate": 0.00013957471146056998,
"loss": 0.7366,
"step": 773
},
{
"epoch": 1.734453781512605,
"grad_norm": 0.3187251389026642,
"learning_rate": 0.00013939499005787736,
"loss": 0.7304,
"step": 774
},
{
"epoch": 1.7366946778711485,
"grad_norm": 0.3165695071220398,
"learning_rate": 0.00013921511790789234,
"loss": 0.7757,
"step": 775
},
{
"epoch": 1.738935574229692,
"grad_norm": 0.3043893873691559,
"learning_rate": 0.00013903509569890662,
"loss": 0.7689,
"step": 776
},
{
"epoch": 1.7411764705882353,
"grad_norm": 0.30263444781303406,
"learning_rate": 0.0001388549241197859,
"loss": 0.6638,
"step": 777
},
{
"epoch": 1.7434173669467787,
"grad_norm": 0.3344278931617737,
"learning_rate": 0.00013867460385996754,
"loss": 0.7027,
"step": 778
},
{
"epoch": 1.7456582633053221,
"grad_norm": 0.309756338596344,
"learning_rate": 0.00013849413560945787,
"loss": 0.7662,
"step": 779
},
{
"epoch": 1.7478991596638656,
"grad_norm": 0.33109283447265625,
"learning_rate": 0.00013831352005882946,
"loss": 0.7888,
"step": 780
},
{
"epoch": 1.750140056022409,
"grad_norm": 0.32347214221954346,
"learning_rate": 0.00013813275789921855,
"loss": 0.7313,
"step": 781
},
{
"epoch": 1.7523809523809524,
"grad_norm": 0.3274037539958954,
"learning_rate": 0.00013795184982232233,
"loss": 0.779,
"step": 782
},
{
"epoch": 1.7546218487394958,
"grad_norm": 0.3241097927093506,
"learning_rate": 0.0001377707965203965,
"loss": 0.744,
"step": 783
},
{
"epoch": 1.7568627450980392,
"grad_norm": 0.32738566398620605,
"learning_rate": 0.00013758959868625232,
"loss": 0.7585,
"step": 784
},
{
"epoch": 1.7591036414565826,
"grad_norm": 0.3516925275325775,
"learning_rate": 0.00013740825701325418,
"loss": 0.7445,
"step": 785
},
{
"epoch": 1.761344537815126,
"grad_norm": 0.33777570724487305,
"learning_rate": 0.00013722677219531683,
"loss": 0.7038,
"step": 786
},
{
"epoch": 1.7635854341736694,
"grad_norm": 0.35104092955589294,
"learning_rate": 0.0001370451449269029,
"loss": 0.7527,
"step": 787
},
{
"epoch": 1.7658263305322128,
"grad_norm": 0.33028444647789,
"learning_rate": 0.00013686337590301995,
"loss": 0.7227,
"step": 788
},
{
"epoch": 1.7680672268907562,
"grad_norm": 0.3553149104118347,
"learning_rate": 0.0001366814658192181,
"loss": 0.6937,
"step": 789
},
{
"epoch": 1.7703081232492996,
"grad_norm": 0.32514214515686035,
"learning_rate": 0.0001364994153715872,
"loss": 0.7198,
"step": 790
},
{
"epoch": 1.772549019607843,
"grad_norm": 0.3584795892238617,
"learning_rate": 0.00013631722525675412,
"loss": 0.8049,
"step": 791
},
{
"epoch": 1.7747899159663865,
"grad_norm": 0.3179537057876587,
"learning_rate": 0.0001361348961718804,
"loss": 0.7795,
"step": 792
},
{
"epoch": 1.7770308123249299,
"grad_norm": 0.2919451594352722,
"learning_rate": 0.0001359524288146591,
"loss": 0.6553,
"step": 793
},
{
"epoch": 1.7792717086834733,
"grad_norm": 0.31730496883392334,
"learning_rate": 0.0001357698238833126,
"loss": 0.812,
"step": 794
},
{
"epoch": 1.7815126050420167,
"grad_norm": 0.31148526072502136,
"learning_rate": 0.00013558708207658948,
"loss": 0.6558,
"step": 795
},
{
"epoch": 1.78375350140056,
"grad_norm": 0.3431221544742584,
"learning_rate": 0.00013540420409376236,
"loss": 0.766,
"step": 796
},
{
"epoch": 1.7859943977591035,
"grad_norm": 0.32656344771385193,
"learning_rate": 0.00013522119063462482,
"loss": 0.744,
"step": 797
},
{
"epoch": 1.788235294117647,
"grad_norm": 0.3525942862033844,
"learning_rate": 0.00013503804239948874,
"loss": 0.7954,
"step": 798
},
{
"epoch": 1.7904761904761903,
"grad_norm": 0.3282519280910492,
"learning_rate": 0.00013485476008918184,
"loss": 0.72,
"step": 799
},
{
"epoch": 1.7927170868347337,
"grad_norm": 0.3348991870880127,
"learning_rate": 0.00013467134440504495,
"loss": 0.6925,
"step": 800
},
{
"epoch": 1.7949579831932772,
"grad_norm": 0.325764000415802,
"learning_rate": 0.00013448779604892917,
"loss": 0.7261,
"step": 801
},
{
"epoch": 1.7971988795518208,
"grad_norm": 0.33784714341163635,
"learning_rate": 0.00013430411572319323,
"loss": 0.7637,
"step": 802
},
{
"epoch": 1.7994397759103642,
"grad_norm": 0.31615525484085083,
"learning_rate": 0.00013412030413070095,
"loss": 0.649,
"step": 803
},
{
"epoch": 1.8016806722689076,
"grad_norm": 0.30358991026878357,
"learning_rate": 0.00013393636197481842,
"loss": 0.6566,
"step": 804
},
{
"epoch": 1.803921568627451,
"grad_norm": 0.3021032512187958,
"learning_rate": 0.00013375228995941133,
"loss": 0.7694,
"step": 805
},
{
"epoch": 1.8061624649859944,
"grad_norm": 0.3464861810207367,
"learning_rate": 0.00013356808878884228,
"loss": 0.7604,
"step": 806
},
{
"epoch": 1.8084033613445378,
"grad_norm": 0.3448963165283203,
"learning_rate": 0.00013338375916796812,
"loss": 0.7434,
"step": 807
},
{
"epoch": 1.8106442577030812,
"grad_norm": 0.3179613947868347,
"learning_rate": 0.00013319930180213712,
"loss": 0.6885,
"step": 808
},
{
"epoch": 1.8128851540616246,
"grad_norm": 0.31533604860305786,
"learning_rate": 0.0001330147173971866,
"loss": 0.6736,
"step": 809
},
{
"epoch": 1.815126050420168,
"grad_norm": 0.3316449224948883,
"learning_rate": 0.00013283000665943972,
"loss": 0.7354,
"step": 810
},
{
"epoch": 1.8173669467787115,
"grad_norm": 0.3497374951839447,
"learning_rate": 0.00013264517029570324,
"loss": 0.7956,
"step": 811
},
{
"epoch": 1.8196078431372549,
"grad_norm": 0.33198150992393494,
"learning_rate": 0.00013246020901326464,
"loss": 0.7105,
"step": 812
},
{
"epoch": 1.8218487394957983,
"grad_norm": 0.3188040554523468,
"learning_rate": 0.00013227512351988925,
"loss": 0.7141,
"step": 813
},
{
"epoch": 1.8240896358543417,
"grad_norm": 0.30372536182403564,
"learning_rate": 0.00013208991452381798,
"loss": 0.7223,
"step": 814
},
{
"epoch": 1.826330532212885,
"grad_norm": 0.6289974451065063,
"learning_rate": 0.00013190458273376404,
"loss": 0.7972,
"step": 815
},
{
"epoch": 1.8285714285714287,
"grad_norm": 0.3201170563697815,
"learning_rate": 0.00013171912885891063,
"loss": 0.7433,
"step": 816
},
{
"epoch": 1.8308123249299721,
"grad_norm": 0.35013505816459656,
"learning_rate": 0.00013153355360890815,
"loss": 0.7361,
"step": 817
},
{
"epoch": 1.8330532212885156,
"grad_norm": 0.34328508377075195,
"learning_rate": 0.00013134785769387147,
"loss": 0.7031,
"step": 818
},
{
"epoch": 1.835294117647059,
"grad_norm": 0.3286396265029907,
"learning_rate": 0.0001311620418243771,
"loss": 0.7657,
"step": 819
},
{
"epoch": 1.8375350140056024,
"grad_norm": 0.3106536567211151,
"learning_rate": 0.00013097610671146065,
"loss": 0.7554,
"step": 820
},
{
"epoch": 1.8397759103641458,
"grad_norm": 0.3301306366920471,
"learning_rate": 0.0001307900530666139,
"loss": 0.7126,
"step": 821
},
{
"epoch": 1.8420168067226892,
"grad_norm": 0.34890398383140564,
"learning_rate": 0.00013060388160178235,
"loss": 0.7953,
"step": 822
},
{
"epoch": 1.8442577030812326,
"grad_norm": 0.31672972440719604,
"learning_rate": 0.0001304175930293623,
"loss": 0.7359,
"step": 823
},
{
"epoch": 1.846498599439776,
"grad_norm": 0.3178110122680664,
"learning_rate": 0.0001302311880621981,
"loss": 0.7662,
"step": 824
},
{
"epoch": 1.8487394957983194,
"grad_norm": 0.35035309195518494,
"learning_rate": 0.0001300446674135795,
"loss": 0.7105,
"step": 825
},
{
"epoch": 1.8509803921568628,
"grad_norm": 0.3116212785243988,
"learning_rate": 0.00012985803179723903,
"loss": 0.7424,
"step": 826
},
{
"epoch": 1.8532212885154062,
"grad_norm": 0.34882014989852905,
"learning_rate": 0.00012967128192734902,
"loss": 0.7574,
"step": 827
},
{
"epoch": 1.8554621848739496,
"grad_norm": 0.31585201621055603,
"learning_rate": 0.0001294844185185191,
"loss": 0.805,
"step": 828
},
{
"epoch": 1.857703081232493,
"grad_norm": 0.314979612827301,
"learning_rate": 0.00012929744228579323,
"loss": 0.5924,
"step": 829
},
{
"epoch": 1.8599439775910365,
"grad_norm": 0.3267279863357544,
"learning_rate": 0.00012911035394464723,
"loss": 0.763,
"step": 830
},
{
"epoch": 1.8621848739495799,
"grad_norm": 0.3523454964160919,
"learning_rate": 0.00012892315421098586,
"loss": 0.8221,
"step": 831
},
{
"epoch": 1.8644257703081233,
"grad_norm": 0.31915390491485596,
"learning_rate": 0.00012873584380114012,
"loss": 0.8081,
"step": 832
},
{
"epoch": 1.8666666666666667,
"grad_norm": 0.32959550619125366,
"learning_rate": 0.00012854842343186455,
"loss": 0.7208,
"step": 833
},
{
"epoch": 1.86890756302521,
"grad_norm": 0.32218843698501587,
"learning_rate": 0.0001283608938203344,
"loss": 0.7514,
"step": 834
},
{
"epoch": 1.8711484593837535,
"grad_norm": 0.3358590602874756,
"learning_rate": 0.00012817325568414297,
"loss": 0.7401,
"step": 835
},
{
"epoch": 1.873389355742297,
"grad_norm": 0.3331390917301178,
"learning_rate": 0.00012798550974129887,
"loss": 0.6833,
"step": 836
},
{
"epoch": 1.8756302521008403,
"grad_norm": 0.33320388197898865,
"learning_rate": 0.00012779765671022325,
"loss": 0.6287,
"step": 837
},
{
"epoch": 1.8778711484593837,
"grad_norm": 0.3285226821899414,
"learning_rate": 0.00012760969730974694,
"loss": 0.6698,
"step": 838
},
{
"epoch": 1.8801120448179272,
"grad_norm": 0.3503000736236572,
"learning_rate": 0.0001274216322591078,
"loss": 0.6997,
"step": 839
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.35233861207962036,
"learning_rate": 0.00012723346227794816,
"loss": 0.7241,
"step": 840
},
{
"epoch": 1.884593837535014,
"grad_norm": 0.3368009924888611,
"learning_rate": 0.00012704518808631166,
"loss": 0.7379,
"step": 841
},
{
"epoch": 1.8868347338935574,
"grad_norm": 0.3214666247367859,
"learning_rate": 0.0001268568104046408,
"loss": 0.6946,
"step": 842
},
{
"epoch": 1.8890756302521008,
"grad_norm": 0.3163416385650635,
"learning_rate": 0.0001266683299537741,
"loss": 0.7044,
"step": 843
},
{
"epoch": 1.8913165266106442,
"grad_norm": 0.3129757046699524,
"learning_rate": 0.0001264797474549433,
"loss": 0.6105,
"step": 844
},
{
"epoch": 1.8935574229691876,
"grad_norm": 0.35761409997940063,
"learning_rate": 0.00012629106362977064,
"loss": 0.8279,
"step": 845
},
{
"epoch": 1.895798319327731,
"grad_norm": 0.3365795314311981,
"learning_rate": 0.00012610227920026608,
"loss": 0.6859,
"step": 846
},
{
"epoch": 1.8980392156862744,
"grad_norm": 0.3187201917171478,
"learning_rate": 0.00012591339488882456,
"loss": 0.6907,
"step": 847
},
{
"epoch": 1.9002801120448178,
"grad_norm": 0.31638118624687195,
"learning_rate": 0.0001257244114182232,
"loss": 0.7243,
"step": 848
},
{
"epoch": 1.9025210084033612,
"grad_norm": 0.34840863943099976,
"learning_rate": 0.0001255353295116187,
"loss": 0.687,
"step": 849
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.3138674199581146,
"learning_rate": 0.00012534614989254423,
"loss": 0.7198,
"step": 850
},
{
"epoch": 1.907002801120448,
"grad_norm": 0.362076997756958,
"learning_rate": 0.00012515687328490695,
"loss": 0.7757,
"step": 851
},
{
"epoch": 1.9092436974789915,
"grad_norm": 0.32191118597984314,
"learning_rate": 0.00012496750041298516,
"loss": 0.7103,
"step": 852
},
{
"epoch": 1.9114845938375349,
"grad_norm": 0.3364141583442688,
"learning_rate": 0.0001247780320014255,
"loss": 0.7509,
"step": 853
},
{
"epoch": 1.9137254901960783,
"grad_norm": 0.35079094767570496,
"learning_rate": 0.00012458846877524024,
"loss": 0.6844,
"step": 854
},
{
"epoch": 1.9159663865546217,
"grad_norm": 0.33669090270996094,
"learning_rate": 0.00012439881145980444,
"loss": 0.6801,
"step": 855
},
{
"epoch": 1.9182072829131651,
"grad_norm": 0.31381383538246155,
"learning_rate": 0.00012420906078085316,
"loss": 0.6341,
"step": 856
},
{
"epoch": 1.9204481792717085,
"grad_norm": 0.3355211615562439,
"learning_rate": 0.00012401921746447872,
"loss": 0.7244,
"step": 857
},
{
"epoch": 1.9226890756302522,
"grad_norm": 0.3467940092086792,
"learning_rate": 0.00012382928223712806,
"loss": 0.7195,
"step": 858
},
{
"epoch": 1.9249299719887956,
"grad_norm": 0.33815139532089233,
"learning_rate": 0.00012363925582559968,
"loss": 0.704,
"step": 859
},
{
"epoch": 1.927170868347339,
"grad_norm": 0.315331369638443,
"learning_rate": 0.00012344913895704097,
"loss": 0.6618,
"step": 860
},
{
"epoch": 1.9294117647058824,
"grad_norm": 0.3420298099517822,
"learning_rate": 0.00012325893235894564,
"loss": 0.7005,
"step": 861
},
{
"epoch": 1.9316526610644258,
"grad_norm": 0.33387431502342224,
"learning_rate": 0.00012306863675915056,
"loss": 0.8085,
"step": 862
},
{
"epoch": 1.9338935574229692,
"grad_norm": 0.35296881198883057,
"learning_rate": 0.0001228782528858333,
"loss": 0.7798,
"step": 863
},
{
"epoch": 1.9361344537815126,
"grad_norm": 0.3383727967739105,
"learning_rate": 0.00012268778146750915,
"loss": 0.6835,
"step": 864
},
{
"epoch": 1.938375350140056,
"grad_norm": 0.33279767632484436,
"learning_rate": 0.00012249722323302842,
"loss": 0.7388,
"step": 865
},
{
"epoch": 1.9406162464985994,
"grad_norm": 0.3255688548088074,
"learning_rate": 0.00012230657891157363,
"loss": 0.7167,
"step": 866
},
{
"epoch": 1.9428571428571428,
"grad_norm": 0.34093961119651794,
"learning_rate": 0.00012211584923265672,
"loss": 0.7139,
"step": 867
},
{
"epoch": 1.9450980392156862,
"grad_norm": 0.3231501281261444,
"learning_rate": 0.00012192503492611625,
"loss": 0.6996,
"step": 868
},
{
"epoch": 1.9473389355742297,
"grad_norm": 0.3145335614681244,
"learning_rate": 0.00012173413672211458,
"loss": 0.7754,
"step": 869
},
{
"epoch": 1.949579831932773,
"grad_norm": 0.32647013664245605,
"learning_rate": 0.00012154315535113511,
"loss": 0.6923,
"step": 870
},
{
"epoch": 1.9518207282913165,
"grad_norm": 0.3004699647426605,
"learning_rate": 0.00012135209154397962,
"loss": 0.6856,
"step": 871
},
{
"epoch": 1.95406162464986,
"grad_norm": 0.33689776062965393,
"learning_rate": 0.00012116094603176513,
"loss": 0.7621,
"step": 872
},
{
"epoch": 1.9563025210084035,
"grad_norm": 0.30934080481529236,
"learning_rate": 0.00012096971954592145,
"loss": 0.7126,
"step": 873
},
{
"epoch": 1.958543417366947,
"grad_norm": 0.346757173538208,
"learning_rate": 0.00012077841281818816,
"loss": 0.708,
"step": 874
},
{
"epoch": 1.9607843137254903,
"grad_norm": 0.3257780075073242,
"learning_rate": 0.00012058702658061197,
"loss": 0.6621,
"step": 875
},
{
"epoch": 1.9630252100840337,
"grad_norm": 0.3402419090270996,
"learning_rate": 0.0001203955615655438,
"loss": 0.7003,
"step": 876
},
{
"epoch": 1.9652661064425772,
"grad_norm": 0.3437844514846802,
"learning_rate": 0.00012020401850563596,
"loss": 0.7469,
"step": 877
},
{
"epoch": 1.9675070028011206,
"grad_norm": 0.3671571612358093,
"learning_rate": 0.00012001239813383951,
"loss": 0.785,
"step": 878
},
{
"epoch": 1.969747899159664,
"grad_norm": 0.32898640632629395,
"learning_rate": 0.00011982070118340127,
"loss": 0.7292,
"step": 879
},
{
"epoch": 1.9719887955182074,
"grad_norm": 0.3525889217853546,
"learning_rate": 0.00011962892838786115,
"loss": 0.7226,
"step": 880
},
{
"epoch": 1.9742296918767508,
"grad_norm": 0.33842483162879944,
"learning_rate": 0.00011943708048104922,
"loss": 0.6483,
"step": 881
},
{
"epoch": 1.9764705882352942,
"grad_norm": 0.3088597357273102,
"learning_rate": 0.000119245158197083,
"loss": 0.6449,
"step": 882
},
{
"epoch": 1.9787114845938376,
"grad_norm": 0.32097896933555603,
"learning_rate": 0.00011905316227036465,
"loss": 0.7823,
"step": 883
},
{
"epoch": 1.980952380952381,
"grad_norm": 0.316945880651474,
"learning_rate": 0.00011886109343557808,
"loss": 0.6802,
"step": 884
},
{
"epoch": 1.9831932773109244,
"grad_norm": 0.3052690327167511,
"learning_rate": 0.00011866895242768621,
"loss": 0.6601,
"step": 885
},
{
"epoch": 1.9854341736694678,
"grad_norm": 0.3162146210670471,
"learning_rate": 0.00011847673998192815,
"loss": 0.7377,
"step": 886
},
{
"epoch": 1.9876750700280112,
"grad_norm": 0.3386717736721039,
"learning_rate": 0.00011828445683381628,
"loss": 0.7741,
"step": 887
},
{
"epoch": 1.9899159663865547,
"grad_norm": 0.36137786507606506,
"learning_rate": 0.00011809210371913368,
"loss": 0.765,
"step": 888
},
{
"epoch": 1.992156862745098,
"grad_norm": 0.37440425157546997,
"learning_rate": 0.00011789968137393108,
"loss": 0.7596,
"step": 889
},
{
"epoch": 1.9943977591036415,
"grad_norm": 0.36767804622650146,
"learning_rate": 0.00011770719053452407,
"loss": 0.761,
"step": 890
},
{
"epoch": 1.9966386554621849,
"grad_norm": 0.39699792861938477,
"learning_rate": 0.00011751463193749044,
"loss": 0.7781,
"step": 891
},
{
"epoch": 1.9988795518207283,
"grad_norm": 0.35793137550354004,
"learning_rate": 0.00011732200631966716,
"loss": 0.7117,
"step": 892
},
{
"epoch": 2.0011204481792717,
"grad_norm": 0.3200768828392029,
"learning_rate": 0.00011712931441814776,
"loss": 0.5798,
"step": 893
},
{
"epoch": 2.003361344537815,
"grad_norm": 0.3186042308807373,
"learning_rate": 0.00011693655697027934,
"loss": 0.5207,
"step": 894
},
{
"epoch": 2.0056022408963585,
"grad_norm": 0.3172122538089752,
"learning_rate": 0.00011674373471365987,
"loss": 0.6287,
"step": 895
},
{
"epoch": 2.007843137254902,
"grad_norm": 0.31914347410202026,
"learning_rate": 0.00011655084838613519,
"loss": 0.5249,
"step": 896
},
{
"epoch": 2.0100840336134453,
"grad_norm": 0.33173030614852905,
"learning_rate": 0.00011635789872579647,
"loss": 0.5942,
"step": 897
},
{
"epoch": 2.0123249299719888,
"grad_norm": 0.32792940735816956,
"learning_rate": 0.00011616488647097718,
"loss": 0.5245,
"step": 898
},
{
"epoch": 2.014565826330532,
"grad_norm": 0.3804270625114441,
"learning_rate": 0.00011597181236025023,
"loss": 0.5439,
"step": 899
},
{
"epoch": 2.0168067226890756,
"grad_norm": 0.387532114982605,
"learning_rate": 0.00011577867713242531,
"loss": 0.5392,
"step": 900
},
{
"epoch": 2.019047619047619,
"grad_norm": 0.4034246504306793,
"learning_rate": 0.00011558548152654596,
"loss": 0.6632,
"step": 901
},
{
"epoch": 2.0212885154061624,
"grad_norm": 0.367631733417511,
"learning_rate": 0.00011539222628188675,
"loss": 0.483,
"step": 902
},
{
"epoch": 2.023529411764706,
"grad_norm": 0.3613899052143097,
"learning_rate": 0.00011519891213795049,
"loss": 0.5233,
"step": 903
},
{
"epoch": 2.025770308123249,
"grad_norm": 0.3706178665161133,
"learning_rate": 0.00011500553983446527,
"loss": 0.5169,
"step": 904
},
{
"epoch": 2.0280112044817926,
"grad_norm": 0.349579781293869,
"learning_rate": 0.00011481211011138188,
"loss": 0.5005,
"step": 905
},
{
"epoch": 2.030252100840336,
"grad_norm": 0.3860476016998291,
"learning_rate": 0.00011461862370887076,
"loss": 0.4231,
"step": 906
},
{
"epoch": 2.0324929971988794,
"grad_norm": 0.3320625424385071,
"learning_rate": 0.00011442508136731918,
"loss": 0.454,
"step": 907
},
{
"epoch": 2.034733893557423,
"grad_norm": 0.37210145592689514,
"learning_rate": 0.00011423148382732853,
"loss": 0.5261,
"step": 908
},
{
"epoch": 2.0369747899159663,
"grad_norm": 0.3801030218601227,
"learning_rate": 0.00011403783182971144,
"loss": 0.4948,
"step": 909
},
{
"epoch": 2.0392156862745097,
"grad_norm": 0.3721443712711334,
"learning_rate": 0.00011384412611548886,
"loss": 0.5317,
"step": 910
},
{
"epoch": 2.041456582633053,
"grad_norm": 0.3912367522716522,
"learning_rate": 0.00011365036742588739,
"loss": 0.5498,
"step": 911
},
{
"epoch": 2.0436974789915965,
"grad_norm": 0.3820691704750061,
"learning_rate": 0.0001134565565023362,
"loss": 0.505,
"step": 912
},
{
"epoch": 2.04593837535014,
"grad_norm": 0.38775426149368286,
"learning_rate": 0.00011326269408646444,
"loss": 0.5591,
"step": 913
},
{
"epoch": 2.0481792717086833,
"grad_norm": 0.3893496096134186,
"learning_rate": 0.00011306878092009828,
"loss": 0.4718,
"step": 914
},
{
"epoch": 2.0504201680672267,
"grad_norm": 0.41591331362724304,
"learning_rate": 0.0001128748177452581,
"loss": 0.4804,
"step": 915
},
{
"epoch": 2.05266106442577,
"grad_norm": 0.428853839635849,
"learning_rate": 0.00011268080530415557,
"loss": 0.5182,
"step": 916
},
{
"epoch": 2.0549019607843135,
"grad_norm": 0.4089893698692322,
"learning_rate": 0.00011248674433919097,
"loss": 0.5354,
"step": 917
},
{
"epoch": 2.057142857142857,
"grad_norm": 0.4228387773036957,
"learning_rate": 0.00011229263559295021,
"loss": 0.5265,
"step": 918
},
{
"epoch": 2.0593837535014003,
"grad_norm": 0.35249876976013184,
"learning_rate": 0.00011209847980820208,
"loss": 0.4888,
"step": 919
},
{
"epoch": 2.0616246498599438,
"grad_norm": 0.3890874683856964,
"learning_rate": 0.00011190427772789529,
"loss": 0.5673,
"step": 920
},
{
"epoch": 2.063865546218487,
"grad_norm": 0.3728325366973877,
"learning_rate": 0.00011171003009515578,
"loss": 0.5417,
"step": 921
},
{
"epoch": 2.066106442577031,
"grad_norm": 0.34896883368492126,
"learning_rate": 0.00011151573765328373,
"loss": 0.5229,
"step": 922
},
{
"epoch": 2.0683473389355744,
"grad_norm": 0.3686348795890808,
"learning_rate": 0.00011132140114575085,
"loss": 0.5064,
"step": 923
},
{
"epoch": 2.070588235294118,
"grad_norm": 0.37526431679725647,
"learning_rate": 0.00011112702131619746,
"loss": 0.5321,
"step": 924
},
{
"epoch": 2.0728291316526612,
"grad_norm": 0.36911851167678833,
"learning_rate": 0.00011093259890842962,
"loss": 0.5587,
"step": 925
},
{
"epoch": 2.0750700280112047,
"grad_norm": 0.37566208839416504,
"learning_rate": 0.00011073813466641632,
"loss": 0.4848,
"step": 926
},
{
"epoch": 2.077310924369748,
"grad_norm": 0.36949753761291504,
"learning_rate": 0.00011054362933428666,
"loss": 0.4581,
"step": 927
},
{
"epoch": 2.0795518207282915,
"grad_norm": 0.3933381140232086,
"learning_rate": 0.00011034908365632695,
"loss": 0.5107,
"step": 928
},
{
"epoch": 2.081792717086835,
"grad_norm": 0.4068252742290497,
"learning_rate": 0.00011015449837697791,
"loss": 0.576,
"step": 929
},
{
"epoch": 2.0840336134453783,
"grad_norm": 0.3985835015773773,
"learning_rate": 0.00010995987424083178,
"loss": 0.53,
"step": 930
},
{
"epoch": 2.0862745098039217,
"grad_norm": 0.404930055141449,
"learning_rate": 0.00010976521199262945,
"loss": 0.4857,
"step": 931
},
{
"epoch": 2.088515406162465,
"grad_norm": 0.4016098082065582,
"learning_rate": 0.00010957051237725775,
"loss": 0.5401,
"step": 932
},
{
"epoch": 2.0907563025210085,
"grad_norm": 0.4225110113620758,
"learning_rate": 0.00010937577613974641,
"loss": 0.518,
"step": 933
},
{
"epoch": 2.092997198879552,
"grad_norm": 0.409502238035202,
"learning_rate": 0.00010918100402526532,
"loss": 0.5052,
"step": 934
},
{
"epoch": 2.0952380952380953,
"grad_norm": 0.4569055736064911,
"learning_rate": 0.00010898619677912165,
"loss": 0.5265,
"step": 935
},
{
"epoch": 2.0974789915966388,
"grad_norm": 0.3866918087005615,
"learning_rate": 0.00010879135514675705,
"loss": 0.484,
"step": 936
},
{
"epoch": 2.099719887955182,
"grad_norm": 0.3866770267486572,
"learning_rate": 0.00010859647987374467,
"loss": 0.5016,
"step": 937
},
{
"epoch": 2.1019607843137256,
"grad_norm": 0.6051259636878967,
"learning_rate": 0.00010840157170578644,
"loss": 0.5076,
"step": 938
},
{
"epoch": 2.104201680672269,
"grad_norm": 0.8082510828971863,
"learning_rate": 0.00010820663138871017,
"loss": 0.5766,
"step": 939
},
{
"epoch": 2.1064425770308124,
"grad_norm": 0.38558223843574524,
"learning_rate": 0.00010801165966846663,
"loss": 0.4923,
"step": 940
},
{
"epoch": 2.108683473389356,
"grad_norm": 0.3915948271751404,
"learning_rate": 0.00010781665729112687,
"loss": 0.4829,
"step": 941
},
{
"epoch": 2.110924369747899,
"grad_norm": 0.39486053586006165,
"learning_rate": 0.00010762162500287915,
"loss": 0.4444,
"step": 942
},
{
"epoch": 2.1131652661064426,
"grad_norm": 0.4007076919078827,
"learning_rate": 0.00010742656355002622,
"loss": 0.5095,
"step": 943
},
{
"epoch": 2.115406162464986,
"grad_norm": 0.38552355766296387,
"learning_rate": 0.00010723147367898243,
"loss": 0.4971,
"step": 944
},
{
"epoch": 2.1176470588235294,
"grad_norm": 0.39927443861961365,
"learning_rate": 0.00010703635613627083,
"loss": 0.4723,
"step": 945
},
{
"epoch": 2.119887955182073,
"grad_norm": 0.4161412715911865,
"learning_rate": 0.0001068412116685205,
"loss": 0.5383,
"step": 946
},
{
"epoch": 2.1221288515406163,
"grad_norm": 0.41281387209892273,
"learning_rate": 0.00010664604102246336,
"loss": 0.5495,
"step": 947
},
{
"epoch": 2.1243697478991597,
"grad_norm": 0.4148519039154053,
"learning_rate": 0.00010645084494493165,
"loss": 0.507,
"step": 948
},
{
"epoch": 2.126610644257703,
"grad_norm": 0.3995266556739807,
"learning_rate": 0.00010625562418285482,
"loss": 0.5555,
"step": 949
},
{
"epoch": 2.1288515406162465,
"grad_norm": 0.37733468413352966,
"learning_rate": 0.00010606037948325687,
"loss": 0.4587,
"step": 950
},
{
"epoch": 2.13109243697479,
"grad_norm": 0.4213622212409973,
"learning_rate": 0.00010586511159325332,
"loss": 0.4619,
"step": 951
},
{
"epoch": 2.1333333333333333,
"grad_norm": 0.4159161448478699,
"learning_rate": 0.00010566982126004847,
"loss": 0.4313,
"step": 952
},
{
"epoch": 2.1355742296918767,
"grad_norm": 0.39115792512893677,
"learning_rate": 0.00010547450923093247,
"loss": 0.4614,
"step": 953
},
{
"epoch": 2.13781512605042,
"grad_norm": 0.41564685106277466,
"learning_rate": 0.0001052791762532786,
"loss": 0.4961,
"step": 954
},
{
"epoch": 2.1400560224089635,
"grad_norm": 0.43851983547210693,
"learning_rate": 0.00010508382307454012,
"loss": 0.5061,
"step": 955
},
{
"epoch": 2.142296918767507,
"grad_norm": 0.4009677469730377,
"learning_rate": 0.00010488845044224773,
"loss": 0.4842,
"step": 956
},
{
"epoch": 2.1445378151260504,
"grad_norm": 0.3707684278488159,
"learning_rate": 0.0001046930591040065,
"loss": 0.4427,
"step": 957
},
{
"epoch": 2.1467787114845938,
"grad_norm": 0.41808104515075684,
"learning_rate": 0.00010449764980749317,
"loss": 0.5344,
"step": 958
},
{
"epoch": 2.149019607843137,
"grad_norm": 0.3968338668346405,
"learning_rate": 0.00010430222330045304,
"loss": 0.4834,
"step": 959
},
{
"epoch": 2.1512605042016806,
"grad_norm": 0.4360380172729492,
"learning_rate": 0.00010410678033069745,
"loss": 0.5652,
"step": 960
},
{
"epoch": 2.153501400560224,
"grad_norm": 0.3921569883823395,
"learning_rate": 0.0001039113216461006,
"loss": 0.5277,
"step": 961
},
{
"epoch": 2.1557422969187674,
"grad_norm": 0.3638018071651459,
"learning_rate": 0.00010371584799459684,
"loss": 0.4433,
"step": 962
},
{
"epoch": 2.157983193277311,
"grad_norm": 0.41490018367767334,
"learning_rate": 0.00010352036012417787,
"loss": 0.5051,
"step": 963
},
{
"epoch": 2.160224089635854,
"grad_norm": 0.40135008096694946,
"learning_rate": 0.00010332485878288976,
"loss": 0.5134,
"step": 964
},
{
"epoch": 2.1624649859943976,
"grad_norm": 0.43049126863479614,
"learning_rate": 0.00010312934471883007,
"loss": 0.4991,
"step": 965
},
{
"epoch": 2.164705882352941,
"grad_norm": 0.426323264837265,
"learning_rate": 0.0001029338186801451,
"loss": 0.4926,
"step": 966
},
{
"epoch": 2.1669467787114844,
"grad_norm": 0.4975692629814148,
"learning_rate": 0.00010273828141502701,
"loss": 0.5008,
"step": 967
},
{
"epoch": 2.169187675070028,
"grad_norm": 0.4585362374782562,
"learning_rate": 0.00010254273367171085,
"loss": 0.5372,
"step": 968
},
{
"epoch": 2.1714285714285713,
"grad_norm": 0.4774106740951538,
"learning_rate": 0.0001023471761984718,
"loss": 0.5576,
"step": 969
},
{
"epoch": 2.1736694677871147,
"grad_norm": 0.38876253366470337,
"learning_rate": 0.00010215160974362223,
"loss": 0.5064,
"step": 970
},
{
"epoch": 2.175910364145658,
"grad_norm": 0.39690589904785156,
"learning_rate": 0.00010195603505550892,
"loss": 0.4827,
"step": 971
},
{
"epoch": 2.1781512605042015,
"grad_norm": 0.38885483145713806,
"learning_rate": 0.00010176045288251015,
"loss": 0.5288,
"step": 972
},
{
"epoch": 2.180392156862745,
"grad_norm": 0.4034357964992523,
"learning_rate": 0.00010156486397303284,
"loss": 0.4972,
"step": 973
},
{
"epoch": 2.1826330532212883,
"grad_norm": 0.4167933762073517,
"learning_rate": 0.00010136926907550967,
"loss": 0.4697,
"step": 974
},
{
"epoch": 2.184873949579832,
"grad_norm": 0.43337056040763855,
"learning_rate": 0.00010117366893839625,
"loss": 0.6123,
"step": 975
},
{
"epoch": 2.1871148459383756,
"grad_norm": 0.4040941298007965,
"learning_rate": 0.00010097806431016826,
"loss": 0.5713,
"step": 976
},
{
"epoch": 2.189355742296919,
"grad_norm": 0.3885524272918701,
"learning_rate": 0.00010078245593931852,
"loss": 0.4791,
"step": 977
},
{
"epoch": 2.1915966386554624,
"grad_norm": 0.4015814960002899,
"learning_rate": 0.0001005868445743542,
"loss": 0.5125,
"step": 978
},
{
"epoch": 2.193837535014006,
"grad_norm": 0.38841763138771057,
"learning_rate": 0.00010039123096379387,
"loss": 0.5713,
"step": 979
},
{
"epoch": 2.196078431372549,
"grad_norm": 0.42656928300857544,
"learning_rate": 0.00010019561585616486,
"loss": 0.5685,
"step": 980
},
{
"epoch": 2.1983193277310926,
"grad_norm": 0.40539607405662537,
"learning_rate": 0.0001,
"loss": 0.5409,
"step": 981
},
{
"epoch": 2.200560224089636,
"grad_norm": 0.38911890983581543,
"learning_rate": 9.980438414383517e-05,
"loss": 0.4751,
"step": 982
},
{
"epoch": 2.2028011204481794,
"grad_norm": 0.4134393632411957,
"learning_rate": 9.960876903620613e-05,
"loss": 0.4815,
"step": 983
},
{
"epoch": 2.205042016806723,
"grad_norm": 0.4219866096973419,
"learning_rate": 9.941315542564582e-05,
"loss": 0.4524,
"step": 984
},
{
"epoch": 2.2072829131652663,
"grad_norm": 0.39480990171432495,
"learning_rate": 9.92175440606815e-05,
"loss": 0.4704,
"step": 985
},
{
"epoch": 2.2095238095238097,
"grad_norm": 0.44161802530288696,
"learning_rate": 9.902193568983175e-05,
"loss": 0.5188,
"step": 986
},
{
"epoch": 2.211764705882353,
"grad_norm": 0.45645326375961304,
"learning_rate": 9.882633106160376e-05,
"loss": 0.5173,
"step": 987
},
{
"epoch": 2.2140056022408965,
"grad_norm": 0.43310198187828064,
"learning_rate": 9.863073092449034e-05,
"loss": 0.4756,
"step": 988
},
{
"epoch": 2.21624649859944,
"grad_norm": 0.4334922134876251,
"learning_rate": 9.843513602696717e-05,
"loss": 0.5117,
"step": 989
},
{
"epoch": 2.2184873949579833,
"grad_norm": 0.4409882128238678,
"learning_rate": 9.823954711748986e-05,
"loss": 0.5314,
"step": 990
},
{
"epoch": 2.2207282913165267,
"grad_norm": 0.44057101011276245,
"learning_rate": 9.80439649444911e-05,
"loss": 0.5062,
"step": 991
},
{
"epoch": 2.22296918767507,
"grad_norm": 0.4002052843570709,
"learning_rate": 9.784839025637778e-05,
"loss": 0.493,
"step": 992
},
{
"epoch": 2.2252100840336135,
"grad_norm": 0.45808616280555725,
"learning_rate": 9.765282380152821e-05,
"loss": 0.575,
"step": 993
},
{
"epoch": 2.227450980392157,
"grad_norm": 0.3935520350933075,
"learning_rate": 9.745726632828913e-05,
"loss": 0.4905,
"step": 994
},
{
"epoch": 2.2296918767507004,
"grad_norm": 0.4213854968547821,
"learning_rate": 9.726171858497297e-05,
"loss": 0.514,
"step": 995
},
{
"epoch": 2.2319327731092438,
"grad_norm": 0.39440077543258667,
"learning_rate": 9.706618131985489e-05,
"loss": 0.4915,
"step": 996
},
{
"epoch": 2.234173669467787,
"grad_norm": 0.40655189752578735,
"learning_rate": 9.687065528116996e-05,
"loss": 0.5126,
"step": 997
},
{
"epoch": 2.2364145658263306,
"grad_norm": 0.39869415760040283,
"learning_rate": 9.667514121711025e-05,
"loss": 0.4476,
"step": 998
},
{
"epoch": 2.238655462184874,
"grad_norm": 0.41084200143814087,
"learning_rate": 9.647963987582212e-05,
"loss": 0.5781,
"step": 999
},
{
"epoch": 2.2408963585434174,
"grad_norm": 0.4361216723918915,
"learning_rate": 9.628415200540317e-05,
"loss": 0.5486,
"step": 1000
},
{
"epoch": 2.243137254901961,
"grad_norm": 0.41102784872055054,
"learning_rate": 9.608867835389943e-05,
"loss": 0.5043,
"step": 1001
},
{
"epoch": 2.245378151260504,
"grad_norm": 0.38688912987709045,
"learning_rate": 9.589321966930255e-05,
"loss": 0.6149,
"step": 1002
},
{
"epoch": 2.2476190476190476,
"grad_norm": 0.4278058409690857,
"learning_rate": 9.569777669954694e-05,
"loss": 0.4524,
"step": 1003
},
{
"epoch": 2.249859943977591,
"grad_norm": 0.41745269298553467,
"learning_rate": 9.550235019250688e-05,
"loss": 0.5073,
"step": 1004
},
{
"epoch": 2.2521008403361344,
"grad_norm": 0.41152727603912354,
"learning_rate": 9.530694089599351e-05,
"loss": 0.4801,
"step": 1005
},
{
"epoch": 2.254341736694678,
"grad_norm": 0.416883260011673,
"learning_rate": 9.511154955775231e-05,
"loss": 0.5401,
"step": 1006
},
{
"epoch": 2.2565826330532213,
"grad_norm": 0.4182227849960327,
"learning_rate": 9.491617692545992e-05,
"loss": 0.5813,
"step": 1007
},
{
"epoch": 2.2588235294117647,
"grad_norm": 0.3997863531112671,
"learning_rate": 9.472082374672146e-05,
"loss": 0.5075,
"step": 1008
},
{
"epoch": 2.261064425770308,
"grad_norm": 0.41225093603134155,
"learning_rate": 9.452549076906755e-05,
"loss": 0.5302,
"step": 1009
},
{
"epoch": 2.2633053221288515,
"grad_norm": 0.41989865899086,
"learning_rate": 9.433017873995158e-05,
"loss": 0.4532,
"step": 1010
},
{
"epoch": 2.265546218487395,
"grad_norm": 0.3788585364818573,
"learning_rate": 9.413488840674673e-05,
"loss": 0.5296,
"step": 1011
},
{
"epoch": 2.2677871148459383,
"grad_norm": 0.39189624786376953,
"learning_rate": 9.393962051674318e-05,
"loss": 0.4802,
"step": 1012
},
{
"epoch": 2.2700280112044817,
"grad_norm": 0.4206637740135193,
"learning_rate": 9.374437581714523e-05,
"loss": 0.5033,
"step": 1013
},
{
"epoch": 2.272268907563025,
"grad_norm": 0.44238483905792236,
"learning_rate": 9.354915505506839e-05,
"loss": 0.4976,
"step": 1014
},
{
"epoch": 2.2745098039215685,
"grad_norm": 0.44965559244155884,
"learning_rate": 9.335395897753666e-05,
"loss": 0.5419,
"step": 1015
},
{
"epoch": 2.276750700280112,
"grad_norm": 0.4196763038635254,
"learning_rate": 9.315878833147953e-05,
"loss": 0.5027,
"step": 1016
},
{
"epoch": 2.2789915966386554,
"grad_norm": 0.3971319794654846,
"learning_rate": 9.296364386372918e-05,
"loss": 0.5604,
"step": 1017
},
{
"epoch": 2.2812324929971988,
"grad_norm": 0.4141682982444763,
"learning_rate": 9.27685263210176e-05,
"loss": 0.4437,
"step": 1018
},
{
"epoch": 2.283473389355742,
"grad_norm": 0.46171408891677856,
"learning_rate": 9.25734364499738e-05,
"loss": 0.6103,
"step": 1019
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.41306909918785095,
"learning_rate": 9.237837499712088e-05,
"loss": 0.556,
"step": 1020
},
{
"epoch": 2.287955182072829,
"grad_norm": 0.4035610556602478,
"learning_rate": 9.218334270887316e-05,
"loss": 0.5183,
"step": 1021
},
{
"epoch": 2.2901960784313724,
"grad_norm": 0.402854859828949,
"learning_rate": 9.19883403315334e-05,
"loss": 0.4592,
"step": 1022
},
{
"epoch": 2.292436974789916,
"grad_norm": 0.4036504030227661,
"learning_rate": 9.179336861128987e-05,
"loss": 0.4513,
"step": 1023
},
{
"epoch": 2.2946778711484592,
"grad_norm": 0.42101335525512695,
"learning_rate": 9.159842829421358e-05,
"loss": 0.5122,
"step": 1024
},
{
"epoch": 2.2969187675070026,
"grad_norm": 0.41138187050819397,
"learning_rate": 9.140352012625537e-05,
"loss": 0.5554,
"step": 1025
},
{
"epoch": 2.299159663865546,
"grad_norm": 0.42715969681739807,
"learning_rate": 9.120864485324299e-05,
"loss": 0.4697,
"step": 1026
},
{
"epoch": 2.3014005602240895,
"grad_norm": 0.3975067138671875,
"learning_rate": 9.101380322087836e-05,
"loss": 0.4772,
"step": 1027
},
{
"epoch": 2.303641456582633,
"grad_norm": 0.42877110838890076,
"learning_rate": 9.081899597473469e-05,
"loss": 0.4915,
"step": 1028
},
{
"epoch": 2.3058823529411763,
"grad_norm": 0.40029025077819824,
"learning_rate": 9.06242238602536e-05,
"loss": 0.5242,
"step": 1029
},
{
"epoch": 2.3081232492997197,
"grad_norm": 0.44601088762283325,
"learning_rate": 9.042948762274227e-05,
"loss": 0.5101,
"step": 1030
},
{
"epoch": 2.310364145658263,
"grad_norm": 0.41966551542282104,
"learning_rate": 9.023478800737057e-05,
"loss": 0.4863,
"step": 1031
},
{
"epoch": 2.3126050420168065,
"grad_norm": 0.4753289222717285,
"learning_rate": 9.004012575916824e-05,
"loss": 0.6469,
"step": 1032
},
{
"epoch": 2.31484593837535,
"grad_norm": 0.4360703229904175,
"learning_rate": 8.984550162302211e-05,
"loss": 0.5078,
"step": 1033
},
{
"epoch": 2.3170868347338933,
"grad_norm": 0.4141191840171814,
"learning_rate": 8.965091634367306e-05,
"loss": 0.4478,
"step": 1034
},
{
"epoch": 2.3193277310924367,
"grad_norm": 0.4165865182876587,
"learning_rate": 8.945637066571337e-05,
"loss": 0.4496,
"step": 1035
},
{
"epoch": 2.3215686274509806,
"grad_norm": 0.41498279571533203,
"learning_rate": 8.92618653335837e-05,
"loss": 0.572,
"step": 1036
},
{
"epoch": 2.323809523809524,
"grad_norm": 0.43287456035614014,
"learning_rate": 8.906740109157039e-05,
"loss": 0.5647,
"step": 1037
},
{
"epoch": 2.3260504201680674,
"grad_norm": 0.42451250553131104,
"learning_rate": 8.887297868380255e-05,
"loss": 0.5155,
"step": 1038
},
{
"epoch": 2.328291316526611,
"grad_norm": 0.4161588251590729,
"learning_rate": 8.867859885424916e-05,
"loss": 0.5157,
"step": 1039
},
{
"epoch": 2.330532212885154,
"grad_norm": 0.4384361803531647,
"learning_rate": 8.848426234671628e-05,
"loss": 0.4937,
"step": 1040
},
{
"epoch": 2.3327731092436976,
"grad_norm": 0.42889484763145447,
"learning_rate": 8.828996990484423e-05,
"loss": 0.5067,
"step": 1041
},
{
"epoch": 2.335014005602241,
"grad_norm": 0.42189306020736694,
"learning_rate": 8.809572227210472e-05,
"loss": 0.4646,
"step": 1042
},
{
"epoch": 2.3372549019607844,
"grad_norm": 0.3996080458164215,
"learning_rate": 8.790152019179793e-05,
"loss": 0.5148,
"step": 1043
},
{
"epoch": 2.339495798319328,
"grad_norm": 0.40798112750053406,
"learning_rate": 8.770736440704979e-05,
"loss": 0.5793,
"step": 1044
},
{
"epoch": 2.3417366946778713,
"grad_norm": 0.40013808012008667,
"learning_rate": 8.751325566080905e-05,
"loss": 0.4127,
"step": 1045
},
{
"epoch": 2.3439775910364147,
"grad_norm": 0.41228431463241577,
"learning_rate": 8.731919469584442e-05,
"loss": 0.5119,
"step": 1046
},
{
"epoch": 2.346218487394958,
"grad_norm": 0.416579931974411,
"learning_rate": 8.712518225474191e-05,
"loss": 0.5385,
"step": 1047
},
{
"epoch": 2.3484593837535015,
"grad_norm": 0.45098868012428284,
"learning_rate": 8.693121907990176e-05,
"loss": 0.4893,
"step": 1048
},
{
"epoch": 2.350700280112045,
"grad_norm": 0.4259752333164215,
"learning_rate": 8.673730591353559e-05,
"loss": 0.4854,
"step": 1049
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.4256626069545746,
"learning_rate": 8.654344349766383e-05,
"loss": 0.5299,
"step": 1050
},
{
"epoch": 2.3551820728291317,
"grad_norm": 0.4528552293777466,
"learning_rate": 8.634963257411266e-05,
"loss": 0.6318,
"step": 1051
},
{
"epoch": 2.357422969187675,
"grad_norm": 0.43573692440986633,
"learning_rate": 8.615587388451116e-05,
"loss": 0.5874,
"step": 1052
},
{
"epoch": 2.3596638655462185,
"grad_norm": 0.43992942571640015,
"learning_rate": 8.59621681702886e-05,
"loss": 0.4871,
"step": 1053
},
{
"epoch": 2.361904761904762,
"grad_norm": 0.45180973410606384,
"learning_rate": 8.57685161726715e-05,
"loss": 0.569,
"step": 1054
},
{
"epoch": 2.3641456582633054,
"grad_norm": 0.44669607281684875,
"learning_rate": 8.557491863268087e-05,
"loss": 0.4861,
"step": 1055
},
{
"epoch": 2.3663865546218488,
"grad_norm": 0.4159175157546997,
"learning_rate": 8.53813762911293e-05,
"loss": 0.465,
"step": 1056
},
{
"epoch": 2.368627450980392,
"grad_norm": 0.408426970243454,
"learning_rate": 8.518788988861815e-05,
"loss": 0.5288,
"step": 1057
},
{
"epoch": 2.3708683473389356,
"grad_norm": 0.38745927810668945,
"learning_rate": 8.499446016553474e-05,
"loss": 0.4711,
"step": 1058
},
{
"epoch": 2.373109243697479,
"grad_norm": 0.4092199504375458,
"learning_rate": 8.480108786204955e-05,
"loss": 0.4444,
"step": 1059
},
{
"epoch": 2.3753501400560224,
"grad_norm": 0.3924252986907959,
"learning_rate": 8.460777371811327e-05,
"loss": 0.4669,
"step": 1060
},
{
"epoch": 2.377591036414566,
"grad_norm": 0.40003421902656555,
"learning_rate": 8.441451847345407e-05,
"loss": 0.5144,
"step": 1061
},
{
"epoch": 2.3798319327731092,
"grad_norm": 0.4236973524093628,
"learning_rate": 8.42213228675747e-05,
"loss": 0.4746,
"step": 1062
},
{
"epoch": 2.3820728291316526,
"grad_norm": 0.4162292182445526,
"learning_rate": 8.402818763974978e-05,
"loss": 0.5576,
"step": 1063
},
{
"epoch": 2.384313725490196,
"grad_norm": 0.4187072813510895,
"learning_rate": 8.383511352902285e-05,
"loss": 0.4439,
"step": 1064
},
{
"epoch": 2.3865546218487395,
"grad_norm": 0.42568284273147583,
"learning_rate": 8.364210127420354e-05,
"loss": 0.5249,
"step": 1065
},
{
"epoch": 2.388795518207283,
"grad_norm": 0.4502849280834198,
"learning_rate": 8.344915161386483e-05,
"loss": 0.4901,
"step": 1066
},
{
"epoch": 2.3910364145658263,
"grad_norm": 0.41129040718078613,
"learning_rate": 8.325626528634017e-05,
"loss": 0.4934,
"step": 1067
},
{
"epoch": 2.3932773109243697,
"grad_norm": 0.4660985767841339,
"learning_rate": 8.306344302972067e-05,
"loss": 0.4783,
"step": 1068
},
{
"epoch": 2.395518207282913,
"grad_norm": 0.4513007700443268,
"learning_rate": 8.287068558185225e-05,
"loss": 0.5745,
"step": 1069
},
{
"epoch": 2.3977591036414565,
"grad_norm": 0.45751145482063293,
"learning_rate": 8.267799368033286e-05,
"loss": 0.496,
"step": 1070
},
{
"epoch": 2.4,
"grad_norm": 0.40702319145202637,
"learning_rate": 8.248536806250958e-05,
"loss": 0.4638,
"step": 1071
},
{
"epoch": 2.4022408963585433,
"grad_norm": 0.4384859800338745,
"learning_rate": 8.229280946547595e-05,
"loss": 0.4855,
"step": 1072
},
{
"epoch": 2.4044817927170867,
"grad_norm": 0.40514451265335083,
"learning_rate": 8.210031862606895e-05,
"loss": 0.45,
"step": 1073
},
{
"epoch": 2.40672268907563,
"grad_norm": 0.4280382990837097,
"learning_rate": 8.190789628086632e-05,
"loss": 0.4715,
"step": 1074
},
{
"epoch": 2.4089635854341735,
"grad_norm": 0.39819565415382385,
"learning_rate": 8.171554316618374e-05,
"loss": 0.5013,
"step": 1075
},
{
"epoch": 2.411204481792717,
"grad_norm": 0.4179481863975525,
"learning_rate": 8.152326001807189e-05,
"loss": 0.4887,
"step": 1076
},
{
"epoch": 2.4134453781512604,
"grad_norm": 0.41948774456977844,
"learning_rate": 8.13310475723138e-05,
"loss": 0.5164,
"step": 1077
},
{
"epoch": 2.4156862745098038,
"grad_norm": 0.4570457935333252,
"learning_rate": 8.113890656442193e-05,
"loss": 0.6124,
"step": 1078
},
{
"epoch": 2.417927170868347,
"grad_norm": 0.42094066739082336,
"learning_rate": 8.094683772963537e-05,
"loss": 0.4857,
"step": 1079
},
{
"epoch": 2.4201680672268906,
"grad_norm": 0.42249664664268494,
"learning_rate": 8.075484180291701e-05,
"loss": 0.5505,
"step": 1080
},
{
"epoch": 2.422408963585434,
"grad_norm": 0.41316911578178406,
"learning_rate": 8.056291951895079e-05,
"loss": 0.5063,
"step": 1081
},
{
"epoch": 2.4246498599439774,
"grad_norm": 0.423313170671463,
"learning_rate": 8.037107161213886e-05,
"loss": 0.4174,
"step": 1082
},
{
"epoch": 2.426890756302521,
"grad_norm": 0.45118579268455505,
"learning_rate": 8.017929881659874e-05,
"loss": 0.5024,
"step": 1083
},
{
"epoch": 2.4291316526610642,
"grad_norm": 0.431448757648468,
"learning_rate": 7.99876018661605e-05,
"loss": 0.5391,
"step": 1084
},
{
"epoch": 2.431372549019608,
"grad_norm": 0.43506568670272827,
"learning_rate": 7.979598149436403e-05,
"loss": 0.5026,
"step": 1085
},
{
"epoch": 2.4336134453781515,
"grad_norm": 0.42530521750450134,
"learning_rate": 7.960443843445621e-05,
"loss": 0.5372,
"step": 1086
},
{
"epoch": 2.435854341736695,
"grad_norm": 0.4265046715736389,
"learning_rate": 7.941297341938803e-05,
"loss": 0.5104,
"step": 1087
},
{
"epoch": 2.4380952380952383,
"grad_norm": 0.4294826090335846,
"learning_rate": 7.922158718181185e-05,
"loss": 0.5355,
"step": 1088
},
{
"epoch": 2.4403361344537817,
"grad_norm": 0.3806697726249695,
"learning_rate": 7.903028045407857e-05,
"loss": 0.4372,
"step": 1089
},
{
"epoch": 2.442577030812325,
"grad_norm": 0.4443027377128601,
"learning_rate": 7.883905396823486e-05,
"loss": 0.5097,
"step": 1090
},
{
"epoch": 2.4448179271708685,
"grad_norm": 0.4052489101886749,
"learning_rate": 7.864790845602039e-05,
"loss": 0.4297,
"step": 1091
},
{
"epoch": 2.447058823529412,
"grad_norm": 0.38045182824134827,
"learning_rate": 7.845684464886487e-05,
"loss": 0.507,
"step": 1092
},
{
"epoch": 2.4492997198879554,
"grad_norm": 0.3937990069389343,
"learning_rate": 7.826586327788547e-05,
"loss": 0.5029,
"step": 1093
},
{
"epoch": 2.4515406162464988,
"grad_norm": 0.4530947804450989,
"learning_rate": 7.80749650738838e-05,
"loss": 0.5362,
"step": 1094
},
{
"epoch": 2.453781512605042,
"grad_norm": 0.46375659108161926,
"learning_rate": 7.788415076734333e-05,
"loss": 0.5646,
"step": 1095
},
{
"epoch": 2.4560224089635856,
"grad_norm": 0.4116787314414978,
"learning_rate": 7.76934210884264e-05,
"loss": 0.6341,
"step": 1096
},
{
"epoch": 2.458263305322129,
"grad_norm": 0.4486657381057739,
"learning_rate": 7.75027767669716e-05,
"loss": 0.5135,
"step": 1097
},
{
"epoch": 2.4605042016806724,
"grad_norm": 0.42132434248924255,
"learning_rate": 7.731221853249088e-05,
"loss": 0.5435,
"step": 1098
},
{
"epoch": 2.462745098039216,
"grad_norm": 0.42719486355781555,
"learning_rate": 7.712174711416674e-05,
"loss": 0.5286,
"step": 1099
},
{
"epoch": 2.4649859943977592,
"grad_norm": 0.4020358622074127,
"learning_rate": 7.693136324084948e-05,
"loss": 0.4945,
"step": 1100
},
{
"epoch": 2.4672268907563026,
"grad_norm": 0.4580785036087036,
"learning_rate": 7.674106764105442e-05,
"loss": 0.589,
"step": 1101
},
{
"epoch": 2.469467787114846,
"grad_norm": 0.4302530288696289,
"learning_rate": 7.655086104295904e-05,
"loss": 0.5739,
"step": 1102
},
{
"epoch": 2.4717086834733895,
"grad_norm": 0.4251365661621094,
"learning_rate": 7.636074417440036e-05,
"loss": 0.4898,
"step": 1103
},
{
"epoch": 2.473949579831933,
"grad_norm": 0.40312501788139343,
"learning_rate": 7.617071776287196e-05,
"loss": 0.5138,
"step": 1104
},
{
"epoch": 2.4761904761904763,
"grad_norm": 0.43058258295059204,
"learning_rate": 7.598078253552129e-05,
"loss": 0.5195,
"step": 1105
},
{
"epoch": 2.4784313725490197,
"grad_norm": 0.41228926181793213,
"learning_rate": 7.579093921914688e-05,
"loss": 0.4914,
"step": 1106
},
{
"epoch": 2.480672268907563,
"grad_norm": 0.3853016197681427,
"learning_rate": 7.560118854019559e-05,
"loss": 0.4631,
"step": 1107
},
{
"epoch": 2.4829131652661065,
"grad_norm": 0.446071594953537,
"learning_rate": 7.541153122475978e-05,
"loss": 0.5478,
"step": 1108
},
{
"epoch": 2.48515406162465,
"grad_norm": 0.39395663142204285,
"learning_rate": 7.522196799857453e-05,
"loss": 0.4713,
"step": 1109
},
{
"epoch": 2.4873949579831933,
"grad_norm": 0.44347715377807617,
"learning_rate": 7.503249958701489e-05,
"loss": 0.5647,
"step": 1110
},
{
"epoch": 2.4896358543417367,
"grad_norm": 0.4742254614830017,
"learning_rate": 7.484312671509306e-05,
"loss": 0.5057,
"step": 1111
},
{
"epoch": 2.49187675070028,
"grad_norm": 0.4173833429813385,
"learning_rate": 7.465385010745579e-05,
"loss": 0.4004,
"step": 1112
},
{
"epoch": 2.4941176470588236,
"grad_norm": 0.4468843638896942,
"learning_rate": 7.446467048838131e-05,
"loss": 0.5514,
"step": 1113
},
{
"epoch": 2.496358543417367,
"grad_norm": 0.48716241121292114,
"learning_rate": 7.427558858177679e-05,
"loss": 0.4748,
"step": 1114
},
{
"epoch": 2.4985994397759104,
"grad_norm": 0.45233801007270813,
"learning_rate": 7.408660511117546e-05,
"loss": 0.5237,
"step": 1115
},
{
"epoch": 2.500840336134454,
"grad_norm": 0.4716877043247223,
"learning_rate": 7.389772079973396e-05,
"loss": 0.5594,
"step": 1116
},
{
"epoch": 2.503081232492997,
"grad_norm": 0.4315103590488434,
"learning_rate": 7.37089363702294e-05,
"loss": 0.4487,
"step": 1117
},
{
"epoch": 2.5053221288515406,
"grad_norm": 0.44232210516929626,
"learning_rate": 7.352025254505673e-05,
"loss": 0.5537,
"step": 1118
},
{
"epoch": 2.507563025210084,
"grad_norm": 0.44675686955451965,
"learning_rate": 7.333167004622592e-05,
"loss": 0.4987,
"step": 1119
},
{
"epoch": 2.5098039215686274,
"grad_norm": 0.3972238004207611,
"learning_rate": 7.31431895953592e-05,
"loss": 0.5414,
"step": 1120
},
{
"epoch": 2.512044817927171,
"grad_norm": 0.4098942279815674,
"learning_rate": 7.295481191368836e-05,
"loss": 0.5653,
"step": 1121
},
{
"epoch": 2.5142857142857142,
"grad_norm": 0.4425329566001892,
"learning_rate": 7.276653772205186e-05,
"loss": 0.502,
"step": 1122
},
{
"epoch": 2.5165266106442576,
"grad_norm": 0.44188177585601807,
"learning_rate": 7.257836774089222e-05,
"loss": 0.5204,
"step": 1123
},
{
"epoch": 2.518767507002801,
"grad_norm": 0.40702101588249207,
"learning_rate": 7.239030269025311e-05,
"loss": 0.5016,
"step": 1124
},
{
"epoch": 2.5210084033613445,
"grad_norm": 0.47361934185028076,
"learning_rate": 7.220234328977677e-05,
"loss": 0.5352,
"step": 1125
},
{
"epoch": 2.523249299719888,
"grad_norm": 0.4118124842643738,
"learning_rate": 7.201449025870113e-05,
"loss": 0.483,
"step": 1126
},
{
"epoch": 2.5254901960784313,
"grad_norm": 0.42633935809135437,
"learning_rate": 7.182674431585704e-05,
"loss": 0.5221,
"step": 1127
},
{
"epoch": 2.5277310924369747,
"grad_norm": 0.41050711274147034,
"learning_rate": 7.163910617966563e-05,
"loss": 0.4936,
"step": 1128
},
{
"epoch": 2.529971988795518,
"grad_norm": 0.4213041365146637,
"learning_rate": 7.145157656813546e-05,
"loss": 0.5258,
"step": 1129
},
{
"epoch": 2.5322128851540615,
"grad_norm": 0.44016504287719727,
"learning_rate": 7.126415619885986e-05,
"loss": 0.5254,
"step": 1130
},
{
"epoch": 2.534453781512605,
"grad_norm": 0.4144013226032257,
"learning_rate": 7.107684578901414e-05,
"loss": 0.5113,
"step": 1131
},
{
"epoch": 2.5366946778711483,
"grad_norm": 0.4324791431427002,
"learning_rate": 7.088964605535278e-05,
"loss": 0.4534,
"step": 1132
},
{
"epoch": 2.5389355742296917,
"grad_norm": 0.4527924656867981,
"learning_rate": 7.070255771420678e-05,
"loss": 0.5252,
"step": 1133
},
{
"epoch": 2.541176470588235,
"grad_norm": 0.42882460355758667,
"learning_rate": 7.051558148148092e-05,
"loss": 0.4494,
"step": 1134
},
{
"epoch": 2.5434173669467786,
"grad_norm": 0.45250919461250305,
"learning_rate": 7.032871807265096e-05,
"loss": 0.4848,
"step": 1135
},
{
"epoch": 2.545658263305322,
"grad_norm": 0.452020525932312,
"learning_rate": 7.014196820276097e-05,
"loss": 0.5801,
"step": 1136
},
{
"epoch": 2.5478991596638654,
"grad_norm": 0.44832712411880493,
"learning_rate": 6.99553325864205e-05,
"loss": 0.4844,
"step": 1137
},
{
"epoch": 2.550140056022409,
"grad_norm": 0.44664841890335083,
"learning_rate": 6.976881193780196e-05,
"loss": 0.4997,
"step": 1138
},
{
"epoch": 2.552380952380952,
"grad_norm": 0.45292359590530396,
"learning_rate": 6.958240697063773e-05,
"loss": 0.4583,
"step": 1139
},
{
"epoch": 2.5546218487394956,
"grad_norm": 0.44887852668762207,
"learning_rate": 6.939611839821767e-05,
"loss": 0.5134,
"step": 1140
},
{
"epoch": 2.556862745098039,
"grad_norm": 0.46710190176963806,
"learning_rate": 6.920994693338612e-05,
"loss": 0.5406,
"step": 1141
},
{
"epoch": 2.5591036414565824,
"grad_norm": 0.4302251935005188,
"learning_rate": 6.90238932885394e-05,
"loss": 0.4367,
"step": 1142
},
{
"epoch": 2.561344537815126,
"grad_norm": 0.39620494842529297,
"learning_rate": 6.883795817562293e-05,
"loss": 0.4373,
"step": 1143
},
{
"epoch": 2.5635854341736692,
"grad_norm": 0.4327729046344757,
"learning_rate": 6.865214230612857e-05,
"loss": 0.4567,
"step": 1144
},
{
"epoch": 2.5658263305322127,
"grad_norm": 0.4318556487560272,
"learning_rate": 6.846644639109186e-05,
"loss": 0.4956,
"step": 1145
},
{
"epoch": 2.568067226890756,
"grad_norm": 0.410940021276474,
"learning_rate": 6.82808711410894e-05,
"loss": 0.4715,
"step": 1146
},
{
"epoch": 2.5703081232492995,
"grad_norm": 0.44518476724624634,
"learning_rate": 6.809541726623601e-05,
"loss": 0.5613,
"step": 1147
},
{
"epoch": 2.572549019607843,
"grad_norm": 0.4663293957710266,
"learning_rate": 6.791008547618206e-05,
"loss": 0.4837,
"step": 1148
},
{
"epoch": 2.5747899159663863,
"grad_norm": 0.43246734142303467,
"learning_rate": 6.772487648011074e-05,
"loss": 0.5244,
"step": 1149
},
{
"epoch": 2.5770308123249297,
"grad_norm": 0.471043199300766,
"learning_rate": 6.753979098673538e-05,
"loss": 0.5452,
"step": 1150
},
{
"epoch": 2.5792717086834736,
"grad_norm": 0.43286454677581787,
"learning_rate": 6.735482970429676e-05,
"loss": 0.448,
"step": 1151
},
{
"epoch": 2.581512605042017,
"grad_norm": 0.43996450304985046,
"learning_rate": 6.716999334056031e-05,
"loss": 0.4486,
"step": 1152
},
{
"epoch": 2.5837535014005604,
"grad_norm": 0.40108829736709595,
"learning_rate": 6.698528260281345e-05,
"loss": 0.39,
"step": 1153
},
{
"epoch": 2.585994397759104,
"grad_norm": 0.4602837562561035,
"learning_rate": 6.680069819786287e-05,
"loss": 0.5458,
"step": 1154
},
{
"epoch": 2.588235294117647,
"grad_norm": 0.44124406576156616,
"learning_rate": 6.66162408320319e-05,
"loss": 0.5629,
"step": 1155
},
{
"epoch": 2.5904761904761906,
"grad_norm": 0.41357114911079407,
"learning_rate": 6.643191121115773e-05,
"loss": 0.4054,
"step": 1156
},
{
"epoch": 2.592717086834734,
"grad_norm": 0.4528425633907318,
"learning_rate": 6.624771004058868e-05,
"loss": 0.547,
"step": 1157
},
{
"epoch": 2.5949579831932774,
"grad_norm": 0.4345560371875763,
"learning_rate": 6.606363802518159e-05,
"loss": 0.4827,
"step": 1158
},
{
"epoch": 2.597198879551821,
"grad_norm": 0.44091105461120605,
"learning_rate": 6.587969586929906e-05,
"loss": 0.5285,
"step": 1159
},
{
"epoch": 2.5994397759103642,
"grad_norm": 0.4253813326358795,
"learning_rate": 6.569588427680678e-05,
"loss": 0.4701,
"step": 1160
},
{
"epoch": 2.6016806722689076,
"grad_norm": 0.4591507315635681,
"learning_rate": 6.551220395107085e-05,
"loss": 0.4894,
"step": 1161
},
{
"epoch": 2.603921568627451,
"grad_norm": 0.4634987711906433,
"learning_rate": 6.532865559495504e-05,
"loss": 0.5739,
"step": 1162
},
{
"epoch": 2.6061624649859945,
"grad_norm": 0.4665946960449219,
"learning_rate": 6.514523991081815e-05,
"loss": 0.5316,
"step": 1163
},
{
"epoch": 2.608403361344538,
"grad_norm": 0.42269474267959595,
"learning_rate": 6.496195760051129e-05,
"loss": 0.5457,
"step": 1164
},
{
"epoch": 2.6106442577030813,
"grad_norm": 0.39959385991096497,
"learning_rate": 6.477880936537521e-05,
"loss": 0.4687,
"step": 1165
},
{
"epoch": 2.6128851540616247,
"grad_norm": 0.45105481147766113,
"learning_rate": 6.459579590623763e-05,
"loss": 0.4732,
"step": 1166
},
{
"epoch": 2.615126050420168,
"grad_norm": 0.4361535906791687,
"learning_rate": 6.441291792341053e-05,
"loss": 0.5406,
"step": 1167
},
{
"epoch": 2.6173669467787115,
"grad_norm": 0.4421353340148926,
"learning_rate": 6.423017611668745e-05,
"loss": 0.4506,
"step": 1168
},
{
"epoch": 2.619607843137255,
"grad_norm": 0.40406641364097595,
"learning_rate": 6.40475711853409e-05,
"loss": 0.512,
"step": 1169
},
{
"epoch": 2.6218487394957983,
"grad_norm": 0.4766140878200531,
"learning_rate": 6.386510382811962e-05,
"loss": 0.4282,
"step": 1170
},
{
"epoch": 2.6240896358543417,
"grad_norm": 0.4143347442150116,
"learning_rate": 6.368277474324587e-05,
"loss": 0.4387,
"step": 1171
},
{
"epoch": 2.626330532212885,
"grad_norm": 0.42166438698768616,
"learning_rate": 6.350058462841283e-05,
"loss": 0.4871,
"step": 1172
},
{
"epoch": 2.6285714285714286,
"grad_norm": 0.4512467086315155,
"learning_rate": 6.331853418078189e-05,
"loss": 0.4713,
"step": 1173
},
{
"epoch": 2.630812324929972,
"grad_norm": 0.4364948272705078,
"learning_rate": 6.313662409698004e-05,
"loss": 0.5566,
"step": 1174
},
{
"epoch": 2.6330532212885154,
"grad_norm": 0.4114439785480499,
"learning_rate": 6.29548550730971e-05,
"loss": 0.442,
"step": 1175
},
{
"epoch": 2.635294117647059,
"grad_norm": 0.4442448914051056,
"learning_rate": 6.277322780468316e-05,
"loss": 0.4886,
"step": 1176
},
{
"epoch": 2.637535014005602,
"grad_norm": 0.44406870007514954,
"learning_rate": 6.259174298674586e-05,
"loss": 0.4747,
"step": 1177
},
{
"epoch": 2.6397759103641456,
"grad_norm": 0.4712062478065491,
"learning_rate": 6.241040131374769e-05,
"loss": 0.5397,
"step": 1178
},
{
"epoch": 2.642016806722689,
"grad_norm": 0.4444422125816345,
"learning_rate": 6.22292034796035e-05,
"loss": 0.4413,
"step": 1179
},
{
"epoch": 2.6442577030812324,
"grad_norm": 0.43544045090675354,
"learning_rate": 6.204815017767767e-05,
"loss": 0.4931,
"step": 1180
},
{
"epoch": 2.646498599439776,
"grad_norm": 0.4698467254638672,
"learning_rate": 6.186724210078148e-05,
"loss": 0.5931,
"step": 1181
},
{
"epoch": 2.6487394957983192,
"grad_norm": 0.4257587194442749,
"learning_rate": 6.168647994117058e-05,
"loss": 0.4852,
"step": 1182
},
{
"epoch": 2.6509803921568627,
"grad_norm": 0.44725215435028076,
"learning_rate": 6.150586439054215e-05,
"loss": 0.5422,
"step": 1183
},
{
"epoch": 2.653221288515406,
"grad_norm": 0.4267517626285553,
"learning_rate": 6.132539614003249e-05,
"loss": 0.5024,
"step": 1184
},
{
"epoch": 2.6554621848739495,
"grad_norm": 0.44710227847099304,
"learning_rate": 6.114507588021412e-05,
"loss": 0.5129,
"step": 1185
},
{
"epoch": 2.657703081232493,
"grad_norm": 0.40692219138145447,
"learning_rate": 6.096490430109343e-05,
"loss": 0.4922,
"step": 1186
},
{
"epoch": 2.6599439775910363,
"grad_norm": 0.4133314788341522,
"learning_rate": 6.078488209210769e-05,
"loss": 0.5757,
"step": 1187
},
{
"epoch": 2.6621848739495797,
"grad_norm": 0.4211864471435547,
"learning_rate": 6.060500994212271e-05,
"loss": 0.467,
"step": 1188
},
{
"epoch": 2.664425770308123,
"grad_norm": 0.4237733483314514,
"learning_rate": 6.042528853943004e-05,
"loss": 0.4428,
"step": 1189
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.41876572370529175,
"learning_rate": 6.024571857174443e-05,
"loss": 0.4409,
"step": 1190
},
{
"epoch": 2.66890756302521,
"grad_norm": 0.45893558859825134,
"learning_rate": 6.006630072620113e-05,
"loss": 0.5965,
"step": 1191
},
{
"epoch": 2.6711484593837533,
"grad_norm": 0.5006417036056519,
"learning_rate": 5.9887035689353285e-05,
"loss": 0.492,
"step": 1192
},
{
"epoch": 2.673389355742297,
"grad_norm": 0.4403904676437378,
"learning_rate": 5.9707924147169315e-05,
"loss": 0.5206,
"step": 1193
},
{
"epoch": 2.6756302521008406,
"grad_norm": 0.4360312819480896,
"learning_rate": 5.952896678503025e-05,
"loss": 0.4696,
"step": 1194
},
{
"epoch": 2.677871148459384,
"grad_norm": 0.4301529824733734,
"learning_rate": 5.935016428772721e-05,
"loss": 0.5675,
"step": 1195
},
{
"epoch": 2.6801120448179274,
"grad_norm": 0.42791324853897095,
"learning_rate": 5.9171517339458646e-05,
"loss": 0.4538,
"step": 1196
},
{
"epoch": 2.682352941176471,
"grad_norm": 0.43683886528015137,
"learning_rate": 5.899302662382785e-05,
"loss": 0.5144,
"step": 1197
},
{
"epoch": 2.6845938375350142,
"grad_norm": 0.419698029756546,
"learning_rate": 5.88146928238402e-05,
"loss": 0.4614,
"step": 1198
},
{
"epoch": 2.6868347338935576,
"grad_norm": 0.48740556836128235,
"learning_rate": 5.863651662190075e-05,
"loss": 0.4979,
"step": 1199
},
{
"epoch": 2.689075630252101,
"grad_norm": 0.4334785044193268,
"learning_rate": 5.845849869981137e-05,
"loss": 0.6011,
"step": 1200
},
{
"epoch": 2.6913165266106445,
"grad_norm": 0.450212299823761,
"learning_rate": 5.828063973876834e-05,
"loss": 0.4903,
"step": 1201
},
{
"epoch": 2.693557422969188,
"grad_norm": 0.4369335472583771,
"learning_rate": 5.81029404193596e-05,
"loss": 0.4872,
"step": 1202
},
{
"epoch": 2.6957983193277313,
"grad_norm": 0.4307531714439392,
"learning_rate": 5.792540142156233e-05,
"loss": 0.4272,
"step": 1203
},
{
"epoch": 2.6980392156862747,
"grad_norm": 0.44111305475234985,
"learning_rate": 5.7748023424740083e-05,
"loss": 0.5081,
"step": 1204
},
{
"epoch": 2.700280112044818,
"grad_norm": 0.4529974162578583,
"learning_rate": 5.757080710764041e-05,
"loss": 0.4823,
"step": 1205
},
{
"epoch": 2.7025210084033615,
"grad_norm": 0.4316770136356354,
"learning_rate": 5.739375314839226e-05,
"loss": 0.4983,
"step": 1206
},
{
"epoch": 2.704761904761905,
"grad_norm": 0.4267878830432892,
"learning_rate": 5.721686222450311e-05,
"loss": 0.4634,
"step": 1207
},
{
"epoch": 2.7070028011204483,
"grad_norm": 0.4317241907119751,
"learning_rate": 5.7040135012856786e-05,
"loss": 0.481,
"step": 1208
},
{
"epoch": 2.7092436974789917,
"grad_norm": 0.45203784108161926,
"learning_rate": 5.686357218971049e-05,
"loss": 0.5158,
"step": 1209
},
{
"epoch": 2.711484593837535,
"grad_norm": 0.4764373004436493,
"learning_rate": 5.668717443069249e-05,
"loss": 0.6243,
"step": 1210
},
{
"epoch": 2.7137254901960786,
"grad_norm": 0.4173125922679901,
"learning_rate": 5.6510942410799415e-05,
"loss": 0.5189,
"step": 1211
},
{
"epoch": 2.715966386554622,
"grad_norm": 0.43439099192619324,
"learning_rate": 5.633487680439361e-05,
"loss": 0.4766,
"step": 1212
},
{
"epoch": 2.7182072829131654,
"grad_norm": 0.46178168058395386,
"learning_rate": 5.615897828520074e-05,
"loss": 0.5407,
"step": 1213
},
{
"epoch": 2.720448179271709,
"grad_norm": 0.42440083622932434,
"learning_rate": 5.598324752630695e-05,
"loss": 0.471,
"step": 1214
},
{
"epoch": 2.722689075630252,
"grad_norm": 0.47547709941864014,
"learning_rate": 5.580768520015658e-05,
"loss": 0.6282,
"step": 1215
},
{
"epoch": 2.7249299719887956,
"grad_norm": 0.40208446979522705,
"learning_rate": 5.5632291978549444e-05,
"loss": 0.5341,
"step": 1216
},
{
"epoch": 2.727170868347339,
"grad_norm": 0.41205033659935,
"learning_rate": 5.545706853263814e-05,
"loss": 0.4511,
"step": 1217
},
{
"epoch": 2.7294117647058824,
"grad_norm": 0.43807148933410645,
"learning_rate": 5.528201553292578e-05,
"loss": 0.513,
"step": 1218
},
{
"epoch": 2.731652661064426,
"grad_norm": 0.4304596781730652,
"learning_rate": 5.5107133649263077e-05,
"loss": 0.5122,
"step": 1219
},
{
"epoch": 2.7338935574229692,
"grad_norm": 0.4425150454044342,
"learning_rate": 5.493242355084609e-05,
"loss": 0.5225,
"step": 1220
},
{
"epoch": 2.7361344537815127,
"grad_norm": 0.47015315294265747,
"learning_rate": 5.4757885906213525e-05,
"loss": 0.5872,
"step": 1221
},
{
"epoch": 2.738375350140056,
"grad_norm": 0.4287366271018982,
"learning_rate": 5.4583521383244076e-05,
"loss": 0.5247,
"step": 1222
},
{
"epoch": 2.7406162464985995,
"grad_norm": 0.4516020715236664,
"learning_rate": 5.440933064915414e-05,
"loss": 0.5552,
"step": 1223
},
{
"epoch": 2.742857142857143,
"grad_norm": 0.46535906195640564,
"learning_rate": 5.4235314370494905e-05,
"loss": 0.5469,
"step": 1224
},
{
"epoch": 2.7450980392156863,
"grad_norm": 0.41264423727989197,
"learning_rate": 5.4061473213150146e-05,
"loss": 0.4707,
"step": 1225
},
{
"epoch": 2.7473389355742297,
"grad_norm": 0.4428427815437317,
"learning_rate": 5.3887807842333536e-05,
"loss": 0.462,
"step": 1226
},
{
"epoch": 2.749579831932773,
"grad_norm": 0.4278802275657654,
"learning_rate": 5.371431892258596e-05,
"loss": 0.45,
"step": 1227
},
{
"epoch": 2.7518207282913165,
"grad_norm": 0.4178737699985504,
"learning_rate": 5.354100711777317e-05,
"loss": 0.521,
"step": 1228
},
{
"epoch": 2.75406162464986,
"grad_norm": 0.4378300607204437,
"learning_rate": 5.336787309108324e-05,
"loss": 0.5341,
"step": 1229
},
{
"epoch": 2.7563025210084033,
"grad_norm": 0.428938627243042,
"learning_rate": 5.319491750502383e-05,
"loss": 0.5213,
"step": 1230
},
{
"epoch": 2.7585434173669467,
"grad_norm": 0.44063812494277954,
"learning_rate": 5.302214102141991e-05,
"loss": 0.4556,
"step": 1231
},
{
"epoch": 2.76078431372549,
"grad_norm": 0.4765867590904236,
"learning_rate": 5.2849544301411094e-05,
"loss": 0.4859,
"step": 1232
},
{
"epoch": 2.7630252100840336,
"grad_norm": 0.4411394000053406,
"learning_rate": 5.2677128005449e-05,
"loss": 0.4559,
"step": 1233
},
{
"epoch": 2.765266106442577,
"grad_norm": 0.4739338755607605,
"learning_rate": 5.2504892793295e-05,
"loss": 0.5517,
"step": 1234
},
{
"epoch": 2.7675070028011204,
"grad_norm": 0.40744367241859436,
"learning_rate": 5.233283932401741e-05,
"loss": 0.4635,
"step": 1235
},
{
"epoch": 2.769747899159664,
"grad_norm": 0.4491940140724182,
"learning_rate": 5.2160968255989176e-05,
"loss": 0.4626,
"step": 1236
},
{
"epoch": 2.771988795518207,
"grad_norm": 0.44875219464302063,
"learning_rate": 5.1989280246885275e-05,
"loss": 0.4961,
"step": 1237
},
{
"epoch": 2.7742296918767506,
"grad_norm": 0.4800289273262024,
"learning_rate": 5.181777595368009e-05,
"loss": 0.5092,
"step": 1238
},
{
"epoch": 2.776470588235294,
"grad_norm": 0.4514055550098419,
"learning_rate": 5.16464560326452e-05,
"loss": 0.4889,
"step": 1239
},
{
"epoch": 2.7787114845938374,
"grad_norm": 0.44190871715545654,
"learning_rate": 5.1475321139346456e-05,
"loss": 0.5033,
"step": 1240
},
{
"epoch": 2.780952380952381,
"grad_norm": 0.4622608423233032,
"learning_rate": 5.130437192864182e-05,
"loss": 0.4663,
"step": 1241
},
{
"epoch": 2.7831932773109243,
"grad_norm": 0.45036548376083374,
"learning_rate": 5.113360905467874e-05,
"loss": 0.5187,
"step": 1242
},
{
"epoch": 2.7854341736694677,
"grad_norm": 0.4636622667312622,
"learning_rate": 5.096303317089155e-05,
"loss": 0.4426,
"step": 1243
},
{
"epoch": 2.787675070028011,
"grad_norm": 0.4120221734046936,
"learning_rate": 5.079264492999915e-05,
"loss": 0.4223,
"step": 1244
},
{
"epoch": 2.7899159663865545,
"grad_norm": 0.44056281447410583,
"learning_rate": 5.062244498400228e-05,
"loss": 0.4905,
"step": 1245
},
{
"epoch": 2.792156862745098,
"grad_norm": 0.44559818506240845,
"learning_rate": 5.045243398418131e-05,
"loss": 0.5281,
"step": 1246
},
{
"epoch": 2.7943977591036413,
"grad_norm": 0.4363376498222351,
"learning_rate": 5.028261258109355e-05,
"loss": 0.4977,
"step": 1247
},
{
"epoch": 2.7966386554621847,
"grad_norm": 0.45672979950904846,
"learning_rate": 5.011298142457069e-05,
"loss": 0.5212,
"step": 1248
},
{
"epoch": 2.798879551820728,
"grad_norm": 0.4205709993839264,
"learning_rate": 4.994354116371659e-05,
"loss": 0.5179,
"step": 1249
},
{
"epoch": 2.8011204481792715,
"grad_norm": 0.4712340831756592,
"learning_rate": 4.9774292446904605e-05,
"loss": 0.5128,
"step": 1250
},
{
"epoch": 2.803361344537815,
"grad_norm": 0.4125993251800537,
"learning_rate": 4.9605235921775006e-05,
"loss": 0.5666,
"step": 1251
},
{
"epoch": 2.8056022408963583,
"grad_norm": 0.4541053771972656,
"learning_rate": 4.9436372235232816e-05,
"loss": 0.5223,
"step": 1252
},
{
"epoch": 2.8078431372549018,
"grad_norm": 0.42332640290260315,
"learning_rate": 4.9267702033444996e-05,
"loss": 0.4782,
"step": 1253
},
{
"epoch": 2.810084033613445,
"grad_norm": 0.4206790328025818,
"learning_rate": 4.9099225961838215e-05,
"loss": 0.4846,
"step": 1254
},
{
"epoch": 2.8123249299719886,
"grad_norm": 0.47637486457824707,
"learning_rate": 4.893094466509629e-05,
"loss": 0.5505,
"step": 1255
},
{
"epoch": 2.814565826330532,
"grad_norm": 0.4126443564891815,
"learning_rate": 4.876285878715764e-05,
"loss": 0.436,
"step": 1256
},
{
"epoch": 2.8168067226890754,
"grad_norm": 0.4458106756210327,
"learning_rate": 4.859496897121303e-05,
"loss": 0.5215,
"step": 1257
},
{
"epoch": 2.819047619047619,
"grad_norm": 0.4408244788646698,
"learning_rate": 4.8427275859702833e-05,
"loss": 0.5526,
"step": 1258
},
{
"epoch": 2.821288515406162,
"grad_norm": 0.47083553671836853,
"learning_rate": 4.825978009431484e-05,
"loss": 0.5461,
"step": 1259
},
{
"epoch": 2.8235294117647056,
"grad_norm": 0.44672128558158875,
"learning_rate": 4.809248231598168e-05,
"loss": 0.5313,
"step": 1260
},
{
"epoch": 2.825770308123249,
"grad_norm": 0.4665304124355316,
"learning_rate": 4.792538316487824e-05,
"loss": 0.4801,
"step": 1261
},
{
"epoch": 2.8280112044817924,
"grad_norm": 0.4424595832824707,
"learning_rate": 4.775848328041956e-05,
"loss": 0.5804,
"step": 1262
},
{
"epoch": 2.8302521008403363,
"grad_norm": 0.4141983389854431,
"learning_rate": 4.7591783301257955e-05,
"loss": 0.5124,
"step": 1263
},
{
"epoch": 2.8324929971988797,
"grad_norm": 0.44667792320251465,
"learning_rate": 4.742528386528093e-05,
"loss": 0.5156,
"step": 1264
},
{
"epoch": 2.834733893557423,
"grad_norm": 0.43313977122306824,
"learning_rate": 4.725898560960862e-05,
"loss": 0.5087,
"step": 1265
},
{
"epoch": 2.8369747899159665,
"grad_norm": 0.4425276219844818,
"learning_rate": 4.709288917059118e-05,
"loss": 0.4842,
"step": 1266
},
{
"epoch": 2.83921568627451,
"grad_norm": 0.4620510935783386,
"learning_rate": 4.6926995183806644e-05,
"loss": 0.5418,
"step": 1267
},
{
"epoch": 2.8414565826330533,
"grad_norm": 0.4190068542957306,
"learning_rate": 4.676130428405834e-05,
"loss": 0.4785,
"step": 1268
},
{
"epoch": 2.8436974789915967,
"grad_norm": 0.43840011954307556,
"learning_rate": 4.6595817105372354e-05,
"loss": 0.482,
"step": 1269
},
{
"epoch": 2.84593837535014,
"grad_norm": 0.4305482506752014,
"learning_rate": 4.6430534280995376e-05,
"loss": 0.4899,
"step": 1270
},
{
"epoch": 2.8481792717086836,
"grad_norm": 0.4740334153175354,
"learning_rate": 4.626545644339202e-05,
"loss": 0.5599,
"step": 1271
},
{
"epoch": 2.850420168067227,
"grad_norm": 0.4739833474159241,
"learning_rate": 4.610058422424248e-05,
"loss": 0.5854,
"step": 1272
},
{
"epoch": 2.8526610644257704,
"grad_norm": 0.42749541997909546,
"learning_rate": 4.593591825444028e-05,
"loss": 0.458,
"step": 1273
},
{
"epoch": 2.854901960784314,
"grad_norm": 0.4204394221305847,
"learning_rate": 4.577145916408955e-05,
"loss": 0.4389,
"step": 1274
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.4521435499191284,
"learning_rate": 4.560720758250289e-05,
"loss": 0.4788,
"step": 1275
},
{
"epoch": 2.8593837535014006,
"grad_norm": 0.46246248483657837,
"learning_rate": 4.544316413819888e-05,
"loss": 0.5504,
"step": 1276
},
{
"epoch": 2.861624649859944,
"grad_norm": 0.4419306516647339,
"learning_rate": 4.5279329458899525e-05,
"loss": 0.4313,
"step": 1277
},
{
"epoch": 2.8638655462184874,
"grad_norm": 0.4536859393119812,
"learning_rate": 4.5115704171528105e-05,
"loss": 0.5285,
"step": 1278
},
{
"epoch": 2.866106442577031,
"grad_norm": 0.4236634075641632,
"learning_rate": 4.495228890220655e-05,
"loss": 0.4777,
"step": 1279
},
{
"epoch": 2.8683473389355743,
"grad_norm": 0.4374547302722931,
"learning_rate": 4.478908427625323e-05,
"loss": 0.4361,
"step": 1280
},
{
"epoch": 2.8705882352941177,
"grad_norm": 0.47177979350090027,
"learning_rate": 4.462609091818043e-05,
"loss": 0.4774,
"step": 1281
},
{
"epoch": 2.872829131652661,
"grad_norm": 0.4400593936443329,
"learning_rate": 4.446330945169197e-05,
"loss": 0.5595,
"step": 1282
},
{
"epoch": 2.8750700280112045,
"grad_norm": 0.45247647166252136,
"learning_rate": 4.430074049968097e-05,
"loss": 0.4463,
"step": 1283
},
{
"epoch": 2.877310924369748,
"grad_norm": 0.45194771885871887,
"learning_rate": 4.41383846842272e-05,
"loss": 0.5107,
"step": 1284
},
{
"epoch": 2.8795518207282913,
"grad_norm": 0.46045300364494324,
"learning_rate": 4.397624262659494e-05,
"loss": 0.5606,
"step": 1285
},
{
"epoch": 2.8817927170868347,
"grad_norm": 0.4386404752731323,
"learning_rate": 4.381431494723056e-05,
"loss": 0.5098,
"step": 1286
},
{
"epoch": 2.884033613445378,
"grad_norm": 0.4073896110057831,
"learning_rate": 4.365260226575996e-05,
"loss": 0.4859,
"step": 1287
},
{
"epoch": 2.8862745098039215,
"grad_norm": 0.43394744396209717,
"learning_rate": 4.349110520098644e-05,
"loss": 0.4872,
"step": 1288
},
{
"epoch": 2.888515406162465,
"grad_norm": 0.4351210296154022,
"learning_rate": 4.332982437088825e-05,
"loss": 0.5044,
"step": 1289
},
{
"epoch": 2.8907563025210083,
"grad_norm": 0.4198545217514038,
"learning_rate": 4.316876039261609e-05,
"loss": 0.4236,
"step": 1290
},
{
"epoch": 2.8929971988795518,
"grad_norm": 0.4112950265407562,
"learning_rate": 4.3007913882491e-05,
"loss": 0.4094,
"step": 1291
},
{
"epoch": 2.895238095238095,
"grad_norm": 0.42472249269485474,
"learning_rate": 4.284728545600174e-05,
"loss": 0.5063,
"step": 1292
},
{
"epoch": 2.8974789915966386,
"grad_norm": 0.46323516964912415,
"learning_rate": 4.268687572780262e-05,
"loss": 0.4832,
"step": 1293
},
{
"epoch": 2.899719887955182,
"grad_norm": 0.4436538517475128,
"learning_rate": 4.252668531171117e-05,
"loss": 0.4717,
"step": 1294
},
{
"epoch": 2.9019607843137254,
"grad_norm": 0.45396503806114197,
"learning_rate": 4.236671482070551e-05,
"loss": 0.5104,
"step": 1295
},
{
"epoch": 2.904201680672269,
"grad_norm": 0.3761095106601715,
"learning_rate": 4.220696486692242e-05,
"loss": 0.4395,
"step": 1296
},
{
"epoch": 2.906442577030812,
"grad_norm": 0.44329094886779785,
"learning_rate": 4.204743606165458e-05,
"loss": 0.5193,
"step": 1297
},
{
"epoch": 2.9086834733893556,
"grad_norm": 0.42952483892440796,
"learning_rate": 4.18881290153486e-05,
"loss": 0.4772,
"step": 1298
},
{
"epoch": 2.910924369747899,
"grad_norm": 0.44214001297950745,
"learning_rate": 4.172904433760245e-05,
"loss": 0.642,
"step": 1299
},
{
"epoch": 2.9131652661064424,
"grad_norm": 0.434265673160553,
"learning_rate": 4.1570182637163155e-05,
"loss": 0.5146,
"step": 1300
},
{
"epoch": 2.915406162464986,
"grad_norm": 0.44314271211624146,
"learning_rate": 4.141154452192458e-05,
"loss": 0.5096,
"step": 1301
},
{
"epoch": 2.9176470588235293,
"grad_norm": 0.434539794921875,
"learning_rate": 4.125313059892494e-05,
"loss": 0.5027,
"step": 1302
},
{
"epoch": 2.9198879551820727,
"grad_norm": 0.45350441336631775,
"learning_rate": 4.109494147434464e-05,
"loss": 0.5134,
"step": 1303
},
{
"epoch": 2.9221288515406165,
"grad_norm": 0.4504285156726837,
"learning_rate": 4.093697775350388e-05,
"loss": 0.4712,
"step": 1304
},
{
"epoch": 2.92436974789916,
"grad_norm": 0.43399813771247864,
"learning_rate": 4.077924004086025e-05,
"loss": 0.466,
"step": 1305
},
{
"epoch": 2.9266106442577033,
"grad_norm": 0.45415496826171875,
"learning_rate": 4.0621728940006646e-05,
"loss": 0.4261,
"step": 1306
},
{
"epoch": 2.9288515406162468,
"grad_norm": 0.42973750829696655,
"learning_rate": 4.0464445053668666e-05,
"loss": 0.454,
"step": 1307
},
{
"epoch": 2.93109243697479,
"grad_norm": 0.4402119219303131,
"learning_rate": 4.0307388983702554e-05,
"loss": 0.5414,
"step": 1308
},
{
"epoch": 2.9333333333333336,
"grad_norm": 0.4354257583618164,
"learning_rate": 4.015056133109284e-05,
"loss": 0.487,
"step": 1309
},
{
"epoch": 2.935574229691877,
"grad_norm": 0.4353284239768982,
"learning_rate": 3.999396269594986e-05,
"loss": 0.4894,
"step": 1310
},
{
"epoch": 2.9378151260504204,
"grad_norm": 0.4558074474334717,
"learning_rate": 3.9837593677507726e-05,
"loss": 0.509,
"step": 1311
},
{
"epoch": 2.940056022408964,
"grad_norm": 0.4599749743938446,
"learning_rate": 3.9681454874121905e-05,
"loss": 0.5366,
"step": 1312
},
{
"epoch": 2.942296918767507,
"grad_norm": 0.4445565342903137,
"learning_rate": 3.9525546883266806e-05,
"loss": 0.4795,
"step": 1313
},
{
"epoch": 2.9445378151260506,
"grad_norm": 0.4135204255580902,
"learning_rate": 3.9369870301533784e-05,
"loss": 0.474,
"step": 1314
},
{
"epoch": 2.946778711484594,
"grad_norm": 0.4500633776187897,
"learning_rate": 3.921442572462856e-05,
"loss": 0.5392,
"step": 1315
},
{
"epoch": 2.9490196078431374,
"grad_norm": 0.4244655668735504,
"learning_rate": 3.905921374736918e-05,
"loss": 0.4312,
"step": 1316
},
{
"epoch": 2.951260504201681,
"grad_norm": 0.4241161048412323,
"learning_rate": 3.890423496368357e-05,
"loss": 0.4528,
"step": 1317
},
{
"epoch": 2.9535014005602243,
"grad_norm": 0.4363906681537628,
"learning_rate": 3.8749489966607296e-05,
"loss": 0.5197,
"step": 1318
},
{
"epoch": 2.9557422969187677,
"grad_norm": 0.4631204903125763,
"learning_rate": 3.85949793482814e-05,
"loss": 0.5183,
"step": 1319
},
{
"epoch": 2.957983193277311,
"grad_norm": 0.4488946795463562,
"learning_rate": 3.8440703699950075e-05,
"loss": 0.575,
"step": 1320
},
{
"epoch": 2.9602240896358545,
"grad_norm": 0.451129674911499,
"learning_rate": 3.828666361195827e-05,
"loss": 0.4806,
"step": 1321
},
{
"epoch": 2.962464985994398,
"grad_norm": 0.428025484085083,
"learning_rate": 3.813285967374969e-05,
"loss": 0.4556,
"step": 1322
},
{
"epoch": 2.9647058823529413,
"grad_norm": 0.4540148675441742,
"learning_rate": 3.7979292473864257e-05,
"loss": 0.4273,
"step": 1323
},
{
"epoch": 2.9669467787114847,
"grad_norm": 0.45857563614845276,
"learning_rate": 3.782596259993611e-05,
"loss": 0.5061,
"step": 1324
},
{
"epoch": 2.969187675070028,
"grad_norm": 0.4698602557182312,
"learning_rate": 3.767287063869125e-05,
"loss": 0.5311,
"step": 1325
},
{
"epoch": 2.9714285714285715,
"grad_norm": 0.44710850715637207,
"learning_rate": 3.752001717594517e-05,
"loss": 0.5881,
"step": 1326
},
{
"epoch": 2.973669467787115,
"grad_norm": 0.5294246673583984,
"learning_rate": 3.736740279660087e-05,
"loss": 0.5108,
"step": 1327
},
{
"epoch": 2.9759103641456583,
"grad_norm": 0.46404391527175903,
"learning_rate": 3.7215028084646386e-05,
"loss": 0.5784,
"step": 1328
},
{
"epoch": 2.9781512605042018,
"grad_norm": 0.47567376494407654,
"learning_rate": 3.706289362315271e-05,
"loss": 0.5864,
"step": 1329
},
{
"epoch": 2.980392156862745,
"grad_norm": 0.4270058274269104,
"learning_rate": 3.691099999427152e-05,
"loss": 0.4804,
"step": 1330
},
{
"epoch": 2.9826330532212886,
"grad_norm": 0.47204577922821045,
"learning_rate": 3.675934777923283e-05,
"loss": 0.578,
"step": 1331
},
{
"epoch": 2.984873949579832,
"grad_norm": 0.4929749071598053,
"learning_rate": 3.660793755834298e-05,
"loss": 0.5297,
"step": 1332
},
{
"epoch": 2.9871148459383754,
"grad_norm": 0.45627254247665405,
"learning_rate": 3.645676991098227e-05,
"loss": 0.4705,
"step": 1333
},
{
"epoch": 2.989355742296919,
"grad_norm": 0.48187679052352905,
"learning_rate": 3.630584541560272e-05,
"loss": 0.4823,
"step": 1334
},
{
"epoch": 2.991596638655462,
"grad_norm": 0.42817258834838867,
"learning_rate": 3.615516464972604e-05,
"loss": 0.3985,
"step": 1335
},
{
"epoch": 2.9938375350140056,
"grad_norm": 0.4278976023197174,
"learning_rate": 3.600472818994114e-05,
"loss": 0.4891,
"step": 1336
},
{
"epoch": 2.996078431372549,
"grad_norm": 0.42993059754371643,
"learning_rate": 3.585453661190218e-05,
"loss": 0.4632,
"step": 1337
},
{
"epoch": 2.9983193277310924,
"grad_norm": 0.4411959648132324,
"learning_rate": 3.5704590490326295e-05,
"loss": 0.4865,
"step": 1338
}
],
"logging_steps": 1,
"max_steps": 1784,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 446,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.931866348158976e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}