{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9988795518207283,
"eval_steps": 500,
"global_step": 892,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002240896358543417,
"grad_norm": 0.22982779145240784,
"learning_rate": 1.1235955056179775e-06,
"loss": 1.1973,
"step": 1
},
{
"epoch": 0.004481792717086834,
"grad_norm": 0.2098255604505539,
"learning_rate": 2.247191011235955e-06,
"loss": 1.2685,
"step": 2
},
{
"epoch": 0.0067226890756302525,
"grad_norm": 0.2404344230890274,
"learning_rate": 3.3707865168539327e-06,
"loss": 1.2292,
"step": 3
},
{
"epoch": 0.008963585434173669,
"grad_norm": 0.23824891448020935,
"learning_rate": 4.49438202247191e-06,
"loss": 1.1901,
"step": 4
},
{
"epoch": 0.011204481792717087,
"grad_norm": 0.22885526716709137,
"learning_rate": 5.617977528089888e-06,
"loss": 1.2695,
"step": 5
},
{
"epoch": 0.013445378151260505,
"grad_norm": 0.2576353847980499,
"learning_rate": 6.741573033707865e-06,
"loss": 1.2872,
"step": 6
},
{
"epoch": 0.01568627450980392,
"grad_norm": 0.2190636247396469,
"learning_rate": 7.865168539325843e-06,
"loss": 1.1952,
"step": 7
},
{
"epoch": 0.017927170868347338,
"grad_norm": 0.21357473731040955,
"learning_rate": 8.98876404494382e-06,
"loss": 1.2597,
"step": 8
},
{
"epoch": 0.020168067226890758,
"grad_norm": 0.24502670764923096,
"learning_rate": 1.0112359550561798e-05,
"loss": 1.279,
"step": 9
},
{
"epoch": 0.022408963585434174,
"grad_norm": 0.25532686710357666,
"learning_rate": 1.1235955056179776e-05,
"loss": 1.2163,
"step": 10
},
{
"epoch": 0.02464985994397759,
"grad_norm": 0.250683456659317,
"learning_rate": 1.2359550561797752e-05,
"loss": 1.2735,
"step": 11
},
{
"epoch": 0.02689075630252101,
"grad_norm": 0.2523595988750458,
"learning_rate": 1.348314606741573e-05,
"loss": 1.3471,
"step": 12
},
{
"epoch": 0.029131652661064426,
"grad_norm": 0.23875640332698822,
"learning_rate": 1.4606741573033709e-05,
"loss": 1.2595,
"step": 13
},
{
"epoch": 0.03137254901960784,
"grad_norm": 0.24998927116394043,
"learning_rate": 1.5730337078651687e-05,
"loss": 1.2142,
"step": 14
},
{
"epoch": 0.03361344537815126,
"grad_norm": 0.2783028483390808,
"learning_rate": 1.6853932584269665e-05,
"loss": 1.185,
"step": 15
},
{
"epoch": 0.035854341736694675,
"grad_norm": 0.2796604633331299,
"learning_rate": 1.797752808988764e-05,
"loss": 1.226,
"step": 16
},
{
"epoch": 0.0380952380952381,
"grad_norm": 0.2899184823036194,
"learning_rate": 1.9101123595505618e-05,
"loss": 1.1672,
"step": 17
},
{
"epoch": 0.040336134453781515,
"grad_norm": 0.2601150572299957,
"learning_rate": 2.0224719101123596e-05,
"loss": 1.1127,
"step": 18
},
{
"epoch": 0.04257703081232493,
"grad_norm": 0.3052348792552948,
"learning_rate": 2.1348314606741574e-05,
"loss": 1.2865,
"step": 19
},
{
"epoch": 0.04481792717086835,
"grad_norm": 0.24783451855182648,
"learning_rate": 2.2471910112359552e-05,
"loss": 1.0888,
"step": 20
},
{
"epoch": 0.047058823529411764,
"grad_norm": 0.24703693389892578,
"learning_rate": 2.359550561797753e-05,
"loss": 1.059,
"step": 21
},
{
"epoch": 0.04929971988795518,
"grad_norm": 0.2503679096698761,
"learning_rate": 2.4719101123595505e-05,
"loss": 1.1602,
"step": 22
},
{
"epoch": 0.0515406162464986,
"grad_norm": 0.2639842927455902,
"learning_rate": 2.5842696629213486e-05,
"loss": 1.0911,
"step": 23
},
{
"epoch": 0.05378151260504202,
"grad_norm": 0.2942507266998291,
"learning_rate": 2.696629213483146e-05,
"loss": 1.0777,
"step": 24
},
{
"epoch": 0.056022408963585436,
"grad_norm": 0.28088828921318054,
"learning_rate": 2.8089887640449443e-05,
"loss": 1.1111,
"step": 25
},
{
"epoch": 0.05826330532212885,
"grad_norm": 0.26791295409202576,
"learning_rate": 2.9213483146067417e-05,
"loss": 1.0016,
"step": 26
},
{
"epoch": 0.06050420168067227,
"grad_norm": 0.2685791850090027,
"learning_rate": 3.0337078651685396e-05,
"loss": 1.1085,
"step": 27
},
{
"epoch": 0.06274509803921569,
"grad_norm": 0.2627420127391815,
"learning_rate": 3.1460674157303374e-05,
"loss": 1.0704,
"step": 28
},
{
"epoch": 0.06498599439775911,
"grad_norm": 0.3000424802303314,
"learning_rate": 3.258426966292135e-05,
"loss": 1.0786,
"step": 29
},
{
"epoch": 0.06722689075630252,
"grad_norm": 0.3018706738948822,
"learning_rate": 3.370786516853933e-05,
"loss": 1.0423,
"step": 30
},
{
"epoch": 0.06946778711484594,
"grad_norm": 0.27565667033195496,
"learning_rate": 3.483146067415731e-05,
"loss": 0.9834,
"step": 31
},
{
"epoch": 0.07170868347338935,
"grad_norm": 0.30244842171669006,
"learning_rate": 3.595505617977528e-05,
"loss": 1.0927,
"step": 32
},
{
"epoch": 0.07394957983193277,
"grad_norm": 0.3654678165912628,
"learning_rate": 3.7078651685393264e-05,
"loss": 0.9812,
"step": 33
},
{
"epoch": 0.0761904761904762,
"grad_norm": 0.36483272910118103,
"learning_rate": 3.8202247191011236e-05,
"loss": 0.9693,
"step": 34
},
{
"epoch": 0.0784313725490196,
"grad_norm": 0.2949022054672241,
"learning_rate": 3.9325842696629214e-05,
"loss": 0.9647,
"step": 35
},
{
"epoch": 0.08067226890756303,
"grad_norm": 0.36239683628082275,
"learning_rate": 4.044943820224719e-05,
"loss": 0.9725,
"step": 36
},
{
"epoch": 0.08291316526610644,
"grad_norm": 0.32511067390441895,
"learning_rate": 4.157303370786517e-05,
"loss": 1.0136,
"step": 37
},
{
"epoch": 0.08515406162464986,
"grad_norm": 0.32111090421676636,
"learning_rate": 4.269662921348315e-05,
"loss": 0.9207,
"step": 38
},
{
"epoch": 0.08739495798319327,
"grad_norm": 0.3080519735813141,
"learning_rate": 4.3820224719101126e-05,
"loss": 0.9729,
"step": 39
},
{
"epoch": 0.0896358543417367,
"grad_norm": 0.2933235764503479,
"learning_rate": 4.4943820224719104e-05,
"loss": 0.9403,
"step": 40
},
{
"epoch": 0.09187675070028012,
"grad_norm": 0.3132348358631134,
"learning_rate": 4.606741573033708e-05,
"loss": 0.9587,
"step": 41
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.3013119101524353,
"learning_rate": 4.719101123595506e-05,
"loss": 0.9162,
"step": 42
},
{
"epoch": 0.09635854341736695,
"grad_norm": 0.300485223531723,
"learning_rate": 4.831460674157304e-05,
"loss": 0.8984,
"step": 43
},
{
"epoch": 0.09859943977591036,
"grad_norm": 0.2726304233074188,
"learning_rate": 4.943820224719101e-05,
"loss": 0.987,
"step": 44
},
{
"epoch": 0.10084033613445378,
"grad_norm": 0.2733825445175171,
"learning_rate": 5.0561797752808995e-05,
"loss": 0.9438,
"step": 45
},
{
"epoch": 0.1030812324929972,
"grad_norm": 0.28922319412231445,
"learning_rate": 5.168539325842697e-05,
"loss": 0.8674,
"step": 46
},
{
"epoch": 0.10532212885154062,
"grad_norm": 0.2743085026741028,
"learning_rate": 5.2808988764044944e-05,
"loss": 0.9041,
"step": 47
},
{
"epoch": 0.10756302521008404,
"grad_norm": 0.28649550676345825,
"learning_rate": 5.393258426966292e-05,
"loss": 0.9057,
"step": 48
},
{
"epoch": 0.10980392156862745,
"grad_norm": 0.2877427339553833,
"learning_rate": 5.50561797752809e-05,
"loss": 0.9139,
"step": 49
},
{
"epoch": 0.11204481792717087,
"grad_norm": 0.27738648653030396,
"learning_rate": 5.6179775280898885e-05,
"loss": 0.8518,
"step": 50
},
{
"epoch": 0.11428571428571428,
"grad_norm": 0.2839404046535492,
"learning_rate": 5.730337078651685e-05,
"loss": 0.8634,
"step": 51
},
{
"epoch": 0.1165266106442577,
"grad_norm": 0.2748688757419586,
"learning_rate": 5.8426966292134835e-05,
"loss": 0.9504,
"step": 52
},
{
"epoch": 0.11876750700280111,
"grad_norm": 0.2953556776046753,
"learning_rate": 5.955056179775281e-05,
"loss": 0.861,
"step": 53
},
{
"epoch": 0.12100840336134454,
"grad_norm": 0.2947392165660858,
"learning_rate": 6.067415730337079e-05,
"loss": 0.8578,
"step": 54
},
{
"epoch": 0.12324929971988796,
"grad_norm": 0.3029733896255493,
"learning_rate": 6.179775280898876e-05,
"loss": 0.895,
"step": 55
},
{
"epoch": 0.12549019607843137,
"grad_norm": 0.28483161330223083,
"learning_rate": 6.292134831460675e-05,
"loss": 0.9316,
"step": 56
},
{
"epoch": 0.12773109243697478,
"grad_norm": 0.280499666929245,
"learning_rate": 6.404494382022472e-05,
"loss": 0.8763,
"step": 57
},
{
"epoch": 0.12997198879551822,
"grad_norm": 0.2687634825706482,
"learning_rate": 6.51685393258427e-05,
"loss": 0.9216,
"step": 58
},
{
"epoch": 0.13221288515406163,
"grad_norm": 0.2869422435760498,
"learning_rate": 6.629213483146067e-05,
"loss": 0.8783,
"step": 59
},
{
"epoch": 0.13445378151260504,
"grad_norm": 0.29140859842300415,
"learning_rate": 6.741573033707866e-05,
"loss": 0.9613,
"step": 60
},
{
"epoch": 0.13669467787114845,
"grad_norm": 0.29342207312583923,
"learning_rate": 6.853932584269663e-05,
"loss": 0.982,
"step": 61
},
{
"epoch": 0.13893557422969188,
"grad_norm": 0.29143285751342773,
"learning_rate": 6.966292134831462e-05,
"loss": 0.8857,
"step": 62
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.2944166660308838,
"learning_rate": 7.078651685393259e-05,
"loss": 0.8668,
"step": 63
},
{
"epoch": 0.1434173669467787,
"grad_norm": 0.27430447936058044,
"learning_rate": 7.191011235955056e-05,
"loss": 0.9403,
"step": 64
},
{
"epoch": 0.14565826330532214,
"grad_norm": 0.274800568819046,
"learning_rate": 7.303370786516854e-05,
"loss": 0.8689,
"step": 65
},
{
"epoch": 0.14789915966386555,
"grad_norm": 0.3063383102416992,
"learning_rate": 7.415730337078653e-05,
"loss": 0.9217,
"step": 66
},
{
"epoch": 0.15014005602240896,
"grad_norm": 0.26958584785461426,
"learning_rate": 7.52808988764045e-05,
"loss": 0.949,
"step": 67
},
{
"epoch": 0.1523809523809524,
"grad_norm": 0.3030094504356384,
"learning_rate": 7.640449438202247e-05,
"loss": 0.8814,
"step": 68
},
{
"epoch": 0.1546218487394958,
"grad_norm": 0.26790764927864075,
"learning_rate": 7.752808988764046e-05,
"loss": 0.9814,
"step": 69
},
{
"epoch": 0.1568627450980392,
"grad_norm": 0.28143975138664246,
"learning_rate": 7.865168539325843e-05,
"loss": 0.8667,
"step": 70
},
{
"epoch": 0.15910364145658262,
"grad_norm": 0.32539746165275574,
"learning_rate": 7.97752808988764e-05,
"loss": 1.0709,
"step": 71
},
{
"epoch": 0.16134453781512606,
"grad_norm": 0.28114452958106995,
"learning_rate": 8.089887640449438e-05,
"loss": 0.9253,
"step": 72
},
{
"epoch": 0.16358543417366947,
"grad_norm": 0.2680191993713379,
"learning_rate": 8.202247191011237e-05,
"loss": 0.9345,
"step": 73
},
{
"epoch": 0.16582633053221288,
"grad_norm": 0.2928783893585205,
"learning_rate": 8.314606741573034e-05,
"loss": 0.906,
"step": 74
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.2979438900947571,
"learning_rate": 8.426966292134831e-05,
"loss": 0.9581,
"step": 75
},
{
"epoch": 0.17030812324929973,
"grad_norm": 0.27883946895599365,
"learning_rate": 8.53932584269663e-05,
"loss": 0.9203,
"step": 76
},
{
"epoch": 0.17254901960784313,
"grad_norm": 0.3945503234863281,
"learning_rate": 8.651685393258427e-05,
"loss": 0.8136,
"step": 77
},
{
"epoch": 0.17478991596638654,
"grad_norm": 0.27328184247016907,
"learning_rate": 8.764044943820225e-05,
"loss": 0.8966,
"step": 78
},
{
"epoch": 0.17703081232492998,
"grad_norm": 0.3049486577510834,
"learning_rate": 8.876404494382022e-05,
"loss": 0.9658,
"step": 79
},
{
"epoch": 0.1792717086834734,
"grad_norm": 0.3143669366836548,
"learning_rate": 8.988764044943821e-05,
"loss": 0.9867,
"step": 80
},
{
"epoch": 0.1815126050420168,
"grad_norm": 0.30048221349716187,
"learning_rate": 9.101123595505618e-05,
"loss": 0.9249,
"step": 81
},
{
"epoch": 0.18375350140056024,
"grad_norm": 0.28177568316459656,
"learning_rate": 9.213483146067416e-05,
"loss": 0.9899,
"step": 82
},
{
"epoch": 0.18599439775910365,
"grad_norm": 0.296478807926178,
"learning_rate": 9.325842696629214e-05,
"loss": 0.8476,
"step": 83
},
{
"epoch": 0.18823529411764706,
"grad_norm": 0.2855791449546814,
"learning_rate": 9.438202247191012e-05,
"loss": 0.8869,
"step": 84
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.2829147279262543,
"learning_rate": 9.550561797752809e-05,
"loss": 0.9274,
"step": 85
},
{
"epoch": 0.1927170868347339,
"grad_norm": 0.2549046277999878,
"learning_rate": 9.662921348314608e-05,
"loss": 0.8535,
"step": 86
},
{
"epoch": 0.1949579831932773,
"grad_norm": 0.2655014991760254,
"learning_rate": 9.775280898876405e-05,
"loss": 0.8905,
"step": 87
},
{
"epoch": 0.19719887955182072,
"grad_norm": 0.27607807517051697,
"learning_rate": 9.887640449438202e-05,
"loss": 0.8788,
"step": 88
},
{
"epoch": 0.19943977591036416,
"grad_norm": 0.24970707297325134,
"learning_rate": 0.0001,
"loss": 0.7632,
"step": 89
},
{
"epoch": 0.20168067226890757,
"grad_norm": 0.2821432650089264,
"learning_rate": 0.00010112359550561799,
"loss": 0.9314,
"step": 90
},
{
"epoch": 0.20392156862745098,
"grad_norm": 0.28368303179740906,
"learning_rate": 0.00010224719101123596,
"loss": 0.8424,
"step": 91
},
{
"epoch": 0.2061624649859944,
"grad_norm": 0.3087138235569,
"learning_rate": 0.00010337078651685395,
"loss": 0.9787,
"step": 92
},
{
"epoch": 0.20840336134453782,
"grad_norm": 0.2952103614807129,
"learning_rate": 0.00010449438202247193,
"loss": 0.9679,
"step": 93
},
{
"epoch": 0.21064425770308123,
"grad_norm": 0.2875281572341919,
"learning_rate": 0.00010561797752808989,
"loss": 0.9181,
"step": 94
},
{
"epoch": 0.21288515406162464,
"grad_norm": 0.3084465265274048,
"learning_rate": 0.00010674157303370786,
"loss": 0.9615,
"step": 95
},
{
"epoch": 0.21512605042016808,
"grad_norm": 0.28005871176719666,
"learning_rate": 0.00010786516853932584,
"loss": 0.8757,
"step": 96
},
{
"epoch": 0.2173669467787115,
"grad_norm": 0.2795560657978058,
"learning_rate": 0.00010898876404494383,
"loss": 0.8266,
"step": 97
},
{
"epoch": 0.2196078431372549,
"grad_norm": 0.2671497166156769,
"learning_rate": 0.0001101123595505618,
"loss": 0.8693,
"step": 98
},
{
"epoch": 0.2218487394957983,
"grad_norm": 0.24397605657577515,
"learning_rate": 0.00011123595505617979,
"loss": 0.8334,
"step": 99
},
{
"epoch": 0.22408963585434175,
"grad_norm": 0.26632094383239746,
"learning_rate": 0.00011235955056179777,
"loss": 0.966,
"step": 100
},
{
"epoch": 0.22633053221288515,
"grad_norm": 0.2785671055316925,
"learning_rate": 0.00011348314606741574,
"loss": 0.8823,
"step": 101
},
{
"epoch": 0.22857142857142856,
"grad_norm": 0.26920077204704285,
"learning_rate": 0.0001146067415730337,
"loss": 0.9726,
"step": 102
},
{
"epoch": 0.230812324929972,
"grad_norm": 0.2633483409881592,
"learning_rate": 0.00011573033707865168,
"loss": 0.8849,
"step": 103
},
{
"epoch": 0.2330532212885154,
"grad_norm": 0.26563239097595215,
"learning_rate": 0.00011685393258426967,
"loss": 0.7896,
"step": 104
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.27475470304489136,
"learning_rate": 0.00011797752808988764,
"loss": 0.9659,
"step": 105
},
{
"epoch": 0.23753501400560223,
"grad_norm": 0.2691081166267395,
"learning_rate": 0.00011910112359550563,
"loss": 0.9758,
"step": 106
},
{
"epoch": 0.23977591036414567,
"grad_norm": 0.2714226543903351,
"learning_rate": 0.00012022471910112361,
"loss": 0.8736,
"step": 107
},
{
"epoch": 0.24201680672268908,
"grad_norm": 0.2638857960700989,
"learning_rate": 0.00012134831460674158,
"loss": 0.8792,
"step": 108
},
{
"epoch": 0.24425770308123249,
"grad_norm": 0.25696009397506714,
"learning_rate": 0.00012247191011235955,
"loss": 0.8813,
"step": 109
},
{
"epoch": 0.24649859943977592,
"grad_norm": 0.27648770809173584,
"learning_rate": 0.00012359550561797752,
"loss": 0.9341,
"step": 110
},
{
"epoch": 0.24873949579831933,
"grad_norm": 0.27543121576309204,
"learning_rate": 0.0001247191011235955,
"loss": 0.8102,
"step": 111
},
{
"epoch": 0.25098039215686274,
"grad_norm": 0.2804222106933594,
"learning_rate": 0.0001258426966292135,
"loss": 0.9004,
"step": 112
},
{
"epoch": 0.25322128851540615,
"grad_norm": 0.27446436882019043,
"learning_rate": 0.00012696629213483147,
"loss": 0.8432,
"step": 113
},
{
"epoch": 0.25546218487394956,
"grad_norm": 0.27675163745880127,
"learning_rate": 0.00012808988764044944,
"loss": 0.8868,
"step": 114
},
{
"epoch": 0.25770308123249297,
"grad_norm": 0.24772736430168152,
"learning_rate": 0.00012921348314606744,
"loss": 0.8711,
"step": 115
},
{
"epoch": 0.25994397759103643,
"grad_norm": 0.2736036479473114,
"learning_rate": 0.0001303370786516854,
"loss": 0.922,
"step": 116
},
{
"epoch": 0.26218487394957984,
"grad_norm": 0.23815171420574188,
"learning_rate": 0.00013146067415730338,
"loss": 0.8469,
"step": 117
},
{
"epoch": 0.26442577030812325,
"grad_norm": 0.2564987540245056,
"learning_rate": 0.00013258426966292135,
"loss": 0.7626,
"step": 118
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.26105883717536926,
"learning_rate": 0.00013370786516853932,
"loss": 0.7911,
"step": 119
},
{
"epoch": 0.2689075630252101,
"grad_norm": 0.2915550768375397,
"learning_rate": 0.00013483146067415732,
"loss": 0.9643,
"step": 120
},
{
"epoch": 0.2711484593837535,
"grad_norm": 0.28031301498413086,
"learning_rate": 0.0001359550561797753,
"loss": 0.8797,
"step": 121
},
{
"epoch": 0.2733893557422969,
"grad_norm": 0.2468908429145813,
"learning_rate": 0.00013707865168539326,
"loss": 0.8497,
"step": 122
},
{
"epoch": 0.27563025210084036,
"grad_norm": 0.2498752921819687,
"learning_rate": 0.00013820224719101123,
"loss": 0.842,
"step": 123
},
{
"epoch": 0.27787114845938377,
"grad_norm": 0.2517074942588806,
"learning_rate": 0.00013932584269662923,
"loss": 0.8072,
"step": 124
},
{
"epoch": 0.2801120448179272,
"grad_norm": 0.24205273389816284,
"learning_rate": 0.0001404494382022472,
"loss": 0.796,
"step": 125
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.259343683719635,
"learning_rate": 0.00014157303370786517,
"loss": 0.8609,
"step": 126
},
{
"epoch": 0.284593837535014,
"grad_norm": 0.2564795911312103,
"learning_rate": 0.00014269662921348315,
"loss": 0.8976,
"step": 127
},
{
"epoch": 0.2868347338935574,
"grad_norm": 0.2650969922542572,
"learning_rate": 0.00014382022471910112,
"loss": 0.8239,
"step": 128
},
{
"epoch": 0.28907563025210087,
"grad_norm": 0.26443368196487427,
"learning_rate": 0.00014494382022471912,
"loss": 0.8479,
"step": 129
},
{
"epoch": 0.2913165266106443,
"grad_norm": 0.26897069811820984,
"learning_rate": 0.0001460674157303371,
"loss": 0.9104,
"step": 130
},
{
"epoch": 0.2935574229691877,
"grad_norm": 0.2578631341457367,
"learning_rate": 0.00014719101123595506,
"loss": 0.8151,
"step": 131
},
{
"epoch": 0.2957983193277311,
"grad_norm": 0.2454749345779419,
"learning_rate": 0.00014831460674157306,
"loss": 0.7915,
"step": 132
},
{
"epoch": 0.2980392156862745,
"grad_norm": 0.25589731335639954,
"learning_rate": 0.00014943820224719103,
"loss": 0.8952,
"step": 133
},
{
"epoch": 0.3002801120448179,
"grad_norm": 0.2591662108898163,
"learning_rate": 0.000150561797752809,
"loss": 0.8112,
"step": 134
},
{
"epoch": 0.3025210084033613,
"grad_norm": 0.26816102862358093,
"learning_rate": 0.00015168539325842697,
"loss": 0.8893,
"step": 135
},
{
"epoch": 0.3047619047619048,
"grad_norm": 0.24405767023563385,
"learning_rate": 0.00015280898876404494,
"loss": 0.877,
"step": 136
},
{
"epoch": 0.3070028011204482,
"grad_norm": 0.2588540315628052,
"learning_rate": 0.00015393258426966294,
"loss": 0.7779,
"step": 137
},
{
"epoch": 0.3092436974789916,
"grad_norm": 0.27598896622657776,
"learning_rate": 0.0001550561797752809,
"loss": 0.7752,
"step": 138
},
{
"epoch": 0.311484593837535,
"grad_norm": 0.24622836709022522,
"learning_rate": 0.00015617977528089888,
"loss": 0.8849,
"step": 139
},
{
"epoch": 0.3137254901960784,
"grad_norm": 0.2502545118331909,
"learning_rate": 0.00015730337078651685,
"loss": 0.8012,
"step": 140
},
{
"epoch": 0.31596638655462184,
"grad_norm": 0.25384724140167236,
"learning_rate": 0.00015842696629213485,
"loss": 0.8395,
"step": 141
},
{
"epoch": 0.31820728291316525,
"grad_norm": 0.2527698874473572,
"learning_rate": 0.0001595505617977528,
"loss": 0.831,
"step": 142
},
{
"epoch": 0.3204481792717087,
"grad_norm": 0.24567513167858124,
"learning_rate": 0.0001606741573033708,
"loss": 0.7959,
"step": 143
},
{
"epoch": 0.3226890756302521,
"grad_norm": 0.24283619225025177,
"learning_rate": 0.00016179775280898877,
"loss": 0.8146,
"step": 144
},
{
"epoch": 0.32492997198879553,
"grad_norm": 0.2708129286766052,
"learning_rate": 0.00016292134831460674,
"loss": 0.905,
"step": 145
},
{
"epoch": 0.32717086834733894,
"grad_norm": 0.27091729640960693,
"learning_rate": 0.00016404494382022474,
"loss": 0.8681,
"step": 146
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.2502163350582123,
"learning_rate": 0.0001651685393258427,
"loss": 0.8859,
"step": 147
},
{
"epoch": 0.33165266106442576,
"grad_norm": 0.23066553473472595,
"learning_rate": 0.00016629213483146068,
"loss": 0.9034,
"step": 148
},
{
"epoch": 0.33389355742296917,
"grad_norm": 0.2503213882446289,
"learning_rate": 0.00016741573033707868,
"loss": 0.8348,
"step": 149
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.25126519799232483,
"learning_rate": 0.00016853932584269662,
"loss": 0.8553,
"step": 150
},
{
"epoch": 0.33837535014005604,
"grad_norm": 0.241397425532341,
"learning_rate": 0.00016966292134831462,
"loss": 0.9132,
"step": 151
},
{
"epoch": 0.34061624649859945,
"grad_norm": 0.25682827830314636,
"learning_rate": 0.0001707865168539326,
"loss": 0.9481,
"step": 152
},
{
"epoch": 0.34285714285714286,
"grad_norm": 0.24024637043476105,
"learning_rate": 0.00017191011235955056,
"loss": 0.943,
"step": 153
},
{
"epoch": 0.34509803921568627,
"grad_norm": 0.2626055181026459,
"learning_rate": 0.00017303370786516853,
"loss": 0.7863,
"step": 154
},
{
"epoch": 0.3473389355742297,
"grad_norm": 0.24571183323860168,
"learning_rate": 0.00017415730337078653,
"loss": 0.926,
"step": 155
},
{
"epoch": 0.3495798319327731,
"grad_norm": 0.2499912977218628,
"learning_rate": 0.0001752808988764045,
"loss": 0.8232,
"step": 156
},
{
"epoch": 0.35182072829131655,
"grad_norm": 0.25473934412002563,
"learning_rate": 0.00017640449438202248,
"loss": 0.9021,
"step": 157
},
{
"epoch": 0.35406162464985996,
"grad_norm": 0.25837019085884094,
"learning_rate": 0.00017752808988764045,
"loss": 0.7753,
"step": 158
},
{
"epoch": 0.3563025210084034,
"grad_norm": 0.255958616733551,
"learning_rate": 0.00017865168539325842,
"loss": 0.9155,
"step": 159
},
{
"epoch": 0.3585434173669468,
"grad_norm": 0.24315786361694336,
"learning_rate": 0.00017977528089887642,
"loss": 0.8039,
"step": 160
},
{
"epoch": 0.3607843137254902,
"grad_norm": 0.24614644050598145,
"learning_rate": 0.0001808988764044944,
"loss": 0.8857,
"step": 161
},
{
"epoch": 0.3630252100840336,
"grad_norm": 0.24280671775341034,
"learning_rate": 0.00018202247191011236,
"loss": 0.8354,
"step": 162
},
{
"epoch": 0.365266106442577,
"grad_norm": 0.2597411572933197,
"learning_rate": 0.00018314606741573036,
"loss": 0.8852,
"step": 163
},
{
"epoch": 0.3675070028011205,
"grad_norm": 0.2597702145576477,
"learning_rate": 0.00018426966292134833,
"loss": 0.9029,
"step": 164
},
{
"epoch": 0.3697478991596639,
"grad_norm": 0.23551709949970245,
"learning_rate": 0.0001853932584269663,
"loss": 0.9118,
"step": 165
},
{
"epoch": 0.3719887955182073,
"grad_norm": 0.2516990303993225,
"learning_rate": 0.00018651685393258427,
"loss": 0.7997,
"step": 166
},
{
"epoch": 0.3742296918767507,
"grad_norm": 0.2297232747077942,
"learning_rate": 0.00018764044943820224,
"loss": 0.7823,
"step": 167
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.22287945449352264,
"learning_rate": 0.00018876404494382024,
"loss": 0.8609,
"step": 168
},
{
"epoch": 0.3787114845938375,
"grad_norm": 0.2428523749113083,
"learning_rate": 0.0001898876404494382,
"loss": 0.8842,
"step": 169
},
{
"epoch": 0.38095238095238093,
"grad_norm": 0.25949063897132874,
"learning_rate": 0.00019101123595505618,
"loss": 0.7737,
"step": 170
},
{
"epoch": 0.3831932773109244,
"grad_norm": 0.2532324492931366,
"learning_rate": 0.00019213483146067416,
"loss": 0.7548,
"step": 171
},
{
"epoch": 0.3854341736694678,
"grad_norm": 0.2657643258571625,
"learning_rate": 0.00019325842696629215,
"loss": 0.8375,
"step": 172
},
{
"epoch": 0.3876750700280112,
"grad_norm": 0.2578238546848297,
"learning_rate": 0.00019438202247191013,
"loss": 0.8591,
"step": 173
},
{
"epoch": 0.3899159663865546,
"grad_norm": 0.24303969740867615,
"learning_rate": 0.0001955056179775281,
"loss": 0.8131,
"step": 174
},
{
"epoch": 0.39215686274509803,
"grad_norm": 0.271139919757843,
"learning_rate": 0.00019662921348314607,
"loss": 0.8234,
"step": 175
},
{
"epoch": 0.39439775910364144,
"grad_norm": 0.2569217085838318,
"learning_rate": 0.00019775280898876404,
"loss": 0.8907,
"step": 176
},
{
"epoch": 0.39663865546218485,
"grad_norm": 0.23687879741191864,
"learning_rate": 0.00019887640449438204,
"loss": 0.8082,
"step": 177
},
{
"epoch": 0.3988795518207283,
"grad_norm": 0.24828100204467773,
"learning_rate": 0.0002,
"loss": 0.8575,
"step": 178
},
{
"epoch": 0.4011204481792717,
"grad_norm": 0.24300478398799896,
"learning_rate": 0.00019999980867200105,
"loss": 0.7963,
"step": 179
},
{
"epoch": 0.40336134453781514,
"grad_norm": 0.2662373483181,
"learning_rate": 0.00019999923468873635,
"loss": 0.8871,
"step": 180
},
{
"epoch": 0.40560224089635855,
"grad_norm": 0.2600310146808624,
"learning_rate": 0.00019999827805240226,
"loss": 0.8477,
"step": 181
},
{
"epoch": 0.40784313725490196,
"grad_norm": 0.26801761984825134,
"learning_rate": 0.00019999693876665938,
"loss": 0.8978,
"step": 182
},
{
"epoch": 0.41008403361344536,
"grad_norm": 0.26510775089263916,
"learning_rate": 0.00019999521683663262,
"loss": 0.8387,
"step": 183
},
{
"epoch": 0.4123249299719888,
"grad_norm": 0.25927454233169556,
"learning_rate": 0.00019999311226891103,
"loss": 0.884,
"step": 184
},
{
"epoch": 0.41456582633053224,
"grad_norm": 0.2489653080701828,
"learning_rate": 0.00019999062507154784,
"loss": 0.9473,
"step": 185
},
{
"epoch": 0.41680672268907565,
"grad_norm": 0.2461009919643402,
"learning_rate": 0.0001999877552540605,
"loss": 0.8609,
"step": 186
},
{
"epoch": 0.41904761904761906,
"grad_norm": 0.24591152369976044,
"learning_rate": 0.00019998450282743052,
"loss": 0.9284,
"step": 187
},
{
"epoch": 0.42128851540616247,
"grad_norm": 0.23085853457450867,
"learning_rate": 0.00019998086780410353,
"loss": 0.8898,
"step": 188
},
{
"epoch": 0.4235294117647059,
"grad_norm": 0.2512655556201935,
"learning_rate": 0.00019997685019798912,
"loss": 0.8275,
"step": 189
},
{
"epoch": 0.4257703081232493,
"grad_norm": 0.24720723927021027,
"learning_rate": 0.0001999724500244609,
"loss": 0.8809,
"step": 190
},
{
"epoch": 0.4280112044817927,
"grad_norm": 0.25972652435302734,
"learning_rate": 0.00019996766730035642,
"loss": 0.891,
"step": 191
},
{
"epoch": 0.43025210084033616,
"grad_norm": 0.26756009459495544,
"learning_rate": 0.0001999625020439771,
"loss": 0.923,
"step": 192
},
{
"epoch": 0.43249299719887957,
"grad_norm": 0.24553890526294708,
"learning_rate": 0.000199956954275088,
"loss": 0.8222,
"step": 193
},
{
"epoch": 0.434733893557423,
"grad_norm": 0.24937503039836884,
"learning_rate": 0.0001999510240149181,
"loss": 0.788,
"step": 194
},
{
"epoch": 0.4369747899159664,
"grad_norm": 0.2749722898006439,
"learning_rate": 0.00019994471128615985,
"loss": 0.9383,
"step": 195
},
{
"epoch": 0.4392156862745098,
"grad_norm": 0.25508439540863037,
"learning_rate": 0.00019993801611296923,
"loss": 0.8234,
"step": 196
},
{
"epoch": 0.4414565826330532,
"grad_norm": 0.26953256130218506,
"learning_rate": 0.00019993093852096582,
"loss": 0.9895,
"step": 197
},
{
"epoch": 0.4436974789915966,
"grad_norm": 0.26215213537216187,
"learning_rate": 0.0001999234785372324,
"loss": 0.8432,
"step": 198
},
{
"epoch": 0.4459383753501401,
"grad_norm": 0.24869990348815918,
"learning_rate": 0.00019991563619031508,
"loss": 0.8786,
"step": 199
},
{
"epoch": 0.4481792717086835,
"grad_norm": 0.23765677213668823,
"learning_rate": 0.00019990741151022301,
"loss": 0.9402,
"step": 200
},
{
"epoch": 0.4504201680672269,
"grad_norm": 0.24099862575531006,
"learning_rate": 0.00019989880452842847,
"loss": 0.9192,
"step": 201
},
{
"epoch": 0.4526610644257703,
"grad_norm": 0.2570018172264099,
"learning_rate": 0.00019988981527786654,
"loss": 0.846,
"step": 202
},
{
"epoch": 0.4549019607843137,
"grad_norm": 0.2420913279056549,
"learning_rate": 0.00019988044379293523,
"loss": 0.9021,
"step": 203
},
{
"epoch": 0.45714285714285713,
"grad_norm": 0.25546249747276306,
"learning_rate": 0.00019987069010949496,
"loss": 0.9191,
"step": 204
},
{
"epoch": 0.45938375350140054,
"grad_norm": 0.25439539551734924,
"learning_rate": 0.00019986055426486887,
"loss": 0.9046,
"step": 205
},
{
"epoch": 0.461624649859944,
"grad_norm": 0.23050928115844727,
"learning_rate": 0.00019985003629784237,
"loss": 0.7777,
"step": 206
},
{
"epoch": 0.4638655462184874,
"grad_norm": 0.25798794627189636,
"learning_rate": 0.00019983913624866304,
"loss": 0.9232,
"step": 207
},
{
"epoch": 0.4661064425770308,
"grad_norm": 0.2508363127708435,
"learning_rate": 0.00019982785415904064,
"loss": 0.9924,
"step": 208
},
{
"epoch": 0.46834733893557423,
"grad_norm": 0.25454050302505493,
"learning_rate": 0.00019981619007214673,
"loss": 0.9056,
"step": 209
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.24120093882083893,
"learning_rate": 0.0001998041440326146,
"loss": 0.8339,
"step": 210
},
{
"epoch": 0.47282913165266105,
"grad_norm": 0.2521376609802246,
"learning_rate": 0.00019979171608653924,
"loss": 0.7907,
"step": 211
},
{
"epoch": 0.47507002801120446,
"grad_norm": 0.2513487637042999,
"learning_rate": 0.00019977890628147682,
"loss": 0.9142,
"step": 212
},
{
"epoch": 0.4773109243697479,
"grad_norm": 0.2808438241481781,
"learning_rate": 0.00019976571466644492,
"loss": 0.8708,
"step": 213
},
{
"epoch": 0.47955182072829133,
"grad_norm": 0.2564331293106079,
"learning_rate": 0.00019975214129192196,
"loss": 0.8726,
"step": 214
},
{
"epoch": 0.48179271708683474,
"grad_norm": 0.2591661214828491,
"learning_rate": 0.00019973818620984738,
"loss": 0.791,
"step": 215
},
{
"epoch": 0.48403361344537815,
"grad_norm": 0.2555089592933655,
"learning_rate": 0.00019972384947362101,
"loss": 0.9129,
"step": 216
},
{
"epoch": 0.48627450980392156,
"grad_norm": 0.2542738616466522,
"learning_rate": 0.00019970913113810334,
"loss": 0.8097,
"step": 217
},
{
"epoch": 0.48851540616246497,
"grad_norm": 0.26333680748939514,
"learning_rate": 0.0001996940312596149,
"loss": 0.8342,
"step": 218
},
{
"epoch": 0.4907563025210084,
"grad_norm": 0.24918504059314728,
"learning_rate": 0.00019967854989593633,
"loss": 0.8441,
"step": 219
},
{
"epoch": 0.49299719887955185,
"grad_norm": 0.26157376170158386,
"learning_rate": 0.00019966268710630797,
"loss": 0.8891,
"step": 220
},
{
"epoch": 0.49523809523809526,
"grad_norm": 0.277658611536026,
"learning_rate": 0.00019964644295142968,
"loss": 0.8862,
"step": 221
},
{
"epoch": 0.49747899159663866,
"grad_norm": 0.2798251211643219,
"learning_rate": 0.00019962981749346078,
"loss": 0.8742,
"step": 222
},
{
"epoch": 0.4997198879551821,
"grad_norm": 0.28391703963279724,
"learning_rate": 0.00019961281079601957,
"loss": 0.8928,
"step": 223
},
{
"epoch": 0.5019607843137255,
"grad_norm": 0.2569884955883026,
"learning_rate": 0.00019959542292418317,
"loss": 0.8289,
"step": 224
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.2689199447631836,
"learning_rate": 0.00019957765394448728,
"loss": 0.8002,
"step": 225
},
{
"epoch": 0.5064425770308123,
"grad_norm": 0.2564886510372162,
"learning_rate": 0.00019955950392492604,
"loss": 0.8817,
"step": 226
},
{
"epoch": 0.5086834733893557,
"grad_norm": 0.2417922168970108,
"learning_rate": 0.00019954097293495155,
"loss": 0.8838,
"step": 227
},
{
"epoch": 0.5109243697478991,
"grad_norm": 0.22530929744243622,
"learning_rate": 0.00019952206104547376,
"loss": 0.7967,
"step": 228
},
{
"epoch": 0.5131652661064425,
"grad_norm": 0.23360571265220642,
"learning_rate": 0.00019950276832886017,
"loss": 0.8364,
"step": 229
},
{
"epoch": 0.5154061624649859,
"grad_norm": 0.2611772418022156,
"learning_rate": 0.00019948309485893549,
"loss": 0.8786,
"step": 230
},
{
"epoch": 0.5176470588235295,
"grad_norm": 0.24788020551204681,
"learning_rate": 0.00019946304071098142,
"loss": 0.86,
"step": 231
},
{
"epoch": 0.5198879551820729,
"grad_norm": 0.23259863257408142,
"learning_rate": 0.00019944260596173641,
"loss": 0.822,
"step": 232
},
{
"epoch": 0.5221288515406163,
"grad_norm": 0.2678159773349762,
"learning_rate": 0.0001994217906893952,
"loss": 0.8522,
"step": 233
},
{
"epoch": 0.5243697478991597,
"grad_norm": 0.268537312746048,
"learning_rate": 0.00019940059497360873,
"loss": 0.8638,
"step": 234
},
{
"epoch": 0.5266106442577031,
"grad_norm": 0.26122671365737915,
"learning_rate": 0.0001993790188954836,
"loss": 0.8509,
"step": 235
},
{
"epoch": 0.5288515406162465,
"grad_norm": 0.38401761651039124,
"learning_rate": 0.00019935706253758207,
"loss": 0.8184,
"step": 236
},
{
"epoch": 0.5310924369747899,
"grad_norm": 0.2459888607263565,
"learning_rate": 0.00019933472598392138,
"loss": 0.8042,
"step": 237
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.27041634917259216,
"learning_rate": 0.0001993120093199737,
"loss": 0.9426,
"step": 238
},
{
"epoch": 0.5355742296918767,
"grad_norm": 0.2503630220890045,
"learning_rate": 0.00019928891263266578,
"loss": 0.901,
"step": 239
},
{
"epoch": 0.5378151260504201,
"grad_norm": 0.2358655333518982,
"learning_rate": 0.00019926543601037842,
"loss": 0.8165,
"step": 240
},
{
"epoch": 0.5400560224089636,
"grad_norm": 0.24630171060562134,
"learning_rate": 0.00019924157954294628,
"loss": 0.8083,
"step": 241
},
{
"epoch": 0.542296918767507,
"grad_norm": 0.2504923939704895,
"learning_rate": 0.00019921734332165766,
"loss": 0.7973,
"step": 242
},
{
"epoch": 0.5445378151260504,
"grad_norm": 0.2728249132633209,
"learning_rate": 0.00019919272743925385,
"loss": 0.8817,
"step": 243
},
{
"epoch": 0.5467787114845938,
"grad_norm": 0.2694825828075409,
"learning_rate": 0.000199167731989929,
"loss": 0.8458,
"step": 244
},
{
"epoch": 0.5490196078431373,
"grad_norm": 0.31109780073165894,
"learning_rate": 0.00019914235706932972,
"loss": 0.939,
"step": 245
},
{
"epoch": 0.5512605042016807,
"grad_norm": 0.25847890973091125,
"learning_rate": 0.0001991166027745547,
"loss": 0.8704,
"step": 246
},
{
"epoch": 0.5535014005602241,
"grad_norm": 0.2678435742855072,
"learning_rate": 0.00019909046920415423,
"loss": 0.7445,
"step": 247
},
{
"epoch": 0.5557422969187675,
"grad_norm": 0.25951921939849854,
"learning_rate": 0.00019906395645812998,
"loss": 0.8869,
"step": 248
},
{
"epoch": 0.5579831932773109,
"grad_norm": 0.25794005393981934,
"learning_rate": 0.00019903706463793462,
"loss": 0.9506,
"step": 249
},
{
"epoch": 0.5602240896358543,
"grad_norm": 0.2340375930070877,
"learning_rate": 0.00019900979384647127,
"loss": 0.811,
"step": 250
},
{
"epoch": 0.5624649859943978,
"grad_norm": 0.25867462158203125,
"learning_rate": 0.0001989821441880933,
"loss": 0.8621,
"step": 251
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.2387334555387497,
"learning_rate": 0.0001989541157686037,
"loss": 0.8514,
"step": 252
},
{
"epoch": 0.5669467787114846,
"grad_norm": 0.26006007194519043,
"learning_rate": 0.00019892570869525496,
"loss": 0.8946,
"step": 253
},
{
"epoch": 0.569187675070028,
"grad_norm": 0.2735587954521179,
"learning_rate": 0.00019889692307674845,
"loss": 0.8605,
"step": 254
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.28292015194892883,
"learning_rate": 0.00019886775902323405,
"loss": 1.0231,
"step": 255
},
{
"epoch": 0.5736694677871148,
"grad_norm": 0.2500753402709961,
"learning_rate": 0.00019883821664630976,
"loss": 0.9155,
"step": 256
},
{
"epoch": 0.5759103641456582,
"grad_norm": 0.23072321712970734,
"learning_rate": 0.00019880829605902126,
"loss": 0.8268,
"step": 257
},
{
"epoch": 0.5781512605042017,
"grad_norm": 0.25353720784187317,
"learning_rate": 0.0001987779973758615,
"loss": 0.9315,
"step": 258
},
{
"epoch": 0.5803921568627451,
"grad_norm": 0.24684756994247437,
"learning_rate": 0.00019874732071277013,
"loss": 0.8652,
"step": 259
},
{
"epoch": 0.5826330532212886,
"grad_norm": 0.2583218812942505,
"learning_rate": 0.0001987162661871333,
"loss": 0.7722,
"step": 260
},
{
"epoch": 0.584873949579832,
"grad_norm": 0.23789426684379578,
"learning_rate": 0.00019868483391778302,
"loss": 0.9084,
"step": 261
},
{
"epoch": 0.5871148459383754,
"grad_norm": 0.24503661692142487,
"learning_rate": 0.00019865302402499678,
"loss": 0.8683,
"step": 262
},
{
"epoch": 0.5893557422969188,
"grad_norm": 0.2724620997905731,
"learning_rate": 0.00019862083663049694,
"loss": 0.8323,
"step": 263
},
{
"epoch": 0.5915966386554622,
"grad_norm": 0.27704504132270813,
"learning_rate": 0.0001985882718574506,
"loss": 0.9132,
"step": 264
},
{
"epoch": 0.5938375350140056,
"grad_norm": 0.2760598063468933,
"learning_rate": 0.00019855532983046876,
"loss": 0.7416,
"step": 265
},
{
"epoch": 0.596078431372549,
"grad_norm": 0.26945289969444275,
"learning_rate": 0.00019852201067560606,
"loss": 0.965,
"step": 266
},
{
"epoch": 0.5983193277310924,
"grad_norm": 0.2512185573577881,
"learning_rate": 0.0001984883145203603,
"loss": 0.8582,
"step": 267
},
{
"epoch": 0.6005602240896358,
"grad_norm": 0.24201013147830963,
"learning_rate": 0.00019845424149367177,
"loss": 0.8433,
"step": 268
},
{
"epoch": 0.6028011204481792,
"grad_norm": 0.24099834263324738,
"learning_rate": 0.000198419791725923,
"loss": 0.9501,
"step": 269
},
{
"epoch": 0.6050420168067226,
"grad_norm": 0.24521470069885254,
"learning_rate": 0.00019838496534893806,
"loss": 0.8458,
"step": 270
},
{
"epoch": 0.6072829131652661,
"grad_norm": 0.2377120554447174,
"learning_rate": 0.00019834976249598221,
"loss": 0.8934,
"step": 271
},
{
"epoch": 0.6095238095238096,
"grad_norm": 0.2444024235010147,
"learning_rate": 0.00019831418330176125,
"loss": 0.8326,
"step": 272
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.23458163440227509,
"learning_rate": 0.0001982782279024211,
"loss": 0.8743,
"step": 273
},
{
"epoch": 0.6140056022408964,
"grad_norm": 0.2571447491645813,
"learning_rate": 0.00019824189643554725,
"loss": 0.8265,
"step": 274
},
{
"epoch": 0.6162464985994398,
"grad_norm": 0.25711604952812195,
"learning_rate": 0.00019820518904016426,
"loss": 0.8418,
"step": 275
},
{
"epoch": 0.6184873949579832,
"grad_norm": 0.26878827810287476,
"learning_rate": 0.00019816810585673514,
"loss": 0.8007,
"step": 276
},
{
"epoch": 0.6207282913165266,
"grad_norm": 0.2555851638317108,
"learning_rate": 0.00019813064702716094,
"loss": 0.8536,
"step": 277
},
{
"epoch": 0.62296918767507,
"grad_norm": 0.2718588411808014,
"learning_rate": 0.00019809281269478012,
"loss": 0.8884,
"step": 278
},
{
"epoch": 0.6252100840336134,
"grad_norm": 0.25706928968429565,
"learning_rate": 0.00019805460300436803,
"loss": 0.8729,
"step": 279
},
{
"epoch": 0.6274509803921569,
"grad_norm": 0.23707084357738495,
"learning_rate": 0.00019801601810213635,
"loss": 0.8268,
"step": 280
},
{
"epoch": 0.6296918767507003,
"grad_norm": 0.24094390869140625,
"learning_rate": 0.00019797705813573245,
"loss": 0.8457,
"step": 281
},
{
"epoch": 0.6319327731092437,
"grad_norm": 0.24794656038284302,
"learning_rate": 0.00019793772325423908,
"loss": 0.8495,
"step": 282
},
{
"epoch": 0.6341736694677871,
"grad_norm": 0.2355436086654663,
"learning_rate": 0.00019789801360817346,
"loss": 0.8167,
"step": 283
},
{
"epoch": 0.6364145658263305,
"grad_norm": 0.24097253382205963,
"learning_rate": 0.00019785792934948695,
"loss": 0.8887,
"step": 284
},
{
"epoch": 0.6386554621848739,
"grad_norm": 0.23715715110301971,
"learning_rate": 0.00019781747063156435,
"loss": 0.8205,
"step": 285
},
{
"epoch": 0.6408963585434174,
"grad_norm": 0.2627374529838562,
"learning_rate": 0.00019777663760922343,
"loss": 0.9175,
"step": 286
},
{
"epoch": 0.6431372549019608,
"grad_norm": 0.25734400749206543,
"learning_rate": 0.00019773543043871412,
"loss": 0.8104,
"step": 287
},
{
"epoch": 0.6453781512605042,
"grad_norm": 0.2477787584066391,
"learning_rate": 0.0001976938492777182,
"loss": 0.9969,
"step": 288
},
{
"epoch": 0.6476190476190476,
"grad_norm": 0.2440386414527893,
"learning_rate": 0.0001976518942853484,
"loss": 0.8084,
"step": 289
},
{
"epoch": 0.6498599439775911,
"grad_norm": 0.2703987956047058,
"learning_rate": 0.00019760956562214806,
"loss": 0.7827,
"step": 290
},
{
"epoch": 0.6521008403361345,
"grad_norm": 0.29450032114982605,
"learning_rate": 0.0001975668634500904,
"loss": 0.8807,
"step": 291
},
{
"epoch": 0.6543417366946779,
"grad_norm": 0.27439677715301514,
"learning_rate": 0.00019752378793257776,
"loss": 0.8152,
"step": 292
},
{
"epoch": 0.6565826330532213,
"grad_norm": 0.2940295338630676,
"learning_rate": 0.00019748033923444122,
"loss": 0.9051,
"step": 293
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.2529405355453491,
"learning_rate": 0.00019743651752193982,
"loss": 0.7429,
"step": 294
},
{
"epoch": 0.6610644257703081,
"grad_norm": 0.26964515447616577,
"learning_rate": 0.00019739232296276003,
"loss": 0.8584,
"step": 295
},
{
"epoch": 0.6633053221288515,
"grad_norm": 0.24733929336071014,
"learning_rate": 0.00019734775572601487,
"loss": 0.8465,
"step": 296
},
{
"epoch": 0.6655462184873949,
"grad_norm": 0.2559841275215149,
"learning_rate": 0.00019730281598224364,
"loss": 0.883,
"step": 297
},
{
"epoch": 0.6677871148459383,
"grad_norm": 0.24375739693641663,
"learning_rate": 0.00019725750390341094,
"loss": 0.8042,
"step": 298
},
{
"epoch": 0.6700280112044817,
"grad_norm": 0.25603991746902466,
"learning_rate": 0.00019721181966290613,
"loss": 0.9363,
"step": 299
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.2391873151063919,
"learning_rate": 0.00019716576343554274,
"loss": 0.9364,
"step": 300
},
{
"epoch": 0.6745098039215687,
"grad_norm": 0.24241115152835846,
"learning_rate": 0.00019711933539755765,
"loss": 0.8168,
"step": 301
},
{
"epoch": 0.6767507002801121,
"grad_norm": 0.2441554218530655,
"learning_rate": 0.00019707253572661055,
"loss": 0.9179,
"step": 302
},
{
"epoch": 0.6789915966386555,
"grad_norm": 0.2645583748817444,
"learning_rate": 0.00019702536460178318,
"loss": 0.7971,
"step": 303
},
{
"epoch": 0.6812324929971989,
"grad_norm": 0.2557383179664612,
"learning_rate": 0.0001969778222035787,
"loss": 0.9112,
"step": 304
},
{
"epoch": 0.6834733893557423,
"grad_norm": 0.25000718235969543,
"learning_rate": 0.0001969299087139209,
"loss": 0.7842,
"step": 305
},
{
"epoch": 0.6857142857142857,
"grad_norm": 0.23475897312164307,
"learning_rate": 0.00019688162431615367,
"loss": 0.7806,
"step": 306
},
{
"epoch": 0.6879551820728291,
"grad_norm": 0.2505607306957245,
"learning_rate": 0.00019683296919504012,
"loss": 0.922,
"step": 307
},
{
"epoch": 0.6901960784313725,
"grad_norm": 0.2522655725479126,
"learning_rate": 0.00019678394353676203,
"loss": 0.7934,
"step": 308
},
{
"epoch": 0.692436974789916,
"grad_norm": 0.2722029387950897,
"learning_rate": 0.000196734547528919,
"loss": 0.8775,
"step": 309
},
{
"epoch": 0.6946778711484594,
"grad_norm": 0.26442402601242065,
"learning_rate": 0.00019668478136052774,
"loss": 0.8696,
"step": 310
},
{
"epoch": 0.6969187675070028,
"grad_norm": 0.26388052105903625,
"learning_rate": 0.00019663464522202162,
"loss": 0.8569,
"step": 311
},
{
"epoch": 0.6991596638655462,
"grad_norm": 0.26141613721847534,
"learning_rate": 0.00019658413930524952,
"loss": 0.8485,
"step": 312
},
{
"epoch": 0.7014005602240896,
"grad_norm": 0.23456323146820068,
"learning_rate": 0.00019653326380347533,
"loss": 0.8913,
"step": 313
},
{
"epoch": 0.7036414565826331,
"grad_norm": 0.24977454543113708,
"learning_rate": 0.00019648201891137723,
"loss": 0.8729,
"step": 314
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.22706039249897003,
"learning_rate": 0.0001964304048250469,
"loss": 0.7885,
"step": 315
},
{
"epoch": 0.7081232492997199,
"grad_norm": 0.24601607024669647,
"learning_rate": 0.00019637842174198867,
"loss": 0.8387,
"step": 316
},
{
"epoch": 0.7103641456582633,
"grad_norm": 0.2545998990535736,
"learning_rate": 0.000196326069861119,
"loss": 0.8114,
"step": 317
},
{
"epoch": 0.7126050420168067,
"grad_norm": 0.24375082552433014,
"learning_rate": 0.00019627334938276546,
"loss": 0.843,
"step": 318
},
{
"epoch": 0.7148459383753502,
"grad_norm": 0.2519029378890991,
"learning_rate": 0.00019622026050866614,
"loss": 0.9088,
"step": 319
},
{
"epoch": 0.7170868347338936,
"grad_norm": 0.27405208349227905,
"learning_rate": 0.0001961668034419688,
"loss": 0.958,
"step": 320
},
{
"epoch": 0.719327731092437,
"grad_norm": 0.25491318106651306,
"learning_rate": 0.0001961129783872301,
"loss": 0.7692,
"step": 321
},
{
"epoch": 0.7215686274509804,
"grad_norm": 0.250347375869751,
"learning_rate": 0.00019605878555041485,
"loss": 0.8321,
"step": 322
},
{
"epoch": 0.7238095238095238,
"grad_norm": 0.24250715970993042,
"learning_rate": 0.00019600422513889516,
"loss": 0.7832,
"step": 323
},
{
"epoch": 0.7260504201680672,
"grad_norm": 0.2599019706249237,
"learning_rate": 0.00019594929736144976,
"loss": 0.8736,
"step": 324
},
{
"epoch": 0.7282913165266106,
"grad_norm": 0.25766611099243164,
"learning_rate": 0.00019589400242826305,
"loss": 0.7419,
"step": 325
},
{
"epoch": 0.730532212885154,
"grad_norm": 0.254658967256546,
"learning_rate": 0.00019583834055092445,
"loss": 0.9058,
"step": 326
},
{
"epoch": 0.7327731092436974,
"grad_norm": 0.2731349468231201,
"learning_rate": 0.00019578231194242743,
"loss": 0.9683,
"step": 327
},
{
"epoch": 0.735014005602241,
"grad_norm": 0.22153563797473907,
"learning_rate": 0.00019572591681716887,
"loss": 0.8146,
"step": 328
},
{
"epoch": 0.7372549019607844,
"grad_norm": 0.24707463383674622,
"learning_rate": 0.00019566915539094803,
"loss": 0.8204,
"step": 329
},
{
"epoch": 0.7394957983193278,
"grad_norm": 0.2541884779930115,
"learning_rate": 0.00019561202788096597,
"loss": 0.7638,
"step": 330
},
{
"epoch": 0.7417366946778712,
"grad_norm": 0.2314218282699585,
"learning_rate": 0.00019555453450582452,
"loss": 0.7502,
"step": 331
},
{
"epoch": 0.7439775910364146,
"grad_norm": 0.24334260821342468,
"learning_rate": 0.00019549667548552556,
"loss": 0.912,
"step": 332
},
{
"epoch": 0.746218487394958,
"grad_norm": 0.24473878741264343,
"learning_rate": 0.00019543845104147,
"loss": 0.7558,
"step": 333
},
{
"epoch": 0.7484593837535014,
"grad_norm": 0.25403597950935364,
"learning_rate": 0.00019537986139645726,
"loss": 0.8406,
"step": 334
},
{
"epoch": 0.7507002801120448,
"grad_norm": 0.24638508260250092,
"learning_rate": 0.0001953209067746841,
"loss": 0.736,
"step": 335
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.25604501366615295,
"learning_rate": 0.00019526158740174393,
"loss": 0.867,
"step": 336
},
{
"epoch": 0.7551820728291316,
"grad_norm": 0.2617231011390686,
"learning_rate": 0.00019520190350462584,
"loss": 0.8654,
"step": 337
},
{
"epoch": 0.757422969187675,
"grad_norm": 0.2620690166950226,
"learning_rate": 0.0001951418553117139,
"loss": 0.8428,
"step": 338
},
{
"epoch": 0.7596638655462185,
"grad_norm": 0.25454917550086975,
"learning_rate": 0.0001950814430527861,
"loss": 0.8676,
"step": 339
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.26449549198150635,
"learning_rate": 0.00019502066695901358,
"loss": 0.8709,
"step": 340
},
{
"epoch": 0.7641456582633053,
"grad_norm": 0.24709953367710114,
"learning_rate": 0.0001949595272629597,
"loss": 0.8312,
"step": 341
},
{
"epoch": 0.7663865546218488,
"grad_norm": 0.2593318819999695,
"learning_rate": 0.00019489802419857917,
"loss": 0.8274,
"step": 342
},
{
"epoch": 0.7686274509803922,
"grad_norm": 0.2448015958070755,
"learning_rate": 0.00019483615800121716,
"loss": 0.8226,
"step": 343
},
{
"epoch": 0.7708683473389356,
"grad_norm": 0.26445460319519043,
"learning_rate": 0.00019477392890760839,
"loss": 0.8492,
"step": 344
},
{
"epoch": 0.773109243697479,
"grad_norm": 0.2573208808898926,
"learning_rate": 0.00019471133715587622,
"loss": 0.8294,
"step": 345
},
{
"epoch": 0.7753501400560224,
"grad_norm": 0.26753151416778564,
"learning_rate": 0.00019464838298553173,
"loss": 0.8446,
"step": 346
},
{
"epoch": 0.7775910364145658,
"grad_norm": 0.2667967975139618,
"learning_rate": 0.00019458506663747285,
"loss": 0.8388,
"step": 347
},
{
"epoch": 0.7798319327731092,
"grad_norm": 0.25934740900993347,
"learning_rate": 0.00019452138835398332,
"loss": 0.8149,
"step": 348
},
{
"epoch": 0.7820728291316527,
"grad_norm": 0.2525850832462311,
"learning_rate": 0.00019445734837873202,
"loss": 0.8325,
"step": 349
},
{
"epoch": 0.7843137254901961,
"grad_norm": 0.2837854325771332,
"learning_rate": 0.00019439294695677167,
"loss": 0.8795,
"step": 350
},
{
"epoch": 0.7865546218487395,
"grad_norm": 0.26006975769996643,
"learning_rate": 0.00019432818433453818,
"loss": 0.8267,
"step": 351
},
{
"epoch": 0.7887955182072829,
"grad_norm": 0.24872632324695587,
"learning_rate": 0.00019426306075984965,
"loss": 0.8553,
"step": 352
},
{
"epoch": 0.7910364145658263,
"grad_norm": 0.23633168637752533,
"learning_rate": 0.00019419757648190533,
"loss": 0.8544,
"step": 353
},
{
"epoch": 0.7932773109243697,
"grad_norm": 0.2425261288881302,
"learning_rate": 0.00019413173175128473,
"loss": 0.8645,
"step": 354
},
{
"epoch": 0.7955182072829131,
"grad_norm": 0.23861053586006165,
"learning_rate": 0.00019406552681994663,
"loss": 0.8363,
"step": 355
},
{
"epoch": 0.7977591036414566,
"grad_norm": 0.24729354679584503,
"learning_rate": 0.00019399896194122822,
"loss": 0.8401,
"step": 356
},
{
"epoch": 0.8,
"grad_norm": 0.23853589594364166,
"learning_rate": 0.000193932037369844,
"loss": 0.8237,
"step": 357
},
{
"epoch": 0.8022408963585435,
"grad_norm": 0.2654939293861389,
"learning_rate": 0.00019386475336188484,
"loss": 0.7491,
"step": 358
},
{
"epoch": 0.8044817927170869,
"grad_norm": 0.27639004588127136,
"learning_rate": 0.000193797110174817,
"loss": 0.8755,
"step": 359
},
{
"epoch": 0.8067226890756303,
"grad_norm": 0.23675435781478882,
"learning_rate": 0.00019372910806748125,
"loss": 0.8292,
"step": 360
},
{
"epoch": 0.8089635854341737,
"grad_norm": 0.2371446043252945,
"learning_rate": 0.0001936607473000917,
"loss": 0.809,
"step": 361
},
{
"epoch": 0.8112044817927171,
"grad_norm": 0.2529555559158325,
"learning_rate": 0.0001935920281342349,
"loss": 0.8403,
"step": 362
},
{
"epoch": 0.8134453781512605,
"grad_norm": 0.2568047344684601,
"learning_rate": 0.00019352295083286896,
"loss": 0.7848,
"step": 363
},
{
"epoch": 0.8156862745098039,
"grad_norm": 0.2613024413585663,
"learning_rate": 0.0001934535156603222,
"loss": 0.844,
"step": 364
},
{
"epoch": 0.8179271708683473,
"grad_norm": 0.24079741537570953,
"learning_rate": 0.0001933837228822925,
"loss": 0.8029,
"step": 365
},
{
"epoch": 0.8201680672268907,
"grad_norm": 0.24890519678592682,
"learning_rate": 0.0001933135727658462,
"loss": 0.8712,
"step": 366
},
{
"epoch": 0.8224089635854341,
"grad_norm": 0.25198838114738464,
"learning_rate": 0.00019324306557941682,
"loss": 0.8368,
"step": 367
},
{
"epoch": 0.8246498599439775,
"grad_norm": 0.2470208704471588,
"learning_rate": 0.0001931722015928044,
"loss": 0.8896,
"step": 368
},
{
"epoch": 0.826890756302521,
"grad_norm": 0.25046589970588684,
"learning_rate": 0.00019310098107717418,
"loss": 0.7602,
"step": 369
},
{
"epoch": 0.8291316526610645,
"grad_norm": 0.28005844354629517,
"learning_rate": 0.0001930294043050558,
"loss": 0.9443,
"step": 370
},
{
"epoch": 0.8313725490196079,
"grad_norm": 0.6102829575538635,
"learning_rate": 0.00019295747155034202,
"loss": 0.911,
"step": 371
},
{
"epoch": 0.8336134453781513,
"grad_norm": 0.2472289353609085,
"learning_rate": 0.0001928851830882879,
"loss": 0.8271,
"step": 372
},
{
"epoch": 0.8358543417366947,
"grad_norm": 0.24470177292823792,
"learning_rate": 0.0001928125391955095,
"loss": 0.7837,
"step": 373
},
{
"epoch": 0.8380952380952381,
"grad_norm": 0.2458019107580185,
"learning_rate": 0.00019273954014998308,
"loss": 0.8496,
"step": 374
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.249577596783638,
"learning_rate": 0.00019266618623104385,
"loss": 0.7729,
"step": 375
},
{
"epoch": 0.8425770308123249,
"grad_norm": 0.2461290806531906,
"learning_rate": 0.000192592477719385,
"loss": 0.7955,
"step": 376
},
{
"epoch": 0.8448179271708683,
"grad_norm": 0.27021750807762146,
"learning_rate": 0.00019251841489705655,
"loss": 0.9384,
"step": 377
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.2547335624694824,
"learning_rate": 0.00019244399804746435,
"loss": 0.8254,
"step": 378
},
{
"epoch": 0.8492997198879552,
"grad_norm": 0.2620961368083954,
"learning_rate": 0.0001923692274553689,
"loss": 0.8587,
"step": 379
},
{
"epoch": 0.8515406162464986,
"grad_norm": 0.25756919384002686,
"learning_rate": 0.0001922941034068844,
"loss": 0.7906,
"step": 380
},
{
"epoch": 0.853781512605042,
"grad_norm": 0.25694355368614197,
"learning_rate": 0.0001922186261894775,
"loss": 0.7887,
"step": 381
},
{
"epoch": 0.8560224089635854,
"grad_norm": 0.2538197636604309,
"learning_rate": 0.0001921427960919663,
"loss": 0.8861,
"step": 382
},
{
"epoch": 0.8582633053221288,
"grad_norm": 0.2463122308254242,
"learning_rate": 0.00019206661340451925,
"loss": 0.8175,
"step": 383
},
{
"epoch": 0.8605042016806723,
"grad_norm": 0.23905646800994873,
"learning_rate": 0.00019199007841865396,
"loss": 0.7914,
"step": 384
},
{
"epoch": 0.8627450980392157,
"grad_norm": 0.2590409517288208,
"learning_rate": 0.0001919131914272361,
"loss": 0.8385,
"step": 385
},
{
"epoch": 0.8649859943977591,
"grad_norm": 0.2520500719547272,
"learning_rate": 0.00019183595272447842,
"loss": 0.7961,
"step": 386
},
{
"epoch": 0.8672268907563025,
"grad_norm": 0.2568177878856659,
"learning_rate": 0.00019175836260593938,
"loss": 0.8051,
"step": 387
},
{
"epoch": 0.869467787114846,
"grad_norm": 0.27684271335601807,
"learning_rate": 0.00019168042136852228,
"loss": 0.8953,
"step": 388
},
{
"epoch": 0.8717086834733894,
"grad_norm": 0.26281246542930603,
"learning_rate": 0.0001916021293104739,
"loss": 0.8602,
"step": 389
},
{
"epoch": 0.8739495798319328,
"grad_norm": 0.29199978709220886,
"learning_rate": 0.00019152348673138353,
"loss": 0.9776,
"step": 390
},
{
"epoch": 0.8761904761904762,
"grad_norm": 0.25914543867111206,
"learning_rate": 0.0001914444939321817,
"loss": 0.8394,
"step": 391
},
{
"epoch": 0.8784313725490196,
"grad_norm": 0.2472960352897644,
"learning_rate": 0.0001913651512151391,
"loss": 0.7995,
"step": 392
},
{
"epoch": 0.880672268907563,
"grad_norm": 0.24599507451057434,
"learning_rate": 0.00019128545888386536,
"loss": 0.7801,
"step": 393
},
{
"epoch": 0.8829131652661064,
"grad_norm": 0.23776014149188995,
"learning_rate": 0.00019120541724330803,
"loss": 0.8068,
"step": 394
},
{
"epoch": 0.8851540616246498,
"grad_norm": 0.27298274636268616,
"learning_rate": 0.0001911250265997512,
"loss": 0.9139,
"step": 395
},
{
"epoch": 0.8873949579831932,
"grad_norm": 0.2653828561306,
"learning_rate": 0.0001910442872608145,
"loss": 0.8917,
"step": 396
},
{
"epoch": 0.8896358543417366,
"grad_norm": 0.25794684886932373,
"learning_rate": 0.00019096319953545185,
"loss": 0.819,
"step": 397
},
{
"epoch": 0.8918767507002802,
"grad_norm": 0.24696165323257446,
"learning_rate": 0.0001908817637339503,
"loss": 0.7535,
"step": 398
},
{
"epoch": 0.8941176470588236,
"grad_norm": 0.2529006898403168,
"learning_rate": 0.00019079998016792885,
"loss": 0.7379,
"step": 399
},
{
"epoch": 0.896358543417367,
"grad_norm": 0.24874147772789001,
"learning_rate": 0.00019071784915033717,
"loss": 0.8973,
"step": 400
},
{
"epoch": 0.8985994397759104,
"grad_norm": 0.27747422456741333,
"learning_rate": 0.00019063537099545455,
"loss": 0.8685,
"step": 401
},
{
"epoch": 0.9008403361344538,
"grad_norm": 0.25184211134910583,
"learning_rate": 0.00019055254601888866,
"loss": 0.7937,
"step": 402
},
{
"epoch": 0.9030812324929972,
"grad_norm": 0.2565883994102478,
"learning_rate": 0.00019046937453757413,
"loss": 0.8677,
"step": 403
},
{
"epoch": 0.9053221288515406,
"grad_norm": 0.24833756685256958,
"learning_rate": 0.00019038585686977167,
"loss": 0.8777,
"step": 404
},
{
"epoch": 0.907563025210084,
"grad_norm": 0.24929295480251312,
"learning_rate": 0.00019030199333506666,
"loss": 0.8167,
"step": 405
},
{
"epoch": 0.9098039215686274,
"grad_norm": 0.24500809609889984,
"learning_rate": 0.00019021778425436795,
"loss": 0.8675,
"step": 406
},
{
"epoch": 0.9120448179271708,
"grad_norm": 0.25895681977272034,
"learning_rate": 0.0001901332299499066,
"loss": 0.8718,
"step": 407
},
{
"epoch": 0.9142857142857143,
"grad_norm": 0.22836971282958984,
"learning_rate": 0.00019004833074523478,
"loss": 0.8602,
"step": 408
},
{
"epoch": 0.9165266106442577,
"grad_norm": 0.26318010687828064,
"learning_rate": 0.00018996308696522433,
"loss": 0.838,
"step": 409
},
{
"epoch": 0.9187675070028011,
"grad_norm": 0.24389663338661194,
"learning_rate": 0.00018987749893606575,
"loss": 0.7798,
"step": 410
},
{
"epoch": 0.9210084033613445,
"grad_norm": 0.26448702812194824,
"learning_rate": 0.0001897915669852667,
"loss": 0.8053,
"step": 411
},
{
"epoch": 0.923249299719888,
"grad_norm": 0.25245535373687744,
"learning_rate": 0.000189705291441651,
"loss": 0.7915,
"step": 412
},
{
"epoch": 0.9254901960784314,
"grad_norm": 0.25623124837875366,
"learning_rate": 0.00018961867263535715,
"loss": 0.9167,
"step": 413
},
{
"epoch": 0.9277310924369748,
"grad_norm": 0.25271835923194885,
"learning_rate": 0.00018953171089783723,
"loss": 0.8663,
"step": 414
},
{
"epoch": 0.9299719887955182,
"grad_norm": 0.24757803976535797,
"learning_rate": 0.00018944440656185556,
"loss": 0.8411,
"step": 415
},
{
"epoch": 0.9322128851540616,
"grad_norm": 0.23879915475845337,
"learning_rate": 0.00018935675996148738,
"loss": 0.8071,
"step": 416
},
{
"epoch": 0.934453781512605,
"grad_norm": 0.3934721350669861,
"learning_rate": 0.0001892687714321177,
"loss": 0.7911,
"step": 417
},
{
"epoch": 0.9366946778711485,
"grad_norm": 0.27968111634254456,
"learning_rate": 0.00018918044131043985,
"loss": 0.8452,
"step": 418
},
{
"epoch": 0.9389355742296919,
"grad_norm": 0.2701101303100586,
"learning_rate": 0.00018909176993445442,
"loss": 0.8723,
"step": 419
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.2606533169746399,
"learning_rate": 0.00018900275764346768,
"loss": 0.7908,
"step": 420
},
{
"epoch": 0.9434173669467787,
"grad_norm": 0.2672193944454193,
"learning_rate": 0.00018891340477809055,
"loss": 0.9491,
"step": 421
},
{
"epoch": 0.9456582633053221,
"grad_norm": 0.2676648795604706,
"learning_rate": 0.00018882371168023706,
"loss": 0.8352,
"step": 422
},
{
"epoch": 0.9478991596638655,
"grad_norm": 0.265023410320282,
"learning_rate": 0.0001887336786931233,
"loss": 0.7894,
"step": 423
},
{
"epoch": 0.9501400560224089,
"grad_norm": 0.24014434218406677,
"learning_rate": 0.00018864330616126586,
"loss": 0.8394,
"step": 424
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.24389366805553436,
"learning_rate": 0.00018855259443048067,
"loss": 0.7857,
"step": 425
},
{
"epoch": 0.9546218487394958,
"grad_norm": 0.2471270114183426,
"learning_rate": 0.00018846154384788162,
"loss": 0.8576,
"step": 426
},
{
"epoch": 0.9568627450980393,
"grad_norm": 0.2577723264694214,
"learning_rate": 0.00018837015476187916,
"loss": 0.8377,
"step": 427
},
{
"epoch": 0.9591036414565827,
"grad_norm": 0.3224787414073944,
"learning_rate": 0.00018827842752217917,
"loss": 0.8801,
"step": 428
},
{
"epoch": 0.9613445378151261,
"grad_norm": 0.25494757294654846,
"learning_rate": 0.00018818636247978145,
"loss": 0.8173,
"step": 429
},
{
"epoch": 0.9635854341736695,
"grad_norm": 0.24220331013202667,
"learning_rate": 0.00018809395998697833,
"loss": 0.7747,
"step": 430
},
{
"epoch": 0.9658263305322129,
"grad_norm": 0.2741996645927429,
"learning_rate": 0.00018800122039735358,
"loss": 0.8636,
"step": 431
},
{
"epoch": 0.9680672268907563,
"grad_norm": 0.2558667063713074,
"learning_rate": 0.0001879081440657807,
"loss": 0.8456,
"step": 432
},
{
"epoch": 0.9703081232492997,
"grad_norm": 0.25905126333236694,
"learning_rate": 0.00018781473134842197,
"loss": 0.7961,
"step": 433
},
{
"epoch": 0.9725490196078431,
"grad_norm": 0.23688547313213348,
"learning_rate": 0.00018772098260272667,
"loss": 0.7555,
"step": 434
},
{
"epoch": 0.9747899159663865,
"grad_norm": 0.2701859176158905,
"learning_rate": 0.00018762689818743007,
"loss": 0.9353,
"step": 435
},
{
"epoch": 0.9770308123249299,
"grad_norm": 0.24914319813251495,
"learning_rate": 0.00018753247846255174,
"loss": 0.7678,
"step": 436
},
{
"epoch": 0.9792717086834734,
"grad_norm": 0.26182475686073303,
"learning_rate": 0.00018743772378939448,
"loss": 0.8374,
"step": 437
},
{
"epoch": 0.9815126050420168,
"grad_norm": 0.2661891281604767,
"learning_rate": 0.00018734263453054273,
"loss": 0.8816,
"step": 438
},
{
"epoch": 0.9837535014005602,
"grad_norm": 0.24797801673412323,
"learning_rate": 0.0001872472110498612,
"loss": 0.8069,
"step": 439
},
{
"epoch": 0.9859943977591037,
"grad_norm": 0.24370808899402618,
"learning_rate": 0.0001871514537124936,
"loss": 0.7405,
"step": 440
},
{
"epoch": 0.9882352941176471,
"grad_norm": 0.2744685709476471,
"learning_rate": 0.00018705536288486118,
"loss": 0.7706,
"step": 441
},
{
"epoch": 0.9904761904761905,
"grad_norm": 0.265438437461853,
"learning_rate": 0.0001869589389346611,
"loss": 0.8649,
"step": 442
},
{
"epoch": 0.9927170868347339,
"grad_norm": 0.24661187827587128,
"learning_rate": 0.0001868621822308655,
"loss": 0.8138,
"step": 443
},
{
"epoch": 0.9949579831932773,
"grad_norm": 0.2580495774745941,
"learning_rate": 0.00018676509314371974,
"loss": 0.7765,
"step": 444
},
{
"epoch": 0.9971988795518207,
"grad_norm": 0.2546556293964386,
"learning_rate": 0.00018666767204474094,
"loss": 0.8873,
"step": 445
},
{
"epoch": 0.9994397759103641,
"grad_norm": 0.25944411754608154,
"learning_rate": 0.00018656991930671686,
"loss": 0.8651,
"step": 446
},
{
"epoch": 1.0016806722689076,
"grad_norm": 0.25578224658966064,
"learning_rate": 0.00018647183530370415,
"loss": 0.9027,
"step": 447
},
{
"epoch": 1.003921568627451,
"grad_norm": 0.24953621625900269,
"learning_rate": 0.00018637342041102718,
"loss": 0.6952,
"step": 448
},
{
"epoch": 1.0061624649859944,
"grad_norm": 0.24714386463165283,
"learning_rate": 0.0001862746750052764,
"loss": 0.6983,
"step": 449
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.2614993155002594,
"learning_rate": 0.00018617559946430706,
"loss": 0.7272,
"step": 450
},
{
"epoch": 1.0106442577030812,
"grad_norm": 0.27017897367477417,
"learning_rate": 0.00018607619416723765,
"loss": 0.7644,
"step": 451
},
{
"epoch": 1.0128851540616246,
"grad_norm": 0.28906649351119995,
"learning_rate": 0.0001859764594944485,
"loss": 0.7651,
"step": 452
},
{
"epoch": 1.015126050420168,
"grad_norm": 0.28600478172302246,
"learning_rate": 0.00018587639582758031,
"loss": 0.732,
"step": 453
},
{
"epoch": 1.0173669467787114,
"grad_norm": 0.28704679012298584,
"learning_rate": 0.0001857760035495327,
"loss": 0.713,
"step": 454
},
{
"epoch": 1.0196078431372548,
"grad_norm": 0.36615249514579773,
"learning_rate": 0.0001856752830444628,
"loss": 0.8691,
"step": 455
},
{
"epoch": 1.0218487394957982,
"grad_norm": 0.28983429074287415,
"learning_rate": 0.00018557423469778357,
"loss": 0.7513,
"step": 456
},
{
"epoch": 1.0240896358543417,
"grad_norm": 0.2740822732448578,
"learning_rate": 0.0001854728588961626,
"loss": 0.7319,
"step": 457
},
{
"epoch": 1.026330532212885,
"grad_norm": 0.26059162616729736,
"learning_rate": 0.00018537115602752053,
"loss": 0.7727,
"step": 458
},
{
"epoch": 1.0285714285714285,
"grad_norm": 0.25983962416648865,
"learning_rate": 0.00018526912648102943,
"loss": 0.7428,
"step": 459
},
{
"epoch": 1.0308123249299719,
"grad_norm": 0.2570248246192932,
"learning_rate": 0.0001851667706471115,
"loss": 0.8065,
"step": 460
},
{
"epoch": 1.0330532212885155,
"grad_norm": 0.2523268163204193,
"learning_rate": 0.0001850640889174375,
"loss": 0.729,
"step": 461
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.2655740976333618,
"learning_rate": 0.0001849610816849252,
"loss": 0.7717,
"step": 462
},
{
"epoch": 1.0375350140056023,
"grad_norm": 0.2684931755065918,
"learning_rate": 0.0001848577493437379,
"loss": 0.6862,
"step": 463
},
{
"epoch": 1.0397759103641457,
"grad_norm": 0.29195067286491394,
"learning_rate": 0.00018475409228928312,
"loss": 0.7204,
"step": 464
},
{
"epoch": 1.0420168067226891,
"grad_norm": 0.299556165933609,
"learning_rate": 0.00018465011091821072,
"loss": 0.8587,
"step": 465
},
{
"epoch": 1.0442577030812326,
"grad_norm": 0.2898513078689575,
"learning_rate": 0.00018454580562841163,
"loss": 0.7628,
"step": 466
},
{
"epoch": 1.046498599439776,
"grad_norm": 0.30156147480010986,
"learning_rate": 0.00018444117681901638,
"loss": 0.7437,
"step": 467
},
{
"epoch": 1.0487394957983194,
"grad_norm": 0.2778584361076355,
"learning_rate": 0.00018433622489039334,
"loss": 0.6211,
"step": 468
},
{
"epoch": 1.0509803921568628,
"grad_norm": 0.29723167419433594,
"learning_rate": 0.00018423095024414733,
"loss": 0.6958,
"step": 469
},
{
"epoch": 1.0532212885154062,
"grad_norm": 0.27120769023895264,
"learning_rate": 0.00018412535328311814,
"loss": 0.8491,
"step": 470
},
{
"epoch": 1.0554621848739496,
"grad_norm": 0.28341060876846313,
"learning_rate": 0.00018401943441137886,
"loss": 0.65,
"step": 471
},
{
"epoch": 1.057703081232493,
"grad_norm": 0.3047507107257843,
"learning_rate": 0.00018391319403423436,
"loss": 0.7248,
"step": 472
},
{
"epoch": 1.0599439775910364,
"grad_norm": 0.41425445675849915,
"learning_rate": 0.00018380663255821995,
"loss": 0.7587,
"step": 473
},
{
"epoch": 1.0621848739495798,
"grad_norm": 0.29975569248199463,
"learning_rate": 0.00018369975039109936,
"loss": 0.75,
"step": 474
},
{
"epoch": 1.0644257703081232,
"grad_norm": 0.2992658317089081,
"learning_rate": 0.0001835925479418637,
"loss": 0.7145,
"step": 475
},
{
"epoch": 1.0666666666666667,
"grad_norm": 0.29061266779899597,
"learning_rate": 0.00018348502562072955,
"loss": 0.7085,
"step": 476
},
{
"epoch": 1.06890756302521,
"grad_norm": 0.2972713112831116,
"learning_rate": 0.00018337718383913752,
"loss": 0.6741,
"step": 477
},
{
"epoch": 1.0711484593837535,
"grad_norm": 0.33825528621673584,
"learning_rate": 0.0001832690230097506,
"loss": 0.9284,
"step": 478
},
{
"epoch": 1.0733893557422969,
"grad_norm": 0.439996600151062,
"learning_rate": 0.00018316054354645283,
"loss": 0.7515,
"step": 479
},
{
"epoch": 1.0756302521008403,
"grad_norm": 0.28441861271858215,
"learning_rate": 0.00018305174586434725,
"loss": 0.73,
"step": 480
},
{
"epoch": 1.0778711484593837,
"grad_norm": 0.26948654651641846,
"learning_rate": 0.00018294263037975475,
"loss": 0.6871,
"step": 481
},
{
"epoch": 1.080112044817927,
"grad_norm": 0.29222843050956726,
"learning_rate": 0.00018283319751021232,
"loss": 0.6827,
"step": 482
},
{
"epoch": 1.0823529411764705,
"grad_norm": 0.31007933616638184,
"learning_rate": 0.00018272344767447134,
"loss": 0.793,
"step": 483
},
{
"epoch": 1.084593837535014,
"grad_norm": 0.29892498254776,
"learning_rate": 0.0001826133812924962,
"loss": 0.6541,
"step": 484
},
{
"epoch": 1.0868347338935573,
"grad_norm": 0.3294057250022888,
"learning_rate": 0.00018250299878546245,
"loss": 0.7719,
"step": 485
},
{
"epoch": 1.0890756302521007,
"grad_norm": 0.2897263765335083,
"learning_rate": 0.00018239230057575542,
"loss": 0.7306,
"step": 486
},
{
"epoch": 1.0913165266106442,
"grad_norm": 0.2852244973182678,
"learning_rate": 0.00018228128708696844,
"loss": 0.6512,
"step": 487
},
{
"epoch": 1.0935574229691878,
"grad_norm": 0.3132948577404022,
"learning_rate": 0.00018216995874390128,
"loss": 0.6859,
"step": 488
},
{
"epoch": 1.0957983193277312,
"grad_norm": 0.33552494645118713,
"learning_rate": 0.0001820583159725585,
"loss": 0.7039,
"step": 489
},
{
"epoch": 1.0980392156862746,
"grad_norm": 0.3188576102256775,
"learning_rate": 0.0001819463592001479,
"loss": 0.702,
"step": 490
},
{
"epoch": 1.100280112044818,
"grad_norm": 0.3157289922237396,
"learning_rate": 0.00018183408885507873,
"loss": 0.706,
"step": 491
},
{
"epoch": 1.1025210084033614,
"grad_norm": 0.27868008613586426,
"learning_rate": 0.00018172150536696025,
"loss": 0.6802,
"step": 492
},
{
"epoch": 1.1047619047619048,
"grad_norm": 0.29156798124313354,
"learning_rate": 0.0001816086091665999,
"loss": 0.7465,
"step": 493
},
{
"epoch": 1.1070028011204482,
"grad_norm": 0.2826281487941742,
"learning_rate": 0.00018149540068600182,
"loss": 0.7023,
"step": 494
},
{
"epoch": 1.1092436974789917,
"grad_norm": 0.3162992596626282,
"learning_rate": 0.00018138188035836497,
"loss": 0.7039,
"step": 495
},
{
"epoch": 1.111484593837535,
"grad_norm": 0.2732091248035431,
"learning_rate": 0.00018126804861808176,
"loss": 0.7372,
"step": 496
},
{
"epoch": 1.1137254901960785,
"grad_norm": 0.3039727807044983,
"learning_rate": 0.0001811539059007361,
"loss": 0.7148,
"step": 497
},
{
"epoch": 1.1159663865546219,
"grad_norm": 0.29023319482803345,
"learning_rate": 0.00018103945264310204,
"loss": 0.644,
"step": 498
},
{
"epoch": 1.1182072829131653,
"grad_norm": 0.2907212972640991,
"learning_rate": 0.00018092468928314172,
"loss": 0.7281,
"step": 499
},
{
"epoch": 1.1204481792717087,
"grad_norm": 0.2867254912853241,
"learning_rate": 0.0001808096162600041,
"loss": 0.7652,
"step": 500
},
{
"epoch": 1.122689075630252,
"grad_norm": 0.29929518699645996,
"learning_rate": 0.000180694234014023,
"loss": 0.713,
"step": 501
},
{
"epoch": 1.1249299719887955,
"grad_norm": 0.3078075647354126,
"learning_rate": 0.00018057854298671546,
"loss": 0.7255,
"step": 502
},
{
"epoch": 1.127170868347339,
"grad_norm": 0.3214311897754669,
"learning_rate": 0.0001804625436207802,
"loss": 0.7369,
"step": 503
},
{
"epoch": 1.1294117647058823,
"grad_norm": 0.3541346490383148,
"learning_rate": 0.00018034623636009568,
"loss": 0.8864,
"step": 504
},
{
"epoch": 1.1316526610644257,
"grad_norm": 0.31777986884117126,
"learning_rate": 0.00018022962164971867,
"loss": 0.7382,
"step": 505
},
{
"epoch": 1.1338935574229692,
"grad_norm": 0.3204372525215149,
"learning_rate": 0.00018011269993588232,
"loss": 0.7998,
"step": 506
},
{
"epoch": 1.1361344537815126,
"grad_norm": 0.30363792181015015,
"learning_rate": 0.0001799954716659946,
"loss": 0.7145,
"step": 507
},
{
"epoch": 1.138375350140056,
"grad_norm": 0.3061518371105194,
"learning_rate": 0.00017987793728863651,
"loss": 0.7217,
"step": 508
},
{
"epoch": 1.1406162464985994,
"grad_norm": 0.3020443618297577,
"learning_rate": 0.00017976009725356038,
"loss": 0.6765,
"step": 509
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.324278861284256,
"learning_rate": 0.00017964195201168817,
"loss": 0.8419,
"step": 510
},
{
"epoch": 1.1450980392156862,
"grad_norm": 0.3056583106517792,
"learning_rate": 0.00017952350201510978,
"loss": 0.8153,
"step": 511
},
{
"epoch": 1.1473389355742296,
"grad_norm": 0.3339795470237732,
"learning_rate": 0.00017940474771708115,
"loss": 0.8701,
"step": 512
},
{
"epoch": 1.149579831932773,
"grad_norm": 0.2845865488052368,
"learning_rate": 0.00017928568957202278,
"loss": 0.6865,
"step": 513
},
{
"epoch": 1.1518207282913164,
"grad_norm": 0.3049021065235138,
"learning_rate": 0.0001791663280355178,
"loss": 0.6772,
"step": 514
},
{
"epoch": 1.1540616246498598,
"grad_norm": 0.2814830541610718,
"learning_rate": 0.00017904666356431028,
"loss": 0.7092,
"step": 515
},
{
"epoch": 1.1563025210084033,
"grad_norm": 0.2870534360408783,
"learning_rate": 0.0001789266966163035,
"loss": 0.7056,
"step": 516
},
{
"epoch": 1.1585434173669467,
"grad_norm": 0.2947506606578827,
"learning_rate": 0.00017880642765055816,
"loss": 0.7208,
"step": 517
},
{
"epoch": 1.1607843137254903,
"grad_norm": 0.29607728123664856,
"learning_rate": 0.00017868585712729068,
"loss": 0.7441,
"step": 518
},
{
"epoch": 1.1630252100840337,
"grad_norm": 0.3445536494255066,
"learning_rate": 0.00017856498550787144,
"loss": 0.7116,
"step": 519
},
{
"epoch": 1.165266106442577,
"grad_norm": 0.3803198039531708,
"learning_rate": 0.0001784438132548229,
"loss": 0.7871,
"step": 520
},
{
"epoch": 1.1675070028011205,
"grad_norm": 0.37584301829338074,
"learning_rate": 0.00017832234083181795,
"loss": 0.7877,
"step": 521
},
{
"epoch": 1.169747899159664,
"grad_norm": 0.3182366192340851,
"learning_rate": 0.0001782005687036781,
"loss": 0.7112,
"step": 522
},
{
"epoch": 1.1719887955182073,
"grad_norm": 0.3022582530975342,
"learning_rate": 0.00017807849733637176,
"loss": 0.7275,
"step": 523
},
{
"epoch": 1.1742296918767507,
"grad_norm": 0.31011196970939636,
"learning_rate": 0.00017795612719701226,
"loss": 0.7439,
"step": 524
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.30964523553848267,
"learning_rate": 0.00017783345875385634,
"loss": 0.7788,
"step": 525
},
{
"epoch": 1.1787114845938376,
"grad_norm": 0.29043567180633545,
"learning_rate": 0.00017771049247630215,
"loss": 0.7925,
"step": 526
},
{
"epoch": 1.180952380952381,
"grad_norm": 0.28265780210494995,
"learning_rate": 0.00017758722883488745,
"loss": 0.8563,
"step": 527
},
{
"epoch": 1.1831932773109244,
"grad_norm": 0.2864142060279846,
"learning_rate": 0.00017746366830128803,
"loss": 0.6427,
"step": 528
},
{
"epoch": 1.1854341736694678,
"grad_norm": 0.28759056329727173,
"learning_rate": 0.00017733981134831567,
"loss": 0.6629,
"step": 529
},
{
"epoch": 1.1876750700280112,
"grad_norm": 0.3355376422405243,
"learning_rate": 0.00017721565844991643,
"loss": 0.7418,
"step": 530
},
{
"epoch": 1.1899159663865546,
"grad_norm": 0.3486466109752655,
"learning_rate": 0.0001770912100811688,
"loss": 0.7885,
"step": 531
},
{
"epoch": 1.192156862745098,
"grad_norm": 0.2857566475868225,
"learning_rate": 0.000176966466718282,
"loss": 0.6359,
"step": 532
},
{
"epoch": 1.1943977591036414,
"grad_norm": 0.3142206072807312,
"learning_rate": 0.00017684142883859388,
"loss": 0.6964,
"step": 533
},
{
"epoch": 1.1966386554621848,
"grad_norm": 0.3039001524448395,
"learning_rate": 0.00017671609692056946,
"loss": 0.6951,
"step": 534
},
{
"epoch": 1.1988795518207283,
"grad_norm": 0.31849658489227295,
"learning_rate": 0.00017659047144379878,
"loss": 0.7428,
"step": 535
},
{
"epoch": 1.2011204481792717,
"grad_norm": 0.2935810983181,
"learning_rate": 0.00017646455288899534,
"loss": 0.7668,
"step": 536
},
{
"epoch": 1.203361344537815,
"grad_norm": 0.2911589443683624,
"learning_rate": 0.00017633834173799403,
"loss": 0.7237,
"step": 537
},
{
"epoch": 1.2056022408963585,
"grad_norm": 0.28567075729370117,
"learning_rate": 0.00017621183847374935,
"loss": 0.7464,
"step": 538
},
{
"epoch": 1.2078431372549019,
"grad_norm": 0.299709677696228,
"learning_rate": 0.00017608504358033363,
"loss": 0.7176,
"step": 539
},
{
"epoch": 1.2100840336134453,
"grad_norm": 0.2959325313568115,
"learning_rate": 0.00017595795754293513,
"loss": 0.6807,
"step": 540
},
{
"epoch": 1.2123249299719887,
"grad_norm": 0.27634045481681824,
"learning_rate": 0.00017583058084785625,
"loss": 0.6773,
"step": 541
},
{
"epoch": 1.2145658263305321,
"grad_norm": 0.33680498600006104,
"learning_rate": 0.00017570291398251152,
"loss": 0.7985,
"step": 542
},
{
"epoch": 1.2168067226890757,
"grad_norm": 0.318645179271698,
"learning_rate": 0.00017557495743542585,
"loss": 0.6376,
"step": 543
},
{
"epoch": 1.2190476190476192,
"grad_norm": 0.31514203548431396,
"learning_rate": 0.0001754467116962326,
"loss": 0.7327,
"step": 544
},
{
"epoch": 1.2212885154061626,
"grad_norm": 0.32726311683654785,
"learning_rate": 0.0001753181772556719,
"loss": 0.7329,
"step": 545
},
{
"epoch": 1.223529411764706,
"grad_norm": 0.3274557292461395,
"learning_rate": 0.00017518935460558838,
"loss": 0.6818,
"step": 546
},
{
"epoch": 1.2257703081232494,
"grad_norm": 0.3218080699443817,
"learning_rate": 0.0001750602442389297,
"loss": 0.734,
"step": 547
},
{
"epoch": 1.2280112044817928,
"grad_norm": 0.3002873659133911,
"learning_rate": 0.0001749308466497444,
"loss": 0.617,
"step": 548
},
{
"epoch": 1.2302521008403362,
"grad_norm": 0.30696091055870056,
"learning_rate": 0.00017480116233318014,
"loss": 0.7417,
"step": 549
},
{
"epoch": 1.2324929971988796,
"grad_norm": 0.341802716255188,
"learning_rate": 0.0001746711917854817,
"loss": 0.7806,
"step": 550
},
{
"epoch": 1.234733893557423,
"grad_norm": 0.3180257976055145,
"learning_rate": 0.00017454093550398918,
"loss": 0.6813,
"step": 551
},
{
"epoch": 1.2369747899159664,
"grad_norm": 0.3485969007015228,
"learning_rate": 0.00017441039398713608,
"loss": 0.7385,
"step": 552
},
{
"epoch": 1.2392156862745098,
"grad_norm": 0.3271244466304779,
"learning_rate": 0.00017427956773444732,
"loss": 0.6891,
"step": 553
},
{
"epoch": 1.2414565826330533,
"grad_norm": 0.31936562061309814,
"learning_rate": 0.00017414845724653743,
"loss": 0.683,
"step": 554
},
{
"epoch": 1.2436974789915967,
"grad_norm": 0.33722570538520813,
"learning_rate": 0.0001740170630251085,
"loss": 0.8026,
"step": 555
},
{
"epoch": 1.24593837535014,
"grad_norm": 0.3108736276626587,
"learning_rate": 0.00017388538557294852,
"loss": 0.6923,
"step": 556
},
{
"epoch": 1.2481792717086835,
"grad_norm": 0.32877469062805176,
"learning_rate": 0.00017375342539392903,
"loss": 0.7007,
"step": 557
},
{
"epoch": 1.250420168067227,
"grad_norm": 0.3236118257045746,
"learning_rate": 0.00017362118299300361,
"loss": 0.6852,
"step": 558
},
{
"epoch": 1.2526610644257703,
"grad_norm": 0.30720242857933044,
"learning_rate": 0.00017348865887620573,
"loss": 0.6665,
"step": 559
},
{
"epoch": 1.2549019607843137,
"grad_norm": 0.32177719473838806,
"learning_rate": 0.00017335585355064692,
"loss": 0.7097,
"step": 560
},
{
"epoch": 1.2571428571428571,
"grad_norm": 0.31638845801353455,
"learning_rate": 0.0001732227675245147,
"loss": 0.6966,
"step": 561
},
{
"epoch": 1.2593837535014005,
"grad_norm": 0.3173747956752777,
"learning_rate": 0.00017308940130707069,
"loss": 0.6972,
"step": 562
},
{
"epoch": 1.261624649859944,
"grad_norm": 0.31755855679512024,
"learning_rate": 0.00017295575540864877,
"loss": 0.7192,
"step": 563
},
{
"epoch": 1.2638655462184873,
"grad_norm": 0.30816033482551575,
"learning_rate": 0.00017282183034065296,
"loss": 0.6723,
"step": 564
},
{
"epoch": 1.2661064425770308,
"grad_norm": 0.3106657564640045,
"learning_rate": 0.00017268762661555557,
"loss": 0.754,
"step": 565
},
{
"epoch": 1.2683473389355742,
"grad_norm": 0.3460423946380615,
"learning_rate": 0.00017255314474689523,
"loss": 0.7836,
"step": 566
},
{
"epoch": 1.2705882352941176,
"grad_norm": 0.33377137780189514,
"learning_rate": 0.00017241838524927484,
"loss": 0.8304,
"step": 567
},
{
"epoch": 1.272829131652661,
"grad_norm": 0.3150513470172882,
"learning_rate": 0.0001722833486383597,
"loss": 0.9003,
"step": 568
},
{
"epoch": 1.2750700280112044,
"grad_norm": 0.2992112934589386,
"learning_rate": 0.00017214803543087555,
"loss": 0.7655,
"step": 569
},
{
"epoch": 1.2773109243697478,
"grad_norm": 0.2952958345413208,
"learning_rate": 0.00017201244614460643,
"loss": 0.6524,
"step": 570
},
{
"epoch": 1.2795518207282912,
"grad_norm": 0.31578466296195984,
"learning_rate": 0.00017187658129839294,
"loss": 0.7415,
"step": 571
},
{
"epoch": 1.2817927170868346,
"grad_norm": 0.33429616689682007,
"learning_rate": 0.00017174044141213,
"loss": 0.8142,
"step": 572
},
{
"epoch": 1.284033613445378,
"grad_norm": 0.32365548610687256,
"learning_rate": 0.0001716040270067651,
"loss": 0.6806,
"step": 573
},
{
"epoch": 1.2862745098039214,
"grad_norm": 0.3116409182548523,
"learning_rate": 0.00017146733860429612,
"loss": 0.6899,
"step": 574
},
{
"epoch": 1.2885154061624648,
"grad_norm": 0.35744285583496094,
"learning_rate": 0.00017133037672776942,
"loss": 0.74,
"step": 575
},
{
"epoch": 1.2907563025210085,
"grad_norm": 0.2882843613624573,
"learning_rate": 0.00017119314190127788,
"loss": 0.6437,
"step": 576
},
{
"epoch": 1.292997198879552,
"grad_norm": 0.31813856959342957,
"learning_rate": 0.00017105563464995873,
"loss": 0.6738,
"step": 577
},
{
"epoch": 1.2952380952380953,
"grad_norm": 0.32429373264312744,
"learning_rate": 0.00017091785549999176,
"loss": 0.8131,
"step": 578
},
{
"epoch": 1.2974789915966387,
"grad_norm": 0.32878828048706055,
"learning_rate": 0.00017077980497859713,
"loss": 0.6904,
"step": 579
},
{
"epoch": 1.2997198879551821,
"grad_norm": 0.30764883756637573,
"learning_rate": 0.00017064148361403347,
"loss": 0.7343,
"step": 580
},
{
"epoch": 1.3019607843137255,
"grad_norm": 0.319346159696579,
"learning_rate": 0.00017050289193559578,
"loss": 0.6926,
"step": 581
},
{
"epoch": 1.304201680672269,
"grad_norm": 0.31245169043540955,
"learning_rate": 0.00017036403047361335,
"loss": 0.7876,
"step": 582
},
{
"epoch": 1.3064425770308123,
"grad_norm": 0.29874372482299805,
"learning_rate": 0.000170224899759448,
"loss": 0.6569,
"step": 583
},
{
"epoch": 1.3086834733893558,
"grad_norm": 0.3087962865829468,
"learning_rate": 0.00017008550032549167,
"loss": 0.6892,
"step": 584
},
{
"epoch": 1.3109243697478992,
"grad_norm": 0.31366726756095886,
"learning_rate": 0.0001699458327051647,
"loss": 0.7782,
"step": 585
},
{
"epoch": 1.3131652661064426,
"grad_norm": 0.304023802280426,
"learning_rate": 0.00016980589743291363,
"loss": 0.7213,
"step": 586
},
{
"epoch": 1.315406162464986,
"grad_norm": 0.2973663806915283,
"learning_rate": 0.00016966569504420914,
"loss": 0.6942,
"step": 587
},
{
"epoch": 1.3176470588235294,
"grad_norm": 0.3077528178691864,
"learning_rate": 0.0001695252260755441,
"loss": 0.7192,
"step": 588
},
{
"epoch": 1.3198879551820728,
"grad_norm": 0.3302268087863922,
"learning_rate": 0.00016938449106443138,
"loss": 0.7904,
"step": 589
},
{
"epoch": 1.3221288515406162,
"grad_norm": 0.3201766312122345,
"learning_rate": 0.00016924349054940204,
"loss": 0.6851,
"step": 590
},
{
"epoch": 1.3243697478991596,
"grad_norm": 0.31299325823783875,
"learning_rate": 0.00016910222507000294,
"loss": 0.6355,
"step": 591
},
{
"epoch": 1.326610644257703,
"grad_norm": 0.3279891610145569,
"learning_rate": 0.00016896069516679493,
"loss": 0.7587,
"step": 592
},
{
"epoch": 1.3288515406162464,
"grad_norm": 0.3423704206943512,
"learning_rate": 0.0001688189013813507,
"loss": 0.7352,
"step": 593
},
{
"epoch": 1.3310924369747898,
"grad_norm": 0.3414992690086365,
"learning_rate": 0.00016867684425625262,
"loss": 0.6362,
"step": 594
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.3365124464035034,
"learning_rate": 0.00016853452433509086,
"loss": 0.7817,
"step": 595
},
{
"epoch": 1.3355742296918767,
"grad_norm": 0.3402608633041382,
"learning_rate": 0.00016839194216246108,
"loss": 0.713,
"step": 596
},
{
"epoch": 1.3378151260504203,
"grad_norm": 0.3226512670516968,
"learning_rate": 0.00016824909828396255,
"loss": 0.6853,
"step": 597
},
{
"epoch": 1.3400560224089637,
"grad_norm": 0.3510406017303467,
"learning_rate": 0.0001681059932461959,
"loss": 0.7548,
"step": 598
},
{
"epoch": 1.3422969187675071,
"grad_norm": 0.3165980875492096,
"learning_rate": 0.00016796262759676117,
"loss": 0.6952,
"step": 599
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.3366287052631378,
"learning_rate": 0.00016781900188425562,
"loss": 0.7872,
"step": 600
},
{
"epoch": 1.346778711484594,
"grad_norm": 0.30944567918777466,
"learning_rate": 0.00016767511665827166,
"loss": 0.6944,
"step": 601
},
{
"epoch": 1.3490196078431373,
"grad_norm": 0.31927919387817383,
"learning_rate": 0.00016753097246939474,
"loss": 0.6778,
"step": 602
},
{
"epoch": 1.3512605042016808,
"grad_norm": 0.36404141783714294,
"learning_rate": 0.0001673865698692012,
"loss": 0.8198,
"step": 603
},
{
"epoch": 1.3535014005602242,
"grad_norm": 0.28067895770072937,
"learning_rate": 0.00016724190941025627,
"loss": 0.6561,
"step": 604
},
{
"epoch": 1.3557422969187676,
"grad_norm": 0.33814799785614014,
"learning_rate": 0.00016709699164611192,
"loss": 0.725,
"step": 605
},
{
"epoch": 1.357983193277311,
"grad_norm": 0.3168368935585022,
"learning_rate": 0.0001669518171313046,
"loss": 0.7295,
"step": 606
},
{
"epoch": 1.3602240896358544,
"grad_norm": 0.37248319387435913,
"learning_rate": 0.00016680638642135336,
"loss": 0.7566,
"step": 607
},
{
"epoch": 1.3624649859943978,
"grad_norm": 0.32318514585494995,
"learning_rate": 0.00016666070007275748,
"loss": 0.6949,
"step": 608
},
{
"epoch": 1.3647058823529412,
"grad_norm": 0.34030449390411377,
"learning_rate": 0.00016651475864299452,
"loss": 0.6823,
"step": 609
},
{
"epoch": 1.3669467787114846,
"grad_norm": 0.3123067319393158,
"learning_rate": 0.00016636856269051814,
"loss": 0.7236,
"step": 610
},
{
"epoch": 1.369187675070028,
"grad_norm": 0.32035380601882935,
"learning_rate": 0.0001662221127747559,
"loss": 0.7254,
"step": 611
},
{
"epoch": 1.3714285714285714,
"grad_norm": 0.3208359181880951,
"learning_rate": 0.00016607540945610722,
"loss": 0.7309,
"step": 612
},
{
"epoch": 1.3736694677871149,
"grad_norm": 0.31935855746269226,
"learning_rate": 0.00016592845329594112,
"loss": 0.8216,
"step": 613
},
{
"epoch": 1.3759103641456583,
"grad_norm": 0.2906378209590912,
"learning_rate": 0.00016578124485659414,
"loss": 0.7283,
"step": 614
},
{
"epoch": 1.3781512605042017,
"grad_norm": 0.3059033751487732,
"learning_rate": 0.00016563378470136822,
"loss": 0.7455,
"step": 615
},
{
"epoch": 1.380392156862745,
"grad_norm": 0.30803483724594116,
"learning_rate": 0.00016548607339452853,
"loss": 0.6463,
"step": 616
},
{
"epoch": 1.3826330532212885,
"grad_norm": 0.32379990816116333,
"learning_rate": 0.00016533811150130117,
"loss": 0.7454,
"step": 617
},
{
"epoch": 1.384873949579832,
"grad_norm": 0.3142244815826416,
"learning_rate": 0.00016518989958787126,
"loss": 0.7142,
"step": 618
},
{
"epoch": 1.3871148459383753,
"grad_norm": 0.32228538393974304,
"learning_rate": 0.00016504143822138056,
"loss": 0.626,
"step": 619
},
{
"epoch": 1.3893557422969187,
"grad_norm": 0.3554072082042694,
"learning_rate": 0.00016489272796992537,
"loss": 0.6818,
"step": 620
},
{
"epoch": 1.3915966386554621,
"grad_norm": 0.3763510584831238,
"learning_rate": 0.00016474376940255444,
"loss": 0.7584,
"step": 621
},
{
"epoch": 1.3938375350140055,
"grad_norm": 0.3938269317150116,
"learning_rate": 0.0001645945630892666,
"loss": 0.815,
"step": 622
},
{
"epoch": 1.396078431372549,
"grad_norm": 0.32350701093673706,
"learning_rate": 0.00016444510960100879,
"loss": 0.7073,
"step": 623
},
{
"epoch": 1.3983193277310924,
"grad_norm": 0.33908578753471375,
"learning_rate": 0.00016429540950967371,
"loss": 0.8126,
"step": 624
},
{
"epoch": 1.4005602240896358,
"grad_norm": 0.32018956542015076,
"learning_rate": 0.0001641454633880978,
"loss": 0.7489,
"step": 625
},
{
"epoch": 1.4028011204481792,
"grad_norm": 0.324883371591568,
"learning_rate": 0.0001639952718100589,
"loss": 0.7915,
"step": 626
},
{
"epoch": 1.4050420168067226,
"grad_norm": 0.3411893844604492,
"learning_rate": 0.000163844835350274,
"loss": 0.7736,
"step": 627
},
{
"epoch": 1.407282913165266,
"grad_norm": 0.3181053400039673,
"learning_rate": 0.0001636941545843973,
"loss": 0.7398,
"step": 628
},
{
"epoch": 1.4095238095238094,
"grad_norm": 0.3206365704536438,
"learning_rate": 0.00016354323008901776,
"loss": 0.7825,
"step": 629
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.30065983533859253,
"learning_rate": 0.00016339206244165704,
"loss": 0.714,
"step": 630
},
{
"epoch": 1.4140056022408962,
"grad_norm": 0.3132588565349579,
"learning_rate": 0.00016324065222076718,
"loss": 0.8118,
"step": 631
},
{
"epoch": 1.4162464985994399,
"grad_norm": 0.3143763840198517,
"learning_rate": 0.00016308900000572851,
"loss": 0.7045,
"step": 632
},
{
"epoch": 1.4184873949579833,
"grad_norm": 0.3230004608631134,
"learning_rate": 0.00016293710637684732,
"loss": 0.5901,
"step": 633
},
{
"epoch": 1.4207282913165267,
"grad_norm": 0.31268176436424255,
"learning_rate": 0.00016278497191535364,
"loss": 0.7745,
"step": 634
},
{
"epoch": 1.42296918767507,
"grad_norm": 0.33265724778175354,
"learning_rate": 0.00016263259720339916,
"loss": 0.7444,
"step": 635
},
{
"epoch": 1.4252100840336135,
"grad_norm": 0.32188957929611206,
"learning_rate": 0.00016247998282405485,
"loss": 0.7899,
"step": 636
},
{
"epoch": 1.427450980392157,
"grad_norm": 0.3007752001285553,
"learning_rate": 0.0001623271293613088,
"loss": 0.6891,
"step": 637
},
{
"epoch": 1.4296918767507003,
"grad_norm": 0.29558807611465454,
"learning_rate": 0.0001621740374000639,
"loss": 0.7253,
"step": 638
},
{
"epoch": 1.4319327731092437,
"grad_norm": 0.32872480154037476,
"learning_rate": 0.0001620207075261358,
"loss": 0.7858,
"step": 639
},
{
"epoch": 1.4341736694677871,
"grad_norm": 0.30687081813812256,
"learning_rate": 0.00016186714032625035,
"loss": 0.7395,
"step": 640
},
{
"epoch": 1.4364145658263305,
"grad_norm": 0.31080397963523865,
"learning_rate": 0.00016171333638804176,
"loss": 0.7491,
"step": 641
},
{
"epoch": 1.438655462184874,
"grad_norm": 0.3163350820541382,
"learning_rate": 0.00016155929630004996,
"loss": 0.7222,
"step": 642
},
{
"epoch": 1.4408963585434174,
"grad_norm": 0.3228285610675812,
"learning_rate": 0.00016140502065171863,
"loss": 0.722,
"step": 643
},
{
"epoch": 1.4431372549019608,
"grad_norm": 0.3150191605091095,
"learning_rate": 0.00016125051003339276,
"loss": 0.8325,
"step": 644
},
{
"epoch": 1.4453781512605042,
"grad_norm": 0.2988094687461853,
"learning_rate": 0.00016109576503631646,
"loss": 0.6268,
"step": 645
},
{
"epoch": 1.4476190476190476,
"grad_norm": 0.3043825030326843,
"learning_rate": 0.00016094078625263083,
"loss": 0.6615,
"step": 646
},
{
"epoch": 1.449859943977591,
"grad_norm": 0.3339695930480957,
"learning_rate": 0.00016078557427537144,
"loss": 0.7564,
"step": 647
},
{
"epoch": 1.4521008403361344,
"grad_norm": 0.37711572647094727,
"learning_rate": 0.00016063012969846625,
"loss": 0.8914,
"step": 648
},
{
"epoch": 1.4543417366946778,
"grad_norm": 0.3254697322845459,
"learning_rate": 0.0001604744531167332,
"loss": 0.7375,
"step": 649
},
{
"epoch": 1.4565826330532212,
"grad_norm": 0.30244678258895874,
"learning_rate": 0.0001603185451258781,
"loss": 0.649,
"step": 650
},
{
"epoch": 1.4588235294117646,
"grad_norm": 0.3273305892944336,
"learning_rate": 0.00016016240632249224,
"loss": 0.7635,
"step": 651
},
{
"epoch": 1.4610644257703083,
"grad_norm": 0.3089764714241028,
"learning_rate": 0.00016000603730405012,
"loss": 0.7321,
"step": 652
},
{
"epoch": 1.4633053221288517,
"grad_norm": 0.3246336579322815,
"learning_rate": 0.00015984943866890718,
"loss": 0.7163,
"step": 653
},
{
"epoch": 1.465546218487395,
"grad_norm": 0.29445281624794006,
"learning_rate": 0.00015969261101629742,
"loss": 0.7109,
"step": 654
},
{
"epoch": 1.4677871148459385,
"grad_norm": 0.3255884051322937,
"learning_rate": 0.00015953555494633136,
"loss": 0.7698,
"step": 655
},
{
"epoch": 1.470028011204482,
"grad_norm": 0.3040069043636322,
"learning_rate": 0.00015937827105999336,
"loss": 0.7155,
"step": 656
},
{
"epoch": 1.4722689075630253,
"grad_norm": 0.30603310465812683,
"learning_rate": 0.00015922075995913974,
"loss": 0.7512,
"step": 657
},
{
"epoch": 1.4745098039215687,
"grad_norm": 0.3210160732269287,
"learning_rate": 0.00015906302224649615,
"loss": 0.7153,
"step": 658
},
{
"epoch": 1.4767507002801121,
"grad_norm": 0.3401980996131897,
"learning_rate": 0.0001589050585256554,
"loss": 0.705,
"step": 659
},
{
"epoch": 1.4789915966386555,
"grad_norm": 0.3499704599380493,
"learning_rate": 0.00015874686940107506,
"loss": 0.7966,
"step": 660
},
{
"epoch": 1.481232492997199,
"grad_norm": 0.31771430373191833,
"learning_rate": 0.00015858845547807543,
"loss": 0.7383,
"step": 661
},
{
"epoch": 1.4834733893557424,
"grad_norm": 0.3159136474132538,
"learning_rate": 0.00015842981736283686,
"loss": 0.6415,
"step": 662
},
{
"epoch": 1.4857142857142858,
"grad_norm": 0.3231973946094513,
"learning_rate": 0.0001582709556623976,
"loss": 0.7462,
"step": 663
},
{
"epoch": 1.4879551820728292,
"grad_norm": 0.3222534954547882,
"learning_rate": 0.0001581118709846514,
"loss": 0.6833,
"step": 664
},
{
"epoch": 1.4901960784313726,
"grad_norm": 0.34087467193603516,
"learning_rate": 0.00015795256393834545,
"loss": 0.7325,
"step": 665
},
{
"epoch": 1.492436974789916,
"grad_norm": 0.33144867420196533,
"learning_rate": 0.00015779303513307764,
"loss": 0.7278,
"step": 666
},
{
"epoch": 1.4946778711484594,
"grad_norm": 0.3868750035762787,
"learning_rate": 0.0001576332851792945,
"loss": 0.9117,
"step": 667
},
{
"epoch": 1.4969187675070028,
"grad_norm": 0.3211595118045807,
"learning_rate": 0.00015747331468828887,
"loss": 0.7015,
"step": 668
},
{
"epoch": 1.4991596638655462,
"grad_norm": 0.30169111490249634,
"learning_rate": 0.00015731312427219737,
"loss": 0.8056,
"step": 669
},
{
"epoch": 1.5014005602240896,
"grad_norm": 0.30103328824043274,
"learning_rate": 0.0001571527145439983,
"loss": 0.6391,
"step": 670
},
{
"epoch": 1.503641456582633,
"grad_norm": 0.3065439462661743,
"learning_rate": 0.00015699208611750902,
"loss": 0.7279,
"step": 671
},
{
"epoch": 1.5058823529411764,
"grad_norm": 0.3444465100765228,
"learning_rate": 0.00015683123960738392,
"loss": 0.7274,
"step": 672
},
{
"epoch": 1.5081232492997199,
"grad_norm": 0.3165910542011261,
"learning_rate": 0.00015667017562911176,
"loss": 0.6784,
"step": 673
},
{
"epoch": 1.5103641456582633,
"grad_norm": 0.3377099335193634,
"learning_rate": 0.00015650889479901356,
"loss": 0.8188,
"step": 674
},
{
"epoch": 1.5126050420168067,
"grad_norm": 0.3051726818084717,
"learning_rate": 0.00015634739773424006,
"loss": 0.657,
"step": 675
},
{
"epoch": 1.51484593837535,
"grad_norm": 0.3249804675579071,
"learning_rate": 0.00015618568505276946,
"loss": 0.7148,
"step": 676
},
{
"epoch": 1.5170868347338935,
"grad_norm": 0.3395232856273651,
"learning_rate": 0.00015602375737340507,
"loss": 0.6749,
"step": 677
},
{
"epoch": 1.519327731092437,
"grad_norm": 0.33914825320243835,
"learning_rate": 0.0001558616153157728,
"loss": 0.7133,
"step": 678
},
{
"epoch": 1.5215686274509803,
"grad_norm": 0.3608812689781189,
"learning_rate": 0.00015569925950031908,
"loss": 0.7519,
"step": 679
},
{
"epoch": 1.5238095238095237,
"grad_norm": 0.37755894660949707,
"learning_rate": 0.00015553669054830805,
"loss": 0.757,
"step": 680
},
{
"epoch": 1.5260504201680671,
"grad_norm": 0.313101589679718,
"learning_rate": 0.0001553739090818196,
"loss": 0.7849,
"step": 681
},
{
"epoch": 1.5282913165266105,
"grad_norm": 0.2970316410064697,
"learning_rate": 0.0001552109157237468,
"loss": 0.6365,
"step": 682
},
{
"epoch": 1.530532212885154,
"grad_norm": 0.3263746201992035,
"learning_rate": 0.00015504771109779348,
"loss": 0.6997,
"step": 683
},
{
"epoch": 1.5327731092436974,
"grad_norm": 0.3154606521129608,
"learning_rate": 0.00015488429582847192,
"loss": 0.7298,
"step": 684
},
{
"epoch": 1.5350140056022408,
"grad_norm": 0.3260630965232849,
"learning_rate": 0.00015472067054110052,
"loss": 0.7921,
"step": 685
},
{
"epoch": 1.5372549019607842,
"grad_norm": 0.32374054193496704,
"learning_rate": 0.00015455683586180116,
"loss": 0.718,
"step": 686
},
{
"epoch": 1.5394957983193276,
"grad_norm": 0.3299919366836548,
"learning_rate": 0.00015439279241749715,
"loss": 0.7179,
"step": 687
},
{
"epoch": 1.541736694677871,
"grad_norm": 0.32276657223701477,
"learning_rate": 0.0001542285408359105,
"loss": 0.7589,
"step": 688
},
{
"epoch": 1.5439775910364144,
"grad_norm": 0.3329083323478699,
"learning_rate": 0.00015406408174555976,
"loss": 0.7549,
"step": 689
},
{
"epoch": 1.5462184873949578,
"grad_norm": 0.3457712233066559,
"learning_rate": 0.00015389941577575753,
"loss": 0.7139,
"step": 690
},
{
"epoch": 1.5484593837535015,
"grad_norm": 0.3504430949687958,
"learning_rate": 0.00015373454355660802,
"loss": 0.788,
"step": 691
},
{
"epoch": 1.5507002801120449,
"grad_norm": 0.3265605866909027,
"learning_rate": 0.00015356946571900464,
"loss": 0.7077,
"step": 692
},
{
"epoch": 1.5529411764705883,
"grad_norm": 0.3154314458370209,
"learning_rate": 0.00015340418289462764,
"loss": 0.7,
"step": 693
},
{
"epoch": 1.5551820728291317,
"grad_norm": 0.3218802511692047,
"learning_rate": 0.00015323869571594166,
"loss": 0.7735,
"step": 694
},
{
"epoch": 1.557422969187675,
"grad_norm": 0.31704089045524597,
"learning_rate": 0.00015307300481619333,
"loss": 0.7946,
"step": 695
},
{
"epoch": 1.5596638655462185,
"grad_norm": 0.3201853632926941,
"learning_rate": 0.0001529071108294088,
"loss": 0.7154,
"step": 696
},
{
"epoch": 1.561904761904762,
"grad_norm": 0.3154377341270447,
"learning_rate": 0.00015274101439039138,
"loss": 0.6905,
"step": 697
},
{
"epoch": 1.5641456582633053,
"grad_norm": 0.30675947666168213,
"learning_rate": 0.00015257471613471906,
"loss": 0.6897,
"step": 698
},
{
"epoch": 1.5663865546218487,
"grad_norm": 0.30410274863243103,
"learning_rate": 0.00015240821669874202,
"loss": 0.7181,
"step": 699
},
{
"epoch": 1.5686274509803921,
"grad_norm": 0.3099222779273987,
"learning_rate": 0.00015224151671958043,
"loss": 0.732,
"step": 700
},
{
"epoch": 1.5708683473389355,
"grad_norm": 0.32526805996894836,
"learning_rate": 0.00015207461683512175,
"loss": 0.7442,
"step": 701
},
{
"epoch": 1.573109243697479,
"grad_norm": 0.3597675859928131,
"learning_rate": 0.00015190751768401833,
"loss": 0.7059,
"step": 702
},
{
"epoch": 1.5753501400560224,
"grad_norm": 0.34002843499183655,
"learning_rate": 0.00015174021990568517,
"loss": 0.7309,
"step": 703
},
{
"epoch": 1.5775910364145658,
"grad_norm": 0.3448459804058075,
"learning_rate": 0.0001515727241402972,
"loss": 0.7543,
"step": 704
},
{
"epoch": 1.5798319327731094,
"grad_norm": 0.3388139605522156,
"learning_rate": 0.000151405031028787,
"loss": 0.6604,
"step": 705
},
{
"epoch": 1.5820728291316528,
"grad_norm": 0.33875682950019836,
"learning_rate": 0.0001512371412128424,
"loss": 0.7234,
"step": 706
},
{
"epoch": 1.5843137254901962,
"grad_norm": 0.3654973804950714,
"learning_rate": 0.00015106905533490372,
"loss": 0.8452,
"step": 707
},
{
"epoch": 1.5865546218487396,
"grad_norm": 0.31986573338508606,
"learning_rate": 0.00015090077403816178,
"loss": 0.6683,
"step": 708
},
{
"epoch": 1.588795518207283,
"grad_norm": 0.29579469561576843,
"learning_rate": 0.00015073229796655504,
"loss": 0.723,
"step": 709
},
{
"epoch": 1.5910364145658265,
"grad_norm": 0.31319934129714966,
"learning_rate": 0.0001505636277647672,
"loss": 0.7384,
"step": 710
},
{
"epoch": 1.5932773109243699,
"grad_norm": 0.30319178104400635,
"learning_rate": 0.00015039476407822502,
"loss": 0.7044,
"step": 711
},
{
"epoch": 1.5955182072829133,
"grad_norm": 0.3266353905200958,
"learning_rate": 0.0001502257075530954,
"loss": 0.7777,
"step": 712
},
{
"epoch": 1.5977591036414567,
"grad_norm": 0.3113664388656616,
"learning_rate": 0.00015005645883628342,
"loss": 0.7394,
"step": 713
},
{
"epoch": 1.6,
"grad_norm": 0.29923635721206665,
"learning_rate": 0.00014988701857542933,
"loss": 0.6867,
"step": 714
},
{
"epoch": 1.6022408963585435,
"grad_norm": 0.32305604219436646,
"learning_rate": 0.00014971738741890647,
"loss": 0.7512,
"step": 715
},
{
"epoch": 1.604481792717087,
"grad_norm": 0.30608245730400085,
"learning_rate": 0.0001495475660158187,
"loss": 0.7076,
"step": 716
},
{
"epoch": 1.6067226890756303,
"grad_norm": 0.3273196220397949,
"learning_rate": 0.00014937755501599772,
"loss": 0.7259,
"step": 717
},
{
"epoch": 1.6089635854341737,
"grad_norm": 0.3238683342933655,
"learning_rate": 0.0001492073550700009,
"loss": 0.716,
"step": 718
},
{
"epoch": 1.6112044817927171,
"grad_norm": 0.3258748948574066,
"learning_rate": 0.00014903696682910846,
"loss": 0.739,
"step": 719
},
{
"epoch": 1.6134453781512605,
"grad_norm": 0.32485276460647583,
"learning_rate": 0.00014886639094532128,
"loss": 0.681,
"step": 720
},
{
"epoch": 1.615686274509804,
"grad_norm": 0.3358374536037445,
"learning_rate": 0.0001486956280713582,
"loss": 0.7625,
"step": 721
},
{
"epoch": 1.6179271708683474,
"grad_norm": 0.30938801169395447,
"learning_rate": 0.00014852467886065357,
"loss": 0.6942,
"step": 722
},
{
"epoch": 1.6201680672268908,
"grad_norm": 0.35836121439933777,
"learning_rate": 0.00014835354396735482,
"loss": 0.7397,
"step": 723
},
{
"epoch": 1.6224089635854342,
"grad_norm": 0.3162292540073395,
"learning_rate": 0.00014818222404631992,
"loss": 0.7071,
"step": 724
},
{
"epoch": 1.6246498599439776,
"grad_norm": 0.32497039437294006,
"learning_rate": 0.0001480107197531148,
"loss": 0.7396,
"step": 725
},
{
"epoch": 1.626890756302521,
"grad_norm": 0.33408358693122864,
"learning_rate": 0.00014783903174401085,
"loss": 0.7593,
"step": 726
},
{
"epoch": 1.6291316526610644,
"grad_norm": 0.3246319591999054,
"learning_rate": 0.00014766716067598262,
"loss": 0.7302,
"step": 727
},
{
"epoch": 1.6313725490196078,
"grad_norm": 0.32565364241600037,
"learning_rate": 0.00014749510720670506,
"loss": 0.8113,
"step": 728
},
{
"epoch": 1.6336134453781512,
"grad_norm": 0.32790714502334595,
"learning_rate": 0.00014732287199455103,
"loss": 0.7884,
"step": 729
},
{
"epoch": 1.6358543417366946,
"grad_norm": 0.3148501217365265,
"learning_rate": 0.00014715045569858894,
"loss": 0.7214,
"step": 730
},
{
"epoch": 1.638095238095238,
"grad_norm": 0.30682241916656494,
"learning_rate": 0.00014697785897858012,
"loss": 0.725,
"step": 731
},
{
"epoch": 1.6403361344537815,
"grad_norm": 0.3391934037208557,
"learning_rate": 0.00014680508249497622,
"loss": 0.84,
"step": 732
},
{
"epoch": 1.6425770308123249,
"grad_norm": 0.31384411454200745,
"learning_rate": 0.0001466321269089168,
"loss": 0.6867,
"step": 733
},
{
"epoch": 1.6448179271708683,
"grad_norm": 0.33052271604537964,
"learning_rate": 0.00014645899288222687,
"loss": 0.7761,
"step": 734
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.3304360806941986,
"learning_rate": 0.00014628568107741407,
"loss": 0.8817,
"step": 735
},
{
"epoch": 1.649299719887955,
"grad_norm": 0.34404993057250977,
"learning_rate": 0.0001461121921576665,
"loss": 0.8155,
"step": 736
},
{
"epoch": 1.6515406162464985,
"grad_norm": 0.33394402265548706,
"learning_rate": 0.00014593852678684984,
"loss": 0.7656,
"step": 737
},
{
"epoch": 1.653781512605042,
"grad_norm": 0.35093650221824646,
"learning_rate": 0.0001457646856295051,
"loss": 0.8171,
"step": 738
},
{
"epoch": 1.6560224089635853,
"grad_norm": 0.31678837537765503,
"learning_rate": 0.00014559066935084588,
"loss": 0.7061,
"step": 739
},
{
"epoch": 1.6582633053221287,
"grad_norm": 0.3170653283596039,
"learning_rate": 0.00014541647861675592,
"loss": 0.7351,
"step": 740
},
{
"epoch": 1.6605042016806721,
"grad_norm": 0.34166768193244934,
"learning_rate": 0.0001452421140937865,
"loss": 0.7937,
"step": 741
},
{
"epoch": 1.6627450980392156,
"grad_norm": 0.3269054591655731,
"learning_rate": 0.00014506757644915393,
"loss": 0.8038,
"step": 742
},
{
"epoch": 1.664985994397759,
"grad_norm": 0.3237577974796295,
"learning_rate": 0.00014489286635073693,
"loss": 0.657,
"step": 743
},
{
"epoch": 1.6672268907563024,
"grad_norm": 0.3239234685897827,
"learning_rate": 0.00014471798446707426,
"loss": 0.6732,
"step": 744
},
{
"epoch": 1.6694677871148458,
"grad_norm": 0.30346667766571045,
"learning_rate": 0.00014454293146736187,
"loss": 0.669,
"step": 745
},
{
"epoch": 1.6717086834733892,
"grad_norm": 0.32579341530799866,
"learning_rate": 0.00014436770802145059,
"loss": 0.6693,
"step": 746
},
{
"epoch": 1.6739495798319328,
"grad_norm": 0.34438079595565796,
"learning_rate": 0.0001441923147998434,
"loss": 0.8594,
"step": 747
},
{
"epoch": 1.6761904761904762,
"grad_norm": 0.3287278115749359,
"learning_rate": 0.00014401675247369307,
"loss": 0.7633,
"step": 748
},
{
"epoch": 1.6784313725490196,
"grad_norm": 0.34410589933395386,
"learning_rate": 0.0001438410217147993,
"loss": 0.7815,
"step": 749
},
{
"epoch": 1.680672268907563,
"grad_norm": 0.4113364815711975,
"learning_rate": 0.0001436651231956064,
"loss": 0.7617,
"step": 750
},
{
"epoch": 1.6829131652661065,
"grad_norm": 0.3100757598876953,
"learning_rate": 0.0001434890575892006,
"loss": 0.673,
"step": 751
},
{
"epoch": 1.6851540616246499,
"grad_norm": 0.32215428352355957,
"learning_rate": 0.0001433128255693075,
"loss": 0.7694,
"step": 752
},
{
"epoch": 1.6873949579831933,
"grad_norm": 0.3526766002178192,
"learning_rate": 0.00014313642781028953,
"loss": 0.7397,
"step": 753
},
{
"epoch": 1.6896358543417367,
"grad_norm": 0.3373475968837738,
"learning_rate": 0.00014295986498714326,
"loss": 0.8275,
"step": 754
},
{
"epoch": 1.69187675070028,
"grad_norm": 0.32114461064338684,
"learning_rate": 0.0001427831377754969,
"loss": 0.7225,
"step": 755
},
{
"epoch": 1.6941176470588235,
"grad_norm": 0.320982426404953,
"learning_rate": 0.00014260624685160777,
"loss": 0.7966,
"step": 756
},
{
"epoch": 1.696358543417367,
"grad_norm": 0.28297820687294006,
"learning_rate": 0.0001424291928923596,
"loss": 0.6789,
"step": 757
},
{
"epoch": 1.6985994397759103,
"grad_norm": 0.31439271569252014,
"learning_rate": 0.00014225197657525995,
"loss": 0.7194,
"step": 758
},
{
"epoch": 1.7008403361344537,
"grad_norm": 0.33543679118156433,
"learning_rate": 0.0001420745985784377,
"loss": 0.8552,
"step": 759
},
{
"epoch": 1.7030812324929971,
"grad_norm": 0.33300474286079407,
"learning_rate": 0.0001418970595806404,
"loss": 0.867,
"step": 760
},
{
"epoch": 1.7053221288515408,
"grad_norm": 0.3134838938713074,
"learning_rate": 0.00014171936026123168,
"loss": 0.6651,
"step": 761
},
{
"epoch": 1.7075630252100842,
"grad_norm": 0.32312485575675964,
"learning_rate": 0.00014154150130018866,
"loss": 0.7978,
"step": 762
},
{
"epoch": 1.7098039215686276,
"grad_norm": 0.3270231783390045,
"learning_rate": 0.00014136348337809927,
"loss": 0.7574,
"step": 763
},
{
"epoch": 1.712044817927171,
"grad_norm": 0.32470935583114624,
"learning_rate": 0.0001411853071761598,
"loss": 0.6881,
"step": 764
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.3110044300556183,
"learning_rate": 0.00014100697337617217,
"loss": 0.6684,
"step": 765
},
{
"epoch": 1.7165266106442578,
"grad_norm": 0.344539076089859,
"learning_rate": 0.00014082848266054135,
"loss": 0.7387,
"step": 766
},
{
"epoch": 1.7187675070028012,
"grad_norm": 0.3174740672111511,
"learning_rate": 0.0001406498357122728,
"loss": 0.6459,
"step": 767
},
{
"epoch": 1.7210084033613446,
"grad_norm": 0.339348167181015,
"learning_rate": 0.00014047103321496976,
"loss": 0.658,
"step": 768
},
{
"epoch": 1.723249299719888,
"grad_norm": 0.3239974081516266,
"learning_rate": 0.0001402920758528307,
"loss": 0.6356,
"step": 769
},
{
"epoch": 1.7254901960784315,
"grad_norm": 0.33558371663093567,
"learning_rate": 0.00014011296431064676,
"loss": 0.7531,
"step": 770
},
{
"epoch": 1.7277310924369749,
"grad_norm": 0.35468053817749023,
"learning_rate": 0.0001399336992737989,
"loss": 0.783,
"step": 771
},
{
"epoch": 1.7299719887955183,
"grad_norm": 0.34820565581321716,
"learning_rate": 0.0001397542814282556,
"loss": 0.7227,
"step": 772
},
{
"epoch": 1.7322128851540617,
"grad_norm": 0.3655050992965698,
"learning_rate": 0.00013957471146056998,
"loss": 0.7366,
"step": 773
},
{
"epoch": 1.734453781512605,
"grad_norm": 0.3187251389026642,
"learning_rate": 0.00013939499005787736,
"loss": 0.7304,
"step": 774
},
{
"epoch": 1.7366946778711485,
"grad_norm": 0.3165695071220398,
"learning_rate": 0.00013921511790789234,
"loss": 0.7757,
"step": 775
},
{
"epoch": 1.738935574229692,
"grad_norm": 0.3043893873691559,
"learning_rate": 0.00013903509569890662,
"loss": 0.7689,
"step": 776
},
{
"epoch": 1.7411764705882353,
"grad_norm": 0.30263444781303406,
"learning_rate": 0.0001388549241197859,
"loss": 0.6638,
"step": 777
},
{
"epoch": 1.7434173669467787,
"grad_norm": 0.3344278931617737,
"learning_rate": 0.00013867460385996754,
"loss": 0.7027,
"step": 778
},
{
"epoch": 1.7456582633053221,
"grad_norm": 0.309756338596344,
"learning_rate": 0.00013849413560945787,
"loss": 0.7662,
"step": 779
},
{
"epoch": 1.7478991596638656,
"grad_norm": 0.33109283447265625,
"learning_rate": 0.00013831352005882946,
"loss": 0.7888,
"step": 780
},
{
"epoch": 1.750140056022409,
"grad_norm": 0.32347214221954346,
"learning_rate": 0.00013813275789921855,
"loss": 0.7313,
"step": 781
},
{
"epoch": 1.7523809523809524,
"grad_norm": 0.3274037539958954,
"learning_rate": 0.00013795184982232233,
"loss": 0.779,
"step": 782
},
{
"epoch": 1.7546218487394958,
"grad_norm": 0.3241097927093506,
"learning_rate": 0.0001377707965203965,
"loss": 0.744,
"step": 783
},
{
"epoch": 1.7568627450980392,
"grad_norm": 0.32738566398620605,
"learning_rate": 0.00013758959868625232,
"loss": 0.7585,
"step": 784
},
{
"epoch": 1.7591036414565826,
"grad_norm": 0.3516925275325775,
"learning_rate": 0.00013740825701325418,
"loss": 0.7445,
"step": 785
},
{
"epoch": 1.761344537815126,
"grad_norm": 0.33777570724487305,
"learning_rate": 0.00013722677219531683,
"loss": 0.7038,
"step": 786
},
{
"epoch": 1.7635854341736694,
"grad_norm": 0.35104092955589294,
"learning_rate": 0.0001370451449269029,
"loss": 0.7527,
"step": 787
},
{
"epoch": 1.7658263305322128,
"grad_norm": 0.33028444647789,
"learning_rate": 0.00013686337590301995,
"loss": 0.7227,
"step": 788
},
{
"epoch": 1.7680672268907562,
"grad_norm": 0.3553149104118347,
"learning_rate": 0.0001366814658192181,
"loss": 0.6937,
"step": 789
},
{
"epoch": 1.7703081232492996,
"grad_norm": 0.32514214515686035,
"learning_rate": 0.0001364994153715872,
"loss": 0.7198,
"step": 790
},
{
"epoch": 1.772549019607843,
"grad_norm": 0.3584795892238617,
"learning_rate": 0.00013631722525675412,
"loss": 0.8049,
"step": 791
},
{
"epoch": 1.7747899159663865,
"grad_norm": 0.3179537057876587,
"learning_rate": 0.0001361348961718804,
"loss": 0.7795,
"step": 792
},
{
"epoch": 1.7770308123249299,
"grad_norm": 0.2919451594352722,
"learning_rate": 0.0001359524288146591,
"loss": 0.6553,
"step": 793
},
{
"epoch": 1.7792717086834733,
"grad_norm": 0.31730496883392334,
"learning_rate": 0.0001357698238833126,
"loss": 0.812,
"step": 794
},
{
"epoch": 1.7815126050420167,
"grad_norm": 0.31148526072502136,
"learning_rate": 0.00013558708207658948,
"loss": 0.6558,
"step": 795
},
{
"epoch": 1.78375350140056,
"grad_norm": 0.3431221544742584,
"learning_rate": 0.00013540420409376236,
"loss": 0.766,
"step": 796
},
{
"epoch": 1.7859943977591035,
"grad_norm": 0.32656344771385193,
"learning_rate": 0.00013522119063462482,
"loss": 0.744,
"step": 797
},
{
"epoch": 1.788235294117647,
"grad_norm": 0.3525942862033844,
"learning_rate": 0.00013503804239948874,
"loss": 0.7954,
"step": 798
},
{
"epoch": 1.7904761904761903,
"grad_norm": 0.3282519280910492,
"learning_rate": 0.00013485476008918184,
"loss": 0.72,
"step": 799
},
{
"epoch": 1.7927170868347337,
"grad_norm": 0.3348991870880127,
"learning_rate": 0.00013467134440504495,
"loss": 0.6925,
"step": 800
},
{
"epoch": 1.7949579831932772,
"grad_norm": 0.325764000415802,
"learning_rate": 0.00013448779604892917,
"loss": 0.7261,
"step": 801
},
{
"epoch": 1.7971988795518208,
"grad_norm": 0.33784714341163635,
"learning_rate": 0.00013430411572319323,
"loss": 0.7637,
"step": 802
},
{
"epoch": 1.7994397759103642,
"grad_norm": 0.31615525484085083,
"learning_rate": 0.00013412030413070095,
"loss": 0.649,
"step": 803
},
{
"epoch": 1.8016806722689076,
"grad_norm": 0.30358991026878357,
"learning_rate": 0.00013393636197481842,
"loss": 0.6566,
"step": 804
},
{
"epoch": 1.803921568627451,
"grad_norm": 0.3021032512187958,
"learning_rate": 0.00013375228995941133,
"loss": 0.7694,
"step": 805
},
{
"epoch": 1.8061624649859944,
"grad_norm": 0.3464861810207367,
"learning_rate": 0.00013356808878884228,
"loss": 0.7604,
"step": 806
},
{
"epoch": 1.8084033613445378,
"grad_norm": 0.3448963165283203,
"learning_rate": 0.00013338375916796812,
"loss": 0.7434,
"step": 807
},
{
"epoch": 1.8106442577030812,
"grad_norm": 0.3179613947868347,
"learning_rate": 0.00013319930180213712,
"loss": 0.6885,
"step": 808
},
{
"epoch": 1.8128851540616246,
"grad_norm": 0.31533604860305786,
"learning_rate": 0.0001330147173971866,
"loss": 0.6736,
"step": 809
},
{
"epoch": 1.815126050420168,
"grad_norm": 0.3316449224948883,
"learning_rate": 0.00013283000665943972,
"loss": 0.7354,
"step": 810
},
{
"epoch": 1.8173669467787115,
"grad_norm": 0.3497374951839447,
"learning_rate": 0.00013264517029570324,
"loss": 0.7956,
"step": 811
},
{
"epoch": 1.8196078431372549,
"grad_norm": 0.33198150992393494,
"learning_rate": 0.00013246020901326464,
"loss": 0.7105,
"step": 812
},
{
"epoch": 1.8218487394957983,
"grad_norm": 0.3188040554523468,
"learning_rate": 0.00013227512351988925,
"loss": 0.7141,
"step": 813
},
{
"epoch": 1.8240896358543417,
"grad_norm": 0.30372536182403564,
"learning_rate": 0.00013208991452381798,
"loss": 0.7223,
"step": 814
},
{
"epoch": 1.826330532212885,
"grad_norm": 0.6289974451065063,
"learning_rate": 0.00013190458273376404,
"loss": 0.7972,
"step": 815
},
{
"epoch": 1.8285714285714287,
"grad_norm": 0.3201170563697815,
"learning_rate": 0.00013171912885891063,
"loss": 0.7433,
"step": 816
},
{
"epoch": 1.8308123249299721,
"grad_norm": 0.35013505816459656,
"learning_rate": 0.00013153355360890815,
"loss": 0.7361,
"step": 817
},
{
"epoch": 1.8330532212885156,
"grad_norm": 0.34328508377075195,
"learning_rate": 0.00013134785769387147,
"loss": 0.7031,
"step": 818
},
{
"epoch": 1.835294117647059,
"grad_norm": 0.3286396265029907,
"learning_rate": 0.0001311620418243771,
"loss": 0.7657,
"step": 819
},
{
"epoch": 1.8375350140056024,
"grad_norm": 0.3106536567211151,
"learning_rate": 0.00013097610671146065,
"loss": 0.7554,
"step": 820
},
{
"epoch": 1.8397759103641458,
"grad_norm": 0.3301306366920471,
"learning_rate": 0.0001307900530666139,
"loss": 0.7126,
"step": 821
},
{
"epoch": 1.8420168067226892,
"grad_norm": 0.34890398383140564,
"learning_rate": 0.00013060388160178235,
"loss": 0.7953,
"step": 822
},
{
"epoch": 1.8442577030812326,
"grad_norm": 0.31672972440719604,
"learning_rate": 0.0001304175930293623,
"loss": 0.7359,
"step": 823
},
{
"epoch": 1.846498599439776,
"grad_norm": 0.3178110122680664,
"learning_rate": 0.0001302311880621981,
"loss": 0.7662,
"step": 824
},
{
"epoch": 1.8487394957983194,
"grad_norm": 0.35035309195518494,
"learning_rate": 0.0001300446674135795,
"loss": 0.7105,
"step": 825
},
{
"epoch": 1.8509803921568628,
"grad_norm": 0.3116212785243988,
"learning_rate": 0.00012985803179723903,
"loss": 0.7424,
"step": 826
},
{
"epoch": 1.8532212885154062,
"grad_norm": 0.34882014989852905,
"learning_rate": 0.00012967128192734902,
"loss": 0.7574,
"step": 827
},
{
"epoch": 1.8554621848739496,
"grad_norm": 0.31585201621055603,
"learning_rate": 0.0001294844185185191,
"loss": 0.805,
"step": 828
},
{
"epoch": 1.857703081232493,
"grad_norm": 0.314979612827301,
"learning_rate": 0.00012929744228579323,
"loss": 0.5924,
"step": 829
},
{
"epoch": 1.8599439775910365,
"grad_norm": 0.3267279863357544,
"learning_rate": 0.00012911035394464723,
"loss": 0.763,
"step": 830
},
{
"epoch": 1.8621848739495799,
"grad_norm": 0.3523454964160919,
"learning_rate": 0.00012892315421098586,
"loss": 0.8221,
"step": 831
},
{
"epoch": 1.8644257703081233,
"grad_norm": 0.31915390491485596,
"learning_rate": 0.00012873584380114012,
"loss": 0.8081,
"step": 832
},
{
"epoch": 1.8666666666666667,
"grad_norm": 0.32959550619125366,
"learning_rate": 0.00012854842343186455,
"loss": 0.7208,
"step": 833
},
{
"epoch": 1.86890756302521,
"grad_norm": 0.32218843698501587,
"learning_rate": 0.0001283608938203344,
"loss": 0.7514,
"step": 834
},
{
"epoch": 1.8711484593837535,
"grad_norm": 0.3358590602874756,
"learning_rate": 0.00012817325568414297,
"loss": 0.7401,
"step": 835
},
{
"epoch": 1.873389355742297,
"grad_norm": 0.3331390917301178,
"learning_rate": 0.00012798550974129887,
"loss": 0.6833,
"step": 836
},
{
"epoch": 1.8756302521008403,
"grad_norm": 0.33320388197898865,
"learning_rate": 0.00012779765671022325,
"loss": 0.6287,
"step": 837
},
{
"epoch": 1.8778711484593837,
"grad_norm": 0.3285226821899414,
"learning_rate": 0.00012760969730974694,
"loss": 0.6698,
"step": 838
},
{
"epoch": 1.8801120448179272,
"grad_norm": 0.3503000736236572,
"learning_rate": 0.0001274216322591078,
"loss": 0.6997,
"step": 839
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.35233861207962036,
"learning_rate": 0.00012723346227794816,
"loss": 0.7241,
"step": 840
},
{
"epoch": 1.884593837535014,
"grad_norm": 0.3368009924888611,
"learning_rate": 0.00012704518808631166,
"loss": 0.7379,
"step": 841
},
{
"epoch": 1.8868347338935574,
"grad_norm": 0.3214666247367859,
"learning_rate": 0.0001268568104046408,
"loss": 0.6946,
"step": 842
},
{
"epoch": 1.8890756302521008,
"grad_norm": 0.3163416385650635,
"learning_rate": 0.0001266683299537741,
"loss": 0.7044,
"step": 843
},
{
"epoch": 1.8913165266106442,
"grad_norm": 0.3129757046699524,
"learning_rate": 0.0001264797474549433,
"loss": 0.6105,
"step": 844
},
{
"epoch": 1.8935574229691876,
"grad_norm": 0.35761409997940063,
"learning_rate": 0.00012629106362977064,
"loss": 0.8279,
"step": 845
},
{
"epoch": 1.895798319327731,
"grad_norm": 0.3365795314311981,
"learning_rate": 0.00012610227920026608,
"loss": 0.6859,
"step": 846
},
{
"epoch": 1.8980392156862744,
"grad_norm": 0.3187201917171478,
"learning_rate": 0.00012591339488882456,
"loss": 0.6907,
"step": 847
},
{
"epoch": 1.9002801120448178,
"grad_norm": 0.31638118624687195,
"learning_rate": 0.0001257244114182232,
"loss": 0.7243,
"step": 848
},
{
"epoch": 1.9025210084033612,
"grad_norm": 0.34840863943099976,
"learning_rate": 0.0001255353295116187,
"loss": 0.687,
"step": 849
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.3138674199581146,
"learning_rate": 0.00012534614989254423,
"loss": 0.7198,
"step": 850
},
{
"epoch": 1.907002801120448,
"grad_norm": 0.362076997756958,
"learning_rate": 0.00012515687328490695,
"loss": 0.7757,
"step": 851
},
{
"epoch": 1.9092436974789915,
"grad_norm": 0.32191118597984314,
"learning_rate": 0.00012496750041298516,
"loss": 0.7103,
"step": 852
},
{
"epoch": 1.9114845938375349,
"grad_norm": 0.3364141583442688,
"learning_rate": 0.0001247780320014255,
"loss": 0.7509,
"step": 853
},
{
"epoch": 1.9137254901960783,
"grad_norm": 0.35079094767570496,
"learning_rate": 0.00012458846877524024,
"loss": 0.6844,
"step": 854
},
{
"epoch": 1.9159663865546217,
"grad_norm": 0.33669090270996094,
"learning_rate": 0.00012439881145980444,
"loss": 0.6801,
"step": 855
},
{
"epoch": 1.9182072829131651,
"grad_norm": 0.31381383538246155,
"learning_rate": 0.00012420906078085316,
"loss": 0.6341,
"step": 856
},
{
"epoch": 1.9204481792717085,
"grad_norm": 0.3355211615562439,
"learning_rate": 0.00012401921746447872,
"loss": 0.7244,
"step": 857
},
{
"epoch": 1.9226890756302522,
"grad_norm": 0.3467940092086792,
"learning_rate": 0.00012382928223712806,
"loss": 0.7195,
"step": 858
},
{
"epoch": 1.9249299719887956,
"grad_norm": 0.33815139532089233,
"learning_rate": 0.00012363925582559968,
"loss": 0.704,
"step": 859
},
{
"epoch": 1.927170868347339,
"grad_norm": 0.315331369638443,
"learning_rate": 0.00012344913895704097,
"loss": 0.6618,
"step": 860
},
{
"epoch": 1.9294117647058824,
"grad_norm": 0.3420298099517822,
"learning_rate": 0.00012325893235894564,
"loss": 0.7005,
"step": 861
},
{
"epoch": 1.9316526610644258,
"grad_norm": 0.33387431502342224,
"learning_rate": 0.00012306863675915056,
"loss": 0.8085,
"step": 862
},
{
"epoch": 1.9338935574229692,
"grad_norm": 0.35296881198883057,
"learning_rate": 0.0001228782528858333,
"loss": 0.7798,
"step": 863
},
{
"epoch": 1.9361344537815126,
"grad_norm": 0.3383727967739105,
"learning_rate": 0.00012268778146750915,
"loss": 0.6835,
"step": 864
},
{
"epoch": 1.938375350140056,
"grad_norm": 0.33279767632484436,
"learning_rate": 0.00012249722323302842,
"loss": 0.7388,
"step": 865
},
{
"epoch": 1.9406162464985994,
"grad_norm": 0.3255688548088074,
"learning_rate": 0.00012230657891157363,
"loss": 0.7167,
"step": 866
},
{
"epoch": 1.9428571428571428,
"grad_norm": 0.34093961119651794,
"learning_rate": 0.00012211584923265672,
"loss": 0.7139,
"step": 867
},
{
"epoch": 1.9450980392156862,
"grad_norm": 0.3231501281261444,
"learning_rate": 0.00012192503492611625,
"loss": 0.6996,
"step": 868
},
{
"epoch": 1.9473389355742297,
"grad_norm": 0.3145335614681244,
"learning_rate": 0.00012173413672211458,
"loss": 0.7754,
"step": 869
},
{
"epoch": 1.949579831932773,
"grad_norm": 0.32647013664245605,
"learning_rate": 0.00012154315535113511,
"loss": 0.6923,
"step": 870
},
{
"epoch": 1.9518207282913165,
"grad_norm": 0.3004699647426605,
"learning_rate": 0.00012135209154397962,
"loss": 0.6856,
"step": 871
},
{
"epoch": 1.95406162464986,
"grad_norm": 0.33689776062965393,
"learning_rate": 0.00012116094603176513,
"loss": 0.7621,
"step": 872
},
{
"epoch": 1.9563025210084035,
"grad_norm": 0.30934080481529236,
"learning_rate": 0.00012096971954592145,
"loss": 0.7126,
"step": 873
},
{
"epoch": 1.958543417366947,
"grad_norm": 0.346757173538208,
"learning_rate": 0.00012077841281818816,
"loss": 0.708,
"step": 874
},
{
"epoch": 1.9607843137254903,
"grad_norm": 0.3257780075073242,
"learning_rate": 0.00012058702658061197,
"loss": 0.6621,
"step": 875
},
{
"epoch": 1.9630252100840337,
"grad_norm": 0.3402419090270996,
"learning_rate": 0.0001203955615655438,
"loss": 0.7003,
"step": 876
},
{
"epoch": 1.9652661064425772,
"grad_norm": 0.3437844514846802,
"learning_rate": 0.00012020401850563596,
"loss": 0.7469,
"step": 877
},
{
"epoch": 1.9675070028011206,
"grad_norm": 0.3671571612358093,
"learning_rate": 0.00012001239813383951,
"loss": 0.785,
"step": 878
},
{
"epoch": 1.969747899159664,
"grad_norm": 0.32898640632629395,
"learning_rate": 0.00011982070118340127,
"loss": 0.7292,
"step": 879
},
{
"epoch": 1.9719887955182074,
"grad_norm": 0.3525889217853546,
"learning_rate": 0.00011962892838786115,
"loss": 0.7226,
"step": 880
},
{
"epoch": 1.9742296918767508,
"grad_norm": 0.33842483162879944,
"learning_rate": 0.00011943708048104922,
"loss": 0.6483,
"step": 881
},
{
"epoch": 1.9764705882352942,
"grad_norm": 0.3088597357273102,
"learning_rate": 0.000119245158197083,
"loss": 0.6449,
"step": 882
},
{
"epoch": 1.9787114845938376,
"grad_norm": 0.32097896933555603,
"learning_rate": 0.00011905316227036465,
"loss": 0.7823,
"step": 883
},
{
"epoch": 1.980952380952381,
"grad_norm": 0.316945880651474,
"learning_rate": 0.00011886109343557808,
"loss": 0.6802,
"step": 884
},
{
"epoch": 1.9831932773109244,
"grad_norm": 0.3052690327167511,
"learning_rate": 0.00011866895242768621,
"loss": 0.6601,
"step": 885
},
{
"epoch": 1.9854341736694678,
"grad_norm": 0.3162146210670471,
"learning_rate": 0.00011847673998192815,
"loss": 0.7377,
"step": 886
},
{
"epoch": 1.9876750700280112,
"grad_norm": 0.3386717736721039,
"learning_rate": 0.00011828445683381628,
"loss": 0.7741,
"step": 887
},
{
"epoch": 1.9899159663865547,
"grad_norm": 0.36137786507606506,
"learning_rate": 0.00011809210371913368,
"loss": 0.765,
"step": 888
},
{
"epoch": 1.992156862745098,
"grad_norm": 0.37440425157546997,
"learning_rate": 0.00011789968137393108,
"loss": 0.7596,
"step": 889
},
{
"epoch": 1.9943977591036415,
"grad_norm": 0.36767804622650146,
"learning_rate": 0.00011770719053452407,
"loss": 0.761,
"step": 890
},
{
"epoch": 1.9966386554621849,
"grad_norm": 0.39699792861938477,
"learning_rate": 0.00011751463193749044,
"loss": 0.7781,
"step": 891
},
{
"epoch": 1.9988795518207283,
"grad_norm": 0.35793137550354004,
"learning_rate": 0.00011732200631966716,
"loss": 0.7117,
"step": 892
}
],
"logging_steps": 1,
"max_steps": 1784,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 446,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.621244232105984e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}