GS-Reasoner / trainer_state.json
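The JSON below is the Hugging Face Trainer state recorded during fine-tuning: per-step training loss, gradient norm, and learning rate, plus an eval_loss entry every 50 steps (eval_steps), along with the best metric and best checkpoint seen so far. A minimal sketch for inspecting it, assuming the file is saved locally as trainer_state.json (the filename and print statements are illustrative, not part of the original artifact):

# Minimal sketch (assumption: the file is available locally as trainer_state.json).
# It loads the Trainer state, separates train-loss and eval-loss entries in
# log_history, and reports the best checkpoint recorded in the state.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print("best eval loss  :", state["best_metric"])
print("best checkpoint :", state["best_model_checkpoint"])

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_loss".
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("train-loss points:", len(train_points))
print("eval-loss points :", len(eval_points))
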
{
"best_metric": 0.16857390105724335,
"best_model_checkpoint": "./ckpt/llavanext-qwen-video3dllm-uniform-autoreg-vg-dataagugallF++-frame32-epoch1-alldata-finetune-bs256/checkpoint-1200",
"epoch": 0.999000199960008,
"eval_steps": 50,
"global_step": 1249,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 23.044811248779297,
"learning_rate": 2.6315789473684213e-07,
"loss": 2.8862,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 31.060749053955078,
"learning_rate": 5.263157894736843e-07,
"loss": 2.2014,
"step": 2
},
{
"epoch": 0.0,
"grad_norm": 30.554189682006836,
"learning_rate": 7.894736842105263e-07,
"loss": 2.2537,
"step": 3
},
{
"epoch": 0.0,
"grad_norm": 24.01386070251465,
"learning_rate": 1.0526315789473685e-06,
"loss": 2.9292,
"step": 4
},
{
"epoch": 0.0,
"grad_norm": 31.66695785522461,
"learning_rate": 1.3157894736842106e-06,
"loss": 2.2537,
"step": 5
},
{
"epoch": 0.0,
"grad_norm": 32.51780700683594,
"learning_rate": 1.5789473684210526e-06,
"loss": 2.1596,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 21.205398559570312,
"learning_rate": 1.8421052631578948e-06,
"loss": 2.7517,
"step": 7
},
{
"epoch": 0.01,
"grad_norm": 22.60314178466797,
"learning_rate": 2.105263157894737e-06,
"loss": 2.8438,
"step": 8
},
{
"epoch": 0.01,
"grad_norm": 20.44194984436035,
"learning_rate": 2.368421052631579e-06,
"loss": 2.8111,
"step": 9
},
{
"epoch": 0.01,
"grad_norm": 17.109813690185547,
"learning_rate": 2.631578947368421e-06,
"loss": 1.6737,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 13.585737228393555,
"learning_rate": 2.8947368421052634e-06,
"loss": 2.279,
"step": 11
},
{
"epoch": 0.01,
"grad_norm": 12.35000228881836,
"learning_rate": 3.157894736842105e-06,
"loss": 2.2015,
"step": 12
},
{
"epoch": 0.01,
"grad_norm": 8.916664123535156,
"learning_rate": 3.421052631578948e-06,
"loss": 1.8539,
"step": 13
},
{
"epoch": 0.01,
"grad_norm": 17.41905975341797,
"learning_rate": 3.6842105263157896e-06,
"loss": 1.418,
"step": 14
},
{
"epoch": 0.01,
"grad_norm": 5.954832553863525,
"learning_rate": 3.947368421052632e-06,
"loss": 1.5505,
"step": 15
},
{
"epoch": 0.01,
"grad_norm": 25.4913272857666,
"learning_rate": 4.210526315789474e-06,
"loss": 3.2727,
"step": 16
},
{
"epoch": 0.01,
"grad_norm": 8.982560157775879,
"learning_rate": 4.473684210526316e-06,
"loss": 1.1862,
"step": 17
},
{
"epoch": 0.01,
"grad_norm": 8.32353687286377,
"learning_rate": 4.736842105263158e-06,
"loss": 0.9529,
"step": 18
},
{
"epoch": 0.02,
"grad_norm": 4.653748989105225,
"learning_rate": 5e-06,
"loss": 1.0857,
"step": 19
},
{
"epoch": 0.02,
"grad_norm": 4.2638726234436035,
"learning_rate": 5.263157894736842e-06,
"loss": 0.9583,
"step": 20
},
{
"epoch": 0.02,
"grad_norm": 15.734344482421875,
"learning_rate": 5.526315789473685e-06,
"loss": 2.3258,
"step": 21
},
{
"epoch": 0.02,
"grad_norm": 7.165862560272217,
"learning_rate": 5.789473684210527e-06,
"loss": 0.6352,
"step": 22
},
{
"epoch": 0.02,
"grad_norm": 2.9405505657196045,
"learning_rate": 6.0526315789473685e-06,
"loss": 0.7807,
"step": 23
},
{
"epoch": 0.02,
"grad_norm": 2.641676425933838,
"learning_rate": 6.31578947368421e-06,
"loss": 0.7131,
"step": 24
},
{
"epoch": 0.02,
"grad_norm": 14.434457778930664,
"learning_rate": 6.578947368421054e-06,
"loss": 2.0395,
"step": 25
},
{
"epoch": 0.02,
"grad_norm": 7.488234996795654,
"learning_rate": 6.842105263157896e-06,
"loss": 0.4789,
"step": 26
},
{
"epoch": 0.02,
"grad_norm": 2.120142936706543,
"learning_rate": 7.1052631578947375e-06,
"loss": 0.5413,
"step": 27
},
{
"epoch": 0.02,
"grad_norm": 1.2815049886703491,
"learning_rate": 7.368421052631579e-06,
"loss": 0.4593,
"step": 28
},
{
"epoch": 0.02,
"grad_norm": 1.0471339225769043,
"learning_rate": 7.631578947368423e-06,
"loss": 0.4047,
"step": 29
},
{
"epoch": 0.02,
"grad_norm": 0.9930760264396667,
"learning_rate": 7.894736842105265e-06,
"loss": 0.3877,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 0.7368630170822144,
"learning_rate": 8.157894736842106e-06,
"loss": 0.3533,
"step": 31
},
{
"epoch": 0.03,
"grad_norm": 0.6118948459625244,
"learning_rate": 8.421052631578948e-06,
"loss": 0.3553,
"step": 32
},
{
"epoch": 0.03,
"grad_norm": 3.6724331378936768,
"learning_rate": 8.68421052631579e-06,
"loss": 0.3004,
"step": 33
},
{
"epoch": 0.03,
"grad_norm": 0.5961164832115173,
"learning_rate": 8.947368421052632e-06,
"loss": 0.3356,
"step": 34
},
{
"epoch": 0.03,
"grad_norm": 1.6245735883712769,
"learning_rate": 9.210526315789474e-06,
"loss": 0.2538,
"step": 35
},
{
"epoch": 0.03,
"grad_norm": 0.331278532743454,
"learning_rate": 9.473684210526315e-06,
"loss": 0.3138,
"step": 36
},
{
"epoch": 0.03,
"grad_norm": 0.40372923016548157,
"learning_rate": 9.736842105263159e-06,
"loss": 0.3286,
"step": 37
},
{
"epoch": 0.03,
"grad_norm": 0.3106740415096283,
"learning_rate": 1e-05,
"loss": 0.3079,
"step": 38
},
{
"epoch": 0.03,
"grad_norm": 0.26451367139816284,
"learning_rate": 9.999983202901414e-06,
"loss": 0.308,
"step": 39
},
{
"epoch": 0.03,
"grad_norm": 0.3279931843280792,
"learning_rate": 9.999932811718507e-06,
"loss": 0.3247,
"step": 40
},
{
"epoch": 0.03,
"grad_norm": 15.684857368469238,
"learning_rate": 9.999848826789853e-06,
"loss": 1.4494,
"step": 41
},
{
"epoch": 0.03,
"grad_norm": 0.3096879720687866,
"learning_rate": 9.999731248679734e-06,
"loss": 0.3182,
"step": 42
},
{
"epoch": 0.03,
"grad_norm": 124.41254425048828,
"learning_rate": 9.999580078178134e-06,
"loss": 0.2911,
"step": 43
},
{
"epoch": 0.04,
"grad_norm": 0.21817980706691742,
"learning_rate": 9.999395316300748e-06,
"loss": 0.3025,
"step": 44
},
{
"epoch": 0.04,
"grad_norm": 3.227489471435547,
"learning_rate": 9.999176964288958e-06,
"loss": 0.24,
"step": 45
},
{
"epoch": 0.04,
"grad_norm": 1.7333533763885498,
"learning_rate": 9.99892502360984e-06,
"loss": 0.2229,
"step": 46
},
{
"epoch": 0.04,
"grad_norm": 0.3966839909553528,
"learning_rate": 9.99863949595614e-06,
"loss": 0.2904,
"step": 47
},
{
"epoch": 0.04,
"grad_norm": 1.4309226274490356,
"learning_rate": 9.998320383246271e-06,
"loss": 0.2298,
"step": 48
},
{
"epoch": 0.04,
"grad_norm": 0.25818589329719543,
"learning_rate": 9.997967687624304e-06,
"loss": 0.3065,
"step": 49
},
{
"epoch": 0.04,
"grad_norm": 0.24939408898353577,
"learning_rate": 9.99758141145994e-06,
"loss": 0.3148,
"step": 50
},
{
"epoch": 0.04,
"eval_loss": 0.2127760648727417,
"eval_runtime": 127.8408,
"eval_samples_per_second": 34.418,
"eval_steps_per_second": 1.079,
"step": 50
},
{
"epoch": 0.04,
"grad_norm": 1.032554030418396,
"learning_rate": 9.99716155734851e-06,
"loss": 0.2034,
"step": 51
},
{
"epoch": 0.04,
"grad_norm": 0.38443437218666077,
"learning_rate": 9.996708128110947e-06,
"loss": 0.3106,
"step": 52
},
{
"epoch": 0.04,
"grad_norm": 0.22864608466625214,
"learning_rate": 9.996221126793766e-06,
"loss": 0.2977,
"step": 53
},
{
"epoch": 0.04,
"grad_norm": 1.3222233057022095,
"learning_rate": 9.995700556669052e-06,
"loss": 0.1841,
"step": 54
},
{
"epoch": 0.04,
"grad_norm": 0.985467791557312,
"learning_rate": 9.995146421234434e-06,
"loss": 0.1732,
"step": 55
},
{
"epoch": 0.04,
"grad_norm": 0.7126708030700684,
"learning_rate": 9.994558724213056e-06,
"loss": 0.3081,
"step": 56
},
{
"epoch": 0.05,
"grad_norm": 0.37900444865226746,
"learning_rate": 9.99393746955356e-06,
"loss": 0.3064,
"step": 57
},
{
"epoch": 0.05,
"grad_norm": 0.345359742641449,
"learning_rate": 9.993282661430058e-06,
"loss": 0.2883,
"step": 58
},
{
"epoch": 0.05,
"grad_norm": 0.4348618686199188,
"learning_rate": 9.9925943042421e-06,
"loss": 0.2846,
"step": 59
},
{
"epoch": 0.05,
"grad_norm": 0.37012800574302673,
"learning_rate": 9.991872402614648e-06,
"loss": 0.3055,
"step": 60
},
{
"epoch": 0.05,
"grad_norm": 0.2713848948478699,
"learning_rate": 9.991116961398044e-06,
"loss": 0.2795,
"step": 61
},
{
"epoch": 0.05,
"grad_norm": 0.3882791996002197,
"learning_rate": 9.990327985667972e-06,
"loss": 0.2869,
"step": 62
},
{
"epoch": 0.05,
"grad_norm": 1.31046462059021,
"learning_rate": 9.989505480725438e-06,
"loss": 0.184,
"step": 63
},
{
"epoch": 0.05,
"grad_norm": 0.38074883818626404,
"learning_rate": 9.988649452096719e-06,
"loss": 0.3085,
"step": 64
},
{
"epoch": 0.05,
"grad_norm": 0.40416786074638367,
"learning_rate": 9.987759905533333e-06,
"loss": 0.3054,
"step": 65
},
{
"epoch": 0.05,
"grad_norm": 0.2648641765117645,
"learning_rate": 9.986836847012001e-06,
"loss": 0.3138,
"step": 66
},
{
"epoch": 0.05,
"grad_norm": 0.2414146363735199,
"learning_rate": 9.985880282734604e-06,
"loss": 0.2839,
"step": 67
},
{
"epoch": 0.05,
"grad_norm": 0.31883278489112854,
"learning_rate": 9.984890219128148e-06,
"loss": 0.3059,
"step": 68
},
{
"epoch": 0.06,
"grad_norm": 0.29476046562194824,
"learning_rate": 9.983866662844706e-06,
"loss": 0.3083,
"step": 69
},
{
"epoch": 0.06,
"grad_norm": 1.5760763883590698,
"learning_rate": 9.98280962076139e-06,
"loss": 0.1941,
"step": 70
},
{
"epoch": 0.06,
"grad_norm": 1.0190508365631104,
"learning_rate": 9.9817190999803e-06,
"loss": 0.1566,
"step": 71
},
{
"epoch": 0.06,
"grad_norm": 0.3555128872394562,
"learning_rate": 9.980595107828465e-06,
"loss": 0.2913,
"step": 72
},
{
"epoch": 0.06,
"grad_norm": 0.9496508836746216,
"learning_rate": 9.979437651857809e-06,
"loss": 0.1364,
"step": 73
},
{
"epoch": 0.06,
"grad_norm": 0.3887488842010498,
"learning_rate": 9.978246739845095e-06,
"loss": 0.2998,
"step": 74
},
{
"epoch": 0.06,
"grad_norm": 0.24264326691627502,
"learning_rate": 9.977022379791865e-06,
"loss": 0.2978,
"step": 75
},
{
"epoch": 0.06,
"grad_norm": 1.0547157526016235,
"learning_rate": 9.975764579924402e-06,
"loss": 0.1535,
"step": 76
},
{
"epoch": 0.06,
"grad_norm": 0.35592320561408997,
"learning_rate": 9.974473348693661e-06,
"loss": 0.2874,
"step": 77
},
{
"epoch": 0.06,
"grad_norm": 0.980772078037262,
"learning_rate": 9.973148694775217e-06,
"loss": 0.1661,
"step": 78
},
{
"epoch": 0.06,
"grad_norm": 0.36755406856536865,
"learning_rate": 9.971790627069205e-06,
"loss": 0.2795,
"step": 79
},
{
"epoch": 0.06,
"grad_norm": 0.27261894941329956,
"learning_rate": 9.970399154700264e-06,
"loss": 0.2732,
"step": 80
},
{
"epoch": 0.06,
"grad_norm": 0.23866336047649384,
"learning_rate": 9.968974287017474e-06,
"loss": 0.2947,
"step": 81
},
{
"epoch": 0.07,
"grad_norm": 0.9532305002212524,
"learning_rate": 9.967516033594295e-06,
"loss": 0.1491,
"step": 82
},
{
"epoch": 0.07,
"grad_norm": 0.3923441469669342,
"learning_rate": 9.966024404228495e-06,
"loss": 0.2702,
"step": 83
},
{
"epoch": 0.07,
"grad_norm": 0.9652503132820129,
"learning_rate": 9.964499408942093e-06,
"loss": 0.1692,
"step": 84
},
{
"epoch": 0.07,
"grad_norm": 0.2849303185939789,
"learning_rate": 9.962941057981285e-06,
"loss": 0.2854,
"step": 85
},
{
"epoch": 0.07,
"grad_norm": 0.3102649450302124,
"learning_rate": 9.961349361816384e-06,
"loss": 0.277,
"step": 86
},
{
"epoch": 0.07,
"grad_norm": 6.806617736816406,
"learning_rate": 9.95972433114174e-06,
"loss": 1.1664,
"step": 87
},
{
"epoch": 0.07,
"grad_norm": 1.0248916149139404,
"learning_rate": 9.958065976875671e-06,
"loss": 0.158,
"step": 88
},
{
"epoch": 0.07,
"grad_norm": 4.860145568847656,
"learning_rate": 9.956374310160398e-06,
"loss": 1.0526,
"step": 89
},
{
"epoch": 0.07,
"grad_norm": 0.5925382375717163,
"learning_rate": 9.954649342361952e-06,
"loss": 0.2891,
"step": 90
},
{
"epoch": 0.07,
"grad_norm": 0.33539047837257385,
"learning_rate": 9.95289108507012e-06,
"loss": 0.2767,
"step": 91
},
{
"epoch": 0.07,
"grad_norm": 0.3716072738170624,
"learning_rate": 9.951099550098349e-06,
"loss": 0.2695,
"step": 92
},
{
"epoch": 0.07,
"grad_norm": 0.3734178841114044,
"learning_rate": 9.949274749483671e-06,
"loss": 0.28,
"step": 93
},
{
"epoch": 0.08,
"grad_norm": 0.36469775438308716,
"learning_rate": 9.947416695486633e-06,
"loss": 0.2811,
"step": 94
},
{
"epoch": 0.08,
"grad_norm": 0.40128713846206665,
"learning_rate": 9.9455254005912e-06,
"loss": 0.2781,
"step": 95
},
{
"epoch": 0.08,
"grad_norm": 0.2535058259963989,
"learning_rate": 9.943600877504679e-06,
"loss": 0.2899,
"step": 96
},
{
"epoch": 0.08,
"grad_norm": 1.8747999668121338,
"learning_rate": 9.941643139157631e-06,
"loss": 0.1575,
"step": 97
},
{
"epoch": 0.08,
"grad_norm": 1.455507516860962,
"learning_rate": 9.939652198703785e-06,
"loss": 0.1614,
"step": 98
},
{
"epoch": 0.08,
"grad_norm": 0.3626094460487366,
"learning_rate": 9.93762806951995e-06,
"loss": 0.2645,
"step": 99
},
{
"epoch": 0.08,
"grad_norm": 0.34168311953544617,
"learning_rate": 9.935570765205927e-06,
"loss": 0.2845,
"step": 100
},
{
"epoch": 0.08,
"eval_loss": 0.1866653561592102,
"eval_runtime": 125.3013,
"eval_samples_per_second": 35.115,
"eval_steps_per_second": 1.101,
"step": 100
},
{
"epoch": 0.08,
"grad_norm": 0.2769843637943268,
"learning_rate": 9.933480299584413e-06,
"loss": 0.271,
"step": 101
},
{
"epoch": 0.08,
"grad_norm": 4.831402778625488,
"learning_rate": 9.93135668670091e-06,
"loss": 0.9937,
"step": 102
},
{
"epoch": 0.08,
"grad_norm": 0.32933509349823,
"learning_rate": 9.92919994082363e-06,
"loss": 0.2743,
"step": 103
},
{
"epoch": 0.08,
"grad_norm": 1.5838934183120728,
"learning_rate": 9.927010076443408e-06,
"loss": 0.1687,
"step": 104
},
{
"epoch": 0.08,
"grad_norm": 1.3275009393692017,
"learning_rate": 9.924787108273585e-06,
"loss": 0.1498,
"step": 105
},
{
"epoch": 0.08,
"grad_norm": 0.427726686000824,
"learning_rate": 9.92253105124993e-06,
"loss": 0.2637,
"step": 106
},
{
"epoch": 0.09,
"grad_norm": 0.8452320694923401,
"learning_rate": 9.920241920530529e-06,
"loss": 0.1436,
"step": 107
},
{
"epoch": 0.09,
"grad_norm": 0.296527236700058,
"learning_rate": 9.91791973149568e-06,
"loss": 0.2706,
"step": 108
},
{
"epoch": 0.09,
"grad_norm": 0.23078039288520813,
"learning_rate": 9.915564499747803e-06,
"loss": 0.2439,
"step": 109
},
{
"epoch": 0.09,
"grad_norm": 0.2343069314956665,
"learning_rate": 9.91317624111132e-06,
"loss": 0.2651,
"step": 110
},
{
"epoch": 0.09,
"grad_norm": 2.1916985511779785,
"learning_rate": 9.910754971632555e-06,
"loss": 0.1737,
"step": 111
},
{
"epoch": 0.09,
"grad_norm": 1.9547094106674194,
"learning_rate": 9.908300707579633e-06,
"loss": 0.1694,
"step": 112
},
{
"epoch": 0.09,
"grad_norm": 1.210723638534546,
"learning_rate": 9.905813465442355e-06,
"loss": 0.143,
"step": 113
},
{
"epoch": 0.09,
"grad_norm": 0.8066962957382202,
"learning_rate": 9.903293261932106e-06,
"loss": 0.1372,
"step": 114
},
{
"epoch": 0.09,
"grad_norm": 0.8957458138465881,
"learning_rate": 9.900740113981726e-06,
"loss": 0.1512,
"step": 115
},
{
"epoch": 0.09,
"grad_norm": 0.44530943036079407,
"learning_rate": 9.898154038745408e-06,
"loss": 0.2756,
"step": 116
},
{
"epoch": 0.09,
"grad_norm": 0.4530113935470581,
"learning_rate": 9.895535053598577e-06,
"loss": 0.3019,
"step": 117
},
{
"epoch": 0.09,
"grad_norm": 1.139613389968872,
"learning_rate": 9.89288317613777e-06,
"loss": 0.1469,
"step": 118
},
{
"epoch": 0.1,
"grad_norm": 1.5474779605865479,
"learning_rate": 9.89019842418053e-06,
"loss": 0.1737,
"step": 119
},
{
"epoch": 0.1,
"grad_norm": 0.40321314334869385,
"learning_rate": 9.887480815765272e-06,
"loss": 0.2729,
"step": 120
},
{
"epoch": 0.1,
"grad_norm": 12.440898895263672,
"learning_rate": 9.88473036915117e-06,
"loss": 1.3209,
"step": 121
},
{
"epoch": 0.1,
"grad_norm": 0.3455779552459717,
"learning_rate": 9.881947102818036e-06,
"loss": 0.2766,
"step": 122
},
{
"epoch": 0.1,
"grad_norm": 0.27867040038108826,
"learning_rate": 9.879131035466187e-06,
"loss": 0.2745,
"step": 123
},
{
"epoch": 0.1,
"grad_norm": 0.26845067739486694,
"learning_rate": 9.876282186016328e-06,
"loss": 0.2602,
"step": 124
},
{
"epoch": 0.1,
"grad_norm": 0.24670612812042236,
"learning_rate": 9.873400573609422e-06,
"loss": 0.2723,
"step": 125
},
{
"epoch": 0.1,
"grad_norm": 3.64876389503479,
"learning_rate": 9.870486217606557e-06,
"loss": 0.99,
"step": 126
},
{
"epoch": 0.1,
"grad_norm": 0.2439020425081253,
"learning_rate": 9.867539137588827e-06,
"loss": 0.272,
"step": 127
},
{
"epoch": 0.1,
"grad_norm": 1.152934193611145,
"learning_rate": 9.864559353357189e-06,
"loss": 0.1624,
"step": 128
},
{
"epoch": 0.1,
"grad_norm": 0.3583047389984131,
"learning_rate": 9.861546884932331e-06,
"loss": 0.2731,
"step": 129
},
{
"epoch": 0.1,
"grad_norm": 1.1359823942184448,
"learning_rate": 9.858501752554548e-06,
"loss": 0.1731,
"step": 130
},
{
"epoch": 0.1,
"grad_norm": 0.28555968403816223,
"learning_rate": 9.855423976683597e-06,
"loss": 0.2651,
"step": 131
},
{
"epoch": 0.11,
"grad_norm": 0.8387528657913208,
"learning_rate": 9.852313577998555e-06,
"loss": 0.1322,
"step": 132
},
{
"epoch": 0.11,
"grad_norm": 0.9203013777732849,
"learning_rate": 9.849170577397695e-06,
"loss": 0.126,
"step": 133
},
{
"epoch": 0.11,
"grad_norm": 0.9038960337638855,
"learning_rate": 9.845994995998332e-06,
"loss": 0.1351,
"step": 134
},
{
"epoch": 0.11,
"grad_norm": 0.7782374620437622,
"learning_rate": 9.842786855136688e-06,
"loss": 0.1416,
"step": 135
},
{
"epoch": 0.11,
"grad_norm": 0.8540252447128296,
"learning_rate": 9.839546176367745e-06,
"loss": 0.1244,
"step": 136
},
{
"epoch": 0.11,
"grad_norm": 0.9569166302680969,
"learning_rate": 9.836272981465107e-06,
"loss": 0.1614,
"step": 137
},
{
"epoch": 0.11,
"grad_norm": 0.4781632721424103,
"learning_rate": 9.83296729242084e-06,
"loss": 0.2737,
"step": 138
},
{
"epoch": 0.11,
"grad_norm": 0.24423356354236603,
"learning_rate": 9.829629131445342e-06,
"loss": 0.2514,
"step": 139
},
{
"epoch": 0.11,
"grad_norm": 0.20708948373794556,
"learning_rate": 9.826258520967178e-06,
"loss": 0.2405,
"step": 140
},
{
"epoch": 0.11,
"grad_norm": 6.202610969543457,
"learning_rate": 9.822855483632942e-06,
"loss": 0.9642,
"step": 141
},
{
"epoch": 0.11,
"grad_norm": 0.37259483337402344,
"learning_rate": 9.819420042307091e-06,
"loss": 0.253,
"step": 142
},
{
"epoch": 0.11,
"grad_norm": 0.3819926381111145,
"learning_rate": 9.815952220071807e-06,
"loss": 0.2716,
"step": 143
},
{
"epoch": 0.12,
"grad_norm": 0.28403234481811523,
"learning_rate": 9.812452040226828e-06,
"loss": 0.2509,
"step": 144
},
{
"epoch": 0.12,
"grad_norm": 0.29554104804992676,
"learning_rate": 9.808919526289303e-06,
"loss": 0.2638,
"step": 145
},
{
"epoch": 0.12,
"grad_norm": 2.375548839569092,
"learning_rate": 9.805354701993624e-06,
"loss": 0.189,
"step": 146
},
{
"epoch": 0.12,
"grad_norm": 0.3697062134742737,
"learning_rate": 9.801757591291275e-06,
"loss": 0.2921,
"step": 147
},
{
"epoch": 0.12,
"grad_norm": 0.2786528468132019,
"learning_rate": 9.798128218350662e-06,
"loss": 0.2541,
"step": 148
},
{
"epoch": 0.12,
"grad_norm": 2.406625747680664,
"learning_rate": 9.794466607556963e-06,
"loss": 0.9735,
"step": 149
},
{
"epoch": 0.12,
"grad_norm": 0.2333746999502182,
"learning_rate": 9.79077278351195e-06,
"loss": 0.2585,
"step": 150
},
{
"epoch": 0.12,
"eval_loss": 0.18337422609329224,
"eval_runtime": 126.4578,
"eval_samples_per_second": 34.794,
"eval_steps_per_second": 1.091,
"step": 150
},
{
"epoch": 0.12,
"grad_norm": 0.35202762484550476,
"learning_rate": 9.787046771033836e-06,
"loss": 0.2778,
"step": 151
},
{
"epoch": 0.12,
"grad_norm": 1.0789929628372192,
"learning_rate": 9.7832885951571e-06,
"loss": 0.1456,
"step": 152
},
{
"epoch": 0.12,
"grad_norm": 3.102879047393799,
"learning_rate": 9.77949828113232e-06,
"loss": 0.892,
"step": 153
},
{
"epoch": 0.12,
"grad_norm": 0.4323928654193878,
"learning_rate": 9.77567585442601e-06,
"loss": 0.2768,
"step": 154
},
{
"epoch": 0.12,
"grad_norm": 0.9265918135643005,
"learning_rate": 9.77182134072044e-06,
"loss": 0.1577,
"step": 155
},
{
"epoch": 0.12,
"grad_norm": 0.2472756952047348,
"learning_rate": 9.767934765913469e-06,
"loss": 0.246,
"step": 156
},
{
"epoch": 0.13,
"grad_norm": 0.7626774907112122,
"learning_rate": 9.76401615611837e-06,
"loss": 0.2929,
"step": 157
},
{
"epoch": 0.13,
"grad_norm": 0.20579107105731964,
"learning_rate": 9.76006553766365e-06,
"loss": 0.2593,
"step": 158
},
{
"epoch": 0.13,
"grad_norm": 0.3477925658226013,
"learning_rate": 9.756082937092884e-06,
"loss": 0.2562,
"step": 159
},
{
"epoch": 0.13,
"grad_norm": 0.2534414529800415,
"learning_rate": 9.752068381164523e-06,
"loss": 0.2576,
"step": 160
},
{
"epoch": 0.13,
"grad_norm": 0.25206950306892395,
"learning_rate": 9.748021896851725e-06,
"loss": 0.2821,
"step": 161
},
{
"epoch": 0.13,
"grad_norm": 0.9337000846862793,
"learning_rate": 9.743943511342168e-06,
"loss": 0.1188,
"step": 162
},
{
"epoch": 0.13,
"grad_norm": 3.3424620628356934,
"learning_rate": 9.739833252037869e-06,
"loss": 0.8829,
"step": 163
},
{
"epoch": 0.13,
"grad_norm": 2.2740018367767334,
"learning_rate": 9.735691146555002e-06,
"loss": 0.8606,
"step": 164
},
{
"epoch": 0.13,
"grad_norm": 0.6022862792015076,
"learning_rate": 9.731517222723705e-06,
"loss": 0.2565,
"step": 165
},
{
"epoch": 0.13,
"grad_norm": 0.5306522250175476,
"learning_rate": 9.727311508587907e-06,
"loss": 0.2595,
"step": 166
},
{
"epoch": 0.13,
"grad_norm": 0.3691086173057556,
"learning_rate": 9.723074032405121e-06,
"loss": 0.2672,
"step": 167
},
{
"epoch": 0.13,
"grad_norm": 0.9032378196716309,
"learning_rate": 9.718804822646274e-06,
"loss": 0.1239,
"step": 168
},
{
"epoch": 0.14,
"grad_norm": 3.268183946609497,
"learning_rate": 9.714503907995497e-06,
"loss": 0.9096,
"step": 169
},
{
"epoch": 0.14,
"grad_norm": 0.7538490295410156,
"learning_rate": 9.710171317349946e-06,
"loss": 0.2891,
"step": 170
},
{
"epoch": 0.14,
"grad_norm": 0.506138026714325,
"learning_rate": 9.705807079819603e-06,
"loss": 0.2694,
"step": 171
},
{
"epoch": 0.14,
"grad_norm": 0.35388875007629395,
"learning_rate": 9.701411224727077e-06,
"loss": 0.272,
"step": 172
},
{
"epoch": 0.14,
"grad_norm": 0.46719998121261597,
"learning_rate": 9.696983781607417e-06,
"loss": 0.2834,
"step": 173
},
{
"epoch": 0.14,
"grad_norm": 2.4254088401794434,
"learning_rate": 9.692524780207897e-06,
"loss": 0.8995,
"step": 174
},
{
"epoch": 0.14,
"grad_norm": 0.3909335136413574,
"learning_rate": 9.688034250487835e-06,
"loss": 0.2588,
"step": 175
},
{
"epoch": 0.14,
"grad_norm": 0.32924899458885193,
"learning_rate": 9.683512222618376e-06,
"loss": 0.256,
"step": 176
},
{
"epoch": 0.14,
"grad_norm": 0.2901920974254608,
"learning_rate": 9.678958726982302e-06,
"loss": 0.2503,
"step": 177
},
{
"epoch": 0.14,
"grad_norm": 0.36759233474731445,
"learning_rate": 9.674373794173818e-06,
"loss": 0.2833,
"step": 178
},
{
"epoch": 0.14,
"grad_norm": 0.2940978407859802,
"learning_rate": 9.669757454998353e-06,
"loss": 0.2576,
"step": 179
},
{
"epoch": 0.14,
"grad_norm": 0.23943258821964264,
"learning_rate": 9.665109740472346e-06,
"loss": 0.254,
"step": 180
},
{
"epoch": 0.14,
"grad_norm": 1.4819834232330322,
"learning_rate": 9.660430681823047e-06,
"loss": 0.158,
"step": 181
},
{
"epoch": 0.15,
"grad_norm": 1.2044161558151245,
"learning_rate": 9.655720310488298e-06,
"loss": 0.1432,
"step": 182
},
{
"epoch": 0.15,
"grad_norm": 0.2901252508163452,
"learning_rate": 9.650978658116329e-06,
"loss": 0.2637,
"step": 183
},
{
"epoch": 0.15,
"grad_norm": 0.9040230512619019,
"learning_rate": 9.64620575656554e-06,
"loss": 0.1408,
"step": 184
},
{
"epoch": 0.15,
"grad_norm": 3.700429916381836,
"learning_rate": 9.64140163790429e-06,
"loss": 0.8883,
"step": 185
},
{
"epoch": 0.15,
"grad_norm": 2.9336678981781006,
"learning_rate": 9.636566334410682e-06,
"loss": 0.8769,
"step": 186
},
{
"epoch": 0.15,
"grad_norm": 0.3975153863430023,
"learning_rate": 9.631699878572343e-06,
"loss": 0.2772,
"step": 187
},
{
"epoch": 0.15,
"grad_norm": 0.3097396492958069,
"learning_rate": 9.62680230308621e-06,
"loss": 0.2598,
"step": 188
},
{
"epoch": 0.15,
"grad_norm": 0.2801744043827057,
"learning_rate": 9.621873640858302e-06,
"loss": 0.2607,
"step": 189
},
{
"epoch": 0.15,
"grad_norm": 1.0821884870529175,
"learning_rate": 9.616913925003514e-06,
"loss": 0.1583,
"step": 190
},
{
"epoch": 0.15,
"grad_norm": 0.2554050087928772,
"learning_rate": 9.611923188845377e-06,
"loss": 0.2851,
"step": 191
},
{
"epoch": 0.15,
"grad_norm": 0.9589229822158813,
"learning_rate": 9.606901465915848e-06,
"loss": 0.139,
"step": 192
},
{
"epoch": 0.15,
"grad_norm": 3.1027727127075195,
"learning_rate": 9.601848789955078e-06,
"loss": 0.8425,
"step": 193
},
{
"epoch": 0.16,
"grad_norm": 3.0621137619018555,
"learning_rate": 9.596765194911182e-06,
"loss": 0.886,
"step": 194
},
{
"epoch": 0.16,
"grad_norm": 0.9031972289085388,
"learning_rate": 9.591650714940022e-06,
"loss": 0.1601,
"step": 195
},
{
"epoch": 0.16,
"grad_norm": 0.2778763175010681,
"learning_rate": 9.586505384404967e-06,
"loss": 0.2641,
"step": 196
},
{
"epoch": 0.16,
"grad_norm": 0.836699366569519,
"learning_rate": 9.581329237876664e-06,
"loss": 0.121,
"step": 197
},
{
"epoch": 0.16,
"grad_norm": 0.8168700337409973,
"learning_rate": 9.576122310132814e-06,
"loss": 0.1402,
"step": 198
},
{
"epoch": 0.16,
"grad_norm": 0.8636983633041382,
"learning_rate": 9.570884636157928e-06,
"loss": 0.1448,
"step": 199
},
{
"epoch": 0.16,
"grad_norm": 0.2751665711402893,
"learning_rate": 9.565616251143094e-06,
"loss": 0.2685,
"step": 200
},
{
"epoch": 0.16,
"eval_loss": 0.17989018559455872,
"eval_runtime": 693.6384,
"eval_samples_per_second": 6.343,
"eval_steps_per_second": 0.199,
"step": 200
},
{
"epoch": 0.16,
"grad_norm": 0.2639569938182831,
"learning_rate": 9.560317190485748e-06,
"loss": 0.2757,
"step": 201
},
{
"epoch": 0.16,
"grad_norm": 1.1106971502304077,
"learning_rate": 9.554987489789426e-06,
"loss": 0.1325,
"step": 202
},
{
"epoch": 0.16,
"grad_norm": 0.8799528479576111,
"learning_rate": 9.549627184863531e-06,
"loss": 0.1475,
"step": 203
},
{
"epoch": 0.16,
"grad_norm": 0.20529350638389587,
"learning_rate": 9.544236311723091e-06,
"loss": 0.2626,
"step": 204
},
{
"epoch": 0.16,
"grad_norm": 0.834397554397583,
"learning_rate": 9.538814906588519e-06,
"loss": 0.1375,
"step": 205
},
{
"epoch": 0.16,
"grad_norm": 0.2027309089899063,
"learning_rate": 9.533363005885362e-06,
"loss": 0.2665,
"step": 206
},
{
"epoch": 0.17,
"grad_norm": 0.9026845693588257,
"learning_rate": 9.527880646244071e-06,
"loss": 0.1146,
"step": 207
},
{
"epoch": 0.17,
"grad_norm": 0.8519452810287476,
"learning_rate": 9.522367864499736e-06,
"loss": 0.1224,
"step": 208
},
{
"epoch": 0.17,
"grad_norm": 0.20462436974048615,
"learning_rate": 9.51682469769185e-06,
"loss": 0.2641,
"step": 209
},
{
"epoch": 0.17,
"grad_norm": 0.20429855585098267,
"learning_rate": 9.511251183064068e-06,
"loss": 0.2416,
"step": 210
},
{
"epoch": 0.17,
"grad_norm": 0.1986883580684662,
"learning_rate": 9.505647358063933e-06,
"loss": 0.2409,
"step": 211
},
{
"epoch": 0.17,
"grad_norm": 0.8620755672454834,
"learning_rate": 9.50001326034265e-06,
"loss": 0.1255,
"step": 212
},
{
"epoch": 0.17,
"grad_norm": 0.18794576823711395,
"learning_rate": 9.494348927754816e-06,
"loss": 0.2599,
"step": 213
},
{
"epoch": 0.17,
"grad_norm": 1.0955018997192383,
"learning_rate": 9.48865439835817e-06,
"loss": 0.1589,
"step": 214
},
{
"epoch": 0.17,
"grad_norm": 0.1930237114429474,
"learning_rate": 9.482929710413343e-06,
"loss": 0.2558,
"step": 215
},
{
"epoch": 0.17,
"grad_norm": 0.2716980576515198,
"learning_rate": 9.477174902383593e-06,
"loss": 0.2765,
"step": 216
},
{
"epoch": 0.17,
"grad_norm": 5.3866472244262695,
"learning_rate": 9.471390012934549e-06,
"loss": 0.9258,
"step": 217
},
{
"epoch": 0.17,
"grad_norm": 4.305013179779053,
"learning_rate": 9.465575080933959e-06,
"loss": 0.9043,
"step": 218
},
{
"epoch": 0.18,
"grad_norm": 1.0870715379714966,
"learning_rate": 9.459730145451414e-06,
"loss": 0.1364,
"step": 219
},
{
"epoch": 0.18,
"grad_norm": 0.8194337487220764,
"learning_rate": 9.453855245758098e-06,
"loss": 0.125,
"step": 220
},
{
"epoch": 0.18,
"grad_norm": 1.0974235534667969,
"learning_rate": 9.44795042132652e-06,
"loss": 0.1459,
"step": 221
},
{
"epoch": 0.18,
"grad_norm": 0.8618761301040649,
"learning_rate": 9.442015711830246e-06,
"loss": 0.1339,
"step": 222
},
{
"epoch": 0.18,
"grad_norm": 0.805436909198761,
"learning_rate": 9.436051157143635e-06,
"loss": 0.1325,
"step": 223
},
{
"epoch": 0.18,
"grad_norm": 0.5011575818061829,
"learning_rate": 9.430056797341574e-06,
"loss": 0.2793,
"step": 224
},
{
"epoch": 0.18,
"grad_norm": 0.9908254146575928,
"learning_rate": 9.424032672699205e-06,
"loss": 0.1382,
"step": 225
},
{
"epoch": 0.18,
"grad_norm": 0.24774260818958282,
"learning_rate": 9.417978823691652e-06,
"loss": 0.2572,
"step": 226
},
{
"epoch": 0.18,
"grad_norm": 0.26598531007766724,
"learning_rate": 9.411895290993754e-06,
"loss": 0.2635,
"step": 227
},
{
"epoch": 0.18,
"grad_norm": 0.24133723974227905,
"learning_rate": 9.405782115479793e-06,
"loss": 0.2523,
"step": 228
},
{
"epoch": 0.18,
"grad_norm": 0.22140967845916748,
"learning_rate": 9.399639338223213e-06,
"loss": 0.2372,
"step": 229
},
{
"epoch": 0.18,
"grad_norm": 0.3572402000427246,
"learning_rate": 9.393467000496345e-06,
"loss": 0.2893,
"step": 230
},
{
"epoch": 0.18,
"grad_norm": 0.20770740509033203,
"learning_rate": 9.38726514377014e-06,
"loss": 0.2661,
"step": 231
},
{
"epoch": 0.19,
"grad_norm": 4.8314995765686035,
"learning_rate": 9.381033809713872e-06,
"loss": 0.8688,
"step": 232
},
{
"epoch": 0.19,
"grad_norm": 0.2563977539539337,
"learning_rate": 9.37477304019488e-06,
"loss": 0.2654,
"step": 233
},
{
"epoch": 0.19,
"grad_norm": 0.33227619528770447,
"learning_rate": 9.368482877278264e-06,
"loss": 0.2646,
"step": 234
},
{
"epoch": 0.19,
"grad_norm": 1.2100895643234253,
"learning_rate": 9.362163363226622e-06,
"loss": 0.1447,
"step": 235
},
{
"epoch": 0.19,
"grad_norm": 1.0367554426193237,
"learning_rate": 9.355814540499753e-06,
"loss": 0.1435,
"step": 236
},
{
"epoch": 0.19,
"grad_norm": 0.19879880547523499,
"learning_rate": 9.349436451754378e-06,
"loss": 0.2437,
"step": 237
},
{
"epoch": 0.19,
"grad_norm": 0.24105799198150635,
"learning_rate": 9.34302913984385e-06,
"loss": 0.2558,
"step": 238
},
{
"epoch": 0.19,
"grad_norm": 3.0425894260406494,
"learning_rate": 9.33659264781787e-06,
"loss": 0.8779,
"step": 239
},
{
"epoch": 0.19,
"grad_norm": 2.376372814178467,
"learning_rate": 9.330127018922195e-06,
"loss": 0.8308,
"step": 240
},
{
"epoch": 0.19,
"grad_norm": 1.7529139518737793,
"learning_rate": 9.323632296598343e-06,
"loss": 0.8489,
"step": 241
},
{
"epoch": 0.19,
"grad_norm": 0.8360892534255981,
"learning_rate": 9.317108524483319e-06,
"loss": 0.1367,
"step": 242
},
{
"epoch": 0.19,
"grad_norm": 0.26334020495414734,
"learning_rate": 9.310555746409293e-06,
"loss": 0.2392,
"step": 243
},
{
"epoch": 0.2,
"grad_norm": 0.23332957923412323,
"learning_rate": 9.303974006403332e-06,
"loss": 0.2454,
"step": 244
},
{
"epoch": 0.2,
"grad_norm": 0.8559702038764954,
"learning_rate": 9.297363348687087e-06,
"loss": 0.1379,
"step": 245
},
{
"epoch": 0.2,
"grad_norm": 2.7465600967407227,
"learning_rate": 9.29072381767651e-06,
"loss": 0.844,
"step": 246
},
{
"epoch": 0.2,
"grad_norm": 0.3794975280761719,
"learning_rate": 9.284055457981541e-06,
"loss": 0.2588,
"step": 247
},
{
"epoch": 0.2,
"grad_norm": 0.9326925873756409,
"learning_rate": 9.27735831440582e-06,
"loss": 0.1226,
"step": 248
},
{
"epoch": 0.2,
"grad_norm": 0.3299356698989868,
"learning_rate": 9.270632431946377e-06,
"loss": 0.2604,
"step": 249
},
{
"epoch": 0.2,
"grad_norm": 0.7161573171615601,
"learning_rate": 9.26387785579334e-06,
"loss": 0.1022,
"step": 250
},
{
"epoch": 0.2,
"eval_loss": 0.17743495106697083,
"eval_runtime": 124.9423,
"eval_samples_per_second": 35.216,
"eval_steps_per_second": 1.105,
"step": 250
},
{
"epoch": 0.2,
"grad_norm": 0.2541516125202179,
"learning_rate": 9.257094631329617e-06,
"loss": 0.2488,
"step": 251
},
{
"epoch": 0.2,
"grad_norm": 0.250922828912735,
"learning_rate": 9.250282804130607e-06,
"loss": 0.2446,
"step": 252
},
{
"epoch": 0.2,
"grad_norm": 3.501448154449463,
"learning_rate": 9.243442419963884e-06,
"loss": 0.8441,
"step": 253
},
{
"epoch": 0.2,
"grad_norm": 0.7573642134666443,
"learning_rate": 9.236573524788888e-06,
"loss": 0.128,
"step": 254
},
{
"epoch": 0.2,
"grad_norm": 0.2414775937795639,
"learning_rate": 9.229676164756624e-06,
"loss": 0.2538,
"step": 255
},
{
"epoch": 0.2,
"grad_norm": 0.7211123704910278,
"learning_rate": 9.222750386209349e-06,
"loss": 0.1229,
"step": 256
},
{
"epoch": 0.21,
"grad_norm": 0.2653588354587555,
"learning_rate": 9.215796235680253e-06,
"loss": 0.2692,
"step": 257
},
{
"epoch": 0.21,
"grad_norm": 0.7575809955596924,
"learning_rate": 9.208813759893158e-06,
"loss": 0.1136,
"step": 258
},
{
"epoch": 0.21,
"grad_norm": 0.21408897638320923,
"learning_rate": 9.2018030057622e-06,
"loss": 0.2615,
"step": 259
},
{
"epoch": 0.21,
"grad_norm": 0.2540892958641052,
"learning_rate": 9.194764020391507e-06,
"loss": 0.2639,
"step": 260
},
{
"epoch": 0.21,
"grad_norm": 0.25612032413482666,
"learning_rate": 9.187696851074894e-06,
"loss": 0.2547,
"step": 261
},
{
"epoch": 0.21,
"grad_norm": 2.1312899589538574,
"learning_rate": 9.180601545295535e-06,
"loss": 0.8459,
"step": 262
},
{
"epoch": 0.21,
"grad_norm": 1.6619058847427368,
"learning_rate": 9.173478150725652e-06,
"loss": 0.8301,
"step": 263
},
{
"epoch": 0.21,
"grad_norm": 0.2908408045768738,
"learning_rate": 9.166326715226188e-06,
"loss": 0.2569,
"step": 264
},
{
"epoch": 0.21,
"grad_norm": 0.24241584539413452,
"learning_rate": 9.159147286846493e-06,
"loss": 0.2383,
"step": 265
},
{
"epoch": 0.21,
"grad_norm": 0.1878461092710495,
"learning_rate": 9.151939913823988e-06,
"loss": 0.2613,
"step": 266
},
{
"epoch": 0.21,
"grad_norm": 0.21349014341831207,
"learning_rate": 9.14470464458386e-06,
"loss": 0.2388,
"step": 267
},
{
"epoch": 0.21,
"grad_norm": 0.276134729385376,
"learning_rate": 9.137441527738718e-06,
"loss": 0.2398,
"step": 268
},
{
"epoch": 0.22,
"grad_norm": 0.2875753939151764,
"learning_rate": 9.13015061208828e-06,
"loss": 0.2543,
"step": 269
},
{
"epoch": 0.22,
"grad_norm": 0.19407491385936737,
"learning_rate": 9.122831946619038e-06,
"loss": 0.2496,
"step": 270
},
{
"epoch": 0.22,
"grad_norm": 0.2206961214542389,
"learning_rate": 9.115485580503927e-06,
"loss": 0.2609,
"step": 271
},
{
"epoch": 0.22,
"grad_norm": 0.2454129308462143,
"learning_rate": 9.108111563102005e-06,
"loss": 0.2407,
"step": 272
},
{
"epoch": 0.22,
"grad_norm": 0.24640630185604095,
"learning_rate": 9.100709943958108e-06,
"loss": 0.2641,
"step": 273
},
{
"epoch": 0.22,
"grad_norm": 1.0371023416519165,
"learning_rate": 9.093280772802527e-06,
"loss": 0.1377,
"step": 274
},
{
"epoch": 0.22,
"grad_norm": 0.29923540353775024,
"learning_rate": 9.085824099550674e-06,
"loss": 0.2538,
"step": 275
},
{
"epoch": 0.22,
"grad_norm": 0.9111459255218506,
"learning_rate": 9.078339974302735e-06,
"loss": 0.1157,
"step": 276
},
{
"epoch": 0.22,
"grad_norm": 0.26333245635032654,
"learning_rate": 9.070828447343346e-06,
"loss": 0.2467,
"step": 277
},
{
"epoch": 0.22,
"grad_norm": 0.19055511057376862,
"learning_rate": 9.063289569141251e-06,
"loss": 0.235,
"step": 278
},
{
"epoch": 0.22,
"grad_norm": 1.2110968828201294,
"learning_rate": 9.055723390348966e-06,
"loss": 0.1339,
"step": 279
},
{
"epoch": 0.22,
"grad_norm": 0.7919692397117615,
"learning_rate": 9.048129961802425e-06,
"loss": 0.1244,
"step": 280
},
{
"epoch": 0.22,
"grad_norm": 0.3272060453891754,
"learning_rate": 9.040509334520662e-06,
"loss": 0.2194,
"step": 281
},
{
"epoch": 0.23,
"grad_norm": 0.29253754019737244,
"learning_rate": 9.032861559705442e-06,
"loss": 0.253,
"step": 282
},
{
"epoch": 0.23,
"grad_norm": 0.2354600876569748,
"learning_rate": 9.025186688740939e-06,
"loss": 0.2409,
"step": 283
},
{
"epoch": 0.23,
"grad_norm": 0.30340975522994995,
"learning_rate": 9.01748477319338e-06,
"loss": 0.2668,
"step": 284
},
{
"epoch": 0.23,
"grad_norm": 0.349874883890152,
"learning_rate": 9.009755864810695e-06,
"loss": 0.2538,
"step": 285
},
{
"epoch": 0.23,
"grad_norm": 4.808541774749756,
"learning_rate": 9.002000015522182e-06,
"loss": 0.9168,
"step": 286
},
{
"epoch": 0.23,
"grad_norm": 0.7351348400115967,
"learning_rate": 8.994217277438145e-06,
"loss": 0.1238,
"step": 287
},
{
"epoch": 0.23,
"grad_norm": 0.82793128490448,
"learning_rate": 8.986407702849552e-06,
"loss": 0.1043,
"step": 288
},
{
"epoch": 0.23,
"grad_norm": 3.377779483795166,
"learning_rate": 8.978571344227681e-06,
"loss": 0.8556,
"step": 289
},
{
"epoch": 0.23,
"grad_norm": 1.0280252695083618,
"learning_rate": 8.970708254223768e-06,
"loss": 0.1494,
"step": 290
},
{
"epoch": 0.23,
"grad_norm": 0.5633714199066162,
"learning_rate": 8.962818485668652e-06,
"loss": 0.2595,
"step": 291
},
{
"epoch": 0.23,
"grad_norm": 0.8542232513427734,
"learning_rate": 8.954902091572419e-06,
"loss": 0.1084,
"step": 292
},
{
"epoch": 0.23,
"grad_norm": 0.24046172201633453,
"learning_rate": 8.946959125124053e-06,
"loss": 0.2312,
"step": 293
},
{
"epoch": 0.24,
"grad_norm": 0.7138242721557617,
"learning_rate": 8.938989639691068e-06,
"loss": 0.1144,
"step": 294
},
{
"epoch": 0.24,
"grad_norm": 0.8094290494918823,
"learning_rate": 8.930993688819157e-06,
"loss": 0.1158,
"step": 295
},
{
"epoch": 0.24,
"grad_norm": 0.7016668319702148,
"learning_rate": 8.92297132623183e-06,
"loss": 0.2889,
"step": 296
},
{
"epoch": 0.24,
"grad_norm": 0.38069477677345276,
"learning_rate": 8.914922605830055e-06,
"loss": 0.2454,
"step": 297
},
{
"epoch": 0.24,
"grad_norm": 0.23061558604240417,
"learning_rate": 8.90684758169189e-06,
"loss": 0.2537,
"step": 298
},
{
"epoch": 0.24,
"grad_norm": 0.8746384978294373,
"learning_rate": 8.898746308072128e-06,
"loss": 0.1193,
"step": 299
},
{
"epoch": 0.24,
"grad_norm": 0.6383428573608398,
"learning_rate": 8.890618839401923e-06,
"loss": 0.2879,
"step": 300
},
{
"epoch": 0.24,
"eval_loss": 0.17809468507766724,
"eval_runtime": 126.726,
"eval_samples_per_second": 34.721,
"eval_steps_per_second": 1.089,
"step": 300
},
{
"epoch": 0.24,
"grad_norm": 0.5001509189605713,
"learning_rate": 8.882465230288434e-06,
"loss": 0.2671,
"step": 301
},
{
"epoch": 0.24,
"grad_norm": 0.7503305673599243,
"learning_rate": 8.87428553551445e-06,
"loss": 0.1155,
"step": 302
},
{
"epoch": 0.24,
"grad_norm": 0.2926233410835266,
"learning_rate": 8.866079810038027e-06,
"loss": 0.266,
"step": 303
},
{
"epoch": 0.24,
"grad_norm": 0.39748167991638184,
"learning_rate": 8.857848108992117e-06,
"loss": 0.2638,
"step": 304
},
{
"epoch": 0.24,
"grad_norm": 2.9256198406219482,
"learning_rate": 8.849590487684198e-06,
"loss": 0.8412,
"step": 305
},
{
"epoch": 0.24,
"grad_norm": 0.3115037977695465,
"learning_rate": 8.841307001595904e-06,
"loss": 0.235,
"step": 306
},
{
"epoch": 0.25,
"grad_norm": 0.25119268894195557,
"learning_rate": 8.832997706382644e-06,
"loss": 0.2533,
"step": 307
},
{
"epoch": 0.25,
"grad_norm": 0.26972174644470215,
"learning_rate": 8.82466265787324e-06,
"loss": 0.2566,
"step": 308
},
{
"epoch": 0.25,
"grad_norm": 0.297839492559433,
"learning_rate": 8.816301912069543e-06,
"loss": 0.2509,
"step": 309
},
{
"epoch": 0.25,
"grad_norm": 1.7829856872558594,
"learning_rate": 8.807915525146065e-06,
"loss": 0.8151,
"step": 310
},
{
"epoch": 0.25,
"grad_norm": 0.33616122603416443,
"learning_rate": 8.79950355344959e-06,
"loss": 0.2412,
"step": 311
},
{
"epoch": 0.25,
"grad_norm": 0.3606199324131012,
"learning_rate": 8.791066053498808e-06,
"loss": 0.2546,
"step": 312
},
{
"epoch": 0.25,
"grad_norm": 0.28510981798171997,
"learning_rate": 8.782603081983924e-06,
"loss": 0.2673,
"step": 313
},
{
"epoch": 0.25,
"grad_norm": 0.24838398396968842,
"learning_rate": 8.774114695766286e-06,
"loss": 0.2581,
"step": 314
},
{
"epoch": 0.25,
"grad_norm": 0.29941707849502563,
"learning_rate": 8.765600951877997e-06,
"loss": 0.2457,
"step": 315
},
{
"epoch": 0.25,
"grad_norm": 1.4045259952545166,
"learning_rate": 8.757061907521536e-06,
"loss": 0.1242,
"step": 316
},
{
"epoch": 0.25,
"grad_norm": 0.32180342078208923,
"learning_rate": 8.748497620069372e-06,
"loss": 0.2546,
"step": 317
},
{
"epoch": 0.25,
"grad_norm": 2.2117414474487305,
"learning_rate": 8.739908147063576e-06,
"loss": 0.8115,
"step": 318
},
{
"epoch": 0.26,
"grad_norm": 0.28217563033103943,
"learning_rate": 8.731293546215438e-06,
"loss": 0.2583,
"step": 319
},
{
"epoch": 0.26,
"grad_norm": 0.24044464528560638,
"learning_rate": 8.722653875405077e-06,
"loss": 0.2452,
"step": 320
},
{
"epoch": 0.26,
"grad_norm": 0.23203817009925842,
"learning_rate": 8.713989192681056e-06,
"loss": 0.2469,
"step": 321
},
{
"epoch": 0.26,
"grad_norm": 0.920863926410675,
"learning_rate": 8.705299556259986e-06,
"loss": 0.1343,
"step": 322
},
{
"epoch": 0.26,
"grad_norm": 0.3616194427013397,
"learning_rate": 8.696585024526137e-06,
"loss": 0.2681,
"step": 323
},
{
"epoch": 0.26,
"grad_norm": 0.946262001991272,
"learning_rate": 8.687845656031051e-06,
"loss": 0.1284,
"step": 324
},
{
"epoch": 0.26,
"grad_norm": 0.29203060269355774,
"learning_rate": 8.679081509493141e-06,
"loss": 0.2663,
"step": 325
},
{
"epoch": 0.26,
"grad_norm": 0.20759929716587067,
"learning_rate": 8.670292643797302e-06,
"loss": 0.2503,
"step": 326
},
{
"epoch": 0.26,
"grad_norm": 0.19175031781196594,
"learning_rate": 8.661479117994508e-06,
"loss": 0.2427,
"step": 327
},
{
"epoch": 0.26,
"grad_norm": 0.8310448527336121,
"learning_rate": 8.652640991301425e-06,
"loss": 0.1286,
"step": 328
},
{
"epoch": 0.26,
"grad_norm": 0.7470892667770386,
"learning_rate": 8.64377832310001e-06,
"loss": 0.1345,
"step": 329
},
{
"epoch": 0.26,
"grad_norm": 0.399453729391098,
"learning_rate": 8.634891172937102e-06,
"loss": 0.2525,
"step": 330
},
{
"epoch": 0.26,
"grad_norm": 2.1489763259887695,
"learning_rate": 8.625979600524041e-06,
"loss": 0.8409,
"step": 331
},
{
"epoch": 0.27,
"grad_norm": 0.9179878234863281,
"learning_rate": 8.61704366573625e-06,
"loss": 0.134,
"step": 332
},
{
"epoch": 0.27,
"grad_norm": 0.863291323184967,
"learning_rate": 8.608083428612837e-06,
"loss": 0.1328,
"step": 333
},
{
"epoch": 0.27,
"grad_norm": 1.5060096979141235,
"learning_rate": 8.599098949356201e-06,
"loss": 0.7738,
"step": 334
},
{
"epoch": 0.27,
"grad_norm": 0.8645356893539429,
"learning_rate": 8.590090288331615e-06,
"loss": 0.1363,
"step": 335
},
{
"epoch": 0.27,
"grad_norm": 0.4028846323490143,
"learning_rate": 8.581057506066821e-06,
"loss": 0.2554,
"step": 336
},
{
"epoch": 0.27,
"grad_norm": 0.27831801772117615,
"learning_rate": 8.572000663251638e-06,
"loss": 0.2597,
"step": 337
},
{
"epoch": 0.27,
"grad_norm": 1.732809066772461,
"learning_rate": 8.562919820737537e-06,
"loss": 0.7849,
"step": 338
},
{
"epoch": 0.27,
"grad_norm": 0.8418837785720825,
"learning_rate": 8.553815039537238e-06,
"loss": 0.1137,
"step": 339
},
{
"epoch": 0.27,
"grad_norm": 0.4697979688644409,
"learning_rate": 8.544686380824309e-06,
"loss": 0.2676,
"step": 340
},
{
"epoch": 0.27,
"grad_norm": 0.36946678161621094,
"learning_rate": 8.535533905932739e-06,
"loss": 0.2502,
"step": 341
},
{
"epoch": 0.27,
"grad_norm": 0.26440319418907166,
"learning_rate": 8.526357676356538e-06,
"loss": 0.2468,
"step": 342
},
{
"epoch": 0.27,
"grad_norm": 0.9330527782440186,
"learning_rate": 8.517157753749318e-06,
"loss": 0.1326,
"step": 343
},
{
"epoch": 0.28,
"grad_norm": 0.4522755742073059,
"learning_rate": 8.507934199923884e-06,
"loss": 0.2618,
"step": 344
},
{
"epoch": 0.28,
"grad_norm": 0.3574436604976654,
"learning_rate": 8.498687076851813e-06,
"loss": 0.2592,
"step": 345
},
{
"epoch": 0.28,
"grad_norm": 0.23295027017593384,
"learning_rate": 8.489416446663037e-06,
"loss": 0.2546,
"step": 346
},
{
"epoch": 0.28,
"grad_norm": 0.8735469579696655,
"learning_rate": 8.480122371645433e-06,
"loss": 0.1219,
"step": 347
},
{
"epoch": 0.28,
"grad_norm": 0.45125022530555725,
"learning_rate": 8.470804914244403e-06,
"loss": 0.2662,
"step": 348
},
{
"epoch": 0.28,
"grad_norm": 0.4210889935493469,
"learning_rate": 8.461464137062443e-06,
"loss": 0.2534,
"step": 349
},
{
"epoch": 0.28,
"grad_norm": 0.23260442912578583,
"learning_rate": 8.452100102858734e-06,
"loss": 0.2613,
"step": 350
},
{
"epoch": 0.28,
"eval_loss": 0.1757211834192276,
"eval_runtime": 128.5534,
"eval_samples_per_second": 34.227,
"eval_steps_per_second": 1.073,
"step": 350
},
{
"epoch": 0.28,
"grad_norm": 0.24398007988929749,
"learning_rate": 8.442712874548722e-06,
"loss": 0.2436,
"step": 351
},
{
"epoch": 0.28,
"grad_norm": 0.4280300438404083,
"learning_rate": 8.433302515203685e-06,
"loss": 0.2539,
"step": 352
},
{
"epoch": 0.28,
"grad_norm": 0.7873167395591736,
"learning_rate": 8.423869088050316e-06,
"loss": 0.1147,
"step": 353
},
{
"epoch": 0.28,
"grad_norm": 0.3588729500770569,
"learning_rate": 8.414412656470297e-06,
"loss": 0.2649,
"step": 354
},
{
"epoch": 0.28,
"grad_norm": 0.25401613116264343,
"learning_rate": 8.404933283999876e-06,
"loss": 0.2658,
"step": 355
},
{
"epoch": 0.28,
"grad_norm": 0.3902299702167511,
"learning_rate": 8.395431034329431e-06,
"loss": 0.2603,
"step": 356
},
{
"epoch": 0.29,
"grad_norm": 0.7768390774726868,
"learning_rate": 8.385905971303054e-06,
"loss": 0.1185,
"step": 357
},
{
"epoch": 0.29,
"grad_norm": 0.31169575452804565,
"learning_rate": 8.376358158918114e-06,
"loss": 0.2366,
"step": 358
},
{
"epoch": 0.29,
"grad_norm": 0.8341612219810486,
"learning_rate": 8.36678766132483e-06,
"loss": 0.1169,
"step": 359
},
{
"epoch": 0.29,
"grad_norm": 0.20535218715667725,
"learning_rate": 8.357194542825835e-06,
"loss": 0.2573,
"step": 360
},
{
"epoch": 0.29,
"grad_norm": 0.9014405608177185,
"learning_rate": 8.347578867875756e-06,
"loss": 0.1384,
"step": 361
},
{
"epoch": 0.29,
"grad_norm": 0.33684036135673523,
"learning_rate": 8.33794070108077e-06,
"loss": 0.2623,
"step": 362
},
{
"epoch": 0.29,
"grad_norm": 2.2853899002075195,
"learning_rate": 8.328280107198165e-06,
"loss": 0.847,
"step": 363
},
{
"epoch": 0.29,
"grad_norm": 0.22882255911827087,
"learning_rate": 8.31859715113593e-06,
"loss": 0.2633,
"step": 364
},
{
"epoch": 0.29,
"grad_norm": 0.22882211208343506,
"learning_rate": 8.308891897952282e-06,
"loss": 0.258,
"step": 365
},
{
"epoch": 0.29,
"grad_norm": 0.7905156016349792,
"learning_rate": 8.299164412855268e-06,
"loss": 0.1144,
"step": 366
},
{
"epoch": 0.29,
"grad_norm": 0.32525917887687683,
"learning_rate": 8.289414761202293e-06,
"loss": 0.2618,
"step": 367
},
{
"epoch": 0.29,
"grad_norm": 0.23701588809490204,
"learning_rate": 8.2796430084997e-06,
"loss": 0.261,
"step": 368
},
{
"epoch": 0.3,
"grad_norm": 0.8737034201622009,
"learning_rate": 8.26984922040233e-06,
"loss": 0.1503,
"step": 369
},
{
"epoch": 0.3,
"grad_norm": 0.8326207399368286,
"learning_rate": 8.260033462713073e-06,
"loss": 0.1146,
"step": 370
},
{
"epoch": 0.3,
"grad_norm": 0.8591614365577698,
"learning_rate": 8.250195801382426e-06,
"loss": 0.1179,
"step": 371
},
{
"epoch": 0.3,
"grad_norm": 0.3488956689834595,
"learning_rate": 8.240336302508056e-06,
"loss": 0.2331,
"step": 372
},
{
"epoch": 0.3,
"grad_norm": 0.39037784934043884,
"learning_rate": 8.230455032334355e-06,
"loss": 0.2619,
"step": 373
},
{
"epoch": 0.3,
"grad_norm": 0.2138894498348236,
"learning_rate": 8.22055205725199e-06,
"loss": 0.2428,
"step": 374
},
{
"epoch": 0.3,
"grad_norm": 0.41095003485679626,
"learning_rate": 8.210627443797459e-06,
"loss": 0.2745,
"step": 375
},
{
"epoch": 0.3,
"grad_norm": 0.790916919708252,
"learning_rate": 8.200681258652648e-06,
"loss": 0.1269,
"step": 376
},
{
"epoch": 0.3,
"grad_norm": 0.4224022626876831,
"learning_rate": 8.190713568644378e-06,
"loss": 0.251,
"step": 377
},
{
"epoch": 0.3,
"grad_norm": 0.2843644917011261,
"learning_rate": 8.180724440743957e-06,
"loss": 0.2677,
"step": 378
},
{
"epoch": 0.3,
"grad_norm": 0.929864764213562,
"learning_rate": 8.17071394206673e-06,
"loss": 0.1276,
"step": 379
},
{
"epoch": 0.3,
"grad_norm": 0.4565240442752838,
"learning_rate": 8.160682139871634e-06,
"loss": 0.2482,
"step": 380
},
{
"epoch": 0.3,
"grad_norm": 0.7745142579078674,
"learning_rate": 8.150629101560732e-06,
"loss": 0.1111,
"step": 381
},
{
"epoch": 0.31,
"grad_norm": 0.47168248891830444,
"learning_rate": 8.14055489467878e-06,
"loss": 0.2573,
"step": 382
},
{
"epoch": 0.31,
"grad_norm": 0.8176670670509338,
"learning_rate": 8.130459586912753e-06,
"loss": 0.1149,
"step": 383
},
{
"epoch": 0.31,
"grad_norm": 0.2612771689891815,
"learning_rate": 8.120343246091403e-06,
"loss": 0.2522,
"step": 384
},
{
"epoch": 0.31,
"grad_norm": 0.8001154065132141,
"learning_rate": 8.1102059401848e-06,
"loss": 0.1001,
"step": 385
},
{
"epoch": 0.31,
"grad_norm": 0.7723381519317627,
"learning_rate": 8.100047737303877e-06,
"loss": 0.1226,
"step": 386
},
{
"epoch": 0.31,
"grad_norm": 0.7428209185600281,
"learning_rate": 8.089868705699963e-06,
"loss": 0.1065,
"step": 387
},
{
"epoch": 0.31,
"grad_norm": 0.7014505863189697,
"learning_rate": 8.079668913764343e-06,
"loss": 0.1078,
"step": 388
},
{
"epoch": 0.31,
"grad_norm": 0.46105244755744934,
"learning_rate": 8.069448430027778e-06,
"loss": 0.2439,
"step": 389
},
{
"epoch": 0.31,
"grad_norm": 0.4538716673851013,
"learning_rate": 8.059207323160057e-06,
"loss": 0.2587,
"step": 390
},
{
"epoch": 0.31,
"grad_norm": 0.7957661151885986,
"learning_rate": 8.048945661969531e-06,
"loss": 0.093,
"step": 391
},
{
"epoch": 0.31,
"grad_norm": 0.30844560265541077,
"learning_rate": 8.038663515402659e-06,
"loss": 0.2613,
"step": 392
},
{
"epoch": 0.31,
"grad_norm": 0.4774548411369324,
"learning_rate": 8.028360952543528e-06,
"loss": 0.2757,
"step": 393
},
{
"epoch": 0.32,
"grad_norm": 0.3150812089443207,
"learning_rate": 8.018038042613407e-06,
"loss": 0.2344,
"step": 394
},
{
"epoch": 0.32,
"grad_norm": 0.268145889043808,
"learning_rate": 8.00769485497027e-06,
"loss": 0.2546,
"step": 395
},
{
"epoch": 0.32,
"grad_norm": 0.8446844816207886,
"learning_rate": 7.99733145910833e-06,
"loss": 0.1194,
"step": 396
},
{
"epoch": 0.32,
"grad_norm": 1.099281668663025,
"learning_rate": 7.986947924657584e-06,
"loss": 0.1433,
"step": 397
},
{
"epoch": 0.32,
"grad_norm": 0.5173322558403015,
"learning_rate": 7.97654432138333e-06,
"loss": 0.259,
"step": 398
},
{
"epoch": 0.32,
"grad_norm": 0.4889124631881714,
"learning_rate": 7.96612071918571e-06,
"loss": 0.274,
"step": 399
},
{
"epoch": 0.32,
"grad_norm": 0.34199294447898865,
"learning_rate": 7.955677188099234e-06,
"loss": 0.2569,
"step": 400
},
{
"epoch": 0.32,
"eval_loss": 0.17327886819839478,
"eval_runtime": 126.8257,
"eval_samples_per_second": 34.693,
"eval_steps_per_second": 1.088,
"step": 400
},
{
"epoch": 0.32,
"grad_norm": 0.8484892845153809,
"learning_rate": 7.94521379829231e-06,
"loss": 0.1143,
"step": 401
},
{
"epoch": 0.32,
"grad_norm": 0.37039560079574585,
"learning_rate": 7.93473062006677e-06,
"loss": 0.2336,
"step": 402
},
{
"epoch": 0.32,
"grad_norm": 0.8371801376342773,
"learning_rate": 7.924227723857411e-06,
"loss": 0.1185,
"step": 403
},
{
"epoch": 0.32,
"grad_norm": 0.7657596468925476,
"learning_rate": 7.913705180231505e-06,
"loss": 0.1105,
"step": 404
},
{
"epoch": 0.32,
"grad_norm": 0.5779002904891968,
"learning_rate": 7.90316305988833e-06,
"loss": 0.2552,
"step": 405
},
{
"epoch": 0.32,
"grad_norm": 0.46475157141685486,
"learning_rate": 7.892601433658705e-06,
"loss": 0.2608,
"step": 406
},
{
"epoch": 0.33,
"grad_norm": 0.27325791120529175,
"learning_rate": 7.882020372504494e-06,
"loss": 0.2508,
"step": 407
},
{
"epoch": 0.33,
"grad_norm": 0.906908392906189,
"learning_rate": 7.871419947518152e-06,
"loss": 0.1312,
"step": 408
},
{
"epoch": 0.33,
"grad_norm": 0.3190006911754608,
"learning_rate": 7.860800229922234e-06,
"loss": 0.2457,
"step": 409
},
{
"epoch": 0.33,
"grad_norm": 0.7692751884460449,
"learning_rate": 7.850161291068915e-06,
"loss": 0.1054,
"step": 410
},
{
"epoch": 0.33,
"grad_norm": 0.5385792255401611,
"learning_rate": 7.839503202439517e-06,
"loss": 0.294,
"step": 411
},
{
"epoch": 0.33,
"grad_norm": 0.33525821566581726,
"learning_rate": 7.828826035644025e-06,
"loss": 0.2344,
"step": 412
},
{
"epoch": 0.33,
"grad_norm": 0.3262925148010254,
"learning_rate": 7.818129862420612e-06,
"loss": 0.2605,
"step": 413
},
{
"epoch": 0.33,
"grad_norm": 1.0018166303634644,
"learning_rate": 7.807414754635145e-06,
"loss": 0.1162,
"step": 414
},
{
"epoch": 0.33,
"grad_norm": 0.2465881109237671,
"learning_rate": 7.796680784280714e-06,
"loss": 0.2378,
"step": 415
},
{
"epoch": 0.33,
"grad_norm": 0.3238285183906555,
"learning_rate": 7.785928023477142e-06,
"loss": 0.2574,
"step": 416
},
{
"epoch": 0.33,
"grad_norm": 0.8106104731559753,
"learning_rate": 7.775156544470506e-06,
"loss": 0.1337,
"step": 417
},
{
"epoch": 0.33,
"grad_norm": 0.41265976428985596,
"learning_rate": 7.764366419632636e-06,
"loss": 0.2644,
"step": 418
},
{
"epoch": 0.34,
"grad_norm": 0.2705645263195038,
"learning_rate": 7.753557721460656e-06,
"loss": 0.239,
"step": 419
},
{
"epoch": 0.34,
"grad_norm": 0.2286556512117386,
"learning_rate": 7.742730522576469e-06,
"loss": 0.2587,
"step": 420
},
{
"epoch": 0.34,
"grad_norm": 0.21768417954444885,
"learning_rate": 7.731884895726287e-06,
"loss": 0.2317,
"step": 421
},
{
"epoch": 0.34,
"grad_norm": 0.8450128436088562,
"learning_rate": 7.721020913780137e-06,
"loss": 0.1011,
"step": 422
},
{
"epoch": 0.34,
"grad_norm": 2.724947690963745,
"learning_rate": 7.710138649731367e-06,
"loss": 0.778,
"step": 423
},
{
"epoch": 0.34,
"grad_norm": 0.3697688579559326,
"learning_rate": 7.699238176696161e-06,
"loss": 0.2398,
"step": 424
},
{
"epoch": 0.34,
"grad_norm": 1.023916482925415,
"learning_rate": 7.688319567913054e-06,
"loss": 0.1502,
"step": 425
},
{
"epoch": 0.34,
"grad_norm": 0.3471025824546814,
"learning_rate": 7.677382896742417e-06,
"loss": 0.2486,
"step": 426
},
{
"epoch": 0.34,
"grad_norm": 0.24734561145305634,
"learning_rate": 7.66642823666599e-06,
"loss": 0.2485,
"step": 427
},
{
"epoch": 0.34,
"grad_norm": 0.7840192914009094,
"learning_rate": 7.655455661286376e-06,
"loss": 0.112,
"step": 428
},
{
"epoch": 0.34,
"grad_norm": 0.8701856732368469,
"learning_rate": 7.644465244326547e-06,
"loss": 0.1344,
"step": 429
},
{
"epoch": 0.34,
"grad_norm": 1.7876603603363037,
"learning_rate": 7.63345705962935e-06,
"loss": 0.7815,
"step": 430
},
{
"epoch": 0.34,
"grad_norm": 0.42289087176322937,
"learning_rate": 7.622431181157011e-06,
"loss": 0.2608,
"step": 431
},
{
"epoch": 0.35,
"grad_norm": 0.348501980304718,
"learning_rate": 7.611387682990636e-06,
"loss": 0.2485,
"step": 432
},
{
"epoch": 0.35,
"grad_norm": 0.27631887793540955,
"learning_rate": 7.600326639329716e-06,
"loss": 0.2313,
"step": 433
},
{
"epoch": 0.35,
"grad_norm": 0.2248000055551529,
"learning_rate": 7.589248124491627e-06,
"loss": 0.2315,
"step": 434
},
{
"epoch": 0.35,
"grad_norm": 0.2563694715499878,
"learning_rate": 7.578152212911134e-06,
"loss": 0.2616,
"step": 435
},
{
"epoch": 0.35,
"grad_norm": 1.42692232131958,
"learning_rate": 7.567038979139882e-06,
"loss": 0.1283,
"step": 436
},
{
"epoch": 0.35,
"grad_norm": 0.38915354013442993,
"learning_rate": 7.555908497845905e-06,
"loss": 0.2662,
"step": 437
},
{
"epoch": 0.35,
"grad_norm": 1.6744861602783203,
"learning_rate": 7.544760843813122e-06,
"loss": 0.8252,
"step": 438
},
{
"epoch": 0.35,
"grad_norm": 0.34255263209342957,
"learning_rate": 7.533596091940829e-06,
"loss": 0.2766,
"step": 439
},
{
"epoch": 0.35,
"grad_norm": 0.23356464505195618,
"learning_rate": 7.5224143172432e-06,
"loss": 0.2316,
"step": 440
},
{
"epoch": 0.35,
"grad_norm": 0.1892404854297638,
"learning_rate": 7.511215594848784e-06,
"loss": 0.2541,
"step": 441
},
{
"epoch": 0.35,
"grad_norm": 0.8359056115150452,
"learning_rate": 7.500000000000001e-06,
"loss": 0.1173,
"step": 442
},
{
"epoch": 0.35,
"grad_norm": 0.9140292406082153,
"learning_rate": 7.488767608052629e-06,
"loss": 0.1269,
"step": 443
},
{
"epoch": 0.36,
"grad_norm": 0.7507352828979492,
"learning_rate": 7.477518494475309e-06,
"loss": 0.1179,
"step": 444
},
{
"epoch": 0.36,
"grad_norm": 1.7874459028244019,
"learning_rate": 7.466252734849027e-06,
"loss": 0.8189,
"step": 445
},
{
"epoch": 0.36,
"grad_norm": 0.6044875383377075,
"learning_rate": 7.454970404866612e-06,
"loss": 0.2833,
"step": 446
},
{
"epoch": 0.36,
"grad_norm": 0.4065121114253998,
"learning_rate": 7.44367158033223e-06,
"loss": 0.2489,
"step": 447
},
{
"epoch": 0.36,
"grad_norm": 0.8592169880867004,
"learning_rate": 7.4323563371608665e-06,
"loss": 0.1157,
"step": 448
},
{
"epoch": 0.36,
"grad_norm": 1.345414638519287,
"learning_rate": 7.421024751377825e-06,
"loss": 0.806,
"step": 449
},
{
"epoch": 0.36,
"grad_norm": 0.2932509779930115,
"learning_rate": 7.409676899118213e-06,
"loss": 0.2607,
"step": 450
},
{
"epoch": 0.36,
"eval_loss": 0.17497119307518005,
"eval_runtime": 133.2011,
"eval_samples_per_second": 33.033,
"eval_steps_per_second": 1.036,
"step": 450
},
{
"epoch": 0.36,
"grad_norm": 0.7178137302398682,
"learning_rate": 7.398312856626424e-06,
"loss": 0.0998,
"step": 451
},
{
"epoch": 0.36,
"grad_norm": 0.7720945477485657,
"learning_rate": 7.386932700255635e-06,
"loss": 0.1242,
"step": 452
},
{
"epoch": 0.36,
"grad_norm": 0.35302647948265076,
"learning_rate": 7.375536506467294e-06,
"loss": 0.2618,
"step": 453
},
{
"epoch": 0.36,
"grad_norm": 0.3657301068305969,
"learning_rate": 7.3641243518305915e-06,
"loss": 0.2592,
"step": 454
},
{
"epoch": 0.36,
"grad_norm": 0.7217091917991638,
"learning_rate": 7.352696313021966e-06,
"loss": 0.1121,
"step": 455
},
{
"epoch": 0.36,
"grad_norm": 1.6583247184753418,
"learning_rate": 7.341252466824572e-06,
"loss": 0.7967,
"step": 456
},
{
"epoch": 0.37,
"grad_norm": 0.23964492976665497,
"learning_rate": 7.329792890127778e-06,
"loss": 0.2575,
"step": 457
},
{
"epoch": 0.37,
"grad_norm": 0.2414589673280716,
"learning_rate": 7.318317659926637e-06,
"loss": 0.2464,
"step": 458
},
{
"epoch": 0.37,
"grad_norm": 0.777942419052124,
"learning_rate": 7.30682685332138e-06,
"loss": 0.1173,
"step": 459
},
{
"epoch": 0.37,
"grad_norm": 0.3086945116519928,
"learning_rate": 7.295320547516893e-06,
"loss": 0.2539,
"step": 460
},
{
"epoch": 0.37,
"grad_norm": 0.735599160194397,
"learning_rate": 7.283798819822193e-06,
"loss": 0.1124,
"step": 461
},
{
"epoch": 0.37,
"grad_norm": 0.22278796136379242,
"learning_rate": 7.272261747649922e-06,
"loss": 0.2407,
"step": 462
},
{
"epoch": 0.37,
"grad_norm": 0.25805866718292236,
"learning_rate": 7.2607094085158135e-06,
"loss": 0.2475,
"step": 463
},
{
"epoch": 0.37,
"grad_norm": 1.715925693511963,
"learning_rate": 7.249141880038181e-06,
"loss": 0.7653,
"step": 464
},
{
"epoch": 0.37,
"grad_norm": 0.22921590507030487,
"learning_rate": 7.237559239937388e-06,
"loss": 0.2381,
"step": 465
},
{
"epoch": 0.37,
"grad_norm": 0.27067118883132935,
"learning_rate": 7.225961566035335e-06,
"loss": 0.2571,
"step": 466
},
{
"epoch": 0.37,
"grad_norm": 0.33429858088493347,
"learning_rate": 7.214348936254934e-06,
"loss": 0.2625,
"step": 467
},
{
"epoch": 0.37,
"grad_norm": 0.21541927754878998,
"learning_rate": 7.202721428619576e-06,
"loss": 0.2373,
"step": 468
},
{
"epoch": 0.38,
"grad_norm": 0.2614475190639496,
"learning_rate": 7.191079121252618e-06,
"loss": 0.2585,
"step": 469
},
{
"epoch": 0.38,
"grad_norm": 0.8877506852149963,
"learning_rate": 7.179422092376856e-06,
"loss": 0.114,
"step": 470
},
{
"epoch": 0.38,
"grad_norm": 0.8758447170257568,
"learning_rate": 7.167750420313994e-06,
"loss": 0.1096,
"step": 471
},
{
"epoch": 0.38,
"grad_norm": 0.35043734312057495,
"learning_rate": 7.156064183484122e-06,
"loss": 0.2409,
"step": 472
},
{
"epoch": 0.38,
"grad_norm": 0.8281682729721069,
"learning_rate": 7.144363460405191e-06,
"loss": 0.1275,
"step": 473
},
{
"epoch": 0.38,
"grad_norm": 0.7467338442802429,
"learning_rate": 7.132648329692478e-06,
"loss": 0.1193,
"step": 474
},
{
"epoch": 0.38,
"grad_norm": 0.8526357412338257,
"learning_rate": 7.1209188700580666e-06,
"loss": 0.0952,
"step": 475
},
{
"epoch": 0.38,
"grad_norm": 0.2877821624279022,
"learning_rate": 7.109175160310312e-06,
"loss": 0.242,
"step": 476
},
{
"epoch": 0.38,
"grad_norm": 0.7923797369003296,
"learning_rate": 7.097417279353316e-06,
"loss": 0.1013,
"step": 477
},
{
"epoch": 0.38,
"grad_norm": 0.3010237216949463,
"learning_rate": 7.085645306186391e-06,
"loss": 0.2639,
"step": 478
},
{
"epoch": 0.38,
"grad_norm": 0.2335115224123001,
"learning_rate": 7.073859319903536e-06,
"loss": 0.2427,
"step": 479
},
{
"epoch": 0.38,
"grad_norm": 0.7458134293556213,
"learning_rate": 7.062059399692899e-06,
"loss": 0.0961,
"step": 480
},
{
"epoch": 0.38,
"grad_norm": 0.20358052849769592,
"learning_rate": 7.0502456248362496e-06,
"loss": 0.2441,
"step": 481
},
{
"epoch": 0.39,
"grad_norm": 1.0379178524017334,
"learning_rate": 7.038418074708444e-06,
"loss": 0.1283,
"step": 482
},
{
"epoch": 0.39,
"grad_norm": 0.26605701446533203,
"learning_rate": 7.026576828776895e-06,
"loss": 0.2603,
"step": 483
},
{
"epoch": 0.39,
"grad_norm": 0.22112296521663666,
"learning_rate": 7.014721966601029e-06,
"loss": 0.2531,
"step": 484
},
{
"epoch": 0.39,
"grad_norm": 3.0812671184539795,
"learning_rate": 7.0028535678317645e-06,
"loss": 0.802,
"step": 485
},
{
"epoch": 0.39,
"grad_norm": 0.7515444159507751,
"learning_rate": 6.990971712210966e-06,
"loss": 0.0943,
"step": 486
},
{
"epoch": 0.39,
"grad_norm": 0.30441251397132874,
"learning_rate": 6.979076479570912e-06,
"loss": 0.2316,
"step": 487
},
{
"epoch": 0.39,
"grad_norm": 0.8908866047859192,
"learning_rate": 6.967167949833763e-06,
"loss": 0.1125,
"step": 488
},
{
"epoch": 0.39,
"grad_norm": 0.30447858572006226,
"learning_rate": 6.955246203011016e-06,
"loss": 0.2622,
"step": 489
},
{
"epoch": 0.39,
"grad_norm": 0.7095188498497009,
"learning_rate": 6.943311319202976e-06,
"loss": 0.0973,
"step": 490
},
{
"epoch": 0.39,
"grad_norm": 0.25172391533851624,
"learning_rate": 6.93136337859821e-06,
"loss": 0.272,
"step": 491
},
{
"epoch": 0.39,
"grad_norm": 0.23503896594047546,
"learning_rate": 6.919402461473013e-06,
"loss": 0.2507,
"step": 492
},
{
"epoch": 0.39,
"grad_norm": 0.26883500814437866,
"learning_rate": 6.907428648190865e-06,
"loss": 0.2525,
"step": 493
},
{
"epoch": 0.4,
"grad_norm": 0.22378547489643097,
"learning_rate": 6.895442019201898e-06,
"loss": 0.2423,
"step": 494
},
{
"epoch": 0.4,
"grad_norm": 1.8482476472854614,
"learning_rate": 6.8834426550423435e-06,
"loss": 0.7594,
"step": 495
},
{
"epoch": 0.4,
"grad_norm": 1.0197372436523438,
"learning_rate": 6.871430636334005e-06,
"loss": 0.1186,
"step": 496
},
{
"epoch": 0.4,
"grad_norm": 0.3354429304599762,
"learning_rate": 6.859406043783707e-06,
"loss": 0.2416,
"step": 497
},
{
"epoch": 0.4,
"grad_norm": 0.8121775388717651,
"learning_rate": 6.8473689581827585e-06,
"loss": 0.1152,
"step": 498
},
{
"epoch": 0.4,
"grad_norm": 0.2374291718006134,
"learning_rate": 6.8353194604064e-06,
"loss": 0.2529,
"step": 499
},
{
"epoch": 0.4,
"grad_norm": 0.7673168778419495,
"learning_rate": 6.8232576314132755e-06,
"loss": 0.1069,
"step": 500
},
{
"epoch": 0.4,
"eval_loss": 0.17257176339626312,
"eval_runtime": 129.1563,
"eval_samples_per_second": 34.067,
"eval_steps_per_second": 1.068,
"step": 500
},
{
"epoch": 0.4,
"grad_norm": 0.26591578125953674,
"learning_rate": 6.811183552244879e-06,
"loss": 0.2513,
"step": 501
},
{
"epoch": 0.4,
"grad_norm": 0.7341824173927307,
"learning_rate": 6.7990973040250055e-06,
"loss": 0.0992,
"step": 502
},
{
"epoch": 0.4,
"grad_norm": 0.2251487672328949,
"learning_rate": 6.78699896795922e-06,
"loss": 0.2535,
"step": 503
},
{
"epoch": 0.4,
"grad_norm": 0.22974683344364166,
"learning_rate": 6.774888625334295e-06,
"loss": 0.246,
"step": 504
},
{
"epoch": 0.4,
"grad_norm": 0.7044336795806885,
"learning_rate": 6.7627663575176825e-06,
"loss": 0.0998,
"step": 505
},
{
"epoch": 0.4,
"grad_norm": 0.21585382521152496,
"learning_rate": 6.750632245956954e-06,
"loss": 0.2428,
"step": 506
},
{
"epoch": 0.41,
"grad_norm": 0.7919189929962158,
"learning_rate": 6.738486372179253e-06,
"loss": 0.1152,
"step": 507
},
{
"epoch": 0.41,
"grad_norm": 0.7740060687065125,
"learning_rate": 6.7263288177907604e-06,
"loss": 0.113,
"step": 508
},
{
"epoch": 0.41,
"grad_norm": 0.34114816784858704,
"learning_rate": 6.714159664476127e-06,
"loss": 0.2374,
"step": 509
},
{
"epoch": 0.41,
"grad_norm": 0.2690570056438446,
"learning_rate": 6.701978993997942e-06,
"loss": 0.2473,
"step": 510
},
{
"epoch": 0.41,
"grad_norm": 0.21625067293643951,
"learning_rate": 6.689786888196175e-06,
"loss": 0.2523,
"step": 511
},
{
"epoch": 0.41,
"grad_norm": 0.29779431223869324,
"learning_rate": 6.677583428987625e-06,
"loss": 0.2314,
"step": 512
},
{
"epoch": 0.41,
"grad_norm": 0.2767280638217926,
"learning_rate": 6.66536869836538e-06,
"loss": 0.2325,
"step": 513
},
{
"epoch": 0.41,
"grad_norm": 0.7299302816390991,
"learning_rate": 6.653142778398247e-06,
"loss": 0.0938,
"step": 514
},
{
"epoch": 0.41,
"grad_norm": 2.016296625137329,
"learning_rate": 6.640905751230224e-06,
"loss": 0.7972,
"step": 515
},
{
"epoch": 0.41,
"grad_norm": 0.7118121981620789,
"learning_rate": 6.6286576990799325e-06,
"loss": 0.0902,
"step": 516
},
{
"epoch": 0.41,
"grad_norm": 0.41206905245780945,
"learning_rate": 6.616398704240064e-06,
"loss": 0.2604,
"step": 517
},
{
"epoch": 0.41,
"grad_norm": 0.3278159201145172,
"learning_rate": 6.6041288490768385e-06,
"loss": 0.2374,
"step": 518
},
{
"epoch": 0.42,
"grad_norm": 0.2507029175758362,
"learning_rate": 6.591848216029444e-06,
"loss": 0.2412,
"step": 519
},
{
"epoch": 0.42,
"grad_norm": 0.3146171569824219,
"learning_rate": 6.579556887609481e-06,
"loss": 0.2498,
"step": 520
},
{
"epoch": 0.42,
"grad_norm": 0.31674060225486755,
"learning_rate": 6.567254946400411e-06,
"loss": 0.2383,
"step": 521
},
{
"epoch": 0.42,
"grad_norm": 0.26629751920700073,
"learning_rate": 6.554942475057003e-06,
"loss": 0.2498,
"step": 522
},
{
"epoch": 0.42,
"grad_norm": 0.2559276223182678,
"learning_rate": 6.542619556304774e-06,
"loss": 0.2486,
"step": 523
},
{
"epoch": 0.42,
"grad_norm": 0.21022208034992218,
"learning_rate": 6.530286272939438e-06,
"loss": 0.2358,
"step": 524
},
{
"epoch": 0.42,
"grad_norm": 0.27593278884887695,
"learning_rate": 6.517942707826342e-06,
"loss": 0.2462,
"step": 525
},
{
"epoch": 0.42,
"grad_norm": 1.1079552173614502,
"learning_rate": 6.505588943899923e-06,
"loss": 0.1267,
"step": 526
},
{
"epoch": 0.42,
"grad_norm": 0.2666323781013489,
"learning_rate": 6.493225064163134e-06,
"loss": 0.241,
"step": 527
},
{
"epoch": 0.42,
"grad_norm": 0.7184049487113953,
"learning_rate": 6.4808511516868976e-06,
"loss": 0.106,
"step": 528
},
{
"epoch": 0.42,
"grad_norm": 0.2685374319553375,
"learning_rate": 6.468467289609547e-06,
"loss": 0.2385,
"step": 529
},
{
"epoch": 0.42,
"grad_norm": 0.24653129279613495,
"learning_rate": 6.456073561136261e-06,
"loss": 0.2393,
"step": 530
},
{
"epoch": 0.42,
"grad_norm": 2.0868330001831055,
"learning_rate": 6.443670049538512e-06,
"loss": 0.7806,
"step": 531
},
{
"epoch": 0.43,
"grad_norm": 0.24887517094612122,
"learning_rate": 6.4312568381535045e-06,
"loss": 0.2399,
"step": 532
},
{
"epoch": 0.43,
"grad_norm": 0.8147097229957581,
"learning_rate": 6.41883401038361e-06,
"loss": 0.1101,
"step": 533
},
{
"epoch": 0.43,
"grad_norm": 0.7025034427642822,
"learning_rate": 6.406401649695814e-06,
"loss": 0.0977,
"step": 534
},
{
"epoch": 0.43,
"grad_norm": 0.8896757960319519,
"learning_rate": 6.393959839621154e-06,
"loss": 0.1151,
"step": 535
},
{
"epoch": 0.43,
"grad_norm": 0.2679365575313568,
"learning_rate": 6.381508663754152e-06,
"loss": 0.2554,
"step": 536
},
{
"epoch": 0.43,
"grad_norm": 1.352323293685913,
"learning_rate": 6.369048205752261e-06,
"loss": 0.7772,
"step": 537
},
{
"epoch": 0.43,
"grad_norm": 0.7524544596672058,
"learning_rate": 6.356578549335295e-06,
"loss": 0.1228,
"step": 538
},
{
"epoch": 0.43,
"grad_norm": 0.25852134823799133,
"learning_rate": 6.3440997782848764e-06,
"loss": 0.2496,
"step": 539
},
{
"epoch": 0.43,
"grad_norm": 0.8458032608032227,
"learning_rate": 6.331611976443862e-06,
"loss": 0.1078,
"step": 540
},
{
"epoch": 0.43,
"grad_norm": 0.7436122894287109,
"learning_rate": 6.31911522771579e-06,
"loss": 0.1014,
"step": 541
},
{
"epoch": 0.43,
"grad_norm": 0.22990009188652039,
"learning_rate": 6.306609616064304e-06,
"loss": 0.249,
"step": 542
},
{
"epoch": 0.43,
"grad_norm": 0.2187103033065796,
"learning_rate": 6.294095225512604e-06,
"loss": 0.2435,
"step": 543
},
{
"epoch": 0.44,
"grad_norm": 0.23281478881835938,
"learning_rate": 6.281572140142871e-06,
"loss": 0.2369,
"step": 544
},
{
"epoch": 0.44,
"grad_norm": 0.2722567021846771,
"learning_rate": 6.269040444095704e-06,
"loss": 0.2546,
"step": 545
},
{
"epoch": 0.44,
"grad_norm": 0.8616167306900024,
"learning_rate": 6.256500221569556e-06,
"loss": 0.1249,
"step": 546
},
{
"epoch": 0.44,
"grad_norm": 0.2369868904352188,
"learning_rate": 6.243951556820169e-06,
"loss": 0.2685,
"step": 547
},
{
"epoch": 0.44,
"grad_norm": 0.20875300467014313,
"learning_rate": 6.231394534160008e-06,
"loss": 0.2421,
"step": 548
},
{
"epoch": 0.44,
"grad_norm": 0.7908800840377808,
"learning_rate": 6.218829237957689e-06,
"loss": 0.0987,
"step": 549
},
{
"epoch": 0.44,
"grad_norm": 0.9249074459075928,
"learning_rate": 6.2062557526374226e-06,
"loss": 0.0866,
"step": 550
},
{
"epoch": 0.44,
"eval_loss": 0.17182637751102448,
"eval_runtime": 126.9651,
"eval_samples_per_second": 34.655,
"eval_steps_per_second": 1.087,
"step": 550
},
{
"epoch": 0.44,
"grad_norm": 1.5728058815002441,
"learning_rate": 6.193674162678437e-06,
"loss": 0.7705,
"step": 551
},
{
"epoch": 0.44,
"grad_norm": 0.32587730884552,
"learning_rate": 6.181084552614414e-06,
"loss": 0.2551,
"step": 552
},
{
"epoch": 0.44,
"grad_norm": 0.26025548577308655,
"learning_rate": 6.168487007032922e-06,
"loss": 0.2476,
"step": 553
},
{
"epoch": 0.44,
"grad_norm": 0.25537562370300293,
"learning_rate": 6.15588161057485e-06,
"loss": 0.243,
"step": 554
},
{
"epoch": 0.44,
"grad_norm": 0.2507646083831787,
"learning_rate": 6.143268447933828e-06,
"loss": 0.2347,
"step": 555
},
{
"epoch": 0.44,
"grad_norm": 0.39545267820358276,
"learning_rate": 6.130647603855674e-06,
"loss": 0.2542,
"step": 556
},
{
"epoch": 0.45,
"grad_norm": 1.7832762002944946,
"learning_rate": 6.118019163137814e-06,
"loss": 0.7204,
"step": 557
},
{
"epoch": 0.45,
"grad_norm": 0.9707966446876526,
"learning_rate": 6.10538321062871e-06,
"loss": 0.1127,
"step": 558
},
{
"epoch": 0.45,
"grad_norm": 0.3201599717140198,
"learning_rate": 6.092739831227298e-06,
"loss": 0.2353,
"step": 559
},
{
"epoch": 0.45,
"grad_norm": 1.2938932180404663,
"learning_rate": 6.080089109882419e-06,
"loss": 0.7762,
"step": 560
},
{
"epoch": 0.45,
"grad_norm": 0.3063291609287262,
"learning_rate": 6.067431131592234e-06,
"loss": 0.2725,
"step": 561
},
{
"epoch": 0.45,
"grad_norm": 1.2505972385406494,
"learning_rate": 6.0547659814036664e-06,
"loss": 0.7757,
"step": 562
},
{
"epoch": 0.45,
"grad_norm": 1.2083958387374878,
"learning_rate": 6.042093744411829e-06,
"loss": 0.7502,
"step": 563
},
{
"epoch": 0.45,
"grad_norm": 0.29970356822013855,
"learning_rate": 6.029414505759448e-06,
"loss": 0.2349,
"step": 564
},
{
"epoch": 0.45,
"grad_norm": 0.2557036578655243,
"learning_rate": 6.016728350636289e-06,
"loss": 0.2412,
"step": 565
},
{
"epoch": 0.45,
"grad_norm": 1.27855384349823,
"learning_rate": 6.004035364278593e-06,
"loss": 0.7615,
"step": 566
},
{
"epoch": 0.45,
"grad_norm": 0.8690054416656494,
"learning_rate": 5.991335631968498e-06,
"loss": 0.1059,
"step": 567
},
{
"epoch": 0.45,
"grad_norm": 0.8990349173545837,
"learning_rate": 5.978629239033465e-06,
"loss": 0.1088,
"step": 568
},
{
"epoch": 0.46,
"grad_norm": 0.40394002199172974,
"learning_rate": 5.96591627084571e-06,
"loss": 0.2538,
"step": 569
},
{
"epoch": 0.46,
"grad_norm": 0.6969928741455078,
"learning_rate": 5.953196812821622e-06,
"loss": 0.1016,
"step": 570
},
{
"epoch": 0.46,
"grad_norm": 0.678952693939209,
"learning_rate": 5.940470950421199e-06,
"loss": 0.1003,
"step": 571
},
{
"epoch": 0.46,
"grad_norm": 0.23090852797031403,
"learning_rate": 5.927738769147467e-06,
"loss": 0.2289,
"step": 572
},
{
"epoch": 0.46,
"grad_norm": 0.21523523330688477,
"learning_rate": 5.915000354545908e-06,
"loss": 0.2306,
"step": 573
},
{
"epoch": 0.46,
"grad_norm": 0.7646777033805847,
"learning_rate": 5.902255792203882e-06,
"loss": 0.103,
"step": 574
},
{
"epoch": 0.46,
"grad_norm": 1.8262460231781006,
"learning_rate": 5.88950516775006e-06,
"loss": 0.8071,
"step": 575
},
{
"epoch": 0.46,
"grad_norm": 0.7033870220184326,
"learning_rate": 5.876748566853839e-06,
"loss": 0.1272,
"step": 576
},
{
"epoch": 0.46,
"grad_norm": 0.25240471959114075,
"learning_rate": 5.8639860752247726e-06,
"loss": 0.248,
"step": 577
},
{
"epoch": 0.46,
"grad_norm": 0.8612571358680725,
"learning_rate": 5.851217778611994e-06,
"loss": 0.0929,
"step": 578
},
{
"epoch": 0.46,
"grad_norm": 0.3180033564567566,
"learning_rate": 5.838443762803636e-06,
"loss": 0.2691,
"step": 579
},
{
"epoch": 0.46,
"grad_norm": 0.2315218150615692,
"learning_rate": 5.825664113626258e-06,
"loss": 0.2526,
"step": 580
},
{
"epoch": 0.46,
"grad_norm": 0.8243306875228882,
"learning_rate": 5.812878916944276e-06,
"loss": 0.105,
"step": 581
},
{
"epoch": 0.47,
"grad_norm": 0.19831788539886475,
"learning_rate": 5.800088258659371e-06,
"loss": 0.24,
"step": 582
},
{
"epoch": 0.47,
"grad_norm": 0.24731282889842987,
"learning_rate": 5.7872922247099206e-06,
"loss": 0.2578,
"step": 583
},
{
"epoch": 0.47,
"grad_norm": 0.18566380441188812,
"learning_rate": 5.774490901070424e-06,
"loss": 0.2432,
"step": 584
},
{
"epoch": 0.47,
"grad_norm": 1.4072928428649902,
"learning_rate": 5.7616843737509195e-06,
"loss": 0.7281,
"step": 585
},
{
"epoch": 0.47,
"grad_norm": 0.23175321519374847,
"learning_rate": 5.748872728796409e-06,
"loss": 0.248,
"step": 586
},
{
"epoch": 0.47,
"grad_norm": 0.32236453890800476,
"learning_rate": 5.736056052286274e-06,
"loss": 0.2592,
"step": 587
},
{
"epoch": 0.47,
"grad_norm": 0.2272377759218216,
"learning_rate": 5.723234430333711e-06,
"loss": 0.2173,
"step": 588
},
{
"epoch": 0.47,
"grad_norm": 0.2605978548526764,
"learning_rate": 5.710407949085135e-06,
"loss": 0.2461,
"step": 589
},
{
"epoch": 0.47,
"grad_norm": 0.19611631333827972,
"learning_rate": 5.697576694719616e-06,
"loss": 0.2331,
"step": 590
},
{
"epoch": 0.47,
"grad_norm": 0.7686142921447754,
"learning_rate": 5.684740753448291e-06,
"loss": 0.1066,
"step": 591
},
{
"epoch": 0.47,
"grad_norm": 0.24265241622924805,
"learning_rate": 5.6719002115137914e-06,
"loss": 0.2529,
"step": 592
},
{
"epoch": 0.47,
"grad_norm": 1.831364393234253,
"learning_rate": 5.659055155189651e-06,
"loss": 0.7488,
"step": 593
},
{
"epoch": 0.48,
"grad_norm": 0.7736395597457886,
"learning_rate": 5.646205670779745e-06,
"loss": 0.1088,
"step": 594
},
{
"epoch": 0.48,
"grad_norm": 0.7180992960929871,
"learning_rate": 5.6333518446176974e-06,
"loss": 0.1052,
"step": 595
},
{
"epoch": 0.48,
"grad_norm": 0.7724286317825317,
"learning_rate": 5.6204937630662974e-06,
"loss": 0.1146,
"step": 596
},
{
"epoch": 0.48,
"grad_norm": 0.7194610238075256,
"learning_rate": 5.607631512516934e-06,
"loss": 0.1034,
"step": 597
},
{
"epoch": 0.48,
"grad_norm": 0.34931981563568115,
"learning_rate": 5.594765179389003e-06,
"loss": 0.2617,
"step": 598
},
{
"epoch": 0.48,
"grad_norm": 0.38497787714004517,
"learning_rate": 5.581894850129328e-06,
"loss": 0.2663,
"step": 599
},
{
"epoch": 0.48,
"grad_norm": 0.8474921584129333,
"learning_rate": 5.569020611211589e-06,
"loss": 0.1136,
"step": 600
},
{
"epoch": 0.48,
"eval_loss": 0.17212402820587158,
"eval_runtime": 131.1531,
"eval_samples_per_second": 33.549,
"eval_steps_per_second": 1.052,
"step": 600
},
{
"epoch": 0.48,
"grad_norm": 1.4955337047576904,
"learning_rate": 5.556142549135725e-06,
"loss": 0.7654,
"step": 601
},
{
"epoch": 0.48,
"grad_norm": 0.45665034651756287,
"learning_rate": 5.543260750427373e-06,
"loss": 0.2487,
"step": 602
},
{
"epoch": 0.48,
"grad_norm": 0.8311600685119629,
"learning_rate": 5.5303753016372675e-06,
"loss": 0.0893,
"step": 603
},
{
"epoch": 0.48,
"grad_norm": 0.3638245761394501,
"learning_rate": 5.517486289340669e-06,
"loss": 0.2578,
"step": 604
},
{
"epoch": 0.48,
"grad_norm": 0.31249916553497314,
"learning_rate": 5.5045938001367824e-06,
"loss": 0.2396,
"step": 605
},
{
"epoch": 0.48,
"grad_norm": 0.2066921591758728,
"learning_rate": 5.4916979206481745e-06,
"loss": 0.2405,
"step": 606
},
{
"epoch": 0.49,
"grad_norm": 1.2871233224868774,
"learning_rate": 5.478798737520187e-06,
"loss": 0.7462,
"step": 607
},
{
"epoch": 0.49,
"grad_norm": 1.1793088912963867,
"learning_rate": 5.465896337420359e-06,
"loss": 0.7043,
"step": 608
},
{
"epoch": 0.49,
"grad_norm": 0.5960167646408081,
"learning_rate": 5.452990807037847e-06,
"loss": 0.285,
"step": 609
},
{
"epoch": 0.49,
"grad_norm": 0.9260637164115906,
"learning_rate": 5.440082233082837e-06,
"loss": 0.0916,
"step": 610
},
{
"epoch": 0.49,
"grad_norm": 0.31530794501304626,
"learning_rate": 5.427170702285964e-06,
"loss": 0.2513,
"step": 611
},
{
"epoch": 0.49,
"grad_norm": 0.28861063718795776,
"learning_rate": 5.414256301397731e-06,
"loss": 0.2574,
"step": 612
},
{
"epoch": 0.49,
"grad_norm": 1.250362515449524,
"learning_rate": 5.401339117187926e-06,
"loss": 0.7563,
"step": 613
},
{
"epoch": 0.49,
"grad_norm": 0.26689571142196655,
"learning_rate": 5.388419236445033e-06,
"loss": 0.2341,
"step": 614
},
{
"epoch": 0.49,
"grad_norm": 0.29688704013824463,
"learning_rate": 5.375496745975655e-06,
"loss": 0.2534,
"step": 615
},
{
"epoch": 0.49,
"grad_norm": 0.7108520865440369,
"learning_rate": 5.362571732603934e-06,
"loss": 0.0898,
"step": 616
},
{
"epoch": 0.49,
"grad_norm": 0.2955131530761719,
"learning_rate": 5.349644283170957e-06,
"loss": 0.2493,
"step": 617
},
{
"epoch": 0.49,
"grad_norm": 0.6830461621284485,
"learning_rate": 5.336714484534183e-06,
"loss": 0.1062,
"step": 618
},
{
"epoch": 0.5,
"grad_norm": 1.32274329662323,
"learning_rate": 5.32378242356685e-06,
"loss": 0.7814,
"step": 619
},
{
"epoch": 0.5,
"grad_norm": 0.7143470048904419,
"learning_rate": 5.310848187157404e-06,
"loss": 0.104,
"step": 620
},
{
"epoch": 0.5,
"grad_norm": 0.6177361607551575,
"learning_rate": 5.2979118622088976e-06,
"loss": 0.0801,
"step": 621
},
{
"epoch": 0.5,
"grad_norm": 0.7132307887077332,
"learning_rate": 5.284973535638424e-06,
"loss": 0.1088,
"step": 622
},
{
"epoch": 0.5,
"grad_norm": 0.35282617807388306,
"learning_rate": 5.272033294376522e-06,
"loss": 0.2343,
"step": 623
},
{
"epoch": 0.5,
"grad_norm": 0.31337809562683105,
"learning_rate": 5.2590912253665925e-06,
"loss": 0.2502,
"step": 624
},
{
"epoch": 0.5,
"grad_norm": 0.7704228758811951,
"learning_rate": 5.246147415564321e-06,
"loss": 0.1189,
"step": 625
},
{
"epoch": 0.5,
"grad_norm": 0.27498576045036316,
"learning_rate": 5.233201951937088e-06,
"loss": 0.2503,
"step": 626
},
{
"epoch": 0.5,
"grad_norm": 0.40920963883399963,
"learning_rate": 5.220254921463384e-06,
"loss": 0.256,
"step": 627
},
{
"epoch": 0.5,
"grad_norm": 0.7178544402122498,
"learning_rate": 5.207306411132228e-06,
"loss": 0.1124,
"step": 628
},
{
"epoch": 0.5,
"grad_norm": 1.618476152420044,
"learning_rate": 5.1943565079425805e-06,
"loss": 0.7361,
"step": 629
},
{
"epoch": 0.5,
"grad_norm": 0.7033463716506958,
"learning_rate": 5.181405298902763e-06,
"loss": 0.1087,
"step": 630
},
{
"epoch": 0.5,
"grad_norm": 0.7392492294311523,
"learning_rate": 5.168452871029871e-06,
"loss": 0.115,
"step": 631
},
{
"epoch": 0.51,
"grad_norm": 0.25571781396865845,
"learning_rate": 5.155499311349185e-06,
"loss": 0.2539,
"step": 632
},
{
"epoch": 0.51,
"grad_norm": 0.2640807330608368,
"learning_rate": 5.142544706893595e-06,
"loss": 0.2501,
"step": 633
},
{
"epoch": 0.51,
"grad_norm": 0.23950879275798798,
"learning_rate": 5.1295891447030056e-06,
"loss": 0.2427,
"step": 634
},
{
"epoch": 0.51,
"grad_norm": 0.2711010277271271,
"learning_rate": 5.116632711823762e-06,
"loss": 0.2452,
"step": 635
},
{
"epoch": 0.51,
"grad_norm": 0.26640841364860535,
"learning_rate": 5.103675495308054e-06,
"loss": 0.2447,
"step": 636
},
{
"epoch": 0.51,
"grad_norm": 0.3056611120700836,
"learning_rate": 5.090717582213338e-06,
"loss": 0.2601,
"step": 637
},
{
"epoch": 0.51,
"grad_norm": 0.2731577157974243,
"learning_rate": 5.077759059601756e-06,
"loss": 0.2402,
"step": 638
},
{
"epoch": 0.51,
"grad_norm": 0.20222416520118713,
"learning_rate": 5.064800014539536e-06,
"loss": 0.2425,
"step": 639
},
{
"epoch": 0.51,
"grad_norm": 0.21446771919727325,
"learning_rate": 5.051840534096422e-06,
"loss": 0.2349,
"step": 640
},
{
"epoch": 0.51,
"grad_norm": 0.6989370584487915,
"learning_rate": 5.038880705345086e-06,
"loss": 0.0977,
"step": 641
},
{
"epoch": 0.51,
"grad_norm": 0.25862133502960205,
"learning_rate": 5.025920615360532e-06,
"loss": 0.2374,
"step": 642
},
{
"epoch": 0.51,
"grad_norm": 0.8224275708198547,
"learning_rate": 5.0129603512195255e-06,
"loss": 0.1125,
"step": 643
},
{
"epoch": 0.52,
"grad_norm": 0.7922216653823853,
"learning_rate": 5e-06,
"loss": 0.1054,
"step": 644
},
{
"epoch": 0.52,
"grad_norm": 0.23588776588439941,
"learning_rate": 4.987039648780475e-06,
"loss": 0.2318,
"step": 645
},
{
"epoch": 0.52,
"grad_norm": 0.7823762893676758,
"learning_rate": 4.974079384639469e-06,
"loss": 0.1293,
"step": 646
},
{
"epoch": 0.52,
"grad_norm": 1.3290942907333374,
"learning_rate": 4.961119294654915e-06,
"loss": 0.7341,
"step": 647
},
{
"epoch": 0.52,
"grad_norm": 0.7482376098632812,
"learning_rate": 4.948159465903578e-06,
"loss": 0.0947,
"step": 648
},
{
"epoch": 0.52,
"grad_norm": 1.2851827144622803,
"learning_rate": 4.935199985460466e-06,
"loss": 0.6908,
"step": 649
},
{
"epoch": 0.52,
"grad_norm": 0.7733777761459351,
"learning_rate": 4.922240940398246e-06,
"loss": 0.1137,
"step": 650
},
{
"epoch": 0.52,
"eval_loss": 0.17226335406303406,
"eval_runtime": 125.2705,
"eval_samples_per_second": 35.124,
"eval_steps_per_second": 1.102,
"step": 650
},
{
"epoch": 0.52,
"grad_norm": 0.7104854583740234,
"learning_rate": 4.909282417786662e-06,
"loss": 0.1123,
"step": 651
},
{
"epoch": 0.52,
"grad_norm": 1.1987510919570923,
"learning_rate": 4.89632450469195e-06,
"loss": 0.7609,
"step": 652
},
{
"epoch": 0.52,
"grad_norm": 1.4330857992172241,
"learning_rate": 4.883367288176239e-06,
"loss": 0.7684,
"step": 653
},
{
"epoch": 0.52,
"grad_norm": 0.4067681133747101,
"learning_rate": 4.870410855296994e-06,
"loss": 0.2589,
"step": 654
},
{
"epoch": 0.52,
"grad_norm": 0.3044109642505646,
"learning_rate": 4.857455293106408e-06,
"loss": 0.2596,
"step": 655
},
{
"epoch": 0.52,
"grad_norm": 0.27583155035972595,
"learning_rate": 4.844500688650817e-06,
"loss": 0.265,
"step": 656
},
{
"epoch": 0.53,
"grad_norm": 1.4379302263259888,
"learning_rate": 4.831547128970129e-06,
"loss": 0.7279,
"step": 657
},
{
"epoch": 0.53,
"grad_norm": 0.8062425851821899,
"learning_rate": 4.818594701097239e-06,
"loss": 0.1172,
"step": 658
},
{
"epoch": 0.53,
"grad_norm": 1.231427550315857,
"learning_rate": 4.80564349205742e-06,
"loss": 0.7418,
"step": 659
},
{
"epoch": 0.53,
"grad_norm": 0.5487538576126099,
"learning_rate": 4.792693588867774e-06,
"loss": 0.2624,
"step": 660
},
{
"epoch": 0.53,
"grad_norm": 0.8645210266113281,
"learning_rate": 4.779745078536618e-06,
"loss": 0.1261,
"step": 661
},
{
"epoch": 0.53,
"grad_norm": 0.3868701756000519,
"learning_rate": 4.766798048062913e-06,
"loss": 0.222,
"step": 662
},
{
"epoch": 0.53,
"grad_norm": 0.3177044689655304,
"learning_rate": 4.753852584435679e-06,
"loss": 0.2692,
"step": 663
},
{
"epoch": 0.53,
"grad_norm": 0.7175981402397156,
"learning_rate": 4.740908774633408e-06,
"loss": 0.0905,
"step": 664
},
{
"epoch": 0.53,
"grad_norm": 0.8378758430480957,
"learning_rate": 4.72796670562348e-06,
"loss": 0.1151,
"step": 665
},
{
"epoch": 0.53,
"grad_norm": 0.7432547211647034,
"learning_rate": 4.715026464361576e-06,
"loss": 0.0971,
"step": 666
},
{
"epoch": 0.53,
"grad_norm": 0.6319710612297058,
"learning_rate": 4.702088137791104e-06,
"loss": 0.1084,
"step": 667
},
{
"epoch": 0.53,
"grad_norm": 0.5788446068763733,
"learning_rate": 4.689151812842598e-06,
"loss": 0.253,
"step": 668
},
{
"epoch": 0.54,
"grad_norm": 0.6017256379127502,
"learning_rate": 4.676217576433149e-06,
"loss": 0.2652,
"step": 669
},
{
"epoch": 0.54,
"grad_norm": 0.7286050319671631,
"learning_rate": 4.663285515465818e-06,
"loss": 0.0908,
"step": 670
},
{
"epoch": 0.54,
"grad_norm": 0.784467339515686,
"learning_rate": 4.650355716829044e-06,
"loss": 0.0964,
"step": 671
},
{
"epoch": 0.54,
"grad_norm": 0.8168984055519104,
"learning_rate": 4.637428267396069e-06,
"loss": 0.0914,
"step": 672
},
{
"epoch": 0.54,
"grad_norm": 2.087724208831787,
"learning_rate": 4.624503254024348e-06,
"loss": 0.7294,
"step": 673
},
{
"epoch": 0.54,
"grad_norm": 0.558775007724762,
"learning_rate": 4.611580763554969e-06,
"loss": 0.2364,
"step": 674
},
{
"epoch": 0.54,
"grad_norm": 1.536269187927246,
"learning_rate": 4.598660882812077e-06,
"loss": 0.7447,
"step": 675
},
{
"epoch": 0.54,
"grad_norm": 0.4555722773075104,
"learning_rate": 4.58574369860227e-06,
"loss": 0.2483,
"step": 676
},
{
"epoch": 0.54,
"grad_norm": 0.39679044485092163,
"learning_rate": 4.572829297714037e-06,
"loss": 0.2804,
"step": 677
},
{
"epoch": 0.54,
"grad_norm": 0.3710139989852905,
"learning_rate": 4.559917766917166e-06,
"loss": 0.2687,
"step": 678
},
{
"epoch": 0.54,
"grad_norm": 0.6935705542564392,
"learning_rate": 4.547009192962155e-06,
"loss": 0.0879,
"step": 679
},
{
"epoch": 0.54,
"grad_norm": 0.3818451762199402,
"learning_rate": 4.534103662579643e-06,
"loss": 0.2541,
"step": 680
},
{
"epoch": 0.54,
"grad_norm": 0.7783706784248352,
"learning_rate": 4.521201262479816e-06,
"loss": 0.1026,
"step": 681
},
{
"epoch": 0.55,
"grad_norm": 0.38123849034309387,
"learning_rate": 4.508302079351827e-06,
"loss": 0.2567,
"step": 682
},
{
"epoch": 0.55,
"grad_norm": 0.73029625415802,
"learning_rate": 4.4954061998632175e-06,
"loss": 0.0885,
"step": 683
},
{
"epoch": 0.55,
"grad_norm": 0.44833430647850037,
"learning_rate": 4.482513710659333e-06,
"loss": 0.2575,
"step": 684
},
{
"epoch": 0.55,
"grad_norm": 0.3061580955982208,
"learning_rate": 4.469624698362734e-06,
"loss": 0.2265,
"step": 685
},
{
"epoch": 0.55,
"grad_norm": 0.26141464710235596,
"learning_rate": 4.456739249572628e-06,
"loss": 0.2536,
"step": 686
},
{
"epoch": 0.55,
"grad_norm": 0.7800106406211853,
"learning_rate": 4.4438574508642755e-06,
"loss": 0.1043,
"step": 687
},
{
"epoch": 0.55,
"grad_norm": 0.32099348306655884,
"learning_rate": 4.430979388788413e-06,
"loss": 0.2426,
"step": 688
},
{
"epoch": 0.55,
"grad_norm": 0.32918378710746765,
"learning_rate": 4.418105149870673e-06,
"loss": 0.2525,
"step": 689
},
{
"epoch": 0.55,
"grad_norm": 0.7075226306915283,
"learning_rate": 4.405234820611001e-06,
"loss": 0.0973,
"step": 690
},
{
"epoch": 0.55,
"grad_norm": 0.8305571675300598,
"learning_rate": 4.392368487483067e-06,
"loss": 0.1179,
"step": 691
},
{
"epoch": 0.55,
"grad_norm": 0.6249308586120605,
"learning_rate": 4.379506236933703e-06,
"loss": 0.0829,
"step": 692
},
{
"epoch": 0.55,
"grad_norm": 0.30378952622413635,
"learning_rate": 4.366648155382305e-06,
"loss": 0.2375,
"step": 693
},
{
"epoch": 0.56,
"grad_norm": 0.7136104702949524,
"learning_rate": 4.3537943292202555e-06,
"loss": 0.0881,
"step": 694
},
{
"epoch": 0.56,
"grad_norm": 0.7012856602668762,
"learning_rate": 4.3409448448103495e-06,
"loss": 0.0947,
"step": 695
},
{
"epoch": 0.56,
"grad_norm": 2.034573793411255,
"learning_rate": 4.328099788486212e-06,
"loss": 0.7128,
"step": 696
},
{
"epoch": 0.56,
"grad_norm": 0.29149001836776733,
"learning_rate": 4.3152592465517104e-06,
"loss": 0.2604,
"step": 697
},
{
"epoch": 0.56,
"grad_norm": 0.29406610131263733,
"learning_rate": 4.3024233052803855e-06,
"loss": 0.2524,
"step": 698
},
{
"epoch": 0.56,
"grad_norm": 0.1930028349161148,
"learning_rate": 4.289592050914867e-06,
"loss": 0.253,
"step": 699
},
{
"epoch": 0.56,
"grad_norm": 0.23443476855754852,
"learning_rate": 4.276765569666292e-06,
"loss": 0.2566,
"step": 700
},
{
"epoch": 0.56,
"eval_loss": 0.1715490221977234,
"eval_runtime": 126.577,
"eval_samples_per_second": 34.761,
"eval_steps_per_second": 1.09,
"step": 700
},
{
"epoch": 0.56,
"grad_norm": 0.7931630611419678,
"learning_rate": 4.263943947713727e-06,
"loss": 0.1062,
"step": 701
},
{
"epoch": 0.56,
"grad_norm": 0.31464895606040955,
"learning_rate": 4.251127271203593e-06,
"loss": 0.2573,
"step": 702
},
{
"epoch": 0.56,
"grad_norm": 1.2291940450668335,
"learning_rate": 4.238315626249081e-06,
"loss": 0.7277,
"step": 703
},
{
"epoch": 0.56,
"grad_norm": 0.31031131744384766,
"learning_rate": 4.2255090989295765e-06,
"loss": 0.249,
"step": 704
},
{
"epoch": 0.56,
"grad_norm": 0.9248325228691101,
"learning_rate": 4.212707775290081e-06,
"loss": 0.1123,
"step": 705
},
{
"epoch": 0.56,
"grad_norm": 0.7715104818344116,
"learning_rate": 4.199911741340631e-06,
"loss": 0.1156,
"step": 706
},
{
"epoch": 0.57,
"grad_norm": 0.644995391368866,
"learning_rate": 4.187121083055724e-06,
"loss": 0.0865,
"step": 707
},
{
"epoch": 0.57,
"grad_norm": 0.24592100083827972,
"learning_rate": 4.174335886373744e-06,
"loss": 0.2435,
"step": 708
},
{
"epoch": 0.57,
"grad_norm": 0.805558443069458,
"learning_rate": 4.161556237196366e-06,
"loss": 0.1121,
"step": 709
},
{
"epoch": 0.57,
"grad_norm": 0.2514585256576538,
"learning_rate": 4.148782221388007e-06,
"loss": 0.2394,
"step": 710
},
{
"epoch": 0.57,
"grad_norm": 1.6131137609481812,
"learning_rate": 4.136013924775228e-06,
"loss": 0.7225,
"step": 711
},
{
"epoch": 0.57,
"grad_norm": 0.7845091819763184,
"learning_rate": 4.123251433146162e-06,
"loss": 0.1039,
"step": 712
},
{
"epoch": 0.57,
"grad_norm": 0.21544544398784637,
"learning_rate": 4.11049483224994e-06,
"loss": 0.2295,
"step": 713
},
{
"epoch": 0.57,
"grad_norm": 0.2165309339761734,
"learning_rate": 4.097744207796119e-06,
"loss": 0.2404,
"step": 714
},
{
"epoch": 0.57,
"grad_norm": 1.3429161310195923,
"learning_rate": 4.0849996454540945e-06,
"loss": 0.7288,
"step": 715
},
{
"epoch": 0.57,
"grad_norm": 0.8344590663909912,
"learning_rate": 4.072261230852534e-06,
"loss": 0.1019,
"step": 716
},
{
"epoch": 0.57,
"grad_norm": 0.27863648533821106,
"learning_rate": 4.059529049578803e-06,
"loss": 0.2422,
"step": 717
},
{
"epoch": 0.57,
"grad_norm": 0.22490869462490082,
"learning_rate": 4.04680318717838e-06,
"loss": 0.2475,
"step": 718
},
{
"epoch": 0.58,
"grad_norm": 0.8715108036994934,
"learning_rate": 4.034083729154291e-06,
"loss": 0.1072,
"step": 719
},
{
"epoch": 0.58,
"grad_norm": 0.24501661956310272,
"learning_rate": 4.021370760966536e-06,
"loss": 0.2534,
"step": 720
},
{
"epoch": 0.58,
"grad_norm": 0.6684597134590149,
"learning_rate": 4.008664368031503e-06,
"loss": 0.0927,
"step": 721
},
{
"epoch": 0.58,
"grad_norm": 0.24028076231479645,
"learning_rate": 3.995964635721409e-06,
"loss": 0.2423,
"step": 722
},
{
"epoch": 0.58,
"grad_norm": 1.7033452987670898,
"learning_rate": 3.983271649363713e-06,
"loss": 0.7541,
"step": 723
},
{
"epoch": 0.58,
"grad_norm": 1.5531734228134155,
"learning_rate": 3.970585494240554e-06,
"loss": 0.7236,
"step": 724
},
{
"epoch": 0.58,
"grad_norm": 0.7803600430488586,
"learning_rate": 3.957906255588174e-06,
"loss": 0.098,
"step": 725
},
{
"epoch": 0.58,
"grad_norm": 0.30403950810432434,
"learning_rate": 3.945234018596335e-06,
"loss": 0.2561,
"step": 726
},
{
"epoch": 0.58,
"grad_norm": 0.2124655842781067,
"learning_rate": 3.932568868407768e-06,
"loss": 0.237,
"step": 727
},
{
"epoch": 0.58,
"grad_norm": 0.23335345089435577,
"learning_rate": 3.919910890117584e-06,
"loss": 0.2485,
"step": 728
},
{
"epoch": 0.58,
"grad_norm": 1.6244477033615112,
"learning_rate": 3.907260168772703e-06,
"loss": 0.731,
"step": 729
},
{
"epoch": 0.58,
"grad_norm": 0.2952788472175598,
"learning_rate": 3.8946167893712916e-06,
"loss": 0.2503,
"step": 730
},
{
"epoch": 0.58,
"grad_norm": 0.29336994886398315,
"learning_rate": 3.8819808368621895e-06,
"loss": 0.2454,
"step": 731
},
{
"epoch": 0.59,
"grad_norm": 1.5762007236480713,
"learning_rate": 3.869352396144327e-06,
"loss": 0.6874,
"step": 732
},
{
"epoch": 0.59,
"grad_norm": 1.3270254135131836,
"learning_rate": 3.856731552066173e-06,
"loss": 0.6907,
"step": 733
},
{
"epoch": 0.59,
"grad_norm": 0.665510892868042,
"learning_rate": 3.844118389425154e-06,
"loss": 0.0996,
"step": 734
},
{
"epoch": 0.59,
"grad_norm": 0.49461278319358826,
"learning_rate": 3.831512992967079e-06,
"loss": 0.258,
"step": 735
},
{
"epoch": 0.59,
"grad_norm": 0.3135206699371338,
"learning_rate": 3.818915447385588e-06,
"loss": 0.2675,
"step": 736
},
{
"epoch": 0.59,
"grad_norm": 0.7065826654434204,
"learning_rate": 3.806325837321565e-06,
"loss": 0.0885,
"step": 737
},
{
"epoch": 0.59,
"grad_norm": 0.3554627001285553,
"learning_rate": 3.7937442473625787e-06,
"loss": 0.2249,
"step": 738
},
{
"epoch": 0.59,
"grad_norm": 0.6542179584503174,
"learning_rate": 3.7811707620423118e-06,
"loss": 0.0929,
"step": 739
},
{
"epoch": 0.59,
"grad_norm": 0.3531971573829651,
"learning_rate": 3.768605465839994e-06,
"loss": 0.238,
"step": 740
},
{
"epoch": 0.59,
"grad_norm": 0.2867192327976227,
"learning_rate": 3.7560484431798316e-06,
"loss": 0.2614,
"step": 741
},
{
"epoch": 0.59,
"grad_norm": 0.2395482361316681,
"learning_rate": 3.743499778430445e-06,
"loss": 0.2544,
"step": 742
},
{
"epoch": 0.59,
"grad_norm": 0.682070791721344,
"learning_rate": 3.7309595559042977e-06,
"loss": 0.107,
"step": 743
},
{
"epoch": 0.6,
"grad_norm": 0.39497658610343933,
"learning_rate": 3.7184278598571298e-06,
"loss": 0.2254,
"step": 744
},
{
"epoch": 0.6,
"grad_norm": 0.4273214340209961,
"learning_rate": 3.705904774487396e-06,
"loss": 0.2513,
"step": 745
},
{
"epoch": 0.6,
"grad_norm": 0.23918572068214417,
"learning_rate": 3.6933903839356983e-06,
"loss": 0.2457,
"step": 746
},
{
"epoch": 0.6,
"grad_norm": 0.7423481345176697,
"learning_rate": 3.680884772284212e-06,
"loss": 0.1022,
"step": 747
},
{
"epoch": 0.6,
"grad_norm": 0.5464329719543457,
"learning_rate": 3.6683880235561383e-06,
"loss": 0.2666,
"step": 748
},
{
"epoch": 0.6,
"grad_norm": 0.36674508452415466,
"learning_rate": 3.6559002217151256e-06,
"loss": 0.2571,
"step": 749
},
{
"epoch": 0.6,
"grad_norm": 0.25653377175331116,
"learning_rate": 3.6434214506647064e-06,
"loss": 0.2295,
"step": 750
},
{
"epoch": 0.6,
"eval_loss": 0.16999034583568573,
"eval_runtime": 129.3907,
"eval_samples_per_second": 34.006,
"eval_steps_per_second": 1.067,
"step": 750
},
{
"epoch": 0.6,
"grad_norm": 0.3934086859226227,
"learning_rate": 3.63095179424774e-06,
"loss": 0.229,
"step": 751
},
{
"epoch": 0.6,
"grad_norm": 0.6927469968795776,
"learning_rate": 3.6184913362458497e-06,
"loss": 0.0898,
"step": 752
},
{
"epoch": 0.6,
"grad_norm": 0.5401495099067688,
"learning_rate": 3.6060401603788476e-06,
"loss": 0.2362,
"step": 753
},
{
"epoch": 0.6,
"grad_norm": 0.3733712136745453,
"learning_rate": 3.5935983503041864e-06,
"loss": 0.2451,
"step": 754
},
{
"epoch": 0.6,
"grad_norm": 0.27751830220222473,
"learning_rate": 3.581165989616392e-06,
"loss": 0.2394,
"step": 755
},
{
"epoch": 0.6,
"grad_norm": 2.7958240509033203,
"learning_rate": 3.568743161846497e-06,
"loss": 0.717,
"step": 756
},
{
"epoch": 0.61,
"grad_norm": 0.7180008292198181,
"learning_rate": 3.5563299504614883e-06,
"loss": 0.0935,
"step": 757
},
{
"epoch": 0.61,
"grad_norm": 0.7136401534080505,
"learning_rate": 3.5439264388637407e-06,
"loss": 0.0939,
"step": 758
},
{
"epoch": 0.61,
"grad_norm": 0.8162506222724915,
"learning_rate": 3.5315327103904545e-06,
"loss": 0.08,
"step": 759
},
{
"epoch": 0.61,
"grad_norm": 0.6645429134368896,
"learning_rate": 3.5191488483131033e-06,
"loss": 0.2392,
"step": 760
},
{
"epoch": 0.61,
"grad_norm": 1.905449390411377,
"learning_rate": 3.506774935836868e-06,
"loss": 0.7538,
"step": 761
},
{
"epoch": 0.61,
"grad_norm": 1.7625935077667236,
"learning_rate": 3.4944110561000785e-06,
"loss": 0.7105,
"step": 762
},
{
"epoch": 0.61,
"grad_norm": 0.5459256172180176,
"learning_rate": 3.482057292173658e-06,
"loss": 0.2441,
"step": 763
},
{
"epoch": 0.61,
"grad_norm": 0.2979334890842438,
"learning_rate": 3.469713727060564e-06,
"loss": 0.224,
"step": 764
},
{
"epoch": 0.61,
"grad_norm": 0.7048526406288147,
"learning_rate": 3.4573804436952265e-06,
"loss": 0.0903,
"step": 765
},
{
"epoch": 0.61,
"grad_norm": 0.40703466534614563,
"learning_rate": 3.4450575249429975e-06,
"loss": 0.2446,
"step": 766
},
{
"epoch": 0.61,
"grad_norm": 0.4916253089904785,
"learning_rate": 3.432745053599591e-06,
"loss": 0.2386,
"step": 767
},
{
"epoch": 0.61,
"grad_norm": 0.8090611696243286,
"learning_rate": 3.4204431123905195e-06,
"loss": 0.0949,
"step": 768
},
{
"epoch": 0.62,
"grad_norm": 2.2344298362731934,
"learning_rate": 3.4081517839705557e-06,
"loss": 0.7194,
"step": 769
},
{
"epoch": 0.62,
"grad_norm": 2.0639944076538086,
"learning_rate": 3.3958711509231627e-06,
"loss": 0.7562,
"step": 770
},
{
"epoch": 0.62,
"grad_norm": 0.7143884301185608,
"learning_rate": 3.383601295759938e-06,
"loss": 0.0811,
"step": 771
},
{
"epoch": 0.62,
"grad_norm": 0.32340705394744873,
"learning_rate": 3.371342300920071e-06,
"loss": 0.253,
"step": 772
},
{
"epoch": 0.62,
"grad_norm": 0.25489377975463867,
"learning_rate": 3.359094248769777e-06,
"loss": 0.2485,
"step": 773
},
{
"epoch": 0.62,
"grad_norm": 0.2620706260204315,
"learning_rate": 3.3468572216017536e-06,
"loss": 0.2233,
"step": 774
},
{
"epoch": 0.62,
"grad_norm": 0.31240496039390564,
"learning_rate": 3.334631301634623e-06,
"loss": 0.2439,
"step": 775
},
{
"epoch": 0.62,
"grad_norm": 0.6251383423805237,
"learning_rate": 3.322416571012376e-06,
"loss": 0.0891,
"step": 776
},
{
"epoch": 0.62,
"grad_norm": 0.37054863572120667,
"learning_rate": 3.3102131118038274e-06,
"loss": 0.2651,
"step": 777
},
{
"epoch": 0.62,
"grad_norm": 0.264030396938324,
"learning_rate": 3.29802100600206e-06,
"loss": 0.2279,
"step": 778
},
{
"epoch": 0.62,
"grad_norm": 0.29201561212539673,
"learning_rate": 3.2858403355238745e-06,
"loss": 0.2569,
"step": 779
},
{
"epoch": 0.62,
"grad_norm": 0.263555109500885,
"learning_rate": 3.273671182209241e-06,
"loss": 0.2615,
"step": 780
},
{
"epoch": 0.62,
"grad_norm": 0.765890896320343,
"learning_rate": 3.261513627820747e-06,
"loss": 0.0896,
"step": 781
},
{
"epoch": 0.63,
"grad_norm": 0.23219743371009827,
"learning_rate": 3.249367754043047e-06,
"loss": 0.231,
"step": 782
},
{
"epoch": 0.63,
"grad_norm": 0.3147435188293457,
"learning_rate": 3.237233642482317e-06,
"loss": 0.2446,
"step": 783
},
{
"epoch": 0.63,
"grad_norm": 0.3469120264053345,
"learning_rate": 3.225111374665707e-06,
"loss": 0.2525,
"step": 784
},
{
"epoch": 0.63,
"grad_norm": 0.8331325054168701,
"learning_rate": 3.2130010320407824e-06,
"loss": 0.0989,
"step": 785
},
{
"epoch": 0.63,
"grad_norm": 0.21551278233528137,
"learning_rate": 3.200902695974995e-06,
"loss": 0.243,
"step": 786
},
{
"epoch": 0.63,
"grad_norm": 0.28955933451652527,
"learning_rate": 3.188816447755124e-06,
"loss": 0.2366,
"step": 787
},
{
"epoch": 0.63,
"grad_norm": 0.287164568901062,
"learning_rate": 3.176742368586725e-06,
"loss": 0.2224,
"step": 788
},
{
"epoch": 0.63,
"grad_norm": 1.5003104209899902,
"learning_rate": 3.1646805395935996e-06,
"loss": 0.702,
"step": 789
},
{
"epoch": 0.63,
"grad_norm": 0.8306986093521118,
"learning_rate": 3.152631041817244e-06,
"loss": 0.1145,
"step": 790
},
{
"epoch": 0.63,
"grad_norm": 0.21188777685165405,
"learning_rate": 3.1405939562162934e-06,
"loss": 0.2459,
"step": 791
},
{
"epoch": 0.63,
"grad_norm": 0.22515565156936646,
"learning_rate": 3.1285693636659953e-06,
"loss": 0.244,
"step": 792
},
{
"epoch": 0.63,
"grad_norm": 0.22754062712192535,
"learning_rate": 3.116557344957658e-06,
"loss": 0.233,
"step": 793
},
{
"epoch": 0.64,
"grad_norm": 0.2095058709383011,
"learning_rate": 3.104557980798104e-06,
"loss": 0.2445,
"step": 794
},
{
"epoch": 0.64,
"grad_norm": 0.72348552942276,
"learning_rate": 3.0925713518091348e-06,
"loss": 0.1039,
"step": 795
},
{
"epoch": 0.64,
"grad_norm": 0.2232055962085724,
"learning_rate": 3.0805975385269883e-06,
"loss": 0.2568,
"step": 796
},
{
"epoch": 0.64,
"grad_norm": 0.7563778758049011,
"learning_rate": 3.0686366214017904e-06,
"loss": 0.1052,
"step": 797
},
{
"epoch": 0.64,
"grad_norm": 0.2356943041086197,
"learning_rate": 3.056688680797024e-06,
"loss": 0.2325,
"step": 798
},
{
"epoch": 0.64,
"grad_norm": 0.6660679578781128,
"learning_rate": 3.0447537969889852e-06,
"loss": 0.1011,
"step": 799
},
{
"epoch": 0.64,
"grad_norm": 0.719944179058075,
"learning_rate": 3.032832050166239e-06,
"loss": 0.1029,
"step": 800
},
{
"epoch": 0.64,
"eval_loss": 0.17016293108463287,
"eval_runtime": 128.0162,
"eval_samples_per_second": 34.371,
"eval_steps_per_second": 1.078,
"step": 800
},
{
"epoch": 0.64,
"grad_norm": 0.7572996020317078,
"learning_rate": 3.0209235204290886e-06,
"loss": 0.0998,
"step": 801
},
{
"epoch": 0.64,
"grad_norm": 0.2846776843070984,
"learning_rate": 3.0090282877890376e-06,
"loss": 0.2578,
"step": 802
},
{
"epoch": 0.64,
"grad_norm": 0.24966636300086975,
"learning_rate": 2.9971464321682364e-06,
"loss": 0.2342,
"step": 803
},
{
"epoch": 0.64,
"grad_norm": 0.7679559588432312,
"learning_rate": 2.9852780333989706e-06,
"loss": 0.0815,
"step": 804
},
{
"epoch": 0.64,
"grad_norm": 0.2663658559322357,
"learning_rate": 2.9734231712231073e-06,
"loss": 0.2337,
"step": 805
},
{
"epoch": 0.64,
"grad_norm": 0.25905749201774597,
"learning_rate": 2.961581925291557e-06,
"loss": 0.2432,
"step": 806
},
{
"epoch": 0.65,
"grad_norm": 0.23444600403308868,
"learning_rate": 2.949754375163751e-06,
"loss": 0.2497,
"step": 807
},
{
"epoch": 0.65,
"grad_norm": 0.767976701259613,
"learning_rate": 2.937940600307104e-06,
"loss": 0.1042,
"step": 808
},
{
"epoch": 0.65,
"grad_norm": 0.2620820105075836,
"learning_rate": 2.9261406800964665e-06,
"loss": 0.2387,
"step": 809
},
{
"epoch": 0.65,
"grad_norm": 1.5155500173568726,
"learning_rate": 2.9143546938136093e-06,
"loss": 0.6781,
"step": 810
},
{
"epoch": 0.65,
"grad_norm": 1.2395851612091064,
"learning_rate": 2.902582720646685e-06,
"loss": 0.7216,
"step": 811
},
{
"epoch": 0.65,
"grad_norm": 0.2558597922325134,
"learning_rate": 2.8908248396896893e-06,
"loss": 0.2254,
"step": 812
},
{
"epoch": 0.65,
"grad_norm": 0.24844790995121002,
"learning_rate": 2.8790811299419334e-06,
"loss": 0.2472,
"step": 813
},
{
"epoch": 0.65,
"grad_norm": 1.2689884901046753,
"learning_rate": 2.8673516703075247e-06,
"loss": 0.7292,
"step": 814
},
{
"epoch": 0.65,
"grad_norm": 0.6802056431770325,
"learning_rate": 2.8556365395948106e-06,
"loss": 0.0828,
"step": 815
},
{
"epoch": 0.65,
"grad_norm": 0.33545202016830444,
"learning_rate": 2.843935816515877e-06,
"loss": 0.2326,
"step": 816
},
{
"epoch": 0.65,
"grad_norm": 0.29078230261802673,
"learning_rate": 2.8322495796860083e-06,
"loss": 0.2479,
"step": 817
},
{
"epoch": 0.65,
"grad_norm": 0.21646979451179504,
"learning_rate": 2.820577907623145e-06,
"loss": 0.2559,
"step": 818
},
{
"epoch": 0.66,
"grad_norm": 0.277942955493927,
"learning_rate": 2.808920878747381e-06,
"loss": 0.2607,
"step": 819
},
{
"epoch": 0.66,
"grad_norm": 1.4038424491882324,
"learning_rate": 2.7972785713804264e-06,
"loss": 0.7794,
"step": 820
},
{
"epoch": 0.66,
"grad_norm": 0.3337160050868988,
"learning_rate": 2.7856510637450666e-06,
"loss": 0.2467,
"step": 821
},
{
"epoch": 0.66,
"grad_norm": 0.8176087737083435,
"learning_rate": 2.7740384339646655e-06,
"loss": 0.1051,
"step": 822
},
{
"epoch": 0.66,
"grad_norm": 0.6850245594978333,
"learning_rate": 2.7624407600626144e-06,
"loss": 0.0914,
"step": 823
},
{
"epoch": 0.66,
"grad_norm": 0.664178729057312,
"learning_rate": 2.750858119961821e-06,
"loss": 0.0823,
"step": 824
},
{
"epoch": 0.66,
"grad_norm": 0.30444636940956116,
"learning_rate": 2.7392905914841882e-06,
"loss": 0.2514,
"step": 825
},
{
"epoch": 0.66,
"grad_norm": 0.8350824117660522,
"learning_rate": 2.7277382523500804e-06,
"loss": 0.1124,
"step": 826
},
{
"epoch": 0.66,
"grad_norm": 0.7100653648376465,
"learning_rate": 2.716201180177808e-06,
"loss": 0.1094,
"step": 827
},
{
"epoch": 0.66,
"grad_norm": 1.2965682744979858,
"learning_rate": 2.7046794524831088e-06,
"loss": 0.6966,
"step": 828
},
{
"epoch": 0.66,
"grad_norm": 0.7987070679664612,
"learning_rate": 2.693173146678621e-06,
"loss": 0.1,
"step": 829
},
{
"epoch": 0.66,
"grad_norm": 0.7385662794113159,
"learning_rate": 2.6816823400733628e-06,
"loss": 0.1048,
"step": 830
},
{
"epoch": 0.66,
"grad_norm": 1.3140537738800049,
"learning_rate": 2.6702071098722255e-06,
"loss": 0.7206,
"step": 831
},
{
"epoch": 0.67,
"grad_norm": 0.3514021337032318,
"learning_rate": 2.65874753317543e-06,
"loss": 0.2474,
"step": 832
},
{
"epoch": 0.67,
"grad_norm": 0.2625245451927185,
"learning_rate": 2.6473036869780356e-06,
"loss": 0.233,
"step": 833
},
{
"epoch": 0.67,
"grad_norm": 0.21421027183532715,
"learning_rate": 2.6358756481694115e-06,
"loss": 0.2478,
"step": 834
},
{
"epoch": 0.67,
"grad_norm": 0.6813749670982361,
"learning_rate": 2.6244634935327084e-06,
"loss": 0.0827,
"step": 835
},
{
"epoch": 0.67,
"grad_norm": 0.8017157912254333,
"learning_rate": 2.613067299744364e-06,
"loss": 0.0907,
"step": 836
},
{
"epoch": 0.67,
"grad_norm": 0.3578203320503235,
"learning_rate": 2.6016871433735793e-06,
"loss": 0.2383,
"step": 837
},
{
"epoch": 0.67,
"grad_norm": 0.29165777564048767,
"learning_rate": 2.5903231008817888e-06,
"loss": 0.2327,
"step": 838
},
{
"epoch": 0.67,
"grad_norm": 0.2571057975292206,
"learning_rate": 2.578975248622175e-06,
"loss": 0.2293,
"step": 839
},
{
"epoch": 0.67,
"grad_norm": 0.7894619107246399,
"learning_rate": 2.5676436628391356e-06,
"loss": 0.0965,
"step": 840
},
{
"epoch": 0.67,
"grad_norm": 0.2505764365196228,
"learning_rate": 2.556328419667772e-06,
"loss": 0.2401,
"step": 841
},
{
"epoch": 0.67,
"grad_norm": 0.3279572129249573,
"learning_rate": 2.5450295951333896e-06,
"loss": 0.2739,
"step": 842
},
{
"epoch": 0.67,
"grad_norm": 0.7705656886100769,
"learning_rate": 2.5337472651509767e-06,
"loss": 0.1073,
"step": 843
},
{
"epoch": 0.68,
"grad_norm": 0.8020524382591248,
"learning_rate": 2.522481505524692e-06,
"loss": 0.1098,
"step": 844
},
{
"epoch": 0.68,
"grad_norm": 1.5687676668167114,
"learning_rate": 2.5112323919473717e-06,
"loss": 0.7034,
"step": 845
},
{
"epoch": 0.68,
"grad_norm": 0.830295741558075,
"learning_rate": 2.5000000000000015e-06,
"loss": 0.0968,
"step": 846
},
{
"epoch": 0.68,
"grad_norm": 0.7056903839111328,
"learning_rate": 2.488784405151216e-06,
"loss": 0.1016,
"step": 847
},
{
"epoch": 0.68,
"grad_norm": 0.2978593409061432,
"learning_rate": 2.4775856827568016e-06,
"loss": 0.2422,
"step": 848
},
{
"epoch": 0.68,
"grad_norm": 0.2963080406188965,
"learning_rate": 2.4664039080591733e-06,
"loss": 0.2119,
"step": 849
},
{
"epoch": 0.68,
"grad_norm": 0.8135718107223511,
"learning_rate": 2.4552391561868783e-06,
"loss": 0.1046,
"step": 850
},
{
"epoch": 0.68,
"eval_loss": 0.1697417050600052,
"eval_runtime": 129.624,
"eval_samples_per_second": 33.944,
"eval_steps_per_second": 1.065,
"step": 850
},
{
"epoch": 0.68,
"grad_norm": 0.27792826294898987,
"learning_rate": 2.444091502154095e-06,
"loss": 0.2474,
"step": 851
},
{
"epoch": 0.68,
"grad_norm": 0.2584989368915558,
"learning_rate": 2.4329610208601195e-06,
"loss": 0.2453,
"step": 852
},
{
"epoch": 0.68,
"grad_norm": 0.24065913259983063,
"learning_rate": 2.4218477870888686e-06,
"loss": 0.2367,
"step": 853
},
{
"epoch": 0.68,
"grad_norm": 0.2912527620792389,
"learning_rate": 2.410751875508373e-06,
"loss": 0.2426,
"step": 854
},
{
"epoch": 0.68,
"grad_norm": 0.2489793300628662,
"learning_rate": 2.3996733606702856e-06,
"loss": 0.2382,
"step": 855
},
{
"epoch": 0.68,
"grad_norm": 0.7197203636169434,
"learning_rate": 2.388612317009366e-06,
"loss": 0.1205,
"step": 856
},
{
"epoch": 0.69,
"grad_norm": 0.21114397048950195,
"learning_rate": 2.3775688188429897e-06,
"loss": 0.2362,
"step": 857
},
{
"epoch": 0.69,
"grad_norm": 0.27203845977783203,
"learning_rate": 2.3665429403706506e-06,
"loss": 0.2523,
"step": 858
},
{
"epoch": 0.69,
"grad_norm": 0.20314949750900269,
"learning_rate": 2.3555347556734544e-06,
"loss": 0.2213,
"step": 859
},
{
"epoch": 0.69,
"grad_norm": 1.2743370532989502,
"learning_rate": 2.3445443387136247e-06,
"loss": 0.6775,
"step": 860
},
{
"epoch": 0.69,
"grad_norm": 0.27874651551246643,
"learning_rate": 2.333571763334011e-06,
"loss": 0.2525,
"step": 861
},
{
"epoch": 0.69,
"grad_norm": 0.7329751253128052,
"learning_rate": 2.3226171032575856e-06,
"loss": 0.0839,
"step": 862
},
{
"epoch": 0.69,
"grad_norm": 0.2831548750400543,
"learning_rate": 2.3116804320869467e-06,
"loss": 0.2195,
"step": 863
},
{
"epoch": 0.69,
"grad_norm": 0.2916233539581299,
"learning_rate": 2.3007618233038377e-06,
"loss": 0.2355,
"step": 864
},
{
"epoch": 0.69,
"grad_norm": 1.1095150709152222,
"learning_rate": 2.289861350268634e-06,
"loss": 0.6959,
"step": 865
},
{
"epoch": 0.69,
"grad_norm": 0.2547200620174408,
"learning_rate": 2.278979086219863e-06,
"loss": 0.2421,
"step": 866
},
{
"epoch": 0.69,
"grad_norm": 1.2263802289962769,
"learning_rate": 2.2681151042737124e-06,
"loss": 0.7284,
"step": 867
},
{
"epoch": 0.69,
"grad_norm": 0.7119380235671997,
"learning_rate": 2.2572694774235322e-06,
"loss": 0.0964,
"step": 868
},
{
"epoch": 0.7,
"grad_norm": 1.1749521493911743,
"learning_rate": 2.246442278539344e-06,
"loss": 0.6931,
"step": 869
},
{
"epoch": 0.7,
"grad_norm": 0.3053778409957886,
"learning_rate": 2.2356335803673655e-06,
"loss": 0.2264,
"step": 870
},
{
"epoch": 0.7,
"grad_norm": 0.2645954489707947,
"learning_rate": 2.224843455529496e-06,
"loss": 0.2367,
"step": 871
},
{
"epoch": 0.7,
"grad_norm": 0.7360409498214722,
"learning_rate": 2.2140719765228587e-06,
"loss": 0.0908,
"step": 872
},
{
"epoch": 0.7,
"grad_norm": 0.23821556568145752,
"learning_rate": 2.2033192157192877e-06,
"loss": 0.2317,
"step": 873
},
{
"epoch": 0.7,
"grad_norm": 0.2907634973526001,
"learning_rate": 2.192585245364856e-06,
"loss": 0.249,
"step": 874
},
{
"epoch": 0.7,
"grad_norm": 0.20257404446601868,
"learning_rate": 2.18187013757939e-06,
"loss": 0.2373,
"step": 875
},
{
"epoch": 0.7,
"grad_norm": 0.9349608421325684,
"learning_rate": 2.1711739643559767e-06,
"loss": 0.1002,
"step": 876
},
{
"epoch": 0.7,
"grad_norm": 0.2354966104030609,
"learning_rate": 2.1604967975604847e-06,
"loss": 0.246,
"step": 877
},
{
"epoch": 0.7,
"grad_norm": 0.7493621706962585,
"learning_rate": 2.149838708931087e-06,
"loss": 0.1006,
"step": 878
},
{
"epoch": 0.7,
"grad_norm": 0.20398782193660736,
"learning_rate": 2.139199770077768e-06,
"loss": 0.239,
"step": 879
},
{
"epoch": 0.7,
"grad_norm": 0.7295287251472473,
"learning_rate": 2.1285800524818477e-06,
"loss": 0.1115,
"step": 880
},
{
"epoch": 0.7,
"grad_norm": 0.23287217319011688,
"learning_rate": 2.1179796274955073e-06,
"loss": 0.2379,
"step": 881
},
{
"epoch": 0.71,
"grad_norm": 0.27526363730430603,
"learning_rate": 2.1073985663412984e-06,
"loss": 0.2326,
"step": 882
},
{
"epoch": 0.71,
"grad_norm": 0.23017112910747528,
"learning_rate": 2.0968369401116696e-06,
"loss": 0.2342,
"step": 883
},
{
"epoch": 0.71,
"grad_norm": 0.69927978515625,
"learning_rate": 2.086294819768496e-06,
"loss": 0.1017,
"step": 884
},
{
"epoch": 0.71,
"grad_norm": 0.5941361784934998,
"learning_rate": 2.075772276142589e-06,
"loss": 0.0798,
"step": 885
},
{
"epoch": 0.71,
"grad_norm": 0.6996074914932251,
"learning_rate": 2.0652693799332286e-06,
"loss": 0.0898,
"step": 886
},
{
"epoch": 0.71,
"grad_norm": 0.29270604252815247,
"learning_rate": 2.054786201707693e-06,
"loss": 0.2491,
"step": 887
},
{
"epoch": 0.71,
"grad_norm": 0.22176364064216614,
"learning_rate": 2.044322811900767e-06,
"loss": 0.2383,
"step": 888
},
{
"epoch": 0.71,
"grad_norm": 0.24688348174095154,
"learning_rate": 2.0338792808142887e-06,
"loss": 0.2451,
"step": 889
},
{
"epoch": 0.71,
"grad_norm": 0.2619679272174835,
"learning_rate": 2.0234556786166715e-06,
"loss": 0.2274,
"step": 890
},
{
"epoch": 0.71,
"grad_norm": 1.2927919626235962,
"learning_rate": 2.0130520753424175e-06,
"loss": 0.7042,
"step": 891
},
{
"epoch": 0.71,
"grad_norm": 0.890327513217926,
"learning_rate": 2.00266854089167e-06,
"loss": 0.1204,
"step": 892
},
{
"epoch": 0.71,
"grad_norm": 0.7316093444824219,
"learning_rate": 1.9923051450297337e-06,
"loss": 0.0778,
"step": 893
},
{
"epoch": 0.72,
"grad_norm": 0.6988334655761719,
"learning_rate": 1.9819619573865932e-06,
"loss": 0.0991,
"step": 894
},
{
"epoch": 0.72,
"grad_norm": 0.33937904238700867,
"learning_rate": 1.971639047456473e-06,
"loss": 0.233,
"step": 895
},
{
"epoch": 0.72,
"grad_norm": 0.7985022068023682,
"learning_rate": 1.9613364845973433e-06,
"loss": 0.0831,
"step": 896
},
{
"epoch": 0.72,
"grad_norm": 1.2045460939407349,
"learning_rate": 1.9510543380304686e-06,
"loss": 0.6552,
"step": 897
},
{
"epoch": 0.72,
"grad_norm": 0.7385668754577637,
"learning_rate": 1.9407926768399456e-06,
"loss": 0.0974,
"step": 898
},
{
"epoch": 0.72,
"grad_norm": 0.30413779616355896,
"learning_rate": 1.930551569972224e-06,
"loss": 0.256,
"step": 899
},
{
"epoch": 0.72,
"grad_norm": 0.7384352684020996,
"learning_rate": 1.9203310862356577e-06,
"loss": 0.0975,
"step": 900
},
{
"epoch": 0.72,
"eval_loss": 0.1696886271238327,
"eval_runtime": 126.9779,
"eval_samples_per_second": 34.652,
"eval_steps_per_second": 1.087,
"step": 900
},
{
"epoch": 0.72,
"grad_norm": 0.2813623547554016,
"learning_rate": 1.9101312943000372e-06,
"loss": 0.2492,
"step": 901
},
{
"epoch": 0.72,
"grad_norm": 0.26536938548088074,
"learning_rate": 1.8999522626961254e-06,
"loss": 0.2384,
"step": 902
},
{
"epoch": 0.72,
"grad_norm": 0.22477126121520996,
"learning_rate": 1.8897940598151998e-06,
"loss": 0.2159,
"step": 903
},
{
"epoch": 0.72,
"grad_norm": 0.25735536217689514,
"learning_rate": 1.879656753908598e-06,
"loss": 0.2387,
"step": 904
},
{
"epoch": 0.72,
"grad_norm": 0.6589800715446472,
"learning_rate": 1.869540413087249e-06,
"loss": 0.101,
"step": 905
},
{
"epoch": 0.72,
"grad_norm": 0.2775154113769531,
"learning_rate": 1.859445105321221e-06,
"loss": 0.2388,
"step": 906
},
{
"epoch": 0.73,
"grad_norm": 0.2855861783027649,
"learning_rate": 1.8493708984392682e-06,
"loss": 0.2261,
"step": 907
},
{
"epoch": 0.73,
"grad_norm": 0.7179957628250122,
"learning_rate": 1.8393178601283684e-06,
"loss": 0.0906,
"step": 908
},
{
"epoch": 0.73,
"grad_norm": 0.690298318862915,
"learning_rate": 1.8292860579332706e-06,
"loss": 0.09,
"step": 909
},
{
"epoch": 0.73,
"grad_norm": 0.6613442897796631,
"learning_rate": 1.8192755592560446e-06,
"loss": 0.0809,
"step": 910
},
{
"epoch": 0.73,
"grad_norm": 1.1665738821029663,
"learning_rate": 1.8092864313556236e-06,
"loss": 0.6767,
"step": 911
},
{
"epoch": 0.73,
"grad_norm": 0.7107550501823425,
"learning_rate": 1.7993187413473534e-06,
"loss": 0.1007,
"step": 912
},
{
"epoch": 0.73,
"grad_norm": 0.3048025071620941,
"learning_rate": 1.7893725562025416e-06,
"loss": 0.2554,
"step": 913
},
{
"epoch": 0.73,
"grad_norm": 0.6904020309448242,
"learning_rate": 1.7794479427480115e-06,
"loss": 0.0857,
"step": 914
},
{
"epoch": 0.73,
"grad_norm": 0.6889138221740723,
"learning_rate": 1.7695449676656467e-06,
"loss": 0.0876,
"step": 915
},
{
"epoch": 0.73,
"grad_norm": 0.49559327960014343,
"learning_rate": 1.759663697491944e-06,
"loss": 0.2362,
"step": 916
},
{
"epoch": 0.73,
"grad_norm": 0.6628426909446716,
"learning_rate": 1.7498041986175757e-06,
"loss": 0.0837,
"step": 917
},
{
"epoch": 0.73,
"grad_norm": 0.7636250853538513,
"learning_rate": 1.739966537286929e-06,
"loss": 0.0858,
"step": 918
},
{
"epoch": 0.74,
"grad_norm": 1.200890302658081,
"learning_rate": 1.7301507795976697e-06,
"loss": 0.7125,
"step": 919
},
{
"epoch": 0.74,
"grad_norm": 1.3808542490005493,
"learning_rate": 1.7203569915003005e-06,
"loss": 0.702,
"step": 920
},
{
"epoch": 0.74,
"grad_norm": 0.3265986144542694,
"learning_rate": 1.7105852387977096e-06,
"loss": 0.2382,
"step": 921
},
{
"epoch": 0.74,
"grad_norm": 1.3810663223266602,
"learning_rate": 1.7008355871447345e-06,
"loss": 0.7735,
"step": 922
},
{
"epoch": 0.74,
"grad_norm": 0.2844899892807007,
"learning_rate": 1.6911081020477178e-06,
"loss": 0.252,
"step": 923
},
{
"epoch": 0.74,
"grad_norm": 0.80921870470047,
"learning_rate": 1.6814028488640728e-06,
"loss": 0.0999,
"step": 924
},
{
"epoch": 0.74,
"grad_norm": 0.3330190181732178,
"learning_rate": 1.6717198928018352e-06,
"loss": 0.2538,
"step": 925
},
{
"epoch": 0.74,
"grad_norm": 0.2513427138328552,
"learning_rate": 1.6620592989192318e-06,
"loss": 0.2325,
"step": 926
},
{
"epoch": 0.74,
"grad_norm": 0.35313689708709717,
"learning_rate": 1.6524211321242445e-06,
"loss": 0.2392,
"step": 927
},
{
"epoch": 0.74,
"grad_norm": 1.2241019010543823,
"learning_rate": 1.6428054571741658e-06,
"loss": 0.7316,
"step": 928
},
{
"epoch": 0.74,
"grad_norm": 0.3235074579715729,
"learning_rate": 1.633212338675173e-06,
"loss": 0.245,
"step": 929
},
{
"epoch": 0.74,
"grad_norm": 0.39651674032211304,
"learning_rate": 1.6236418410818872e-06,
"loss": 0.2371,
"step": 930
},
{
"epoch": 0.74,
"grad_norm": 0.20827996730804443,
"learning_rate": 1.6140940286969475e-06,
"loss": 0.2429,
"step": 931
},
{
"epoch": 0.75,
"grad_norm": 0.28390830755233765,
"learning_rate": 1.6045689656705715e-06,
"loss": 0.2289,
"step": 932
},
{
"epoch": 0.75,
"grad_norm": 0.23866818845272064,
"learning_rate": 1.595066716000126e-06,
"loss": 0.2185,
"step": 933
},
{
"epoch": 0.75,
"grad_norm": 0.256644070148468,
"learning_rate": 1.5855873435297042e-06,
"loss": 0.2327,
"step": 934
},
{
"epoch": 0.75,
"grad_norm": 0.22517281770706177,
"learning_rate": 1.5761309119496864e-06,
"loss": 0.2281,
"step": 935
},
{
"epoch": 0.75,
"grad_norm": 0.6767497658729553,
"learning_rate": 1.5666974847963162e-06,
"loss": 0.104,
"step": 936
},
{
"epoch": 0.75,
"grad_norm": 0.23979002237319946,
"learning_rate": 1.557287125451279e-06,
"loss": 0.24,
"step": 937
},
{
"epoch": 0.75,
"grad_norm": 0.24018900096416473,
"learning_rate": 1.5478998971412669e-06,
"loss": 0.2333,
"step": 938
},
{
"epoch": 0.75,
"grad_norm": 0.25306275486946106,
"learning_rate": 1.538535862937558e-06,
"loss": 0.2537,
"step": 939
},
{
"epoch": 0.75,
"grad_norm": 0.7163013815879822,
"learning_rate": 1.5291950857555982e-06,
"loss": 0.0987,
"step": 940
},
{
"epoch": 0.75,
"grad_norm": 1.299111247062683,
"learning_rate": 1.519877628354567e-06,
"loss": 0.696,
"step": 941
},
{
"epoch": 0.75,
"grad_norm": 0.2897432744503021,
"learning_rate": 1.510583553336964e-06,
"loss": 0.261,
"step": 942
},
{
"epoch": 0.75,
"grad_norm": 0.30022773146629333,
"learning_rate": 1.5013129231481894e-06,
"loss": 0.2369,
"step": 943
},
{
"epoch": 0.76,
"grad_norm": 0.723784863948822,
"learning_rate": 1.4920658000761172e-06,
"loss": 0.0831,
"step": 944
},
{
"epoch": 0.76,
"grad_norm": 0.22743889689445496,
"learning_rate": 1.4828422462506819e-06,
"loss": 0.2413,
"step": 945
},
{
"epoch": 0.76,
"grad_norm": 0.3439815938472748,
"learning_rate": 1.473642323643465e-06,
"loss": 0.2623,
"step": 946
},
{
"epoch": 0.76,
"grad_norm": 0.2627880275249481,
"learning_rate": 1.4644660940672628e-06,
"loss": 0.2415,
"step": 947
},
{
"epoch": 0.76,
"grad_norm": 0.26256901025772095,
"learning_rate": 1.4553136191756916e-06,
"loss": 0.2456,
"step": 948
},
{
"epoch": 0.76,
"grad_norm": 1.2843241691589355,
"learning_rate": 1.4461849604627643e-06,
"loss": 0.7257,
"step": 949
},
{
"epoch": 0.76,
"grad_norm": 0.26860275864601135,
"learning_rate": 1.4370801792624656e-06,
"loss": 0.2453,
"step": 950
},
{
"epoch": 0.76,
"eval_loss": 0.16911761462688446,
"eval_runtime": 131.1311,
"eval_samples_per_second": 33.554,
"eval_steps_per_second": 1.052,
"step": 950
},
{
"epoch": 0.76,
"grad_norm": 0.761739194393158,
"learning_rate": 1.427999336748364e-06,
"loss": 0.0771,
"step": 951
},
{
"epoch": 0.76,
"grad_norm": 0.7120491862297058,
"learning_rate": 1.4189424939331815e-06,
"loss": 0.109,
"step": 952
},
{
"epoch": 0.76,
"grad_norm": 0.683159589767456,
"learning_rate": 1.4099097116683874e-06,
"loss": 0.0859,
"step": 953
},
{
"epoch": 0.76,
"grad_norm": 0.8068253993988037,
"learning_rate": 1.4009010506437997e-06,
"loss": 0.0898,
"step": 954
},
{
"epoch": 0.76,
"grad_norm": 1.1357736587524414,
"learning_rate": 1.391916571387164e-06,
"loss": 0.6951,
"step": 955
},
{
"epoch": 0.76,
"grad_norm": 0.3836369216442108,
"learning_rate": 1.3829563342637514e-06,
"loss": 0.2454,
"step": 956
},
{
"epoch": 0.77,
"grad_norm": 0.2868916690349579,
"learning_rate": 1.37402039947596e-06,
"loss": 0.2262,
"step": 957
},
{
"epoch": 0.77,
"grad_norm": 0.7387321591377258,
"learning_rate": 1.3651088270628992e-06,
"loss": 0.0733,
"step": 958
},
{
"epoch": 0.77,
"grad_norm": 0.768443763256073,
"learning_rate": 1.3562216768999919e-06,
"loss": 0.0833,
"step": 959
},
{
"epoch": 0.77,
"grad_norm": 0.6564372181892395,
"learning_rate": 1.3473590086985756e-06,
"loss": 0.0831,
"step": 960
},
{
"epoch": 0.77,
"grad_norm": 0.2516481578350067,
"learning_rate": 1.338520882005494e-06,
"loss": 0.2399,
"step": 961
},
{
"epoch": 0.77,
"grad_norm": 0.673554539680481,
"learning_rate": 1.3297073562026992e-06,
"loss": 0.0806,
"step": 962
},
{
"epoch": 0.77,
"grad_norm": 0.3957008123397827,
"learning_rate": 1.3209184905068595e-06,
"loss": 0.2329,
"step": 963
},
{
"epoch": 0.77,
"grad_norm": 0.3042967617511749,
"learning_rate": 1.31215434396895e-06,
"loss": 0.2431,
"step": 964
},
{
"epoch": 0.77,
"grad_norm": 0.2621539831161499,
"learning_rate": 1.3034149754738634e-06,
"loss": 0.2388,
"step": 965
},
{
"epoch": 0.77,
"grad_norm": 0.2188318371772766,
"learning_rate": 1.2947004437400161e-06,
"loss": 0.2526,
"step": 966
},
{
"epoch": 0.77,
"grad_norm": 0.28755953907966614,
"learning_rate": 1.286010807318946e-06,
"loss": 0.2389,
"step": 967
},
{
"epoch": 0.77,
"grad_norm": 0.30778053402900696,
"learning_rate": 1.2773461245949249e-06,
"loss": 0.2576,
"step": 968
},
{
"epoch": 0.78,
"grad_norm": 0.24511705338954926,
"learning_rate": 1.2687064537845635e-06,
"loss": 0.2539,
"step": 969
},
{
"epoch": 0.78,
"grad_norm": 0.21742559969425201,
"learning_rate": 1.2600918529364253e-06,
"loss": 0.2143,
"step": 970
},
{
"epoch": 0.78,
"grad_norm": 0.22307029366493225,
"learning_rate": 1.2515023799306292e-06,
"loss": 0.247,
"step": 971
},
{
"epoch": 0.78,
"grad_norm": 0.2391587644815445,
"learning_rate": 1.242938092478464e-06,
"loss": 0.2445,
"step": 972
},
{
"epoch": 0.78,
"grad_norm": 0.2958686649799347,
"learning_rate": 1.2343990481220036e-06,
"loss": 0.2432,
"step": 973
},
{
"epoch": 0.78,
"grad_norm": 0.6836528182029724,
"learning_rate": 1.225885304233716e-06,
"loss": 0.0742,
"step": 974
},
{
"epoch": 0.78,
"grad_norm": 0.7265254855155945,
"learning_rate": 1.2173969180160782e-06,
"loss": 0.0944,
"step": 975
},
{
"epoch": 0.78,
"grad_norm": 1.2596927881240845,
"learning_rate": 1.2089339465011935e-06,
"loss": 0.6737,
"step": 976
},
{
"epoch": 0.78,
"grad_norm": 0.26803356409072876,
"learning_rate": 1.200496446550411e-06,
"loss": 0.2483,
"step": 977
},
{
"epoch": 0.78,
"grad_norm": 0.2087995409965515,
"learning_rate": 1.1920844748539373e-06,
"loss": 0.2265,
"step": 978
},
{
"epoch": 0.78,
"grad_norm": 0.27871960401535034,
"learning_rate": 1.1836980879304578e-06,
"loss": 0.2337,
"step": 979
},
{
"epoch": 0.78,
"grad_norm": 0.7193799614906311,
"learning_rate": 1.1753373421267622e-06,
"loss": 0.0769,
"step": 980
},
{
"epoch": 0.78,
"grad_norm": 0.226898655295372,
"learning_rate": 1.1670022936173587e-06,
"loss": 0.2407,
"step": 981
},
{
"epoch": 0.79,
"grad_norm": 0.3594352602958679,
"learning_rate": 1.1586929984040974e-06,
"loss": 0.2571,
"step": 982
},
{
"epoch": 0.79,
"grad_norm": 0.2518743872642517,
"learning_rate": 1.1504095123158016e-06,
"loss": 0.2252,
"step": 983
},
{
"epoch": 0.79,
"grad_norm": 0.2381879836320877,
"learning_rate": 1.1421518910078839e-06,
"loss": 0.2511,
"step": 984
},
{
"epoch": 0.79,
"grad_norm": 0.22159552574157715,
"learning_rate": 1.133920189961975e-06,
"loss": 0.2325,
"step": 985
},
{
"epoch": 0.79,
"grad_norm": 0.6609593629837036,
"learning_rate": 1.125714464485551e-06,
"loss": 0.0943,
"step": 986
},
{
"epoch": 0.79,
"grad_norm": 0.646896481513977,
"learning_rate": 1.1175347697115673e-06,
"loss": 0.0777,
"step": 987
},
{
"epoch": 0.79,
"grad_norm": 0.27957817912101746,
"learning_rate": 1.109381160598078e-06,
"loss": 0.2428,
"step": 988
},
{
"epoch": 0.79,
"grad_norm": 0.7439534664154053,
"learning_rate": 1.1012536919278727e-06,
"loss": 0.0976,
"step": 989
},
{
"epoch": 0.79,
"grad_norm": 0.2650754749774933,
"learning_rate": 1.0931524183081105e-06,
"loss": 0.2375,
"step": 990
},
{
"epoch": 0.79,
"grad_norm": 0.7232276797294617,
"learning_rate": 1.085077394169946e-06,
"loss": 0.0968,
"step": 991
},
{
"epoch": 0.79,
"grad_norm": 0.3142140209674835,
"learning_rate": 1.0770286737681701e-06,
"loss": 0.2576,
"step": 992
},
{
"epoch": 0.79,
"grad_norm": 0.673344075679779,
"learning_rate": 1.0690063111808447e-06,
"loss": 0.0887,
"step": 993
},
{
"epoch": 0.8,
"grad_norm": 0.696208655834198,
"learning_rate": 1.0610103603089345e-06,
"loss": 0.0948,
"step": 994
},
{
"epoch": 0.8,
"grad_norm": 1.1610864400863647,
"learning_rate": 1.0530408748759485e-06,
"loss": 0.6773,
"step": 995
},
{
"epoch": 0.8,
"grad_norm": 1.2443311214447021,
"learning_rate": 1.045097908427582e-06,
"loss": 0.7365,
"step": 996
},
{
"epoch": 0.8,
"grad_norm": 0.7688786387443542,
"learning_rate": 1.0371815143313502e-06,
"loss": 0.0848,
"step": 997
},
{
"epoch": 0.8,
"grad_norm": 0.2692795991897583,
"learning_rate": 1.0292917457762325e-06,
"loss": 0.2403,
"step": 998
},
{
"epoch": 0.8,
"grad_norm": 1.074401617050171,
"learning_rate": 1.0214286557723197e-06,
"loss": 0.6623,
"step": 999
},
{
"epoch": 0.8,
"grad_norm": 0.7074705958366394,
"learning_rate": 1.0135922971504492e-06,
"loss": 0.0987,
"step": 1000
},
{
"epoch": 0.8,
"eval_loss": 0.1691107600927353,
"eval_runtime": 128.9196,
"eval_samples_per_second": 34.13,
"eval_steps_per_second": 1.07,
"step": 1000
},
{
"epoch": 0.8,
"grad_norm": 0.27079758048057556,
"learning_rate": 1.0057827225618556e-06,
"loss": 0.232,
"step": 1001
},
{
"epoch": 0.8,
"grad_norm": 0.2587214410305023,
"learning_rate": 9.979999844778203e-07,
"loss": 0.2482,
"step": 1002
},
{
"epoch": 0.8,
"grad_norm": 0.6793025732040405,
"learning_rate": 9.902441351893061e-07,
"loss": 0.0864,
"step": 1003
},
{
"epoch": 0.8,
"grad_norm": 0.22912313044071198,
"learning_rate": 9.825152268066213e-07,
"loss": 0.2333,
"step": 1004
},
{
"epoch": 0.8,
"grad_norm": 0.38006147742271423,
"learning_rate": 9.748133112590624e-07,
"loss": 0.262,
"step": 1005
},
{
"epoch": 0.8,
"grad_norm": 0.22492729127407074,
"learning_rate": 9.671384402945588e-07,
"loss": 0.2334,
"step": 1006
},
{
"epoch": 0.81,
"grad_norm": 0.6469173431396484,
"learning_rate": 9.59490665479339e-07,
"loss": 0.0824,
"step": 1007
},
{
"epoch": 0.81,
"grad_norm": 0.7131785750389099,
"learning_rate": 9.518700381975754e-07,
"loss": 0.0829,
"step": 1008
},
{
"epoch": 0.81,
"grad_norm": 0.6592961549758911,
"learning_rate": 9.442766096510353e-07,
"loss": 0.0896,
"step": 1009
},
{
"epoch": 0.81,
"grad_norm": 0.7779393196105957,
"learning_rate": 9.367104308587493e-07,
"loss": 0.0886,
"step": 1010
},
{
"epoch": 0.81,
"grad_norm": 0.271415650844574,
"learning_rate": 9.291715526566563e-07,
"loss": 0.2614,
"step": 1011
},
{
"epoch": 0.81,
"grad_norm": 0.2636536955833435,
"learning_rate": 9.216600256972669e-07,
"loss": 0.224,
"step": 1012
},
{
"epoch": 0.81,
"grad_norm": 0.2453872114419937,
"learning_rate": 9.141759004493283e-07,
"loss": 0.2497,
"step": 1013
},
{
"epoch": 0.81,
"grad_norm": 1.3394646644592285,
"learning_rate": 9.06719227197474e-07,
"loss": 0.7238,
"step": 1014
},
{
"epoch": 0.81,
"grad_norm": 0.7085133194923401,
"learning_rate": 8.992900560418932e-07,
"loss": 0.0895,
"step": 1015
},
{
"epoch": 0.81,
"grad_norm": 0.7438020706176758,
"learning_rate": 8.918884368979969e-07,
"loss": 0.1022,
"step": 1016
},
{
"epoch": 0.81,
"grad_norm": 0.712546169757843,
"learning_rate": 8.845144194960748e-07,
"loss": 0.0881,
"step": 1017
},
{
"epoch": 0.81,
"grad_norm": 0.3092121481895447,
"learning_rate": 8.771680533809634e-07,
"loss": 0.2447,
"step": 1018
},
{
"epoch": 0.82,
"grad_norm": 0.20704470574855804,
"learning_rate": 8.698493879117209e-07,
"loss": 0.2223,
"step": 1019
},
{
"epoch": 0.82,
"grad_norm": 0.3531644344329834,
"learning_rate": 8.625584722612829e-07,
"loss": 0.258,
"step": 1020
},
{
"epoch": 0.82,
"grad_norm": 0.6483511328697205,
"learning_rate": 8.552953554161408e-07,
"loss": 0.0818,
"step": 1021
},
{
"epoch": 0.82,
"grad_norm": 1.1429930925369263,
"learning_rate": 8.480600861760124e-07,
"loss": 0.6865,
"step": 1022
},
{
"epoch": 0.82,
"grad_norm": 0.24745474755764008,
"learning_rate": 8.408527131535088e-07,
"loss": 0.238,
"step": 1023
},
{
"epoch": 0.82,
"grad_norm": 0.7191245555877686,
"learning_rate": 8.336732847738116e-07,
"loss": 0.0786,
"step": 1024
},
{
"epoch": 0.82,
"grad_norm": 0.2668357193470001,
"learning_rate": 8.265218492743498e-07,
"loss": 0.2253,
"step": 1025
},
{
"epoch": 0.82,
"grad_norm": 0.2478071004152298,
"learning_rate": 8.193984547044659e-07,
"loss": 0.2466,
"step": 1026
},
{
"epoch": 0.82,
"grad_norm": 0.6562392115592957,
"learning_rate": 8.123031489251082e-07,
"loss": 0.0684,
"step": 1027
},
{
"epoch": 0.82,
"grad_norm": 0.29157230257987976,
"learning_rate": 8.052359796084952e-07,
"loss": 0.259,
"step": 1028
},
{
"epoch": 0.82,
"grad_norm": 1.2103424072265625,
"learning_rate": 7.981969942378021e-07,
"loss": 0.6847,
"step": 1029
},
{
"epoch": 0.82,
"grad_norm": 0.3412635624408722,
"learning_rate": 7.911862401068431e-07,
"loss": 0.2494,
"step": 1030
},
{
"epoch": 0.82,
"grad_norm": 0.24211619794368744,
"learning_rate": 7.842037643197492e-07,
"loss": 0.244,
"step": 1031
},
{
"epoch": 0.83,
"grad_norm": 0.7716163396835327,
"learning_rate": 7.772496137906527e-07,
"loss": 0.1042,
"step": 1032
},
{
"epoch": 0.83,
"grad_norm": 0.7771759033203125,
"learning_rate": 7.703238352433762e-07,
"loss": 0.1023,
"step": 1033
},
{
"epoch": 0.83,
"grad_norm": 1.2187319993972778,
"learning_rate": 7.634264752111131e-07,
"loss": 0.7273,
"step": 1034
},
{
"epoch": 0.83,
"grad_norm": 0.6588415503501892,
"learning_rate": 7.565575800361169e-07,
"loss": 0.0924,
"step": 1035
},
{
"epoch": 0.83,
"grad_norm": 0.25650665163993835,
"learning_rate": 7.497171958693927e-07,
"loss": 0.2379,
"step": 1036
},
{
"epoch": 0.83,
"grad_norm": 0.2609238624572754,
"learning_rate": 7.429053686703835e-07,
"loss": 0.2341,
"step": 1037
},
{
"epoch": 0.83,
"grad_norm": 0.25511491298675537,
"learning_rate": 7.361221442066607e-07,
"loss": 0.2381,
"step": 1038
},
{
"epoch": 0.83,
"grad_norm": 0.7682725787162781,
"learning_rate": 7.293675680536227e-07,
"loss": 0.0921,
"step": 1039
},
{
"epoch": 0.83,
"grad_norm": 1.1804888248443604,
"learning_rate": 7.226416855941814e-07,
"loss": 0.7208,
"step": 1040
},
{
"epoch": 0.83,
"grad_norm": 0.7506102919578552,
"learning_rate": 7.159445420184591e-07,
"loss": 0.0906,
"step": 1041
},
{
"epoch": 0.83,
"grad_norm": 0.22670745849609375,
"learning_rate": 7.092761823234911e-07,
"loss": 0.224,
"step": 1042
},
{
"epoch": 0.83,
"grad_norm": 0.281209796667099,
"learning_rate": 7.02636651312914e-07,
"loss": 0.2577,
"step": 1043
},
{
"epoch": 0.84,
"grad_norm": 0.6177349090576172,
"learning_rate": 6.960259935966712e-07,
"loss": 0.0686,
"step": 1044
},
{
"epoch": 0.84,
"grad_norm": 0.723450243473053,
"learning_rate": 6.894442535907086e-07,
"loss": 0.104,
"step": 1045
},
{
"epoch": 0.84,
"grad_norm": 0.7044495344161987,
"learning_rate": 6.828914755166826e-07,
"loss": 0.0863,
"step": 1046
},
{
"epoch": 0.84,
"grad_norm": 0.27352821826934814,
"learning_rate": 6.763677034016569e-07,
"loss": 0.2584,
"step": 1047
},
{
"epoch": 0.84,
"grad_norm": 0.2998514473438263,
"learning_rate": 6.698729810778065e-07,
"loss": 0.2373,
"step": 1048
},
{
"epoch": 0.84,
"grad_norm": 0.24910956621170044,
"learning_rate": 6.63407352182131e-07,
"loss": 0.2414,
"step": 1049
},
{
"epoch": 0.84,
"grad_norm": 0.19957736134529114,
"learning_rate": 6.569708601561515e-07,
"loss": 0.2249,
"step": 1050
},
{
"epoch": 0.84,
"eval_loss": 0.16869135200977325,
"eval_runtime": 164.227,
"eval_samples_per_second": 26.792,
"eval_steps_per_second": 0.84,
"step": 1050
},
{
"epoch": 0.84,
"grad_norm": 0.34233665466308594,
"learning_rate": 6.505635482456229e-07,
"loss": 0.2497,
"step": 1051
},
{
"epoch": 0.84,
"grad_norm": 0.2971556782722473,
"learning_rate": 6.441854595002478e-07,
"loss": 0.2426,
"step": 1052
},
{
"epoch": 0.84,
"grad_norm": 0.2889886498451233,
"learning_rate": 6.378366367733791e-07,
"loss": 0.2521,
"step": 1053
},
{
"epoch": 0.84,
"grad_norm": 0.7054500579833984,
"learning_rate": 6.315171227217365e-07,
"loss": 0.0862,
"step": 1054
},
{
"epoch": 0.84,
"grad_norm": 0.2858125865459442,
"learning_rate": 6.252269598051219e-07,
"loss": 0.2348,
"step": 1055
},
{
"epoch": 0.84,
"grad_norm": 0.24050450325012207,
"learning_rate": 6.189661902861288e-07,
"loss": 0.2344,
"step": 1056
},
{
"epoch": 0.85,
"grad_norm": 1.1963467597961426,
"learning_rate": 6.127348562298619e-07,
"loss": 0.7081,
"step": 1057
},
{
"epoch": 0.85,
"grad_norm": 0.22256433963775635,
"learning_rate": 6.065329995036573e-07,
"loss": 0.2307,
"step": 1058
},
{
"epoch": 0.85,
"grad_norm": 0.23858867585659027,
"learning_rate": 6.003606617767893e-07,
"loss": 0.23,
"step": 1059
},
{
"epoch": 0.85,
"grad_norm": 0.6338880062103271,
"learning_rate": 5.942178845202079e-07,
"loss": 0.0824,
"step": 1060
},
{
"epoch": 0.85,
"grad_norm": 0.23957502841949463,
"learning_rate": 5.881047090062475e-07,
"loss": 0.2421,
"step": 1061
},
{
"epoch": 0.85,
"grad_norm": 1.1224992275238037,
"learning_rate": 5.820211763083494e-07,
"loss": 0.7007,
"step": 1062
},
{
"epoch": 0.85,
"grad_norm": 0.21783921122550964,
"learning_rate": 5.759673273007954e-07,
"loss": 0.2374,
"step": 1063
},
{
"epoch": 0.85,
"grad_norm": 0.2673005163669586,
"learning_rate": 5.699432026584267e-07,
"loss": 0.2312,
"step": 1064
},
{
"epoch": 0.85,
"grad_norm": 0.7608351111412048,
"learning_rate": 5.639488428563655e-07,
"loss": 0.09,
"step": 1065
},
{
"epoch": 0.85,
"grad_norm": 0.21611708402633667,
"learning_rate": 5.579842881697556e-07,
"loss": 0.2542,
"step": 1066
},
{
"epoch": 0.85,
"grad_norm": 0.7400150895118713,
"learning_rate": 5.520495786734814e-07,
"loss": 0.0944,
"step": 1067
},
{
"epoch": 0.85,
"grad_norm": 0.2647181451320648,
"learning_rate": 5.461447542419018e-07,
"loss": 0.2605,
"step": 1068
},
{
"epoch": 0.86,
"grad_norm": 0.23510676622390747,
"learning_rate": 5.402698545485869e-07,
"loss": 0.2445,
"step": 1069
},
{
"epoch": 0.86,
"grad_norm": 0.6659393310546875,
"learning_rate": 5.344249190660427e-07,
"loss": 0.086,
"step": 1070
},
{
"epoch": 0.86,
"grad_norm": 1.1414800882339478,
"learning_rate": 5.286099870654515e-07,
"loss": 0.6666,
"step": 1071
},
{
"epoch": 0.86,
"grad_norm": 0.2836971879005432,
"learning_rate": 5.228250976164096e-07,
"loss": 0.2291,
"step": 1072
},
{
"epoch": 0.86,
"grad_norm": 0.25476551055908203,
"learning_rate": 5.170702895866591e-07,
"loss": 0.2451,
"step": 1073
},
{
"epoch": 0.86,
"grad_norm": 0.7066990733146667,
"learning_rate": 5.113456016418305e-07,
"loss": 0.0945,
"step": 1074
},
{
"epoch": 0.86,
"grad_norm": 0.24966813623905182,
"learning_rate": 5.056510722451862e-07,
"loss": 0.2287,
"step": 1075
},
{
"epoch": 0.86,
"grad_norm": 0.6916797757148743,
"learning_rate": 4.999867396573499e-07,
"loss": 0.0709,
"step": 1076
},
{
"epoch": 0.86,
"grad_norm": 0.7006617188453674,
"learning_rate": 4.943526419360661e-07,
"loss": 0.0933,
"step": 1077
},
{
"epoch": 0.86,
"grad_norm": 0.6635639071464539,
"learning_rate": 4.88748816935934e-07,
"loss": 0.0789,
"step": 1078
},
{
"epoch": 0.86,
"grad_norm": 0.26977306604385376,
"learning_rate": 4.831753023081493e-07,
"loss": 0.2559,
"step": 1079
},
{
"epoch": 0.86,
"grad_norm": 0.2267359048128128,
"learning_rate": 4.77632135500265e-07,
"loss": 0.2287,
"step": 1080
},
{
"epoch": 0.86,
"grad_norm": 0.2883506119251251,
"learning_rate": 4.72119353755931e-07,
"loss": 0.2325,
"step": 1081
},
{
"epoch": 0.87,
"grad_norm": 1.2782996892929077,
"learning_rate": 4.666369941146376e-07,
"loss": 0.7098,
"step": 1082
},
{
"epoch": 0.87,
"grad_norm": 0.698481559753418,
"learning_rate": 4.611850934114825e-07,
"loss": 0.0696,
"step": 1083
},
{
"epoch": 0.87,
"grad_norm": 0.7168501019477844,
"learning_rate": 4.557636882769101e-07,
"loss": 0.1106,
"step": 1084
},
{
"epoch": 0.87,
"grad_norm": 0.25228315591812134,
"learning_rate": 4.5037281513647e-07,
"loss": 0.2532,
"step": 1085
},
{
"epoch": 0.87,
"grad_norm": 0.22020013630390167,
"learning_rate": 4.4501251021057566e-07,
"loss": 0.226,
"step": 1086
},
{
"epoch": 0.87,
"grad_norm": 0.29458579421043396,
"learning_rate": 4.3968280951425356e-07,
"loss": 0.2318,
"step": 1087
},
{
"epoch": 0.87,
"grad_norm": 0.2559264302253723,
"learning_rate": 4.343837488569058e-07,
"loss": 0.2303,
"step": 1088
},
{
"epoch": 0.87,
"grad_norm": 0.2425624132156372,
"learning_rate": 4.291153638420731e-07,
"loss": 0.2478,
"step": 1089
},
{
"epoch": 0.87,
"grad_norm": 0.6711142063140869,
"learning_rate": 4.2387768986718644e-07,
"loss": 0.0766,
"step": 1090
},
{
"epoch": 0.87,
"grad_norm": 0.33298006653785706,
"learning_rate": 4.1867076212333603e-07,
"loss": 0.2267,
"step": 1091
},
{
"epoch": 0.87,
"grad_norm": 1.3318347930908203,
"learning_rate": 4.134946155950348e-07,
"loss": 0.6411,
"step": 1092
},
{
"epoch": 0.87,
"grad_norm": 0.2536270320415497,
"learning_rate": 4.0834928505997907e-07,
"loss": 0.2276,
"step": 1093
},
{
"epoch": 0.88,
"grad_norm": 0.3227570354938507,
"learning_rate": 4.03234805088818e-07,
"loss": 0.2366,
"step": 1094
},
{
"epoch": 0.88,
"grad_norm": 0.2696918249130249,
"learning_rate": 3.981512100449231e-07,
"loss": 0.2724,
"step": 1095
},
{
"epoch": 0.88,
"grad_norm": 0.7151411175727844,
"learning_rate": 3.9309853408415253e-07,
"loss": 0.0838,
"step": 1096
},
{
"epoch": 0.88,
"grad_norm": 0.2688385844230652,
"learning_rate": 3.8807681115462294e-07,
"loss": 0.2345,
"step": 1097
},
{
"epoch": 0.88,
"grad_norm": 0.7291595935821533,
"learning_rate": 3.8308607499648765e-07,
"loss": 0.0999,
"step": 1098
},
{
"epoch": 0.88,
"grad_norm": 0.20649194717407227,
"learning_rate": 3.781263591416989e-07,
"loss": 0.2375,
"step": 1099
},
{
"epoch": 0.88,
"grad_norm": 0.2302989959716797,
"learning_rate": 3.7319769691379295e-07,
"loss": 0.2275,
"step": 1100
},
{
"epoch": 0.88,
"eval_loss": 0.16864913702011108,
"eval_runtime": 128.9864,
"eval_samples_per_second": 34.112,
"eval_steps_per_second": 1.07,
"step": 1100
},
{
"epoch": 0.88,
"grad_norm": 1.1351572275161743,
"learning_rate": 3.683001214276577e-07,
"loss": 0.7099,
"step": 1101
},
{
"epoch": 0.88,
"grad_norm": 0.7533503174781799,
"learning_rate": 3.634336655893189e-07,
"loss": 0.0992,
"step": 1102
},
{
"epoch": 0.88,
"grad_norm": 0.6613625884056091,
"learning_rate": 3.585983620957112e-07,
"loss": 0.0718,
"step": 1103
},
{
"epoch": 0.88,
"grad_norm": 0.25793662667274475,
"learning_rate": 3.53794243434461e-07,
"loss": 0.2466,
"step": 1104
},
{
"epoch": 0.88,
"grad_norm": 0.20211161673069,
"learning_rate": 3.4902134188367187e-07,
"loss": 0.2377,
"step": 1105
},
{
"epoch": 0.88,
"grad_norm": 0.23606599867343903,
"learning_rate": 3.4427968951170287e-07,
"loss": 0.2395,
"step": 1106
},
{
"epoch": 0.89,
"grad_norm": 0.2653366029262543,
"learning_rate": 3.3956931817695326e-07,
"loss": 0.2509,
"step": 1107
},
{
"epoch": 0.89,
"grad_norm": 0.7437542676925659,
"learning_rate": 3.348902595276543e-07,
"loss": 0.0994,
"step": 1108
},
{
"epoch": 0.89,
"grad_norm": 0.28426849842071533,
"learning_rate": 3.302425450016478e-07,
"loss": 0.2369,
"step": 1109
},
{
"epoch": 0.89,
"grad_norm": 1.1791760921478271,
"learning_rate": 3.256262058261816e-07,
"loss": 0.7085,
"step": 1110
},
{
"epoch": 0.89,
"grad_norm": 0.6732091903686523,
"learning_rate": 3.2104127301769873e-07,
"loss": 0.0771,
"step": 1111
},
{
"epoch": 0.89,
"grad_norm": 0.344350665807724,
"learning_rate": 3.1648777738162496e-07,
"loss": 0.2491,
"step": 1112
},
{
"epoch": 0.89,
"grad_norm": 0.6799191832542419,
"learning_rate": 3.1196574951216693e-07,
"loss": 0.0716,
"step": 1113
},
{
"epoch": 0.89,
"grad_norm": 0.7077190279960632,
"learning_rate": 3.0747521979210436e-07,
"loss": 0.0815,
"step": 1114
},
{
"epoch": 0.89,
"grad_norm": 0.6257649064064026,
"learning_rate": 3.03016218392585e-07,
"loss": 0.0744,
"step": 1115
},
{
"epoch": 0.89,
"grad_norm": 0.21694709360599518,
"learning_rate": 2.985887752729222e-07,
"loss": 0.2238,
"step": 1116
},
{
"epoch": 0.89,
"grad_norm": 0.31290382146835327,
"learning_rate": 2.9419292018039834e-07,
"loss": 0.2363,
"step": 1117
},
{
"epoch": 0.89,
"grad_norm": 0.6019454002380371,
"learning_rate": 2.8982868265005457e-07,
"loss": 0.0739,
"step": 1118
},
{
"epoch": 0.9,
"grad_norm": 0.665901780128479,
"learning_rate": 2.854960920045036e-07,
"loss": 0.0745,
"step": 1119
},
{
"epoch": 0.9,
"grad_norm": 0.21621929109096527,
"learning_rate": 2.811951773537275e-07,
"loss": 0.2287,
"step": 1120
},
{
"epoch": 0.9,
"grad_norm": 0.24231766164302826,
"learning_rate": 2.7692596759487877e-07,
"loss": 0.2371,
"step": 1121
},
{
"epoch": 0.9,
"grad_norm": 0.2319820374250412,
"learning_rate": 2.726884914120936e-07,
"loss": 0.2451,
"step": 1122
},
{
"epoch": 0.9,
"grad_norm": 0.2858652174472809,
"learning_rate": 2.6848277727629547e-07,
"loss": 0.245,
"step": 1123
},
{
"epoch": 0.9,
"grad_norm": 0.6982702612876892,
"learning_rate": 2.6430885344499944e-07,
"loss": 0.0773,
"step": 1124
},
{
"epoch": 0.9,
"grad_norm": 1.0747313499450684,
"learning_rate": 2.601667479621317e-07,
"loss": 0.6946,
"step": 1125
},
{
"epoch": 0.9,
"grad_norm": 0.2545463442802429,
"learning_rate": 2.5605648865783315e-07,
"loss": 0.2366,
"step": 1126
},
{
"epoch": 0.9,
"grad_norm": 0.22086362540721893,
"learning_rate": 2.519781031482754e-07,
"loss": 0.2413,
"step": 1127
},
{
"epoch": 0.9,
"grad_norm": 0.6967388391494751,
"learning_rate": 2.47931618835478e-07,
"loss": 0.0779,
"step": 1128
},
{
"epoch": 0.9,
"grad_norm": 0.6539435982704163,
"learning_rate": 2.4391706290711745e-07,
"loss": 0.0646,
"step": 1129
},
{
"epoch": 0.9,
"grad_norm": 0.34695157408714294,
"learning_rate": 2.399344623363503e-07,
"loss": 0.2277,
"step": 1130
},
{
"epoch": 0.9,
"grad_norm": 0.7515861392021179,
"learning_rate": 2.3598384388163198e-07,
"loss": 0.0931,
"step": 1131
},
{
"epoch": 0.91,
"grad_norm": 1.1696515083312988,
"learning_rate": 2.3206523408653202e-07,
"loss": 0.6968,
"step": 1132
},
{
"epoch": 0.91,
"grad_norm": 0.2860327661037445,
"learning_rate": 2.2817865927956095e-07,
"loss": 0.2538,
"step": 1133
},
{
"epoch": 0.91,
"grad_norm": 0.24429316818714142,
"learning_rate": 2.2432414557399197e-07,
"loss": 0.2411,
"step": 1134
},
{
"epoch": 0.91,
"grad_norm": 0.686296284198761,
"learning_rate": 2.2050171886768113e-07,
"loss": 0.0731,
"step": 1135
},
{
"epoch": 0.91,
"grad_norm": 0.7920968532562256,
"learning_rate": 2.1671140484290144e-07,
"loss": 0.0979,
"step": 1136
},
{
"epoch": 0.91,
"grad_norm": 0.6936158537864685,
"learning_rate": 2.129532289661651e-07,
"loss": 0.0946,
"step": 1137
},
{
"epoch": 0.91,
"grad_norm": 0.27205023169517517,
"learning_rate": 2.0922721648805045e-07,
"loss": 0.2383,
"step": 1138
},
{
"epoch": 0.91,
"grad_norm": 1.1254873275756836,
"learning_rate": 2.055333924430375e-07,
"loss": 0.6755,
"step": 1139
},
{
"epoch": 0.91,
"grad_norm": 1.2394695281982422,
"learning_rate": 2.018717816493393e-07,
"loss": 0.6923,
"step": 1140
},
{
"epoch": 0.91,
"grad_norm": 0.2013547122478485,
"learning_rate": 1.98242408708727e-07,
"loss": 0.2348,
"step": 1141
},
{
"epoch": 0.91,
"grad_norm": 0.7502107620239258,
"learning_rate": 1.9464529800637731e-07,
"loss": 0.0867,
"step": 1142
},
{
"epoch": 0.91,
"grad_norm": 0.7319158911705017,
"learning_rate": 1.9108047371069917e-07,
"loss": 0.0893,
"step": 1143
},
{
"epoch": 0.92,
"grad_norm": 0.6257764101028442,
"learning_rate": 1.875479597731733e-07,
"loss": 0.0869,
"step": 1144
},
{
"epoch": 0.92,
"grad_norm": 0.7535856366157532,
"learning_rate": 1.8404777992819533e-07,
"loss": 0.1001,
"step": 1145
},
{
"epoch": 0.92,
"grad_norm": 0.35098588466644287,
"learning_rate": 1.805799576929107e-07,
"loss": 0.2074,
"step": 1146
},
{
"epoch": 0.92,
"grad_norm": 1.2048077583312988,
"learning_rate": 1.7714451636705933e-07,
"loss": 0.7034,
"step": 1147
},
{
"epoch": 0.92,
"grad_norm": 0.19751304388046265,
"learning_rate": 1.737414790328218e-07,
"loss": 0.2203,
"step": 1148
},
{
"epoch": 0.92,
"grad_norm": 0.21580639481544495,
"learning_rate": 1.7037086855465902e-07,
"loss": 0.2215,
"step": 1149
},
{
"epoch": 0.92,
"grad_norm": 0.19512581825256348,
"learning_rate": 1.6703270757916e-07,
"loss": 0.2403,
"step": 1150
},
{
"epoch": 0.92,
"eval_loss": 0.16860051453113556,
"eval_runtime": 132.804,
"eval_samples_per_second": 33.132,
"eval_steps_per_second": 1.039,
"step": 1150
},
{
"epoch": 0.92,
"grad_norm": 0.6464491486549377,
"learning_rate": 1.6372701853489438e-07,
"loss": 0.0715,
"step": 1151
},
{
"epoch": 0.92,
"grad_norm": 0.2787306606769562,
"learning_rate": 1.604538236322556e-07,
"loss": 0.2593,
"step": 1152
},
{
"epoch": 0.92,
"grad_norm": 0.6717149615287781,
"learning_rate": 1.5721314486331352e-07,
"loss": 0.086,
"step": 1153
},
{
"epoch": 0.92,
"grad_norm": 0.6615636348724365,
"learning_rate": 1.540050040016694e-07,
"loss": 0.0727,
"step": 1154
},
{
"epoch": 0.92,
"grad_norm": 0.2032528519630432,
"learning_rate": 1.508294226023066e-07,
"loss": 0.2382,
"step": 1155
},
{
"epoch": 0.92,
"grad_norm": 0.6388039588928223,
"learning_rate": 1.4768642200144677e-07,
"loss": 0.0782,
"step": 1156
},
{
"epoch": 0.93,
"grad_norm": 0.8168476819992065,
"learning_rate": 1.4457602331640507e-07,
"loss": 0.0881,
"step": 1157
},
{
"epoch": 0.93,
"grad_norm": 0.2804236114025116,
"learning_rate": 1.414982474454524e-07,
"loss": 0.24,
"step": 1158
},
{
"epoch": 0.93,
"grad_norm": 0.25469687581062317,
"learning_rate": 1.384531150676699e-07,
"loss": 0.2372,
"step": 1159
},
{
"epoch": 0.93,
"grad_norm": 0.25749433040618896,
"learning_rate": 1.3544064664281266e-07,
"loss": 0.2446,
"step": 1160
},
{
"epoch": 0.93,
"grad_norm": 0.7562451362609863,
"learning_rate": 1.324608624111734e-07,
"loss": 0.0849,
"step": 1161
},
{
"epoch": 0.93,
"grad_norm": 0.2554783225059509,
"learning_rate": 1.2951378239344337e-07,
"loss": 0.2544,
"step": 1162
},
{
"epoch": 0.93,
"grad_norm": 0.2207408994436264,
"learning_rate": 1.2659942639057954e-07,
"loss": 0.233,
"step": 1163
},
{
"epoch": 0.93,
"grad_norm": 1.5055978298187256,
"learning_rate": 1.237178139836731e-07,
"loss": 0.6535,
"step": 1164
},
{
"epoch": 0.93,
"grad_norm": 0.26087498664855957,
"learning_rate": 1.2086896453381403e-07,
"loss": 0.2462,
"step": 1165
},
{
"epoch": 0.93,
"grad_norm": 0.623890221118927,
"learning_rate": 1.1805289718196499e-07,
"loss": 0.0745,
"step": 1166
},
{
"epoch": 0.93,
"grad_norm": 0.21314860880374908,
"learning_rate": 1.1526963084882992e-07,
"loss": 0.2296,
"step": 1167
},
{
"epoch": 0.93,
"grad_norm": 0.21873103082180023,
"learning_rate": 1.1251918423472896e-07,
"loss": 0.2302,
"step": 1168
},
{
"epoch": 0.94,
"grad_norm": 0.6766714453697205,
"learning_rate": 1.0980157581947038e-07,
"loss": 0.0862,
"step": 1169
},
{
"epoch": 0.94,
"grad_norm": 0.2645447254180908,
"learning_rate": 1.0711682386222943e-07,
"loss": 0.2596,
"step": 1170
},
{
"epoch": 0.94,
"grad_norm": 0.2341335266828537,
"learning_rate": 1.0446494640142413e-07,
"loss": 0.2371,
"step": 1171
},
{
"epoch": 0.94,
"grad_norm": 0.2846052646636963,
"learning_rate": 1.0184596125459134e-07,
"loss": 0.2498,
"step": 1172
},
{
"epoch": 0.94,
"grad_norm": 0.6258840560913086,
"learning_rate": 9.925988601827419e-08,
"loss": 0.077,
"step": 1173
},
{
"epoch": 0.94,
"grad_norm": 0.23131778836250305,
"learning_rate": 9.670673806789543e-08,
"loss": 0.2396,
"step": 1174
},
{
"epoch": 0.94,
"grad_norm": 0.3215251564979553,
"learning_rate": 9.418653455764593e-08,
"loss": 0.2531,
"step": 1175
},
{
"epoch": 0.94,
"grad_norm": 0.21265847980976105,
"learning_rate": 9.169929242036967e-08,
"loss": 0.2255,
"step": 1176
},
{
"epoch": 0.94,
"grad_norm": 0.8155443072319031,
"learning_rate": 8.924502836744564e-08,
"loss": 0.0856,
"step": 1177
},
{
"epoch": 0.94,
"grad_norm": 0.6700699925422668,
"learning_rate": 8.682375888868167e-08,
"loss": 0.0778,
"step": 1178
},
{
"epoch": 0.94,
"grad_norm": 0.2133197784423828,
"learning_rate": 8.443550025219793e-08,
"loss": 0.2398,
"step": 1179
},
{
"epoch": 0.94,
"grad_norm": 0.23686644434928894,
"learning_rate": 8.208026850431983e-08,
"loss": 0.2429,
"step": 1180
},
{
"epoch": 0.94,
"grad_norm": 0.272497296333313,
"learning_rate": 7.975807946947245e-08,
"loss": 0.2622,
"step": 1181
},
{
"epoch": 0.95,
"grad_norm": 0.26796138286590576,
"learning_rate": 7.746894875007016e-08,
"loss": 0.2331,
"step": 1182
},
{
"epoch": 0.95,
"grad_norm": 0.7658330798149109,
"learning_rate": 7.521289172641555e-08,
"loss": 0.0874,
"step": 1183
},
{
"epoch": 0.95,
"grad_norm": 0.3211049735546112,
"learning_rate": 7.29899235565934e-08,
"loss": 0.2295,
"step": 1184
},
{
"epoch": 0.95,
"grad_norm": 1.1338473558425903,
"learning_rate": 7.080005917636968e-08,
"loss": 0.7104,
"step": 1185
},
{
"epoch": 0.95,
"grad_norm": 1.1308341026306152,
"learning_rate": 6.864331329909102e-08,
"loss": 0.7023,
"step": 1186
},
{
"epoch": 0.95,
"grad_norm": 0.6929430961608887,
"learning_rate": 6.651970041558764e-08,
"loss": 0.0836,
"step": 1187
},
{
"epoch": 0.95,
"grad_norm": 0.2531970143318176,
"learning_rate": 6.442923479407337e-08,
"loss": 0.2517,
"step": 1188
},
{
"epoch": 0.95,
"grad_norm": 0.22909294068813324,
"learning_rate": 6.237193048005019e-08,
"loss": 0.2446,
"step": 1189
},
{
"epoch": 0.95,
"grad_norm": 0.20393189787864685,
"learning_rate": 6.034780129621664e-08,
"loss": 0.2369,
"step": 1190
},
{
"epoch": 0.95,
"grad_norm": 0.2690567076206207,
"learning_rate": 5.8356860842370685e-08,
"loss": 0.2654,
"step": 1191
},
{
"epoch": 0.95,
"grad_norm": 0.25129109621047974,
"learning_rate": 5.639912249532198e-08,
"loss": 0.2409,
"step": 1192
},
{
"epoch": 0.95,
"grad_norm": 0.7960008978843689,
"learning_rate": 5.447459940880084e-08,
"loss": 0.0942,
"step": 1193
},
{
"epoch": 0.96,
"grad_norm": 0.6810144782066345,
"learning_rate": 5.258330451336724e-08,
"loss": 0.0746,
"step": 1194
},
{
"epoch": 0.96,
"grad_norm": 1.2213472127914429,
"learning_rate": 5.072525051632915e-08,
"loss": 0.6947,
"step": 1195
},
{
"epoch": 0.96,
"grad_norm": 0.21284088492393494,
"learning_rate": 4.8900449901653214e-08,
"loss": 0.2344,
"step": 1196
},
{
"epoch": 0.96,
"grad_norm": 1.0726029872894287,
"learning_rate": 4.710891492988035e-08,
"loss": 0.6759,
"step": 1197
},
{
"epoch": 0.96,
"grad_norm": 0.2358914166688919,
"learning_rate": 4.535065763804802e-08,
"loss": 0.2426,
"step": 1198
},
{
"epoch": 0.96,
"grad_norm": 0.7339521050453186,
"learning_rate": 4.3625689839603694e-08,
"loss": 0.1047,
"step": 1199
},
{
"epoch": 0.96,
"grad_norm": 0.27887779474258423,
"learning_rate": 4.193402312432926e-08,
"loss": 0.2277,
"step": 1200
},
{
"epoch": 0.96,
"eval_loss": 0.16857390105724335,
"eval_runtime": 237.6621,
"eval_samples_per_second": 18.514,
"eval_steps_per_second": 0.581,
"step": 1200
},
{
"epoch": 0.96,
"grad_norm": 0.6925386190414429,
"learning_rate": 4.027566885826173e-08,
"loss": 0.0857,
"step": 1201
},
{
"epoch": 0.96,
"grad_norm": 1.1317211389541626,
"learning_rate": 3.8650638183617695e-08,
"loss": 0.6916,
"step": 1202
},
{
"epoch": 0.96,
"grad_norm": 0.7401204109191895,
"learning_rate": 3.705894201871618e-08,
"loss": 0.0845,
"step": 1203
},
{
"epoch": 0.96,
"grad_norm": 1.1506571769714355,
"learning_rate": 3.550059105790926e-08,
"loss": 0.6899,
"step": 1204
},
{
"epoch": 0.96,
"grad_norm": 0.24000149965286255,
"learning_rate": 3.3975595771505996e-08,
"loss": 0.234,
"step": 1205
},
{
"epoch": 0.96,
"grad_norm": 0.3384413719177246,
"learning_rate": 3.248396640570528e-08,
"loss": 0.2435,
"step": 1206
},
{
"epoch": 0.97,
"grad_norm": 0.6880319118499756,
"learning_rate": 3.102571298252588e-08,
"loss": 0.0823,
"step": 1207
},
{
"epoch": 0.97,
"grad_norm": 0.22655218839645386,
"learning_rate": 2.960084529973706e-08,
"loss": 0.2272,
"step": 1208
},
{
"epoch": 0.97,
"grad_norm": 0.30787375569343567,
"learning_rate": 2.8209372930796953e-08,
"loss": 0.2427,
"step": 1209
},
{
"epoch": 0.97,
"grad_norm": 0.1972927749156952,
"learning_rate": 2.685130522478485e-08,
"loss": 0.2469,
"step": 1210
},
{
"epoch": 0.97,
"grad_norm": 1.1720728874206543,
"learning_rate": 2.552665130633958e-08,
"loss": 0.7036,
"step": 1211
},
{
"epoch": 0.97,
"grad_norm": 0.20404624938964844,
"learning_rate": 2.4235420075597872e-08,
"loss": 0.2277,
"step": 1212
},
{
"epoch": 0.97,
"grad_norm": 0.2189277857542038,
"learning_rate": 2.2977620208135543e-08,
"loss": 0.2367,
"step": 1213
},
{
"epoch": 0.97,
"grad_norm": 0.32372334599494934,
"learning_rate": 2.1753260154906973e-08,
"loss": 0.2338,
"step": 1214
},
{
"epoch": 0.97,
"grad_norm": 0.21352948248386383,
"learning_rate": 2.0562348142191822e-08,
"loss": 0.2478,
"step": 1215
},
{
"epoch": 0.97,
"grad_norm": 0.6892623901367188,
"learning_rate": 1.9404892171536182e-08,
"loss": 0.0826,
"step": 1216
},
{
"epoch": 0.97,
"grad_norm": 0.7555148601531982,
"learning_rate": 1.8280900019701508e-08,
"loss": 0.0838,
"step": 1217
},
{
"epoch": 0.97,
"grad_norm": 0.2220543771982193,
"learning_rate": 1.7190379238609666e-08,
"loss": 0.2343,
"step": 1218
},
{
"epoch": 0.98,
"grad_norm": 0.7003951668739319,
"learning_rate": 1.6133337155294638e-08,
"loss": 0.0727,
"step": 1219
},
{
"epoch": 0.98,
"grad_norm": 0.8174451589584351,
"learning_rate": 1.5109780871853663e-08,
"loss": 0.113,
"step": 1220
},
{
"epoch": 0.98,
"grad_norm": 0.3314027488231659,
"learning_rate": 1.4119717265396182e-08,
"loss": 0.2579,
"step": 1221
},
{
"epoch": 0.98,
"grad_norm": 0.27618908882141113,
"learning_rate": 1.3163152988000527e-08,
"loss": 0.2271,
"step": 1222
},
{
"epoch": 0.98,
"grad_norm": 0.7132062911987305,
"learning_rate": 1.2240094466668406e-08,
"loss": 0.0785,
"step": 1223
},
{
"epoch": 0.98,
"grad_norm": 0.7738111615180969,
"learning_rate": 1.1350547903282716e-08,
"loss": 0.1085,
"step": 1224
},
{
"epoch": 0.98,
"grad_norm": 0.2625243663787842,
"learning_rate": 1.0494519274562576e-08,
"loss": 0.245,
"step": 1225
},
{
"epoch": 0.98,
"grad_norm": 0.20780685544013977,
"learning_rate": 9.672014332028357e-09,
"loss": 0.2435,
"step": 1226
},
{
"epoch": 0.98,
"grad_norm": 0.2389998733997345,
"learning_rate": 8.883038601957827e-09,
"loss": 0.241,
"step": 1227
},
{
"epoch": 0.98,
"grad_norm": 0.6934259533882141,
"learning_rate": 8.127597385352293e-09,
"loss": 0.0893,
"step": 1228
},
{
"epoch": 0.98,
"grad_norm": 0.25668832659721375,
"learning_rate": 7.4056957578999554e-09,
"loss": 0.2389,
"step": 1229
},
{
"epoch": 0.98,
"grad_norm": 0.21722805500030518,
"learning_rate": 6.717338569942611e-09,
"loss": 0.2143,
"step": 1230
},
{
"epoch": 0.98,
"grad_norm": 0.67095947265625,
"learning_rate": 6.062530446440673e-09,
"loss": 0.0782,
"step": 1231
},
{
"epoch": 0.99,
"grad_norm": 0.3172873556613922,
"learning_rate": 5.4412757869459765e-09,
"loss": 0.2519,
"step": 1232
},
{
"epoch": 0.99,
"grad_norm": 0.6598693132400513,
"learning_rate": 4.853578765567357e-09,
"loss": 0.0723,
"step": 1233
},
{
"epoch": 0.99,
"grad_norm": 0.7503123879432678,
"learning_rate": 4.299443330947895e-09,
"loss": 0.0868,
"step": 1234
},
{
"epoch": 0.99,
"grad_norm": 0.2925339937210083,
"learning_rate": 3.778873206234379e-09,
"loss": 0.236,
"step": 1235
},
{
"epoch": 0.99,
"grad_norm": 1.110285758972168,
"learning_rate": 3.291871889053444e-09,
"loss": 0.7011,
"step": 1236
},
{
"epoch": 0.99,
"grad_norm": 0.22674871981143951,
"learning_rate": 2.8384426514893594e-09,
"loss": 0.2358,
"step": 1237
},
{
"epoch": 0.99,
"grad_norm": 0.6210582852363586,
"learning_rate": 2.4185885400596076e-09,
"loss": 0.0669,
"step": 1238
},
{
"epoch": 0.99,
"grad_norm": 1.1709364652633667,
"learning_rate": 2.032312375697121e-09,
"loss": 0.6951,
"step": 1239
},
{
"epoch": 0.99,
"grad_norm": 0.2120194137096405,
"learning_rate": 1.6796167537297403e-09,
"loss": 0.2145,
"step": 1240
},
{
"epoch": 0.99,
"grad_norm": 0.2024804651737213,
"learning_rate": 1.3605040438618989e-09,
"loss": 0.2303,
"step": 1241
},
{
"epoch": 0.99,
"grad_norm": 0.6983115077018738,
"learning_rate": 1.0749763901607425e-09,
"loss": 0.0755,
"step": 1242
},
{
"epoch": 0.99,
"grad_norm": 0.7006844282150269,
"learning_rate": 8.230357110416976e-10,
"loss": 0.0765,
"step": 1243
},
{
"epoch": 1.0,
"grad_norm": 1.195613145828247,
"learning_rate": 6.04683699252373e-10,
"loss": 0.7267,
"step": 1244
},
{
"epoch": 1.0,
"grad_norm": 0.243184894323349,
"learning_rate": 4.199218218658985e-10,
"loss": 0.2422,
"step": 1245
},
{
"epoch": 1.0,
"grad_norm": 0.6484447121620178,
"learning_rate": 2.6875132026760173e-10,
"loss": 0.0858,
"step": 1246
},
{
"epoch": 1.0,
"grad_norm": 0.27762192487716675,
"learning_rate": 1.511732101472374e-10,
"loss": 0.2552,
"step": 1247
},
{
"epoch": 1.0,
"grad_norm": 0.727144718170166,
"learning_rate": 6.718828149343548e-11,
"loss": 0.0895,
"step": 1248
},
{
"epoch": 1.0,
"grad_norm": 1.069000482559204,
"learning_rate": 1.6797098587595372e-11,
"loss": 0.7069,
"step": 1249
},
{
"epoch": 1.0,
"step": 1249,
"total_flos": 9.939587019241947e+18,
"train_loss": 0.2987806487968676,
"train_runtime": 26956.4315,
"train_samples_per_second": 11.874,
"train_steps_per_second": 0.046
}
],
"logging_steps": 1.0,
"max_steps": 1250,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"total_flos": 9.939587019241947e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}