VILA-2.7b / trainer_state.json
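What follows is the trainer state that transformers.Trainer serializes alongside checkpoints: a small header (epoch, global_step, eval_steps, ...) plus a log_history array whose entries record epoch, grad_norm, learning_rate, loss, and step. A minimal sketch for loading and summarizing it (the file name and the field names are assumed from the contents below):

import json

# Load the serialized trainer state (path assumed to be this file).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the log_history entries that carry a training loss.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]
print(f"{len(logs)} logged steps; loss {losses[0]:.4f} -> {losses[-1]:.4f} "
      f"(steps {steps[0]}-{steps[-1]})")
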
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 527,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 18.569529999247134,
"learning_rate": 6.25e-06,
"loss": 2.2344,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 17.774880014109563,
"learning_rate": 1.25e-05,
"loss": 2.2188,
"step": 2
},
{
"epoch": 0.01,
"grad_norm": 16.126910180369833,
"learning_rate": 1.8750000000000002e-05,
"loss": 2.0938,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 4.004205399706303,
"learning_rate": 2.5e-05,
"loss": 1.5625,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 4.475863116578534,
"learning_rate": 3.125e-05,
"loss": 1.5547,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 3.9418457877357183,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.5312,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 3.35497476295258,
"learning_rate": 4.375e-05,
"loss": 1.5312,
"step": 7
},
{
"epoch": 0.02,
"grad_norm": 2.0238486012287824,
"learning_rate": 5e-05,
"loss": 1.4453,
"step": 8
},
{
"epoch": 0.02,
"grad_norm": 1.3025920158121342,
"learning_rate": 5.6250000000000005e-05,
"loss": 1.4062,
"step": 9
},
{
"epoch": 0.02,
"grad_norm": 2.1578221590331648,
"learning_rate": 6.25e-05,
"loss": 1.4141,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 1.639048247445056,
"learning_rate": 6.875e-05,
"loss": 1.3906,
"step": 11
},
{
"epoch": 0.02,
"grad_norm": 1.0758955455274364,
"learning_rate": 7.500000000000001e-05,
"loss": 1.3672,
"step": 12
},
{
"epoch": 0.02,
"grad_norm": 0.9988841624058318,
"learning_rate": 8.125000000000001e-05,
"loss": 1.3516,
"step": 13
},
{
"epoch": 0.03,
"grad_norm": 1.036582323851064,
"learning_rate": 8.75e-05,
"loss": 1.3438,
"step": 14
},
{
"epoch": 0.03,
"grad_norm": 1.0040586950026587,
"learning_rate": 9.375e-05,
"loss": 1.3438,
"step": 15
},
{
"epoch": 0.03,
"grad_norm": 0.8112402128458829,
"learning_rate": 0.0001,
"loss": 1.3047,
"step": 16
},
{
"epoch": 0.03,
"grad_norm": 0.806608090114202,
"learning_rate": 9.999905507663936e-05,
"loss": 1.3203,
"step": 17
},
{
"epoch": 0.03,
"grad_norm": 0.8164159045230494,
"learning_rate": 9.99962203422726e-05,
"loss": 1.2812,
"step": 18
},
{
"epoch": 0.04,
"grad_norm": 0.7335139492736167,
"learning_rate": 9.9991495904044e-05,
"loss": 1.2969,
"step": 19
},
{
"epoch": 0.04,
"grad_norm": 0.799772371827826,
"learning_rate": 9.998488194052287e-05,
"loss": 1.2734,
"step": 20
},
{
"epoch": 0.04,
"grad_norm": 0.6718755777667486,
"learning_rate": 9.997637870169672e-05,
"loss": 1.2734,
"step": 21
},
{
"epoch": 0.04,
"grad_norm": 0.7599864750693303,
"learning_rate": 9.996598650896192e-05,
"loss": 1.2578,
"step": 22
},
{
"epoch": 0.04,
"grad_norm": 0.6886726902695142,
"learning_rate": 9.995370575511151e-05,
"loss": 1.2656,
"step": 23
},
{
"epoch": 0.05,
"grad_norm": 0.6823417918062775,
"learning_rate": 9.993953690432031e-05,
"loss": 1.2734,
"step": 24
},
{
"epoch": 0.05,
"grad_norm": 0.6382222948594751,
"learning_rate": 9.99234804921275e-05,
"loss": 1.2422,
"step": 25
},
{
"epoch": 0.05,
"grad_norm": 0.6452753693126838,
"learning_rate": 9.990553712541617e-05,
"loss": 1.2422,
"step": 26
},
{
"epoch": 0.05,
"grad_norm": 0.5904936283655213,
"learning_rate": 9.988570748239062e-05,
"loss": 1.2266,
"step": 27
},
{
"epoch": 0.05,
"grad_norm": 0.5640365175871925,
"learning_rate": 9.986399231255056e-05,
"loss": 1.2031,
"step": 28
},
{
"epoch": 0.06,
"grad_norm": 0.514358456822939,
"learning_rate": 9.984039243666283e-05,
"loss": 1.2344,
"step": 29
},
{
"epoch": 0.06,
"grad_norm": 0.636209974460906,
"learning_rate": 9.981490874673039e-05,
"loss": 1.1953,
"step": 30
},
{
"epoch": 0.06,
"grad_norm": 0.5479990507502773,
"learning_rate": 9.978754220595861e-05,
"loss": 1.2031,
"step": 31
},
{
"epoch": 0.06,
"grad_norm": 0.5561261900343224,
"learning_rate": 9.975829384871884e-05,
"loss": 1.2344,
"step": 32
},
{
"epoch": 0.06,
"grad_norm": 0.4948569883144301,
"learning_rate": 9.97271647805093e-05,
"loss": 1.2109,
"step": 33
},
{
"epoch": 0.06,
"grad_norm": 0.5167119119909488,
"learning_rate": 9.969415617791336e-05,
"loss": 1.2266,
"step": 34
},
{
"epoch": 0.07,
"grad_norm": 0.508354119180698,
"learning_rate": 9.965926928855499e-05,
"loss": 1.2109,
"step": 35
},
{
"epoch": 0.07,
"grad_norm": 0.5326133584880987,
"learning_rate": 9.962250543105167e-05,
"loss": 1.1797,
"step": 36
},
{
"epoch": 0.07,
"grad_norm": 0.47996354720772333,
"learning_rate": 9.95838659949645e-05,
"loss": 1.1719,
"step": 37
},
{
"epoch": 0.07,
"grad_norm": 0.4584403899961059,
"learning_rate": 9.954335244074574e-05,
"loss": 1.1797,
"step": 38
},
{
"epoch": 0.07,
"grad_norm": 0.478024134726985,
"learning_rate": 9.950096629968352e-05,
"loss": 1.1953,
"step": 39
},
{
"epoch": 0.08,
"grad_norm": 0.4831225145909792,
"learning_rate": 9.945670917384403e-05,
"loss": 1.1797,
"step": 40
},
{
"epoch": 0.08,
"grad_norm": 0.47054504799219404,
"learning_rate": 9.941058273601096e-05,
"loss": 1.1719,
"step": 41
},
{
"epoch": 0.08,
"grad_norm": 0.4940788339187542,
"learning_rate": 9.936258872962228e-05,
"loss": 1.1719,
"step": 42
},
{
"epoch": 0.08,
"grad_norm": 0.44655071198353546,
"learning_rate": 9.931272896870426e-05,
"loss": 1.1953,
"step": 43
},
{
"epoch": 0.08,
"grad_norm": 0.4620978351861828,
"learning_rate": 9.926100533780303e-05,
"loss": 1.1797,
"step": 44
},
{
"epoch": 0.09,
"grad_norm": 0.4777738095085538,
"learning_rate": 9.920741979191331e-05,
"loss": 1.1641,
"step": 45
},
{
"epoch": 0.09,
"grad_norm": 0.46687195896903444,
"learning_rate": 9.91519743564044e-05,
"loss": 1.1562,
"step": 46
},
{
"epoch": 0.09,
"grad_norm": 0.4572029233269296,
"learning_rate": 9.909467112694384e-05,
"loss": 1.1719,
"step": 47
},
{
"epoch": 0.09,
"grad_norm": 0.4379619610466461,
"learning_rate": 9.903551226941801e-05,
"loss": 1.1562,
"step": 48
},
{
"epoch": 0.09,
"grad_norm": 0.4625672515990539,
"learning_rate": 9.897450001985039e-05,
"loss": 1.1406,
"step": 49
},
{
"epoch": 0.09,
"grad_norm": 0.45763509639124433,
"learning_rate": 9.891163668431695e-05,
"loss": 1.1719,
"step": 50
},
{
"epoch": 0.1,
"grad_norm": 0.4759593297993926,
"learning_rate": 9.88469246388591e-05,
"loss": 1.1484,
"step": 51
},
{
"epoch": 0.1,
"grad_norm": 0.47690674058291993,
"learning_rate": 9.878036632939374e-05,
"loss": 1.2031,
"step": 52
},
{
"epoch": 0.1,
"grad_norm": 0.44532349751989236,
"learning_rate": 9.871196427162092e-05,
"loss": 1.1172,
"step": 53
},
{
"epoch": 0.1,
"grad_norm": 0.452106382176349,
"learning_rate": 9.86417210509288e-05,
"loss": 1.1094,
"step": 54
},
{
"epoch": 0.1,
"grad_norm": 0.452753191760464,
"learning_rate": 9.85696393222957e-05,
"loss": 1.1953,
"step": 55
},
{
"epoch": 0.11,
"grad_norm": 0.45441530517528445,
"learning_rate": 9.849572181019007e-05,
"loss": 1.1406,
"step": 56
},
{
"epoch": 0.11,
"grad_norm": 0.5007896403646297,
"learning_rate": 9.84199713084672e-05,
"loss": 1.1406,
"step": 57
},
{
"epoch": 0.11,
"grad_norm": 0.4608644722591853,
"learning_rate": 9.834239068026387e-05,
"loss": 1.1328,
"step": 58
},
{
"epoch": 0.11,
"grad_norm": 0.4968111745587068,
"learning_rate": 9.826298285789002e-05,
"loss": 1.1328,
"step": 59
},
{
"epoch": 0.11,
"grad_norm": 0.44573050581792,
"learning_rate": 9.818175084271786e-05,
"loss": 1.125,
"step": 60
},
{
"epoch": 0.12,
"grad_norm": 0.46589557608902915,
"learning_rate": 9.809869770506856e-05,
"loss": 1.1328,
"step": 61
},
{
"epoch": 0.12,
"grad_norm": 0.45678602237538535,
"learning_rate": 9.80138265840961e-05,
"loss": 1.1328,
"step": 62
},
{
"epoch": 0.12,
"grad_norm": 0.43959375518292026,
"learning_rate": 9.792714068766872e-05,
"loss": 1.125,
"step": 63
},
{
"epoch": 0.12,
"grad_norm": 0.41984631355693813,
"learning_rate": 9.783864329224752e-05,
"loss": 1.1406,
"step": 64
},
{
"epoch": 0.12,
"grad_norm": 0.43389373514903795,
"learning_rate": 9.774833774276278e-05,
"loss": 1.1172,
"step": 65
},
{
"epoch": 0.13,
"grad_norm": 0.4336892341975021,
"learning_rate": 9.765622745248739e-05,
"loss": 1.1406,
"step": 66
},
{
"epoch": 0.13,
"grad_norm": 0.4410380396458464,
"learning_rate": 9.7562315902908e-05,
"loss": 1.1172,
"step": 67
},
{
"epoch": 0.13,
"grad_norm": 0.5141753129119535,
"learning_rate": 9.746660664359326e-05,
"loss": 1.1172,
"step": 68
},
{
"epoch": 0.13,
"grad_norm": 0.46159702650365997,
"learning_rate": 9.736910329205978e-05,
"loss": 1.1328,
"step": 69
},
{
"epoch": 0.13,
"grad_norm": 0.45575998013176866,
"learning_rate": 9.726980953363536e-05,
"loss": 1.1328,
"step": 70
},
{
"epoch": 0.13,
"grad_norm": 0.4458400134106401,
"learning_rate": 9.716872912131964e-05,
"loss": 1.1172,
"step": 71
},
{
"epoch": 0.14,
"grad_norm": 0.4403442516042718,
"learning_rate": 9.706586587564237e-05,
"loss": 1.1406,
"step": 72
},
{
"epoch": 0.14,
"grad_norm": 0.4513799624319814,
"learning_rate": 9.696122368451886e-05,
"loss": 1.1406,
"step": 73
},
{
"epoch": 0.14,
"grad_norm": 0.4348698465314392,
"learning_rate": 9.685480650310318e-05,
"loss": 1.1641,
"step": 74
},
{
"epoch": 0.14,
"grad_norm": 0.4498345537005005,
"learning_rate": 9.674661835363858e-05,
"loss": 1.125,
"step": 75
},
{
"epoch": 0.14,
"grad_norm": 0.43144309981302603,
"learning_rate": 9.663666332530541e-05,
"loss": 1.1172,
"step": 76
},
{
"epoch": 0.15,
"grad_norm": 0.4399921408069473,
"learning_rate": 9.652494557406666e-05,
"loss": 1.125,
"step": 77
},
{
"epoch": 0.15,
"grad_norm": 0.47704120684842194,
"learning_rate": 9.641146932251089e-05,
"loss": 1.1328,
"step": 78
},
{
"epoch": 0.15,
"grad_norm": 0.44749472258572454,
"learning_rate": 9.62962388596925e-05,
"loss": 1.1406,
"step": 79
},
{
"epoch": 0.15,
"grad_norm": 0.4413794417064817,
"learning_rate": 9.617925854096975e-05,
"loss": 1.1406,
"step": 80
},
{
"epoch": 0.15,
"grad_norm": 0.42859705779561913,
"learning_rate": 9.606053278784009e-05,
"loss": 1.1172,
"step": 81
},
{
"epoch": 0.16,
"grad_norm": 0.45418306254807467,
"learning_rate": 9.5940066087773e-05,
"loss": 1.1016,
"step": 82
},
{
"epoch": 0.16,
"grad_norm": 0.43688517954319694,
"learning_rate": 9.581786299404045e-05,
"loss": 1.1172,
"step": 83
},
{
"epoch": 0.16,
"grad_norm": 0.4397567687148274,
"learning_rate": 9.569392812554478e-05,
"loss": 1.1172,
"step": 84
},
{
"epoch": 0.16,
"grad_norm": 0.46690101219263686,
"learning_rate": 9.556826616664407e-05,
"loss": 1.1328,
"step": 85
},
{
"epoch": 0.16,
"grad_norm": 0.4756162905348212,
"learning_rate": 9.544088186697515e-05,
"loss": 1.1172,
"step": 86
},
{
"epoch": 0.17,
"grad_norm": 0.4366895335330985,
"learning_rate": 9.531178004127403e-05,
"loss": 1.1172,
"step": 87
},
{
"epoch": 0.17,
"grad_norm": 0.46048461137548086,
"learning_rate": 9.518096556919396e-05,
"loss": 1.125,
"step": 88
},
{
"epoch": 0.17,
"grad_norm": 0.45095033069608803,
"learning_rate": 9.504844339512095e-05,
"loss": 1.1016,
"step": 89
},
{
"epoch": 0.17,
"grad_norm": 0.440538034417393,
"learning_rate": 9.491421852798696e-05,
"loss": 1.1016,
"step": 90
},
{
"epoch": 0.17,
"grad_norm": 0.4472029761648666,
"learning_rate": 9.477829604108044e-05,
"loss": 1.0938,
"step": 91
},
{
"epoch": 0.17,
"grad_norm": 0.4369667303362839,
"learning_rate": 9.464068107185476e-05,
"loss": 1.0781,
"step": 92
},
{
"epoch": 0.18,
"grad_norm": 0.5249913549114,
"learning_rate": 9.450137882173384e-05,
"loss": 1.0703,
"step": 93
},
{
"epoch": 0.18,
"grad_norm": 0.4465738523446084,
"learning_rate": 9.436039455591573e-05,
"loss": 1.0859,
"step": 94
},
{
"epoch": 0.18,
"grad_norm": 0.43766479595594365,
"learning_rate": 9.421773360317347e-05,
"loss": 1.1016,
"step": 95
},
{
"epoch": 0.18,
"grad_norm": 0.4240043286719592,
"learning_rate": 9.407340135565374e-05,
"loss": 1.0938,
"step": 96
},
{
"epoch": 0.18,
"grad_norm": 0.42744921800889424,
"learning_rate": 9.392740326867304e-05,
"loss": 1.1172,
"step": 97
},
{
"epoch": 0.19,
"grad_norm": 0.458478203908347,
"learning_rate": 9.377974486051147e-05,
"loss": 1.1172,
"step": 98
},
{
"epoch": 0.19,
"grad_norm": 0.4320194692065847,
"learning_rate": 9.363043171220423e-05,
"loss": 1.0859,
"step": 99
},
{
"epoch": 0.19,
"grad_norm": 0.42941401875819113,
"learning_rate": 9.347946946733055e-05,
"loss": 1.0938,
"step": 100
},
{
"epoch": 0.19,
"grad_norm": 0.4345817070644086,
"learning_rate": 9.332686383180055e-05,
"loss": 1.0781,
"step": 101
},
{
"epoch": 0.19,
"grad_norm": 0.42729040555243186,
"learning_rate": 9.31726205736394e-05,
"loss": 1.1094,
"step": 102
},
{
"epoch": 0.2,
"grad_norm": 0.4676759430208925,
"learning_rate": 9.301674552276942e-05,
"loss": 1.0859,
"step": 103
},
{
"epoch": 0.2,
"grad_norm": 0.437120596292176,
"learning_rate": 9.28592445707897e-05,
"loss": 1.1094,
"step": 104
},
{
"epoch": 0.2,
"grad_norm": 0.44408267981562327,
"learning_rate": 9.270012367075336e-05,
"loss": 1.1094,
"step": 105
},
{
"epoch": 0.2,
"grad_norm": 0.4408320908096716,
"learning_rate": 9.253938883694267e-05,
"loss": 1.0859,
"step": 106
},
{
"epoch": 0.2,
"grad_norm": 0.4153884549704849,
"learning_rate": 9.237704614464156e-05,
"loss": 1.0781,
"step": 107
},
{
"epoch": 0.2,
"grad_norm": 0.44536034771337696,
"learning_rate": 9.221310172990616e-05,
"loss": 1.0781,
"step": 108
},
{
"epoch": 0.21,
"grad_norm": 0.42677756463500066,
"learning_rate": 9.204756178933274e-05,
"loss": 1.0703,
"step": 109
},
{
"epoch": 0.21,
"grad_norm": 0.4378028436016707,
"learning_rate": 9.18804325798236e-05,
"loss": 1.0938,
"step": 110
},
{
"epoch": 0.21,
"grad_norm": 0.42237143257723425,
"learning_rate": 9.17117204183505e-05,
"loss": 1.1094,
"step": 111
},
{
"epoch": 0.21,
"grad_norm": 0.43243228227443636,
"learning_rate": 9.154143168171592e-05,
"loss": 1.0859,
"step": 112
},
{
"epoch": 0.21,
"grad_norm": 0.41404933017475487,
"learning_rate": 9.136957280631212e-05,
"loss": 1.0938,
"step": 113
},
{
"epoch": 0.22,
"grad_norm": 0.4409966487573737,
"learning_rate": 9.119615028787771e-05,
"loss": 1.0625,
"step": 114
},
{
"epoch": 0.22,
"grad_norm": 0.40666941809648666,
"learning_rate": 9.102117068125226e-05,
"loss": 1.0703,
"step": 115
},
{
"epoch": 0.22,
"grad_norm": 0.4436737155967258,
"learning_rate": 9.08446406001285e-05,
"loss": 1.0859,
"step": 116
},
{
"epoch": 0.22,
"grad_norm": 0.4274067363700415,
"learning_rate": 9.06665667168023e-05,
"loss": 1.0547,
"step": 117
},
{
"epoch": 0.22,
"grad_norm": 0.40913040466042055,
"learning_rate": 9.048695576192058e-05,
"loss": 1.0625,
"step": 118
},
{
"epoch": 0.23,
"grad_norm": 0.4937917107991283,
"learning_rate": 9.030581452422679e-05,
"loss": 1.1172,
"step": 119
},
{
"epoch": 0.23,
"grad_norm": 0.4404941593953187,
"learning_rate": 9.012314985030445e-05,
"loss": 1.0781,
"step": 120
},
{
"epoch": 0.23,
"grad_norm": 0.39523077054498146,
"learning_rate": 8.993896864431826e-05,
"loss": 1.0625,
"step": 121
},
{
"epoch": 0.23,
"grad_norm": 0.4373478392227282,
"learning_rate": 8.975327786775315e-05,
"loss": 1.0703,
"step": 122
},
{
"epoch": 0.23,
"grad_norm": 0.4577216496829238,
"learning_rate": 8.956608453915125e-05,
"loss": 1.1016,
"step": 123
},
{
"epoch": 0.24,
"grad_norm": 0.4276538666446135,
"learning_rate": 8.937739573384653e-05,
"loss": 1.0781,
"step": 124
},
{
"epoch": 0.24,
"grad_norm": 0.4362035096548072,
"learning_rate": 8.918721858369738e-05,
"loss": 1.0703,
"step": 125
},
{
"epoch": 0.24,
"grad_norm": 0.4493303937073408,
"learning_rate": 8.899556027681709e-05,
"loss": 1.0703,
"step": 126
},
{
"epoch": 0.24,
"grad_norm": 0.4434149185545328,
"learning_rate": 8.880242805730208e-05,
"loss": 1.0938,
"step": 127
},
{
"epoch": 0.24,
"grad_norm": 0.43950175470648284,
"learning_rate": 8.860782922495822e-05,
"loss": 1.0703,
"step": 128
},
{
"epoch": 0.24,
"grad_norm": 0.426071957636363,
"learning_rate": 8.841177113502482e-05,
"loss": 1.0703,
"step": 129
},
{
"epoch": 0.25,
"grad_norm": 0.450291923405564,
"learning_rate": 8.821426119789662e-05,
"loss": 1.0781,
"step": 130
},
{
"epoch": 0.25,
"grad_norm": 0.40557626842779504,
"learning_rate": 8.801530687884378e-05,
"loss": 1.0781,
"step": 131
},
{
"epoch": 0.25,
"grad_norm": 0.4702580576149972,
"learning_rate": 8.781491569772966e-05,
"loss": 1.0547,
"step": 132
},
{
"epoch": 0.25,
"grad_norm": 0.44111964762493167,
"learning_rate": 8.761309522872657e-05,
"loss": 1.0469,
"step": 133
},
{
"epoch": 0.25,
"grad_norm": 0.41670105323821127,
"learning_rate": 8.740985310002956e-05,
"loss": 1.0859,
"step": 134
},
{
"epoch": 0.26,
"grad_norm": 0.4475226900837088,
"learning_rate": 8.720519699356804e-05,
"loss": 1.0859,
"step": 135
},
{
"epoch": 0.26,
"grad_norm": 0.43566177490537383,
"learning_rate": 8.699913464471543e-05,
"loss": 1.0859,
"step": 136
},
{
"epoch": 0.26,
"grad_norm": 0.45325103614908197,
"learning_rate": 8.679167384199684e-05,
"loss": 1.0625,
"step": 137
},
{
"epoch": 0.26,
"grad_norm": 0.42259820068503184,
"learning_rate": 8.658282242679461e-05,
"loss": 1.0859,
"step": 138
},
{
"epoch": 0.26,
"grad_norm": 0.42782456224560983,
"learning_rate": 8.637258829305199e-05,
"loss": 1.0938,
"step": 139
},
{
"epoch": 0.27,
"grad_norm": 0.42070148732795626,
"learning_rate": 8.616097938697475e-05,
"loss": 1.0859,
"step": 140
},
{
"epoch": 0.27,
"grad_norm": 0.4103382323854611,
"learning_rate": 8.594800370673083e-05,
"loss": 1.1016,
"step": 141
},
{
"epoch": 0.27,
"grad_norm": 0.4273680868793077,
"learning_rate": 8.573366930214806e-05,
"loss": 1.0859,
"step": 142
},
{
"epoch": 0.27,
"grad_norm": 0.41570068283778067,
"learning_rate": 8.551798427440986e-05,
"loss": 1.0469,
"step": 143
},
{
"epoch": 0.27,
"grad_norm": 0.4189560948484794,
"learning_rate": 8.53009567757491e-05,
"loss": 1.0938,
"step": 144
},
{
"epoch": 0.28,
"grad_norm": 0.40232705129491403,
"learning_rate": 8.50825950091399e-05,
"loss": 1.0469,
"step": 145
},
{
"epoch": 0.28,
"grad_norm": 0.45728641315276936,
"learning_rate": 8.486290722798764e-05,
"loss": 1.0625,
"step": 146
},
{
"epoch": 0.28,
"grad_norm": 0.43979948317554834,
"learning_rate": 8.464190173581699e-05,
"loss": 1.0938,
"step": 147
},
{
"epoch": 0.28,
"grad_norm": 0.4374598465086451,
"learning_rate": 8.441958688595801e-05,
"loss": 1.0625,
"step": 148
},
{
"epoch": 0.28,
"grad_norm": 0.4352920608233416,
"learning_rate": 8.419597108123054e-05,
"loss": 1.0547,
"step": 149
},
{
"epoch": 0.28,
"grad_norm": 0.42744190205787813,
"learning_rate": 8.397106277362647e-05,
"loss": 1.0703,
"step": 150
},
{
"epoch": 0.29,
"grad_norm": 0.6185631612514999,
"learning_rate": 8.374487046399036e-05,
"loss": 1.0781,
"step": 151
},
{
"epoch": 0.29,
"grad_norm": 0.4214859539037842,
"learning_rate": 8.35174027016981e-05,
"loss": 1.0703,
"step": 152
},
{
"epoch": 0.29,
"grad_norm": 0.4300234685508506,
"learning_rate": 8.328866808433378e-05,
"loss": 1.0312,
"step": 153
},
{
"epoch": 0.29,
"grad_norm": 0.4516779428626118,
"learning_rate": 8.305867525736474e-05,
"loss": 1.0547,
"step": 154
},
{
"epoch": 0.29,
"grad_norm": 0.4444842072606281,
"learning_rate": 8.282743291381481e-05,
"loss": 1.0703,
"step": 155
},
{
"epoch": 0.3,
"grad_norm": 0.44891341810235713,
"learning_rate": 8.259494979393563e-05,
"loss": 1.0469,
"step": 156
},
{
"epoch": 0.3,
"grad_norm": 0.4403106392279916,
"learning_rate": 8.236123468487648e-05,
"loss": 1.0703,
"step": 157
},
{
"epoch": 0.3,
"grad_norm": 0.4558686783543436,
"learning_rate": 8.212629642035199e-05,
"loss": 1.0547,
"step": 158
},
{
"epoch": 0.3,
"grad_norm": 0.47853095524964395,
"learning_rate": 8.189014388030833e-05,
"loss": 1.0859,
"step": 159
},
{
"epoch": 0.3,
"grad_norm": 0.454113313074197,
"learning_rate": 8.165278599058761e-05,
"loss": 1.0547,
"step": 160
},
{
"epoch": 0.31,
"grad_norm": 0.4406670018539545,
"learning_rate": 8.141423172259038e-05,
"loss": 1.0859,
"step": 161
},
{
"epoch": 0.31,
"grad_norm": 0.434252392773616,
"learning_rate": 8.117449009293668e-05,
"loss": 1.0547,
"step": 162
},
{
"epoch": 0.31,
"grad_norm": 0.44488081257640494,
"learning_rate": 8.093357016312517e-05,
"loss": 1.0703,
"step": 163
},
{
"epoch": 0.31,
"grad_norm": 0.4291046723882578,
"learning_rate": 8.069148103919064e-05,
"loss": 1.0703,
"step": 164
},
{
"epoch": 0.31,
"grad_norm": 0.4701461632642551,
"learning_rate": 8.044823187135984e-05,
"loss": 1.0703,
"step": 165
},
{
"epoch": 0.31,
"grad_norm": 0.41504338561910276,
"learning_rate": 8.020383185370559e-05,
"loss": 1.0469,
"step": 166
},
{
"epoch": 0.32,
"grad_norm": 0.43178723119245843,
"learning_rate": 7.995829022379936e-05,
"loss": 1.0703,
"step": 167
},
{
"epoch": 0.32,
"grad_norm": 0.4204377336860703,
"learning_rate": 7.9711616262362e-05,
"loss": 1.0547,
"step": 168
},
{
"epoch": 0.32,
"grad_norm": 0.4407346163963636,
"learning_rate": 7.94638192929131e-05,
"loss": 1.0703,
"step": 169
},
{
"epoch": 0.32,
"grad_norm": 0.4264303388977427,
"learning_rate": 7.921490868141843e-05,
"loss": 1.0391,
"step": 170
},
{
"epoch": 0.32,
"grad_norm": 0.40640791387915454,
"learning_rate": 7.896489383593606e-05,
"loss": 1.0,
"step": 171
},
{
"epoch": 0.33,
"grad_norm": 0.43144011890160755,
"learning_rate": 7.871378420626072e-05,
"loss": 1.0547,
"step": 172
},
{
"epoch": 0.33,
"grad_norm": 0.4744375892527014,
"learning_rate": 7.84615892835666e-05,
"loss": 1.0391,
"step": 173
},
{
"epoch": 0.33,
"grad_norm": 0.4414917687031027,
"learning_rate": 7.820831860004867e-05,
"loss": 1.0391,
"step": 174
},
{
"epoch": 0.33,
"grad_norm": 0.43753081136596556,
"learning_rate": 7.795398172856233e-05,
"loss": 1.0391,
"step": 175
},
{
"epoch": 0.33,
"grad_norm": 0.44688090549658416,
"learning_rate": 7.769858828226164e-05,
"loss": 1.0859,
"step": 176
},
{
"epoch": 0.34,
"grad_norm": 0.4324472242080182,
"learning_rate": 7.744214791423596e-05,
"loss": 1.0781,
"step": 177
},
{
"epoch": 0.34,
"grad_norm": 0.39471909710720243,
"learning_rate": 7.718467031714506e-05,
"loss": 1.0938,
"step": 178
},
{
"epoch": 0.34,
"grad_norm": 0.42273379977293096,
"learning_rate": 7.692616522285278e-05,
"loss": 1.0703,
"step": 179
},
{
"epoch": 0.34,
"grad_norm": 0.41049110470779143,
"learning_rate": 7.666664240205922e-05,
"loss": 1.0469,
"step": 180
},
{
"epoch": 0.34,
"grad_norm": 0.4309053261577172,
"learning_rate": 7.640611166393141e-05,
"loss": 1.0547,
"step": 181
},
{
"epoch": 0.35,
"grad_norm": 0.4297343270635459,
"learning_rate": 7.614458285573261e-05,
"loss": 1.0547,
"step": 182
},
{
"epoch": 0.35,
"grad_norm": 0.42742054343292596,
"learning_rate": 7.588206586245002e-05,
"loss": 1.0469,
"step": 183
},
{
"epoch": 0.35,
"grad_norm": 0.4332643732697325,
"learning_rate": 7.56185706064212e-05,
"loss": 1.0391,
"step": 184
},
{
"epoch": 0.35,
"grad_norm": 0.42811233809608473,
"learning_rate": 7.535410704695906e-05,
"loss": 1.0469,
"step": 185
},
{
"epoch": 0.35,
"grad_norm": 0.4489861675889494,
"learning_rate": 7.508868517997544e-05,
"loss": 1.0312,
"step": 186
},
{
"epoch": 0.35,
"grad_norm": 0.4205028088463761,
"learning_rate": 7.482231503760325e-05,
"loss": 1.0469,
"step": 187
},
{
"epoch": 0.36,
"grad_norm": 0.41000506415552723,
"learning_rate": 7.455500668781726e-05,
"loss": 1.0547,
"step": 188
},
{
"epoch": 0.36,
"grad_norm": 0.41183736439865304,
"learning_rate": 7.428677023405366e-05,
"loss": 1.0547,
"step": 189
},
{
"epoch": 0.36,
"grad_norm": 0.4246427305638071,
"learning_rate": 7.40176158148281e-05,
"loss": 1.0625,
"step": 190
},
{
"epoch": 0.36,
"grad_norm": 0.43275232447123674,
"learning_rate": 7.374755360335253e-05,
"loss": 1.0469,
"step": 191
},
{
"epoch": 0.36,
"grad_norm": 0.42692084681273973,
"learning_rate": 7.347659380715061e-05,
"loss": 1.0234,
"step": 192
},
{
"epoch": 0.37,
"grad_norm": 0.4311244552583607,
"learning_rate": 7.320474666767201e-05,
"loss": 1.0234,
"step": 193
},
{
"epoch": 0.37,
"grad_norm": 0.42719595530993326,
"learning_rate": 7.293202245990526e-05,
"loss": 1.0469,
"step": 194
},
{
"epoch": 0.37,
"grad_norm": 0.4174615425446307,
"learning_rate": 7.265843149198931e-05,
"loss": 1.0547,
"step": 195
},
{
"epoch": 0.37,
"grad_norm": 0.42222785480422026,
"learning_rate": 7.238398410482408e-05,
"loss": 1.0547,
"step": 196
},
{
"epoch": 0.37,
"grad_norm": 0.4157306068460529,
"learning_rate": 7.210869067167942e-05,
"loss": 1.0234,
"step": 197
},
{
"epoch": 0.38,
"grad_norm": 0.4241319467586246,
"learning_rate": 7.18325615978032e-05,
"loss": 1.0625,
"step": 198
},
{
"epoch": 0.38,
"grad_norm": 0.4087921332041852,
"learning_rate": 7.155560732002791e-05,
"loss": 1.0391,
"step": 199
},
{
"epoch": 0.38,
"grad_norm": 0.426858061802629,
"learning_rate": 7.127783830637625e-05,
"loss": 1.0469,
"step": 200
},
{
"epoch": 0.38,
"grad_norm": 0.41722790561085193,
"learning_rate": 7.099926505566537e-05,
"loss": 1.0469,
"step": 201
},
{
"epoch": 0.38,
"grad_norm": 0.4199601470491497,
"learning_rate": 7.071989809711019e-05,
"loss": 1.0547,
"step": 202
},
{
"epoch": 0.39,
"grad_norm": 0.44231292212761336,
"learning_rate": 7.043974798992532e-05,
"loss": 1.0547,
"step": 203
},
{
"epoch": 0.39,
"grad_norm": 0.40964479400554343,
"learning_rate": 7.015882532292598e-05,
"loss": 1.0547,
"step": 204
},
{
"epoch": 0.39,
"grad_norm": 0.41103344664490243,
"learning_rate": 6.98771407141278e-05,
"loss": 1.0391,
"step": 205
},
{
"epoch": 0.39,
"grad_norm": 0.418536368662017,
"learning_rate": 6.959470481034547e-05,
"loss": 1.0391,
"step": 206
},
{
"epoch": 0.39,
"grad_norm": 0.43983351138714233,
"learning_rate": 6.931152828679033e-05,
"loss": 1.0312,
"step": 207
},
{
"epoch": 0.39,
"grad_norm": 0.4167726489287608,
"learning_rate": 6.902762184666687e-05,
"loss": 1.0234,
"step": 208
},
{
"epoch": 0.4,
"grad_norm": 0.42759288997893236,
"learning_rate": 6.874299622076816e-05,
"loss": 1.0625,
"step": 209
},
{
"epoch": 0.4,
"grad_norm": 0.4163882383830232,
"learning_rate": 6.845766216707037e-05,
"loss": 1.0469,
"step": 210
},
{
"epoch": 0.4,
"grad_norm": 0.40983190994051244,
"learning_rate": 6.817163047032598e-05,
"loss": 1.0234,
"step": 211
},
{
"epoch": 0.4,
"grad_norm": 0.40059171202862986,
"learning_rate": 6.78849119416563e-05,
"loss": 1.0156,
"step": 212
},
{
"epoch": 0.4,
"grad_norm": 0.4187064398891471,
"learning_rate": 6.759751741814271e-05,
"loss": 1.0469,
"step": 213
},
{
"epoch": 0.41,
"grad_norm": 0.42319499896716717,
"learning_rate": 6.730945776241722e-05,
"loss": 1.0234,
"step": 214
},
{
"epoch": 0.41,
"grad_norm": 0.43680930005688956,
"learning_rate": 6.702074386225175e-05,
"loss": 1.0469,
"step": 215
},
{
"epoch": 0.41,
"grad_norm": 0.4108033837354488,
"learning_rate": 6.67313866301466e-05,
"loss": 1.0469,
"step": 216
},
{
"epoch": 0.41,
"grad_norm": 0.40903728151823127,
"learning_rate": 6.644139700291817e-05,
"loss": 1.0469,
"step": 217
},
{
"epoch": 0.41,
"grad_norm": 0.40715077992299514,
"learning_rate": 6.615078594128531e-05,
"loss": 1.0703,
"step": 218
},
{
"epoch": 0.42,
"grad_norm": 0.41322957469383936,
"learning_rate": 6.585956442945532e-05,
"loss": 1.0234,
"step": 219
},
{
"epoch": 0.42,
"grad_norm": 0.421602554588443,
"learning_rate": 6.556774347470855e-05,
"loss": 1.0625,
"step": 220
},
{
"epoch": 0.42,
"grad_norm": 0.4246703826676253,
"learning_rate": 6.52753341069825e-05,
"loss": 1.0391,
"step": 221
},
{
"epoch": 0.42,
"grad_norm": 0.4277032038662427,
"learning_rate": 6.498234737845488e-05,
"loss": 1.0234,
"step": 222
},
{
"epoch": 0.42,
"grad_norm": 0.40733604211970925,
"learning_rate": 6.468879436312584e-05,
"loss": 1.0547,
"step": 223
},
{
"epoch": 0.43,
"grad_norm": 0.41272813606786746,
"learning_rate": 6.439468615639946e-05,
"loss": 1.0625,
"step": 224
},
{
"epoch": 0.43,
"grad_norm": 0.40178981821143017,
"learning_rate": 6.410003387466433e-05,
"loss": 1.0234,
"step": 225
},
{
"epoch": 0.43,
"grad_norm": 0.4153854554817549,
"learning_rate": 6.380484865487347e-05,
"loss": 1.0391,
"step": 226
},
{
"epoch": 0.43,
"grad_norm": 0.40755934293498974,
"learning_rate": 6.35091416541232e-05,
"loss": 1.0391,
"step": 227
},
{
"epoch": 0.43,
"grad_norm": 0.41967206872870166,
"learning_rate": 6.321292404923167e-05,
"loss": 1.0703,
"step": 228
},
{
"epoch": 0.43,
"grad_norm": 0.4017640092647761,
"learning_rate": 6.29162070363163e-05,
"loss": 1.0625,
"step": 229
},
{
"epoch": 0.44,
"grad_norm": 0.39890051372690344,
"learning_rate": 6.261900183037052e-05,
"loss": 1.0312,
"step": 230
},
{
"epoch": 0.44,
"grad_norm": 0.4036725539891162,
"learning_rate": 6.232131966484006e-05,
"loss": 1.0312,
"step": 231
},
{
"epoch": 0.44,
"grad_norm": 0.4122985901559554,
"learning_rate": 6.202317179119817e-05,
"loss": 1.0156,
"step": 232
},
{
"epoch": 0.44,
"grad_norm": 0.4113133842869648,
"learning_rate": 6.172456947852049e-05,
"loss": 1.0469,
"step": 233
},
{
"epoch": 0.44,
"grad_norm": 0.4028322239410973,
"learning_rate": 6.142552401305906e-05,
"loss": 1.0469,
"step": 234
},
{
"epoch": 0.45,
"grad_norm": 0.4077613734483574,
"learning_rate": 6.112604669781572e-05,
"loss": 1.0391,
"step": 235
},
{
"epoch": 0.45,
"grad_norm": 0.40208253242419867,
"learning_rate": 6.0826148852114936e-05,
"loss": 1.0469,
"step": 236
},
{
"epoch": 0.45,
"grad_norm": 0.39795754759528085,
"learning_rate": 6.052584181117589e-05,
"loss": 1.0469,
"step": 237
},
{
"epoch": 0.45,
"grad_norm": 0.40763706027293223,
"learning_rate": 6.022513692568412e-05,
"loss": 1.0625,
"step": 238
},
{
"epoch": 0.45,
"grad_norm": 0.4296039465981587,
"learning_rate": 5.9924045561362474e-05,
"loss": 1.0391,
"step": 239
},
{
"epoch": 0.46,
"grad_norm": 0.4070047169709585,
"learning_rate": 5.96225790985415e-05,
"loss": 1.0547,
"step": 240
},
{
"epoch": 0.46,
"grad_norm": 0.41013474576772757,
"learning_rate": 5.9320748931729344e-05,
"loss": 1.0156,
"step": 241
},
{
"epoch": 0.46,
"grad_norm": 0.40322499716017,
"learning_rate": 5.9018566469180994e-05,
"loss": 1.0391,
"step": 242
},
{
"epoch": 0.46,
"grad_norm": 0.3898763992841581,
"learning_rate": 5.87160431324672e-05,
"loss": 1.0156,
"step": 243
},
{
"epoch": 0.46,
"grad_norm": 0.42982455700255084,
"learning_rate": 5.841319035604267e-05,
"loss": 1.0625,
"step": 244
},
{
"epoch": 0.46,
"grad_norm": 0.4040478163311833,
"learning_rate": 5.8110019586813946e-05,
"loss": 0.9961,
"step": 245
},
{
"epoch": 0.47,
"grad_norm": 0.40273222517295776,
"learning_rate": 5.780654228370669e-05,
"loss": 1.0234,
"step": 246
},
{
"epoch": 0.47,
"grad_norm": 0.40974932826857524,
"learning_rate": 5.7502769917232635e-05,
"loss": 1.0156,
"step": 247
},
{
"epoch": 0.47,
"grad_norm": 0.39928761514091116,
"learning_rate": 5.7198713969056026e-05,
"loss": 1.0234,
"step": 248
},
{
"epoch": 0.47,
"grad_norm": 0.41874763062357806,
"learning_rate": 5.689438593155956e-05,
"loss": 1.0312,
"step": 249
},
{
"epoch": 0.47,
"grad_norm": 0.4493892592268143,
"learning_rate": 5.658979730741014e-05,
"loss": 1.0547,
"step": 250
},
{
"epoch": 0.48,
"grad_norm": 0.40018316116952474,
"learning_rate": 5.6284959609124e-05,
"loss": 1.0469,
"step": 251
},
{
"epoch": 0.48,
"grad_norm": 0.43858332707717207,
"learning_rate": 5.597988435863166e-05,
"loss": 1.0312,
"step": 252
},
{
"epoch": 0.48,
"grad_norm": 0.40836711063525477,
"learning_rate": 5.567458308684232e-05,
"loss": 1.0547,
"step": 253
},
{
"epoch": 0.48,
"grad_norm": 0.42292844222040155,
"learning_rate": 5.536906733320816e-05,
"loss": 1.0156,
"step": 254
},
{
"epoch": 0.48,
"grad_norm": 0.41490508684177924,
"learning_rate": 5.506334864528808e-05,
"loss": 1.0234,
"step": 255
},
{
"epoch": 0.49,
"grad_norm": 0.4058447957006835,
"learning_rate": 5.475743857831127e-05,
"loss": 1.0156,
"step": 256
},
{
"epoch": 0.49,
"grad_norm": 0.4254621460881678,
"learning_rate": 5.445134869474049e-05,
"loss": 1.0391,
"step": 257
},
{
"epoch": 0.49,
"grad_norm": 0.4075861687227603,
"learning_rate": 5.414509056383498e-05,
"loss": 1.0078,
"step": 258
},
{
"epoch": 0.49,
"grad_norm": 0.40610391261853535,
"learning_rate": 5.3838675761213244e-05,
"loss": 1.0547,
"step": 259
},
{
"epoch": 0.49,
"grad_norm": 0.38922815015134116,
"learning_rate": 5.3532115868415464e-05,
"loss": 1.0078,
"step": 260
},
{
"epoch": 0.5,
"grad_norm": 0.4046852521792458,
"learning_rate": 5.3225422472465824e-05,
"loss": 1.0078,
"step": 261
},
{
"epoch": 0.5,
"grad_norm": 0.40728479784786964,
"learning_rate": 5.29186071654345e-05,
"loss": 0.9648,
"step": 262
},
{
"epoch": 0.5,
"grad_norm": 0.41255158343952975,
"learning_rate": 5.261168154399952e-05,
"loss": 1.0234,
"step": 263
},
{
"epoch": 0.5,
"grad_norm": 0.4028183441362248,
"learning_rate": 5.23046572090085e-05,
"loss": 1.0078,
"step": 264
},
{
"epoch": 0.5,
"grad_norm": 0.40792031816309837,
"learning_rate": 5.199754576504006e-05,
"loss": 1.0078,
"step": 265
},
{
"epoch": 0.5,
"grad_norm": 0.4089443562798811,
"learning_rate": 5.169035881996533e-05,
"loss": 1.0312,
"step": 266
},
{
"epoch": 0.51,
"grad_norm": 0.40684122314346166,
"learning_rate": 5.138310798450912e-05,
"loss": 1.0312,
"step": 267
},
{
"epoch": 0.51,
"grad_norm": 0.40867014012821695,
"learning_rate": 5.1075804871811115e-05,
"loss": 1.0312,
"step": 268
},
{
"epoch": 0.51,
"grad_norm": 0.40061062002695064,
"learning_rate": 5.076846109698693e-05,
"loss": 1.0547,
"step": 269
},
{
"epoch": 0.51,
"grad_norm": 0.423175226061232,
"learning_rate": 5.046108827668903e-05,
"loss": 1.0469,
"step": 270
},
{
"epoch": 0.51,
"grad_norm": 0.3898202650786047,
"learning_rate": 5.01536980286678e-05,
"loss": 1.0156,
"step": 271
},
{
"epoch": 0.52,
"grad_norm": 0.411610879755615,
"learning_rate": 4.9846301971332234e-05,
"loss": 0.9883,
"step": 272
},
{
"epoch": 0.52,
"grad_norm": 0.39055415860483883,
"learning_rate": 4.9538911723310976e-05,
"loss": 1.0156,
"step": 273
},
{
"epoch": 0.52,
"grad_norm": 0.4259500716874186,
"learning_rate": 4.92315389030131e-05,
"loss": 1.0391,
"step": 274
},
{
"epoch": 0.52,
"grad_norm": 0.39261415846035824,
"learning_rate": 4.892419512818889e-05,
"loss": 1.0312,
"step": 275
},
{
"epoch": 0.52,
"grad_norm": 0.4217477527558212,
"learning_rate": 4.86168920154909e-05,
"loss": 0.9922,
"step": 276
},
{
"epoch": 0.53,
"grad_norm": 0.38700415086180145,
"learning_rate": 4.830964118003468e-05,
"loss": 1.0,
"step": 277
},
{
"epoch": 0.53,
"grad_norm": 0.40332274074495494,
"learning_rate": 4.800245423495997e-05,
"loss": 1.0,
"step": 278
},
{
"epoch": 0.53,
"grad_norm": 0.41396865993704085,
"learning_rate": 4.769534279099152e-05,
"loss": 1.0078,
"step": 279
},
{
"epoch": 0.53,
"grad_norm": 0.4041936351639291,
"learning_rate": 4.7388318456000496e-05,
"loss": 1.0078,
"step": 280
},
{
"epoch": 0.53,
"grad_norm": 0.4270421784351172,
"learning_rate": 4.708139283456551e-05,
"loss": 1.0625,
"step": 281
},
{
"epoch": 0.54,
"grad_norm": 0.4028939120443396,
"learning_rate": 4.6774577527534195e-05,
"loss": 1.0234,
"step": 282
},
{
"epoch": 0.54,
"grad_norm": 0.40455125854222046,
"learning_rate": 4.646788413158455e-05,
"loss": 1.0469,
"step": 283
},
{
"epoch": 0.54,
"grad_norm": 0.4058895633541385,
"learning_rate": 4.616132423878679e-05,
"loss": 1.0391,
"step": 284
},
{
"epoch": 0.54,
"grad_norm": 0.3942526283657809,
"learning_rate": 4.585490943616504e-05,
"loss": 1.0156,
"step": 285
},
{
"epoch": 0.54,
"grad_norm": 0.40351277549494935,
"learning_rate": 4.554865130525953e-05,
"loss": 1.0156,
"step": 286
},
{
"epoch": 0.54,
"grad_norm": 0.39184809982008023,
"learning_rate": 4.524256142168874e-05,
"loss": 1.0156,
"step": 287
},
{
"epoch": 0.55,
"grad_norm": 0.4006543568884177,
"learning_rate": 4.4936651354711946e-05,
"loss": 0.9727,
"step": 288
},
{
"epoch": 0.55,
"grad_norm": 0.4256220682218964,
"learning_rate": 4.463093266679185e-05,
"loss": 1.0156,
"step": 289
},
{
"epoch": 0.55,
"grad_norm": 0.3959597237885138,
"learning_rate": 4.4325416913157704e-05,
"loss": 1.0078,
"step": 290
},
{
"epoch": 0.55,
"grad_norm": 0.4191614983510994,
"learning_rate": 4.402011564136836e-05,
"loss": 1.0,
"step": 291
},
{
"epoch": 0.55,
"grad_norm": 0.41102487050201564,
"learning_rate": 4.371504039087602e-05,
"loss": 1.0156,
"step": 292
},
{
"epoch": 0.56,
"grad_norm": 0.43350039494098586,
"learning_rate": 4.341020269258987e-05,
"loss": 1.0078,
"step": 293
},
{
"epoch": 0.56,
"grad_norm": 0.3956163702456906,
"learning_rate": 4.310561406844045e-05,
"loss": 1.0078,
"step": 294
},
{
"epoch": 0.56,
"grad_norm": 0.40529243964979217,
"learning_rate": 4.2801286030943985e-05,
"loss": 1.0,
"step": 295
},
{
"epoch": 0.56,
"grad_norm": 0.4184379424977412,
"learning_rate": 4.249723008276737e-05,
"loss": 1.0078,
"step": 296
},
{
"epoch": 0.56,
"grad_norm": 0.3971028781090587,
"learning_rate": 4.219345771629333e-05,
"loss": 1.0156,
"step": 297
},
{
"epoch": 0.57,
"grad_norm": 0.4124935318732211,
"learning_rate": 4.188998041318608e-05,
"loss": 1.0078,
"step": 298
},
{
"epoch": 0.57,
"grad_norm": 0.38790648111231085,
"learning_rate": 4.1586809643957345e-05,
"loss": 1.0,
"step": 299
},
{
"epoch": 0.57,
"grad_norm": 0.40687278861829684,
"learning_rate": 4.128395686753282e-05,
"loss": 1.0312,
"step": 300
},
{
"epoch": 0.57,
"grad_norm": 0.3842187210077263,
"learning_rate": 4.098143353081902e-05,
"loss": 1.0078,
"step": 301
},
{
"epoch": 0.57,
"grad_norm": 0.4023673478113215,
"learning_rate": 4.067925106827068e-05,
"loss": 1.0156,
"step": 302
},
{
"epoch": 0.57,
"grad_norm": 0.40540929917797186,
"learning_rate": 4.0377420901458506e-05,
"loss": 1.0469,
"step": 303
},
{
"epoch": 0.58,
"grad_norm": 0.3923349737804049,
"learning_rate": 4.0075954438637545e-05,
"loss": 1.0,
"step": 304
},
{
"epoch": 0.58,
"grad_norm": 0.41558812455430966,
"learning_rate": 3.977486307431589e-05,
"loss": 1.0078,
"step": 305
},
{
"epoch": 0.58,
"grad_norm": 0.40891549560712703,
"learning_rate": 3.947415818882414e-05,
"loss": 1.0391,
"step": 306
},
{
"epoch": 0.58,
"grad_norm": 0.38609997258343076,
"learning_rate": 3.9173851147885075e-05,
"loss": 1.0312,
"step": 307
},
{
"epoch": 0.58,
"grad_norm": 0.3906811392128105,
"learning_rate": 3.887395330218429e-05,
"loss": 1.0156,
"step": 308
},
{
"epoch": 0.59,
"grad_norm": 0.4108545484386376,
"learning_rate": 3.857447598694094e-05,
"loss": 0.9922,
"step": 309
},
{
"epoch": 0.59,
"grad_norm": 0.40531422168101394,
"learning_rate": 3.827543052147952e-05,
"loss": 1.0078,
"step": 310
},
{
"epoch": 0.59,
"grad_norm": 0.3991587404518294,
"learning_rate": 3.797682820880184e-05,
"loss": 0.9961,
"step": 311
},
{
"epoch": 0.59,
"grad_norm": 0.41113645678509786,
"learning_rate": 3.7678680335159954e-05,
"loss": 1.0156,
"step": 312
},
{
"epoch": 0.59,
"grad_norm": 0.4038074947234651,
"learning_rate": 3.7380998169629476e-05,
"loss": 1.0312,
"step": 313
},
{
"epoch": 0.6,
"grad_norm": 0.3853125109344581,
"learning_rate": 3.708379296368372e-05,
"loss": 1.0234,
"step": 314
},
{
"epoch": 0.6,
"grad_norm": 0.4104650046809597,
"learning_rate": 3.678707595076834e-05,
"loss": 0.9922,
"step": 315
},
{
"epoch": 0.6,
"grad_norm": 0.39375524771920445,
"learning_rate": 3.649085834587683e-05,
"loss": 1.0156,
"step": 316
},
{
"epoch": 0.6,
"grad_norm": 0.40077662746048126,
"learning_rate": 3.619515134512656e-05,
"loss": 1.0156,
"step": 317
},
{
"epoch": 0.6,
"grad_norm": 0.38214253590604597,
"learning_rate": 3.589996612533568e-05,
"loss": 1.0156,
"step": 318
},
{
"epoch": 0.61,
"grad_norm": 0.4009148055080218,
"learning_rate": 3.560531384360055e-05,
"loss": 1.0078,
"step": 319
},
{
"epoch": 0.61,
"grad_norm": 0.41232895347465953,
"learning_rate": 3.531120563687419e-05,
"loss": 1.0469,
"step": 320
},
{
"epoch": 0.61,
"grad_norm": 0.38911553214208094,
"learning_rate": 3.501765262154513e-05,
"loss": 1.0078,
"step": 321
},
{
"epoch": 0.61,
"grad_norm": 0.4093791732496366,
"learning_rate": 3.472466589301751e-05,
"loss": 0.9766,
"step": 322
},
{
"epoch": 0.61,
"grad_norm": 0.3850449300702058,
"learning_rate": 3.4432256525291465e-05,
"loss": 0.9883,
"step": 323
},
{
"epoch": 0.61,
"grad_norm": 0.3889336160040775,
"learning_rate": 3.41404355705447e-05,
"loss": 0.9766,
"step": 324
},
{
"epoch": 0.62,
"grad_norm": 0.4084726866306118,
"learning_rate": 3.3849214058714704e-05,
"loss": 1.0156,
"step": 325
},
{
"epoch": 0.62,
"grad_norm": 0.4014871038120363,
"learning_rate": 3.3558602997081866e-05,
"loss": 1.0156,
"step": 326
},
{
"epoch": 0.62,
"grad_norm": 0.4266625922630768,
"learning_rate": 3.326861336985341e-05,
"loss": 1.0391,
"step": 327
},
{
"epoch": 0.62,
"grad_norm": 0.396771175025528,
"learning_rate": 3.297925613774828e-05,
"loss": 1.0078,
"step": 328
},
{
"epoch": 0.62,
"grad_norm": 0.4417896085826512,
"learning_rate": 3.269054223758279e-05,
"loss": 0.9922,
"step": 329
},
{
"epoch": 0.63,
"grad_norm": 0.3809941597462593,
"learning_rate": 3.240248258185731e-05,
"loss": 0.9961,
"step": 330
},
{
"epoch": 0.63,
"grad_norm": 0.4074336906480307,
"learning_rate": 3.2115088058343725e-05,
"loss": 0.9922,
"step": 331
},
{
"epoch": 0.63,
"grad_norm": 0.39553570295641505,
"learning_rate": 3.1828369529674046e-05,
"loss": 1.0,
"step": 332
},
{
"epoch": 0.63,
"grad_norm": 0.397983999068491,
"learning_rate": 3.1542337832929644e-05,
"loss": 1.0078,
"step": 333
},
{
"epoch": 0.63,
"grad_norm": 0.41099820792481956,
"learning_rate": 3.125700377923186e-05,
"loss": 1.0469,
"step": 334
},
{
"epoch": 0.64,
"grad_norm": 0.40386749243880216,
"learning_rate": 3.0972378153333145e-05,
"loss": 0.9961,
"step": 335
},
{
"epoch": 0.64,
"grad_norm": 0.41080474099987024,
"learning_rate": 3.068847171320969e-05,
"loss": 1.0156,
"step": 336
},
{
"epoch": 0.64,
"grad_norm": 0.403700576708479,
"learning_rate": 3.0405295189654537e-05,
"loss": 1.0234,
"step": 337
},
{
"epoch": 0.64,
"grad_norm": 0.40482152116129083,
"learning_rate": 3.0122859285872214e-05,
"loss": 1.0156,
"step": 338
},
{
"epoch": 0.64,
"grad_norm": 0.40683717747057807,
"learning_rate": 2.9841174677074035e-05,
"loss": 0.9805,
"step": 339
},
{
"epoch": 0.65,
"grad_norm": 0.4153367260281328,
"learning_rate": 2.9560252010074706e-05,
"loss": 1.0078,
"step": 340
},
{
"epoch": 0.65,
"grad_norm": 0.3911854931588867,
"learning_rate": 2.9280101902889824e-05,
"loss": 1.0312,
"step": 341
},
{
"epoch": 0.65,
"grad_norm": 0.4042030431024755,
"learning_rate": 2.900073494433464e-05,
"loss": 1.0156,
"step": 342
},
{
"epoch": 0.65,
"grad_norm": 0.40059455864229415,
"learning_rate": 2.8722161693623772e-05,
"loss": 0.9922,
"step": 343
},
{
"epoch": 0.65,
"grad_norm": 0.4074896896435664,
"learning_rate": 2.8444392679972103e-05,
"loss": 0.9883,
"step": 344
},
{
"epoch": 0.65,
"grad_norm": 0.4095882276226098,
"learning_rate": 2.8167438402196805e-05,
"loss": 1.0312,
"step": 345
},
{
"epoch": 0.66,
"grad_norm": 0.4055972430711286,
"learning_rate": 2.7891309328320592e-05,
"loss": 1.0078,
"step": 346
},
{
"epoch": 0.66,
"grad_norm": 0.4147093983828081,
"learning_rate": 2.761601589517595e-05,
"loss": 0.9727,
"step": 347
},
{
"epoch": 0.66,
"grad_norm": 0.3824364918889605,
"learning_rate": 2.7341568508010705e-05,
"loss": 1.0234,
"step": 348
},
{
"epoch": 0.66,
"grad_norm": 0.4173989668789549,
"learning_rate": 2.706797754009476e-05,
"loss": 0.9961,
"step": 349
},
{
"epoch": 0.66,
"grad_norm": 0.41599241388744446,
"learning_rate": 2.6795253332327995e-05,
"loss": 0.9844,
"step": 350
},
{
"epoch": 0.67,
"grad_norm": 0.40510951524103034,
"learning_rate": 2.6523406192849386e-05,
"loss": 0.9688,
"step": 351
},
{
"epoch": 0.67,
"grad_norm": 0.38849419233186266,
"learning_rate": 2.62524463966475e-05,
"loss": 1.0312,
"step": 352
},
{
"epoch": 0.67,
"grad_norm": 0.38718946711482044,
"learning_rate": 2.5982384185171906e-05,
"loss": 1.0078,
"step": 353
},
{
"epoch": 0.67,
"grad_norm": 0.3842929434765999,
"learning_rate": 2.5713229765946357e-05,
"loss": 1.0156,
"step": 354
},
{
"epoch": 0.67,
"grad_norm": 0.39359245264071546,
"learning_rate": 2.544499331218274e-05,
"loss": 0.9883,
"step": 355
},
{
"epoch": 0.68,
"grad_norm": 0.4033582052703098,
"learning_rate": 2.5177684962396765e-05,
"loss": 1.0,
"step": 356
},
{
"epoch": 0.68,
"grad_norm": 0.38879954929363575,
"learning_rate": 2.4911314820024568e-05,
"loss": 1.0078,
"step": 357
},
{
"epoch": 0.68,
"grad_norm": 0.40327329986367616,
"learning_rate": 2.4645892953040962e-05,
"loss": 1.0078,
"step": 358
},
{
"epoch": 0.68,
"grad_norm": 0.3920586735187762,
"learning_rate": 2.438142939357882e-05,
"loss": 1.0,
"step": 359
},
{
"epoch": 0.68,
"grad_norm": 0.4143087212185901,
"learning_rate": 2.4117934137550003e-05,
"loss": 0.9961,
"step": 360
},
{
"epoch": 0.69,
"grad_norm": 0.3935944385117418,
"learning_rate": 2.38554171442674e-05,
"loss": 1.0156,
"step": 361
},
{
"epoch": 0.69,
"grad_norm": 0.4077637452431266,
"learning_rate": 2.3593888336068597e-05,
"loss": 0.9883,
"step": 362
},
{
"epoch": 0.69,
"grad_norm": 0.4025782626367008,
"learning_rate": 2.3333357597940793e-05,
"loss": 1.0078,
"step": 363
},
{
"epoch": 0.69,
"grad_norm": 0.4036874759716257,
"learning_rate": 2.3073834777147236e-05,
"loss": 0.9922,
"step": 364
},
{
"epoch": 0.69,
"grad_norm": 0.4001732077591455,
"learning_rate": 2.281532968285494e-05,
"loss": 1.0156,
"step": 365
},
{
"epoch": 0.69,
"grad_norm": 0.38090979037495276,
"learning_rate": 2.2557852085764053e-05,
"loss": 0.9766,
"step": 366
},
{
"epoch": 0.7,
"grad_norm": 0.40328341409897206,
"learning_rate": 2.230141171773836e-05,
"loss": 0.9922,
"step": 367
},
{
"epoch": 0.7,
"grad_norm": 0.3838644049185376,
"learning_rate": 2.204601827143769e-05,
"loss": 0.9844,
"step": 368
},
{
"epoch": 0.7,
"grad_norm": 0.38931148821481226,
"learning_rate": 2.179168139995134e-05,
"loss": 1.0156,
"step": 369
},
{
"epoch": 0.7,
"grad_norm": 0.4024461082037364,
"learning_rate": 2.1538410716433417e-05,
"loss": 0.9805,
"step": 370
},
{
"epoch": 0.7,
"grad_norm": 0.3736565961055131,
"learning_rate": 2.1286215793739302e-05,
"loss": 1.0078,
"step": 371
},
{
"epoch": 0.71,
"grad_norm": 0.3833954847678005,
"learning_rate": 2.103510616406396e-05,
"loss": 1.0156,
"step": 372
},
{
"epoch": 0.71,
"grad_norm": 0.38010761506947877,
"learning_rate": 2.0785091318581577e-05,
"loss": 0.9883,
"step": 373
},
{
"epoch": 0.71,
"grad_norm": 0.3883359267276046,
"learning_rate": 2.053618070708691e-05,
"loss": 1.0234,
"step": 374
},
{
"epoch": 0.71,
"grad_norm": 0.38727784403682314,
"learning_rate": 2.0288383737638006e-05,
"loss": 1.0,
"step": 375
},
{
"epoch": 0.71,
"grad_norm": 0.37785442157781823,
"learning_rate": 2.0041709776200664e-05,
"loss": 0.9766,
"step": 376
},
{
"epoch": 0.72,
"grad_norm": 0.374719617466221,
"learning_rate": 1.9796168146294412e-05,
"loss": 0.9727,
"step": 377
},
{
"epoch": 0.72,
"grad_norm": 0.3978681831649405,
"learning_rate": 1.9551768128640175e-05,
"loss": 1.0156,
"step": 378
},
{
"epoch": 0.72,
"grad_norm": 0.40863119356002636,
"learning_rate": 1.9308518960809353e-05,
"loss": 0.9805,
"step": 379
},
{
"epoch": 0.72,
"grad_norm": 0.37974978552322725,
"learning_rate": 1.9066429836874844e-05,
"loss": 1.0078,
"step": 380
},
{
"epoch": 0.72,
"grad_norm": 0.38613047300957787,
"learning_rate": 1.8825509907063327e-05,
"loss": 0.9688,
"step": 381
},
{
"epoch": 0.72,
"grad_norm": 0.39858125559813595,
"learning_rate": 1.8585768277409636e-05,
"loss": 1.0078,
"step": 382
},
{
"epoch": 0.73,
"grad_norm": 0.39902011519219927,
"learning_rate": 1.83472140094124e-05,
"loss": 0.9609,
"step": 383
},
{
"epoch": 0.73,
"grad_norm": 0.39852324308132375,
"learning_rate": 1.810985611969167e-05,
"loss": 0.9766,
"step": 384
},
{
"epoch": 0.73,
"grad_norm": 0.39740793737557956,
"learning_rate": 1.7873703579648034e-05,
"loss": 0.9961,
"step": 385
},
{
"epoch": 0.73,
"grad_norm": 0.38900589785267126,
"learning_rate": 1.763876531512354e-05,
"loss": 0.9688,
"step": 386
},
{
"epoch": 0.73,
"grad_norm": 0.386397499552252,
"learning_rate": 1.7405050206064373e-05,
"loss": 1.0078,
"step": 387
},
{
"epoch": 0.74,
"grad_norm": 0.39337570995784904,
"learning_rate": 1.7172567086185205e-05,
"loss": 0.9727,
"step": 388
},
{
"epoch": 0.74,
"grad_norm": 0.39083779363840615,
"learning_rate": 1.694132474263526e-05,
"loss": 0.9961,
"step": 389
},
{
"epoch": 0.74,
"grad_norm": 0.38939147028830934,
"learning_rate": 1.671133191566624e-05,
"loss": 0.9844,
"step": 390
},
{
"epoch": 0.74,
"grad_norm": 0.44238601157646656,
"learning_rate": 1.6482597298301916e-05,
"loss": 0.9805,
"step": 391
},
{
"epoch": 0.74,
"grad_norm": 0.39265218999260465,
"learning_rate": 1.625512953600966e-05,
"loss": 1.0,
"step": 392
},
{
"epoch": 0.75,
"grad_norm": 0.3835027930578862,
"learning_rate": 1.6028937226373536e-05,
"loss": 0.9766,
"step": 393
},
{
"epoch": 0.75,
"grad_norm": 0.4052201995250459,
"learning_rate": 1.5804028918769485e-05,
"loss": 1.0,
"step": 394
},
{
"epoch": 0.75,
"grad_norm": 0.3929827776726385,
"learning_rate": 1.5580413114042e-05,
"loss": 0.9766,
"step": 395
},
{
"epoch": 0.75,
"grad_norm": 0.37984173524249143,
"learning_rate": 1.535809826418304e-05,
"loss": 0.9922,
"step": 396
},
{
"epoch": 0.75,
"grad_norm": 0.39211944754121586,
"learning_rate": 1.5137092772012368e-05,
"loss": 0.9844,
"step": 397
},
{
"epoch": 0.76,
"grad_norm": 0.39835572557937476,
"learning_rate": 1.4917404990860112e-05,
"loss": 1.0156,
"step": 398
},
{
"epoch": 0.76,
"grad_norm": 0.3685838055250128,
"learning_rate": 1.469904322425092e-05,
"loss": 1.0078,
"step": 399
},
{
"epoch": 0.76,
"grad_norm": 0.3708790521540525,
"learning_rate": 1.4482015725590158e-05,
"loss": 0.9727,
"step": 400
},
{
"epoch": 0.76,
"grad_norm": 0.3796896559412745,
"learning_rate": 1.4266330697851954e-05,
"loss": 0.9961,
"step": 401
},
{
"epoch": 0.76,
"grad_norm": 0.374826650593594,
"learning_rate": 1.4051996293269182e-05,
"loss": 0.9766,
"step": 402
},
{
"epoch": 0.76,
"grad_norm": 0.38661887803441924,
"learning_rate": 1.3839020613025266e-05,
"loss": 1.0,
"step": 403
},
{
"epoch": 0.77,
"grad_norm": 0.3745066620120658,
"learning_rate": 1.3627411706948029e-05,
"loss": 0.9688,
"step": 404
},
{
"epoch": 0.77,
"grad_norm": 0.3772263032436855,
"learning_rate": 1.3417177573205398e-05,
"loss": 0.9922,
"step": 405
},
{
"epoch": 0.77,
"grad_norm": 0.37135625308638415,
"learning_rate": 1.3208326158003171e-05,
"loss": 0.9844,
"step": 406
},
{
"epoch": 0.77,
"grad_norm": 0.39016768277198083,
"learning_rate": 1.3000865355284563e-05,
"loss": 0.9922,
"step": 407
},
{
"epoch": 0.77,
"grad_norm": 0.3927111734870179,
"learning_rate": 1.2794803006431982e-05,
"loss": 0.9688,
"step": 408
},
{
"epoch": 0.78,
"grad_norm": 0.37854433770919677,
"learning_rate": 1.2590146899970446e-05,
"loss": 0.9805,
"step": 409
},
{
"epoch": 0.78,
"grad_norm": 0.39232004793616226,
"learning_rate": 1.2386904771273444e-05,
"loss": 0.9961,
"step": 410
},
{
"epoch": 0.78,
"grad_norm": 0.38535521167421993,
"learning_rate": 1.218508430227035e-05,
"loss": 0.9844,
"step": 411
},
{
"epoch": 0.78,
"grad_norm": 0.3686602860581547,
"learning_rate": 1.1984693121156226e-05,
"loss": 0.9922,
"step": 412
},
{
"epoch": 0.78,
"grad_norm": 0.3859440212089631,
"learning_rate": 1.1785738802103396e-05,
"loss": 1.0156,
"step": 413
},
{
"epoch": 0.79,
"grad_norm": 0.3921328346507493,
"learning_rate": 1.158822886497521e-05,
"loss": 0.9688,
"step": 414
},
{
"epoch": 0.79,
"grad_norm": 0.39742562204123394,
"learning_rate": 1.1392170775041788e-05,
"loss": 1.0078,
"step": 415
},
{
"epoch": 0.79,
"grad_norm": 0.3713395354970734,
"learning_rate": 1.1197571942697938e-05,
"loss": 0.9805,
"step": 416
},
{
"epoch": 0.79,
"grad_norm": 0.37345209868445495,
"learning_rate": 1.1004439723182942e-05,
"loss": 1.0,
"step": 417
},
{
"epoch": 0.79,
"grad_norm": 0.3856263873198924,
"learning_rate": 1.0812781416302642e-05,
"loss": 0.9648,
"step": 418
},
{
"epoch": 0.8,
"grad_norm": 0.37612362914326103,
"learning_rate": 1.0622604266153485e-05,
"loss": 1.0156,
"step": 419
},
{
"epoch": 0.8,
"grad_norm": 0.3735721290488117,
"learning_rate": 1.0433915460848764e-05,
"loss": 0.9531,
"step": 420
},
{
"epoch": 0.8,
"grad_norm": 0.3831137038951988,
"learning_rate": 1.0246722132246856e-05,
"loss": 0.9727,
"step": 421
},
{
"epoch": 0.8,
"grad_norm": 0.38425446590883783,
"learning_rate": 1.0061031355681766e-05,
"loss": 0.9805,
"step": 422
},
{
"epoch": 0.8,
"grad_norm": 0.37733541326906417,
"learning_rate": 9.876850149695555e-06,
"loss": 0.9727,
"step": 423
},
{
"epoch": 0.8,
"grad_norm": 0.3919247049760062,
"learning_rate": 9.694185475773216e-06,
"loss": 0.9844,
"step": 424
},
{
"epoch": 0.81,
"grad_norm": 0.37412167877987657,
"learning_rate": 9.513044238079427e-06,
"loss": 0.9844,
"step": 425
},
{
"epoch": 0.81,
"grad_norm": 0.3960239887267585,
"learning_rate": 9.333433283197702e-06,
"loss": 1.0078,
"step": 426
},
{
"epoch": 0.81,
"grad_norm": 0.36022823771296714,
"learning_rate": 9.15535939987151e-06,
"loss": 0.9648,
"step": 427
},
{
"epoch": 0.81,
"grad_norm": 0.3789422853471504,
"learning_rate": 8.978829318747745e-06,
"loss": 0.9844,
"step": 428
},
{
"epoch": 0.81,
"grad_norm": 0.39075327374116037,
"learning_rate": 8.803849712122292e-06,
"loss": 1.0156,
"step": 429
},
{
"epoch": 0.82,
"grad_norm": 0.389242893802255,
"learning_rate": 8.63042719368789e-06,
"loss": 0.9766,
"step": 430
},
{
"epoch": 0.82,
"grad_norm": 0.3779626313074645,
"learning_rate": 8.458568318284088e-06,
"loss": 0.9883,
"step": 431
},
{
"epoch": 0.82,
"grad_norm": 0.39319540149781285,
"learning_rate": 8.288279581649528e-06,
"loss": 1.0,
"step": 432
},
{
"epoch": 0.82,
"grad_norm": 0.3759206398973123,
"learning_rate": 8.11956742017641e-06,
"loss": 0.9766,
"step": 433
},
{
"epoch": 0.82,
"grad_norm": 0.3938599895612702,
"learning_rate": 7.952438210667268e-06,
"loss": 1.0156,
"step": 434
},
{
"epoch": 0.83,
"grad_norm": 0.3821358600474751,
"learning_rate": 7.786898270093846e-06,
"loss": 0.9766,
"step": 435
},
{
"epoch": 0.83,
"grad_norm": 0.3811416337365797,
"learning_rate": 7.6229538553584556e-06,
"loss": 0.9844,
"step": 436
},
{
"epoch": 0.83,
"grad_norm": 0.384284050269022,
"learning_rate": 7.460611163057346e-06,
"loss": 0.9727,
"step": 437
},
{
"epoch": 0.83,
"grad_norm": 0.3868115258369787,
"learning_rate": 7.299876329246652e-06,
"loss": 0.9805,
"step": 438
},
{
"epoch": 0.83,
"grad_norm": 0.37631545582941645,
"learning_rate": 7.140755429210316e-06,
"loss": 0.9766,
"step": 439
},
{
"epoch": 0.83,
"grad_norm": 0.3763933121794425,
"learning_rate": 6.983254477230588e-06,
"loss": 0.9805,
"step": 440
},
{
"epoch": 0.84,
"grad_norm": 0.38966862555598764,
"learning_rate": 6.827379426360614e-06,
"loss": 0.9414,
"step": 441
},
{
"epoch": 0.84,
"grad_norm": 0.38924826237998583,
"learning_rate": 6.673136168199467e-06,
"loss": 0.9766,
"step": 442
},
{
"epoch": 0.84,
"grad_norm": 0.3716162541474658,
"learning_rate": 6.52053053266945e-06,
"loss": 1.0078,
"step": 443
},
{
"epoch": 0.84,
"grad_norm": 0.40192334917868294,
"learning_rate": 6.369568287795791e-06,
"loss": 1.0,
"step": 444
},
{
"epoch": 0.84,
"grad_norm": 0.3827921806873429,
"learning_rate": 6.22025513948854e-06,
"loss": 0.9609,
"step": 445
},
{
"epoch": 0.85,
"grad_norm": 0.3651988094584562,
"learning_rate": 6.0725967313269736e-06,
"loss": 1.0234,
"step": 446
},
{
"epoch": 0.85,
"grad_norm": 0.3854817154179708,
"learning_rate": 5.926598644346259e-06,
"loss": 0.9883,
"step": 447
},
{
"epoch": 0.85,
"grad_norm": 0.4041053789118015,
"learning_rate": 5.782266396826536e-06,
"loss": 0.9922,
"step": 448
},
{
"epoch": 0.85,
"grad_norm": 0.3840835793609324,
"learning_rate": 5.639605444084273e-06,
"loss": 0.9922,
"step": 449
},
{
"epoch": 0.85,
"grad_norm": 0.3827506231880665,
"learning_rate": 5.498621178266167e-06,
"loss": 0.9844,
"step": 450
},
{
"epoch": 0.86,
"grad_norm": 0.37488009002085854,
"learning_rate": 5.3593189281452625e-06,
"loss": 0.9883,
"step": 451
},
{
"epoch": 0.86,
"grad_norm": 0.3644346732423304,
"learning_rate": 5.221703958919572e-06,
"loss": 1.0,
"step": 452
},
{
"epoch": 0.86,
"grad_norm": 0.3648839469610167,
"learning_rate": 5.085781472013051e-06,
"loss": 0.9844,
"step": 453
},
{
"epoch": 0.86,
"grad_norm": 0.3888452684975849,
"learning_rate": 4.951556604879048e-06,
"loss": 0.9961,
"step": 454
},
{
"epoch": 0.86,
"grad_norm": 0.38081986253747263,
"learning_rate": 4.819034430806046e-06,
"loss": 1.0234,
"step": 455
},
{
"epoch": 0.87,
"grad_norm": 0.37504903165101505,
"learning_rate": 4.688219958725981e-06,
"loss": 0.9922,
"step": 456
},
{
"epoch": 0.87,
"grad_norm": 0.3896425547477861,
"learning_rate": 4.559118133024853e-06,
"loss": 0.9688,
"step": 457
},
{
"epoch": 0.87,
"grad_norm": 0.3794758055441137,
"learning_rate": 4.431733833355933e-06,
"loss": 0.9766,
"step": 458
},
{
"epoch": 0.87,
"grad_norm": 0.39706131136751927,
"learning_rate": 4.3060718744552256e-06,
"loss": 1.0,
"step": 459
},
{
"epoch": 0.87,
"grad_norm": 0.3697319148486722,
"learning_rate": 4.1821370059595575e-06,
"loss": 0.9961,
"step": 460
},
{
"epoch": 0.87,
"grad_norm": 0.3732079712646108,
"learning_rate": 4.05993391222701e-06,
"loss": 0.9922,
"step": 461
},
{
"epoch": 0.88,
"grad_norm": 0.38872894699778315,
"learning_rate": 3.939467212159925e-06,
"loss": 0.957,
"step": 462
},
{
"epoch": 0.88,
"grad_norm": 0.39026050723200223,
"learning_rate": 3.820741459030253e-06,
"loss": 1.0,
"step": 463
},
{
"epoch": 0.88,
"grad_norm": 0.3816637074881011,
"learning_rate": 3.70376114030751e-06,
"loss": 0.9727,
"step": 464
},
{
"epoch": 0.88,
"grad_norm": 0.381226241519682,
"learning_rate": 3.5885306774891215e-06,
"loss": 0.957,
"step": 465
},
{
"epoch": 0.88,
"grad_norm": 0.3815235626105606,
"learning_rate": 3.475054425933344e-06,
"loss": 1.0078,
"step": 466
},
{
"epoch": 0.89,
"grad_norm": 0.37253784076771357,
"learning_rate": 3.3633366746946004e-06,
"loss": 0.9844,
"step": 467
},
{
"epoch": 0.89,
"grad_norm": 0.3919508192472245,
"learning_rate": 3.2533816463614253e-06,
"loss": 0.9805,
"step": 468
},
{
"epoch": 0.89,
"grad_norm": 0.3589524968140092,
"learning_rate": 3.145193496896809e-06,
"loss": 0.9805,
"step": 469
},
{
"epoch": 0.89,
"grad_norm": 0.37438948854975374,
"learning_rate": 3.0387763154811454e-06,
"loss": 0.9766,
"step": 470
},
{
"epoch": 0.89,
"grad_norm": 0.37638045292642797,
"learning_rate": 2.934134124357646e-06,
"loss": 0.9766,
"step": 471
},
{
"epoch": 0.9,
"grad_norm": 0.37548669011906294,
"learning_rate": 2.83127087868037e-06,
"loss": 0.9961,
"step": 472
},
{
"epoch": 0.9,
"grad_norm": 0.37099994562581057,
"learning_rate": 2.7301904663646516e-06,
"loss": 0.9805,
"step": 473
},
{
"epoch": 0.9,
"grad_norm": 0.3955783574675612,
"learning_rate": 2.630896707940228e-06,
"loss": 1.0,
"step": 474
},
{
"epoch": 0.9,
"grad_norm": 0.3781205817722125,
"learning_rate": 2.5333933564067492e-06,
"loss": 0.9648,
"step": 475
},
{
"epoch": 0.9,
"grad_norm": 0.3753919414015063,
"learning_rate": 2.4376840970920134e-06,
"loss": 0.9766,
"step": 476
},
{
"epoch": 0.91,
"grad_norm": 0.37073539113386406,
"learning_rate": 2.343772547512613e-06,
"loss": 0.9727,
"step": 477
},
{
"epoch": 0.91,
"grad_norm": 0.38467962659041166,
"learning_rate": 2.2516622572372414e-06,
"loss": 0.9961,
"step": 478
},
{
"epoch": 0.91,
"grad_norm": 0.37351142314581637,
"learning_rate": 2.1613567077524875e-06,
"loss": 0.9648,
"step": 479
},
{
"epoch": 0.91,
"grad_norm": 0.3827518716422808,
"learning_rate": 2.0728593123312935e-06,
"loss": 0.957,
"step": 480
},
{
"epoch": 0.91,
"grad_norm": 0.38424950524851154,
"learning_rate": 1.9861734159038968e-06,
"loss": 0.9805,
"step": 481
},
{
"epoch": 0.91,
"grad_norm": 0.3738580907918172,
"learning_rate": 1.9013022949314563e-06,
"loss": 1.0,
"step": 482
},
{
"epoch": 0.92,
"grad_norm": 0.35985698703033747,
"learning_rate": 1.8182491572821536e-06,
"loss": 0.9688,
"step": 483
},
{
"epoch": 0.92,
"grad_norm": 0.37376402159660854,
"learning_rate": 1.7370171421099978e-06,
"loss": 0.9805,
"step": 484
},
{
"epoch": 0.92,
"grad_norm": 0.3652435400198038,
"learning_rate": 1.6576093197361253e-06,
"loss": 1.0,
"step": 485
},
{
"epoch": 0.92,
"grad_norm": 0.3740292147578786,
"learning_rate": 1.5800286915328033e-06,
"loss": 0.9922,
"step": 486
},
{
"epoch": 0.92,
"grad_norm": 0.38020345497873537,
"learning_rate": 1.5042781898099434e-06,
"loss": 0.9883,
"step": 487
},
{
"epoch": 0.93,
"grad_norm": 0.38405525205639585,
"learning_rate": 1.430360677704301e-06,
"loss": 0.9844,
"step": 488
},
{
"epoch": 0.93,
"grad_norm": 0.37116971953078254,
"learning_rate": 1.3582789490712179e-06,
"loss": 0.9727,
"step": 489
},
{
"epoch": 0.93,
"grad_norm": 0.3675736740984735,
"learning_rate": 1.2880357283790777e-06,
"loss": 0.9961,
"step": 490
},
{
"epoch": 0.93,
"grad_norm": 0.3615376408573602,
"learning_rate": 1.2196336706062739e-06,
"loss": 0.9805,
"step": 491
},
{
"epoch": 0.93,
"grad_norm": 0.3749327617084023,
"learning_rate": 1.153075361140915e-06,
"loss": 1.0156,
"step": 492
},
{
"epoch": 0.94,
"grad_norm": 0.36973528586380827,
"learning_rate": 1.0883633156830553e-06,
"loss": 1.0234,
"step": 493
},
{
"epoch": 0.94,
"grad_norm": 0.36950719264405446,
"learning_rate": 1.0254999801496247e-06,
"loss": 0.9883,
"step": 494
},
{
"epoch": 0.94,
"grad_norm": 0.3811515872434199,
"learning_rate": 9.644877305819976e-07,
"loss": 0.9844,
"step": 495
},
{
"epoch": 0.94,
"grad_norm": 0.37251325878792274,
"learning_rate": 9.053288730561716e-07,
"loss": 1.0,
"step": 496
},
{
"epoch": 0.94,
"grad_norm": 0.3784945023163416,
"learning_rate": 8.480256435956124e-07,
"loss": 0.9883,
"step": 497
},
{
"epoch": 0.94,
"grad_norm": 0.3823913771851788,
"learning_rate": 7.925802080867128e-07,
"loss": 0.9922,
"step": 498
},
{
"epoch": 0.95,
"grad_norm": 0.3723209207003651,
"learning_rate": 7.389946621969679e-07,
"loss": 0.9766,
"step": 499
},
{
"epoch": 0.95,
"grad_norm": 0.3734561531299352,
"learning_rate": 6.872710312957497e-07,
"loss": 0.9883,
"step": 500
},
{
"epoch": 0.95,
"grad_norm": 0.3718975890363552,
"learning_rate": 6.374112703777301e-07,
"loss": 0.9727,
"step": 501
},
{
"epoch": 0.95,
"grad_norm": 0.3853867112825849,
"learning_rate": 5.894172639890394e-07,
"loss": 0.9727,
"step": 502
},
{
"epoch": 0.95,
"grad_norm": 0.3666171985176669,
"learning_rate": 5.432908261559733e-07,
"loss": 0.9727,
"step": 503
},
{
"epoch": 0.96,
"grad_norm": 0.37512968709593764,
"learning_rate": 4.990337003164924e-07,
"loss": 0.9688,
"step": 504
},
{
"epoch": 0.96,
"grad_norm": 0.38232480313500466,
"learning_rate": 4.5664755925426403e-07,
"loss": 0.9844,
"step": 505
},
{
"epoch": 0.96,
"grad_norm": 0.3700171570622694,
"learning_rate": 4.1613400503550114e-07,
"loss": 0.9766,
"step": 506
},
{
"epoch": 0.96,
"grad_norm": 0.3900755929317903,
"learning_rate": 3.7749456894834447e-07,
"loss": 0.9609,
"step": 507
},
{
"epoch": 0.96,
"grad_norm": 0.36995686804328504,
"learning_rate": 3.4073071144502533e-07,
"loss": 0.9688,
"step": 508
},
{
"epoch": 0.97,
"grad_norm": 0.36023064413895745,
"learning_rate": 3.058438220866544e-07,
"loss": 0.9883,
"step": 509
},
{
"epoch": 0.97,
"grad_norm": 0.37996338814367037,
"learning_rate": 2.72835219490708e-07,
"loss": 0.9805,
"step": 510
},
{
"epoch": 0.97,
"grad_norm": 0.36362699174525226,
"learning_rate": 2.4170615128117357e-07,
"loss": 0.9453,
"step": 511
},
{
"epoch": 0.97,
"grad_norm": 0.37920383685543685,
"learning_rate": 2.12457794041393e-07,
"loss": 1.0156,
"step": 512
},
{
"epoch": 0.97,
"grad_norm": 0.3802256086316899,
"learning_rate": 1.850912532696092e-07,
"loss": 0.9883,
"step": 513
},
{
"epoch": 0.98,
"grad_norm": 0.3840079426171848,
"learning_rate": 1.596075633371774e-07,
"loss": 1.0,
"step": 514
},
{
"epoch": 0.98,
"grad_norm": 0.372033357737712,
"learning_rate": 1.3600768744944647e-07,
"loss": 0.9727,
"step": 515
},
{
"epoch": 0.98,
"grad_norm": 0.3700966978882948,
"learning_rate": 1.1429251760938231e-07,
"loss": 0.9453,
"step": 516
},
{
"epoch": 0.98,
"grad_norm": 0.37138234312324336,
"learning_rate": 9.446287458383385e-08,
"loss": 0.9531,
"step": 517
},
{
"epoch": 0.98,
"grad_norm": 0.38319867619202025,
"learning_rate": 7.651950787251339e-08,
"loss": 0.9883,
"step": 518
},
{
"epoch": 0.98,
"grad_norm": 0.36834536474781127,
"learning_rate": 6.046309567968588e-08,
"loss": 0.9805,
"step": 519
},
{
"epoch": 0.99,
"grad_norm": 0.3838857136930694,
"learning_rate": 4.6294244888500645e-08,
"loss": 1.0,
"step": 520
},
{
"epoch": 0.99,
"grad_norm": 0.37142652746946664,
"learning_rate": 3.4013491038087375e-08,
"loss": 1.0,
"step": 521
},
{
"epoch": 0.99,
"grad_norm": 0.3764293979963409,
"learning_rate": 2.3621298303294625e-08,
"loss": 0.9688,
"step": 522
},
{
"epoch": 0.99,
"grad_norm": 0.3612657340251834,
"learning_rate": 1.511805947714273e-08,
"loss": 0.9961,
"step": 523
},
{
"epoch": 0.99,
"grad_norm": 0.3846047566682503,
"learning_rate": 8.504095956002323e-09,
"loss": 0.9922,
"step": 524
},
{
"epoch": 1.0,
"grad_norm": 0.37603252090597145,
"learning_rate": 3.7796577274096245e-09,
"loss": 0.9727,
"step": 525
},
{
"epoch": 1.0,
"grad_norm": 0.3918810108171524,
"learning_rate": 9.449233606573238e-10,
"loss": 0.9805,
"step": 526
},
{
"epoch": 1.0,
"grad_norm": 0.37221487121132474,
"learning_rate": 0.0,
"loss": 1.0078,
"step": 527
},
{
"epoch": 1.0,
"step": 527,
"total_flos": 2.9568341572059136e+16,
"train_loss": 1.0576598078747628,
"train_runtime": 6355.9639,
"train_samples_per_second": 340.075,
"train_steps_per_second": 0.083
}
],
"logging_steps": 1.0,
"max_steps": 527,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 210,
"total_flos": 2.9568341572059136e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}