{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.99945085118067,
"eval_steps": 114,
"global_step": 455,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.1220703125,
"learning_rate": 2e-05,
"loss": 1.1845,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 1.1820911169052124,
"eval_runtime": 382.4714,
"eval_samples_per_second": 7.676,
"eval_steps_per_second": 3.838,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 0.1162109375,
"learning_rate": 4e-05,
"loss": 1.1777,
"step": 2
},
{
"epoch": 0.01,
"grad_norm": 0.115234375,
"learning_rate": 6e-05,
"loss": 1.1406,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 0.126953125,
"learning_rate": 8e-05,
"loss": 1.2366,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 0.150390625,
"learning_rate": 0.0001,
"loss": 1.2321,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 0.1513671875,
"learning_rate": 0.00012,
"loss": 1.0941,
"step": 6
},
{
"epoch": 0.02,
"grad_norm": 0.103515625,
"learning_rate": 0.00014,
"loss": 1.0609,
"step": 7
},
{
"epoch": 0.02,
"grad_norm": 0.10498046875,
"learning_rate": 0.00016,
"loss": 1.0332,
"step": 8
},
{
"epoch": 0.02,
"grad_norm": 0.0927734375,
"learning_rate": 0.00018,
"loss": 1.1241,
"step": 9
},
{
"epoch": 0.02,
"grad_norm": 0.1162109375,
"learning_rate": 0.0002,
"loss": 1.1964,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 0.150390625,
"learning_rate": 0.00019999750800065415,
"loss": 1.254,
"step": 11
},
{
"epoch": 0.03,
"grad_norm": 0.130859375,
"learning_rate": 0.00019999003212681782,
"loss": 1.0003,
"step": 12
},
{
"epoch": 0.03,
"grad_norm": 0.10888671875,
"learning_rate": 0.00019997757275108847,
"loss": 1.033,
"step": 13
},
{
"epoch": 0.03,
"grad_norm": 0.09619140625,
"learning_rate": 0.00019996013049444118,
"loss": 1.0915,
"step": 14
},
{
"epoch": 0.03,
"grad_norm": 0.09521484375,
"learning_rate": 0.00019993770622619782,
"loss": 1.027,
"step": 15
},
{
"epoch": 0.04,
"grad_norm": 0.08154296875,
"learning_rate": 0.00019991030106398364,
"loss": 0.8964,
"step": 16
},
{
"epoch": 0.04,
"grad_norm": 0.08203125,
"learning_rate": 0.00019987791637367158,
"loss": 0.9778,
"step": 17
},
{
"epoch": 0.04,
"grad_norm": 0.07666015625,
"learning_rate": 0.00019984055376931413,
"loss": 1.0588,
"step": 18
},
{
"epoch": 0.04,
"grad_norm": 0.076171875,
"learning_rate": 0.00019979821511306308,
"loss": 0.9921,
"step": 19
},
{
"epoch": 0.04,
"grad_norm": 0.0830078125,
"learning_rate": 0.00019975090251507638,
"loss": 1.0424,
"step": 20
},
{
"epoch": 0.05,
"grad_norm": 0.08642578125,
"learning_rate": 0.0001996986183334134,
"loss": 1.0514,
"step": 21
},
{
"epoch": 0.05,
"grad_norm": 0.07568359375,
"learning_rate": 0.00019964136517391706,
"loss": 1.0248,
"step": 22
},
{
"epoch": 0.05,
"grad_norm": 0.0927734375,
"learning_rate": 0.00019957914589008405,
"loss": 1.0394,
"step": 23
},
{
"epoch": 0.05,
"grad_norm": 0.07568359375,
"learning_rate": 0.00019951196358292266,
"loss": 1.0678,
"step": 24
},
{
"epoch": 0.05,
"grad_norm": 0.07861328125,
"learning_rate": 0.0001994398216007982,
"loss": 0.9298,
"step": 25
},
{
"epoch": 0.06,
"grad_norm": 0.095703125,
"learning_rate": 0.00019936272353926615,
"loss": 0.991,
"step": 26
},
{
"epoch": 0.06,
"grad_norm": 0.07958984375,
"learning_rate": 0.00019928067324089286,
"loss": 0.9823,
"step": 27
},
{
"epoch": 0.06,
"grad_norm": 0.08349609375,
"learning_rate": 0.00019919367479506413,
"loss": 1.0103,
"step": 28
},
{
"epoch": 0.06,
"grad_norm": 0.07568359375,
"learning_rate": 0.00019910173253778136,
"loss": 0.9725,
"step": 29
},
{
"epoch": 0.07,
"grad_norm": 0.07421875,
"learning_rate": 0.00019900485105144543,
"loss": 1.0453,
"step": 30
},
{
"epoch": 0.07,
"grad_norm": 0.0830078125,
"learning_rate": 0.00019890303516462843,
"loss": 1.0282,
"step": 31
},
{
"epoch": 0.07,
"grad_norm": 0.08544921875,
"learning_rate": 0.00019879628995183272,
"loss": 0.9683,
"step": 32
},
{
"epoch": 0.07,
"grad_norm": 0.0732421875,
"learning_rate": 0.00019868462073323838,
"loss": 0.9711,
"step": 33
},
{
"epoch": 0.07,
"grad_norm": 0.07666015625,
"learning_rate": 0.00019856803307443781,
"loss": 0.9303,
"step": 34
},
{
"epoch": 0.08,
"grad_norm": 0.0751953125,
"learning_rate": 0.00019844653278615833,
"loss": 0.9018,
"step": 35
},
{
"epoch": 0.08,
"grad_norm": 0.07470703125,
"learning_rate": 0.0001983201259239728,
"loss": 0.932,
"step": 36
},
{
"epoch": 0.08,
"grad_norm": 0.072265625,
"learning_rate": 0.00019818881878799752,
"loss": 0.9073,
"step": 37
},
{
"epoch": 0.08,
"grad_norm": 0.0791015625,
"learning_rate": 0.00019805261792257847,
"loss": 0.9367,
"step": 38
},
{
"epoch": 0.09,
"grad_norm": 0.0771484375,
"learning_rate": 0.00019791153011596496,
"loss": 1.0177,
"step": 39
},
{
"epoch": 0.09,
"grad_norm": 0.07666015625,
"learning_rate": 0.00019776556239997146,
"loss": 0.8887,
"step": 40
},
{
"epoch": 0.09,
"grad_norm": 0.08056640625,
"learning_rate": 0.000197614722049627,
"loss": 1.0279,
"step": 41
},
{
"epoch": 0.09,
"grad_norm": 0.08544921875,
"learning_rate": 0.00019745901658281266,
"loss": 1.0993,
"step": 42
},
{
"epoch": 0.09,
"grad_norm": 0.0751953125,
"learning_rate": 0.00019729845375988694,
"loss": 0.9436,
"step": 43
},
{
"epoch": 0.1,
"grad_norm": 0.08203125,
"learning_rate": 0.00019713304158329873,
"loss": 1.0442,
"step": 44
},
{
"epoch": 0.1,
"grad_norm": 0.08251953125,
"learning_rate": 0.00019696278829718883,
"loss": 1.0196,
"step": 45
},
{
"epoch": 0.1,
"grad_norm": 0.09130859375,
"learning_rate": 0.00019678770238697874,
"loss": 1.0415,
"step": 46
},
{
"epoch": 0.1,
"grad_norm": 0.08935546875,
"learning_rate": 0.00019660779257894796,
"loss": 0.983,
"step": 47
},
{
"epoch": 0.11,
"grad_norm": 0.07763671875,
"learning_rate": 0.000196423067839799,
"loss": 0.9415,
"step": 48
},
{
"epoch": 0.11,
"grad_norm": 0.09228515625,
"learning_rate": 0.00019623353737621035,
"loss": 0.9829,
"step": 49
},
{
"epoch": 0.11,
"grad_norm": 0.10009765625,
"learning_rate": 0.00019603921063437793,
"loss": 0.9223,
"step": 50
},
{
"epoch": 0.11,
"grad_norm": 0.08447265625,
"learning_rate": 0.00019584009729954396,
"loss": 0.8714,
"step": 51
},
{
"epoch": 0.11,
"grad_norm": 0.08837890625,
"learning_rate": 0.00019563620729551445,
"loss": 0.8981,
"step": 52
},
{
"epoch": 0.12,
"grad_norm": 0.103515625,
"learning_rate": 0.00019542755078416456,
"loss": 0.9528,
"step": 53
},
{
"epoch": 0.12,
"grad_norm": 0.08154296875,
"learning_rate": 0.00019521413816493206,
"loss": 1.0403,
"step": 54
},
{
"epoch": 0.12,
"grad_norm": 0.08251953125,
"learning_rate": 0.0001949959800742991,
"loss": 1.0177,
"step": 55
},
{
"epoch": 0.12,
"grad_norm": 0.09375,
"learning_rate": 0.00019477308738526206,
"loss": 1.0504,
"step": 56
},
{
"epoch": 0.13,
"grad_norm": 0.087890625,
"learning_rate": 0.00019454547120678967,
"loss": 0.9463,
"step": 57
},
{
"epoch": 0.13,
"grad_norm": 0.09130859375,
"learning_rate": 0.00019431314288326926,
"loss": 0.907,
"step": 58
},
{
"epoch": 0.13,
"grad_norm": 0.083984375,
"learning_rate": 0.00019407611399394143,
"loss": 0.8962,
"step": 59
},
{
"epoch": 0.13,
"grad_norm": 0.103515625,
"learning_rate": 0.00019383439635232294,
"loss": 0.9455,
"step": 60
},
{
"epoch": 0.13,
"grad_norm": 0.10009765625,
"learning_rate": 0.0001935880020056179,
"loss": 1.0383,
"step": 61
},
{
"epoch": 0.14,
"grad_norm": 0.0849609375,
"learning_rate": 0.00019333694323411734,
"loss": 0.9763,
"step": 62
},
{
"epoch": 0.14,
"grad_norm": 0.09130859375,
"learning_rate": 0.00019308123255058708,
"loss": 1.0526,
"step": 63
},
{
"epoch": 0.14,
"grad_norm": 0.08251953125,
"learning_rate": 0.0001928208826996443,
"loss": 0.8888,
"step": 64
},
{
"epoch": 0.14,
"grad_norm": 0.08740234375,
"learning_rate": 0.00019255590665712214,
"loss": 0.925,
"step": 65
},
{
"epoch": 0.14,
"grad_norm": 0.08203125,
"learning_rate": 0.00019228631762942307,
"loss": 0.8944,
"step": 66
},
{
"epoch": 0.15,
"grad_norm": 0.09130859375,
"learning_rate": 0.00019201212905286074,
"loss": 0.9638,
"step": 67
},
{
"epoch": 0.15,
"grad_norm": 0.09814453125,
"learning_rate": 0.00019173335459299025,
"loss": 0.9773,
"step": 68
},
{
"epoch": 0.15,
"grad_norm": 0.09130859375,
"learning_rate": 0.00019145000814392696,
"loss": 0.9241,
"step": 69
},
{
"epoch": 0.15,
"grad_norm": 0.091796875,
"learning_rate": 0.0001911621038276542,
"loss": 0.9495,
"step": 70
},
{
"epoch": 0.16,
"grad_norm": 0.09375,
"learning_rate": 0.00019086965599331938,
"loss": 0.9238,
"step": 71
},
{
"epoch": 0.16,
"grad_norm": 0.09326171875,
"learning_rate": 0.00019057267921651866,
"loss": 0.9721,
"step": 72
},
{
"epoch": 0.16,
"grad_norm": 0.09326171875,
"learning_rate": 0.00019027118829857077,
"loss": 0.9081,
"step": 73
},
{
"epoch": 0.16,
"grad_norm": 0.10546875,
"learning_rate": 0.00018996519826577907,
"loss": 0.9803,
"step": 74
},
{
"epoch": 0.16,
"grad_norm": 0.0830078125,
"learning_rate": 0.00018965472436868286,
"loss": 0.9435,
"step": 75
},
{
"epoch": 0.17,
"grad_norm": 0.09814453125,
"learning_rate": 0.00018933978208129706,
"loss": 0.8613,
"step": 76
},
{
"epoch": 0.17,
"grad_norm": 0.091796875,
"learning_rate": 0.00018902038710034112,
"loss": 0.9293,
"step": 77
},
{
"epoch": 0.17,
"grad_norm": 0.1015625,
"learning_rate": 0.0001886965553444568,
"loss": 1.0039,
"step": 78
},
{
"epoch": 0.17,
"grad_norm": 0.091796875,
"learning_rate": 0.0001883683029534145,
"loss": 0.9316,
"step": 79
},
{
"epoch": 0.18,
"grad_norm": 0.08154296875,
"learning_rate": 0.00018803564628730915,
"loss": 0.987,
"step": 80
},
{
"epoch": 0.18,
"grad_norm": 0.0908203125,
"learning_rate": 0.0001876986019257446,
"loss": 0.9598,
"step": 81
},
{
"epoch": 0.18,
"grad_norm": 0.1044921875,
"learning_rate": 0.0001873571866670074,
"loss": 0.872,
"step": 82
},
{
"epoch": 0.18,
"grad_norm": 0.0859375,
"learning_rate": 0.00018701141752722964,
"loss": 0.9035,
"step": 83
},
{
"epoch": 0.18,
"grad_norm": 0.095703125,
"learning_rate": 0.0001866613117395407,
"loss": 0.9103,
"step": 84
},
{
"epoch": 0.19,
"grad_norm": 0.087890625,
"learning_rate": 0.00018630688675320842,
"loss": 0.9446,
"step": 85
},
{
"epoch": 0.19,
"grad_norm": 0.08251953125,
"learning_rate": 0.00018594816023276953,
"loss": 0.8302,
"step": 86
},
{
"epoch": 0.19,
"grad_norm": 0.091796875,
"learning_rate": 0.0001855851500571491,
"loss": 0.9336,
"step": 87
},
{
"epoch": 0.19,
"grad_norm": 0.09521484375,
"learning_rate": 0.00018521787431876954,
"loss": 0.9285,
"step": 88
},
{
"epoch": 0.2,
"grad_norm": 0.08984375,
"learning_rate": 0.0001848463513226488,
"loss": 0.8675,
"step": 89
},
{
"epoch": 0.2,
"grad_norm": 0.09521484375,
"learning_rate": 0.0001844705995854882,
"loss": 0.9251,
"step": 90
},
{
"epoch": 0.2,
"grad_norm": 0.0859375,
"learning_rate": 0.0001840906378347494,
"loss": 0.8655,
"step": 91
},
{
"epoch": 0.2,
"grad_norm": 0.107421875,
"learning_rate": 0.00018370648500772108,
"loss": 0.9182,
"step": 92
},
{
"epoch": 0.2,
"grad_norm": 0.09521484375,
"learning_rate": 0.0001833181602505751,
"loss": 0.873,
"step": 93
},
{
"epoch": 0.21,
"grad_norm": 0.0947265625,
"learning_rate": 0.00018292568291741228,
"loss": 0.9838,
"step": 94
},
{
"epoch": 0.21,
"grad_norm": 0.099609375,
"learning_rate": 0.00018252907256929775,
"loss": 0.8565,
"step": 95
},
{
"epoch": 0.21,
"grad_norm": 0.0888671875,
"learning_rate": 0.00018212834897328613,
"loss": 0.8747,
"step": 96
},
{
"epoch": 0.21,
"grad_norm": 0.0888671875,
"learning_rate": 0.0001817235321014361,
"loss": 0.9313,
"step": 97
},
{
"epoch": 0.22,
"grad_norm": 0.099609375,
"learning_rate": 0.0001813146421298154,
"loss": 0.8365,
"step": 98
},
{
"epoch": 0.22,
"grad_norm": 0.10107421875,
"learning_rate": 0.00018090169943749476,
"loss": 1.0068,
"step": 99
},
{
"epoch": 0.22,
"grad_norm": 0.09228515625,
"learning_rate": 0.00018048472460553257,
"loss": 0.8783,
"step": 100
},
{
"epoch": 0.22,
"grad_norm": 0.0986328125,
"learning_rate": 0.00018006373841594905,
"loss": 0.9325,
"step": 101
},
{
"epoch": 0.22,
"grad_norm": 0.09619140625,
"learning_rate": 0.00017963876185069034,
"loss": 1.0326,
"step": 102
},
{
"epoch": 0.23,
"grad_norm": 0.1064453125,
"learning_rate": 0.00017920981609058289,
"loss": 0.8769,
"step": 103
},
{
"epoch": 0.23,
"grad_norm": 0.0869140625,
"learning_rate": 0.00017877692251427782,
"loss": 0.9167,
"step": 104
},
{
"epoch": 0.23,
"grad_norm": 0.1142578125,
"learning_rate": 0.00017834010269718526,
"loss": 0.9688,
"step": 105
},
{
"epoch": 0.23,
"grad_norm": 0.08740234375,
"learning_rate": 0.00017789937841039918,
"loss": 0.8039,
"step": 106
},
{
"epoch": 0.24,
"grad_norm": 0.08837890625,
"learning_rate": 0.0001774547716196123,
"loss": 0.9943,
"step": 107
},
{
"epoch": 0.24,
"grad_norm": 0.1064453125,
"learning_rate": 0.00017700630448402125,
"loss": 0.9769,
"step": 108
},
{
"epoch": 0.24,
"grad_norm": 0.09619140625,
"learning_rate": 0.00017655399935522217,
"loss": 0.8973,
"step": 109
},
{
"epoch": 0.24,
"grad_norm": 0.1025390625,
"learning_rate": 0.0001760978787760968,
"loss": 0.8602,
"step": 110
},
{
"epoch": 0.24,
"grad_norm": 0.09814453125,
"learning_rate": 0.0001756379654796888,
"loss": 0.8966,
"step": 111
},
{
"epoch": 0.25,
"grad_norm": 0.09521484375,
"learning_rate": 0.00017517428238807085,
"loss": 0.8997,
"step": 112
},
{
"epoch": 0.25,
"grad_norm": 0.0947265625,
"learning_rate": 0.0001747068526112022,
"loss": 0.9476,
"step": 113
},
{
"epoch": 0.25,
"grad_norm": 0.09814453125,
"learning_rate": 0.00017423569944577678,
"loss": 0.9328,
"step": 114
},
{
"epoch": 0.25,
"eval_loss": 0.922809362411499,
"eval_runtime": 418.8489,
"eval_samples_per_second": 7.01,
"eval_steps_per_second": 3.505,
"step": 114
},
{
"epoch": 0.25,
"grad_norm": 0.09619140625,
"learning_rate": 0.00017376084637406222,
"loss": 0.8643,
"step": 115
},
{
"epoch": 0.25,
"grad_norm": 0.10302734375,
"learning_rate": 0.0001732823170627294,
"loss": 0.9659,
"step": 116
},
{
"epoch": 0.26,
"grad_norm": 0.0986328125,
"learning_rate": 0.00017280013536167293,
"loss": 0.8158,
"step": 117
},
{
"epoch": 0.26,
"grad_norm": 0.1025390625,
"learning_rate": 0.00017231432530282246,
"loss": 0.9843,
"step": 118
},
{
"epoch": 0.26,
"grad_norm": 0.08984375,
"learning_rate": 0.000171824911098945,
"loss": 0.9591,
"step": 119
},
{
"epoch": 0.26,
"grad_norm": 0.09423828125,
"learning_rate": 0.00017133191714243805,
"loss": 0.9331,
"step": 120
},
{
"epoch": 0.27,
"grad_norm": 0.103515625,
"learning_rate": 0.00017083536800411393,
"loss": 0.8647,
"step": 121
},
{
"epoch": 0.27,
"grad_norm": 0.0888671875,
"learning_rate": 0.00017033528843197525,
"loss": 1.0038,
"step": 122
},
{
"epoch": 0.27,
"grad_norm": 0.1005859375,
"learning_rate": 0.00016983170334998132,
"loss": 0.8214,
"step": 123
},
{
"epoch": 0.27,
"grad_norm": 0.08544921875,
"learning_rate": 0.000169324637856806,
"loss": 0.8832,
"step": 124
},
{
"epoch": 0.27,
"grad_norm": 0.0859375,
"learning_rate": 0.00016881411722458688,
"loss": 0.8642,
"step": 125
},
{
"epoch": 0.28,
"grad_norm": 0.09375,
"learning_rate": 0.00016830016689766558,
"loss": 0.9047,
"step": 126
},
{
"epoch": 0.28,
"grad_norm": 0.095703125,
"learning_rate": 0.0001677828124913197,
"loss": 0.8494,
"step": 127
},
{
"epoch": 0.28,
"grad_norm": 0.08837890625,
"learning_rate": 0.00016726207979048604,
"loss": 0.9421,
"step": 128
},
{
"epoch": 0.28,
"grad_norm": 0.09423828125,
"learning_rate": 0.0001667379947484756,
"loss": 0.8429,
"step": 129
},
{
"epoch": 0.29,
"grad_norm": 0.10400390625,
"learning_rate": 0.00016621058348568007,
"loss": 0.8319,
"step": 130
},
{
"epoch": 0.29,
"grad_norm": 0.09912109375,
"learning_rate": 0.0001656798722882698,
"loss": 0.8681,
"step": 131
},
{
"epoch": 0.29,
"grad_norm": 0.10498046875,
"learning_rate": 0.00016514588760688397,
"loss": 0.9469,
"step": 132
},
{
"epoch": 0.29,
"grad_norm": 0.091796875,
"learning_rate": 0.00016460865605531213,
"loss": 0.9818,
"step": 133
},
{
"epoch": 0.29,
"grad_norm": 0.10009765625,
"learning_rate": 0.0001640682044091678,
"loss": 0.9046,
"step": 134
},
{
"epoch": 0.3,
"grad_norm": 0.09326171875,
"learning_rate": 0.00016352455960455387,
"loss": 0.9319,
"step": 135
},
{
"epoch": 0.3,
"grad_norm": 0.09716796875,
"learning_rate": 0.00016297774873672035,
"loss": 0.963,
"step": 136
},
{
"epoch": 0.3,
"grad_norm": 0.103515625,
"learning_rate": 0.00016242779905871374,
"loss": 0.8749,
"step": 137
},
{
"epoch": 0.3,
"grad_norm": 0.09423828125,
"learning_rate": 0.00016187473798001879,
"loss": 0.9731,
"step": 138
},
{
"epoch": 0.31,
"grad_norm": 0.1015625,
"learning_rate": 0.00016131859306519242,
"loss": 0.8319,
"step": 139
},
{
"epoch": 0.31,
"grad_norm": 0.0908203125,
"learning_rate": 0.0001607593920324899,
"loss": 0.8156,
"step": 140
},
{
"epoch": 0.31,
"grad_norm": 0.10791015625,
"learning_rate": 0.00016019716275248342,
"loss": 0.9433,
"step": 141
},
{
"epoch": 0.31,
"grad_norm": 0.12451171875,
"learning_rate": 0.0001596319332466729,
"loss": 0.913,
"step": 142
},
{
"epoch": 0.31,
"grad_norm": 0.1005859375,
"learning_rate": 0.00015906373168608952,
"loss": 0.9255,
"step": 143
},
{
"epoch": 0.32,
"grad_norm": 0.10498046875,
"learning_rate": 0.00015849258638989165,
"loss": 0.9524,
"step": 144
},
{
"epoch": 0.32,
"grad_norm": 0.1015625,
"learning_rate": 0.00015791852582395334,
"loss": 1.0442,
"step": 145
},
{
"epoch": 0.32,
"grad_norm": 0.123046875,
"learning_rate": 0.00015734157859944575,
"loss": 0.8188,
"step": 146
},
{
"epoch": 0.32,
"grad_norm": 0.10107421875,
"learning_rate": 0.00015676177347141094,
"loss": 0.8675,
"step": 147
},
{
"epoch": 0.33,
"grad_norm": 0.09423828125,
"learning_rate": 0.00015617913933732892,
"loss": 0.9065,
"step": 148
},
{
"epoch": 0.33,
"grad_norm": 0.1064453125,
"learning_rate": 0.00015559370523567732,
"loss": 0.9665,
"step": 149
},
{
"epoch": 0.33,
"grad_norm": 0.1162109375,
"learning_rate": 0.00015500550034448413,
"loss": 0.8779,
"step": 150
},
{
"epoch": 0.33,
"grad_norm": 0.08837890625,
"learning_rate": 0.0001544145539798734,
"loss": 0.8539,
"step": 151
},
{
"epoch": 0.33,
"grad_norm": 0.1142578125,
"learning_rate": 0.00015382089559460422,
"loss": 0.9068,
"step": 152
},
{
"epoch": 0.34,
"grad_norm": 0.10498046875,
"learning_rate": 0.00015322455477660273,
"loss": 0.9563,
"step": 153
},
{
"epoch": 0.34,
"grad_norm": 0.09423828125,
"learning_rate": 0.00015262556124748751,
"loss": 0.8071,
"step": 154
},
{
"epoch": 0.34,
"grad_norm": 0.1123046875,
"learning_rate": 0.0001520239448610882,
"loss": 0.8971,
"step": 155
},
{
"epoch": 0.34,
"grad_norm": 0.103515625,
"learning_rate": 0.00015141973560195768,
"loss": 0.883,
"step": 156
},
{
"epoch": 0.34,
"grad_norm": 0.10498046875,
"learning_rate": 0.0001508129635838775,
"loss": 0.8609,
"step": 157
},
{
"epoch": 0.35,
"grad_norm": 0.10986328125,
"learning_rate": 0.000150203659048357,
"loss": 0.9184,
"step": 158
},
{
"epoch": 0.35,
"grad_norm": 0.109375,
"learning_rate": 0.00014959185236312642,
"loss": 0.8602,
"step": 159
},
{
"epoch": 0.35,
"grad_norm": 0.107421875,
"learning_rate": 0.00014897757402062284,
"loss": 0.8908,
"step": 160
},
{
"epoch": 0.35,
"grad_norm": 0.09521484375,
"learning_rate": 0.00014836085463647088,
"loss": 0.9134,
"step": 161
},
{
"epoch": 0.36,
"grad_norm": 0.109375,
"learning_rate": 0.00014774172494795652,
"loss": 0.9385,
"step": 162
},
{
"epoch": 0.36,
"grad_norm": 0.10888671875,
"learning_rate": 0.00014712021581249533,
"loss": 0.8183,
"step": 163
},
{
"epoch": 0.36,
"grad_norm": 0.09814453125,
"learning_rate": 0.00014649635820609456,
"loss": 0.8806,
"step": 164
},
{
"epoch": 0.36,
"grad_norm": 0.10009765625,
"learning_rate": 0.00014587018322180905,
"loss": 0.915,
"step": 165
},
{
"epoch": 0.36,
"grad_norm": 0.12109375,
"learning_rate": 0.00014524172206819194,
"loss": 0.848,
"step": 166
},
{
"epoch": 0.37,
"grad_norm": 0.10009765625,
"learning_rate": 0.00014461100606773884,
"loss": 0.9189,
"step": 167
},
{
"epoch": 0.37,
"grad_norm": 0.099609375,
"learning_rate": 0.00014397806665532694,
"loss": 0.78,
"step": 168
},
{
"epoch": 0.37,
"grad_norm": 0.10205078125,
"learning_rate": 0.00014334293537664837,
"loss": 0.8854,
"step": 169
},
{
"epoch": 0.37,
"grad_norm": 0.10009765625,
"learning_rate": 0.0001427056438866376,
"loss": 0.7996,
"step": 170
},
{
"epoch": 0.38,
"grad_norm": 0.10693359375,
"learning_rate": 0.00014206622394789432,
"loss": 0.9002,
"step": 171
},
{
"epoch": 0.38,
"grad_norm": 0.1083984375,
"learning_rate": 0.00014142470742909975,
"loss": 0.9172,
"step": 172
},
{
"epoch": 0.38,
"grad_norm": 0.10693359375,
"learning_rate": 0.0001407811263034289,
"loss": 0.9438,
"step": 173
},
{
"epoch": 0.38,
"grad_norm": 0.12890625,
"learning_rate": 0.00014013551264695662,
"loss": 0.8709,
"step": 174
},
{
"epoch": 0.38,
"grad_norm": 0.11328125,
"learning_rate": 0.00013948789863705912,
"loss": 0.8717,
"step": 175
},
{
"epoch": 0.39,
"grad_norm": 0.10205078125,
"learning_rate": 0.00013883831655081018,
"loss": 0.9313,
"step": 176
},
{
"epoch": 0.39,
"grad_norm": 0.11669921875,
"learning_rate": 0.00013818679876337247,
"loss": 0.914,
"step": 177
},
{
"epoch": 0.39,
"grad_norm": 0.1064453125,
"learning_rate": 0.00013753337774638396,
"loss": 0.919,
"step": 178
},
{
"epoch": 0.39,
"grad_norm": 0.10791015625,
"learning_rate": 0.00013687808606633966,
"loss": 0.9628,
"step": 179
},
{
"epoch": 0.4,
"grad_norm": 0.11376953125,
"learning_rate": 0.00013622095638296826,
"loss": 0.8947,
"step": 180
},
{
"epoch": 0.4,
"grad_norm": 0.11669921875,
"learning_rate": 0.0001355620214476046,
"loss": 0.9269,
"step": 181
},
{
"epoch": 0.4,
"grad_norm": 0.10595703125,
"learning_rate": 0.00013490131410155728,
"loss": 0.8438,
"step": 182
},
{
"epoch": 0.4,
"grad_norm": 0.11669921875,
"learning_rate": 0.00013423886727447176,
"loss": 0.8986,
"step": 183
},
{
"epoch": 0.4,
"grad_norm": 0.11474609375,
"learning_rate": 0.00013357471398268918,
"loss": 0.9274,
"step": 184
},
{
"epoch": 0.41,
"grad_norm": 0.111328125,
"learning_rate": 0.000132908887327601,
"loss": 0.8511,
"step": 185
},
{
"epoch": 0.41,
"grad_norm": 0.11767578125,
"learning_rate": 0.00013224142049399895,
"loss": 0.809,
"step": 186
},
{
"epoch": 0.41,
"grad_norm": 0.12158203125,
"learning_rate": 0.00013157234674842128,
"loss": 0.7523,
"step": 187
},
{
"epoch": 0.41,
"grad_norm": 0.11669921875,
"learning_rate": 0.00013090169943749476,
"loss": 0.9021,
"step": 188
},
{
"epoch": 0.42,
"grad_norm": 0.11865234375,
"learning_rate": 0.00013022951198627254,
"loss": 0.8875,
"step": 189
},
{
"epoch": 0.42,
"grad_norm": 0.09521484375,
"learning_rate": 0.00012955581789656843,
"loss": 0.8563,
"step": 190
},
{
"epoch": 0.42,
"grad_norm": 0.1123046875,
"learning_rate": 0.000128880650745287,
"loss": 0.7841,
"step": 191
},
{
"epoch": 0.42,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001282040441827503,
"loss": 0.925,
"step": 192
},
{
"epoch": 0.42,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001275260319310205,
"loss": 1.0102,
"step": 193
},
{
"epoch": 0.43,
"grad_norm": 0.115234375,
"learning_rate": 0.00012684664778221942,
"loss": 0.7835,
"step": 194
},
{
"epoch": 0.43,
"grad_norm": 0.10791015625,
"learning_rate": 0.0001261659255968441,
"loss": 0.9312,
"step": 195
},
{
"epoch": 0.43,
"grad_norm": 0.09716796875,
"learning_rate": 0.0001254838993020793,
"loss": 0.9214,
"step": 196
},
{
"epoch": 0.43,
"grad_norm": 0.10009765625,
"learning_rate": 0.00012480060289010679,
"loss": 0.8237,
"step": 197
},
{
"epoch": 0.43,
"grad_norm": 0.11279296875,
"learning_rate": 0.00012411607041641062,
"loss": 0.9152,
"step": 198
},
{
"epoch": 0.44,
"grad_norm": 0.1123046875,
"learning_rate": 0.00012343033599808044,
"loss": 0.9036,
"step": 199
},
{
"epoch": 0.44,
"grad_norm": 0.1416015625,
"learning_rate": 0.00012274343381211066,
"loss": 0.901,
"step": 200
},
{
"epoch": 0.44,
"grad_norm": 0.11962890625,
"learning_rate": 0.00012205539809369719,
"loss": 0.7381,
"step": 201
},
{
"epoch": 0.44,
"grad_norm": 0.111328125,
"learning_rate": 0.00012136626313453134,
"loss": 0.9358,
"step": 202
},
{
"epoch": 0.45,
"grad_norm": 0.11962890625,
"learning_rate": 0.00012067606328109038,
"loss": 0.8706,
"step": 203
},
{
"epoch": 0.45,
"grad_norm": 0.11572265625,
"learning_rate": 0.00011998483293292602,
"loss": 0.9957,
"step": 204
},
{
"epoch": 0.45,
"grad_norm": 0.10498046875,
"learning_rate": 0.00011929260654094969,
"loss": 0.8489,
"step": 205
},
{
"epoch": 0.45,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001185994186057158,
"loss": 0.8718,
"step": 206
},
{
"epoch": 0.45,
"grad_norm": 0.09912109375,
"learning_rate": 0.00011790530367570194,
"loss": 0.8182,
"step": 207
},
{
"epoch": 0.46,
"grad_norm": 0.10498046875,
"learning_rate": 0.00011721029634558709,
"loss": 0.8512,
"step": 208
},
{
"epoch": 0.46,
"grad_norm": 0.150390625,
"learning_rate": 0.00011651443125452759,
"loss": 0.9216,
"step": 209
},
{
"epoch": 0.46,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001158177430844304,
"loss": 0.7999,
"step": 210
},
{
"epoch": 0.46,
"grad_norm": 0.103515625,
"learning_rate": 0.00011512026655822483,
"loss": 0.7343,
"step": 211
},
{
"epoch": 0.47,
"grad_norm": 0.1005859375,
"learning_rate": 0.00011442203643813183,
"loss": 0.861,
"step": 212
},
{
"epoch": 0.47,
"grad_norm": 0.1220703125,
"learning_rate": 0.00011372308752393144,
"loss": 0.8731,
"step": 213
},
{
"epoch": 0.47,
"grad_norm": 0.1298828125,
"learning_rate": 0.00011302345465122839,
"loss": 0.9257,
"step": 214
},
{
"epoch": 0.47,
"grad_norm": 0.10791015625,
"learning_rate": 0.00011232317268971585,
"loss": 1.0089,
"step": 215
},
{
"epoch": 0.47,
"grad_norm": 0.1220703125,
"learning_rate": 0.00011162227654143778,
"loss": 0.877,
"step": 216
},
{
"epoch": 0.48,
"grad_norm": 0.0927734375,
"learning_rate": 0.00011092080113904886,
"loss": 0.8373,
"step": 217
},
{
"epoch": 0.48,
"grad_norm": 0.10693359375,
"learning_rate": 0.00011021878144407408,
"loss": 0.8474,
"step": 218
},
{
"epoch": 0.48,
"grad_norm": 0.1025390625,
"learning_rate": 0.00010951625244516583,
"loss": 0.788,
"step": 219
},
{
"epoch": 0.48,
"grad_norm": 0.1142578125,
"learning_rate": 0.00010881324915636019,
"loss": 0.8093,
"step": 220
},
{
"epoch": 0.49,
"grad_norm": 0.10986328125,
"learning_rate": 0.0001081098066153319,
"loss": 0.9342,
"step": 221
},
{
"epoch": 0.49,
"grad_norm": 0.10986328125,
"learning_rate": 0.00010740595988164801,
"loss": 0.836,
"step": 222
},
{
"epoch": 0.49,
"grad_norm": 0.09423828125,
"learning_rate": 0.00010670174403502052,
"loss": 0.8206,
"step": 223
},
{
"epoch": 0.49,
"grad_norm": 0.0986328125,
"learning_rate": 0.000105997194173558,
"loss": 0.8645,
"step": 224
},
{
"epoch": 0.49,
"grad_norm": 0.10107421875,
"learning_rate": 0.00010529234541201631,
"loss": 0.8027,
"step": 225
},
{
"epoch": 0.5,
"grad_norm": 0.11181640625,
"learning_rate": 0.00010458723288004857,
"loss": 0.8333,
"step": 226
},
{
"epoch": 0.5,
"grad_norm": 0.10400390625,
"learning_rate": 0.00010388189172045406,
"loss": 0.8999,
"step": 227
},
{
"epoch": 0.5,
"grad_norm": 0.1142578125,
"learning_rate": 0.00010317635708742699,
"loss": 0.8961,
"step": 228
},
{
"epoch": 0.5,
"eval_loss": 0.8713043332099915,
"eval_runtime": 415.0516,
"eval_samples_per_second": 7.074,
"eval_steps_per_second": 3.537,
"step": 228
},
{
"epoch": 0.5,
"grad_norm": 0.103515625,
"learning_rate": 0.00010247066414480424,
"loss": 0.8739,
"step": 229
},
{
"epoch": 0.51,
"grad_norm": 0.134765625,
"learning_rate": 0.00010176484806431288,
"loss": 0.8045,
"step": 230
},
{
"epoch": 0.51,
"grad_norm": 0.103515625,
"learning_rate": 0.00010105894402381704,
"loss": 0.9281,
"step": 231
},
{
"epoch": 0.51,
"grad_norm": 0.1025390625,
"learning_rate": 0.00010035298720556493,
"loss": 0.8426,
"step": 232
},
{
"epoch": 0.51,
"grad_norm": 0.11669921875,
"learning_rate": 9.964701279443508e-05,
"loss": 0.7591,
"step": 233
},
{
"epoch": 0.51,
"grad_norm": 0.11328125,
"learning_rate": 9.894105597618296e-05,
"loss": 0.8066,
"step": 234
},
{
"epoch": 0.52,
"grad_norm": 0.1064453125,
"learning_rate": 9.823515193568715e-05,
"loss": 0.8372,
"step": 235
},
{
"epoch": 0.52,
"grad_norm": 0.1015625,
"learning_rate": 9.752933585519577e-05,
"loss": 0.81,
"step": 236
},
{
"epoch": 0.52,
"grad_norm": 0.10986328125,
"learning_rate": 9.682364291257304e-05,
"loss": 1.0054,
"step": 237
},
{
"epoch": 0.52,
"grad_norm": 0.09814453125,
"learning_rate": 9.611810827954599e-05,
"loss": 0.8229,
"step": 238
},
{
"epoch": 0.52,
"grad_norm": 0.19140625,
"learning_rate": 9.541276711995148e-05,
"loss": 0.7994,
"step": 239
},
{
"epoch": 0.53,
"grad_norm": 0.1279296875,
"learning_rate": 9.470765458798368e-05,
"loss": 0.8103,
"step": 240
},
{
"epoch": 0.53,
"grad_norm": 0.1171875,
"learning_rate": 9.400280582644203e-05,
"loss": 0.7667,
"step": 241
},
{
"epoch": 0.53,
"grad_norm": 0.0966796875,
"learning_rate": 9.329825596497949e-05,
"loss": 0.9226,
"step": 242
},
{
"epoch": 0.53,
"grad_norm": 0.11279296875,
"learning_rate": 9.259404011835201e-05,
"loss": 0.751,
"step": 243
},
{
"epoch": 0.54,
"grad_norm": 0.10498046875,
"learning_rate": 9.189019338466812e-05,
"loss": 0.978,
"step": 244
},
{
"epoch": 0.54,
"grad_norm": 0.0986328125,
"learning_rate": 9.118675084363986e-05,
"loss": 0.8746,
"step": 245
},
{
"epoch": 0.54,
"grad_norm": 0.1318359375,
"learning_rate": 9.048374755483419e-05,
"loss": 0.8223,
"step": 246
},
{
"epoch": 0.54,
"grad_norm": 0.16015625,
"learning_rate": 8.978121855592593e-05,
"loss": 0.9778,
"step": 247
},
{
"epoch": 0.54,
"grad_norm": 0.134765625,
"learning_rate": 8.907919886095115e-05,
"loss": 0.8693,
"step": 248
},
{
"epoch": 0.55,
"grad_norm": 0.1201171875,
"learning_rate": 8.837772345856226e-05,
"loss": 0.7726,
"step": 249
},
{
"epoch": 0.55,
"grad_norm": 0.1494140625,
"learning_rate": 8.767682731028415e-05,
"loss": 0.7388,
"step": 250
},
{
"epoch": 0.55,
"grad_norm": 0.1337890625,
"learning_rate": 8.697654534877165e-05,
"loss": 0.8422,
"step": 251
},
{
"epoch": 0.55,
"grad_norm": 0.11572265625,
"learning_rate": 8.627691247606862e-05,
"loss": 0.8306,
"step": 252
},
{
"epoch": 0.56,
"grad_norm": 0.08984375,
"learning_rate": 8.557796356186818e-05,
"loss": 0.7276,
"step": 253
},
{
"epoch": 0.56,
"grad_norm": 0.1337890625,
"learning_rate": 8.487973344177517e-05,
"loss": 0.8244,
"step": 254
},
{
"epoch": 0.56,
"grad_norm": 0.10693359375,
"learning_rate": 8.418225691556962e-05,
"loss": 0.7665,
"step": 255
},
{
"epoch": 0.56,
"grad_norm": 0.09765625,
"learning_rate": 8.348556874547242e-05,
"loss": 0.8114,
"step": 256
},
{
"epoch": 0.56,
"grad_norm": 0.1318359375,
"learning_rate": 8.278970365441292e-05,
"loss": 0.7918,
"step": 257
},
{
"epoch": 0.57,
"grad_norm": 0.11669921875,
"learning_rate": 8.20946963242981e-05,
"loss": 0.8266,
"step": 258
},
{
"epoch": 0.57,
"grad_norm": 0.11083984375,
"learning_rate": 8.140058139428425e-05,
"loss": 0.7967,
"step": 259
},
{
"epoch": 0.57,
"grad_norm": 0.09912109375,
"learning_rate": 8.070739345905032e-05,
"loss": 0.9177,
"step": 260
},
{
"epoch": 0.57,
"grad_norm": 0.11474609375,
"learning_rate": 8.001516706707401e-05,
"loss": 0.9526,
"step": 261
},
{
"epoch": 0.58,
"grad_norm": 0.11083984375,
"learning_rate": 7.932393671890965e-05,
"loss": 0.9914,
"step": 262
},
{
"epoch": 0.58,
"grad_norm": 0.10205078125,
"learning_rate": 7.863373686546867e-05,
"loss": 0.8242,
"step": 263
},
{
"epoch": 0.58,
"grad_norm": 0.09716796875,
"learning_rate": 7.794460190630282e-05,
"loss": 0.6837,
"step": 264
},
{
"epoch": 0.58,
"grad_norm": 0.11328125,
"learning_rate": 7.725656618788937e-05,
"loss": 0.8758,
"step": 265
},
{
"epoch": 0.58,
"grad_norm": 0.11181640625,
"learning_rate": 7.656966400191956e-05,
"loss": 0.8655,
"step": 266
},
{
"epoch": 0.59,
"grad_norm": 0.11328125,
"learning_rate": 7.58839295835894e-05,
"loss": 1.0563,
"step": 267
},
{
"epoch": 0.59,
"grad_norm": 0.11962890625,
"learning_rate": 7.519939710989325e-05,
"loss": 0.832,
"step": 268
},
{
"epoch": 0.59,
"grad_norm": 0.1142578125,
"learning_rate": 7.45161006979207e-05,
"loss": 0.8198,
"step": 269
},
{
"epoch": 0.59,
"grad_norm": 0.1103515625,
"learning_rate": 7.383407440315596e-05,
"loss": 0.8246,
"step": 270
},
{
"epoch": 0.6,
"grad_norm": 0.10498046875,
"learning_rate": 7.315335221778064e-05,
"loss": 0.8314,
"step": 271
},
{
"epoch": 0.6,
"grad_norm": 0.1240234375,
"learning_rate": 7.247396806897952e-05,
"loss": 0.7107,
"step": 272
},
{
"epoch": 0.6,
"grad_norm": 0.10498046875,
"learning_rate": 7.17959558172497e-05,
"loss": 0.8982,
"step": 273
},
{
"epoch": 0.6,
"grad_norm": 0.1103515625,
"learning_rate": 7.111934925471302e-05,
"loss": 0.9427,
"step": 274
},
{
"epoch": 0.6,
"grad_norm": 0.1279296875,
"learning_rate": 7.04441821034316e-05,
"loss": 0.8454,
"step": 275
},
{
"epoch": 0.61,
"grad_norm": 0.115234375,
"learning_rate": 6.97704880137275e-05,
"loss": 0.8044,
"step": 276
},
{
"epoch": 0.61,
"grad_norm": 0.12451171875,
"learning_rate": 6.909830056250527e-05,
"loss": 0.8196,
"step": 277
},
{
"epoch": 0.61,
"grad_norm": 0.13671875,
"learning_rate": 6.842765325157874e-05,
"loss": 0.8221,
"step": 278
},
{
"epoch": 0.61,
"grad_norm": 0.119140625,
"learning_rate": 6.775857950600106e-05,
"loss": 0.8273,
"step": 279
},
{
"epoch": 0.62,
"grad_norm": 0.0966796875,
"learning_rate": 6.7091112672399e-05,
"loss": 0.9296,
"step": 280
},
{
"epoch": 0.62,
"grad_norm": 0.103515625,
"learning_rate": 6.642528601731082e-05,
"loss": 0.8635,
"step": 281
},
{
"epoch": 0.62,
"grad_norm": 0.10693359375,
"learning_rate": 6.576113272552826e-05,
"loss": 0.8622,
"step": 282
},
{
"epoch": 0.62,
"grad_norm": 0.10693359375,
"learning_rate": 6.509868589844273e-05,
"loss": 0.8982,
"step": 283
},
{
"epoch": 0.62,
"grad_norm": 0.1220703125,
"learning_rate": 6.44379785523954e-05,
"loss": 0.8564,
"step": 284
},
{
"epoch": 0.63,
"grad_norm": 0.1923828125,
"learning_rate": 6.377904361703178e-05,
"loss": 0.7286,
"step": 285
},
{
"epoch": 0.63,
"grad_norm": 0.1005859375,
"learning_rate": 6.312191393366035e-05,
"loss": 1.0057,
"step": 286
},
{
"epoch": 0.63,
"grad_norm": 0.12158203125,
"learning_rate": 6.246662225361602e-05,
"loss": 0.7258,
"step": 287
},
{
"epoch": 0.63,
"grad_norm": 0.099609375,
"learning_rate": 6.181320123662755e-05,
"loss": 0.8115,
"step": 288
},
{
"epoch": 0.63,
"grad_norm": 0.1298828125,
"learning_rate": 6.116168344918983e-05,
"loss": 0.7434,
"step": 289
},
{
"epoch": 0.64,
"grad_norm": 0.10009765625,
"learning_rate": 6.051210136294089e-05,
"loss": 0.875,
"step": 290
},
{
"epoch": 0.64,
"grad_norm": 0.10791015625,
"learning_rate": 5.986448735304339e-05,
"loss": 0.9115,
"step": 291
},
{
"epoch": 0.64,
"grad_norm": 0.1064453125,
"learning_rate": 5.921887369657113e-05,
"loss": 0.8492,
"step": 292
},
{
"epoch": 0.64,
"grad_norm": 0.126953125,
"learning_rate": 5.857529257090027e-05,
"loss": 0.7701,
"step": 293
},
{
"epoch": 0.65,
"grad_norm": 0.11083984375,
"learning_rate": 5.7933776052105745e-05,
"loss": 0.9333,
"step": 294
},
{
"epoch": 0.65,
"grad_norm": 0.1279296875,
"learning_rate": 5.729435611336239e-05,
"loss": 0.7718,
"step": 295
},
{
"epoch": 0.65,
"grad_norm": 0.123046875,
"learning_rate": 5.6657064623351676e-05,
"loss": 0.8522,
"step": 296
},
{
"epoch": 0.65,
"grad_norm": 0.1982421875,
"learning_rate": 5.602193334467307e-05,
"loss": 0.7019,
"step": 297
},
{
"epoch": 0.65,
"grad_norm": 0.11865234375,
"learning_rate": 5.5388993932261215e-05,
"loss": 0.8389,
"step": 298
},
{
"epoch": 0.66,
"grad_norm": 0.1259765625,
"learning_rate": 5.4758277931808075e-05,
"loss": 0.863,
"step": 299
},
{
"epoch": 0.66,
"grad_norm": 0.115234375,
"learning_rate": 5.4129816778190936e-05,
"loss": 0.7688,
"step": 300
},
{
"epoch": 0.66,
"grad_norm": 0.11328125,
"learning_rate": 5.350364179390548e-05,
"loss": 0.8084,
"step": 301
},
{
"epoch": 0.66,
"grad_norm": 0.1005859375,
"learning_rate": 5.28797841875047e-05,
"loss": 0.9221,
"step": 302
},
{
"epoch": 0.67,
"grad_norm": 0.12255859375,
"learning_rate": 5.2258275052043546e-05,
"loss": 0.8468,
"step": 303
},
{
"epoch": 0.67,
"grad_norm": 0.1181640625,
"learning_rate": 5.163914536352918e-05,
"loss": 0.918,
"step": 304
},
{
"epoch": 0.67,
"grad_norm": 0.11572265625,
"learning_rate": 5.102242597937717e-05,
"loss": 0.8958,
"step": 305
},
{
"epoch": 0.67,
"grad_norm": 0.126953125,
"learning_rate": 5.040814763687358e-05,
"loss": 0.8718,
"step": 306
},
{
"epoch": 0.67,
"grad_norm": 0.11376953125,
"learning_rate": 4.9796340951642986e-05,
"loss": 0.8603,
"step": 307
},
{
"epoch": 0.68,
"grad_norm": 0.11669921875,
"learning_rate": 4.918703641612255e-05,
"loss": 0.8245,
"step": 308
},
{
"epoch": 0.68,
"grad_norm": 0.12890625,
"learning_rate": 4.858026439804235e-05,
"loss": 0.8881,
"step": 309
},
{
"epoch": 0.68,
"grad_norm": 0.10791015625,
"learning_rate": 4.797605513891179e-05,
"loss": 0.921,
"step": 310
},
{
"epoch": 0.68,
"grad_norm": 0.232421875,
"learning_rate": 4.737443875251251e-05,
"loss": 0.8585,
"step": 311
},
{
"epoch": 0.69,
"grad_norm": 0.11474609375,
"learning_rate": 4.6775445223397306e-05,
"loss": 0.8024,
"step": 312
},
{
"epoch": 0.69,
"grad_norm": 0.1767578125,
"learning_rate": 4.61791044053958e-05,
"loss": 0.7499,
"step": 313
},
{
"epoch": 0.69,
"grad_norm": 0.130859375,
"learning_rate": 4.558544602012663e-05,
"loss": 0.8993,
"step": 314
},
{
"epoch": 0.69,
"grad_norm": 0.115234375,
"learning_rate": 4.4994499655515865e-05,
"loss": 0.8938,
"step": 315
},
{
"epoch": 0.69,
"grad_norm": 0.11083984375,
"learning_rate": 4.440629476432267e-05,
"loss": 0.8357,
"step": 316
},
{
"epoch": 0.7,
"grad_norm": 0.1328125,
"learning_rate": 4.3820860662671107e-05,
"loss": 0.719,
"step": 317
},
{
"epoch": 0.7,
"grad_norm": 0.1015625,
"learning_rate": 4.323822652858911e-05,
"loss": 0.8492,
"step": 318
},
{
"epoch": 0.7,
"grad_norm": 0.126953125,
"learning_rate": 4.265842140055428e-05,
"loss": 0.7805,
"step": 319
},
{
"epoch": 0.7,
"grad_norm": 0.11376953125,
"learning_rate": 4.2081474176046646e-05,
"loss": 0.814,
"step": 320
},
{
"epoch": 0.71,
"grad_norm": 0.1103515625,
"learning_rate": 4.150741361010837e-05,
"loss": 0.8448,
"step": 321
},
{
"epoch": 0.71,
"grad_norm": 0.1142578125,
"learning_rate": 4.093626831391051e-05,
"loss": 0.8212,
"step": 322
},
{
"epoch": 0.71,
"grad_norm": 0.1435546875,
"learning_rate": 4.036806675332715e-05,
"loss": 0.8006,
"step": 323
},
{
"epoch": 0.71,
"grad_norm": 0.111328125,
"learning_rate": 3.98028372475166e-05,
"loss": 0.8723,
"step": 324
},
{
"epoch": 0.71,
"grad_norm": 0.130859375,
"learning_rate": 3.924060796751012e-05,
"loss": 0.7984,
"step": 325
},
{
"epoch": 0.72,
"grad_norm": 0.10302734375,
"learning_rate": 3.8681406934807585e-05,
"loss": 0.8581,
"step": 326
},
{
"epoch": 0.72,
"grad_norm": 0.12060546875,
"learning_rate": 3.8125262019981224e-05,
"loss": 0.8222,
"step": 327
},
{
"epoch": 0.72,
"grad_norm": 0.12255859375,
"learning_rate": 3.757220094128629e-05,
"loss": 0.8624,
"step": 328
},
{
"epoch": 0.72,
"grad_norm": 0.12353515625,
"learning_rate": 3.702225126327965e-05,
"loss": 0.7377,
"step": 329
},
{
"epoch": 0.72,
"grad_norm": 0.1044921875,
"learning_rate": 3.647544039544615e-05,
"loss": 0.8021,
"step": 330
},
{
"epoch": 0.73,
"grad_norm": 0.125,
"learning_rate": 3.5931795590832254e-05,
"loss": 0.8053,
"step": 331
},
{
"epoch": 0.73,
"grad_norm": 0.10546875,
"learning_rate": 3.53913439446879e-05,
"loss": 1.0031,
"step": 332
},
{
"epoch": 0.73,
"grad_norm": 0.173828125,
"learning_rate": 3.485411239311604e-05,
"loss": 0.773,
"step": 333
},
{
"epoch": 0.73,
"grad_norm": 0.138671875,
"learning_rate": 3.432012771173021e-05,
"loss": 0.785,
"step": 334
},
{
"epoch": 0.74,
"grad_norm": 0.0986328125,
"learning_rate": 3.378941651431996e-05,
"loss": 0.7751,
"step": 335
},
{
"epoch": 0.74,
"grad_norm": 0.10595703125,
"learning_rate": 3.326200525152441e-05,
"loss": 0.7843,
"step": 336
},
{
"epoch": 0.74,
"grad_norm": 0.10302734375,
"learning_rate": 3.2737920209514e-05,
"loss": 0.8728,
"step": 337
},
{
"epoch": 0.74,
"grad_norm": 0.1279296875,
"learning_rate": 3.2217187508680315e-05,
"loss": 0.7532,
"step": 338
},
{
"epoch": 0.74,
"grad_norm": 0.1328125,
"learning_rate": 3.1699833102334395e-05,
"loss": 0.8074,
"step": 339
},
{
"epoch": 0.75,
"grad_norm": 0.0927734375,
"learning_rate": 3.118588277541312e-05,
"loss": 1.0021,
"step": 340
},
{
"epoch": 0.75,
"grad_norm": 0.1044921875,
"learning_rate": 3.067536214319402e-05,
"loss": 0.8352,
"step": 341
},
{
"epoch": 0.75,
"grad_norm": 0.11767578125,
"learning_rate": 3.0168296650018735e-05,
"loss": 0.824,
"step": 342
},
{
"epoch": 0.75,
"eval_loss": 0.8436947464942932,
"eval_runtime": 422.1114,
"eval_samples_per_second": 6.956,
"eval_steps_per_second": 3.478,
"step": 342
},
{
"epoch": 0.75,
"grad_norm": 0.138671875,
"learning_rate": 2.966471156802477e-05,
"loss": 0.9148,
"step": 343
},
{
"epoch": 0.76,
"grad_norm": 0.12353515625,
"learning_rate": 2.9164631995886093e-05,
"loss": 0.9928,
"step": 344
},
{
"epoch": 0.76,
"grad_norm": 0.1162109375,
"learning_rate": 2.8668082857562005e-05,
"loss": 0.8124,
"step": 345
},
{
"epoch": 0.76,
"grad_norm": 0.10107421875,
"learning_rate": 2.8175088901055026e-05,
"loss": 0.8412,
"step": 346
},
{
"epoch": 0.76,
"grad_norm": 0.10986328125,
"learning_rate": 2.7685674697177566e-05,
"loss": 0.7348,
"step": 347
},
{
"epoch": 0.76,
"grad_norm": 0.103515625,
"learning_rate": 2.7199864638327077e-05,
"loss": 0.7976,
"step": 348
},
{
"epoch": 0.77,
"grad_norm": 0.126953125,
"learning_rate": 2.6717682937270604e-05,
"loss": 0.7628,
"step": 349
},
{
"epoch": 0.77,
"grad_norm": 0.10693359375,
"learning_rate": 2.6239153625937784e-05,
"loss": 0.8783,
"step": 350
},
{
"epoch": 0.77,
"grad_norm": 0.1337890625,
"learning_rate": 2.5764300554223242e-05,
"loss": 0.6852,
"step": 351
},
{
"epoch": 0.77,
"grad_norm": 0.1181640625,
"learning_rate": 2.529314738879781e-05,
"loss": 0.8181,
"step": 352
},
{
"epoch": 0.78,
"grad_norm": 0.10693359375,
"learning_rate": 2.4825717611929146e-05,
"loss": 0.8616,
"step": 353
},
{
"epoch": 0.78,
"grad_norm": 0.119140625,
"learning_rate": 2.4362034520311216e-05,
"loss": 0.8029,
"step": 354
},
{
"epoch": 0.78,
"grad_norm": 0.126953125,
"learning_rate": 2.390212122390323e-05,
"loss": 0.8134,
"step": 355
},
{
"epoch": 0.78,
"grad_norm": 0.1298828125,
"learning_rate": 2.3446000644777853e-05,
"loss": 0.7766,
"step": 356
},
{
"epoch": 0.78,
"grad_norm": 0.107421875,
"learning_rate": 2.2993695515978762e-05,
"loss": 0.8786,
"step": 357
},
{
"epoch": 0.79,
"grad_norm": 0.1181640625,
"learning_rate": 2.2545228380387706e-05,
"loss": 0.8264,
"step": 358
},
{
"epoch": 0.79,
"grad_norm": 0.10986328125,
"learning_rate": 2.210062158960081e-05,
"loss": 0.8366,
"step": 359
},
{
"epoch": 0.79,
"grad_norm": 0.1025390625,
"learning_rate": 2.1659897302814747e-05,
"loss": 0.8282,
"step": 360
},
{
"epoch": 0.79,
"grad_norm": 0.10009765625,
"learning_rate": 2.1223077485722198e-05,
"loss": 0.9779,
"step": 361
},
{
"epoch": 0.8,
"grad_norm": 0.11181640625,
"learning_rate": 2.0790183909417093e-05,
"loss": 0.8431,
"step": 362
},
{
"epoch": 0.8,
"grad_norm": 0.12451171875,
"learning_rate": 2.036123814930967e-05,
"loss": 0.8402,
"step": 363
},
{
"epoch": 0.8,
"grad_norm": 0.10302734375,
"learning_rate": 1.9936261584050973e-05,
"loss": 0.9097,
"step": 364
},
{
"epoch": 0.8,
"grad_norm": 0.1083984375,
"learning_rate": 1.9515275394467446e-05,
"loss": 0.7467,
"step": 365
},
{
"epoch": 0.8,
"grad_norm": 0.11376953125,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.9132,
"step": 366
},
{
"epoch": 0.81,
"grad_norm": 0.09765625,
"learning_rate": 1.8685357870184605e-05,
"loss": 0.9088,
"step": 367
},
{
"epoch": 0.81,
"grad_norm": 0.09130859375,
"learning_rate": 1.8276467898563887e-05,
"loss": 0.8388,
"step": 368
},
{
"epoch": 0.81,
"grad_norm": 0.09228515625,
"learning_rate": 1.7871651026713908e-05,
"loss": 0.703,
"step": 369
},
{
"epoch": 0.81,
"grad_norm": 0.1435546875,
"learning_rate": 1.7470927430702277e-05,
"loss": 0.8362,
"step": 370
},
{
"epoch": 0.81,
"grad_norm": 0.09375,
"learning_rate": 1.7074317082587755e-05,
"loss": 0.7667,
"step": 371
},
{
"epoch": 0.82,
"grad_norm": 0.185546875,
"learning_rate": 1.668183974942491e-05,
"loss": 0.8881,
"step": 372
},
{
"epoch": 0.82,
"grad_norm": 0.12158203125,
"learning_rate": 1.6293514992278936e-05,
"loss": 0.8335,
"step": 373
},
{
"epoch": 0.82,
"grad_norm": 0.12353515625,
"learning_rate": 1.590936216525061e-05,
"loss": 0.8181,
"step": 374
},
{
"epoch": 0.82,
"grad_norm": 0.1015625,
"learning_rate": 1.5529400414511806e-05,
"loss": 0.7918,
"step": 375
},
{
"epoch": 0.83,
"grad_norm": 0.12109375,
"learning_rate": 1.5153648677351195e-05,
"loss": 0.8674,
"step": 376
},
{
"epoch": 0.83,
"grad_norm": 0.1123046875,
"learning_rate": 1.4782125681230497e-05,
"loss": 0.7134,
"step": 377
},
{
"epoch": 0.83,
"grad_norm": 0.09228515625,
"learning_rate": 1.4414849942850927e-05,
"loss": 0.8532,
"step": 378
},
{
"epoch": 0.83,
"grad_norm": 0.1025390625,
"learning_rate": 1.4051839767230478e-05,
"loss": 0.7936,
"step": 379
},
{
"epoch": 0.83,
"grad_norm": 0.1025390625,
"learning_rate": 1.3693113246791589e-05,
"loss": 0.7971,
"step": 380
},
{
"epoch": 0.84,
"grad_norm": 0.123046875,
"learning_rate": 1.333868826045932e-05,
"loss": 0.7371,
"step": 381
},
{
"epoch": 0.84,
"grad_norm": 0.11962890625,
"learning_rate": 1.2988582472770373e-05,
"loss": 0.7324,
"step": 382
},
{
"epoch": 0.84,
"grad_norm": 0.10888671875,
"learning_rate": 1.2642813332992608e-05,
"loss": 0.7527,
"step": 383
},
{
"epoch": 0.84,
"grad_norm": 0.095703125,
"learning_rate": 1.2301398074255443e-05,
"loss": 0.8041,
"step": 384
},
{
"epoch": 0.85,
"grad_norm": 0.09814453125,
"learning_rate": 1.196435371269089e-05,
"loss": 0.9232,
"step": 385
},
{
"epoch": 0.85,
"grad_norm": 0.0947265625,
"learning_rate": 1.163169704658551e-05,
"loss": 0.875,
"step": 386
},
{
"epoch": 0.85,
"grad_norm": 0.11181640625,
"learning_rate": 1.1303444655543206e-05,
"loss": 0.9133,
"step": 387
},
{
"epoch": 0.85,
"grad_norm": 0.1064453125,
"learning_rate": 1.0979612899658876e-05,
"loss": 0.7921,
"step": 388
},
{
"epoch": 0.85,
"grad_norm": 0.1064453125,
"learning_rate": 1.0660217918702965e-05,
"loss": 0.7747,
"step": 389
},
{
"epoch": 0.86,
"grad_norm": 0.1171875,
"learning_rate": 1.0345275631317163e-05,
"loss": 0.8552,
"step": 390
},
{
"epoch": 0.86,
"grad_norm": 0.1005859375,
"learning_rate": 1.0034801734220922e-05,
"loss": 0.8868,
"step": 391
},
{
"epoch": 0.86,
"grad_norm": 0.09716796875,
"learning_rate": 9.728811701429241e-06,
"loss": 0.8623,
"step": 392
},
{
"epoch": 0.86,
"grad_norm": 0.123046875,
"learning_rate": 9.427320783481353e-06,
"loss": 0.8697,
"step": 393
},
{
"epoch": 0.87,
"grad_norm": 0.1279296875,
"learning_rate": 9.130344006680657e-06,
"loss": 0.749,
"step": 394
},
{
"epoch": 0.87,
"grad_norm": 0.087890625,
"learning_rate": 8.837896172345827e-06,
"loss": 0.8668,
"step": 395
},
{
"epoch": 0.87,
"grad_norm": 0.10009765625,
"learning_rate": 8.549991856073069e-06,
"loss": 0.9327,
"step": 396
},
{
"epoch": 0.87,
"grad_norm": 0.111328125,
"learning_rate": 8.266645407009788e-06,
"loss": 0.8025,
"step": 397
},
{
"epoch": 0.87,
"grad_norm": 0.10888671875,
"learning_rate": 7.987870947139275e-06,
"loss": 0.8903,
"step": 398
},
{
"epoch": 0.88,
"grad_norm": 0.11279296875,
"learning_rate": 7.713682370576946e-06,
"loss": 0.8081,
"step": 399
},
{
"epoch": 0.88,
"grad_norm": 0.12158203125,
"learning_rate": 7.4440933428779e-06,
"loss": 0.7926,
"step": 400
},
{
"epoch": 0.88,
"grad_norm": 0.10498046875,
"learning_rate": 7.17911730035572e-06,
"loss": 0.863,
"step": 401
},
{
"epoch": 0.88,
"grad_norm": 0.10400390625,
"learning_rate": 6.9187674494129325e-06,
"loss": 0.7536,
"step": 402
},
{
"epoch": 0.89,
"grad_norm": 0.11328125,
"learning_rate": 6.663056765882692e-06,
"loss": 0.7939,
"step": 403
},
{
"epoch": 0.89,
"grad_norm": 0.10693359375,
"learning_rate": 6.4119979943821015e-06,
"loss": 0.7258,
"step": 404
},
{
"epoch": 0.89,
"grad_norm": 0.11083984375,
"learning_rate": 6.165603647677054e-06,
"loss": 0.7775,
"step": 405
},
{
"epoch": 0.89,
"grad_norm": 0.09716796875,
"learning_rate": 5.923886006058565e-06,
"loss": 0.8807,
"step": 406
},
{
"epoch": 0.89,
"grad_norm": 0.09765625,
"learning_rate": 5.6868571167307595e-06,
"loss": 0.911,
"step": 407
},
{
"epoch": 0.9,
"grad_norm": 0.103515625,
"learning_rate": 5.454528793210356e-06,
"loss": 0.8141,
"step": 408
},
{
"epoch": 0.9,
"grad_norm": 0.10400390625,
"learning_rate": 5.2269126147379555e-06,
"loss": 0.8843,
"step": 409
},
{
"epoch": 0.9,
"grad_norm": 0.10107421875,
"learning_rate": 5.00401992570092e-06,
"loss": 0.7764,
"step": 410
},
{
"epoch": 0.9,
"grad_norm": 0.109375,
"learning_rate": 4.785861835067962e-06,
"loss": 0.7584,
"step": 411
},
{
"epoch": 0.9,
"grad_norm": 0.10595703125,
"learning_rate": 4.5724492158354396e-06,
"loss": 0.8061,
"step": 412
},
{
"epoch": 0.91,
"grad_norm": 0.11669921875,
"learning_rate": 4.3637927044855475e-06,
"loss": 0.8835,
"step": 413
},
{
"epoch": 0.91,
"grad_norm": 0.1123046875,
"learning_rate": 4.159902700456053e-06,
"loss": 0.8228,
"step": 414
},
{
"epoch": 0.91,
"grad_norm": 0.11376953125,
"learning_rate": 3.960789365622075e-06,
"loss": 0.7909,
"step": 415
},
{
"epoch": 0.91,
"grad_norm": 0.10693359375,
"learning_rate": 3.766462623789646e-06,
"loss": 0.9526,
"step": 416
},
{
"epoch": 0.92,
"grad_norm": 0.08837890625,
"learning_rate": 3.57693216020103e-06,
"loss": 0.8374,
"step": 417
},
{
"epoch": 0.92,
"grad_norm": 0.10693359375,
"learning_rate": 3.3922074210520405e-06,
"loss": 0.8091,
"step": 418
},
{
"epoch": 0.92,
"grad_norm": 0.1083984375,
"learning_rate": 3.2122976130212646e-06,
"loss": 0.886,
"step": 419
},
{
"epoch": 0.92,
"grad_norm": 0.1025390625,
"learning_rate": 3.0372117028111825e-06,
"loss": 0.7662,
"step": 420
},
{
"epoch": 0.92,
"grad_norm": 0.1171875,
"learning_rate": 2.866958416701271e-06,
"loss": 0.9222,
"step": 421
},
{
"epoch": 0.93,
"grad_norm": 0.103515625,
"learning_rate": 2.7015462401130843e-06,
"loss": 0.7954,
"step": 422
},
{
"epoch": 0.93,
"grad_norm": 0.09423828125,
"learning_rate": 2.5409834171873482e-06,
"loss": 0.7532,
"step": 423
},
{
"epoch": 0.93,
"grad_norm": 0.11962890625,
"learning_rate": 2.3852779503730216e-06,
"loss": 0.8322,
"step": 424
},
{
"epoch": 0.93,
"grad_norm": 0.12890625,
"learning_rate": 2.2344376000285604e-06,
"loss": 0.7971,
"step": 425
},
{
"epoch": 0.94,
"grad_norm": 0.1279296875,
"learning_rate": 2.088469884035049e-06,
"loss": 0.8257,
"step": 426
},
{
"epoch": 0.94,
"grad_norm": 0.13671875,
"learning_rate": 1.9473820774215555e-06,
"loss": 0.7701,
"step": 427
},
{
"epoch": 0.94,
"grad_norm": 0.12109375,
"learning_rate": 1.8111812120024885e-06,
"loss": 0.8043,
"step": 428
},
{
"epoch": 0.94,
"grad_norm": 0.10595703125,
"learning_rate": 1.6798740760272103e-06,
"loss": 0.7984,
"step": 429
},
{
"epoch": 0.94,
"grad_norm": 0.11767578125,
"learning_rate": 1.553467213841664e-06,
"loss": 0.8012,
"step": 430
},
{
"epoch": 0.95,
"grad_norm": 0.10400390625,
"learning_rate": 1.4319669255622115e-06,
"loss": 0.8198,
"step": 431
},
{
"epoch": 0.95,
"grad_norm": 0.1083984375,
"learning_rate": 1.3153792667616183e-06,
"loss": 0.8186,
"step": 432
},
{
"epoch": 0.95,
"grad_norm": 0.09912109375,
"learning_rate": 1.2037100481672835e-06,
"loss": 0.722,
"step": 433
},
{
"epoch": 0.95,
"grad_norm": 0.12158203125,
"learning_rate": 1.0969648353715945e-06,
"loss": 0.8273,
"step": 434
},
{
"epoch": 0.96,
"grad_norm": 0.10791015625,
"learning_rate": 9.951489485545695e-07,
"loss": 0.8434,
"step": 435
},
{
"epoch": 0.96,
"grad_norm": 0.1123046875,
"learning_rate": 8.982674622186605e-07,
"loss": 0.8882,
"step": 436
},
{
"epoch": 0.96,
"grad_norm": 0.130859375,
"learning_rate": 8.063252049358982e-07,
"loss": 0.9377,
"step": 437
},
{
"epoch": 0.96,
"grad_norm": 0.0986328125,
"learning_rate": 7.193267591071529e-07,
"loss": 0.8761,
"step": 438
},
{
"epoch": 0.96,
"grad_norm": 0.10595703125,
"learning_rate": 6.372764607338599e-07,
"loss": 0.8163,
"step": 439
},
{
"epoch": 0.97,
"grad_norm": 0.1005859375,
"learning_rate": 5.60178399201805e-07,
"loss": 0.8317,
"step": 440
},
{
"epoch": 0.97,
"grad_norm": 0.11474609375,
"learning_rate": 4.880364170773533e-07,
"loss": 0.8711,
"step": 441
},
{
"epoch": 0.97,
"grad_norm": 0.1005859375,
"learning_rate": 4.208541099159691e-07,
"loss": 0.7127,
"step": 442
},
{
"epoch": 0.97,
"grad_norm": 0.125,
"learning_rate": 3.586348260829486e-07,
"loss": 0.8064,
"step": 443
},
{
"epoch": 0.98,
"grad_norm": 0.10498046875,
"learning_rate": 3.013816665865976e-07,
"loss": 0.9065,
"step": 444
},
{
"epoch": 0.98,
"grad_norm": 0.11328125,
"learning_rate": 2.490974849236216e-07,
"loss": 0.7879,
"step": 445
},
{
"epoch": 0.98,
"grad_norm": 0.10498046875,
"learning_rate": 2.0178488693695096e-07,
"loss": 0.7726,
"step": 446
},
{
"epoch": 0.98,
"grad_norm": 0.11376953125,
"learning_rate": 1.5944623068586685e-07,
"loss": 0.8243,
"step": 447
},
{
"epoch": 0.98,
"grad_norm": 0.107421875,
"learning_rate": 1.2208362632842862e-07,
"loss": 0.8225,
"step": 448
},
{
"epoch": 0.99,
"grad_norm": 0.10595703125,
"learning_rate": 8.969893601634694e-08,
"loss": 0.7913,
"step": 449
},
{
"epoch": 0.99,
"grad_norm": 0.11865234375,
"learning_rate": 6.229377380218005e-08,
"loss": 0.7748,
"step": 450
},
{
"epoch": 0.99,
"grad_norm": 0.111328125,
"learning_rate": 3.986950555883162e-08,
"loss": 0.8305,
"step": 451
},
{
"epoch": 0.99,
"grad_norm": 0.125,
"learning_rate": 2.242724891156067e-08,
"loss": 0.8547,
"step": 452
},
{
"epoch": 1.0,
"grad_norm": 0.11181640625,
"learning_rate": 9.96787318218173e-09,
"loss": 1.0104,
"step": 453
},
{
"epoch": 1.0,
"grad_norm": 0.11865234375,
"learning_rate": 2.4919993458549784e-09,
"loss": 0.8115,
"step": 454
},
{
"epoch": 1.0,
"grad_norm": 0.146484375,
"learning_rate": 0.0,
"loss": 0.8365,
"step": 455
}
],
"logging_steps": 1,
"max_steps": 455,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 1.3577390229395866e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}