Wiki_200 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 17.142857142857142,
"eval_steps": 500,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 0.000999404761904762,
"loss": 3.0944,
"step": 1
},
{
"epoch": 0.06,
"learning_rate": 0.0009988095238095238,
"loss": 2.7802,
"step": 2
},
{
"epoch": 0.09,
"learning_rate": 0.0009982142857142857,
"loss": 2.7798,
"step": 3
},
{
"epoch": 0.11,
"learning_rate": 0.0009976190476190477,
"loss": 2.6729,
"step": 4
},
{
"epoch": 0.14,
"learning_rate": 0.0009970238095238096,
"loss": 2.7544,
"step": 5
},
{
"epoch": 0.17,
"learning_rate": 0.0009964285714285715,
"loss": 2.7115,
"step": 6
},
{
"epoch": 0.2,
"learning_rate": 0.0009958333333333334,
"loss": 2.7491,
"step": 7
},
{
"epoch": 0.23,
"learning_rate": 0.0009952380952380953,
"loss": 2.681,
"step": 8
},
{
"epoch": 0.26,
"learning_rate": 0.0009946428571428571,
"loss": 2.7396,
"step": 9
},
{
"epoch": 0.29,
"learning_rate": 0.000994047619047619,
"loss": 2.6911,
"step": 10
},
{
"epoch": 0.31,
"learning_rate": 0.0009934523809523809,
"loss": 2.6725,
"step": 11
},
{
"epoch": 0.34,
"learning_rate": 0.000992857142857143,
"loss": 2.6951,
"step": 12
},
{
"epoch": 0.37,
"learning_rate": 0.0009922619047619049,
"loss": 2.6741,
"step": 13
},
{
"epoch": 0.4,
"learning_rate": 0.0009916666666666667,
"loss": 2.6623,
"step": 14
},
{
"epoch": 0.43,
"learning_rate": 0.0009910714285714286,
"loss": 2.6268,
"step": 15
},
{
"epoch": 0.46,
"learning_rate": 0.0009904761904761905,
"loss": 2.6727,
"step": 16
},
{
"epoch": 0.49,
"learning_rate": 0.0009898809523809524,
"loss": 2.6174,
"step": 17
},
{
"epoch": 0.51,
"learning_rate": 0.0009892857142857142,
"loss": 2.6544,
"step": 18
},
{
"epoch": 0.54,
"learning_rate": 0.0009886904761904763,
"loss": 2.5404,
"step": 19
},
{
"epoch": 0.57,
"learning_rate": 0.0009880952380952382,
"loss": 2.6286,
"step": 20
},
{
"epoch": 0.6,
"learning_rate": 0.0009875,
"loss": 2.6824,
"step": 21
},
{
"epoch": 0.63,
"learning_rate": 0.000986904761904762,
"loss": 2.6012,
"step": 22
},
{
"epoch": 0.66,
"learning_rate": 0.0009863095238095239,
"loss": 2.6446,
"step": 23
},
{
"epoch": 0.69,
"learning_rate": 0.0009857142857142857,
"loss": 2.6437,
"step": 24
},
{
"epoch": 0.71,
"learning_rate": 0.0009851190476190476,
"loss": 2.6596,
"step": 25
},
{
"epoch": 0.74,
"learning_rate": 0.0009845238095238097,
"loss": 2.6554,
"step": 26
},
{
"epoch": 0.77,
"learning_rate": 0.0009839285714285714,
"loss": 2.6064,
"step": 27
},
{
"epoch": 0.8,
"learning_rate": 0.0009833333333333332,
"loss": 2.5994,
"step": 28
},
{
"epoch": 0.83,
"learning_rate": 0.0009827380952380951,
"loss": 2.545,
"step": 29
},
{
"epoch": 0.86,
"learning_rate": 0.0009821428571428572,
"loss": 2.5704,
"step": 30
},
{
"epoch": 0.89,
"learning_rate": 0.000981547619047619,
"loss": 2.6461,
"step": 31
},
{
"epoch": 0.91,
"learning_rate": 0.000980952380952381,
"loss": 2.631,
"step": 32
},
{
"epoch": 0.94,
"learning_rate": 0.0009803571428571428,
"loss": 2.6678,
"step": 33
},
{
"epoch": 0.97,
"learning_rate": 0.0009797619047619047,
"loss": 2.5964,
"step": 34
},
{
"epoch": 1.0,
"learning_rate": 0.0009791666666666666,
"loss": 2.6566,
"step": 35
},
{
"epoch": 1.03,
"learning_rate": 0.0009785714285714285,
"loss": 2.4962,
"step": 36
},
{
"epoch": 1.06,
"learning_rate": 0.0009779761904761906,
"loss": 2.4815,
"step": 37
},
{
"epoch": 1.09,
"learning_rate": 0.0009773809523809524,
"loss": 2.4172,
"step": 38
},
{
"epoch": 1.11,
"learning_rate": 0.0009767857142857143,
"loss": 2.4641,
"step": 39
},
{
"epoch": 1.14,
"learning_rate": 0.0009761904761904762,
"loss": 2.4875,
"step": 40
},
{
"epoch": 1.17,
"learning_rate": 0.0009755952380952381,
"loss": 2.4486,
"step": 41
},
{
"epoch": 1.2,
"learning_rate": 0.000975,
"loss": 2.4463,
"step": 42
},
{
"epoch": 1.23,
"learning_rate": 0.0009744047619047619,
"loss": 2.5063,
"step": 43
},
{
"epoch": 1.26,
"learning_rate": 0.0009738095238095238,
"loss": 2.4506,
"step": 44
},
{
"epoch": 1.29,
"learning_rate": 0.0009732142857142857,
"loss": 2.3862,
"step": 45
},
{
"epoch": 1.31,
"learning_rate": 0.0009726190476190476,
"loss": 2.4925,
"step": 46
},
{
"epoch": 1.34,
"learning_rate": 0.0009720238095238096,
"loss": 2.5,
"step": 47
},
{
"epoch": 1.37,
"learning_rate": 0.0009714285714285714,
"loss": 2.4658,
"step": 48
},
{
"epoch": 1.4,
"learning_rate": 0.0009708333333333333,
"loss": 2.3983,
"step": 49
},
{
"epoch": 1.43,
"learning_rate": 0.0009702380952380953,
"loss": 2.369,
"step": 50
},
{
"epoch": 1.46,
"learning_rate": 0.0009696428571428572,
"loss": 2.4849,
"step": 51
},
{
"epoch": 1.49,
"learning_rate": 0.0009690476190476191,
"loss": 2.4106,
"step": 52
},
{
"epoch": 1.51,
"learning_rate": 0.0009684523809523809,
"loss": 2.4363,
"step": 53
},
{
"epoch": 1.54,
"learning_rate": 0.0009678571428571429,
"loss": 2.4935,
"step": 54
},
{
"epoch": 1.57,
"learning_rate": 0.0009672619047619048,
"loss": 2.479,
"step": 55
},
{
"epoch": 1.6,
"learning_rate": 0.0009666666666666667,
"loss": 2.4299,
"step": 56
},
{
"epoch": 1.63,
"learning_rate": 0.0009660714285714285,
"loss": 2.3964,
"step": 57
},
{
"epoch": 1.66,
"learning_rate": 0.0009654761904761905,
"loss": 2.4865,
"step": 58
},
{
"epoch": 1.69,
"learning_rate": 0.0009648809523809524,
"loss": 2.3831,
"step": 59
},
{
"epoch": 1.71,
"learning_rate": 0.0009642857142857143,
"loss": 2.4304,
"step": 60
},
{
"epoch": 1.74,
"learning_rate": 0.0009636904761904763,
"loss": 2.4273,
"step": 61
},
{
"epoch": 1.77,
"learning_rate": 0.0009630952380952382,
"loss": 2.4427,
"step": 62
},
{
"epoch": 1.8,
"learning_rate": 0.0009625,
"loss": 2.4191,
"step": 63
},
{
"epoch": 1.83,
"learning_rate": 0.0009619047619047619,
"loss": 2.3902,
"step": 64
},
{
"epoch": 1.86,
"learning_rate": 0.0009613095238095239,
"loss": 2.4699,
"step": 65
},
{
"epoch": 1.89,
"learning_rate": 0.0009607142857142858,
"loss": 2.3978,
"step": 66
},
{
"epoch": 1.91,
"learning_rate": 0.0009601190476190476,
"loss": 2.4128,
"step": 67
},
{
"epoch": 1.94,
"learning_rate": 0.0009595238095238095,
"loss": 2.4585,
"step": 68
},
{
"epoch": 1.97,
"learning_rate": 0.0009589285714285715,
"loss": 2.4476,
"step": 69
},
{
"epoch": 2.0,
"learning_rate": 0.0009583333333333334,
"loss": 2.4231,
"step": 70
},
{
"epoch": 2.03,
"learning_rate": 0.0009577380952380953,
"loss": 2.2655,
"step": 71
},
{
"epoch": 2.06,
"learning_rate": 0.0009571428571428573,
"loss": 2.208,
"step": 72
},
{
"epoch": 2.09,
"learning_rate": 0.0009565476190476191,
"loss": 2.1758,
"step": 73
},
{
"epoch": 2.11,
"learning_rate": 0.000955952380952381,
"loss": 2.3113,
"step": 74
},
{
"epoch": 2.14,
"learning_rate": 0.0009553571428571429,
"loss": 2.1739,
"step": 75
},
{
"epoch": 2.17,
"learning_rate": 0.0009547619047619049,
"loss": 2.2234,
"step": 76
},
{
"epoch": 2.2,
"learning_rate": 0.0009541666666666667,
"loss": 2.21,
"step": 77
},
{
"epoch": 2.23,
"learning_rate": 0.0009535714285714286,
"loss": 2.2316,
"step": 78
},
{
"epoch": 2.26,
"learning_rate": 0.0009529761904761904,
"loss": 2.2044,
"step": 79
},
{
"epoch": 2.29,
"learning_rate": 0.0009523809523809524,
"loss": 2.1784,
"step": 80
},
{
"epoch": 2.31,
"learning_rate": 0.0009517857142857143,
"loss": 2.2489,
"step": 81
},
{
"epoch": 2.34,
"learning_rate": 0.0009511904761904761,
"loss": 2.2003,
"step": 82
},
{
"epoch": 2.37,
"learning_rate": 0.0009505952380952381,
"loss": 2.169,
"step": 83
},
{
"epoch": 2.4,
"learning_rate": 0.00095,
"loss": 2.2303,
"step": 84
},
{
"epoch": 2.43,
"learning_rate": 0.0009494047619047619,
"loss": 2.1744,
"step": 85
},
{
"epoch": 2.46,
"learning_rate": 0.0009488095238095238,
"loss": 2.1904,
"step": 86
},
{
"epoch": 2.49,
"learning_rate": 0.0009482142857142857,
"loss": 2.222,
"step": 87
},
{
"epoch": 2.51,
"learning_rate": 0.0009476190476190476,
"loss": 2.2467,
"step": 88
},
{
"epoch": 2.54,
"learning_rate": 0.0009470238095238095,
"loss": 2.2241,
"step": 89
},
{
"epoch": 2.57,
"learning_rate": 0.0009464285714285714,
"loss": 2.2339,
"step": 90
},
{
"epoch": 2.6,
"learning_rate": 0.0009458333333333334,
"loss": 2.2339,
"step": 91
},
{
"epoch": 2.63,
"learning_rate": 0.0009452380952380952,
"loss": 2.1653,
"step": 92
},
{
"epoch": 2.66,
"learning_rate": 0.0009446428571428571,
"loss": 2.2954,
"step": 93
},
{
"epoch": 2.69,
"learning_rate": 0.0009440476190476191,
"loss": 2.2859,
"step": 94
},
{
"epoch": 2.71,
"learning_rate": 0.000943452380952381,
"loss": 2.2107,
"step": 95
},
{
"epoch": 2.74,
"learning_rate": 0.0009428571428571429,
"loss": 2.2142,
"step": 96
},
{
"epoch": 2.77,
"learning_rate": 0.0009422619047619047,
"loss": 2.2433,
"step": 97
},
{
"epoch": 2.8,
"learning_rate": 0.0009416666666666667,
"loss": 2.2417,
"step": 98
},
{
"epoch": 2.83,
"learning_rate": 0.0009410714285714286,
"loss": 2.2045,
"step": 99
},
{
"epoch": 2.86,
"learning_rate": 0.0009404761904761905,
"loss": 2.293,
"step": 100
},
{
"epoch": 2.89,
"learning_rate": 0.0009398809523809523,
"loss": 2.2051,
"step": 101
},
{
"epoch": 2.91,
"learning_rate": 0.0009392857142857143,
"loss": 2.2889,
"step": 102
},
{
"epoch": 2.94,
"learning_rate": 0.0009386904761904762,
"loss": 2.2,
"step": 103
},
{
"epoch": 2.97,
"learning_rate": 0.0009380952380952381,
"loss": 2.2298,
"step": 104
},
{
"epoch": 3.0,
"learning_rate": 0.0009375,
"loss": 2.2722,
"step": 105
},
{
"epoch": 3.03,
"learning_rate": 0.000936904761904762,
"loss": 2.0032,
"step": 106
},
{
"epoch": 3.06,
"learning_rate": 0.0009363095238095238,
"loss": 1.9269,
"step": 107
},
{
"epoch": 3.09,
"learning_rate": 0.0009357142857142857,
"loss": 1.916,
"step": 108
},
{
"epoch": 3.11,
"learning_rate": 0.0009351190476190477,
"loss": 1.9165,
"step": 109
},
{
"epoch": 3.14,
"learning_rate": 0.0009345238095238096,
"loss": 1.9296,
"step": 110
},
{
"epoch": 3.17,
"learning_rate": 0.0009339285714285714,
"loss": 1.954,
"step": 111
},
{
"epoch": 3.2,
"learning_rate": 0.0009333333333333333,
"loss": 1.9457,
"step": 112
},
{
"epoch": 3.23,
"learning_rate": 0.0009327380952380953,
"loss": 1.9135,
"step": 113
},
{
"epoch": 3.26,
"learning_rate": 0.0009321428571428572,
"loss": 1.9383,
"step": 114
},
{
"epoch": 3.29,
"learning_rate": 0.0009315476190476191,
"loss": 1.9057,
"step": 115
},
{
"epoch": 3.31,
"learning_rate": 0.0009309523809523809,
"loss": 1.9541,
"step": 116
},
{
"epoch": 3.34,
"learning_rate": 0.0009303571428571429,
"loss": 1.8827,
"step": 117
},
{
"epoch": 3.37,
"learning_rate": 0.0009297619047619048,
"loss": 1.899,
"step": 118
},
{
"epoch": 3.4,
"learning_rate": 0.0009291666666666667,
"loss": 1.9095,
"step": 119
},
{
"epoch": 3.43,
"learning_rate": 0.0009285714285714287,
"loss": 1.936,
"step": 120
},
{
"epoch": 3.46,
"learning_rate": 0.0009279761904761905,
"loss": 1.9224,
"step": 121
},
{
"epoch": 3.49,
"learning_rate": 0.0009273809523809524,
"loss": 1.9547,
"step": 122
},
{
"epoch": 3.51,
"learning_rate": 0.0009267857142857143,
"loss": 1.9323,
"step": 123
},
{
"epoch": 3.54,
"learning_rate": 0.0009261904761904763,
"loss": 1.9938,
"step": 124
},
{
"epoch": 3.57,
"learning_rate": 0.0009255952380952382,
"loss": 1.8674,
"step": 125
},
{
"epoch": 3.6,
"learning_rate": 0.000925,
"loss": 1.95,
"step": 126
},
{
"epoch": 3.63,
"learning_rate": 0.0009244047619047619,
"loss": 1.9374,
"step": 127
},
{
"epoch": 3.66,
"learning_rate": 0.0009238095238095239,
"loss": 1.9456,
"step": 128
},
{
"epoch": 3.69,
"learning_rate": 0.0009232142857142858,
"loss": 1.9791,
"step": 129
},
{
"epoch": 3.71,
"learning_rate": 0.0009226190476190477,
"loss": 2.0047,
"step": 130
},
{
"epoch": 3.74,
"learning_rate": 0.0009220238095238096,
"loss": 1.9971,
"step": 131
},
{
"epoch": 3.77,
"learning_rate": 0.0009214285714285714,
"loss": 2.0267,
"step": 132
},
{
"epoch": 3.8,
"learning_rate": 0.0009208333333333333,
"loss": 1.9374,
"step": 133
},
{
"epoch": 3.83,
"learning_rate": 0.0009202380952380952,
"loss": 1.9793,
"step": 134
},
{
"epoch": 3.86,
"learning_rate": 0.0009196428571428572,
"loss": 2.0483,
"step": 135
},
{
"epoch": 3.89,
"learning_rate": 0.000919047619047619,
"loss": 2.0003,
"step": 136
},
{
"epoch": 3.91,
"learning_rate": 0.0009184523809523809,
"loss": 2.0185,
"step": 137
},
{
"epoch": 3.94,
"learning_rate": 0.0009178571428571428,
"loss": 2.0517,
"step": 138
},
{
"epoch": 3.97,
"learning_rate": 0.0009172619047619048,
"loss": 1.9824,
"step": 139
},
{
"epoch": 4.0,
"learning_rate": 0.0009166666666666666,
"loss": 2.0383,
"step": 140
},
{
"epoch": 4.03,
"learning_rate": 0.0009160714285714285,
"loss": 1.6818,
"step": 141
},
{
"epoch": 4.06,
"learning_rate": 0.0009154761904761905,
"loss": 1.6208,
"step": 142
},
{
"epoch": 4.09,
"learning_rate": 0.0009148809523809524,
"loss": 1.6843,
"step": 143
},
{
"epoch": 4.11,
"learning_rate": 0.0009142857142857143,
"loss": 1.5885,
"step": 144
},
{
"epoch": 4.14,
"learning_rate": 0.0009136904761904761,
"loss": 1.5799,
"step": 145
},
{
"epoch": 4.17,
"learning_rate": 0.0009130952380952381,
"loss": 1.6334,
"step": 146
},
{
"epoch": 4.2,
"learning_rate": 0.0009125,
"loss": 1.6297,
"step": 147
},
{
"epoch": 4.23,
"learning_rate": 0.0009119047619047619,
"loss": 1.5929,
"step": 148
},
{
"epoch": 4.26,
"learning_rate": 0.0009113095238095238,
"loss": 1.6621,
"step": 149
},
{
"epoch": 4.29,
"learning_rate": 0.0009107142857142857,
"loss": 1.626,
"step": 150
},
{
"epoch": 4.31,
"learning_rate": 0.0009101190476190476,
"loss": 1.6138,
"step": 151
},
{
"epoch": 4.34,
"learning_rate": 0.0009095238095238095,
"loss": 1.6465,
"step": 152
},
{
"epoch": 4.37,
"learning_rate": 0.0009089285714285715,
"loss": 1.6622,
"step": 153
},
{
"epoch": 4.4,
"learning_rate": 0.0009083333333333334,
"loss": 1.6662,
"step": 154
},
{
"epoch": 4.43,
"learning_rate": 0.0009077380952380952,
"loss": 1.6348,
"step": 155
},
{
"epoch": 4.46,
"learning_rate": 0.0009071428571428571,
"loss": 1.6196,
"step": 156
},
{
"epoch": 4.49,
"learning_rate": 0.0009065476190476191,
"loss": 1.6766,
"step": 157
},
{
"epoch": 4.51,
"learning_rate": 0.000905952380952381,
"loss": 1.7069,
"step": 158
},
{
"epoch": 4.54,
"learning_rate": 0.0009053571428571429,
"loss": 1.6848,
"step": 159
},
{
"epoch": 4.57,
"learning_rate": 0.0009047619047619047,
"loss": 1.6884,
"step": 160
},
{
"epoch": 4.6,
"learning_rate": 0.0009041666666666667,
"loss": 1.6721,
"step": 161
},
{
"epoch": 4.63,
"learning_rate": 0.0009035714285714286,
"loss": 1.7116,
"step": 162
},
{
"epoch": 4.66,
"learning_rate": 0.0009029761904761905,
"loss": 1.693,
"step": 163
},
{
"epoch": 4.69,
"learning_rate": 0.0009023809523809525,
"loss": 1.6826,
"step": 164
},
{
"epoch": 4.71,
"learning_rate": 0.0009017857142857143,
"loss": 1.7061,
"step": 165
},
{
"epoch": 4.74,
"learning_rate": 0.0009011904761904762,
"loss": 1.6964,
"step": 166
},
{
"epoch": 4.77,
"learning_rate": 0.0009005952380952381,
"loss": 1.7903,
"step": 167
},
{
"epoch": 4.8,
"learning_rate": 0.0009000000000000001,
"loss": 1.6829,
"step": 168
},
{
"epoch": 4.83,
"learning_rate": 0.000899404761904762,
"loss": 1.7047,
"step": 169
},
{
"epoch": 4.86,
"learning_rate": 0.0008988095238095238,
"loss": 1.7671,
"step": 170
},
{
"epoch": 4.89,
"learning_rate": 0.0008982142857142857,
"loss": 1.7184,
"step": 171
},
{
"epoch": 4.91,
"learning_rate": 0.0008976190476190477,
"loss": 1.8213,
"step": 172
},
{
"epoch": 4.94,
"learning_rate": 0.0008970238095238096,
"loss": 1.7688,
"step": 173
},
{
"epoch": 4.97,
"learning_rate": 0.0008964285714285715,
"loss": 1.7522,
"step": 174
},
{
"epoch": 5.0,
"learning_rate": 0.0008958333333333334,
"loss": 1.7862,
"step": 175
},
{
"epoch": 5.03,
"learning_rate": 0.0008952380952380953,
"loss": 1.3871,
"step": 176
},
{
"epoch": 5.06,
"learning_rate": 0.0008946428571428572,
"loss": 1.3491,
"step": 177
},
{
"epoch": 5.09,
"learning_rate": 0.0008940476190476191,
"loss": 1.3399,
"step": 178
},
{
"epoch": 5.11,
"learning_rate": 0.0008934523809523811,
"loss": 1.3569,
"step": 179
},
{
"epoch": 5.14,
"learning_rate": 0.0008928571428571429,
"loss": 1.3734,
"step": 180
},
{
"epoch": 5.17,
"learning_rate": 0.0008922619047619048,
"loss": 1.3151,
"step": 181
},
{
"epoch": 5.2,
"learning_rate": 0.0008916666666666667,
"loss": 1.3243,
"step": 182
},
{
"epoch": 5.23,
"learning_rate": 0.0008910714285714287,
"loss": 1.342,
"step": 183
},
{
"epoch": 5.26,
"learning_rate": 0.0008904761904761904,
"loss": 1.3664,
"step": 184
},
{
"epoch": 5.29,
"learning_rate": 0.0008898809523809523,
"loss": 1.3493,
"step": 185
},
{
"epoch": 5.31,
"learning_rate": 0.0008892857142857142,
"loss": 1.32,
"step": 186
},
{
"epoch": 5.34,
"learning_rate": 0.0008886904761904762,
"loss": 1.3978,
"step": 187
},
{
"epoch": 5.37,
"learning_rate": 0.0008880952380952381,
"loss": 1.3762,
"step": 188
},
{
"epoch": 5.4,
"learning_rate": 0.0008874999999999999,
"loss": 1.4172,
"step": 189
},
{
"epoch": 5.43,
"learning_rate": 0.0008869047619047619,
"loss": 1.3817,
"step": 190
},
{
"epoch": 5.46,
"learning_rate": 0.0008863095238095238,
"loss": 1.3779,
"step": 191
},
{
"epoch": 5.49,
"learning_rate": 0.0008857142857142857,
"loss": 1.378,
"step": 192
},
{
"epoch": 5.51,
"learning_rate": 0.0008851190476190476,
"loss": 1.4245,
"step": 193
},
{
"epoch": 5.54,
"learning_rate": 0.0008845238095238095,
"loss": 1.4425,
"step": 194
},
{
"epoch": 5.57,
"learning_rate": 0.0008839285714285714,
"loss": 1.4324,
"step": 195
},
{
"epoch": 5.6,
"learning_rate": 0.0008833333333333333,
"loss": 1.4264,
"step": 196
},
{
"epoch": 5.63,
"learning_rate": 0.0008827380952380952,
"loss": 1.4395,
"step": 197
},
{
"epoch": 5.66,
"learning_rate": 0.0008821428571428572,
"loss": 1.4549,
"step": 198
},
{
"epoch": 5.69,
"learning_rate": 0.000881547619047619,
"loss": 1.501,
"step": 199
},
{
"epoch": 5.71,
"learning_rate": 0.0008809523809523809,
"loss": 1.432,
"step": 200
},
{
"epoch": 5.74,
"learning_rate": 0.0008803571428571429,
"loss": 1.4922,
"step": 201
},
{
"epoch": 5.77,
"learning_rate": 0.0008797619047619048,
"loss": 1.4622,
"step": 202
},
{
"epoch": 5.8,
"learning_rate": 0.0008791666666666667,
"loss": 1.4794,
"step": 203
},
{
"epoch": 5.83,
"learning_rate": 0.0008785714285714285,
"loss": 1.4938,
"step": 204
},
{
"epoch": 5.86,
"learning_rate": 0.0008779761904761905,
"loss": 1.4792,
"step": 205
},
{
"epoch": 5.89,
"learning_rate": 0.0008773809523809524,
"loss": 1.5192,
"step": 206
},
{
"epoch": 5.91,
"learning_rate": 0.0008767857142857143,
"loss": 1.5055,
"step": 207
},
{
"epoch": 5.94,
"learning_rate": 0.0008761904761904762,
"loss": 1.5484,
"step": 208
},
{
"epoch": 5.97,
"learning_rate": 0.0008755952380952381,
"loss": 1.5096,
"step": 209
},
{
"epoch": 6.0,
"learning_rate": 0.000875,
"loss": 1.5298,
"step": 210
},
{
"epoch": 6.03,
"learning_rate": 0.0008744047619047619,
"loss": 1.1704,
"step": 211
},
{
"epoch": 6.06,
"learning_rate": 0.0008738095238095239,
"loss": 1.1261,
"step": 212
},
{
"epoch": 6.09,
"learning_rate": 0.0008732142857142858,
"loss": 1.1144,
"step": 213
},
{
"epoch": 6.11,
"learning_rate": 0.0008726190476190476,
"loss": 1.0984,
"step": 214
},
{
"epoch": 6.14,
"learning_rate": 0.0008720238095238095,
"loss": 1.0704,
"step": 215
},
{
"epoch": 6.17,
"learning_rate": 0.0008714285714285715,
"loss": 1.0655,
"step": 216
},
{
"epoch": 6.2,
"learning_rate": 0.0008708333333333334,
"loss": 1.09,
"step": 217
},
{
"epoch": 6.23,
"learning_rate": 0.0008702380952380953,
"loss": 1.0619,
"step": 218
},
{
"epoch": 6.26,
"learning_rate": 0.0008696428571428571,
"loss": 1.1633,
"step": 219
},
{
"epoch": 6.29,
"learning_rate": 0.0008690476190476191,
"loss": 1.1022,
"step": 220
},
{
"epoch": 6.31,
"learning_rate": 0.000868452380952381,
"loss": 1.1057,
"step": 221
},
{
"epoch": 6.34,
"learning_rate": 0.0008678571428571429,
"loss": 1.1279,
"step": 222
},
{
"epoch": 6.37,
"learning_rate": 0.0008672619047619049,
"loss": 1.0915,
"step": 223
},
{
"epoch": 6.4,
"learning_rate": 0.0008666666666666667,
"loss": 1.1731,
"step": 224
},
{
"epoch": 6.43,
"learning_rate": 0.0008660714285714286,
"loss": 1.1352,
"step": 225
},
{
"epoch": 6.46,
"learning_rate": 0.0008654761904761905,
"loss": 1.1632,
"step": 226
},
{
"epoch": 6.49,
"learning_rate": 0.0008648809523809525,
"loss": 1.1691,
"step": 227
},
{
"epoch": 6.51,
"learning_rate": 0.0008642857142857144,
"loss": 1.181,
"step": 228
},
{
"epoch": 6.54,
"learning_rate": 0.0008636904761904762,
"loss": 1.1635,
"step": 229
},
{
"epoch": 6.57,
"learning_rate": 0.0008630952380952381,
"loss": 1.1802,
"step": 230
},
{
"epoch": 6.6,
"learning_rate": 0.0008625000000000001,
"loss": 1.2111,
"step": 231
},
{
"epoch": 6.63,
"learning_rate": 0.000861904761904762,
"loss": 1.2503,
"step": 232
},
{
"epoch": 6.66,
"learning_rate": 0.0008613095238095238,
"loss": 1.2305,
"step": 233
},
{
"epoch": 6.69,
"learning_rate": 0.0008607142857142858,
"loss": 1.2446,
"step": 234
},
{
"epoch": 6.71,
"learning_rate": 0.0008601190476190477,
"loss": 1.263,
"step": 235
},
{
"epoch": 6.74,
"learning_rate": 0.0008595238095238096,
"loss": 1.2407,
"step": 236
},
{
"epoch": 6.77,
"learning_rate": 0.0008589285714285714,
"loss": 1.303,
"step": 237
},
{
"epoch": 6.8,
"learning_rate": 0.0008583333333333333,
"loss": 1.2309,
"step": 238
},
{
"epoch": 6.83,
"learning_rate": 0.0008577380952380952,
"loss": 1.2669,
"step": 239
},
{
"epoch": 6.86,
"learning_rate": 0.0008571428571428571,
"loss": 1.226,
"step": 240
},
{
"epoch": 6.89,
"learning_rate": 0.000856547619047619,
"loss": 1.2862,
"step": 241
},
{
"epoch": 6.91,
"learning_rate": 0.000855952380952381,
"loss": 1.2472,
"step": 242
},
{
"epoch": 6.94,
"learning_rate": 0.0008553571428571428,
"loss": 1.2928,
"step": 243
},
{
"epoch": 6.97,
"learning_rate": 0.0008547619047619047,
"loss": 1.2427,
"step": 244
},
{
"epoch": 7.0,
"learning_rate": 0.0008541666666666666,
"loss": 1.3195,
"step": 245
},
{
"epoch": 7.03,
"learning_rate": 0.0008535714285714286,
"loss": 0.8949,
"step": 246
},
{
"epoch": 7.06,
"learning_rate": 0.0008529761904761905,
"loss": 0.8907,
"step": 247
},
{
"epoch": 7.09,
"learning_rate": 0.0008523809523809523,
"loss": 0.8813,
"step": 248
},
{
"epoch": 7.11,
"learning_rate": 0.0008517857142857143,
"loss": 0.8702,
"step": 249
},
{
"epoch": 7.14,
"learning_rate": 0.0008511904761904762,
"loss": 0.9105,
"step": 250
},
{
"epoch": 7.17,
"learning_rate": 0.0008505952380952381,
"loss": 0.9096,
"step": 251
},
{
"epoch": 7.2,
"learning_rate": 0.00085,
"loss": 0.9121,
"step": 252
},
{
"epoch": 7.23,
"learning_rate": 0.0008494047619047619,
"loss": 0.9063,
"step": 253
},
{
"epoch": 7.26,
"learning_rate": 0.0008488095238095238,
"loss": 0.8976,
"step": 254
},
{
"epoch": 7.29,
"learning_rate": 0.0008482142857142857,
"loss": 0.9283,
"step": 255
},
{
"epoch": 7.31,
"learning_rate": 0.0008476190476190476,
"loss": 0.9409,
"step": 256
},
{
"epoch": 7.34,
"learning_rate": 0.0008470238095238096,
"loss": 0.9311,
"step": 257
},
{
"epoch": 7.37,
"learning_rate": 0.0008464285714285714,
"loss": 0.926,
"step": 258
},
{
"epoch": 7.4,
"learning_rate": 0.0008458333333333333,
"loss": 0.9704,
"step": 259
},
{
"epoch": 7.43,
"learning_rate": 0.0008452380952380953,
"loss": 0.9515,
"step": 260
},
{
"epoch": 7.46,
"learning_rate": 0.0008446428571428572,
"loss": 0.9069,
"step": 261
},
{
"epoch": 7.49,
"learning_rate": 0.000844047619047619,
"loss": 0.9359,
"step": 262
},
{
"epoch": 7.51,
"learning_rate": 0.0008434523809523809,
"loss": 0.9482,
"step": 263
},
{
"epoch": 7.54,
"learning_rate": 0.0008428571428571429,
"loss": 0.9717,
"step": 264
},
{
"epoch": 7.57,
"learning_rate": 0.0008422619047619048,
"loss": 0.9869,
"step": 265
},
{
"epoch": 7.6,
"learning_rate": 0.0008416666666666667,
"loss": 0.9728,
"step": 266
},
{
"epoch": 7.63,
"learning_rate": 0.0008410714285714285,
"loss": 0.9516,
"step": 267
},
{
"epoch": 7.66,
"learning_rate": 0.0008404761904761905,
"loss": 0.9838,
"step": 268
},
{
"epoch": 7.69,
"learning_rate": 0.0008398809523809524,
"loss": 1.0044,
"step": 269
},
{
"epoch": 7.71,
"learning_rate": 0.0008392857142857143,
"loss": 1.0153,
"step": 270
},
{
"epoch": 7.74,
"learning_rate": 0.0008386904761904763,
"loss": 1.0382,
"step": 271
},
{
"epoch": 7.77,
"learning_rate": 0.0008380952380952382,
"loss": 1.0109,
"step": 272
},
{
"epoch": 7.8,
"learning_rate": 0.0008375,
"loss": 0.9989,
"step": 273
},
{
"epoch": 7.83,
"learning_rate": 0.0008369047619047619,
"loss": 1.0631,
"step": 274
},
{
"epoch": 7.86,
"learning_rate": 0.0008363095238095239,
"loss": 1.0546,
"step": 275
},
{
"epoch": 7.89,
"learning_rate": 0.0008357142857142858,
"loss": 1.0827,
"step": 276
},
{
"epoch": 7.91,
"learning_rate": 0.0008351190476190476,
"loss": 1.087,
"step": 277
},
{
"epoch": 7.94,
"learning_rate": 0.0008345238095238095,
"loss": 1.041,
"step": 278
},
{
"epoch": 7.97,
"learning_rate": 0.0008339285714285715,
"loss": 1.0633,
"step": 279
},
{
"epoch": 8.0,
"learning_rate": 0.0008333333333333334,
"loss": 1.0709,
"step": 280
},
{
"epoch": 8.03,
"learning_rate": 0.0008327380952380953,
"loss": 0.7273,
"step": 281
},
{
"epoch": 8.06,
"learning_rate": 0.0008321428571428573,
"loss": 0.726,
"step": 282
},
{
"epoch": 8.09,
"learning_rate": 0.0008315476190476191,
"loss": 0.6943,
"step": 283
},
{
"epoch": 8.11,
"learning_rate": 0.000830952380952381,
"loss": 0.7127,
"step": 284
},
{
"epoch": 8.14,
"learning_rate": 0.0008303571428571429,
"loss": 0.6915,
"step": 285
},
{
"epoch": 8.17,
"learning_rate": 0.0008297619047619049,
"loss": 0.7138,
"step": 286
},
{
"epoch": 8.2,
"learning_rate": 0.0008291666666666667,
"loss": 0.7356,
"step": 287
},
{
"epoch": 8.23,
"learning_rate": 0.0008285714285714286,
"loss": 0.678,
"step": 288
},
{
"epoch": 8.26,
"learning_rate": 0.0008279761904761904,
"loss": 0.7375,
"step": 289
},
{
"epoch": 8.29,
"learning_rate": 0.0008273809523809524,
"loss": 0.7284,
"step": 290
},
{
"epoch": 8.31,
"learning_rate": 0.0008267857142857143,
"loss": 0.7304,
"step": 291
},
{
"epoch": 8.34,
"learning_rate": 0.0008261904761904761,
"loss": 0.7633,
"step": 292
},
{
"epoch": 8.37,
"learning_rate": 0.0008255952380952381,
"loss": 0.7416,
"step": 293
},
{
"epoch": 8.4,
"learning_rate": 0.000825,
"loss": 0.7895,
"step": 294
},
{
"epoch": 8.43,
"learning_rate": 0.0008244047619047619,
"loss": 0.8037,
"step": 295
},
{
"epoch": 8.46,
"learning_rate": 0.0008238095238095238,
"loss": 0.7736,
"step": 296
},
{
"epoch": 8.49,
"learning_rate": 0.0008232142857142857,
"loss": 0.778,
"step": 297
},
{
"epoch": 8.51,
"learning_rate": 0.0008226190476190476,
"loss": 0.7644,
"step": 298
},
{
"epoch": 8.54,
"learning_rate": 0.0008220238095238095,
"loss": 0.7942,
"step": 299
},
{
"epoch": 8.57,
"learning_rate": 0.0008214285714285714,
"loss": 0.7715,
"step": 300
},
{
"epoch": 8.6,
"learning_rate": 0.0008208333333333334,
"loss": 0.8288,
"step": 301
},
{
"epoch": 8.63,
"learning_rate": 0.0008202380952380952,
"loss": 0.8263,
"step": 302
},
{
"epoch": 8.66,
"learning_rate": 0.0008196428571428571,
"loss": 0.7923,
"step": 303
},
{
"epoch": 8.69,
"learning_rate": 0.0008190476190476191,
"loss": 0.8063,
"step": 304
},
{
"epoch": 8.71,
"learning_rate": 0.000818452380952381,
"loss": 0.8016,
"step": 305
},
{
"epoch": 8.74,
"learning_rate": 0.0008178571428571428,
"loss": 0.8467,
"step": 306
},
{
"epoch": 8.77,
"learning_rate": 0.0008172619047619047,
"loss": 0.8353,
"step": 307
},
{
"epoch": 8.8,
"learning_rate": 0.0008166666666666667,
"loss": 0.8272,
"step": 308
},
{
"epoch": 8.83,
"learning_rate": 0.0008160714285714286,
"loss": 0.8852,
"step": 309
},
{
"epoch": 8.86,
"learning_rate": 0.0008154761904761905,
"loss": 0.8541,
"step": 310
},
{
"epoch": 8.89,
"learning_rate": 0.0008148809523809523,
"loss": 0.8236,
"step": 311
},
{
"epoch": 8.91,
"learning_rate": 0.0008142857142857143,
"loss": 0.8609,
"step": 312
},
{
"epoch": 8.94,
"learning_rate": 0.0008136904761904762,
"loss": 0.8802,
"step": 313
},
{
"epoch": 8.97,
"learning_rate": 0.0008130952380952381,
"loss": 0.8615,
"step": 314
},
{
"epoch": 9.0,
"learning_rate": 0.0008125000000000001,
"loss": 0.8514,
"step": 315
},
{
"epoch": 9.03,
"learning_rate": 0.000811904761904762,
"loss": 0.5529,
"step": 316
},
{
"epoch": 9.06,
"learning_rate": 0.0008113095238095238,
"loss": 0.5736,
"step": 317
},
{
"epoch": 9.09,
"learning_rate": 0.0008107142857142857,
"loss": 0.5647,
"step": 318
},
{
"epoch": 9.11,
"learning_rate": 0.0008101190476190477,
"loss": 0.5677,
"step": 319
},
{
"epoch": 9.14,
"learning_rate": 0.0008095238095238096,
"loss": 0.5991,
"step": 320
},
{
"epoch": 9.17,
"learning_rate": 0.0008089285714285714,
"loss": 0.5666,
"step": 321
},
{
"epoch": 9.2,
"learning_rate": 0.0008083333333333333,
"loss": 0.5902,
"step": 322
},
{
"epoch": 9.23,
"learning_rate": 0.0008077380952380953,
"loss": 0.5961,
"step": 323
},
{
"epoch": 9.26,
"learning_rate": 0.0008071428571428572,
"loss": 0.5684,
"step": 324
},
{
"epoch": 9.29,
"learning_rate": 0.0008065476190476191,
"loss": 0.5976,
"step": 325
},
{
"epoch": 9.31,
"learning_rate": 0.0008059523809523809,
"loss": 0.6033,
"step": 326
},
{
"epoch": 9.34,
"learning_rate": 0.0008053571428571429,
"loss": 0.5877,
"step": 327
},
{
"epoch": 9.37,
"learning_rate": 0.0008047619047619048,
"loss": 0.5943,
"step": 328
},
{
"epoch": 9.4,
"learning_rate": 0.0008041666666666667,
"loss": 0.6176,
"step": 329
},
{
"epoch": 9.43,
"learning_rate": 0.0008035714285714287,
"loss": 0.6143,
"step": 330
},
{
"epoch": 9.46,
"learning_rate": 0.0008029761904761905,
"loss": 0.597,
"step": 331
},
{
"epoch": 9.49,
"learning_rate": 0.0008023809523809524,
"loss": 0.604,
"step": 332
},
{
"epoch": 9.51,
"learning_rate": 0.0008017857142857143,
"loss": 0.6036,
"step": 333
},
{
"epoch": 9.54,
"learning_rate": 0.0008011904761904763,
"loss": 0.6243,
"step": 334
},
{
"epoch": 9.57,
"learning_rate": 0.0008005952380952382,
"loss": 0.6301,
"step": 335
},
{
"epoch": 9.6,
"learning_rate": 0.0008,
"loss": 0.6271,
"step": 336
},
{
"epoch": 9.63,
"learning_rate": 0.0007994047619047619,
"loss": 0.6246,
"step": 337
},
{
"epoch": 9.66,
"learning_rate": 0.0007988095238095239,
"loss": 0.6597,
"step": 338
},
{
"epoch": 9.69,
"learning_rate": 0.0007982142857142858,
"loss": 0.6517,
"step": 339
},
{
"epoch": 9.71,
"learning_rate": 0.0007976190476190477,
"loss": 0.6645,
"step": 340
},
{
"epoch": 9.74,
"learning_rate": 0.0007970238095238096,
"loss": 0.6542,
"step": 341
},
{
"epoch": 9.77,
"learning_rate": 0.0007964285714285714,
"loss": 0.6496,
"step": 342
},
{
"epoch": 9.8,
"learning_rate": 0.0007958333333333333,
"loss": 0.6309,
"step": 343
},
{
"epoch": 9.83,
"learning_rate": 0.0007952380952380952,
"loss": 0.6668,
"step": 344
},
{
"epoch": 9.86,
"learning_rate": 0.0007946428571428572,
"loss": 0.6841,
"step": 345
},
{
"epoch": 9.89,
"learning_rate": 0.000794047619047619,
"loss": 0.6958,
"step": 346
},
{
"epoch": 9.91,
"learning_rate": 0.0007934523809523809,
"loss": 0.6592,
"step": 347
},
{
"epoch": 9.94,
"learning_rate": 0.0007928571428571428,
"loss": 0.6968,
"step": 348
},
{
"epoch": 9.97,
"learning_rate": 0.0007922619047619048,
"loss": 0.6916,
"step": 349
},
{
"epoch": 10.0,
"learning_rate": 0.0007916666666666666,
"loss": 0.7155,
"step": 350
},
{
"epoch": 10.03,
"learning_rate": 0.0007910714285714285,
"loss": 0.4288,
"step": 351
},
{
"epoch": 10.06,
"learning_rate": 0.0007904761904761905,
"loss": 0.4493,
"step": 352
},
{
"epoch": 10.09,
"learning_rate": 0.0007898809523809524,
"loss": 0.4152,
"step": 353
},
{
"epoch": 10.11,
"learning_rate": 0.0007892857142857143,
"loss": 0.4324,
"step": 354
},
{
"epoch": 10.14,
"learning_rate": 0.0007886904761904761,
"loss": 0.4334,
"step": 355
},
{
"epoch": 10.17,
"learning_rate": 0.0007880952380952381,
"loss": 0.4479,
"step": 356
},
{
"epoch": 10.2,
"learning_rate": 0.0007875,
"loss": 0.4391,
"step": 357
},
{
"epoch": 10.23,
"learning_rate": 0.0007869047619047619,
"loss": 0.4534,
"step": 358
},
{
"epoch": 10.26,
"learning_rate": 0.0007863095238095238,
"loss": 0.4494,
"step": 359
},
{
"epoch": 10.29,
"learning_rate": 0.0007857142857142857,
"loss": 0.4519,
"step": 360
},
{
"epoch": 10.31,
"learning_rate": 0.0007851190476190476,
"loss": 0.4673,
"step": 361
},
{
"epoch": 10.34,
"learning_rate": 0.0007845238095238095,
"loss": 0.4628,
"step": 362
},
{
"epoch": 10.37,
"learning_rate": 0.0007839285714285715,
"loss": 0.4608,
"step": 363
},
{
"epoch": 10.4,
"learning_rate": 0.0007833333333333334,
"loss": 0.4755,
"step": 364
},
{
"epoch": 10.43,
"learning_rate": 0.0007827380952380952,
"loss": 0.4771,
"step": 365
},
{
"epoch": 10.46,
"learning_rate": 0.0007821428571428571,
"loss": 0.4679,
"step": 366
},
{
"epoch": 10.49,
"learning_rate": 0.0007815476190476191,
"loss": 0.4985,
"step": 367
},
{
"epoch": 10.51,
"learning_rate": 0.000780952380952381,
"loss": 0.5242,
"step": 368
},
{
"epoch": 10.54,
"learning_rate": 0.0007803571428571429,
"loss": 0.478,
"step": 369
},
{
"epoch": 10.57,
"learning_rate": 0.0007797619047619047,
"loss": 0.5072,
"step": 370
},
{
"epoch": 10.6,
"learning_rate": 0.0007791666666666667,
"loss": 0.5001,
"step": 371
},
{
"epoch": 10.63,
"learning_rate": 0.0007785714285714286,
"loss": 0.5119,
"step": 372
},
{
"epoch": 10.66,
"learning_rate": 0.0007779761904761905,
"loss": 0.5212,
"step": 373
},
{
"epoch": 10.69,
"learning_rate": 0.0007773809523809525,
"loss": 0.5073,
"step": 374
},
{
"epoch": 10.71,
"learning_rate": 0.0007767857142857143,
"loss": 0.5089,
"step": 375
},
{
"epoch": 10.74,
"learning_rate": 0.0007761904761904762,
"loss": 0.5161,
"step": 376
},
{
"epoch": 10.77,
"learning_rate": 0.0007755952380952381,
"loss": 0.4861,
"step": 377
},
{
"epoch": 10.8,
"learning_rate": 0.0007750000000000001,
"loss": 0.531,
"step": 378
},
{
"epoch": 10.83,
"learning_rate": 0.000774404761904762,
"loss": 0.5244,
"step": 379
},
{
"epoch": 10.86,
"learning_rate": 0.0007738095238095238,
"loss": 0.5446,
"step": 380
},
{
"epoch": 10.89,
"learning_rate": 0.0007732142857142857,
"loss": 0.5515,
"step": 381
},
{
"epoch": 10.91,
"learning_rate": 0.0007726190476190477,
"loss": 0.5345,
"step": 382
},
{
"epoch": 10.94,
"learning_rate": 0.0007720238095238096,
"loss": 0.537,
"step": 383
},
{
"epoch": 10.97,
"learning_rate": 0.0007714285714285715,
"loss": 0.5589,
"step": 384
},
{
"epoch": 11.0,
"learning_rate": 0.0007708333333333334,
"loss": 0.5459,
"step": 385
},
{
"epoch": 11.03,
"learning_rate": 0.0007702380952380953,
"loss": 0.3344,
"step": 386
},
{
"epoch": 11.06,
"learning_rate": 0.0007696428571428572,
"loss": 0.3352,
"step": 387
},
{
"epoch": 11.09,
"learning_rate": 0.0007690476190476191,
"loss": 0.3263,
"step": 388
},
{
"epoch": 11.11,
"learning_rate": 0.0007684523809523811,
"loss": 0.3501,
"step": 389
},
{
"epoch": 11.14,
"learning_rate": 0.0007678571428571429,
"loss": 0.3523,
"step": 390
},
{
"epoch": 11.17,
"learning_rate": 0.0007672619047619048,
"loss": 0.3379,
"step": 391
},
{
"epoch": 11.2,
"learning_rate": 0.0007666666666666667,
"loss": 0.3456,
"step": 392
},
{
"epoch": 11.23,
"learning_rate": 0.0007660714285714287,
"loss": 0.347,
"step": 393
},
{
"epoch": 11.26,
"learning_rate": 0.0007654761904761904,
"loss": 0.3622,
"step": 394
},
{
"epoch": 11.29,
"learning_rate": 0.0007648809523809523,
"loss": 0.3612,
"step": 395
},
{
"epoch": 11.31,
"learning_rate": 0.0007642857142857142,
"loss": 0.3789,
"step": 396
},
{
"epoch": 11.34,
"learning_rate": 0.0007636904761904762,
"loss": 0.3491,
"step": 397
},
{
"epoch": 11.37,
"learning_rate": 0.0007630952380952381,
"loss": 0.3578,
"step": 398
},
{
"epoch": 11.4,
"learning_rate": 0.0007624999999999999,
"loss": 0.3524,
"step": 399
},
{
"epoch": 11.43,
"learning_rate": 0.0007619047619047619,
"loss": 0.3671,
"step": 400
},
{
"epoch": 11.46,
"learning_rate": 0.0007613095238095238,
"loss": 0.374,
"step": 401
},
{
"epoch": 11.49,
"learning_rate": 0.0007607142857142857,
"loss": 0.3872,
"step": 402
},
{
"epoch": 11.51,
"learning_rate": 0.0007601190476190476,
"loss": 0.3801,
"step": 403
},
{
"epoch": 11.54,
"learning_rate": 0.0007595238095238095,
"loss": 0.3618,
"step": 404
},
{
"epoch": 11.57,
"learning_rate": 0.0007589285714285714,
"loss": 0.3929,
"step": 405
},
{
"epoch": 11.6,
"learning_rate": 0.0007583333333333333,
"loss": 0.4099,
"step": 406
},
{
"epoch": 11.63,
"learning_rate": 0.0007577380952380952,
"loss": 0.3778,
"step": 407
},
{
"epoch": 11.66,
"learning_rate": 0.0007571428571428572,
"loss": 0.3965,
"step": 408
},
{
"epoch": 11.69,
"learning_rate": 0.000756547619047619,
"loss": 0.4013,
"step": 409
},
{
"epoch": 11.71,
"learning_rate": 0.0007559523809523809,
"loss": 0.4031,
"step": 410
},
{
"epoch": 11.74,
"learning_rate": 0.0007553571428571429,
"loss": 0.398,
"step": 411
},
{
"epoch": 11.77,
"learning_rate": 0.0007547619047619048,
"loss": 0.4106,
"step": 412
},
{
"epoch": 11.8,
"learning_rate": 0.0007541666666666667,
"loss": 0.4031,
"step": 413
},
{
"epoch": 11.83,
"learning_rate": 0.0007535714285714285,
"loss": 0.4199,
"step": 414
},
{
"epoch": 11.86,
"learning_rate": 0.0007529761904761905,
"loss": 0.4012,
"step": 415
},
{
"epoch": 11.89,
"learning_rate": 0.0007523809523809524,
"loss": 0.4096,
"step": 416
},
{
"epoch": 11.91,
"learning_rate": 0.0007517857142857143,
"loss": 0.4237,
"step": 417
},
{
"epoch": 11.94,
"learning_rate": 0.0007511904761904762,
"loss": 0.4115,
"step": 418
},
{
"epoch": 11.97,
"learning_rate": 0.0007505952380952381,
"loss": 0.4459,
"step": 419
},
{
"epoch": 12.0,
"learning_rate": 0.00075,
"loss": 0.4406,
"step": 420
},
{
"epoch": 12.03,
"learning_rate": 0.0007494047619047619,
"loss": 0.2561,
"step": 421
},
{
"epoch": 12.06,
"learning_rate": 0.0007488095238095239,
"loss": 0.2662,
"step": 422
},
{
"epoch": 12.09,
"learning_rate": 0.0007482142857142858,
"loss": 0.2538,
"step": 423
},
{
"epoch": 12.11,
"learning_rate": 0.0007476190476190476,
"loss": 0.2571,
"step": 424
},
{
"epoch": 12.14,
"learning_rate": 0.0007470238095238095,
"loss": 0.2527,
"step": 425
},
{
"epoch": 12.17,
"learning_rate": 0.0007464285714285715,
"loss": 0.2739,
"step": 426
},
{
"epoch": 12.2,
"learning_rate": 0.0007458333333333334,
"loss": 0.2642,
"step": 427
},
{
"epoch": 12.23,
"learning_rate": 0.0007452380952380953,
"loss": 0.2736,
"step": 428
},
{
"epoch": 12.26,
"learning_rate": 0.0007446428571428571,
"loss": 0.2774,
"step": 429
},
{
"epoch": 12.29,
"learning_rate": 0.0007440476190476191,
"loss": 0.2684,
"step": 430
},
{
"epoch": 12.31,
"learning_rate": 0.000743452380952381,
"loss": 0.2727,
"step": 431
},
{
"epoch": 12.34,
"learning_rate": 0.0007428571428571429,
"loss": 0.2801,
"step": 432
},
{
"epoch": 12.37,
"learning_rate": 0.0007422619047619049,
"loss": 0.2685,
"step": 433
},
{
"epoch": 12.4,
"learning_rate": 0.0007416666666666667,
"loss": 0.2958,
"step": 434
},
{
"epoch": 12.43,
"learning_rate": 0.0007410714285714286,
"loss": 0.2868,
"step": 435
},
{
"epoch": 12.46,
"learning_rate": 0.0007404761904761905,
"loss": 0.2838,
"step": 436
},
{
"epoch": 12.49,
"learning_rate": 0.0007398809523809525,
"loss": 0.2976,
"step": 437
},
{
"epoch": 12.51,
"learning_rate": 0.0007392857142857144,
"loss": 0.2963,
"step": 438
},
{
"epoch": 12.54,
"learning_rate": 0.0007386904761904762,
"loss": 0.2891,
"step": 439
},
{
"epoch": 12.57,
"learning_rate": 0.0007380952380952381,
"loss": 0.2897,
"step": 440
},
{
"epoch": 12.6,
"learning_rate": 0.0007375000000000001,
"loss": 0.3013,
"step": 441
},
{
"epoch": 12.63,
"learning_rate": 0.000736904761904762,
"loss": 0.2977,
"step": 442
},
{
"epoch": 12.66,
"learning_rate": 0.0007363095238095238,
"loss": 0.2985,
"step": 443
},
{
"epoch": 12.69,
"learning_rate": 0.0007357142857142858,
"loss": 0.3097,
"step": 444
},
{
"epoch": 12.71,
"learning_rate": 0.0007351190476190477,
"loss": 0.3183,
"step": 445
},
{
"epoch": 12.74,
"learning_rate": 0.0007345238095238096,
"loss": 0.3192,
"step": 446
},
{
"epoch": 12.77,
"learning_rate": 0.0007339285714285714,
"loss": 0.3265,
"step": 447
},
{
"epoch": 12.8,
"learning_rate": 0.0007333333333333333,
"loss": 0.3033,
"step": 448
},
{
"epoch": 12.83,
"learning_rate": 0.0007327380952380952,
"loss": 0.313,
"step": 449
},
{
"epoch": 12.86,
"learning_rate": 0.0007321428571428571,
"loss": 0.3186,
"step": 450
},
{
"epoch": 12.89,
"learning_rate": 0.000731547619047619,
"loss": 0.3275,
"step": 451
},
{
"epoch": 12.91,
"learning_rate": 0.000730952380952381,
"loss": 0.3272,
"step": 452
},
{
"epoch": 12.94,
"learning_rate": 0.0007303571428571428,
"loss": 0.3336,
"step": 453
},
{
"epoch": 12.97,
"learning_rate": 0.0007297619047619047,
"loss": 0.3244,
"step": 454
},
{
"epoch": 13.0,
"learning_rate": 0.0007291666666666666,
"loss": 0.3403,
"step": 455
},
{
"epoch": 13.03,
"learning_rate": 0.0007285714285714286,
"loss": 0.1931,
"step": 456
},
{
"epoch": 13.06,
"learning_rate": 0.0007279761904761905,
"loss": 0.2104,
"step": 457
},
{
"epoch": 13.09,
"learning_rate": 0.0007273809523809523,
"loss": 0.2025,
"step": 458
},
{
"epoch": 13.11,
"learning_rate": 0.0007267857142857143,
"loss": 0.1933,
"step": 459
},
{
"epoch": 13.14,
"learning_rate": 0.0007261904761904762,
"loss": 0.2002,
"step": 460
},
{
"epoch": 13.17,
"learning_rate": 0.0007255952380952381,
"loss": 0.2097,
"step": 461
},
{
"epoch": 13.2,
"learning_rate": 0.000725,
"loss": 0.2097,
"step": 462
},
{
"epoch": 13.23,
"learning_rate": 0.0007244047619047619,
"loss": 0.2061,
"step": 463
},
{
"epoch": 13.26,
"learning_rate": 0.0007238095238095238,
"loss": 0.2,
"step": 464
},
{
"epoch": 13.29,
"learning_rate": 0.0007232142857142857,
"loss": 0.2082,
"step": 465
},
{
"epoch": 13.31,
"learning_rate": 0.0007226190476190476,
"loss": 0.209,
"step": 466
},
{
"epoch": 13.34,
"learning_rate": 0.0007220238095238096,
"loss": 0.2083,
"step": 467
},
{
"epoch": 13.37,
"learning_rate": 0.0007214285714285714,
"loss": 0.215,
"step": 468
},
{
"epoch": 13.4,
"learning_rate": 0.0007208333333333333,
"loss": 0.2113,
"step": 469
},
{
"epoch": 13.43,
"learning_rate": 0.0007202380952380953,
"loss": 0.2186,
"step": 470
},
{
"epoch": 13.46,
"learning_rate": 0.0007196428571428572,
"loss": 0.2164,
"step": 471
},
{
"epoch": 13.49,
"learning_rate": 0.000719047619047619,
"loss": 0.2197,
"step": 472
},
{
"epoch": 13.51,
"learning_rate": 0.0007184523809523809,
"loss": 0.2256,
"step": 473
},
{
"epoch": 13.54,
"learning_rate": 0.0007178571428571429,
"loss": 0.2165,
"step": 474
},
{
"epoch": 13.57,
"learning_rate": 0.0007172619047619048,
"loss": 0.2227,
"step": 475
},
{
"epoch": 13.6,
"learning_rate": 0.0007166666666666667,
"loss": 0.2245,
"step": 476
},
{
"epoch": 13.63,
"learning_rate": 0.0007160714285714285,
"loss": 0.2239,
"step": 477
},
{
"epoch": 13.66,
"learning_rate": 0.0007154761904761905,
"loss": 0.2251,
"step": 478
},
{
"epoch": 13.69,
"learning_rate": 0.0007148809523809524,
"loss": 0.2305,
"step": 479
},
{
"epoch": 13.71,
"learning_rate": 0.0007142857142857143,
"loss": 0.2334,
"step": 480
},
{
"epoch": 13.74,
"learning_rate": 0.0007136904761904763,
"loss": 0.229,
"step": 481
},
{
"epoch": 13.77,
"learning_rate": 0.0007130952380952381,
"loss": 0.2363,
"step": 482
},
{
"epoch": 13.8,
"learning_rate": 0.0007125,
"loss": 0.2382,
"step": 483
},
{
"epoch": 13.83,
"learning_rate": 0.0007119047619047619,
"loss": 0.2374,
"step": 484
},
{
"epoch": 13.86,
"learning_rate": 0.0007113095238095239,
"loss": 0.2433,
"step": 485
},
{
"epoch": 13.89,
"learning_rate": 0.0007107142857142858,
"loss": 0.2349,
"step": 486
},
{
"epoch": 13.91,
"learning_rate": 0.0007101190476190476,
"loss": 0.2549,
"step": 487
},
{
"epoch": 13.94,
"learning_rate": 0.0007095238095238095,
"loss": 0.2481,
"step": 488
},
{
"epoch": 13.97,
"learning_rate": 0.0007089285714285715,
"loss": 0.2504,
"step": 489
},
{
"epoch": 14.0,
"learning_rate": 0.0007083333333333334,
"loss": 0.2521,
"step": 490
},
{
"epoch": 14.03,
"learning_rate": 0.0007077380952380953,
"loss": 0.1489,
"step": 491
},
{
"epoch": 14.06,
"learning_rate": 0.0007071428571428572,
"loss": 0.15,
"step": 492
},
{
"epoch": 14.09,
"learning_rate": 0.0007065476190476191,
"loss": 0.1499,
"step": 493
},
{
"epoch": 14.11,
"learning_rate": 0.000705952380952381,
"loss": 0.1515,
"step": 494
},
{
"epoch": 14.14,
"learning_rate": 0.0007053571428571429,
"loss": 0.1542,
"step": 495
},
{
"epoch": 14.17,
"learning_rate": 0.0007047619047619049,
"loss": 0.1523,
"step": 496
},
{
"epoch": 14.2,
"learning_rate": 0.0007041666666666667,
"loss": 0.1605,
"step": 497
},
{
"epoch": 14.23,
"learning_rate": 0.0007035714285714286,
"loss": 0.1572,
"step": 498
},
{
"epoch": 14.26,
"learning_rate": 0.0007029761904761904,
"loss": 0.1634,
"step": 499
},
{
"epoch": 14.29,
"learning_rate": 0.0007023809523809524,
"loss": 0.1548,
"step": 500
},
{
"epoch": 14.31,
"learning_rate": 0.0007017857142857143,
"loss": 0.1521,
"step": 501
},
{
"epoch": 14.34,
"learning_rate": 0.0007011904761904761,
"loss": 0.1644,
"step": 502
},
{
"epoch": 14.37,
"learning_rate": 0.0007005952380952381,
"loss": 0.155,
"step": 503
},
{
"epoch": 14.4,
"learning_rate": 0.0007,
"loss": 0.1674,
"step": 504
},
{
"epoch": 14.43,
"learning_rate": 0.0006994047619047619,
"loss": 0.1619,
"step": 505
},
{
"epoch": 14.46,
"learning_rate": 0.0006988095238095237,
"loss": 0.1644,
"step": 506
},
{
"epoch": 14.49,
"learning_rate": 0.0006982142857142857,
"loss": 0.1723,
"step": 507
},
{
"epoch": 14.51,
"learning_rate": 0.0006976190476190476,
"loss": 0.1621,
"step": 508
},
{
"epoch": 14.54,
"learning_rate": 0.0006970238095238095,
"loss": 0.1647,
"step": 509
},
{
"epoch": 14.57,
"learning_rate": 0.0006964285714285714,
"loss": 0.1741,
"step": 510
},
{
"epoch": 14.6,
"learning_rate": 0.0006958333333333334,
"loss": 0.1673,
"step": 511
},
{
"epoch": 14.63,
"learning_rate": 0.0006952380952380952,
"loss": 0.1752,
"step": 512
},
{
"epoch": 14.66,
"learning_rate": 0.0006946428571428571,
"loss": 0.167,
"step": 513
},
{
"epoch": 14.69,
"learning_rate": 0.0006940476190476191,
"loss": 0.1718,
"step": 514
},
{
"epoch": 14.71,
"learning_rate": 0.000693452380952381,
"loss": 0.1787,
"step": 515
},
{
"epoch": 14.74,
"learning_rate": 0.0006928571428571428,
"loss": 0.1747,
"step": 516
},
{
"epoch": 14.77,
"learning_rate": 0.0006922619047619047,
"loss": 0.1766,
"step": 517
},
{
"epoch": 14.8,
"learning_rate": 0.0006916666666666667,
"loss": 0.1782,
"step": 518
},
{
"epoch": 14.83,
"learning_rate": 0.0006910714285714286,
"loss": 0.1799,
"step": 519
},
{
"epoch": 14.86,
"learning_rate": 0.0006904761904761905,
"loss": 0.169,
"step": 520
},
{
"epoch": 14.89,
"learning_rate": 0.0006898809523809523,
"loss": 0.1802,
"step": 521
},
{
"epoch": 14.91,
"learning_rate": 0.0006892857142857143,
"loss": 0.18,
"step": 522
},
{
"epoch": 14.94,
"learning_rate": 0.0006886904761904762,
"loss": 0.1823,
"step": 523
},
{
"epoch": 14.97,
"learning_rate": 0.0006880952380952381,
"loss": 0.1829,
"step": 524
},
{
"epoch": 15.0,
"learning_rate": 0.0006875,
"loss": 0.1885,
"step": 525
},
{
"epoch": 15.03,
"learning_rate": 0.000686904761904762,
"loss": 0.1171,
"step": 526
},
{
"epoch": 15.06,
"learning_rate": 0.0006863095238095238,
"loss": 0.1126,
"step": 527
},
{
"epoch": 15.09,
"learning_rate": 0.0006857142857142857,
"loss": 0.1186,
"step": 528
},
{
"epoch": 15.11,
"learning_rate": 0.0006851190476190477,
"loss": 0.1164,
"step": 529
},
{
"epoch": 15.14,
"learning_rate": 0.0006845238095238096,
"loss": 0.1163,
"step": 530
},
{
"epoch": 15.17,
"learning_rate": 0.0006839285714285714,
"loss": 0.1204,
"step": 531
},
{
"epoch": 15.2,
"learning_rate": 0.0006833333333333333,
"loss": 0.1212,
"step": 532
},
{
"epoch": 15.23,
"learning_rate": 0.0006827380952380953,
"loss": 0.1188,
"step": 533
},
{
"epoch": 15.26,
"learning_rate": 0.0006821428571428572,
"loss": 0.1154,
"step": 534
},
{
"epoch": 15.29,
"learning_rate": 0.0006815476190476191,
"loss": 0.1244,
"step": 535
},
{
"epoch": 15.31,
"learning_rate": 0.0006809523809523809,
"loss": 0.1214,
"step": 536
},
{
"epoch": 15.34,
"learning_rate": 0.0006803571428571429,
"loss": 0.1294,
"step": 537
},
{
"epoch": 15.37,
"learning_rate": 0.0006797619047619048,
"loss": 0.1232,
"step": 538
},
{
"epoch": 15.4,
"learning_rate": 0.0006791666666666667,
"loss": 0.1262,
"step": 539
},
{
"epoch": 15.43,
"learning_rate": 0.0006785714285714287,
"loss": 0.1195,
"step": 540
},
{
"epoch": 15.46,
"learning_rate": 0.0006779761904761905,
"loss": 0.123,
"step": 541
},
{
"epoch": 15.49,
"learning_rate": 0.0006773809523809524,
"loss": 0.1266,
"step": 542
},
{
"epoch": 15.51,
"learning_rate": 0.0006767857142857143,
"loss": 0.1345,
"step": 543
},
{
"epoch": 15.54,
"learning_rate": 0.0006761904761904763,
"loss": 0.1174,
"step": 544
},
{
"epoch": 15.57,
"learning_rate": 0.0006755952380952382,
"loss": 0.1293,
"step": 545
},
{
"epoch": 15.6,
"learning_rate": 0.000675,
"loss": 0.1293,
"step": 546
},
{
"epoch": 15.63,
"learning_rate": 0.0006744047619047619,
"loss": 0.1269,
"step": 547
},
{
"epoch": 15.66,
"learning_rate": 0.0006738095238095239,
"loss": 0.1321,
"step": 548
},
{
"epoch": 15.69,
"learning_rate": 0.0006732142857142858,
"loss": 0.1318,
"step": 549
},
{
"epoch": 15.71,
"learning_rate": 0.0006726190476190477,
"loss": 0.1283,
"step": 550
},
{
"epoch": 15.74,
"learning_rate": 0.0006720238095238096,
"loss": 0.128,
"step": 551
},
{
"epoch": 15.77,
"learning_rate": 0.0006714285714285714,
"loss": 0.1295,
"step": 552
},
{
"epoch": 15.8,
"learning_rate": 0.0006708333333333333,
"loss": 0.1323,
"step": 553
},
{
"epoch": 15.83,
"learning_rate": 0.0006702380952380952,
"loss": 0.1348,
"step": 554
},
{
"epoch": 15.86,
"learning_rate": 0.0006696428571428571,
"loss": 0.1276,
"step": 555
},
{
"epoch": 15.89,
"learning_rate": 0.000669047619047619,
"loss": 0.1356,
"step": 556
},
{
"epoch": 15.91,
"learning_rate": 0.0006684523809523809,
"loss": 0.1404,
"step": 557
},
{
"epoch": 15.94,
"learning_rate": 0.0006678571428571428,
"loss": 0.1311,
"step": 558
},
{
"epoch": 15.97,
"learning_rate": 0.0006672619047619048,
"loss": 0.1401,
"step": 559
},
{
"epoch": 16.0,
"learning_rate": 0.0006666666666666666,
"loss": 0.1411,
"step": 560
},
{
"epoch": 16.03,
"learning_rate": 0.0006660714285714285,
"loss": 0.0862,
"step": 561
},
{
"epoch": 16.06,
"learning_rate": 0.0006654761904761905,
"loss": 0.0902,
"step": 562
},
{
"epoch": 16.09,
"learning_rate": 0.0006648809523809524,
"loss": 0.0871,
"step": 563
},
{
"epoch": 16.11,
"learning_rate": 0.0006642857142857143,
"loss": 0.0906,
"step": 564
},
{
"epoch": 16.14,
"learning_rate": 0.0006636904761904761,
"loss": 0.0891,
"step": 565
},
{
"epoch": 16.17,
"learning_rate": 0.0006630952380952381,
"loss": 0.0917,
"step": 566
},
{
"epoch": 16.2,
"learning_rate": 0.0006625,
"loss": 0.0906,
"step": 567
},
{
"epoch": 16.23,
"learning_rate": 0.0006619047619047619,
"loss": 0.0927,
"step": 568
},
{
"epoch": 16.26,
"learning_rate": 0.0006613095238095238,
"loss": 0.0927,
"step": 569
},
{
"epoch": 16.29,
"learning_rate": 0.0006607142857142857,
"loss": 0.0934,
"step": 570
},
{
"epoch": 16.31,
"learning_rate": 0.0006601190476190476,
"loss": 0.0956,
"step": 571
},
{
"epoch": 16.34,
"learning_rate": 0.0006595238095238095,
"loss": 0.0933,
"step": 572
},
{
"epoch": 16.37,
"learning_rate": 0.0006589285714285715,
"loss": 0.0993,
"step": 573
},
{
"epoch": 16.4,
"learning_rate": 0.0006583333333333334,
"loss": 0.095,
"step": 574
},
{
"epoch": 16.43,
"learning_rate": 0.0006577380952380952,
"loss": 0.0963,
"step": 575
},
{
"epoch": 16.46,
"learning_rate": 0.0006571428571428571,
"loss": 0.0948,
"step": 576
},
{
"epoch": 16.49,
"learning_rate": 0.0006565476190476191,
"loss": 0.0952,
"step": 577
},
{
"epoch": 16.51,
"learning_rate": 0.000655952380952381,
"loss": 0.1001,
"step": 578
},
{
"epoch": 16.54,
"learning_rate": 0.0006553571428571429,
"loss": 0.0924,
"step": 579
},
{
"epoch": 16.57,
"learning_rate": 0.0006547619047619047,
"loss": 0.0962,
"step": 580
},
{
"epoch": 16.6,
"learning_rate": 0.0006541666666666667,
"loss": 0.0949,
"step": 581
},
{
"epoch": 16.63,
"learning_rate": 0.0006535714285714286,
"loss": 0.1,
"step": 582
},
{
"epoch": 16.66,
"learning_rate": 0.0006529761904761905,
"loss": 0.1009,
"step": 583
},
{
"epoch": 16.69,
"learning_rate": 0.0006523809523809525,
"loss": 0.1023,
"step": 584
},
{
"epoch": 16.71,
"learning_rate": 0.0006517857142857143,
"loss": 0.0995,
"step": 585
},
{
"epoch": 16.74,
"learning_rate": 0.0006511904761904762,
"loss": 0.1015,
"step": 586
},
{
"epoch": 16.77,
"learning_rate": 0.0006505952380952381,
"loss": 0.0966,
"step": 587
},
{
"epoch": 16.8,
"learning_rate": 0.0006500000000000001,
"loss": 0.1019,
"step": 588
},
{
"epoch": 16.83,
"learning_rate": 0.000649404761904762,
"loss": 0.0996,
"step": 589
},
{
"epoch": 16.86,
"learning_rate": 0.0006488095238095238,
"loss": 0.103,
"step": 590
},
{
"epoch": 16.89,
"learning_rate": 0.0006482142857142857,
"loss": 0.1042,
"step": 591
},
{
"epoch": 16.91,
"learning_rate": 0.0006476190476190477,
"loss": 0.1039,
"step": 592
},
{
"epoch": 16.94,
"learning_rate": 0.0006470238095238096,
"loss": 0.1058,
"step": 593
},
{
"epoch": 16.97,
"learning_rate": 0.0006464285714285715,
"loss": 0.0994,
"step": 594
},
{
"epoch": 17.0,
"learning_rate": 0.0006458333333333334,
"loss": 0.1062,
"step": 595
},
{
"epoch": 17.03,
"learning_rate": 0.0006452380952380953,
"loss": 0.0709,
"step": 596
},
{
"epoch": 17.06,
"learning_rate": 0.0006446428571428572,
"loss": 0.0733,
"step": 597
},
{
"epoch": 17.09,
"learning_rate": 0.0006440476190476191,
"loss": 0.0724,
"step": 598
},
{
"epoch": 17.11,
"learning_rate": 0.0006434523809523811,
"loss": 0.0733,
"step": 599
},
{
"epoch": 17.14,
"learning_rate": 0.0006428571428571429,
"loss": 0.0741,
"step": 600
}
],
"logging_steps": 1,
"max_steps": 1680,
"num_train_epochs": 48,
"save_steps": 100,
"total_flos": 3.463615849187021e+17,
"trial_name": null,
"trial_params": null
}
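
A minimal sketch, not part of the original checkpoint, of how a `trainer_state.json` like the one above could be inspected with plain Python: it loads the file, walks `log_history`, and prints a per-epoch mean of the logged loss. The file path `trainer_state.json` is an assumption about where the checkpoint folder lives; adjust it as needed.

```python
# Sketch: parse a trainer_state.json like the one above and summarize
# the logged loss curve. The path "trainer_state.json" is an assumption;
# point it at the actual checkpoint folder.
import json
from collections import defaultdict

with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Each log_history entry carries "epoch", "learning_rate", "loss", "step".
history = state["log_history"]

# Group per-step losses by the integer part of the epoch and average them.
losses_by_epoch = defaultdict(list)
for entry in history:
    losses_by_epoch[int(entry["epoch"])].append(entry["loss"])

print(f"global_step={state['global_step']}  max_steps={state['max_steps']}")
for epoch in sorted(losses_by_epoch):
    losses = losses_by_epoch[epoch]
    print(f"epoch {epoch:2d}: mean loss {sum(losses) / len(losses):.4f} "
          f"over {len(losses)} steps")
```

On this log the printout would show the mean loss falling from roughly 2.7 in the first epoch to below 0.1 by epoch 17, mirroring the per-step values recorded above.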