{
"best_metric": 0.9105892955087709,
"best_model_checkpoint": "xtreme_s_xlsr_300m_minds14_resplit/checkpoint-1800",
"epoch": 50.0,
"global_step": 1850,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 1.9999999999999996e-07,
"loss": 2.6417,
"step": 1
},
{
"epoch": 0.05,
"learning_rate": 3.9999999999999993e-07,
"loss": 2.6374,
"step": 2
},
{
"epoch": 0.08,
"learning_rate": 6e-07,
"loss": 2.6452,
"step": 3
},
{
"epoch": 0.11,
"learning_rate": 7.999999999999999e-07,
"loss": 2.6409,
"step": 4
},
{
"epoch": 0.14,
"learning_rate": 1e-06,
"loss": 2.6407,
"step": 5
},
{
"epoch": 0.16,
"learning_rate": 1.2e-06,
"loss": 2.6356,
"step": 6
},
{
"epoch": 0.19,
"learning_rate": 1.4e-06,
"loss": 2.6394,
"step": 7
},
{
"epoch": 0.22,
"learning_rate": 1.5999999999999997e-06,
"loss": 2.6379,
"step": 8
},
{
"epoch": 0.24,
"learning_rate": 1.8e-06,
"loss": 2.6387,
"step": 9
},
{
"epoch": 0.27,
"learning_rate": 2e-06,
"loss": 2.6436,
"step": 10
},
{
"epoch": 0.3,
"learning_rate": 2.1999999999999997e-06,
"loss": 2.644,
"step": 11
},
{
"epoch": 0.32,
"learning_rate": 2.4e-06,
"loss": 2.6413,
"step": 12
},
{
"epoch": 0.35,
"learning_rate": 2.5999999999999997e-06,
"loss": 2.6387,
"step": 13
},
{
"epoch": 0.38,
"learning_rate": 2.8e-06,
"loss": 2.6356,
"step": 14
},
{
"epoch": 0.41,
"learning_rate": 2.9999999999999997e-06,
"loss": 2.6396,
"step": 15
},
{
"epoch": 0.43,
"learning_rate": 3.1999999999999994e-06,
"loss": 2.6415,
"step": 16
},
{
"epoch": 0.46,
"learning_rate": 3.4e-06,
"loss": 2.6378,
"step": 17
},
{
"epoch": 0.49,
"learning_rate": 3.6e-06,
"loss": 2.6393,
"step": 18
},
{
"epoch": 0.51,
"learning_rate": 3.7999999999999996e-06,
"loss": 2.6399,
"step": 19
},
{
"epoch": 0.54,
"learning_rate": 4e-06,
"loss": 2.6419,
"step": 20
},
{
"epoch": 0.57,
"learning_rate": 4.2e-06,
"loss": 2.6404,
"step": 21
},
{
"epoch": 0.59,
"learning_rate": 4.399999999999999e-06,
"loss": 2.6393,
"step": 22
},
{
"epoch": 0.62,
"learning_rate": 4.599999999999999e-06,
"loss": 2.6366,
"step": 23
},
{
"epoch": 0.65,
"learning_rate": 4.8e-06,
"loss": 2.6415,
"step": 24
},
{
"epoch": 0.68,
"learning_rate": 4.9999999999999996e-06,
"loss": 2.635,
"step": 25
},
{
"epoch": 0.7,
"learning_rate": 5.199999999999999e-06,
"loss": 2.6407,
"step": 26
},
{
"epoch": 0.73,
"learning_rate": 5.399999999999999e-06,
"loss": 2.6395,
"step": 27
},
{
"epoch": 0.76,
"learning_rate": 5.6e-06,
"loss": 2.6453,
"step": 28
},
{
"epoch": 0.78,
"learning_rate": 5.7999999999999995e-06,
"loss": 2.6363,
"step": 29
},
{
"epoch": 0.81,
"learning_rate": 5.999999999999999e-06,
"loss": 2.6396,
"step": 30
},
{
"epoch": 0.84,
"learning_rate": 6.199999999999999e-06,
"loss": 2.6364,
"step": 31
},
{
"epoch": 0.86,
"learning_rate": 6.399999999999999e-06,
"loss": 2.6361,
"step": 32
},
{
"epoch": 0.89,
"learning_rate": 6.599999999999999e-06,
"loss": 2.6403,
"step": 33
},
{
"epoch": 0.92,
"learning_rate": 6.8e-06,
"loss": 2.6372,
"step": 34
},
{
"epoch": 0.95,
"learning_rate": 7e-06,
"loss": 2.6365,
"step": 35
},
{
"epoch": 0.97,
"learning_rate": 7.2e-06,
"loss": 2.6347,
"step": 36
},
{
"epoch": 1.0,
"learning_rate": 7.3999999999999995e-06,
"loss": 2.6372,
"step": 37
},
{
"epoch": 1.03,
"learning_rate": 7.599999999999999e-06,
"loss": 2.6386,
"step": 38
},
{
"epoch": 1.05,
"learning_rate": 7.799999999999998e-06,
"loss": 2.6409,
"step": 39
},
{
"epoch": 1.08,
"learning_rate": 8e-06,
"loss": 2.6407,
"step": 40
},
{
"epoch": 1.11,
"learning_rate": 8.2e-06,
"loss": 2.6378,
"step": 41
},
{
"epoch": 1.14,
"learning_rate": 8.4e-06,
"loss": 2.6365,
"step": 42
},
{
"epoch": 1.16,
"learning_rate": 8.599999999999999e-06,
"loss": 2.6378,
"step": 43
},
{
"epoch": 1.19,
"learning_rate": 8.799999999999999e-06,
"loss": 2.6379,
"step": 44
},
{
"epoch": 1.22,
"learning_rate": 8.999999999999999e-06,
"loss": 2.6364,
"step": 45
},
{
"epoch": 1.24,
"learning_rate": 9.199999999999998e-06,
"loss": 2.636,
"step": 46
},
{
"epoch": 1.27,
"learning_rate": 9.399999999999998e-06,
"loss": 2.6417,
"step": 47
},
{
"epoch": 1.3,
"learning_rate": 9.6e-06,
"loss": 2.6437,
"step": 48
},
{
"epoch": 1.32,
"learning_rate": 9.799999999999998e-06,
"loss": 2.6418,
"step": 49
},
{
"epoch": 1.35,
"learning_rate": 9.999999999999999e-06,
"loss": 2.6434,
"step": 50
},
{
"epoch": 1.38,
"learning_rate": 1.02e-05,
"loss": 2.6335,
"step": 51
},
{
"epoch": 1.41,
"learning_rate": 1.0399999999999999e-05,
"loss": 2.6365,
"step": 52
},
{
"epoch": 1.43,
"learning_rate": 1.06e-05,
"loss": 2.6368,
"step": 53
},
{
"epoch": 1.46,
"learning_rate": 1.0799999999999998e-05,
"loss": 2.6363,
"step": 54
},
{
"epoch": 1.49,
"learning_rate": 1.1e-05,
"loss": 2.6342,
"step": 55
},
{
"epoch": 1.51,
"learning_rate": 1.12e-05,
"loss": 2.6448,
"step": 56
},
{
"epoch": 1.54,
"learning_rate": 1.14e-05,
"loss": 2.6343,
"step": 57
},
{
"epoch": 1.57,
"learning_rate": 1.1599999999999999e-05,
"loss": 2.6413,
"step": 58
},
{
"epoch": 1.59,
"learning_rate": 1.1799999999999999e-05,
"loss": 2.638,
"step": 59
},
{
"epoch": 1.62,
"learning_rate": 1.1999999999999999e-05,
"loss": 2.6375,
"step": 60
},
{
"epoch": 1.65,
"learning_rate": 1.2199999999999998e-05,
"loss": 2.6418,
"step": 61
},
{
"epoch": 1.68,
"learning_rate": 1.2399999999999998e-05,
"loss": 2.633,
"step": 62
},
{
"epoch": 1.7,
"learning_rate": 1.26e-05,
"loss": 2.6347,
"step": 63
},
{
"epoch": 1.73,
"learning_rate": 1.2799999999999998e-05,
"loss": 2.6363,
"step": 64
},
{
"epoch": 1.76,
"learning_rate": 1.3e-05,
"loss": 2.643,
"step": 65
},
{
"epoch": 1.78,
"learning_rate": 1.3199999999999997e-05,
"loss": 2.6389,
"step": 66
},
{
"epoch": 1.81,
"learning_rate": 1.3399999999999999e-05,
"loss": 2.6442,
"step": 67
},
{
"epoch": 1.84,
"learning_rate": 1.36e-05,
"loss": 2.6371,
"step": 68
},
{
"epoch": 1.86,
"learning_rate": 1.3799999999999998e-05,
"loss": 2.6353,
"step": 69
},
{
"epoch": 1.89,
"learning_rate": 1.4e-05,
"loss": 2.6364,
"step": 70
},
{
"epoch": 1.92,
"learning_rate": 1.4199999999999998e-05,
"loss": 2.6337,
"step": 71
},
{
"epoch": 1.95,
"learning_rate": 1.44e-05,
"loss": 2.6284,
"step": 72
},
{
"epoch": 1.97,
"learning_rate": 1.4599999999999997e-05,
"loss": 2.6299,
"step": 73
},
{
"epoch": 2.0,
"learning_rate": 1.4799999999999999e-05,
"loss": 2.6349,
"step": 74
},
{
"epoch": 2.03,
"learning_rate": 1.4999999999999999e-05,
"loss": 2.6442,
"step": 75
},
{
"epoch": 2.05,
"learning_rate": 1.5199999999999998e-05,
"loss": 2.642,
"step": 76
},
{
"epoch": 2.08,
"learning_rate": 1.5399999999999998e-05,
"loss": 2.6444,
"step": 77
},
{
"epoch": 2.11,
"learning_rate": 1.5599999999999996e-05,
"loss": 2.6387,
"step": 78
},
{
"epoch": 2.14,
"learning_rate": 1.5799999999999998e-05,
"loss": 2.6377,
"step": 79
},
{
"epoch": 2.16,
"learning_rate": 1.6e-05,
"loss": 2.6342,
"step": 80
},
{
"epoch": 2.19,
"learning_rate": 1.6199999999999997e-05,
"loss": 2.6279,
"step": 81
},
{
"epoch": 2.22,
"learning_rate": 1.64e-05,
"loss": 2.6242,
"step": 82
},
{
"epoch": 2.24,
"learning_rate": 1.6599999999999997e-05,
"loss": 2.6264,
"step": 83
},
{
"epoch": 2.27,
"learning_rate": 1.68e-05,
"loss": 2.647,
"step": 84
},
{
"epoch": 2.3,
"learning_rate": 1.6999999999999996e-05,
"loss": 2.6376,
"step": 85
},
{
"epoch": 2.32,
"learning_rate": 1.7199999999999998e-05,
"loss": 2.6367,
"step": 86
},
{
"epoch": 2.35,
"learning_rate": 1.74e-05,
"loss": 2.6451,
"step": 87
},
{
"epoch": 2.38,
"learning_rate": 1.7599999999999998e-05,
"loss": 2.6299,
"step": 88
},
{
"epoch": 2.41,
"learning_rate": 1.78e-05,
"loss": 2.6352,
"step": 89
},
{
"epoch": 2.43,
"learning_rate": 1.7999999999999997e-05,
"loss": 2.6307,
"step": 90
},
{
"epoch": 2.46,
"learning_rate": 1.82e-05,
"loss": 2.6193,
"step": 91
},
{
"epoch": 2.49,
"learning_rate": 1.8399999999999997e-05,
"loss": 2.6258,
"step": 92
},
{
"epoch": 2.51,
"learning_rate": 1.8599999999999998e-05,
"loss": 2.6363,
"step": 93
},
{
"epoch": 2.54,
"learning_rate": 1.8799999999999996e-05,
"loss": 2.6347,
"step": 94
},
{
"epoch": 2.57,
"learning_rate": 1.9e-05,
"loss": 2.6336,
"step": 95
},
{
"epoch": 2.59,
"learning_rate": 1.92e-05,
"loss": 2.639,
"step": 96
},
{
"epoch": 2.62,
"learning_rate": 1.9399999999999997e-05,
"loss": 2.6274,
"step": 97
},
{
"epoch": 2.65,
"learning_rate": 1.9599999999999995e-05,
"loss": 2.6318,
"step": 98
},
{
"epoch": 2.68,
"learning_rate": 1.98e-05,
"loss": 2.6324,
"step": 99
},
{
"epoch": 2.7,
"learning_rate": 1.9999999999999998e-05,
"loss": 2.6116,
"step": 100
},
{
"epoch": 2.73,
"learning_rate": 2.0199999999999996e-05,
"loss": 2.6282,
"step": 101
},
{
"epoch": 2.76,
"learning_rate": 2.04e-05,
"loss": 2.6382,
"step": 102
},
{
"epoch": 2.78,
"learning_rate": 2.06e-05,
"loss": 2.637,
"step": 103
},
{
"epoch": 2.81,
"learning_rate": 2.0799999999999997e-05,
"loss": 2.6358,
"step": 104
},
{
"epoch": 2.84,
"learning_rate": 2.1e-05,
"loss": 2.6383,
"step": 105
},
{
"epoch": 2.86,
"learning_rate": 2.12e-05,
"loss": 2.63,
"step": 106
},
{
"epoch": 2.89,
"learning_rate": 2.14e-05,
"loss": 2.6221,
"step": 107
},
{
"epoch": 2.92,
"learning_rate": 2.1599999999999996e-05,
"loss": 2.6348,
"step": 108
},
{
"epoch": 2.95,
"learning_rate": 2.1799999999999998e-05,
"loss": 2.6214,
"step": 109
},
{
"epoch": 2.97,
"learning_rate": 2.2e-05,
"loss": 2.616,
"step": 110
},
{
"epoch": 3.0,
"learning_rate": 2.2199999999999998e-05,
"loss": 2.6082,
"step": 111
},
{
"epoch": 3.03,
"learning_rate": 2.24e-05,
"loss": 2.6354,
"step": 112
},
{
"epoch": 3.05,
"learning_rate": 2.2599999999999997e-05,
"loss": 2.6265,
"step": 113
},
{
"epoch": 3.08,
"learning_rate": 2.28e-05,
"loss": 2.6469,
"step": 114
},
{
"epoch": 3.11,
"learning_rate": 2.2999999999999997e-05,
"loss": 2.6404,
"step": 115
},
{
"epoch": 3.14,
"learning_rate": 2.3199999999999998e-05,
"loss": 2.6283,
"step": 116
},
{
"epoch": 3.16,
"learning_rate": 2.34e-05,
"loss": 2.6244,
"step": 117
},
{
"epoch": 3.19,
"learning_rate": 2.3599999999999998e-05,
"loss": 2.6169,
"step": 118
},
{
"epoch": 3.22,
"learning_rate": 2.38e-05,
"loss": 2.6045,
"step": 119
},
{
"epoch": 3.24,
"learning_rate": 2.3999999999999997e-05,
"loss": 2.616,
"step": 120
},
{
"epoch": 3.27,
"learning_rate": 2.42e-05,
"loss": 2.64,
"step": 121
},
{
"epoch": 3.3,
"learning_rate": 2.4399999999999997e-05,
"loss": 2.6298,
"step": 122
},
{
"epoch": 3.32,
"learning_rate": 2.4599999999999998e-05,
"loss": 2.6299,
"step": 123
},
{
"epoch": 3.35,
"learning_rate": 2.4799999999999996e-05,
"loss": 2.6269,
"step": 124
},
{
"epoch": 3.38,
"learning_rate": 2.4999999999999998e-05,
"loss": 2.6174,
"step": 125
},
{
"epoch": 3.41,
"learning_rate": 2.52e-05,
"loss": 2.6369,
"step": 126
},
{
"epoch": 3.43,
"learning_rate": 2.5399999999999997e-05,
"loss": 2.5979,
"step": 127
},
{
"epoch": 3.46,
"learning_rate": 2.5599999999999995e-05,
"loss": 2.6082,
"step": 128
},
{
"epoch": 3.49,
"learning_rate": 2.5799999999999997e-05,
"loss": 2.5813,
"step": 129
},
{
"epoch": 3.51,
"learning_rate": 2.6e-05,
"loss": 2.6109,
"step": 130
},
{
"epoch": 3.54,
"learning_rate": 2.6199999999999996e-05,
"loss": 2.6249,
"step": 131
},
{
"epoch": 3.57,
"learning_rate": 2.6399999999999995e-05,
"loss": 2.6281,
"step": 132
},
{
"epoch": 3.59,
"learning_rate": 2.66e-05,
"loss": 2.6311,
"step": 133
},
{
"epoch": 3.62,
"learning_rate": 2.6799999999999998e-05,
"loss": 2.6356,
"step": 134
},
{
"epoch": 3.65,
"learning_rate": 2.6999999999999996e-05,
"loss": 2.6395,
"step": 135
},
{
"epoch": 3.68,
"learning_rate": 2.72e-05,
"loss": 2.6213,
"step": 136
},
{
"epoch": 3.7,
"learning_rate": 2.74e-05,
"loss": 2.5947,
"step": 137
},
{
"epoch": 3.73,
"learning_rate": 2.7599999999999997e-05,
"loss": 2.6031,
"step": 138
},
{
"epoch": 3.76,
"learning_rate": 2.7799999999999995e-05,
"loss": 2.6187,
"step": 139
},
{
"epoch": 3.78,
"learning_rate": 2.8e-05,
"loss": 2.6202,
"step": 140
},
{
"epoch": 3.81,
"learning_rate": 2.8199999999999998e-05,
"loss": 2.6171,
"step": 141
},
{
"epoch": 3.84,
"learning_rate": 2.8399999999999996e-05,
"loss": 2.6385,
"step": 142
},
{
"epoch": 3.86,
"learning_rate": 2.86e-05,
"loss": 2.6041,
"step": 143
},
{
"epoch": 3.89,
"learning_rate": 2.88e-05,
"loss": 2.6377,
"step": 144
},
{
"epoch": 3.92,
"learning_rate": 2.8999999999999997e-05,
"loss": 2.6178,
"step": 145
},
{
"epoch": 3.95,
"learning_rate": 2.9199999999999995e-05,
"loss": 2.557,
"step": 146
},
{
"epoch": 3.97,
"learning_rate": 2.94e-05,
"loss": 2.5592,
"step": 147
},
{
"epoch": 4.0,
"learning_rate": 2.9599999999999998e-05,
"loss": 2.5815,
"step": 148
},
{
"epoch": 4.03,
"learning_rate": 2.9799999999999996e-05,
"loss": 2.5901,
"step": 149
},
{
"epoch": 4.05,
"learning_rate": 2.9999999999999997e-05,
"loss": 2.6119,
"step": 150
},
{
"epoch": 4.08,
"learning_rate": 3.02e-05,
"loss": 2.601,
"step": 151
},
{
"epoch": 4.11,
"learning_rate": 3.0399999999999997e-05,
"loss": 2.6193,
"step": 152
},
{
"epoch": 4.14,
"learning_rate": 3.06e-05,
"loss": 2.6095,
"step": 153
},
{
"epoch": 4.16,
"learning_rate": 3.0799999999999996e-05,
"loss": 2.6152,
"step": 154
},
{
"epoch": 4.19,
"learning_rate": 3.0999999999999995e-05,
"loss": 2.6206,
"step": 155
},
{
"epoch": 4.22,
"learning_rate": 3.119999999999999e-05,
"loss": 2.5746,
"step": 156
},
{
"epoch": 4.24,
"learning_rate": 3.14e-05,
"loss": 2.5693,
"step": 157
},
{
"epoch": 4.27,
"learning_rate": 3.1599999999999996e-05,
"loss": 2.5845,
"step": 158
},
{
"epoch": 4.3,
"learning_rate": 3.1799999999999994e-05,
"loss": 2.6012,
"step": 159
},
{
"epoch": 4.32,
"learning_rate": 3.2e-05,
"loss": 2.6029,
"step": 160
},
{
"epoch": 4.35,
"learning_rate": 3.22e-05,
"loss": 2.6031,
"step": 161
},
{
"epoch": 4.38,
"learning_rate": 3.2399999999999995e-05,
"loss": 2.6327,
"step": 162
},
{
"epoch": 4.41,
"learning_rate": 3.259999999999999e-05,
"loss": 2.6029,
"step": 163
},
{
"epoch": 4.43,
"learning_rate": 3.28e-05,
"loss": 2.5042,
"step": 164
},
{
"epoch": 4.46,
"learning_rate": 3.2999999999999996e-05,
"loss": 2.5999,
"step": 165
},
{
"epoch": 4.49,
"learning_rate": 3.3199999999999994e-05,
"loss": 2.576,
"step": 166
},
{
"epoch": 4.51,
"learning_rate": 3.34e-05,
"loss": 2.5642,
"step": 167
},
{
"epoch": 4.54,
"learning_rate": 3.36e-05,
"loss": 2.5894,
"step": 168
},
{
"epoch": 4.57,
"learning_rate": 3.3799999999999995e-05,
"loss": 2.6032,
"step": 169
},
{
"epoch": 4.59,
"learning_rate": 3.399999999999999e-05,
"loss": 2.625,
"step": 170
},
{
"epoch": 4.62,
"learning_rate": 3.42e-05,
"loss": 2.6339,
"step": 171
},
{
"epoch": 4.65,
"learning_rate": 3.4399999999999996e-05,
"loss": 2.5575,
"step": 172
},
{
"epoch": 4.68,
"learning_rate": 3.4599999999999994e-05,
"loss": 2.5494,
"step": 173
},
{
"epoch": 4.7,
"learning_rate": 3.48e-05,
"loss": 2.5556,
"step": 174
},
{
"epoch": 4.73,
"learning_rate": 3.5e-05,
"loss": 2.4615,
"step": 175
},
{
"epoch": 4.76,
"learning_rate": 3.5199999999999995e-05,
"loss": 2.7476,
"step": 176
},
{
"epoch": 4.78,
"learning_rate": 3.539999999999999e-05,
"loss": 2.6781,
"step": 177
},
{
"epoch": 4.81,
"learning_rate": 3.56e-05,
"loss": 2.5902,
"step": 178
},
{
"epoch": 4.84,
"learning_rate": 3.5799999999999996e-05,
"loss": 2.6021,
"step": 179
},
{
"epoch": 4.86,
"learning_rate": 3.5999999999999994e-05,
"loss": 2.6898,
"step": 180
},
{
"epoch": 4.89,
"learning_rate": 3.62e-05,
"loss": 2.6671,
"step": 181
},
{
"epoch": 4.92,
"learning_rate": 3.64e-05,
"loss": 2.5783,
"step": 182
},
{
"epoch": 4.95,
"learning_rate": 3.6599999999999995e-05,
"loss": 2.5957,
"step": 183
},
{
"epoch": 4.97,
"learning_rate": 3.679999999999999e-05,
"loss": 2.5449,
"step": 184
},
{
"epoch": 5.0,
"learning_rate": 3.7e-05,
"loss": 2.5292,
"step": 185
},
{
"epoch": 5.03,
"learning_rate": 3.7199999999999996e-05,
"loss": 2.5724,
"step": 186
},
{
"epoch": 5.05,
"learning_rate": 3.7399999999999994e-05,
"loss": 2.6295,
"step": 187
},
{
"epoch": 5.08,
"learning_rate": 3.759999999999999e-05,
"loss": 2.6541,
"step": 188
},
{
"epoch": 5.11,
"learning_rate": 3.78e-05,
"loss": 2.5877,
"step": 189
},
{
"epoch": 5.14,
"learning_rate": 3.8e-05,
"loss": 2.5809,
"step": 190
},
{
"epoch": 5.16,
"learning_rate": 3.8199999999999993e-05,
"loss": 2.5981,
"step": 191
},
{
"epoch": 5.19,
"learning_rate": 3.84e-05,
"loss": 2.5479,
"step": 192
},
{
"epoch": 5.22,
"learning_rate": 3.86e-05,
"loss": 2.603,
"step": 193
},
{
"epoch": 5.24,
"learning_rate": 3.8799999999999994e-05,
"loss": 2.5262,
"step": 194
},
{
"epoch": 5.27,
"learning_rate": 3.9e-05,
"loss": 2.5537,
"step": 195
},
{
"epoch": 5.3,
"learning_rate": 3.919999999999999e-05,
"loss": 2.5796,
"step": 196
},
{
"epoch": 5.32,
"learning_rate": 3.9399999999999995e-05,
"loss": 2.5734,
"step": 197
},
{
"epoch": 5.35,
"learning_rate": 3.96e-05,
"loss": 2.6088,
"step": 198
},
{
"epoch": 5.38,
"learning_rate": 3.979999999999999e-05,
"loss": 2.6707,
"step": 199
},
{
"epoch": 5.41,
"learning_rate": 3.9999999999999996e-05,
"loss": 2.6739,
"step": 200
},
{
"epoch": 5.41,
"eval_accuracy": 0.11897498474679682,
"eval_f1": 0.043012478058323964,
"eval_loss": 2.568721055984497,
"eval_runtime": 146.9392,
"eval_samples_per_second": 11.154,
"eval_steps_per_second": 0.701,
"step": 200
},
{
"epoch": 5.43,
"learning_rate": 4.02e-05,
"loss": 2.5526,
"step": 201
},
{
"epoch": 5.46,
"learning_rate": 4.039999999999999e-05,
"loss": 2.4739,
"step": 202
},
{
"epoch": 5.49,
"learning_rate": 4.06e-05,
"loss": 2.4736,
"step": 203
},
{
"epoch": 5.51,
"learning_rate": 4.08e-05,
"loss": 2.5938,
"step": 204
},
{
"epoch": 5.54,
"learning_rate": 4.0999999999999994e-05,
"loss": 2.6251,
"step": 205
},
{
"epoch": 5.57,
"learning_rate": 4.12e-05,
"loss": 2.5928,
"step": 206
},
{
"epoch": 5.59,
"learning_rate": 4.14e-05,
"loss": 2.6375,
"step": 207
},
{
"epoch": 5.62,
"learning_rate": 4.1599999999999995e-05,
"loss": 2.6963,
"step": 208
},
{
"epoch": 5.65,
"learning_rate": 4.18e-05,
"loss": 2.6731,
"step": 209
},
{
"epoch": 5.68,
"learning_rate": 4.2e-05,
"loss": 2.5799,
"step": 210
},
{
"epoch": 5.7,
"learning_rate": 4.2199999999999996e-05,
"loss": 2.5423,
"step": 211
},
{
"epoch": 5.73,
"learning_rate": 4.24e-05,
"loss": 2.5009,
"step": 212
},
{
"epoch": 5.76,
"learning_rate": 4.259999999999999e-05,
"loss": 2.5705,
"step": 213
},
{
"epoch": 5.78,
"learning_rate": 4.28e-05,
"loss": 2.5714,
"step": 214
},
{
"epoch": 5.81,
"learning_rate": 4.3e-05,
"loss": 2.5848,
"step": 215
},
{
"epoch": 5.84,
"learning_rate": 4.319999999999999e-05,
"loss": 2.5847,
"step": 216
},
{
"epoch": 5.86,
"learning_rate": 4.34e-05,
"loss": 2.5998,
"step": 217
},
{
"epoch": 5.89,
"learning_rate": 4.3599999999999996e-05,
"loss": 2.5082,
"step": 218
},
{
"epoch": 5.92,
"learning_rate": 4.3799999999999994e-05,
"loss": 2.5943,
"step": 219
},
{
"epoch": 5.95,
"learning_rate": 4.4e-05,
"loss": 2.6021,
"step": 220
},
{
"epoch": 5.97,
"learning_rate": 4.42e-05,
"loss": 2.4933,
"step": 221
},
{
"epoch": 6.0,
"learning_rate": 4.4399999999999995e-05,
"loss": 2.4663,
"step": 222
},
{
"epoch": 6.03,
"learning_rate": 4.46e-05,
"loss": 2.5512,
"step": 223
},
{
"epoch": 6.05,
"learning_rate": 4.48e-05,
"loss": 2.5553,
"step": 224
},
{
"epoch": 6.08,
"learning_rate": 4.4999999999999996e-05,
"loss": 2.6202,
"step": 225
},
{
"epoch": 6.11,
"learning_rate": 4.5199999999999994e-05,
"loss": 2.5573,
"step": 226
},
{
"epoch": 6.14,
"learning_rate": 4.539999999999999e-05,
"loss": 2.5991,
"step": 227
},
{
"epoch": 6.16,
"learning_rate": 4.56e-05,
"loss": 2.5368,
"step": 228
},
{
"epoch": 6.19,
"learning_rate": 4.5799999999999995e-05,
"loss": 2.5112,
"step": 229
},
{
"epoch": 6.22,
"learning_rate": 4.599999999999999e-05,
"loss": 2.4998,
"step": 230
},
{
"epoch": 6.24,
"learning_rate": 4.62e-05,
"loss": 2.4995,
"step": 231
},
{
"epoch": 6.27,
"learning_rate": 4.6399999999999996e-05,
"loss": 2.5927,
"step": 232
},
{
"epoch": 6.3,
"learning_rate": 4.6599999999999994e-05,
"loss": 2.5962,
"step": 233
},
{
"epoch": 6.32,
"learning_rate": 4.68e-05,
"loss": 2.5677,
"step": 234
},
{
"epoch": 6.35,
"learning_rate": 4.7e-05,
"loss": 2.5205,
"step": 235
},
{
"epoch": 6.38,
"learning_rate": 4.7199999999999995e-05,
"loss": 2.5376,
"step": 236
},
{
"epoch": 6.41,
"learning_rate": 4.7399999999999993e-05,
"loss": 2.5479,
"step": 237
},
{
"epoch": 6.43,
"learning_rate": 4.76e-05,
"loss": 2.5495,
"step": 238
},
{
"epoch": 6.46,
"learning_rate": 4.7799999999999996e-05,
"loss": 2.5021,
"step": 239
},
{
"epoch": 6.49,
"learning_rate": 4.7999999999999994e-05,
"loss": 2.3521,
"step": 240
},
{
"epoch": 6.51,
"learning_rate": 4.82e-05,
"loss": 2.5678,
"step": 241
},
{
"epoch": 6.54,
"learning_rate": 4.84e-05,
"loss": 2.5656,
"step": 242
},
{
"epoch": 6.57,
"learning_rate": 4.8599999999999995e-05,
"loss": 2.519,
"step": 243
},
{
"epoch": 6.59,
"learning_rate": 4.8799999999999994e-05,
"loss": 2.5035,
"step": 244
},
{
"epoch": 6.62,
"learning_rate": 4.899999999999999e-05,
"loss": 2.526,
"step": 245
},
{
"epoch": 6.65,
"learning_rate": 4.9199999999999997e-05,
"loss": 2.496,
"step": 246
},
{
"epoch": 6.68,
"learning_rate": 4.9399999999999995e-05,
"loss": 2.3688,
"step": 247
},
{
"epoch": 6.7,
"learning_rate": 4.959999999999999e-05,
"loss": 2.409,
"step": 248
},
{
"epoch": 6.73,
"learning_rate": 4.98e-05,
"loss": 2.4092,
"step": 249
},
{
"epoch": 6.76,
"learning_rate": 4.9999999999999996e-05,
"loss": 2.584,
"step": 250
},
{
"epoch": 6.78,
"learning_rate": 5.0199999999999994e-05,
"loss": 2.5024,
"step": 251
},
{
"epoch": 6.81,
"learning_rate": 5.04e-05,
"loss": 2.6997,
"step": 252
},
{
"epoch": 6.84,
"learning_rate": 5.06e-05,
"loss": 2.6143,
"step": 253
},
{
"epoch": 6.86,
"learning_rate": 5.0799999999999995e-05,
"loss": 2.538,
"step": 254
},
{
"epoch": 6.89,
"learning_rate": 5.1e-05,
"loss": 2.4851,
"step": 255
},
{
"epoch": 6.92,
"learning_rate": 5.119999999999999e-05,
"loss": 2.4812,
"step": 256
},
{
"epoch": 6.95,
"learning_rate": 5.1399999999999996e-05,
"loss": 2.49,
"step": 257
},
{
"epoch": 6.97,
"learning_rate": 5.1599999999999994e-05,
"loss": 2.4491,
"step": 258
},
{
"epoch": 7.0,
"learning_rate": 5.179999999999999e-05,
"loss": 2.3631,
"step": 259
},
{
"epoch": 7.03,
"learning_rate": 5.2e-05,
"loss": 2.5618,
"step": 260
},
{
"epoch": 7.05,
"learning_rate": 5.2199999999999995e-05,
"loss": 2.497,
"step": 261
},
{
"epoch": 7.08,
"learning_rate": 5.239999999999999e-05,
"loss": 2.5047,
"step": 262
},
{
"epoch": 7.11,
"learning_rate": 5.26e-05,
"loss": 2.4196,
"step": 263
},
{
"epoch": 7.14,
"learning_rate": 5.279999999999999e-05,
"loss": 2.3383,
"step": 264
},
{
"epoch": 7.16,
"learning_rate": 5.2999999999999994e-05,
"loss": 2.4444,
"step": 265
},
{
"epoch": 7.19,
"learning_rate": 5.32e-05,
"loss": 2.3745,
"step": 266
},
{
"epoch": 7.22,
"learning_rate": 5.339999999999999e-05,
"loss": 2.4306,
"step": 267
},
{
"epoch": 7.24,
"learning_rate": 5.3599999999999995e-05,
"loss": 2.3145,
"step": 268
},
{
"epoch": 7.27,
"learning_rate": 5.38e-05,
"loss": 2.4592,
"step": 269
},
{
"epoch": 7.3,
"learning_rate": 5.399999999999999e-05,
"loss": 2.4447,
"step": 270
},
{
"epoch": 7.32,
"learning_rate": 5.4199999999999996e-05,
"loss": 2.4029,
"step": 271
},
{
"epoch": 7.35,
"learning_rate": 5.44e-05,
"loss": 2.3272,
"step": 272
},
{
"epoch": 7.38,
"learning_rate": 5.459999999999999e-05,
"loss": 2.4472,
"step": 273
},
{
"epoch": 7.41,
"learning_rate": 5.48e-05,
"loss": 2.3294,
"step": 274
},
{
"epoch": 7.43,
"learning_rate": 5.499999999999999e-05,
"loss": 2.3245,
"step": 275
},
{
"epoch": 7.46,
"learning_rate": 5.519999999999999e-05,
"loss": 2.3615,
"step": 276
},
{
"epoch": 7.49,
"learning_rate": 5.54e-05,
"loss": 2.3143,
"step": 277
},
{
"epoch": 7.51,
"learning_rate": 5.559999999999999e-05,
"loss": 2.4937,
"step": 278
},
{
"epoch": 7.54,
"learning_rate": 5.5799999999999994e-05,
"loss": 2.4228,
"step": 279
},
{
"epoch": 7.57,
"learning_rate": 5.6e-05,
"loss": 2.3861,
"step": 280
},
{
"epoch": 7.59,
"learning_rate": 5.619999999999999e-05,
"loss": 2.406,
"step": 281
},
{
"epoch": 7.62,
"learning_rate": 5.6399999999999995e-05,
"loss": 2.44,
"step": 282
},
{
"epoch": 7.65,
"learning_rate": 5.66e-05,
"loss": 2.445,
"step": 283
},
{
"epoch": 7.68,
"learning_rate": 5.679999999999999e-05,
"loss": 2.3667,
"step": 284
},
{
"epoch": 7.7,
"learning_rate": 5.6999999999999996e-05,
"loss": 2.3431,
"step": 285
},
{
"epoch": 7.73,
"learning_rate": 5.72e-05,
"loss": 2.3846,
"step": 286
},
{
"epoch": 7.76,
"learning_rate": 5.739999999999999e-05,
"loss": 2.3478,
"step": 287
},
{
"epoch": 7.78,
"learning_rate": 5.76e-05,
"loss": 2.4035,
"step": 288
},
{
"epoch": 7.81,
"learning_rate": 5.78e-05,
"loss": 2.1838,
"step": 289
},
{
"epoch": 7.84,
"learning_rate": 5.7999999999999994e-05,
"loss": 2.2607,
"step": 290
},
{
"epoch": 7.86,
"learning_rate": 5.82e-05,
"loss": 2.3555,
"step": 291
},
{
"epoch": 7.89,
"learning_rate": 5.839999999999999e-05,
"loss": 2.3329,
"step": 292
},
{
"epoch": 7.92,
"learning_rate": 5.8599999999999995e-05,
"loss": 2.2661,
"step": 293
},
{
"epoch": 7.95,
"learning_rate": 5.88e-05,
"loss": 2.183,
"step": 294
},
{
"epoch": 7.97,
"learning_rate": 5.899999999999999e-05,
"loss": 2.2078,
"step": 295
},
{
"epoch": 8.0,
"learning_rate": 5.9199999999999996e-05,
"loss": 2.09,
"step": 296
},
{
"epoch": 8.03,
"learning_rate": 5.94e-05,
"loss": 2.2115,
"step": 297
},
{
"epoch": 8.05,
"learning_rate": 5.959999999999999e-05,
"loss": 2.1926,
"step": 298
},
{
"epoch": 8.08,
"learning_rate": 5.98e-05,
"loss": 2.0966,
"step": 299
},
{
"epoch": 8.11,
"learning_rate": 5.9999999999999995e-05,
"loss": 2.1595,
"step": 300
},
{
"epoch": 8.14,
"learning_rate": 6.019999999999999e-05,
"loss": 2.1798,
"step": 301
},
{
"epoch": 8.16,
"learning_rate": 6.04e-05,
"loss": 2.0468,
"step": 302
},
{
"epoch": 8.19,
"learning_rate": 6.0599999999999996e-05,
"loss": 2.1874,
"step": 303
},
{
"epoch": 8.22,
"learning_rate": 6.0799999999999994e-05,
"loss": 2.1913,
"step": 304
},
{
"epoch": 8.24,
"learning_rate": 6.1e-05,
"loss": 2.1783,
"step": 305
},
{
"epoch": 8.27,
"learning_rate": 6.12e-05,
"loss": 2.4473,
"step": 306
},
{
"epoch": 8.3,
"learning_rate": 6.139999999999999e-05,
"loss": 2.2005,
"step": 307
},
{
"epoch": 8.32,
"learning_rate": 6.159999999999999e-05,
"loss": 2.1019,
"step": 308
},
{
"epoch": 8.35,
"learning_rate": 6.18e-05,
"loss": 2.0509,
"step": 309
},
{
"epoch": 8.38,
"learning_rate": 6.199999999999999e-05,
"loss": 2.1941,
"step": 310
},
{
"epoch": 8.41,
"learning_rate": 6.22e-05,
"loss": 2.0721,
"step": 311
},
{
"epoch": 8.43,
"learning_rate": 6.239999999999999e-05,
"loss": 2.1192,
"step": 312
},
{
"epoch": 8.46,
"learning_rate": 6.259999999999999e-05,
"loss": 2.0177,
"step": 313
},
{
"epoch": 8.49,
"learning_rate": 6.28e-05,
"loss": 1.9845,
"step": 314
},
{
"epoch": 8.51,
"learning_rate": 6.299999999999999e-05,
"loss": 2.238,
"step": 315
},
{
"epoch": 8.54,
"learning_rate": 6.319999999999999e-05,
"loss": 2.112,
"step": 316
},
{
"epoch": 8.57,
"learning_rate": 6.34e-05,
"loss": 2.0599,
"step": 317
},
{
"epoch": 8.59,
"learning_rate": 6.359999999999999e-05,
"loss": 1.9024,
"step": 318
},
{
"epoch": 8.62,
"learning_rate": 6.379999999999999e-05,
"loss": 2.0076,
"step": 319
},
{
"epoch": 8.65,
"learning_rate": 6.4e-05,
"loss": 2.1417,
"step": 320
},
{
"epoch": 8.68,
"learning_rate": 6.419999999999999e-05,
"loss": 2.1203,
"step": 321
},
{
"epoch": 8.7,
"learning_rate": 6.44e-05,
"loss": 2.0671,
"step": 322
},
{
"epoch": 8.73,
"learning_rate": 6.459999999999998e-05,
"loss": 2.1219,
"step": 323
},
{
"epoch": 8.76,
"learning_rate": 6.479999999999999e-05,
"loss": 2.0633,
"step": 324
},
{
"epoch": 8.78,
"learning_rate": 6.5e-05,
"loss": 2.2034,
"step": 325
},
{
"epoch": 8.81,
"learning_rate": 6.519999999999999e-05,
"loss": 1.9897,
"step": 326
},
{
"epoch": 8.84,
"learning_rate": 6.539999999999999e-05,
"loss": 2.0454,
"step": 327
},
{
"epoch": 8.86,
"learning_rate": 6.56e-05,
"loss": 2.0497,
"step": 328
},
{
"epoch": 8.89,
"learning_rate": 6.579999999999999e-05,
"loss": 1.9576,
"step": 329
},
{
"epoch": 8.92,
"learning_rate": 6.599999999999999e-05,
"loss": 1.9456,
"step": 330
},
{
"epoch": 8.95,
"learning_rate": 6.62e-05,
"loss": 2.0269,
"step": 331
},
{
"epoch": 8.97,
"learning_rate": 6.639999999999999e-05,
"loss": 1.8103,
"step": 332
},
{
"epoch": 9.0,
"learning_rate": 6.659999999999999e-05,
"loss": 2.2947,
"step": 333
},
{
"epoch": 9.03,
"learning_rate": 6.68e-05,
"loss": 2.0997,
"step": 334
},
{
"epoch": 9.05,
"learning_rate": 6.699999999999999e-05,
"loss": 1.93,
"step": 335
},
{
"epoch": 9.08,
"learning_rate": 6.72e-05,
"loss": 1.8143,
"step": 336
},
{
"epoch": 9.11,
"learning_rate": 6.739999999999998e-05,
"loss": 1.9162,
"step": 337
},
{
"epoch": 9.14,
"learning_rate": 6.759999999999999e-05,
"loss": 2.0876,
"step": 338
},
{
"epoch": 9.16,
"learning_rate": 6.78e-05,
"loss": 1.968,
"step": 339
},
{
"epoch": 9.19,
"learning_rate": 6.799999999999999e-05,
"loss": 2.0487,
"step": 340
},
{
"epoch": 9.22,
"learning_rate": 6.819999999999999e-05,
"loss": 1.9975,
"step": 341
},
{
"epoch": 9.24,
"learning_rate": 6.84e-05,
"loss": 1.8253,
"step": 342
},
{
"epoch": 9.27,
"learning_rate": 6.859999999999999e-05,
"loss": 2.1425,
"step": 343
},
{
"epoch": 9.3,
"learning_rate": 6.879999999999999e-05,
"loss": 2.1315,
"step": 344
},
{
"epoch": 9.32,
"learning_rate": 6.9e-05,
"loss": 1.7855,
"step": 345
},
{
"epoch": 9.35,
"learning_rate": 6.919999999999999e-05,
"loss": 1.8839,
"step": 346
},
{
"epoch": 9.38,
"learning_rate": 6.939999999999999e-05,
"loss": 1.8469,
"step": 347
},
{
"epoch": 9.41,
"learning_rate": 6.96e-05,
"loss": 1.7415,
"step": 348
},
{
"epoch": 9.43,
"learning_rate": 6.979999999999999e-05,
"loss": 2.038,
"step": 349
},
{
"epoch": 9.46,
"learning_rate": 7e-05,
"loss": 1.7268,
"step": 350
},
{
"epoch": 9.49,
"learning_rate": 7.02e-05,
"loss": 1.7827,
"step": 351
},
{
"epoch": 9.51,
"learning_rate": 7.039999999999999e-05,
"loss": 1.9078,
"step": 352
},
{
"epoch": 9.54,
"learning_rate": 7.06e-05,
"loss": 1.8195,
"step": 353
},
{
"epoch": 9.57,
"learning_rate": 7.079999999999999e-05,
"loss": 1.9038,
"step": 354
},
{
"epoch": 9.59,
"learning_rate": 7.099999999999999e-05,
"loss": 1.8091,
"step": 355
},
{
"epoch": 9.62,
"learning_rate": 7.12e-05,
"loss": 1.9322,
"step": 356
},
{
"epoch": 9.65,
"learning_rate": 7.139999999999999e-05,
"loss": 1.7492,
"step": 357
},
{
"epoch": 9.68,
"learning_rate": 7.159999999999999e-05,
"loss": 1.7152,
"step": 358
},
{
"epoch": 9.7,
"learning_rate": 7.18e-05,
"loss": 1.7611,
"step": 359
},
{
"epoch": 9.73,
"learning_rate": 7.199999999999999e-05,
"loss": 1.6853,
"step": 360
},
{
"epoch": 9.76,
"learning_rate": 7.219999999999999e-05,
"loss": 1.8927,
"step": 361
},
{
"epoch": 9.78,
"learning_rate": 7.24e-05,
"loss": 1.8974,
"step": 362
},
{
"epoch": 9.81,
"learning_rate": 7.259999999999999e-05,
"loss": 1.7425,
"step": 363
},
{
"epoch": 9.84,
"learning_rate": 7.28e-05,
"loss": 1.801,
"step": 364
},
{
"epoch": 9.86,
"learning_rate": 7.3e-05,
"loss": 1.7858,
"step": 365
},
{
"epoch": 9.89,
"learning_rate": 7.319999999999999e-05,
"loss": 1.9879,
"step": 366
},
{
"epoch": 9.92,
"learning_rate": 7.34e-05,
"loss": 1.742,
"step": 367
},
{
"epoch": 9.95,
"learning_rate": 7.359999999999999e-05,
"loss": 1.7452,
"step": 368
},
{
"epoch": 9.97,
"learning_rate": 7.379999999999999e-05,
"loss": 1.6894,
"step": 369
},
{
"epoch": 10.0,
"learning_rate": 7.4e-05,
"loss": 1.6956,
"step": 370
},
{
"epoch": 10.03,
"learning_rate": 7.419999999999999e-05,
"loss": 1.914,
"step": 371
},
{
"epoch": 10.05,
"learning_rate": 7.439999999999999e-05,
"loss": 1.8094,
"step": 372
},
{
"epoch": 10.08,
"learning_rate": 7.46e-05,
"loss": 1.6506,
"step": 373
},
{
"epoch": 10.11,
"learning_rate": 7.479999999999999e-05,
"loss": 1.6168,
"step": 374
},
{
"epoch": 10.14,
"learning_rate": 7.5e-05,
"loss": 1.7375,
"step": 375
},
{
"epoch": 10.16,
"learning_rate": 7.519999999999998e-05,
"loss": 1.556,
"step": 376
},
{
"epoch": 10.19,
"learning_rate": 7.54e-05,
"loss": 1.6714,
"step": 377
},
{
"epoch": 10.22,
"learning_rate": 7.56e-05,
"loss": 1.5655,
"step": 378
},
{
"epoch": 10.24,
"learning_rate": 7.579999999999999e-05,
"loss": 1.6161,
"step": 379
},
{
"epoch": 10.27,
"learning_rate": 7.6e-05,
"loss": 1.9182,
"step": 380
},
{
"epoch": 10.3,
"learning_rate": 7.62e-05,
"loss": 1.7702,
"step": 381
},
{
"epoch": 10.32,
"learning_rate": 7.639999999999999e-05,
"loss": 1.7353,
"step": 382
},
{
"epoch": 10.35,
"learning_rate": 7.66e-05,
"loss": 1.6404,
"step": 383
},
{
"epoch": 10.38,
"learning_rate": 7.68e-05,
"loss": 1.7743,
"step": 384
},
{
"epoch": 10.41,
"learning_rate": 7.699999999999999e-05,
"loss": 1.4808,
"step": 385
},
{
"epoch": 10.43,
"learning_rate": 7.72e-05,
"loss": 1.6405,
"step": 386
},
{
"epoch": 10.46,
"learning_rate": 7.74e-05,
"loss": 1.5348,
"step": 387
},
{
"epoch": 10.49,
"learning_rate": 7.759999999999999e-05,
"loss": 1.4336,
"step": 388
},
{
"epoch": 10.51,
"learning_rate": 7.780000000000001e-05,
"loss": 1.6258,
"step": 389
},
{
"epoch": 10.54,
"learning_rate": 7.8e-05,
"loss": 1.7506,
"step": 390
},
{
"epoch": 10.57,
"learning_rate": 7.819999999999999e-05,
"loss": 1.3946,
"step": 391
},
{
"epoch": 10.59,
"learning_rate": 7.839999999999998e-05,
"loss": 1.6215,
"step": 392
},
{
"epoch": 10.62,
"learning_rate": 7.86e-05,
"loss": 1.485,
"step": 393
},
{
"epoch": 10.65,
"learning_rate": 7.879999999999999e-05,
"loss": 1.4283,
"step": 394
},
{
"epoch": 10.68,
"learning_rate": 7.899999999999998e-05,
"loss": 1.4608,
"step": 395
},
{
"epoch": 10.7,
"learning_rate": 7.92e-05,
"loss": 1.6412,
"step": 396
},
{
"epoch": 10.73,
"learning_rate": 7.939999999999999e-05,
"loss": 1.3955,
"step": 397
},
{
"epoch": 10.76,
"learning_rate": 7.959999999999998e-05,
"loss": 1.6308,
"step": 398
},
{
"epoch": 10.78,
"learning_rate": 7.98e-05,
"loss": 1.4336,
"step": 399
},
{
"epoch": 10.81,
"learning_rate": 7.999999999999999e-05,
"loss": 1.4953,
"step": 400
},
{
"epoch": 10.81,
"eval_accuracy": 0.5692495424039048,
"eval_f1": 0.5550435993245623,
"eval_loss": 1.6052155494689941,
"eval_runtime": 145.469,
"eval_samples_per_second": 11.267,
"eval_steps_per_second": 0.708,
"step": 400
},
{
"epoch": 10.84,
"learning_rate": 8.019999999999998e-05,
"loss": 1.5986,
"step": 401
},
{
"epoch": 10.86,
"learning_rate": 8.04e-05,
"loss": 1.4496,
"step": 402
},
{
"epoch": 10.89,
"learning_rate": 8.06e-05,
"loss": 1.3469,
"step": 403
},
{
"epoch": 10.92,
"learning_rate": 8.079999999999999e-05,
"loss": 1.3803,
"step": 404
},
{
"epoch": 10.95,
"learning_rate": 8.1e-05,
"loss": 1.7213,
"step": 405
},
{
"epoch": 10.97,
"learning_rate": 8.12e-05,
"loss": 1.5288,
"step": 406
},
{
"epoch": 11.0,
"learning_rate": 8.139999999999999e-05,
"loss": 1.5151,
"step": 407
},
{
"epoch": 11.03,
"learning_rate": 8.16e-05,
"loss": 1.5882,
"step": 408
},
{
"epoch": 11.05,
"learning_rate": 8.18e-05,
"loss": 1.4347,
"step": 409
},
{
"epoch": 11.08,
"learning_rate": 8.199999999999999e-05,
"loss": 1.2348,
"step": 410
},
{
"epoch": 11.11,
"learning_rate": 8.22e-05,
"loss": 1.3429,
"step": 411
},
{
"epoch": 11.14,
"learning_rate": 8.24e-05,
"loss": 1.4835,
"step": 412
},
{
"epoch": 11.16,
"learning_rate": 8.259999999999999e-05,
"loss": 1.4372,
"step": 413
},
{
"epoch": 11.19,
"learning_rate": 8.28e-05,
"loss": 1.6437,
"step": 414
},
{
"epoch": 11.22,
"learning_rate": 8.3e-05,
"loss": 1.3977,
"step": 415
},
{
"epoch": 11.24,
"learning_rate": 8.319999999999999e-05,
"loss": 1.2792,
"step": 416
},
{
"epoch": 11.27,
"learning_rate": 8.34e-05,
"loss": 1.1592,
"step": 417
},
{
"epoch": 11.3,
"learning_rate": 8.36e-05,
"loss": 1.355,
"step": 418
},
{
"epoch": 11.32,
"learning_rate": 8.379999999999999e-05,
"loss": 1.3062,
"step": 419
},
{
"epoch": 11.35,
"learning_rate": 8.4e-05,
"loss": 1.2942,
"step": 420
},
{
"epoch": 11.38,
"learning_rate": 8.42e-05,
"loss": 1.3441,
"step": 421
},
{
"epoch": 11.41,
"learning_rate": 8.439999999999999e-05,
"loss": 1.2757,
"step": 422
},
{
"epoch": 11.43,
"learning_rate": 8.459999999999998e-05,
"loss": 1.4208,
"step": 423
},
{
"epoch": 11.46,
"learning_rate": 8.48e-05,
"loss": 1.5512,
"step": 424
},
{
"epoch": 11.49,
"learning_rate": 8.499999999999999e-05,
"loss": 1.2471,
"step": 425
},
{
"epoch": 11.51,
"learning_rate": 8.519999999999998e-05,
"loss": 1.6044,
"step": 426
},
{
"epoch": 11.54,
"learning_rate": 8.54e-05,
"loss": 1.5731,
"step": 427
},
{
"epoch": 11.57,
"learning_rate": 8.56e-05,
"loss": 1.2329,
"step": 428
},
{
"epoch": 11.59,
"learning_rate": 8.579999999999998e-05,
"loss": 1.0439,
"step": 429
},
{
"epoch": 11.62,
"learning_rate": 8.6e-05,
"loss": 1.3202,
"step": 430
},
{
"epoch": 11.65,
"learning_rate": 8.62e-05,
"loss": 1.3318,
"step": 431
},
{
"epoch": 11.68,
"learning_rate": 8.639999999999999e-05,
"loss": 1.3758,
"step": 432
},
{
"epoch": 11.7,
"learning_rate": 8.659999999999999e-05,
"loss": 1.3132,
"step": 433
},
{
"epoch": 11.73,
"learning_rate": 8.68e-05,
"loss": 1.2362,
"step": 434
},
{
"epoch": 11.76,
"learning_rate": 8.699999999999999e-05,
"loss": 1.1981,
"step": 435
},
{
"epoch": 11.78,
"learning_rate": 8.719999999999999e-05,
"loss": 1.1684,
"step": 436
},
{
"epoch": 11.81,
"learning_rate": 8.74e-05,
"loss": 1.3668,
"step": 437
},
{
"epoch": 11.84,
"learning_rate": 8.759999999999999e-05,
"loss": 1.2949,
"step": 438
},
{
"epoch": 11.86,
"learning_rate": 8.779999999999999e-05,
"loss": 1.2402,
"step": 439
},
{
"epoch": 11.89,
"learning_rate": 8.8e-05,
"loss": 1.3449,
"step": 440
},
{
"epoch": 11.92,
"learning_rate": 8.819999999999999e-05,
"loss": 1.1668,
"step": 441
},
{
"epoch": 11.95,
"learning_rate": 8.84e-05,
"loss": 1.2628,
"step": 442
},
{
"epoch": 11.97,
"learning_rate": 8.86e-05,
"loss": 1.1524,
"step": 443
},
{
"epoch": 12.0,
"learning_rate": 8.879999999999999e-05,
"loss": 1.2417,
"step": 444
},
{
"epoch": 12.03,
"learning_rate": 8.9e-05,
"loss": 1.2384,
"step": 445
},
{
"epoch": 12.05,
"learning_rate": 8.92e-05,
"loss": 1.1749,
"step": 446
},
{
"epoch": 12.08,
"learning_rate": 8.939999999999999e-05,
"loss": 1.0134,
"step": 447
},
{
"epoch": 12.11,
"learning_rate": 8.96e-05,
"loss": 1.0005,
"step": 448
},
{
"epoch": 12.14,
"learning_rate": 8.98e-05,
"loss": 1.1042,
"step": 449
},
{
"epoch": 12.16,
"learning_rate": 8.999999999999999e-05,
"loss": 1.186,
"step": 450
},
{
"epoch": 12.19,
"learning_rate": 9.02e-05,
"loss": 1.011,
"step": 451
},
{
"epoch": 12.22,
"learning_rate": 9.039999999999999e-05,
"loss": 1.2734,
"step": 452
},
{
"epoch": 12.24,
"learning_rate": 9.059999999999999e-05,
"loss": 1.1241,
"step": 453
},
{
"epoch": 12.27,
"learning_rate": 9.079999999999998e-05,
"loss": 1.0502,
"step": 454
},
{
"epoch": 12.3,
"learning_rate": 9.099999999999999e-05,
"loss": 1.1955,
"step": 455
},
{
"epoch": 12.32,
"learning_rate": 9.12e-05,
"loss": 1.1248,
"step": 456
},
{
"epoch": 12.35,
"learning_rate": 9.139999999999999e-05,
"loss": 0.936,
"step": 457
},
{
"epoch": 12.38,
"learning_rate": 9.159999999999999e-05,
"loss": 1.1369,
"step": 458
},
{
"epoch": 12.41,
"learning_rate": 9.18e-05,
"loss": 1.1986,
"step": 459
},
{
"epoch": 12.43,
"learning_rate": 9.199999999999999e-05,
"loss": 1.2611,
"step": 460
},
{
"epoch": 12.46,
"learning_rate": 9.219999999999999e-05,
"loss": 0.9961,
"step": 461
},
{
"epoch": 12.49,
"learning_rate": 9.24e-05,
"loss": 1.0504,
"step": 462
},
{
"epoch": 12.51,
"learning_rate": 9.259999999999999e-05,
"loss": 1.2036,
"step": 463
},
{
"epoch": 12.54,
"learning_rate": 9.279999999999999e-05,
"loss": 1.1906,
"step": 464
},
{
"epoch": 12.57,
"learning_rate": 9.3e-05,
"loss": 0.9564,
"step": 465
},
{
"epoch": 12.59,
"learning_rate": 9.319999999999999e-05,
"loss": 1.063,
"step": 466
},
{
"epoch": 12.62,
"learning_rate": 9.34e-05,
"loss": 1.1568,
"step": 467
},
{
"epoch": 12.65,
"learning_rate": 9.36e-05,
"loss": 0.9901,
"step": 468
},
{
"epoch": 12.68,
"learning_rate": 9.379999999999999e-05,
"loss": 0.9842,
"step": 469
},
{
"epoch": 12.7,
"learning_rate": 9.4e-05,
"loss": 0.9332,
"step": 470
},
{
"epoch": 12.73,
"learning_rate": 9.419999999999999e-05,
"loss": 0.8594,
"step": 471
},
{
"epoch": 12.76,
"learning_rate": 9.439999999999999e-05,
"loss": 1.075,
"step": 472
},
{
"epoch": 12.78,
"learning_rate": 9.46e-05,
"loss": 0.9111,
"step": 473
},
{
"epoch": 12.81,
"learning_rate": 9.479999999999999e-05,
"loss": 0.7974,
"step": 474
},
{
"epoch": 12.84,
"learning_rate": 9.499999999999999e-05,
"loss": 1.0316,
"step": 475
},
{
"epoch": 12.86,
"learning_rate": 9.52e-05,
"loss": 1.2055,
"step": 476
},
{
"epoch": 12.89,
"learning_rate": 9.539999999999999e-05,
"loss": 0.8806,
"step": 477
},
{
"epoch": 12.92,
"learning_rate": 9.559999999999999e-05,
"loss": 0.8681,
"step": 478
},
{
"epoch": 12.95,
"learning_rate": 9.58e-05,
"loss": 1.2315,
"step": 479
},
{
"epoch": 12.97,
"learning_rate": 9.599999999999999e-05,
"loss": 1.171,
"step": 480
},
{
"epoch": 13.0,
"learning_rate": 9.62e-05,
"loss": 1.076,
"step": 481
},
{
"epoch": 13.03,
"learning_rate": 9.64e-05,
"loss": 1.0727,
"step": 482
},
{
"epoch": 13.05,
"learning_rate": 9.659999999999999e-05,
"loss": 0.7977,
"step": 483
},
{
"epoch": 13.08,
"learning_rate": 9.68e-05,
"loss": 0.8483,
"step": 484
},
{
"epoch": 13.11,
"learning_rate": 9.699999999999999e-05,
"loss": 1.0143,
"step": 485
},
{
"epoch": 13.14,
"learning_rate": 9.719999999999999e-05,
"loss": 0.8356,
"step": 486
},
{
"epoch": 13.16,
"learning_rate": 9.74e-05,
"loss": 0.6896,
"step": 487
},
{
"epoch": 13.19,
"learning_rate": 9.759999999999999e-05,
"loss": 0.7748,
"step": 488
},
{
"epoch": 13.22,
"learning_rate": 9.779999999999999e-05,
"loss": 0.8735,
"step": 489
},
{
"epoch": 13.24,
"learning_rate": 9.799999999999998e-05,
"loss": 0.9873,
"step": 490
},
{
"epoch": 13.27,
"learning_rate": 9.819999999999999e-05,
"loss": 0.9813,
"step": 491
},
{
"epoch": 13.3,
"learning_rate": 9.839999999999999e-05,
"loss": 0.7224,
"step": 492
},
{
"epoch": 13.32,
"learning_rate": 9.859999999999998e-05,
"loss": 0.9018,
"step": 493
},
{
"epoch": 13.35,
"learning_rate": 9.879999999999999e-05,
"loss": 1.0302,
"step": 494
},
{
"epoch": 13.38,
"learning_rate": 9.9e-05,
"loss": 0.9939,
"step": 495
},
{
"epoch": 13.41,
"learning_rate": 9.919999999999999e-05,
"loss": 1.057,
"step": 496
},
{
"epoch": 13.43,
"learning_rate": 9.939999999999999e-05,
"loss": 0.9274,
"step": 497
},
{
"epoch": 13.46,
"learning_rate": 9.96e-05,
"loss": 1.0053,
"step": 498
},
{
"epoch": 13.49,
"learning_rate": 9.979999999999999e-05,
"loss": 0.8242,
"step": 499
},
{
"epoch": 13.51,
"learning_rate": 9.999999999999999e-05,
"loss": 0.8972,
"step": 500
},
{
"epoch": 13.54,
"learning_rate": 0.0001002,
"loss": 0.8247,
"step": 501
},
{
"epoch": 13.57,
"learning_rate": 0.00010039999999999999,
"loss": 0.6772,
"step": 502
},
{
"epoch": 13.59,
"learning_rate": 0.00010059999999999999,
"loss": 0.7767,
"step": 503
},
{
"epoch": 13.62,
"learning_rate": 0.0001008,
"loss": 0.919,
"step": 504
},
{
"epoch": 13.65,
"learning_rate": 0.00010099999999999999,
"loss": 0.8063,
"step": 505
},
{
"epoch": 13.68,
"learning_rate": 0.0001012,
"loss": 0.6723,
"step": 506
},
{
"epoch": 13.7,
"learning_rate": 0.0001014,
"loss": 0.885,
"step": 507
},
{
"epoch": 13.73,
"learning_rate": 0.00010159999999999999,
"loss": 0.5874,
"step": 508
},
{
"epoch": 13.76,
"learning_rate": 0.00010179999999999998,
"loss": 0.789,
"step": 509
},
{
"epoch": 13.78,
"learning_rate": 0.000102,
"loss": 0.8326,
"step": 510
},
{
"epoch": 13.81,
"learning_rate": 0.00010219999999999999,
"loss": 0.5933,
"step": 511
},
{
"epoch": 13.84,
"learning_rate": 0.00010239999999999998,
"loss": 0.7928,
"step": 512
},
{
"epoch": 13.86,
"learning_rate": 0.0001026,
"loss": 0.8363,
"step": 513
},
{
"epoch": 13.89,
"learning_rate": 0.00010279999999999999,
"loss": 0.6847,
"step": 514
},
{
"epoch": 13.92,
"learning_rate": 0.00010299999999999998,
"loss": 0.9032,
"step": 515
},
{
"epoch": 13.95,
"learning_rate": 0.00010319999999999999,
"loss": 0.9369,
"step": 516
},
{
"epoch": 13.97,
"learning_rate": 0.00010339999999999999,
"loss": 0.7048,
"step": 517
},
{
"epoch": 14.0,
"learning_rate": 0.00010359999999999998,
"loss": 0.723,
"step": 518
},
{
"epoch": 14.03,
"learning_rate": 0.00010379999999999999,
"loss": 0.7403,
"step": 519
},
{
"epoch": 14.05,
"learning_rate": 0.000104,
"loss": 0.7422,
"step": 520
},
{
"epoch": 14.08,
"learning_rate": 0.00010419999999999998,
"loss": 0.5352,
"step": 521
},
{
"epoch": 14.11,
"learning_rate": 0.00010439999999999999,
"loss": 0.6564,
"step": 522
},
{
"epoch": 14.14,
"learning_rate": 0.0001046,
"loss": 0.542,
"step": 523
},
{
"epoch": 14.16,
"learning_rate": 0.00010479999999999999,
"loss": 0.4743,
"step": 524
},
{
"epoch": 14.19,
"learning_rate": 0.00010499999999999999,
"loss": 0.6545,
"step": 525
},
{
"epoch": 14.22,
"learning_rate": 0.0001052,
"loss": 0.5965,
"step": 526
},
{
"epoch": 14.24,
"learning_rate": 0.00010539999999999999,
"loss": 0.6255,
"step": 527
},
{
"epoch": 14.27,
"learning_rate": 0.00010559999999999998,
"loss": 0.6232,
"step": 528
},
{
"epoch": 14.3,
"learning_rate": 0.0001058,
"loss": 0.6419,
"step": 529
},
{
"epoch": 14.32,
"learning_rate": 0.00010599999999999999,
"loss": 0.4861,
"step": 530
},
{
"epoch": 14.35,
"learning_rate": 0.00010619999999999998,
"loss": 0.5955,
"step": 531
},
{
"epoch": 14.38,
"learning_rate": 0.0001064,
"loss": 0.6042,
"step": 532
},
{
"epoch": 14.41,
"learning_rate": 0.00010659999999999999,
"loss": 0.4796,
"step": 533
},
{
"epoch": 14.43,
"learning_rate": 0.00010679999999999998,
"loss": 0.711,
"step": 534
},
{
"epoch": 14.46,
"learning_rate": 0.000107,
"loss": 1.0621,
"step": 535
},
{
"epoch": 14.49,
"learning_rate": 0.00010719999999999999,
"loss": 0.6416,
"step": 536
},
{
"epoch": 14.51,
"learning_rate": 0.00010739999999999998,
"loss": 0.6445,
"step": 537
},
{
"epoch": 14.54,
"learning_rate": 0.0001076,
"loss": 0.554,
"step": 538
},
{
"epoch": 14.57,
"learning_rate": 0.00010779999999999999,
"loss": 0.5899,
"step": 539
},
{
"epoch": 14.59,
"learning_rate": 0.00010799999999999998,
"loss": 0.8029,
"step": 540
},
{
"epoch": 14.62,
"learning_rate": 0.0001082,
"loss": 0.5827,
"step": 541
},
{
"epoch": 14.65,
"learning_rate": 0.00010839999999999999,
"loss": 0.6803,
"step": 542
},
{
"epoch": 14.68,
"learning_rate": 0.00010859999999999998,
"loss": 0.7156,
"step": 543
},
{
"epoch": 14.7,
"learning_rate": 0.0001088,
"loss": 0.7321,
"step": 544
},
{
"epoch": 14.73,
"learning_rate": 0.00010899999999999999,
"loss": 0.6935,
"step": 545
},
{
"epoch": 14.76,
"learning_rate": 0.00010919999999999998,
"loss": 0.7959,
"step": 546
},
{
"epoch": 14.78,
"learning_rate": 0.00010939999999999998,
"loss": 0.8499,
"step": 547
},
{
"epoch": 14.81,
"learning_rate": 0.0001096,
"loss": 0.5122,
"step": 548
},
{
"epoch": 14.84,
"learning_rate": 0.00010979999999999999,
"loss": 0.6553,
"step": 549
},
{
"epoch": 14.86,
"learning_rate": 0.00010999999999999998,
"loss": 0.5838,
"step": 550
},
{
"epoch": 14.89,
"learning_rate": 0.0001102,
"loss": 0.7886,
"step": 551
},
{
"epoch": 14.92,
"learning_rate": 0.00011039999999999999,
"loss": 0.5856,
"step": 552
},
{
"epoch": 14.95,
"learning_rate": 0.00011059999999999998,
"loss": 0.9775,
"step": 553
},
{
"epoch": 14.97,
"learning_rate": 0.0001108,
"loss": 0.6251,
"step": 554
},
{
"epoch": 15.0,
"learning_rate": 0.00011099999999999999,
"loss": 0.5156,
"step": 555
},
{
"epoch": 15.03,
"learning_rate": 0.00011119999999999998,
"loss": 0.4644,
"step": 556
},
{
"epoch": 15.05,
"learning_rate": 0.0001114,
"loss": 0.6939,
"step": 557
},
{
"epoch": 15.08,
"learning_rate": 0.00011159999999999999,
"loss": 0.598,
"step": 558
},
{
"epoch": 15.11,
"learning_rate": 0.00011179999999999998,
"loss": 0.6619,
"step": 559
},
{
"epoch": 15.14,
"learning_rate": 0.000112,
"loss": 0.604,
"step": 560
},
{
"epoch": 15.16,
"learning_rate": 0.00011219999999999999,
"loss": 0.4523,
"step": 561
},
{
"epoch": 15.19,
"learning_rate": 0.00011239999999999998,
"loss": 0.7339,
"step": 562
},
{
"epoch": 15.22,
"learning_rate": 0.0001126,
"loss": 0.7099,
"step": 563
},
{
"epoch": 15.24,
"learning_rate": 0.00011279999999999999,
"loss": 0.4526,
"step": 564
},
{
"epoch": 15.27,
"learning_rate": 0.00011299999999999998,
"loss": 0.5945,
"step": 565
},
{
"epoch": 15.3,
"learning_rate": 0.0001132,
"loss": 0.5931,
"step": 566
},
{
"epoch": 15.32,
"learning_rate": 0.00011339999999999999,
"loss": 0.4768,
"step": 567
},
{
"epoch": 15.35,
"learning_rate": 0.00011359999999999998,
"loss": 0.4294,
"step": 568
},
{
"epoch": 15.38,
"learning_rate": 0.0001138,
"loss": 0.7857,
"step": 569
},
{
"epoch": 15.41,
"learning_rate": 0.00011399999999999999,
"loss": 0.5383,
"step": 570
},
{
"epoch": 15.43,
"learning_rate": 0.00011419999999999998,
"loss": 0.7131,
"step": 571
},
{
"epoch": 15.46,
"learning_rate": 0.0001144,
"loss": 0.8794,
"step": 572
},
{
"epoch": 15.49,
"learning_rate": 0.0001146,
"loss": 0.6101,
"step": 573
},
{
"epoch": 15.51,
"learning_rate": 0.00011479999999999999,
"loss": 0.712,
"step": 574
},
{
"epoch": 15.54,
"learning_rate": 0.000115,
"loss": 0.5399,
"step": 575
},
{
"epoch": 15.57,
"learning_rate": 0.0001152,
"loss": 0.4222,
"step": 576
},
{
"epoch": 15.59,
"learning_rate": 0.00011539999999999999,
"loss": 0.6332,
"step": 577
},
{
"epoch": 15.62,
"learning_rate": 0.0001156,
"loss": 0.4206,
"step": 578
},
{
"epoch": 15.65,
"learning_rate": 0.0001158,
"loss": 0.5363,
"step": 579
},
{
"epoch": 15.68,
"learning_rate": 0.00011599999999999999,
"loss": 0.5044,
"step": 580
},
{
"epoch": 15.7,
"learning_rate": 0.00011619999999999998,
"loss": 0.5931,
"step": 581
},
{
"epoch": 15.73,
"learning_rate": 0.0001164,
"loss": 0.5315,
"step": 582
},
{
"epoch": 15.76,
"learning_rate": 0.00011659999999999999,
"loss": 0.4,
"step": 583
},
{
"epoch": 15.78,
"learning_rate": 0.00011679999999999998,
"loss": 0.4896,
"step": 584
},
{
"epoch": 15.81,
"learning_rate": 0.000117,
"loss": 0.4461,
"step": 585
},
{
"epoch": 15.84,
"learning_rate": 0.00011719999999999999,
"loss": 0.6034,
"step": 586
},
{
"epoch": 15.86,
"learning_rate": 0.00011739999999999998,
"loss": 0.5388,
"step": 587
},
{
"epoch": 15.89,
"learning_rate": 0.0001176,
"loss": 0.4941,
"step": 588
},
{
"epoch": 15.92,
"learning_rate": 0.00011779999999999999,
"loss": 0.5699,
"step": 589
},
{
"epoch": 15.95,
"learning_rate": 0.00011799999999999998,
"loss": 0.6906,
"step": 590
},
{
"epoch": 15.97,
"learning_rate": 0.0001182,
"loss": 0.3726,
"step": 591
},
{
"epoch": 16.0,
"learning_rate": 0.00011839999999999999,
"loss": 0.2104,
"step": 592
},
{
"epoch": 16.03,
"learning_rate": 0.00011859999999999998,
"loss": 0.4181,
"step": 593
},
{
"epoch": 16.05,
"learning_rate": 0.0001188,
"loss": 0.4097,
"step": 594
},
{
"epoch": 16.08,
"learning_rate": 0.00011899999999999999,
"loss": 0.3204,
"step": 595
},
{
"epoch": 16.11,
"learning_rate": 0.00011919999999999998,
"loss": 0.5511,
"step": 596
},
{
"epoch": 16.14,
"learning_rate": 0.0001194,
"loss": 0.5371,
"step": 597
},
{
"epoch": 16.16,
"learning_rate": 0.0001196,
"loss": 0.4971,
"step": 598
},
{
"epoch": 16.19,
"learning_rate": 0.00011979999999999998,
"loss": 0.4602,
"step": 599
},
{
"epoch": 16.22,
"learning_rate": 0.00011999999999999999,
"loss": 0.6177,
"step": 600
},
{
"epoch": 16.22,
"eval_accuracy": 0.801098230628432,
"eval_f1": 0.8052236537807262,
"eval_loss": 0.7927422523498535,
"eval_runtime": 146.137,
"eval_samples_per_second": 11.216,
"eval_steps_per_second": 0.705,
"step": 600
},
{
"epoch": 16.24,
"learning_rate": 0.0001202,
"loss": 0.3458,
"step": 601
},
{
"epoch": 16.27,
"learning_rate": 0.00012039999999999999,
"loss": 0.7875,
"step": 602
},
{
"epoch": 16.3,
"learning_rate": 0.00012059999999999999,
"loss": 0.7417,
"step": 603
},
{
"epoch": 16.32,
"learning_rate": 0.0001208,
"loss": 0.3318,
"step": 604
},
{
"epoch": 16.35,
"learning_rate": 0.00012099999999999999,
"loss": 0.41,
"step": 605
},
{
"epoch": 16.38,
"learning_rate": 0.00012119999999999999,
"loss": 0.3526,
"step": 606
},
{
"epoch": 16.41,
"learning_rate": 0.0001214,
"loss": 0.3536,
"step": 607
},
{
"epoch": 16.43,
"learning_rate": 0.00012159999999999999,
"loss": 0.2392,
"step": 608
},
{
"epoch": 16.46,
"learning_rate": 0.00012179999999999999,
"loss": 0.438,
"step": 609
},
{
"epoch": 16.49,
"learning_rate": 0.00012179999999999999,
"loss": 0.4375,
"step": 610
},
{
"epoch": 16.51,
"learning_rate": 0.000122,
"loss": 0.4823,
"step": 611
},
{
"epoch": 16.54,
"learning_rate": 0.0001222,
"loss": 0.4186,
"step": 612
},
{
"epoch": 16.57,
"learning_rate": 0.0001224,
"loss": 0.3106,
"step": 613
},
{
"epoch": 16.59,
"learning_rate": 0.0001226,
"loss": 0.5899,
"step": 614
},
{
"epoch": 16.62,
"learning_rate": 0.00012279999999999998,
"loss": 0.4254,
"step": 615
},
{
"epoch": 16.65,
"learning_rate": 0.00012299999999999998,
"loss": 0.3434,
"step": 616
},
{
"epoch": 16.68,
"learning_rate": 0.00012319999999999999,
"loss": 0.4242,
"step": 617
},
{
"epoch": 16.7,
"learning_rate": 0.0001234,
"loss": 0.3927,
"step": 618
},
{
"epoch": 16.73,
"learning_rate": 0.0001236,
"loss": 0.465,
"step": 619
},
{
"epoch": 16.76,
"learning_rate": 0.0001238,
"loss": 0.4632,
"step": 620
},
{
"epoch": 16.78,
"learning_rate": 0.00012399999999999998,
"loss": 0.2679,
"step": 621
},
{
"epoch": 16.81,
"learning_rate": 0.00012419999999999998,
"loss": 0.3658,
"step": 622
},
{
"epoch": 16.84,
"learning_rate": 0.0001244,
"loss": 0.3901,
"step": 623
},
{
"epoch": 16.86,
"learning_rate": 0.0001246,
"loss": 0.569,
"step": 624
},
{
"epoch": 16.89,
"learning_rate": 0.00012479999999999997,
"loss": 0.2357,
"step": 625
},
{
"epoch": 16.92,
"learning_rate": 0.000125,
"loss": 0.4753,
"step": 626
},
{
"epoch": 16.95,
"learning_rate": 0.00012519999999999998,
"loss": 0.6153,
"step": 627
},
{
"epoch": 16.97,
"learning_rate": 0.00012539999999999999,
"loss": 0.4527,
"step": 628
},
{
"epoch": 17.0,
"learning_rate": 0.0001256,
"loss": 0.5302,
"step": 629
},
{
"epoch": 17.03,
"learning_rate": 0.0001258,
"loss": 0.5312,
"step": 630
},
{
"epoch": 17.05,
"learning_rate": 0.00012599999999999997,
"loss": 0.5452,
"step": 631
},
{
"epoch": 17.08,
"learning_rate": 0.0001262,
"loss": 0.2832,
"step": 632
},
{
"epoch": 17.11,
"learning_rate": 0.00012639999999999998,
"loss": 0.4857,
"step": 633
},
{
"epoch": 17.14,
"learning_rate": 0.0001266,
"loss": 0.4472,
"step": 634
},
{
"epoch": 17.16,
"learning_rate": 0.0001268,
"loss": 0.4373,
"step": 635
},
{
"epoch": 17.19,
"learning_rate": 0.000127,
"loss": 0.4742,
"step": 636
},
{
"epoch": 17.22,
"learning_rate": 0.00012719999999999997,
"loss": 0.4723,
"step": 637
},
{
"epoch": 17.24,
"learning_rate": 0.0001274,
"loss": 0.4061,
"step": 638
},
{
"epoch": 17.27,
"learning_rate": 0.00012759999999999998,
"loss": 0.5199,
"step": 639
},
{
"epoch": 17.3,
"learning_rate": 0.0001278,
"loss": 0.3767,
"step": 640
},
{
"epoch": 17.32,
"learning_rate": 0.000128,
"loss": 0.284,
"step": 641
},
{
"epoch": 17.35,
"learning_rate": 0.0001282,
"loss": 0.5199,
"step": 642
},
{
"epoch": 17.38,
"learning_rate": 0.00012839999999999998,
"loss": 0.4451,
"step": 643
},
{
"epoch": 17.41,
"learning_rate": 0.00012859999999999998,
"loss": 0.4515,
"step": 644
},
{
"epoch": 17.43,
"learning_rate": 0.0001288,
"loss": 0.384,
"step": 645
},
{
"epoch": 17.46,
"learning_rate": 0.000129,
"loss": 0.3236,
"step": 646
},
{
"epoch": 17.49,
"learning_rate": 0.00012919999999999997,
"loss": 0.5444,
"step": 647
},
{
"epoch": 17.51,
"learning_rate": 0.0001294,
"loss": 0.4563,
"step": 648
},
{
"epoch": 17.54,
"learning_rate": 0.00012959999999999998,
"loss": 0.4376,
"step": 649
},
{
"epoch": 17.57,
"learning_rate": 0.00012979999999999998,
"loss": 0.2369,
"step": 650
},
{
"epoch": 17.59,
"learning_rate": 0.00013,
"loss": 0.4034,
"step": 651
},
{
"epoch": 17.62,
"learning_rate": 0.0001302,
"loss": 0.4463,
"step": 652
},
{
"epoch": 17.65,
"learning_rate": 0.00013039999999999997,
"loss": 0.2272,
"step": 653
},
{
"epoch": 17.68,
"learning_rate": 0.0001306,
"loss": 0.3984,
"step": 654
},
{
"epoch": 17.7,
"learning_rate": 0.00013079999999999998,
"loss": 0.7102,
"step": 655
},
{
"epoch": 17.73,
"learning_rate": 0.00013099999999999999,
"loss": 0.3144,
"step": 656
},
{
"epoch": 17.76,
"learning_rate": 0.0001312,
"loss": 0.4706,
"step": 657
},
{
"epoch": 17.78,
"learning_rate": 0.0001314,
"loss": 0.514,
"step": 658
},
{
"epoch": 17.81,
"learning_rate": 0.00013159999999999997,
"loss": 0.4538,
"step": 659
},
{
"epoch": 17.84,
"learning_rate": 0.0001318,
"loss": 0.3891,
"step": 660
},
{
"epoch": 17.86,
"learning_rate": 0.00013199999999999998,
"loss": 0.5258,
"step": 661
},
{
"epoch": 17.89,
"learning_rate": 0.0001322,
"loss": 0.4321,
"step": 662
},
{
"epoch": 17.92,
"learning_rate": 0.0001324,
"loss": 0.3134,
"step": 663
},
{
"epoch": 17.95,
"learning_rate": 0.0001326,
"loss": 0.3381,
"step": 664
},
{
"epoch": 17.97,
"learning_rate": 0.00013279999999999998,
"loss": 0.3088,
"step": 665
},
{
"epoch": 18.0,
"learning_rate": 0.000133,
"loss": 0.3347,
"step": 666
},
{
"epoch": 18.03,
"learning_rate": 0.00013319999999999999,
"loss": 0.6366,
"step": 667
},
{
"epoch": 18.05,
"learning_rate": 0.0001334,
"loss": 0.5807,
"step": 668
},
{
"epoch": 18.08,
"learning_rate": 0.0001336,
"loss": 0.2062,
"step": 669
},
{
"epoch": 18.11,
"learning_rate": 0.0001338,
"loss": 0.2841,
"step": 670
},
{
"epoch": 18.14,
"learning_rate": 0.00013399999999999998,
"loss": 0.3044,
"step": 671
},
{
"epoch": 18.16,
"learning_rate": 0.0001342,
"loss": 0.3935,
"step": 672
},
{
"epoch": 18.19,
"learning_rate": 0.0001344,
"loss": 0.3646,
"step": 673
},
{
"epoch": 18.22,
"learning_rate": 0.0001346,
"loss": 0.61,
"step": 674
},
{
"epoch": 18.24,
"learning_rate": 0.00013479999999999997,
"loss": 0.2328,
"step": 675
},
{
"epoch": 18.27,
"learning_rate": 0.000135,
"loss": 0.3327,
"step": 676
},
{
"epoch": 18.3,
"learning_rate": 0.00013519999999999998,
"loss": 0.2878,
"step": 677
},
{
"epoch": 18.32,
"learning_rate": 0.00013539999999999998,
"loss": 0.2468,
"step": 678
},
{
"epoch": 18.35,
"learning_rate": 0.0001356,
"loss": 0.2945,
"step": 679
},
{
"epoch": 18.38,
"learning_rate": 0.0001358,
"loss": 0.4131,
"step": 680
},
{
"epoch": 18.41,
"learning_rate": 0.00013599999999999997,
"loss": 0.2302,
"step": 681
},
{
"epoch": 18.43,
"learning_rate": 0.0001362,
"loss": 0.3082,
"step": 682
},
{
"epoch": 18.46,
"learning_rate": 0.00013639999999999998,
"loss": 0.4246,
"step": 683
},
{
"epoch": 18.49,
"learning_rate": 0.00013659999999999999,
"loss": 0.4966,
"step": 684
},
{
"epoch": 18.51,
"learning_rate": 0.0001368,
"loss": 0.3638,
"step": 685
},
{
"epoch": 18.54,
"learning_rate": 0.000137,
"loss": 0.631,
"step": 686
},
{
"epoch": 18.57,
"learning_rate": 0.00013719999999999997,
"loss": 0.5196,
"step": 687
},
{
"epoch": 18.59,
"learning_rate": 0.0001374,
"loss": 0.401,
"step": 688
},
{
"epoch": 18.62,
"learning_rate": 0.00013759999999999998,
"loss": 0.3046,
"step": 689
},
{
"epoch": 18.65,
"learning_rate": 0.0001378,
"loss": 0.1823,
"step": 690
},
{
"epoch": 18.68,
"learning_rate": 0.000138,
"loss": 0.4359,
"step": 691
},
{
"epoch": 18.7,
"learning_rate": 0.0001382,
"loss": 0.3963,
"step": 692
},
{
"epoch": 18.73,
"learning_rate": 0.00013839999999999998,
"loss": 0.4171,
"step": 693
},
{
"epoch": 18.76,
"learning_rate": 0.0001386,
"loss": 0.5997,
"step": 694
},
{
"epoch": 18.78,
"learning_rate": 0.00013879999999999999,
"loss": 0.6568,
"step": 695
},
{
"epoch": 18.81,
"learning_rate": 0.000139,
"loss": 0.3035,
"step": 696
},
{
"epoch": 18.84,
"learning_rate": 0.0001392,
"loss": 0.2032,
"step": 697
},
{
"epoch": 18.86,
"learning_rate": 0.0001394,
"loss": 0.4966,
"step": 698
},
{
"epoch": 18.89,
"learning_rate": 0.00013959999999999998,
"loss": 0.3761,
"step": 699
},
{
"epoch": 18.92,
"learning_rate": 0.00013979999999999998,
"loss": 0.5097,
"step": 700
},
{
"epoch": 18.95,
"learning_rate": 0.00014,
"loss": 0.3576,
"step": 701
},
{
"epoch": 18.97,
"learning_rate": 0.0001402,
"loss": 0.1785,
"step": 702
},
{
"epoch": 19.0,
"learning_rate": 0.0001404,
"loss": 0.6038,
"step": 703
},
{
"epoch": 19.03,
"learning_rate": 0.0001406,
"loss": 0.2656,
"step": 704
},
{
"epoch": 19.05,
"learning_rate": 0.00014079999999999998,
"loss": 0.2778,
"step": 705
},
{
"epoch": 19.08,
"learning_rate": 0.00014099999999999998,
"loss": 0.3205,
"step": 706
},
{
"epoch": 19.11,
"learning_rate": 0.0001412,
"loss": 0.254,
"step": 707
},
{
"epoch": 19.14,
"learning_rate": 0.0001414,
"loss": 0.2903,
"step": 708
},
{
"epoch": 19.16,
"learning_rate": 0.00014159999999999997,
"loss": 0.2903,
"step": 709
},
{
"epoch": 19.19,
"learning_rate": 0.0001418,
"loss": 0.3095,
"step": 710
},
{
"epoch": 19.22,
"learning_rate": 0.00014199999999999998,
"loss": 0.1502,
"step": 711
},
{
"epoch": 19.24,
"learning_rate": 0.0001422,
"loss": 0.4493,
"step": 712
},
{
"epoch": 19.27,
"learning_rate": 0.0001424,
"loss": 0.3919,
"step": 713
},
{
"epoch": 19.3,
"learning_rate": 0.0001426,
"loss": 0.2116,
"step": 714
},
{
"epoch": 19.32,
"learning_rate": 0.00014279999999999997,
"loss": 0.4176,
"step": 715
},
{
"epoch": 19.35,
"learning_rate": 0.00014299999999999998,
"loss": 0.3053,
"step": 716
},
{
"epoch": 19.38,
"learning_rate": 0.00014319999999999998,
"loss": 0.4521,
"step": 717
},
{
"epoch": 19.41,
"learning_rate": 0.0001434,
"loss": 0.3508,
"step": 718
},
{
"epoch": 19.43,
"learning_rate": 0.0001436,
"loss": 0.4663,
"step": 719
},
{
"epoch": 19.46,
"learning_rate": 0.0001438,
"loss": 0.4144,
"step": 720
},
{
"epoch": 19.49,
"learning_rate": 0.00014399999999999998,
"loss": 0.2802,
"step": 721
},
{
"epoch": 19.51,
"learning_rate": 0.00014419999999999998,
"loss": 0.5107,
"step": 722
},
{
"epoch": 19.54,
"learning_rate": 0.00014439999999999999,
"loss": 0.5556,
"step": 723
},
{
"epoch": 19.57,
"learning_rate": 0.0001446,
"loss": 0.3472,
"step": 724
},
{
"epoch": 19.59,
"learning_rate": 0.0001448,
"loss": 0.2966,
"step": 725
},
{
"epoch": 19.62,
"learning_rate": 0.000145,
"loss": 0.496,
"step": 726
},
{
"epoch": 19.65,
"learning_rate": 0.00014519999999999998,
"loss": 0.2283,
"step": 727
},
{
"epoch": 19.68,
"learning_rate": 0.00014539999999999998,
"loss": 0.3437,
"step": 728
},
{
"epoch": 19.7,
"learning_rate": 0.0001456,
"loss": 0.5037,
"step": 729
},
{
"epoch": 19.73,
"learning_rate": 0.0001458,
"loss": 0.7449,
"step": 730
},
{
"epoch": 19.76,
"learning_rate": 0.000146,
"loss": 0.3824,
"step": 731
},
{
"epoch": 19.78,
"learning_rate": 0.0001462,
"loss": 0.3026,
"step": 732
},
{
"epoch": 19.81,
"learning_rate": 0.00014639999999999998,
"loss": 0.173,
"step": 733
},
{
"epoch": 19.84,
"learning_rate": 0.00014659999999999999,
"loss": 0.6184,
"step": 734
},
{
"epoch": 19.86,
"learning_rate": 0.0001468,
"loss": 0.3737,
"step": 735
},
{
"epoch": 19.89,
"learning_rate": 0.000147,
"loss": 0.3356,
"step": 736
},
{
"epoch": 19.92,
"learning_rate": 0.00014719999999999997,
"loss": 0.3988,
"step": 737
},
{
"epoch": 19.95,
"learning_rate": 0.00014739999999999998,
"loss": 0.4557,
"step": 738
},
{
"epoch": 19.97,
"learning_rate": 0.00014759999999999998,
"loss": 0.2592,
"step": 739
},
{
"epoch": 20.0,
"learning_rate": 0.0001478,
"loss": 0.1012,
"step": 740
},
{
"epoch": 20.03,
"learning_rate": 0.000148,
"loss": 0.3727,
"step": 741
},
{
"epoch": 20.05,
"learning_rate": 0.0001482,
"loss": 0.4883,
"step": 742
},
{
"epoch": 20.08,
"learning_rate": 0.00014839999999999998,
"loss": 0.4711,
"step": 743
},
{
"epoch": 20.11,
"learning_rate": 0.00014859999999999998,
"loss": 0.2632,
"step": 744
},
{
"epoch": 20.14,
"learning_rate": 0.00014879999999999998,
"loss": 0.4035,
"step": 745
},
{
"epoch": 20.16,
"learning_rate": 0.000149,
"loss": 0.4132,
"step": 746
},
{
"epoch": 20.19,
"learning_rate": 0.0001492,
"loss": 0.2502,
"step": 747
},
{
"epoch": 20.22,
"learning_rate": 0.0001494,
"loss": 0.415,
"step": 748
},
{
"epoch": 20.24,
"learning_rate": 0.00014959999999999998,
"loss": 0.3357,
"step": 749
},
{
"epoch": 20.27,
"learning_rate": 0.00014979999999999998,
"loss": 0.213,
"step": 750
},
{
"epoch": 20.3,
"learning_rate": 0.00015,
"loss": 0.3843,
"step": 751
},
{
"epoch": 20.32,
"learning_rate": 0.0001502,
"loss": 0.1665,
"step": 752
},
{
"epoch": 20.35,
"learning_rate": 0.00015039999999999997,
"loss": 0.1855,
"step": 753
},
{
"epoch": 20.38,
"learning_rate": 0.00015059999999999997,
"loss": 0.3982,
"step": 754
},
{
"epoch": 20.41,
"learning_rate": 0.0001508,
"loss": 0.2085,
"step": 755
},
{
"epoch": 20.43,
"learning_rate": 0.00015099999999999998,
"loss": 0.2918,
"step": 756
},
{
"epoch": 20.46,
"learning_rate": 0.0001512,
"loss": 0.5367,
"step": 757
},
{
"epoch": 20.49,
"learning_rate": 0.0001514,
"loss": 0.3875,
"step": 758
},
{
"epoch": 20.51,
"learning_rate": 0.00015159999999999997,
"loss": 0.3402,
"step": 759
},
{
"epoch": 20.54,
"learning_rate": 0.00015179999999999998,
"loss": 0.4142,
"step": 760
},
{
"epoch": 20.57,
"learning_rate": 0.000152,
"loss": 0.3972,
"step": 761
},
{
"epoch": 20.59,
"learning_rate": 0.00015219999999999999,
"loss": 0.3906,
"step": 762
},
{
"epoch": 20.62,
"learning_rate": 0.0001524,
"loss": 0.2564,
"step": 763
},
{
"epoch": 20.65,
"learning_rate": 0.0001526,
"loss": 0.1959,
"step": 764
},
{
"epoch": 20.68,
"learning_rate": 0.00015279999999999997,
"loss": 0.2067,
"step": 765
},
{
"epoch": 20.7,
"learning_rate": 0.00015299999999999998,
"loss": 0.2827,
"step": 766
},
{
"epoch": 20.73,
"learning_rate": 0.0001532,
"loss": 0.4067,
"step": 767
},
{
"epoch": 20.76,
"learning_rate": 0.0001534,
"loss": 0.2759,
"step": 768
},
{
"epoch": 20.78,
"learning_rate": 0.0001536,
"loss": 0.6458,
"step": 769
},
{
"epoch": 20.81,
"learning_rate": 0.0001538,
"loss": 0.3213,
"step": 770
},
{
"epoch": 20.84,
"learning_rate": 0.00015399999999999998,
"loss": 0.1339,
"step": 771
},
{
"epoch": 20.86,
"learning_rate": 0.00015419999999999998,
"loss": 0.536,
"step": 772
},
{
"epoch": 20.89,
"learning_rate": 0.0001544,
"loss": 0.3861,
"step": 773
},
{
"epoch": 20.92,
"learning_rate": 0.0001546,
"loss": 0.616,
"step": 774
},
{
"epoch": 20.95,
"learning_rate": 0.0001548,
"loss": 0.1447,
"step": 775
},
{
"epoch": 20.97,
"learning_rate": 0.000155,
"loss": 0.4107,
"step": 776
},
{
"epoch": 21.0,
"learning_rate": 0.00015519999999999998,
"loss": 0.0542,
"step": 777
},
{
"epoch": 21.03,
"learning_rate": 0.00015539999999999998,
"loss": 0.2203,
"step": 778
},
{
"epoch": 21.05,
"learning_rate": 0.00015560000000000001,
"loss": 0.2727,
"step": 779
},
{
"epoch": 21.08,
"learning_rate": 0.0001558,
"loss": 0.2085,
"step": 780
},
{
"epoch": 21.11,
"learning_rate": 0.000156,
"loss": 0.2955,
"step": 781
},
{
"epoch": 21.14,
"learning_rate": 0.0001562,
"loss": 0.3776,
"step": 782
},
{
"epoch": 21.16,
"learning_rate": 0.00015639999999999998,
"loss": 0.4125,
"step": 783
},
{
"epoch": 21.19,
"learning_rate": 0.00015659999999999998,
"loss": 0.2002,
"step": 784
},
{
"epoch": 21.22,
"learning_rate": 0.00015679999999999996,
"loss": 0.3757,
"step": 785
},
{
"epoch": 21.24,
"learning_rate": 0.000157,
"loss": 0.2586,
"step": 786
},
{
"epoch": 21.27,
"learning_rate": 0.0001572,
"loss": 0.3852,
"step": 787
},
{
"epoch": 21.3,
"learning_rate": 0.00015739999999999998,
"loss": 0.3034,
"step": 788
},
{
"epoch": 21.32,
"learning_rate": 0.00015759999999999998,
"loss": 0.2283,
"step": 789
},
{
"epoch": 21.35,
"learning_rate": 0.0001578,
"loss": 0.2221,
"step": 790
},
{
"epoch": 21.38,
"learning_rate": 0.00015799999999999996,
"loss": 0.5111,
"step": 791
},
{
"epoch": 21.41,
"learning_rate": 0.00015819999999999997,
"loss": 0.2252,
"step": 792
},
{
"epoch": 21.43,
"learning_rate": 0.0001584,
"loss": 0.1557,
"step": 793
},
{
"epoch": 21.46,
"learning_rate": 0.00015859999999999998,
"loss": 0.3329,
"step": 794
},
{
"epoch": 21.49,
"learning_rate": 0.00015879999999999998,
"loss": 0.387,
"step": 795
},
{
"epoch": 21.51,
"learning_rate": 0.000159,
"loss": 0.2782,
"step": 796
},
{
"epoch": 21.54,
"learning_rate": 0.00015919999999999997,
"loss": 0.5415,
"step": 797
},
{
"epoch": 21.57,
"learning_rate": 0.00015939999999999997,
"loss": 0.2938,
"step": 798
},
{
"epoch": 21.59,
"learning_rate": 0.0001596,
"loss": 0.1872,
"step": 799
},
{
"epoch": 21.62,
"learning_rate": 0.00015979999999999998,
"loss": 0.3609,
"step": 800
},
{
"epoch": 21.62,
"eval_accuracy": 0.8608907870652837,
"eval_f1": 0.8608929267260805,
"eval_loss": 0.5679439902305603,
"eval_runtime": 147.5619,
"eval_samples_per_second": 11.107,
"eval_steps_per_second": 0.698,
"step": 800
},
{
"epoch": 21.65,
"learning_rate": 0.00015999999999999999,
"loss": 0.2795,
"step": 801
},
{
"epoch": 21.68,
"learning_rate": 0.0001602,
"loss": 0.3569,
"step": 802
},
{
"epoch": 21.7,
"learning_rate": 0.00016039999999999997,
"loss": 0.3719,
"step": 803
},
{
"epoch": 21.73,
"learning_rate": 0.00016059999999999997,
"loss": 0.2675,
"step": 804
},
{
"epoch": 21.76,
"learning_rate": 0.0001608,
"loss": 0.3226,
"step": 805
},
{
"epoch": 21.78,
"learning_rate": 0.00016099999999999998,
"loss": 0.274,
"step": 806
},
{
"epoch": 21.81,
"learning_rate": 0.0001612,
"loss": 0.2661,
"step": 807
},
{
"epoch": 21.84,
"learning_rate": 0.0001614,
"loss": 0.1802,
"step": 808
},
{
"epoch": 21.86,
"learning_rate": 0.00016159999999999997,
"loss": 0.4662,
"step": 809
},
{
"epoch": 21.89,
"learning_rate": 0.00016179999999999998,
"loss": 0.2233,
"step": 810
},
{
"epoch": 21.92,
"learning_rate": 0.000162,
"loss": 0.2714,
"step": 811
},
{
"epoch": 21.95,
"learning_rate": 0.00016219999999999999,
"loss": 0.4136,
"step": 812
},
{
"epoch": 21.97,
"learning_rate": 0.0001624,
"loss": 0.4474,
"step": 813
},
{
"epoch": 22.0,
"learning_rate": 0.0001626,
"loss": 0.3824,
"step": 814
},
{
"epoch": 22.03,
"learning_rate": 0.00016279999999999997,
"loss": 0.4823,
"step": 815
},
{
"epoch": 22.05,
"learning_rate": 0.00016299999999999998,
"loss": 0.3393,
"step": 816
},
{
"epoch": 22.08,
"learning_rate": 0.0001632,
"loss": 0.341,
"step": 817
},
{
"epoch": 22.11,
"learning_rate": 0.0001634,
"loss": 0.5738,
"step": 818
},
{
"epoch": 22.14,
"learning_rate": 0.0001636,
"loss": 0.1832,
"step": 819
},
{
"epoch": 22.16,
"learning_rate": 0.0001638,
"loss": 0.198,
"step": 820
},
{
"epoch": 22.19,
"learning_rate": 0.00016399999999999997,
"loss": 0.3644,
"step": 821
},
{
"epoch": 22.22,
"learning_rate": 0.00016419999999999998,
"loss": 0.2497,
"step": 822
},
{
"epoch": 22.24,
"learning_rate": 0.0001644,
"loss": 0.2881,
"step": 823
},
{
"epoch": 22.27,
"learning_rate": 0.0001646,
"loss": 0.2888,
"step": 824
},
{
"epoch": 22.3,
"learning_rate": 0.0001648,
"loss": 0.0593,
"step": 825
},
{
"epoch": 22.32,
"learning_rate": 0.000165,
"loss": 0.3591,
"step": 826
},
{
"epoch": 22.35,
"learning_rate": 0.00016519999999999998,
"loss": 0.1027,
"step": 827
},
{
"epoch": 22.38,
"learning_rate": 0.00016539999999999998,
"loss": 0.1865,
"step": 828
},
{
"epoch": 22.41,
"learning_rate": 0.0001656,
"loss": 0.2184,
"step": 829
},
{
"epoch": 22.43,
"learning_rate": 0.00016579999999999996,
"loss": 0.5119,
"step": 830
},
{
"epoch": 22.46,
"learning_rate": 0.000166,
"loss": 0.5421,
"step": 831
},
{
"epoch": 22.49,
"learning_rate": 0.0001662,
"loss": 0.083,
"step": 832
},
{
"epoch": 22.51,
"learning_rate": 0.00016639999999999998,
"loss": 0.465,
"step": 833
},
{
"epoch": 22.54,
"learning_rate": 0.00016659999999999998,
"loss": 0.2081,
"step": 834
},
{
"epoch": 22.57,
"learning_rate": 0.0001668,
"loss": 0.3402,
"step": 835
},
{
"epoch": 22.59,
"learning_rate": 0.00016699999999999997,
"loss": 0.4658,
"step": 836
},
{
"epoch": 22.62,
"learning_rate": 0.0001672,
"loss": 0.3844,
"step": 837
},
{
"epoch": 22.65,
"learning_rate": 0.0001674,
"loss": 0.3143,
"step": 838
},
{
"epoch": 22.68,
"learning_rate": 0.00016759999999999998,
"loss": 0.2889,
"step": 839
},
{
"epoch": 22.7,
"learning_rate": 0.00016779999999999999,
"loss": 0.3765,
"step": 840
},
{
"epoch": 22.73,
"learning_rate": 0.000168,
"loss": 0.2806,
"step": 841
},
{
"epoch": 22.76,
"learning_rate": 0.00016819999999999997,
"loss": 0.2995,
"step": 842
},
{
"epoch": 22.78,
"learning_rate": 0.0001684,
"loss": 0.6669,
"step": 843
},
{
"epoch": 22.81,
"learning_rate": 0.0001686,
"loss": 0.2198,
"step": 844
},
{
"epoch": 22.84,
"learning_rate": 0.00016879999999999998,
"loss": 0.1432,
"step": 845
},
{
"epoch": 22.86,
"learning_rate": 0.000169,
"loss": 0.2888,
"step": 846
},
{
"epoch": 22.89,
"learning_rate": 0.00016919999999999997,
"loss": 0.2408,
"step": 847
},
{
"epoch": 22.92,
"learning_rate": 0.00016939999999999997,
"loss": 0.2257,
"step": 848
},
{
"epoch": 22.95,
"learning_rate": 0.0001696,
"loss": 0.5977,
"step": 849
},
{
"epoch": 22.97,
"learning_rate": 0.00016979999999999998,
"loss": 0.0929,
"step": 850
},
{
"epoch": 23.0,
"learning_rate": 0.00016999999999999999,
"loss": 0.161,
"step": 851
},
{
"epoch": 23.03,
"learning_rate": 0.0001702,
"loss": 0.1445,
"step": 852
},
{
"epoch": 23.05,
"learning_rate": 0.00017039999999999997,
"loss": 0.2106,
"step": 853
},
{
"epoch": 23.08,
"learning_rate": 0.00017059999999999997,
"loss": 0.2774,
"step": 854
},
{
"epoch": 23.11,
"learning_rate": 0.0001708,
"loss": 0.212,
"step": 855
},
{
"epoch": 23.14,
"learning_rate": 0.00017099999999999998,
"loss": 0.1133,
"step": 856
},
{
"epoch": 23.16,
"learning_rate": 0.0001712,
"loss": 0.362,
"step": 857
},
{
"epoch": 23.19,
"learning_rate": 0.0001714,
"loss": 0.2237,
"step": 858
},
{
"epoch": 23.22,
"learning_rate": 0.00017159999999999997,
"loss": 0.1276,
"step": 859
},
{
"epoch": 23.24,
"learning_rate": 0.00017179999999999997,
"loss": 0.194,
"step": 860
},
{
"epoch": 23.27,
"learning_rate": 0.000172,
"loss": 0.143,
"step": 861
},
{
"epoch": 23.3,
"learning_rate": 0.00017219999999999998,
"loss": 0.2553,
"step": 862
},
{
"epoch": 23.32,
"learning_rate": 0.0001724,
"loss": 0.2588,
"step": 863
},
{
"epoch": 23.35,
"learning_rate": 0.0001726,
"loss": 0.1567,
"step": 864
},
{
"epoch": 23.38,
"learning_rate": 0.00017279999999999997,
"loss": 0.1509,
"step": 865
},
{
"epoch": 23.41,
"learning_rate": 0.00017299999999999998,
"loss": 0.4557,
"step": 866
},
{
"epoch": 23.43,
"learning_rate": 0.00017319999999999998,
"loss": 0.2995,
"step": 867
},
{
"epoch": 23.46,
"learning_rate": 0.00017339999999999996,
"loss": 0.1386,
"step": 868
},
{
"epoch": 23.49,
"learning_rate": 0.0001736,
"loss": 0.1908,
"step": 869
},
{
"epoch": 23.51,
"learning_rate": 0.0001738,
"loss": 0.2493,
"step": 870
},
{
"epoch": 23.54,
"learning_rate": 0.00017399999999999997,
"loss": 0.4653,
"step": 871
},
{
"epoch": 23.57,
"learning_rate": 0.00017419999999999998,
"loss": 0.3064,
"step": 872
},
{
"epoch": 23.59,
"learning_rate": 0.00017439999999999998,
"loss": 0.2827,
"step": 873
},
{
"epoch": 23.62,
"learning_rate": 0.00017459999999999996,
"loss": 0.3536,
"step": 874
},
{
"epoch": 23.65,
"learning_rate": 0.0001748,
"loss": 0.2147,
"step": 875
},
{
"epoch": 23.68,
"learning_rate": 0.000175,
"loss": 0.3503,
"step": 876
},
{
"epoch": 23.7,
"learning_rate": 0.00017519999999999998,
"loss": 0.3309,
"step": 877
},
{
"epoch": 23.73,
"learning_rate": 0.00017539999999999998,
"loss": 0.3293,
"step": 878
},
{
"epoch": 23.76,
"learning_rate": 0.00017559999999999999,
"loss": 0.2567,
"step": 879
},
{
"epoch": 23.78,
"learning_rate": 0.00017579999999999996,
"loss": 0.3443,
"step": 880
},
{
"epoch": 23.81,
"learning_rate": 0.000176,
"loss": 0.2961,
"step": 881
},
{
"epoch": 23.84,
"learning_rate": 0.0001762,
"loss": 0.3842,
"step": 882
},
{
"epoch": 23.86,
"learning_rate": 0.00017639999999999998,
"loss": 0.3122,
"step": 883
},
{
"epoch": 23.89,
"learning_rate": 0.00017659999999999998,
"loss": 0.1259,
"step": 884
},
{
"epoch": 23.92,
"learning_rate": 0.0001768,
"loss": 0.3351,
"step": 885
},
{
"epoch": 23.95,
"learning_rate": 0.00017699999999999997,
"loss": 0.3964,
"step": 886
},
{
"epoch": 23.97,
"learning_rate": 0.0001772,
"loss": 0.1914,
"step": 887
},
{
"epoch": 24.0,
"learning_rate": 0.0001774,
"loss": 0.3296,
"step": 888
},
{
"epoch": 24.03,
"learning_rate": 0.00017759999999999998,
"loss": 0.2765,
"step": 889
},
{
"epoch": 24.05,
"learning_rate": 0.00017779999999999998,
"loss": 0.2776,
"step": 890
},
{
"epoch": 24.08,
"learning_rate": 0.000178,
"loss": 0.2362,
"step": 891
},
{
"epoch": 24.11,
"learning_rate": 0.00017819999999999997,
"loss": 0.4676,
"step": 892
},
{
"epoch": 24.14,
"learning_rate": 0.0001784,
"loss": 0.2703,
"step": 893
},
{
"epoch": 24.16,
"learning_rate": 0.0001786,
"loss": 0.2569,
"step": 894
},
{
"epoch": 24.19,
"learning_rate": 0.00017879999999999998,
"loss": 0.2649,
"step": 895
},
{
"epoch": 24.22,
"learning_rate": 0.000179,
"loss": 0.207,
"step": 896
},
{
"epoch": 24.24,
"learning_rate": 0.0001792,
"loss": 0.1249,
"step": 897
},
{
"epoch": 24.27,
"learning_rate": 0.00017939999999999997,
"loss": 0.4348,
"step": 898
},
{
"epoch": 24.3,
"learning_rate": 0.0001796,
"loss": 0.2774,
"step": 899
},
{
"epoch": 24.32,
"learning_rate": 0.0001798,
"loss": 0.1186,
"step": 900
},
{
"epoch": 24.35,
"learning_rate": 0.00017999999999999998,
"loss": 0.1361,
"step": 901
},
{
"epoch": 24.38,
"learning_rate": 0.0001802,
"loss": 0.1605,
"step": 902
},
{
"epoch": 24.41,
"learning_rate": 0.0001804,
"loss": 0.1574,
"step": 903
},
{
"epoch": 24.43,
"learning_rate": 0.00018059999999999997,
"loss": 0.2604,
"step": 904
},
{
"epoch": 24.46,
"learning_rate": 0.00018079999999999998,
"loss": 0.2295,
"step": 905
},
{
"epoch": 24.49,
"learning_rate": 0.000181,
"loss": 0.3012,
"step": 906
},
{
"epoch": 24.51,
"learning_rate": 0.00018119999999999999,
"loss": 0.4797,
"step": 907
},
{
"epoch": 24.54,
"learning_rate": 0.0001814,
"loss": 0.5477,
"step": 908
},
{
"epoch": 24.57,
"learning_rate": 0.00018159999999999997,
"loss": 0.2036,
"step": 909
},
{
"epoch": 24.59,
"learning_rate": 0.00018179999999999997,
"loss": 0.2525,
"step": 910
},
{
"epoch": 24.62,
"learning_rate": 0.00018199999999999998,
"loss": 0.3189,
"step": 911
},
{
"epoch": 24.65,
"learning_rate": 0.00018219999999999996,
"loss": 0.1867,
"step": 912
},
{
"epoch": 24.68,
"learning_rate": 0.0001824,
"loss": 0.1245,
"step": 913
},
{
"epoch": 24.7,
"learning_rate": 0.0001826,
"loss": 0.2563,
"step": 914
},
{
"epoch": 24.73,
"learning_rate": 0.00018279999999999997,
"loss": 0.207,
"step": 915
},
{
"epoch": 24.76,
"learning_rate": 0.00018299999999999998,
"loss": 0.1856,
"step": 916
},
{
"epoch": 24.78,
"learning_rate": 0.00018319999999999998,
"loss": 0.1295,
"step": 917
},
{
"epoch": 24.81,
"learning_rate": 0.00018339999999999996,
"loss": 0.1973,
"step": 918
},
{
"epoch": 24.84,
"learning_rate": 0.0001836,
"loss": 0.3584,
"step": 919
},
{
"epoch": 24.86,
"learning_rate": 0.0001838,
"loss": 0.2202,
"step": 920
},
{
"epoch": 24.89,
"learning_rate": 0.00018399999999999997,
"loss": 0.1379,
"step": 921
},
{
"epoch": 24.92,
"learning_rate": 0.00018419999999999998,
"loss": 0.3133,
"step": 922
},
{
"epoch": 24.95,
"learning_rate": 0.00018439999999999998,
"loss": 0.3539,
"step": 923
},
{
"epoch": 24.97,
"learning_rate": 0.00018459999999999996,
"loss": 0.1498,
"step": 924
},
{
"epoch": 25.0,
"learning_rate": 0.0001848,
"loss": 0.2274,
"step": 925
},
{
"epoch": 25.03,
"learning_rate": 0.000185,
"loss": 0.2015,
"step": 926
},
{
"epoch": 25.05,
"learning_rate": 0.00018519999999999998,
"loss": 0.2851,
"step": 927
},
{
"epoch": 25.08,
"learning_rate": 0.00018539999999999998,
"loss": 0.2795,
"step": 928
},
{
"epoch": 25.11,
"learning_rate": 0.00018559999999999998,
"loss": 0.4104,
"step": 929
},
{
"epoch": 25.14,
"learning_rate": 0.00018579999999999996,
"loss": 0.3036,
"step": 930
},
{
"epoch": 25.16,
"learning_rate": 0.000186,
"loss": 0.2959,
"step": 931
},
{
"epoch": 25.19,
"learning_rate": 0.0001862,
"loss": 0.5758,
"step": 932
},
{
"epoch": 25.22,
"learning_rate": 0.00018639999999999998,
"loss": 0.1434,
"step": 933
},
{
"epoch": 25.24,
"learning_rate": 0.00018659999999999998,
"loss": 0.1867,
"step": 934
},
{
"epoch": 25.27,
"learning_rate": 0.0001868,
"loss": 0.558,
"step": 935
},
{
"epoch": 25.3,
"learning_rate": 0.00018699999999999996,
"loss": 0.2011,
"step": 936
},
{
"epoch": 25.32,
"learning_rate": 0.0001872,
"loss": 0.4021,
"step": 937
},
{
"epoch": 25.35,
"learning_rate": 0.0001874,
"loss": 0.114,
"step": 938
},
{
"epoch": 25.38,
"learning_rate": 0.00018759999999999998,
"loss": 0.2346,
"step": 939
},
{
"epoch": 25.41,
"learning_rate": 0.00018779999999999998,
"loss": 0.0694,
"step": 940
},
{
"epoch": 25.43,
"learning_rate": 0.000188,
"loss": 0.2091,
"step": 941
},
{
"epoch": 25.46,
"learning_rate": 0.00018819999999999997,
"loss": 0.3194,
"step": 942
},
{
"epoch": 25.49,
"learning_rate": 0.00018839999999999997,
"loss": 0.3055,
"step": 943
},
{
"epoch": 25.51,
"learning_rate": 0.0001886,
"loss": 0.1264,
"step": 944
},
{
"epoch": 25.54,
"learning_rate": 0.00018879999999999998,
"loss": 0.1249,
"step": 945
},
{
"epoch": 25.57,
"learning_rate": 0.00018899999999999999,
"loss": 0.2434,
"step": 946
},
{
"epoch": 25.59,
"learning_rate": 0.0001892,
"loss": 0.1051,
"step": 947
},
{
"epoch": 25.62,
"learning_rate": 0.00018939999999999997,
"loss": 0.2589,
"step": 948
},
{
"epoch": 25.65,
"learning_rate": 0.00018959999999999997,
"loss": 0.1581,
"step": 949
},
{
"epoch": 25.68,
"learning_rate": 0.0001898,
"loss": 0.251,
"step": 950
},
{
"epoch": 25.7,
"learning_rate": 0.00018999999999999998,
"loss": 0.2946,
"step": 951
},
{
"epoch": 25.73,
"learning_rate": 0.0001902,
"loss": 0.0588,
"step": 952
},
{
"epoch": 25.76,
"learning_rate": 0.0001904,
"loss": 0.1951,
"step": 953
},
{
"epoch": 25.78,
"learning_rate": 0.00019059999999999997,
"loss": 0.2148,
"step": 954
},
{
"epoch": 25.81,
"learning_rate": 0.00019079999999999998,
"loss": 0.1273,
"step": 955
},
{
"epoch": 25.84,
"learning_rate": 0.000191,
"loss": 0.3302,
"step": 956
},
{
"epoch": 25.86,
"learning_rate": 0.00019119999999999999,
"loss": 0.2869,
"step": 957
},
{
"epoch": 25.89,
"learning_rate": 0.0001914,
"loss": 0.0963,
"step": 958
},
{
"epoch": 25.92,
"learning_rate": 0.0001916,
"loss": 0.3224,
"step": 959
},
{
"epoch": 25.95,
"learning_rate": 0.00019179999999999997,
"loss": 0.4571,
"step": 960
},
{
"epoch": 25.97,
"learning_rate": 0.00019199999999999998,
"loss": 0.3363,
"step": 961
},
{
"epoch": 26.0,
"learning_rate": 0.0001922,
"loss": 0.7345,
"step": 962
},
{
"epoch": 26.03,
"learning_rate": 0.0001924,
"loss": 0.4107,
"step": 963
},
{
"epoch": 26.05,
"learning_rate": 0.0001926,
"loss": 0.2725,
"step": 964
},
{
"epoch": 26.08,
"learning_rate": 0.0001928,
"loss": 0.1483,
"step": 965
},
{
"epoch": 26.11,
"learning_rate": 0.00019299999999999997,
"loss": 0.3933,
"step": 966
},
{
"epoch": 26.14,
"learning_rate": 0.00019319999999999998,
"loss": 0.4185,
"step": 967
},
{
"epoch": 26.16,
"learning_rate": 0.0001934,
"loss": 0.236,
"step": 968
},
{
"epoch": 26.19,
"learning_rate": 0.0001936,
"loss": 0.2104,
"step": 969
},
{
"epoch": 26.22,
"learning_rate": 0.0001938,
"loss": 0.307,
"step": 970
},
{
"epoch": 26.24,
"learning_rate": 0.00019399999999999997,
"loss": 0.1798,
"step": 971
},
{
"epoch": 26.27,
"learning_rate": 0.00019419999999999998,
"loss": 0.3239,
"step": 972
},
{
"epoch": 26.3,
"learning_rate": 0.00019439999999999998,
"loss": 0.1161,
"step": 973
},
{
"epoch": 26.32,
"learning_rate": 0.00019459999999999996,
"loss": 0.1211,
"step": 974
},
{
"epoch": 26.35,
"learning_rate": 0.0001948,
"loss": 0.173,
"step": 975
},
{
"epoch": 26.38,
"learning_rate": 0.000195,
"loss": 0.3027,
"step": 976
},
{
"epoch": 26.41,
"learning_rate": 0.00019519999999999997,
"loss": 0.4959,
"step": 977
},
{
"epoch": 26.43,
"learning_rate": 0.00019539999999999998,
"loss": 0.2363,
"step": 978
},
{
"epoch": 26.46,
"learning_rate": 0.00019559999999999998,
"loss": 0.3428,
"step": 979
},
{
"epoch": 26.49,
"learning_rate": 0.00019579999999999996,
"loss": 0.1483,
"step": 980
},
{
"epoch": 26.51,
"learning_rate": 0.00019599999999999997,
"loss": 0.2388,
"step": 981
},
{
"epoch": 26.54,
"learning_rate": 0.0001962,
"loss": 0.2154,
"step": 982
},
{
"epoch": 26.57,
"learning_rate": 0.00019639999999999998,
"loss": 0.2492,
"step": 983
},
{
"epoch": 26.59,
"learning_rate": 0.00019659999999999998,
"loss": 0.3266,
"step": 984
},
{
"epoch": 26.62,
"learning_rate": 0.00019679999999999999,
"loss": 0.0984,
"step": 985
},
{
"epoch": 26.65,
"learning_rate": 0.00019699999999999996,
"loss": 0.2091,
"step": 986
},
{
"epoch": 26.68,
"learning_rate": 0.00019719999999999997,
"loss": 0.1276,
"step": 987
},
{
"epoch": 26.7,
"learning_rate": 0.0001974,
"loss": 0.119,
"step": 988
},
{
"epoch": 26.73,
"learning_rate": 0.00019759999999999998,
"loss": 0.1331,
"step": 989
},
{
"epoch": 26.76,
"learning_rate": 0.00019779999999999998,
"loss": 0.3638,
"step": 990
},
{
"epoch": 26.78,
"learning_rate": 0.000198,
"loss": 0.2227,
"step": 991
},
{
"epoch": 26.81,
"learning_rate": 0.00019819999999999997,
"loss": 0.2172,
"step": 992
},
{
"epoch": 26.84,
"learning_rate": 0.00019839999999999997,
"loss": 0.1313,
"step": 993
},
{
"epoch": 26.86,
"learning_rate": 0.0001986,
"loss": 0.2505,
"step": 994
},
{
"epoch": 26.89,
"learning_rate": 0.00019879999999999998,
"loss": 0.3768,
"step": 995
},
{
"epoch": 26.92,
"learning_rate": 0.00019899999999999999,
"loss": 0.0901,
"step": 996
},
{
"epoch": 26.95,
"learning_rate": 0.0001992,
"loss": 0.3423,
"step": 997
},
{
"epoch": 26.97,
"learning_rate": 0.00019939999999999997,
"loss": 0.1084,
"step": 998
},
{
"epoch": 27.0,
"learning_rate": 0.00019959999999999997,
"loss": 0.7539,
"step": 999
},
{
"epoch": 27.03,
"learning_rate": 0.0001998,
"loss": 0.4972,
"step": 1000
},
{
"epoch": 27.03,
"eval_accuracy": 0.8523489932885906,
"eval_f1": 0.8508557153938668,
"eval_loss": 0.5943850874900818,
"eval_runtime": 147.0543,
"eval_samples_per_second": 11.146,
"eval_steps_per_second": 0.7,
"step": 1000
},
{
"epoch": 27.05,
"learning_rate": 0.00019999999999999998,
"loss": 0.073,
"step": 1001
},
{
"epoch": 27.08,
"learning_rate": 0.0002002,
"loss": 0.3725,
"step": 1002
},
{
"epoch": 27.11,
"learning_rate": 0.0002004,
"loss": 0.1764,
"step": 1003
},
{
"epoch": 27.14,
"learning_rate": 0.00020059999999999997,
"loss": 0.5396,
"step": 1004
},
{
"epoch": 27.16,
"learning_rate": 0.00020079999999999997,
"loss": 0.1294,
"step": 1005
},
{
"epoch": 27.19,
"learning_rate": 0.000201,
"loss": 0.1826,
"step": 1006
},
{
"epoch": 27.22,
"learning_rate": 0.00020119999999999998,
"loss": 0.1764,
"step": 1007
},
{
"epoch": 27.24,
"learning_rate": 0.0002014,
"loss": 0.342,
"step": 1008
},
{
"epoch": 27.27,
"learning_rate": 0.0002016,
"loss": 0.1554,
"step": 1009
},
{
"epoch": 27.3,
"learning_rate": 0.00020179999999999997,
"loss": 0.3056,
"step": 1010
},
{
"epoch": 27.32,
"learning_rate": 0.00020199999999999998,
"loss": 0.1714,
"step": 1011
},
{
"epoch": 27.35,
"learning_rate": 0.0002022,
"loss": 0.1539,
"step": 1012
},
{
"epoch": 27.38,
"learning_rate": 0.0002024,
"loss": 0.0917,
"step": 1013
},
{
"epoch": 27.41,
"learning_rate": 0.0002026,
"loss": 0.1955,
"step": 1014
},
{
"epoch": 27.43,
"learning_rate": 0.0002028,
"loss": 0.235,
"step": 1015
},
{
"epoch": 27.46,
"learning_rate": 0.00020299999999999997,
"loss": 0.1496,
"step": 1016
},
{
"epoch": 27.49,
"learning_rate": 0.00020319999999999998,
"loss": 0.2519,
"step": 1017
},
{
"epoch": 27.51,
"learning_rate": 0.00020339999999999998,
"loss": 0.2617,
"step": 1018
},
{
"epoch": 27.54,
"learning_rate": 0.00020359999999999996,
"loss": 0.304,
"step": 1019
},
{
"epoch": 27.57,
"learning_rate": 0.0002038,
"loss": 0.1902,
"step": 1020
},
{
"epoch": 27.59,
"learning_rate": 0.000204,
"loss": 0.1836,
"step": 1021
},
{
"epoch": 27.62,
"learning_rate": 0.00020419999999999998,
"loss": 0.3966,
"step": 1022
},
{
"epoch": 27.65,
"learning_rate": 0.00020439999999999998,
"loss": 0.63,
"step": 1023
},
{
"epoch": 27.68,
"learning_rate": 0.00020459999999999999,
"loss": 0.2456,
"step": 1024
},
{
"epoch": 27.7,
"learning_rate": 0.00020479999999999996,
"loss": 0.1763,
"step": 1025
},
{
"epoch": 27.73,
"learning_rate": 0.000205,
"loss": 0.4416,
"step": 1026
},
{
"epoch": 27.76,
"learning_rate": 0.0002052,
"loss": 0.2014,
"step": 1027
},
{
"epoch": 27.78,
"learning_rate": 0.00020539999999999998,
"loss": 0.3589,
"step": 1028
},
{
"epoch": 27.81,
"learning_rate": 0.00020559999999999998,
"loss": 0.5608,
"step": 1029
},
{
"epoch": 27.84,
"learning_rate": 0.0002058,
"loss": 0.2554,
"step": 1030
},
{
"epoch": 27.86,
"learning_rate": 0.00020599999999999997,
"loss": 0.4065,
"step": 1031
},
{
"epoch": 27.89,
"learning_rate": 0.0002062,
"loss": 0.1937,
"step": 1032
},
{
"epoch": 27.92,
"learning_rate": 0.00020639999999999998,
"loss": 0.438,
"step": 1033
},
{
"epoch": 27.95,
"learning_rate": 0.00020659999999999998,
"loss": 0.2321,
"step": 1034
},
{
"epoch": 27.97,
"learning_rate": 0.00020679999999999999,
"loss": 0.2314,
"step": 1035
},
{
"epoch": 28.0,
"learning_rate": 0.00020699999999999996,
"loss": 1.0444,
"step": 1036
},
{
"epoch": 28.03,
"learning_rate": 0.00020719999999999997,
"loss": 0.3526,
"step": 1037
},
{
"epoch": 28.05,
"learning_rate": 0.0002074,
"loss": 0.2849,
"step": 1038
},
{
"epoch": 28.08,
"learning_rate": 0.00020759999999999998,
"loss": 0.2548,
"step": 1039
},
{
"epoch": 28.11,
"learning_rate": 0.00020779999999999998,
"loss": 0.2856,
"step": 1040
},
{
"epoch": 28.14,
"learning_rate": 0.000208,
"loss": 0.271,
"step": 1041
},
{
"epoch": 28.16,
"learning_rate": 0.00020819999999999996,
"loss": 0.1856,
"step": 1042
},
{
"epoch": 28.19,
"learning_rate": 0.00020839999999999997,
"loss": 0.3723,
"step": 1043
},
{
"epoch": 28.22,
"learning_rate": 0.0002086,
"loss": 0.2489,
"step": 1044
},
{
"epoch": 28.24,
"learning_rate": 0.00020879999999999998,
"loss": 0.185,
"step": 1045
},
{
"epoch": 28.27,
"learning_rate": 0.00020899999999999998,
"loss": 0.4312,
"step": 1046
},
{
"epoch": 28.3,
"learning_rate": 0.0002092,
"loss": 0.4048,
"step": 1047
},
{
"epoch": 28.32,
"learning_rate": 0.00020939999999999997,
"loss": 0.1655,
"step": 1048
},
{
"epoch": 28.35,
"learning_rate": 0.00020959999999999997,
"loss": 0.1543,
"step": 1049
},
{
"epoch": 28.38,
"learning_rate": 0.0002098,
"loss": 0.3146,
"step": 1050
},
{
"epoch": 28.41,
"learning_rate": 0.00020999999999999998,
"loss": 0.1975,
"step": 1051
},
{
"epoch": 28.43,
"learning_rate": 0.0002102,
"loss": 0.0847,
"step": 1052
},
{
"epoch": 28.46,
"learning_rate": 0.0002104,
"loss": 0.3628,
"step": 1053
},
{
"epoch": 28.49,
"learning_rate": 0.00021059999999999997,
"loss": 0.1491,
"step": 1054
},
{
"epoch": 28.51,
"learning_rate": 0.00021079999999999997,
"loss": 0.3402,
"step": 1055
},
{
"epoch": 28.54,
"learning_rate": 0.00021099999999999998,
"loss": 0.1393,
"step": 1056
},
{
"epoch": 28.57,
"learning_rate": 0.00021119999999999996,
"loss": 0.2878,
"step": 1057
},
{
"epoch": 28.59,
"learning_rate": 0.0002114,
"loss": 0.1192,
"step": 1058
},
{
"epoch": 28.62,
"learning_rate": 0.0002116,
"loss": 0.1113,
"step": 1059
},
{
"epoch": 28.65,
"learning_rate": 0.00021179999999999997,
"loss": 0.1834,
"step": 1060
},
{
"epoch": 28.68,
"learning_rate": 0.00021199999999999998,
"loss": 0.2541,
"step": 1061
},
{
"epoch": 28.7,
"learning_rate": 0.00021219999999999998,
"loss": 0.1539,
"step": 1062
},
{
"epoch": 28.73,
"learning_rate": 0.00021239999999999996,
"loss": 0.4812,
"step": 1063
},
{
"epoch": 28.76,
"learning_rate": 0.0002126,
"loss": 0.3902,
"step": 1064
},
{
"epoch": 28.78,
"learning_rate": 0.0002128,
"loss": 0.3009,
"step": 1065
},
{
"epoch": 28.81,
"learning_rate": 0.00021299999999999997,
"loss": 0.2293,
"step": 1066
},
{
"epoch": 28.84,
"learning_rate": 0.00021319999999999998,
"loss": 0.1392,
"step": 1067
},
{
"epoch": 28.86,
"learning_rate": 0.00021339999999999998,
"loss": 0.3846,
"step": 1068
},
{
"epoch": 28.89,
"learning_rate": 0.00021359999999999996,
"loss": 0.154,
"step": 1069
},
{
"epoch": 28.92,
"learning_rate": 0.0002138,
"loss": 0.3268,
"step": 1070
},
{
"epoch": 28.95,
"learning_rate": 0.000214,
"loss": 0.3737,
"step": 1071
},
{
"epoch": 28.97,
"learning_rate": 0.00021419999999999998,
"loss": 0.1218,
"step": 1072
},
{
"epoch": 29.0,
"learning_rate": 0.00021439999999999998,
"loss": 0.4128,
"step": 1073
},
{
"epoch": 29.03,
"learning_rate": 0.00021459999999999998,
"loss": 0.3509,
"step": 1074
},
{
"epoch": 29.05,
"learning_rate": 0.00021479999999999996,
"loss": 0.2992,
"step": 1075
},
{
"epoch": 29.08,
"learning_rate": 0.000215,
"loss": 0.2564,
"step": 1076
},
{
"epoch": 29.11,
"learning_rate": 0.0002152,
"loss": 0.2603,
"step": 1077
},
{
"epoch": 29.14,
"learning_rate": 0.00021539999999999998,
"loss": 0.0845,
"step": 1078
},
{
"epoch": 29.16,
"learning_rate": 0.00021559999999999998,
"loss": 0.2243,
"step": 1079
},
{
"epoch": 29.19,
"learning_rate": 0.0002158,
"loss": 0.2961,
"step": 1080
},
{
"epoch": 29.22,
"learning_rate": 0.00021599999999999996,
"loss": 0.6431,
"step": 1081
},
{
"epoch": 29.24,
"learning_rate": 0.0002162,
"loss": 0.2291,
"step": 1082
},
{
"epoch": 29.27,
"learning_rate": 0.0002164,
"loss": 0.4369,
"step": 1083
},
{
"epoch": 29.3,
"learning_rate": 0.00021659999999999998,
"loss": 0.4033,
"step": 1084
},
{
"epoch": 29.32,
"learning_rate": 0.00021679999999999998,
"loss": 0.322,
"step": 1085
},
{
"epoch": 29.35,
"learning_rate": 0.000217,
"loss": 0.361,
"step": 1086
},
{
"epoch": 29.38,
"learning_rate": 0.00021719999999999997,
"loss": 0.139,
"step": 1087
},
{
"epoch": 29.41,
"learning_rate": 0.0002174,
"loss": 0.2594,
"step": 1088
},
{
"epoch": 29.43,
"learning_rate": 0.0002176,
"loss": 0.3376,
"step": 1089
},
{
"epoch": 29.46,
"learning_rate": 0.00021779999999999998,
"loss": 0.3243,
"step": 1090
},
{
"epoch": 29.49,
"learning_rate": 0.00021799999999999999,
"loss": 0.0901,
"step": 1091
},
{
"epoch": 29.51,
"learning_rate": 0.0002182,
"loss": 0.3879,
"step": 1092
},
{
"epoch": 29.54,
"learning_rate": 0.00021839999999999997,
"loss": 0.0815,
"step": 1093
},
{
"epoch": 29.57,
"learning_rate": 0.00021859999999999997,
"loss": 0.2032,
"step": 1094
},
{
"epoch": 29.59,
"learning_rate": 0.00021879999999999995,
"loss": 0.3851,
"step": 1095
},
{
"epoch": 29.62,
"learning_rate": 0.00021899999999999998,
"loss": 0.6732,
"step": 1096
},
{
"epoch": 29.65,
"learning_rate": 0.0002192,
"loss": 0.297,
"step": 1097
},
{
"epoch": 29.68,
"learning_rate": 0.00021939999999999997,
"loss": 0.5093,
"step": 1098
},
{
"epoch": 29.7,
"learning_rate": 0.00021959999999999997,
"loss": 0.5463,
"step": 1099
},
{
"epoch": 29.73,
"learning_rate": 0.00021979999999999998,
"loss": 0.3572,
"step": 1100
},
{
"epoch": 29.76,
"learning_rate": 0.00021999999999999995,
"loss": 0.5057,
"step": 1101
},
{
"epoch": 29.78,
"learning_rate": 0.00022019999999999999,
"loss": 0.3215,
"step": 1102
},
{
"epoch": 29.81,
"learning_rate": 0.0002204,
"loss": 0.302,
"step": 1103
},
{
"epoch": 29.84,
"learning_rate": 0.00022059999999999997,
"loss": 0.3,
"step": 1104
},
{
"epoch": 29.86,
"learning_rate": 0.00022079999999999997,
"loss": 0.3374,
"step": 1105
},
{
"epoch": 29.89,
"learning_rate": 0.00022099999999999998,
"loss": 0.4301,
"step": 1106
},
{
"epoch": 29.92,
"learning_rate": 0.00022119999999999996,
"loss": 0.2079,
"step": 1107
},
{
"epoch": 29.95,
"learning_rate": 0.0002214,
"loss": 0.3041,
"step": 1108
},
{
"epoch": 29.97,
"learning_rate": 0.0002216,
"loss": 0.3039,
"step": 1109
},
{
"epoch": 30.0,
"learning_rate": 0.00022179999999999997,
"loss": 0.0676,
"step": 1110
},
{
"epoch": 30.03,
"learning_rate": 0.00022199999999999998,
"loss": 0.4737,
"step": 1111
},
{
"epoch": 30.05,
"learning_rate": 0.00022219999999999998,
"loss": 0.4261,
"step": 1112
},
{
"epoch": 30.08,
"learning_rate": 0.00022239999999999996,
"loss": 0.2535,
"step": 1113
},
{
"epoch": 30.11,
"learning_rate": 0.0002226,
"loss": 0.2844,
"step": 1114
},
{
"epoch": 30.14,
"learning_rate": 0.0002228,
"loss": 0.2255,
"step": 1115
},
{
"epoch": 30.16,
"learning_rate": 0.00022299999999999997,
"loss": 0.1224,
"step": 1116
},
{
"epoch": 30.19,
"learning_rate": 0.00022319999999999998,
"loss": 0.3224,
"step": 1117
},
{
"epoch": 30.22,
"learning_rate": 0.00022339999999999998,
"loss": 0.2683,
"step": 1118
},
{
"epoch": 30.24,
"learning_rate": 0.00022359999999999996,
"loss": 0.1746,
"step": 1119
},
{
"epoch": 30.27,
"learning_rate": 0.0002238,
"loss": 0.2925,
"step": 1120
},
{
"epoch": 30.3,
"learning_rate": 0.000224,
"loss": 0.4056,
"step": 1121
},
{
"epoch": 30.32,
"learning_rate": 0.00022419999999999997,
"loss": 0.2971,
"step": 1122
},
{
"epoch": 30.35,
"learning_rate": 0.00022439999999999998,
"loss": 0.1513,
"step": 1123
},
{
"epoch": 30.38,
"learning_rate": 0.00022459999999999998,
"loss": 0.1592,
"step": 1124
},
{
"epoch": 30.41,
"learning_rate": 0.00022479999999999996,
"loss": 0.2192,
"step": 1125
},
{
"epoch": 30.43,
"learning_rate": 0.000225,
"loss": 0.2827,
"step": 1126
},
{
"epoch": 30.46,
"learning_rate": 0.0002252,
"loss": 0.0868,
"step": 1127
},
{
"epoch": 30.49,
"learning_rate": 0.00022539999999999998,
"loss": 0.3284,
"step": 1128
},
{
"epoch": 30.51,
"learning_rate": 0.00022559999999999998,
"loss": 0.3506,
"step": 1129
},
{
"epoch": 30.54,
"learning_rate": 0.00022579999999999999,
"loss": 0.3427,
"step": 1130
},
{
"epoch": 30.57,
"learning_rate": 0.00022599999999999996,
"loss": 0.1684,
"step": 1131
},
{
"epoch": 30.59,
"learning_rate": 0.00022619999999999997,
"loss": 0.3836,
"step": 1132
},
{
"epoch": 30.62,
"learning_rate": 0.0002264,
"loss": 0.3323,
"step": 1133
},
{
"epoch": 30.65,
"learning_rate": 0.00022659999999999998,
"loss": 0.2752,
"step": 1134
},
{
"epoch": 30.68,
"learning_rate": 0.00022679999999999998,
"loss": 0.3276,
"step": 1135
},
{
"epoch": 30.7,
"learning_rate": 0.000227,
"loss": 0.1484,
"step": 1136
},
{
"epoch": 30.73,
"learning_rate": 0.00022719999999999997,
"loss": 0.243,
"step": 1137
},
{
"epoch": 30.76,
"learning_rate": 0.00022739999999999997,
"loss": 0.3931,
"step": 1138
},
{
"epoch": 30.78,
"learning_rate": 0.0002276,
"loss": 0.3735,
"step": 1139
},
{
"epoch": 30.81,
"learning_rate": 0.00022779999999999998,
"loss": 0.128,
"step": 1140
},
{
"epoch": 30.84,
"learning_rate": 0.00022799999999999999,
"loss": 0.4408,
"step": 1141
},
{
"epoch": 30.86,
"learning_rate": 0.0002282,
"loss": 0.4897,
"step": 1142
},
{
"epoch": 30.89,
"learning_rate": 0.00022839999999999997,
"loss": 0.209,
"step": 1143
},
{
"epoch": 30.92,
"learning_rate": 0.00022859999999999997,
"loss": 0.2233,
"step": 1144
},
{
"epoch": 30.95,
"learning_rate": 0.0002288,
"loss": 0.3929,
"step": 1145
},
{
"epoch": 30.97,
"learning_rate": 0.00022899999999999998,
"loss": 0.1007,
"step": 1146
},
{
"epoch": 31.0,
"learning_rate": 0.0002292,
"loss": 0.0403,
"step": 1147
},
{
"epoch": 31.03,
"learning_rate": 0.0002294,
"loss": 0.1411,
"step": 1148
},
{
"epoch": 31.05,
"learning_rate": 0.00022959999999999997,
"loss": 0.2985,
"step": 1149
},
{
"epoch": 31.08,
"learning_rate": 0.00022979999999999997,
"loss": 0.3401,
"step": 1150
},
{
"epoch": 31.11,
"learning_rate": 0.00023,
"loss": 0.3337,
"step": 1151
},
{
"epoch": 31.14,
"learning_rate": 0.00023019999999999998,
"loss": 0.1233,
"step": 1152
},
{
"epoch": 31.16,
"learning_rate": 0.0002304,
"loss": 0.1404,
"step": 1153
},
{
"epoch": 31.19,
"learning_rate": 0.0002306,
"loss": 0.1015,
"step": 1154
},
{
"epoch": 31.22,
"learning_rate": 0.00023079999999999997,
"loss": 0.1532,
"step": 1155
},
{
"epoch": 31.24,
"learning_rate": 0.00023099999999999998,
"loss": 0.2418,
"step": 1156
},
{
"epoch": 31.27,
"learning_rate": 0.0002312,
"loss": 0.3232,
"step": 1157
},
{
"epoch": 31.3,
"learning_rate": 0.0002314,
"loss": 0.2301,
"step": 1158
},
{
"epoch": 31.32,
"learning_rate": 0.0002316,
"loss": 0.0991,
"step": 1159
},
{
"epoch": 31.35,
"learning_rate": 0.00023179999999999997,
"loss": 0.1552,
"step": 1160
},
{
"epoch": 31.38,
"learning_rate": 0.00023199999999999997,
"loss": 0.3827,
"step": 1161
},
{
"epoch": 31.41,
"learning_rate": 0.00023219999999999998,
"loss": 0.1072,
"step": 1162
},
{
"epoch": 31.43,
"learning_rate": 0.00023239999999999996,
"loss": 0.1654,
"step": 1163
},
{
"epoch": 31.46,
"learning_rate": 0.00023259999999999996,
"loss": 0.4643,
"step": 1164
},
{
"epoch": 31.49,
"learning_rate": 0.0002328,
"loss": 0.1021,
"step": 1165
},
{
"epoch": 31.51,
"learning_rate": 0.00023299999999999997,
"loss": 0.1945,
"step": 1166
},
{
"epoch": 31.54,
"learning_rate": 0.00023319999999999998,
"loss": 0.1557,
"step": 1167
},
{
"epoch": 31.57,
"learning_rate": 0.00023339999999999998,
"loss": 0.0984,
"step": 1168
},
{
"epoch": 31.59,
"learning_rate": 0.00023359999999999996,
"loss": 0.387,
"step": 1169
},
{
"epoch": 31.62,
"learning_rate": 0.00023379999999999996,
"loss": 0.2535,
"step": 1170
},
{
"epoch": 31.65,
"learning_rate": 0.000234,
"loss": 0.0969,
"step": 1171
},
{
"epoch": 31.68,
"learning_rate": 0.00023419999999999997,
"loss": 0.2164,
"step": 1172
},
{
"epoch": 31.7,
"learning_rate": 0.00023439999999999998,
"loss": 0.345,
"step": 1173
},
{
"epoch": 31.73,
"learning_rate": 0.00023459999999999998,
"loss": 0.3302,
"step": 1174
},
{
"epoch": 31.76,
"learning_rate": 0.00023479999999999996,
"loss": 0.6692,
"step": 1175
},
{
"epoch": 31.78,
"learning_rate": 0.00023499999999999997,
"loss": 0.2264,
"step": 1176
},
{
"epoch": 31.81,
"learning_rate": 0.0002352,
"loss": 0.1162,
"step": 1177
},
{
"epoch": 31.84,
"learning_rate": 0.00023539999999999998,
"loss": 0.1791,
"step": 1178
},
{
"epoch": 31.86,
"learning_rate": 0.00023559999999999998,
"loss": 0.1306,
"step": 1179
},
{
"epoch": 31.89,
"learning_rate": 0.00023579999999999999,
"loss": 0.1442,
"step": 1180
},
{
"epoch": 31.92,
"learning_rate": 0.00023599999999999996,
"loss": 0.1954,
"step": 1181
},
{
"epoch": 31.95,
"learning_rate": 0.00023619999999999997,
"loss": 0.4181,
"step": 1182
},
{
"epoch": 31.97,
"learning_rate": 0.0002364,
"loss": 0.2626,
"step": 1183
},
{
"epoch": 32.0,
"learning_rate": 0.0002364,
"loss": 0.2427,
"step": 1184
},
{
"epoch": 32.03,
"learning_rate": 0.00023659999999999998,
"loss": 0.3492,
"step": 1185
},
{
"epoch": 32.05,
"learning_rate": 0.00023679999999999998,
"loss": 0.2849,
"step": 1186
},
{
"epoch": 32.08,
"learning_rate": 0.000237,
"loss": 0.2873,
"step": 1187
},
{
"epoch": 32.11,
"learning_rate": 0.00023719999999999997,
"loss": 0.1321,
"step": 1188
},
{
"epoch": 32.14,
"learning_rate": 0.00023739999999999997,
"loss": 0.2089,
"step": 1189
},
{
"epoch": 32.16,
"learning_rate": 0.0002376,
"loss": 0.1616,
"step": 1190
},
{
"epoch": 32.19,
"learning_rate": 0.00023779999999999998,
"loss": 0.1305,
"step": 1191
},
{
"epoch": 32.22,
"learning_rate": 0.00023799999999999998,
"loss": 0.0742,
"step": 1192
},
{
"epoch": 32.24,
"learning_rate": 0.0002382,
"loss": 0.2483,
"step": 1193
},
{
"epoch": 32.27,
"learning_rate": 0.00023839999999999997,
"loss": 0.3403,
"step": 1194
},
{
"epoch": 32.3,
"learning_rate": 0.00023859999999999997,
"loss": 0.2142,
"step": 1195
},
{
"epoch": 32.32,
"learning_rate": 0.0002388,
"loss": 0.1559,
"step": 1196
},
{
"epoch": 32.35,
"learning_rate": 0.00023899999999999998,
"loss": 0.3118,
"step": 1197
},
{
"epoch": 32.38,
"learning_rate": 0.0002392,
"loss": 0.0473,
"step": 1198
},
{
"epoch": 32.41,
"learning_rate": 0.0002394,
"loss": 0.0295,
"step": 1199
},
{
"epoch": 32.43,
"learning_rate": 0.00023959999999999997,
"loss": 0.1799,
"step": 1200
},
{
"epoch": 32.43,
"eval_accuracy": 0.862111043319097,
"eval_f1": 0.8622963565057812,
"eval_loss": 0.6193989515304565,
"eval_runtime": 146.8211,
"eval_samples_per_second": 11.163,
"eval_steps_per_second": 0.702,
"step": 1200
},
{
"epoch": 32.46,
"learning_rate": 0.00023979999999999997,
"loss": 0.0611,
"step": 1201
},
{
"epoch": 32.49,
"learning_rate": 0.00023999999999999998,
"loss": 0.3459,
"step": 1202
},
{
"epoch": 32.51,
"learning_rate": 0.00024019999999999996,
"loss": 0.4025,
"step": 1203
},
{
"epoch": 32.54,
"learning_rate": 0.0002404,
"loss": 0.1461,
"step": 1204
},
{
"epoch": 32.57,
"learning_rate": 0.0002406,
"loss": 0.1463,
"step": 1205
},
{
"epoch": 32.59,
"learning_rate": 0.00024079999999999997,
"loss": 0.218,
"step": 1206
},
{
"epoch": 32.62,
"learning_rate": 0.00024099999999999998,
"loss": 0.2327,
"step": 1207
},
{
"epoch": 32.65,
"learning_rate": 0.00024119999999999998,
"loss": 0.1869,
"step": 1208
},
{
"epoch": 32.68,
"learning_rate": 0.00024139999999999996,
"loss": 0.236,
"step": 1209
},
{
"epoch": 32.7,
"learning_rate": 0.0002416,
"loss": 0.2215,
"step": 1210
},
{
"epoch": 32.73,
"learning_rate": 0.0002418,
"loss": 0.1307,
"step": 1211
},
{
"epoch": 32.76,
"learning_rate": 0.00024199999999999997,
"loss": 0.3697,
"step": 1212
},
{
"epoch": 32.78,
"learning_rate": 0.00024219999999999998,
"loss": 0.4418,
"step": 1213
},
{
"epoch": 32.81,
"learning_rate": 0.00024239999999999998,
"loss": 0.0683,
"step": 1214
},
{
"epoch": 32.84,
"learning_rate": 0.00024259999999999996,
"loss": 0.2517,
"step": 1215
},
{
"epoch": 32.86,
"learning_rate": 0.0002428,
"loss": 0.2183,
"step": 1216
},
{
"epoch": 32.89,
"learning_rate": 0.000243,
"loss": 0.231,
"step": 1217
},
{
"epoch": 32.92,
"learning_rate": 0.00024319999999999998,
"loss": 0.2416,
"step": 1218
},
{
"epoch": 32.95,
"learning_rate": 0.00024339999999999998,
"loss": 0.3926,
"step": 1219
},
{
"epoch": 32.97,
"learning_rate": 0.00024359999999999999,
"loss": 0.1133,
"step": 1220
},
{
"epoch": 33.0,
"learning_rate": 0.00024379999999999996,
"loss": 0.2584,
"step": 1221
},
{
"epoch": 33.03,
"learning_rate": 0.000244,
"loss": 0.1544,
"step": 1222
},
{
"epoch": 33.05,
"learning_rate": 0.00024419999999999997,
"loss": 0.1836,
"step": 1223
},
{
"epoch": 33.08,
"learning_rate": 0.0002444,
"loss": 0.242,
"step": 1224
},
{
"epoch": 33.11,
"learning_rate": 0.0002446,
"loss": 0.3959,
"step": 1225
},
{
"epoch": 33.14,
"learning_rate": 0.0002448,
"loss": 0.309,
"step": 1226
},
{
"epoch": 33.16,
"learning_rate": 0.000245,
"loss": 0.4087,
"step": 1227
},
{
"epoch": 33.19,
"learning_rate": 0.0002452,
"loss": 0.2149,
"step": 1228
},
{
"epoch": 33.22,
"learning_rate": 0.00024539999999999995,
"loss": 0.2415,
"step": 1229
},
{
"epoch": 33.24,
"learning_rate": 0.00024559999999999995,
"loss": 0.2733,
"step": 1230
},
{
"epoch": 33.27,
"learning_rate": 0.0002458,
"loss": 0.3233,
"step": 1231
},
{
"epoch": 33.3,
"learning_rate": 0.00024599999999999996,
"loss": 0.1467,
"step": 1232
},
{
"epoch": 33.32,
"learning_rate": 0.00024619999999999997,
"loss": 0.092,
"step": 1233
},
{
"epoch": 33.35,
"learning_rate": 0.00024639999999999997,
"loss": 0.1137,
"step": 1234
},
{
"epoch": 33.38,
"learning_rate": 0.0002466,
"loss": 0.1028,
"step": 1235
},
{
"epoch": 33.41,
"learning_rate": 0.0002468,
"loss": 0.2876,
"step": 1236
},
{
"epoch": 33.43,
"learning_rate": 0.000247,
"loss": 0.2448,
"step": 1237
},
{
"epoch": 33.46,
"learning_rate": 0.0002472,
"loss": 0.203,
"step": 1238
},
{
"epoch": 33.49,
"learning_rate": 0.0002474,
"loss": 0.2069,
"step": 1239
},
{
"epoch": 33.51,
"learning_rate": 0.0002476,
"loss": 0.8065,
"step": 1240
},
{
"epoch": 33.54,
"learning_rate": 0.00024779999999999995,
"loss": 0.4867,
"step": 1241
},
{
"epoch": 33.57,
"learning_rate": 0.00024799999999999996,
"loss": 0.2184,
"step": 1242
},
{
"epoch": 33.59,
"learning_rate": 0.00024819999999999996,
"loss": 0.2501,
"step": 1243
},
{
"epoch": 33.62,
"learning_rate": 0.00024839999999999997,
"loss": 0.1785,
"step": 1244
},
{
"epoch": 33.65,
"learning_rate": 0.00024859999999999997,
"loss": 0.2313,
"step": 1245
},
{
"epoch": 33.68,
"learning_rate": 0.0002488,
"loss": 0.1774,
"step": 1246
},
{
"epoch": 33.7,
"learning_rate": 0.000249,
"loss": 0.2601,
"step": 1247
},
{
"epoch": 33.73,
"learning_rate": 0.0002492,
"loss": 0.1124,
"step": 1248
},
{
"epoch": 33.76,
"learning_rate": 0.0002494,
"loss": 0.2014,
"step": 1249
},
{
"epoch": 33.78,
"learning_rate": 0.00024959999999999994,
"loss": 0.2164,
"step": 1250
},
{
"epoch": 33.81,
"learning_rate": 0.0002498,
"loss": 0.2007,
"step": 1251
},
{
"epoch": 33.84,
"learning_rate": 0.00025,
"loss": 0.2754,
"step": 1252
},
{
"epoch": 33.86,
"learning_rate": 0.00025019999999999996,
"loss": 0.1551,
"step": 1253
},
{
"epoch": 33.89,
"learning_rate": 0.00025039999999999996,
"loss": 0.1649,
"step": 1254
},
{
"epoch": 33.92,
"learning_rate": 0.00025059999999999997,
"loss": 0.1547,
"step": 1255
},
{
"epoch": 33.95,
"learning_rate": 0.00025079999999999997,
"loss": 0.2767,
"step": 1256
},
{
"epoch": 33.97,
"learning_rate": 0.000251,
"loss": 0.3168,
"step": 1257
},
{
"epoch": 34.0,
"learning_rate": 0.0002512,
"loss": 0.5342,
"step": 1258
},
{
"epoch": 34.03,
"learning_rate": 0.0002514,
"loss": 0.1531,
"step": 1259
},
{
"epoch": 34.05,
"learning_rate": 0.0002516,
"loss": 0.116,
"step": 1260
},
{
"epoch": 34.08,
"learning_rate": 0.0002518,
"loss": 0.3592,
"step": 1261
},
{
"epoch": 34.11,
"learning_rate": 0.00025199999999999995,
"loss": 0.2923,
"step": 1262
},
{
"epoch": 34.14,
"learning_rate": 0.0002522,
"loss": 0.16,
"step": 1263
},
{
"epoch": 34.16,
"learning_rate": 0.0002524,
"loss": 0.1291,
"step": 1264
},
{
"epoch": 34.19,
"learning_rate": 0.00025259999999999996,
"loss": 0.1898,
"step": 1265
},
{
"epoch": 34.22,
"learning_rate": 0.00025279999999999996,
"loss": 0.0503,
"step": 1266
},
{
"epoch": 34.24,
"learning_rate": 0.00025299999999999997,
"loss": 0.1906,
"step": 1267
},
{
"epoch": 34.27,
"learning_rate": 0.0002532,
"loss": 0.2068,
"step": 1268
},
{
"epoch": 34.3,
"learning_rate": 0.0002534,
"loss": 0.232,
"step": 1269
},
{
"epoch": 34.32,
"learning_rate": 0.0002536,
"loss": 0.0258,
"step": 1270
},
{
"epoch": 34.35,
"learning_rate": 0.0002538,
"loss": 0.1378,
"step": 1271
},
{
"epoch": 34.38,
"learning_rate": 0.000254,
"loss": 0.1115,
"step": 1272
},
{
"epoch": 34.41,
"learning_rate": 0.0002542,
"loss": 0.0649,
"step": 1273
},
{
"epoch": 34.43,
"learning_rate": 0.00025439999999999995,
"loss": 0.4716,
"step": 1274
},
{
"epoch": 34.46,
"learning_rate": 0.0002546,
"loss": 0.1065,
"step": 1275
},
{
"epoch": 34.49,
"learning_rate": 0.0002548,
"loss": 0.2515,
"step": 1276
},
{
"epoch": 34.51,
"learning_rate": 0.00025499999999999996,
"loss": 0.4898,
"step": 1277
},
{
"epoch": 34.54,
"learning_rate": 0.00025519999999999997,
"loss": 0.1787,
"step": 1278
},
{
"epoch": 34.57,
"learning_rate": 0.0002554,
"loss": 0.2962,
"step": 1279
},
{
"epoch": 34.59,
"learning_rate": 0.0002556,
"loss": 0.1131,
"step": 1280
},
{
"epoch": 34.62,
"learning_rate": 0.0002558,
"loss": 0.2746,
"step": 1281
},
{
"epoch": 34.65,
"learning_rate": 0.000256,
"loss": 0.0482,
"step": 1282
},
{
"epoch": 34.68,
"learning_rate": 0.0002562,
"loss": 0.4344,
"step": 1283
},
{
"epoch": 34.7,
"learning_rate": 0.0002564,
"loss": 0.4398,
"step": 1284
},
{
"epoch": 34.73,
"learning_rate": 0.00025659999999999995,
"loss": 0.2232,
"step": 1285
},
{
"epoch": 34.76,
"learning_rate": 0.00025679999999999995,
"loss": 0.5902,
"step": 1286
},
{
"epoch": 34.78,
"learning_rate": 0.00025699999999999996,
"loss": 0.2576,
"step": 1287
},
{
"epoch": 34.81,
"learning_rate": 0.00025719999999999996,
"loss": 0.2447,
"step": 1288
},
{
"epoch": 34.84,
"learning_rate": 0.00025739999999999997,
"loss": 0.3988,
"step": 1289
},
{
"epoch": 34.86,
"learning_rate": 0.0002576,
"loss": 0.282,
"step": 1290
},
{
"epoch": 34.89,
"learning_rate": 0.0002578,
"loss": 0.3711,
"step": 1291
},
{
"epoch": 34.92,
"learning_rate": 0.000258,
"loss": 0.4105,
"step": 1292
},
{
"epoch": 34.95,
"learning_rate": 0.0002582,
"loss": 0.3257,
"step": 1293
},
{
"epoch": 34.97,
"learning_rate": 0.00025839999999999994,
"loss": 0.1024,
"step": 1294
},
{
"epoch": 35.0,
"learning_rate": 0.0002586,
"loss": 0.3325,
"step": 1295
},
{
"epoch": 35.03,
"learning_rate": 0.0002588,
"loss": 0.1733,
"step": 1296
},
{
"epoch": 35.05,
"learning_rate": 0.00025899999999999995,
"loss": 0.1173,
"step": 1297
},
{
"epoch": 35.08,
"learning_rate": 0.00025919999999999996,
"loss": 0.1559,
"step": 1298
},
{
"epoch": 35.11,
"learning_rate": 0.00025939999999999996,
"loss": 0.148,
"step": 1299
},
{
"epoch": 35.14,
"learning_rate": 0.00025959999999999997,
"loss": 0.1109,
"step": 1300
},
{
"epoch": 35.16,
"learning_rate": 0.00025979999999999997,
"loss": 0.1497,
"step": 1301
},
{
"epoch": 35.19,
"learning_rate": 0.00026,
"loss": 0.08,
"step": 1302
},
{
"epoch": 35.22,
"learning_rate": 0.0002602,
"loss": 0.0925,
"step": 1303
},
{
"epoch": 35.24,
"learning_rate": 0.0002604,
"loss": 0.1058,
"step": 1304
},
{
"epoch": 35.27,
"learning_rate": 0.0002606,
"loss": 0.191,
"step": 1305
},
{
"epoch": 35.3,
"learning_rate": 0.00026079999999999994,
"loss": 0.369,
"step": 1306
},
{
"epoch": 35.32,
"learning_rate": 0.000261,
"loss": 0.2756,
"step": 1307
},
{
"epoch": 35.35,
"learning_rate": 0.0002612,
"loss": 0.4153,
"step": 1308
},
{
"epoch": 35.38,
"learning_rate": 0.00026139999999999996,
"loss": 0.2264,
"step": 1309
},
{
"epoch": 35.41,
"learning_rate": 0.00026159999999999996,
"loss": 0.1337,
"step": 1310
},
{
"epoch": 35.43,
"learning_rate": 0.00026179999999999997,
"loss": 0.0509,
"step": 1311
},
{
"epoch": 35.46,
"learning_rate": 0.00026199999999999997,
"loss": 0.2477,
"step": 1312
},
{
"epoch": 35.49,
"learning_rate": 0.0002622,
"loss": 0.0458,
"step": 1313
},
{
"epoch": 35.51,
"learning_rate": 0.0002624,
"loss": 0.1973,
"step": 1314
},
{
"epoch": 35.54,
"learning_rate": 0.0002626,
"loss": 0.2278,
"step": 1315
},
{
"epoch": 35.57,
"learning_rate": 0.0002628,
"loss": 0.2463,
"step": 1316
},
{
"epoch": 35.59,
"learning_rate": 0.000263,
"loss": 0.1927,
"step": 1317
},
{
"epoch": 35.62,
"learning_rate": 0.00026319999999999995,
"loss": 0.3287,
"step": 1318
},
{
"epoch": 35.65,
"learning_rate": 0.00026339999999999995,
"loss": 0.4425,
"step": 1319
},
{
"epoch": 35.68,
"learning_rate": 0.0002636,
"loss": 0.5175,
"step": 1320
},
{
"epoch": 35.7,
"learning_rate": 0.00026379999999999996,
"loss": 0.2077,
"step": 1321
},
{
"epoch": 35.73,
"learning_rate": 0.00026399999999999997,
"loss": 0.3209,
"step": 1322
},
{
"epoch": 35.76,
"learning_rate": 0.00026419999999999997,
"loss": 0.3655,
"step": 1323
},
{
"epoch": 35.78,
"learning_rate": 0.0002644,
"loss": 0.1997,
"step": 1324
},
{
"epoch": 35.81,
"learning_rate": 0.0002646,
"loss": 0.0406,
"step": 1325
},
{
"epoch": 35.84,
"learning_rate": 0.0002648,
"loss": 0.2138,
"step": 1326
},
{
"epoch": 35.86,
"learning_rate": 0.000265,
"loss": 0.3074,
"step": 1327
},
{
"epoch": 35.89,
"learning_rate": 0.0002652,
"loss": 0.0729,
"step": 1328
},
{
"epoch": 35.92,
"learning_rate": 0.0002654,
"loss": 0.5372,
"step": 1329
},
{
"epoch": 35.95,
"learning_rate": 0.00026559999999999995,
"loss": 0.2879,
"step": 1330
},
{
"epoch": 35.97,
"learning_rate": 0.00026579999999999996,
"loss": 0.0752,
"step": 1331
},
{
"epoch": 36.0,
"learning_rate": 0.000266,
"loss": 0.0498,
"step": 1332
},
{
"epoch": 36.03,
"learning_rate": 0.00026619999999999997,
"loss": 0.4303,
"step": 1333
},
{
"epoch": 36.05,
"learning_rate": 0.00026639999999999997,
"loss": 0.3358,
"step": 1334
},
{
"epoch": 36.08,
"learning_rate": 0.0002666,
"loss": 0.3115,
"step": 1335
},
{
"epoch": 36.11,
"learning_rate": 0.0002668,
"loss": 0.1592,
"step": 1336
},
{
"epoch": 36.14,
"learning_rate": 0.000267,
"loss": 0.2941,
"step": 1337
},
{
"epoch": 36.16,
"learning_rate": 0.0002672,
"loss": 0.1571,
"step": 1338
},
{
"epoch": 36.19,
"learning_rate": 0.0002674,
"loss": 0.1955,
"step": 1339
},
{
"epoch": 36.22,
"learning_rate": 0.0002676,
"loss": 0.3113,
"step": 1340
},
{
"epoch": 36.24,
"learning_rate": 0.0002678,
"loss": 0.0576,
"step": 1341
},
{
"epoch": 36.27,
"learning_rate": 0.00026799999999999995,
"loss": 0.4205,
"step": 1342
},
{
"epoch": 36.3,
"learning_rate": 0.00026819999999999996,
"loss": 0.3922,
"step": 1343
},
{
"epoch": 36.32,
"learning_rate": 0.0002684,
"loss": 0.1362,
"step": 1344
},
{
"epoch": 36.35,
"learning_rate": 0.00026859999999999997,
"loss": 0.3153,
"step": 1345
},
{
"epoch": 36.38,
"learning_rate": 0.0002688,
"loss": 0.2137,
"step": 1346
},
{
"epoch": 36.41,
"learning_rate": 0.000269,
"loss": 0.1421,
"step": 1347
},
{
"epoch": 36.43,
"learning_rate": 0.0002692,
"loss": 0.1331,
"step": 1348
},
{
"epoch": 36.46,
"learning_rate": 0.0002694,
"loss": 0.3623,
"step": 1349
},
{
"epoch": 36.49,
"learning_rate": 0.00026959999999999994,
"loss": 0.1467,
"step": 1350
},
{
"epoch": 36.51,
"learning_rate": 0.0002698,
"loss": 0.1958,
"step": 1351
},
{
"epoch": 36.54,
"learning_rate": 0.00027,
"loss": 0.2006,
"step": 1352
},
{
"epoch": 36.57,
"learning_rate": 0.00027019999999999995,
"loss": 0.2991,
"step": 1353
},
{
"epoch": 36.59,
"learning_rate": 0.00027039999999999996,
"loss": 0.1397,
"step": 1354
},
{
"epoch": 36.62,
"learning_rate": 0.00027059999999999996,
"loss": 0.2261,
"step": 1355
},
{
"epoch": 36.65,
"learning_rate": 0.00027079999999999997,
"loss": 0.2324,
"step": 1356
},
{
"epoch": 36.68,
"learning_rate": 0.000271,
"loss": 0.1734,
"step": 1357
},
{
"epoch": 36.7,
"learning_rate": 0.0002712,
"loss": 0.1923,
"step": 1358
},
{
"epoch": 36.73,
"learning_rate": 0.0002714,
"loss": 0.1503,
"step": 1359
},
{
"epoch": 36.76,
"learning_rate": 0.0002716,
"loss": 0.5295,
"step": 1360
},
{
"epoch": 36.78,
"learning_rate": 0.0002718,
"loss": 0.3482,
"step": 1361
},
{
"epoch": 36.81,
"learning_rate": 0.00027199999999999994,
"loss": 0.2801,
"step": 1362
},
{
"epoch": 36.84,
"learning_rate": 0.00027219999999999995,
"loss": 0.3078,
"step": 1363
},
{
"epoch": 36.86,
"learning_rate": 0.0002724,
"loss": 0.1801,
"step": 1364
},
{
"epoch": 36.89,
"learning_rate": 0.00027259999999999996,
"loss": 0.1568,
"step": 1365
},
{
"epoch": 36.92,
"learning_rate": 0.00027279999999999996,
"loss": 0.224,
"step": 1366
},
{
"epoch": 36.95,
"learning_rate": 0.00027299999999999997,
"loss": 0.066,
"step": 1367
},
{
"epoch": 36.97,
"learning_rate": 0.00027319999999999997,
"loss": 0.117,
"step": 1368
},
{
"epoch": 37.0,
"learning_rate": 0.0002734,
"loss": 0.102,
"step": 1369
},
{
"epoch": 37.03,
"learning_rate": 0.0002736,
"loss": 0.289,
"step": 1370
},
{
"epoch": 37.05,
"learning_rate": 0.0002738,
"loss": 0.6453,
"step": 1371
},
{
"epoch": 37.08,
"learning_rate": 0.000274,
"loss": 0.2908,
"step": 1372
},
{
"epoch": 37.11,
"learning_rate": 0.0002742,
"loss": 0.0938,
"step": 1373
},
{
"epoch": 37.14,
"learning_rate": 0.00027439999999999995,
"loss": 0.1754,
"step": 1374
},
{
"epoch": 37.16,
"learning_rate": 0.00027459999999999995,
"loss": 0.1098,
"step": 1375
},
{
"epoch": 37.19,
"learning_rate": 0.0002748,
"loss": 0.0363,
"step": 1376
},
{
"epoch": 37.22,
"learning_rate": 0.00027499999999999996,
"loss": 0.3716,
"step": 1377
},
{
"epoch": 37.24,
"learning_rate": 0.00027519999999999997,
"loss": 0.1334,
"step": 1378
},
{
"epoch": 37.27,
"learning_rate": 0.00027539999999999997,
"loss": 0.1943,
"step": 1379
},
{
"epoch": 37.3,
"learning_rate": 0.0002756,
"loss": 0.1441,
"step": 1380
},
{
"epoch": 37.32,
"learning_rate": 0.0002758,
"loss": 0.2701,
"step": 1381
},
{
"epoch": 37.35,
"learning_rate": 0.000276,
"loss": 0.1437,
"step": 1382
},
{
"epoch": 37.38,
"learning_rate": 0.0002762,
"loss": 0.4404,
"step": 1383
},
{
"epoch": 37.41,
"learning_rate": 0.0002764,
"loss": 0.3195,
"step": 1384
},
{
"epoch": 37.43,
"learning_rate": 0.0002766,
"loss": 0.32,
"step": 1385
},
{
"epoch": 37.46,
"learning_rate": 0.00027679999999999995,
"loss": 0.5652,
"step": 1386
},
{
"epoch": 37.49,
"learning_rate": 0.00027699999999999996,
"loss": 0.4718,
"step": 1387
},
{
"epoch": 37.51,
"learning_rate": 0.0002772,
"loss": 0.2938,
"step": 1388
},
{
"epoch": 37.54,
"learning_rate": 0.00027739999999999997,
"loss": 0.2889,
"step": 1389
},
{
"epoch": 37.57,
"learning_rate": 0.00027759999999999997,
"loss": 0.2426,
"step": 1390
},
{
"epoch": 37.59,
"learning_rate": 0.0002778,
"loss": 0.0463,
"step": 1391
},
{
"epoch": 37.62,
"learning_rate": 0.000278,
"loss": 0.138,
"step": 1392
},
{
"epoch": 37.65,
"learning_rate": 0.0002782,
"loss": 0.1259,
"step": 1393
},
{
"epoch": 37.68,
"learning_rate": 0.0002784,
"loss": 0.0739,
"step": 1394
},
{
"epoch": 37.7,
"learning_rate": 0.00027859999999999994,
"loss": 0.1825,
"step": 1395
},
{
"epoch": 37.73,
"learning_rate": 0.0002788,
"loss": 0.1489,
"step": 1396
},
{
"epoch": 37.76,
"learning_rate": 0.000279,
"loss": 0.4117,
"step": 1397
},
{
"epoch": 37.78,
"learning_rate": 0.00027919999999999996,
"loss": 0.2202,
"step": 1398
},
{
"epoch": 37.81,
"learning_rate": 0.00027939999999999996,
"loss": 0.1821,
"step": 1399
},
{
"epoch": 37.84,
"learning_rate": 0.00027959999999999997,
"loss": 0.1308,
"step": 1400
},
{
"epoch": 37.84,
"eval_accuracy": 0.8547895057962172,
"eval_f1": 0.8569462299797571,
"eval_loss": 0.5956199169158936,
"eval_runtime": 146.0034,
"eval_samples_per_second": 11.226,
"eval_steps_per_second": 0.705,
"step": 1400
},
{
"epoch": 37.86,
"learning_rate": 0.00027979999999999997,
"loss": 0.2249,
"step": 1401
},
{
"epoch": 37.89,
"learning_rate": 0.00028,
"loss": 0.1103,
"step": 1402
},
{
"epoch": 37.92,
"learning_rate": 0.0002802,
"loss": 0.4685,
"step": 1403
},
{
"epoch": 37.95,
"learning_rate": 0.0002804,
"loss": 0.2424,
"step": 1404
},
{
"epoch": 37.97,
"learning_rate": 0.0002806,
"loss": 0.13,
"step": 1405
},
{
"epoch": 38.0,
"learning_rate": 0.0002808,
"loss": 0.2299,
"step": 1406
},
{
"epoch": 38.03,
"learning_rate": 0.00028099999999999995,
"loss": 0.0983,
"step": 1407
},
{
"epoch": 38.05,
"learning_rate": 0.0002812,
"loss": 0.3015,
"step": 1408
},
{
"epoch": 38.08,
"learning_rate": 0.00028139999999999996,
"loss": 0.1988,
"step": 1409
},
{
"epoch": 38.11,
"learning_rate": 0.00028159999999999996,
"loss": 0.18,
"step": 1410
},
{
"epoch": 38.14,
"learning_rate": 0.00028179999999999997,
"loss": 0.168,
"step": 1411
},
{
"epoch": 38.16,
"learning_rate": 0.00028199999999999997,
"loss": 0.2596,
"step": 1412
},
{
"epoch": 38.19,
"learning_rate": 0.0002822,
"loss": 0.1578,
"step": 1413
},
{
"epoch": 38.22,
"learning_rate": 0.0002824,
"loss": 0.2145,
"step": 1414
},
{
"epoch": 38.24,
"learning_rate": 0.0002826,
"loss": 0.5184,
"step": 1415
},
{
"epoch": 38.27,
"learning_rate": 0.0002828,
"loss": 0.422,
"step": 1416
},
{
"epoch": 38.3,
"learning_rate": 0.000283,
"loss": 0.2597,
"step": 1417
},
{
"epoch": 38.32,
"learning_rate": 0.00028319999999999994,
"loss": 0.3481,
"step": 1418
},
{
"epoch": 38.35,
"learning_rate": 0.00028339999999999995,
"loss": 0.2173,
"step": 1419
},
{
"epoch": 38.38,
"learning_rate": 0.0002836,
"loss": 0.1825,
"step": 1420
},
{
"epoch": 38.41,
"learning_rate": 0.00028379999999999996,
"loss": 0.2524,
"step": 1421
},
{
"epoch": 38.43,
"learning_rate": 0.00028399999999999996,
"loss": 0.2772,
"step": 1422
},
{
"epoch": 38.46,
"learning_rate": 0.00028419999999999997,
"loss": 0.3806,
"step": 1423
},
{
"epoch": 38.49,
"learning_rate": 0.0002844,
"loss": 0.1517,
"step": 1424
},
{
"epoch": 38.51,
"learning_rate": 0.0002846,
"loss": 0.3316,
"step": 1425
},
{
"epoch": 38.54,
"learning_rate": 0.0002848,
"loss": 0.0705,
"step": 1426
},
{
"epoch": 38.57,
"learning_rate": 0.000285,
"loss": 0.1302,
"step": 1427
},
{
"epoch": 38.59,
"learning_rate": 0.0002852,
"loss": 0.1577,
"step": 1428
},
{
"epoch": 38.62,
"learning_rate": 0.0002854,
"loss": 0.2394,
"step": 1429
},
{
"epoch": 38.65,
"learning_rate": 0.00028559999999999995,
"loss": 0.0374,
"step": 1430
},
{
"epoch": 38.68,
"learning_rate": 0.00028579999999999995,
"loss": 0.1781,
"step": 1431
},
{
"epoch": 38.7,
"learning_rate": 0.00028599999999999996,
"loss": 0.2886,
"step": 1432
},
{
"epoch": 38.73,
"learning_rate": 0.00028619999999999996,
"loss": 0.2224,
"step": 1433
},
{
"epoch": 38.76,
"learning_rate": 0.00028639999999999997,
"loss": 0.2565,
"step": 1434
},
{
"epoch": 38.78,
"learning_rate": 0.0002866,
"loss": 0.2415,
"step": 1435
},
{
"epoch": 38.81,
"learning_rate": 0.0002868,
"loss": 0.3166,
"step": 1436
},
{
"epoch": 38.84,
"learning_rate": 0.000287,
"loss": 0.1737,
"step": 1437
},
{
"epoch": 38.86,
"learning_rate": 0.0002872,
"loss": 0.2448,
"step": 1438
},
{
"epoch": 38.89,
"learning_rate": 0.00028739999999999994,
"loss": 0.2633,
"step": 1439
},
{
"epoch": 38.92,
"learning_rate": 0.0002876,
"loss": 0.2462,
"step": 1440
},
{
"epoch": 38.95,
"learning_rate": 0.0002878,
"loss": 0.4224,
"step": 1441
},
{
"epoch": 38.97,
"learning_rate": 0.00028799999999999995,
"loss": 0.029,
"step": 1442
},
{
"epoch": 39.0,
"learning_rate": 0.00028819999999999996,
"loss": 0.0346,
"step": 1443
},
{
"epoch": 39.03,
"learning_rate": 0.00028839999999999996,
"loss": 0.3013,
"step": 1444
},
{
"epoch": 39.05,
"learning_rate": 0.00028859999999999997,
"loss": 0.3282,
"step": 1445
},
{
"epoch": 39.08,
"learning_rate": 0.00028879999999999997,
"loss": 0.3859,
"step": 1446
},
{
"epoch": 39.11,
"learning_rate": 0.000289,
"loss": 0.3956,
"step": 1447
},
{
"epoch": 39.14,
"learning_rate": 0.0002892,
"loss": 0.4009,
"step": 1448
},
{
"epoch": 39.16,
"learning_rate": 0.0002894,
"loss": 0.1441,
"step": 1449
},
{
"epoch": 39.19,
"learning_rate": 0.0002896,
"loss": 0.2938,
"step": 1450
},
{
"epoch": 39.22,
"learning_rate": 0.00028979999999999994,
"loss": 0.0588,
"step": 1451
},
{
"epoch": 39.24,
"learning_rate": 0.00029,
"loss": 0.0703,
"step": 1452
},
{
"epoch": 39.27,
"learning_rate": 0.0002902,
"loss": 0.3778,
"step": 1453
},
{
"epoch": 39.3,
"learning_rate": 0.00029039999999999996,
"loss": 0.218,
"step": 1454
},
{
"epoch": 39.32,
"learning_rate": 0.00029059999999999996,
"loss": 0.2842,
"step": 1455
},
{
"epoch": 39.35,
"learning_rate": 0.00029079999999999997,
"loss": 0.1955,
"step": 1456
},
{
"epoch": 39.38,
"learning_rate": 0.00029099999999999997,
"loss": 0.0463,
"step": 1457
},
{
"epoch": 39.41,
"learning_rate": 0.0002912,
"loss": 0.2826,
"step": 1458
},
{
"epoch": 39.43,
"learning_rate": 0.0002914,
"loss": 0.1625,
"step": 1459
},
{
"epoch": 39.46,
"learning_rate": 0.0002916,
"loss": 0.1916,
"step": 1460
},
{
"epoch": 39.49,
"learning_rate": 0.0002918,
"loss": 0.346,
"step": 1461
},
{
"epoch": 39.51,
"learning_rate": 0.000292,
"loss": 0.3519,
"step": 1462
},
{
"epoch": 39.54,
"learning_rate": 0.00029219999999999995,
"loss": 0.1692,
"step": 1463
},
{
"epoch": 39.57,
"learning_rate": 0.0002924,
"loss": 0.1637,
"step": 1464
},
{
"epoch": 39.59,
"learning_rate": 0.0002926,
"loss": 0.4694,
"step": 1465
},
{
"epoch": 39.62,
"learning_rate": 0.00029279999999999996,
"loss": 0.1554,
"step": 1466
},
{
"epoch": 39.65,
"learning_rate": 0.00029299999999999997,
"loss": 0.0957,
"step": 1467
},
{
"epoch": 39.68,
"learning_rate": 0.00029319999999999997,
"loss": 0.448,
"step": 1468
},
{
"epoch": 39.7,
"learning_rate": 0.0002934,
"loss": 0.2844,
"step": 1469
},
{
"epoch": 39.73,
"learning_rate": 0.0002936,
"loss": 0.3577,
"step": 1470
},
{
"epoch": 39.76,
"learning_rate": 0.00029379999999999993,
"loss": 0.4636,
"step": 1471
},
{
"epoch": 39.78,
"learning_rate": 0.000294,
"loss": 0.4088,
"step": 1472
},
{
"epoch": 39.81,
"learning_rate": 0.0002942,
"loss": 0.3921,
"step": 1473
},
{
"epoch": 39.84,
"learning_rate": 0.00029439999999999995,
"loss": 0.2593,
"step": 1474
},
{
"epoch": 39.86,
"learning_rate": 0.00029459999999999995,
"loss": 0.2874,
"step": 1475
},
{
"epoch": 39.89,
"learning_rate": 0.00029479999999999996,
"loss": 0.0549,
"step": 1476
},
{
"epoch": 39.92,
"learning_rate": 0.00029499999999999996,
"loss": 0.2142,
"step": 1477
},
{
"epoch": 39.95,
"learning_rate": 0.00029519999999999997,
"loss": 0.2124,
"step": 1478
},
{
"epoch": 39.97,
"learning_rate": 0.00029539999999999997,
"loss": 0.1104,
"step": 1479
},
{
"epoch": 40.0,
"learning_rate": 0.0002956,
"loss": 0.1242,
"step": 1480
},
{
"epoch": 40.03,
"learning_rate": 0.0002958,
"loss": 0.1763,
"step": 1481
},
{
"epoch": 40.05,
"learning_rate": 0.000296,
"loss": 0.0897,
"step": 1482
},
{
"epoch": 40.08,
"learning_rate": 0.00029619999999999994,
"loss": 0.1164,
"step": 1483
},
{
"epoch": 40.11,
"learning_rate": 0.0002964,
"loss": 0.178,
"step": 1484
},
{
"epoch": 40.14,
"learning_rate": 0.0002966,
"loss": 0.1987,
"step": 1485
},
{
"epoch": 40.16,
"learning_rate": 0.00029679999999999995,
"loss": 0.1571,
"step": 1486
},
{
"epoch": 40.19,
"learning_rate": 0.00029699999999999996,
"loss": 0.0671,
"step": 1487
},
{
"epoch": 40.22,
"learning_rate": 0.00029719999999999996,
"loss": 0.2539,
"step": 1488
},
{
"epoch": 40.24,
"learning_rate": 0.00029739999999999996,
"loss": 0.2036,
"step": 1489
},
{
"epoch": 40.27,
"learning_rate": 0.00029759999999999997,
"loss": 0.3095,
"step": 1490
},
{
"epoch": 40.3,
"learning_rate": 0.0002978,
"loss": 0.1363,
"step": 1491
},
{
"epoch": 40.32,
"learning_rate": 0.000298,
"loss": 0.0843,
"step": 1492
},
{
"epoch": 40.35,
"learning_rate": 0.0002982,
"loss": 0.0966,
"step": 1493
},
{
"epoch": 40.38,
"learning_rate": 0.0002984,
"loss": 0.1462,
"step": 1494
},
{
"epoch": 40.41,
"learning_rate": 0.00029859999999999994,
"loss": 0.1395,
"step": 1495
},
{
"epoch": 40.43,
"learning_rate": 0.0002988,
"loss": 0.0485,
"step": 1496
},
{
"epoch": 40.46,
"learning_rate": 0.000299,
"loss": 0.2283,
"step": 1497
},
{
"epoch": 40.49,
"learning_rate": 0.00029919999999999995,
"loss": 0.1439,
"step": 1498
},
{
"epoch": 40.51,
"learning_rate": 0.00029939999999999996,
"loss": 0.2257,
"step": 1499
},
{
"epoch": 40.54,
"learning_rate": 0.00029959999999999996,
"loss": 0.2687,
"step": 1500
},
{
"epoch": 40.57,
"learning_rate": 0.00029979999999999997,
"loss": 0.199,
"step": 1501
},
{
"epoch": 40.59,
"learning_rate": 0.0003,
"loss": 0.2109,
"step": 1502
},
{
"epoch": 40.62,
"learning_rate": 0.0002991428571428571,
"loss": 0.2964,
"step": 1503
},
{
"epoch": 40.65,
"learning_rate": 0.00029828571428571426,
"loss": 0.245,
"step": 1504
},
{
"epoch": 40.68,
"learning_rate": 0.0002974285714285714,
"loss": 0.2008,
"step": 1505
},
{
"epoch": 40.7,
"learning_rate": 0.00029657142857142854,
"loss": 0.1585,
"step": 1506
},
{
"epoch": 40.73,
"learning_rate": 0.0002957142857142857,
"loss": 0.1585,
"step": 1507
},
{
"epoch": 40.76,
"learning_rate": 0.0002948571428571428,
"loss": 0.6041,
"step": 1508
},
{
"epoch": 40.78,
"learning_rate": 0.000294,
"loss": 0.5041,
"step": 1509
},
{
"epoch": 40.81,
"learning_rate": 0.0002931428571428571,
"loss": 0.0664,
"step": 1510
},
{
"epoch": 40.84,
"learning_rate": 0.0002922857142857143,
"loss": 0.1329,
"step": 1511
},
{
"epoch": 40.86,
"learning_rate": 0.0002914285714285714,
"loss": 0.3115,
"step": 1512
},
{
"epoch": 40.89,
"learning_rate": 0.00029057142857142856,
"loss": 0.2609,
"step": 1513
},
{
"epoch": 40.92,
"learning_rate": 0.0002897142857142857,
"loss": 0.3696,
"step": 1514
},
{
"epoch": 40.95,
"learning_rate": 0.00028885714285714284,
"loss": 0.3112,
"step": 1515
},
{
"epoch": 40.97,
"learning_rate": 0.00028799999999999995,
"loss": 0.1086,
"step": 1516
},
{
"epoch": 41.0,
"learning_rate": 0.0002871428571428571,
"loss": 0.2325,
"step": 1517
},
{
"epoch": 41.03,
"learning_rate": 0.00028628571428571424,
"loss": 0.2148,
"step": 1518
},
{
"epoch": 41.05,
"learning_rate": 0.0002854285714285714,
"loss": 0.2591,
"step": 1519
},
{
"epoch": 41.08,
"learning_rate": 0.00028457142857142857,
"loss": 0.0628,
"step": 1520
},
{
"epoch": 41.11,
"learning_rate": 0.0002837142857142857,
"loss": 0.1892,
"step": 1521
},
{
"epoch": 41.14,
"learning_rate": 0.0002828571428571428,
"loss": 0.2154,
"step": 1522
},
{
"epoch": 41.16,
"learning_rate": 0.00028199999999999997,
"loss": 0.2709,
"step": 1523
},
{
"epoch": 41.19,
"learning_rate": 0.00028114285714285714,
"loss": 0.3161,
"step": 1524
},
{
"epoch": 41.22,
"learning_rate": 0.00028028571428571425,
"loss": 0.3503,
"step": 1525
},
{
"epoch": 41.24,
"learning_rate": 0.00027942857142857137,
"loss": 0.2608,
"step": 1526
},
{
"epoch": 41.27,
"learning_rate": 0.00027857142857142854,
"loss": 0.5886,
"step": 1527
},
{
"epoch": 41.3,
"learning_rate": 0.0002777142857142857,
"loss": 0.2455,
"step": 1528
},
{
"epoch": 41.32,
"learning_rate": 0.0002768571428571428,
"loss": 0.1259,
"step": 1529
},
{
"epoch": 41.35,
"learning_rate": 0.000276,
"loss": 0.2719,
"step": 1530
},
{
"epoch": 41.38,
"learning_rate": 0.0002751428571428571,
"loss": 0.0755,
"step": 1531
},
{
"epoch": 41.41,
"learning_rate": 0.00027428571428571427,
"loss": 0.2419,
"step": 1532
},
{
"epoch": 41.43,
"learning_rate": 0.00027342857142857144,
"loss": 0.1197,
"step": 1533
},
{
"epoch": 41.46,
"learning_rate": 0.00027257142857142855,
"loss": 0.3742,
"step": 1534
},
{
"epoch": 41.49,
"learning_rate": 0.00027171428571428567,
"loss": 0.3098,
"step": 1535
},
{
"epoch": 41.51,
"learning_rate": 0.00027085714285714283,
"loss": 0.2366,
"step": 1536
},
{
"epoch": 41.54,
"learning_rate": 0.00027,
"loss": 0.1359,
"step": 1537
},
{
"epoch": 41.57,
"learning_rate": 0.0002691428571428571,
"loss": 0.0608,
"step": 1538
},
{
"epoch": 41.59,
"learning_rate": 0.00026828571428571423,
"loss": 0.1513,
"step": 1539
},
{
"epoch": 41.62,
"learning_rate": 0.0002674285714285714,
"loss": 0.0523,
"step": 1540
},
{
"epoch": 41.65,
"learning_rate": 0.00026657142857142857,
"loss": 0.0867,
"step": 1541
},
{
"epoch": 41.68,
"learning_rate": 0.0002657142857142857,
"loss": 0.0567,
"step": 1542
},
{
"epoch": 41.7,
"learning_rate": 0.00026485714285714285,
"loss": 0.2741,
"step": 1543
},
{
"epoch": 41.73,
"learning_rate": 0.00026399999999999997,
"loss": 0.0638,
"step": 1544
},
{
"epoch": 41.76,
"learning_rate": 0.00026314285714285713,
"loss": 0.1622,
"step": 1545
},
{
"epoch": 41.78,
"learning_rate": 0.0002622857142857143,
"loss": 0.2279,
"step": 1546
},
{
"epoch": 41.81,
"learning_rate": 0.0002614285714285714,
"loss": 0.1824,
"step": 1547
},
{
"epoch": 41.84,
"learning_rate": 0.00026057142857142853,
"loss": 0.2644,
"step": 1548
},
{
"epoch": 41.86,
"learning_rate": 0.0002597142857142857,
"loss": 0.1405,
"step": 1549
},
{
"epoch": 41.89,
"learning_rate": 0.0002588571428571428,
"loss": 0.2183,
"step": 1550
},
{
"epoch": 41.92,
"learning_rate": 0.000258,
"loss": 0.1939,
"step": 1551
},
{
"epoch": 41.95,
"learning_rate": 0.0002571428571428571,
"loss": 0.3432,
"step": 1552
},
{
"epoch": 41.97,
"learning_rate": 0.00025628571428571427,
"loss": 0.0464,
"step": 1553
},
{
"epoch": 42.0,
"learning_rate": 0.0002554285714285714,
"loss": 0.2959,
"step": 1554
},
{
"epoch": 42.03,
"learning_rate": 0.00025457142857142855,
"loss": 0.3245,
"step": 1555
},
{
"epoch": 42.05,
"learning_rate": 0.0002537142857142857,
"loss": 0.1813,
"step": 1556
},
{
"epoch": 42.08,
"learning_rate": 0.00025285714285714283,
"loss": 0.1694,
"step": 1557
},
{
"epoch": 42.11,
"learning_rate": 0.00025199999999999995,
"loss": 0.0765,
"step": 1558
},
{
"epoch": 42.14,
"learning_rate": 0.0002511428571428571,
"loss": 0.1116,
"step": 1559
},
{
"epoch": 42.16,
"learning_rate": 0.0002502857142857143,
"loss": 0.0871,
"step": 1560
},
{
"epoch": 42.19,
"learning_rate": 0.0002494285714285714,
"loss": 0.1192,
"step": 1561
},
{
"epoch": 42.22,
"learning_rate": 0.00024857142857142857,
"loss": 0.0862,
"step": 1562
},
{
"epoch": 42.24,
"learning_rate": 0.0002477142857142857,
"loss": 0.2583,
"step": 1563
},
{
"epoch": 42.27,
"learning_rate": 0.00024685714285714285,
"loss": 0.1217,
"step": 1564
},
{
"epoch": 42.3,
"learning_rate": 0.00024599999999999996,
"loss": 0.1456,
"step": 1565
},
{
"epoch": 42.32,
"learning_rate": 0.00024514285714285713,
"loss": 0.235,
"step": 1566
},
{
"epoch": 42.35,
"learning_rate": 0.00024428571428571424,
"loss": 0.2004,
"step": 1567
},
{
"epoch": 42.38,
"learning_rate": 0.00024342857142857139,
"loss": 0.333,
"step": 1568
},
{
"epoch": 42.41,
"learning_rate": 0.00024257142857142855,
"loss": 0.3184,
"step": 1569
},
{
"epoch": 42.43,
"learning_rate": 0.0002417142857142857,
"loss": 0.0285,
"step": 1570
},
{
"epoch": 42.46,
"learning_rate": 0.00024085714285714284,
"loss": 0.2713,
"step": 1571
},
{
"epoch": 42.49,
"learning_rate": 0.00023999999999999998,
"loss": 0.1802,
"step": 1572
},
{
"epoch": 42.51,
"learning_rate": 0.00023914285714285712,
"loss": 0.1753,
"step": 1573
},
{
"epoch": 42.54,
"learning_rate": 0.00023828571428571426,
"loss": 0.3024,
"step": 1574
},
{
"epoch": 42.57,
"learning_rate": 0.00023742857142857143,
"loss": 0.0876,
"step": 1575
},
{
"epoch": 42.59,
"learning_rate": 0.00023657142857142854,
"loss": 0.1785,
"step": 1576
},
{
"epoch": 42.62,
"learning_rate": 0.00023571428571428569,
"loss": 0.0308,
"step": 1577
},
{
"epoch": 42.65,
"learning_rate": 0.00023485714285714283,
"loss": 0.191,
"step": 1578
},
{
"epoch": 42.68,
"learning_rate": 0.000234,
"loss": 0.2837,
"step": 1579
},
{
"epoch": 42.7,
"learning_rate": 0.0002331428571428571,
"loss": 0.0639,
"step": 1580
},
{
"epoch": 42.73,
"learning_rate": 0.00023228571428571425,
"loss": 0.0454,
"step": 1581
},
{
"epoch": 42.76,
"learning_rate": 0.00023142857142857142,
"loss": 0.2392,
"step": 1582
},
{
"epoch": 42.78,
"learning_rate": 0.00023057142857142856,
"loss": 0.1495,
"step": 1583
},
{
"epoch": 42.81,
"learning_rate": 0.00022971428571428568,
"loss": 0.3006,
"step": 1584
},
{
"epoch": 42.84,
"learning_rate": 0.00022885714285714284,
"loss": 0.1542,
"step": 1585
},
{
"epoch": 42.86,
"learning_rate": 0.00022799999999999999,
"loss": 0.352,
"step": 1586
},
{
"epoch": 42.89,
"learning_rate": 0.00022714285714285713,
"loss": 0.1551,
"step": 1587
},
{
"epoch": 42.92,
"learning_rate": 0.00022628571428571427,
"loss": 0.1549,
"step": 1588
},
{
"epoch": 42.95,
"learning_rate": 0.0002254285714285714,
"loss": 0.1468,
"step": 1589
},
{
"epoch": 42.97,
"learning_rate": 0.00022457142857142855,
"loss": 0.023,
"step": 1590
},
{
"epoch": 43.0,
"learning_rate": 0.0002237142857142857,
"loss": 0.0546,
"step": 1591
},
{
"epoch": 43.03,
"learning_rate": 0.00022285714285714283,
"loss": 0.1419,
"step": 1592
},
{
"epoch": 43.05,
"learning_rate": 0.00022199999999999998,
"loss": 0.0632,
"step": 1593
},
{
"epoch": 43.08,
"learning_rate": 0.00022114285714285712,
"loss": 0.1506,
"step": 1594
},
{
"epoch": 43.11,
"learning_rate": 0.00022028571428571429,
"loss": 0.1272,
"step": 1595
},
{
"epoch": 43.14,
"learning_rate": 0.0002194285714285714,
"loss": 0.0839,
"step": 1596
},
{
"epoch": 43.16,
"learning_rate": 0.00021857142857142854,
"loss": 0.1086,
"step": 1597
},
{
"epoch": 43.19,
"learning_rate": 0.0002177142857142857,
"loss": 0.1817,
"step": 1598
},
{
"epoch": 43.22,
"learning_rate": 0.00021685714285714285,
"loss": 0.1787,
"step": 1599
},
{
"epoch": 43.24,
"learning_rate": 0.00021599999999999996,
"loss": 0.2298,
"step": 1600
},
{
"epoch": 43.24,
"eval_accuracy": 0.8743136058572301,
"eval_f1": 0.8732261368709694,
"eval_loss": 0.5200848579406738,
"eval_runtime": 124.4236,
"eval_samples_per_second": 13.173,
"eval_steps_per_second": 0.828,
"step": 1600
},
{
"epoch": 43.27,
"learning_rate": 0.00021514285714285713,
"loss": 0.1427,
"step": 1601
},
{
"epoch": 43.3,
"learning_rate": 0.00021428571428571427,
"loss": 0.0483,
"step": 1602
},
{
"epoch": 43.32,
"learning_rate": 0.00021342857142857142,
"loss": 0.1524,
"step": 1603
},
{
"epoch": 43.35,
"learning_rate": 0.00021257142857142853,
"loss": 0.2113,
"step": 1604
},
{
"epoch": 43.38,
"learning_rate": 0.0002117142857142857,
"loss": 0.2367,
"step": 1605
},
{
"epoch": 43.41,
"learning_rate": 0.00021085714285714284,
"loss": 0.2941,
"step": 1606
},
{
"epoch": 43.43,
"learning_rate": 0.00020999999999999998,
"loss": 0.0409,
"step": 1607
},
{
"epoch": 43.46,
"learning_rate": 0.00020914285714285712,
"loss": 0.1859,
"step": 1608
},
{
"epoch": 43.49,
"learning_rate": 0.00020828571428571426,
"loss": 0.0342,
"step": 1609
},
{
"epoch": 43.51,
"learning_rate": 0.0002074285714285714,
"loss": 0.0355,
"step": 1610
},
{
"epoch": 43.54,
"learning_rate": 0.00020657142857142857,
"loss": 0.0844,
"step": 1611
},
{
"epoch": 43.57,
"learning_rate": 0.0002057142857142857,
"loss": 0.0246,
"step": 1612
},
{
"epoch": 43.59,
"learning_rate": 0.00020485714285714283,
"loss": 0.1246,
"step": 1613
},
{
"epoch": 43.62,
"learning_rate": 0.000204,
"loss": 0.0165,
"step": 1614
},
{
"epoch": 43.65,
"learning_rate": 0.00020314285714285714,
"loss": 0.1248,
"step": 1615
},
{
"epoch": 43.68,
"learning_rate": 0.00020228571428571425,
"loss": 0.2327,
"step": 1616
},
{
"epoch": 43.7,
"learning_rate": 0.0002014285714285714,
"loss": 0.0406,
"step": 1617
},
{
"epoch": 43.73,
"learning_rate": 0.00020057142857142856,
"loss": 0.0428,
"step": 1618
},
{
"epoch": 43.76,
"learning_rate": 0.0001997142857142857,
"loss": 0.2236,
"step": 1619
},
{
"epoch": 43.78,
"learning_rate": 0.00019885714285714282,
"loss": 0.0845,
"step": 1620
},
{
"epoch": 43.81,
"learning_rate": 0.000198,
"loss": 0.2605,
"step": 1621
},
{
"epoch": 43.84,
"learning_rate": 0.00019714285714285713,
"loss": 0.2268,
"step": 1622
},
{
"epoch": 43.86,
"learning_rate": 0.00019628571428571427,
"loss": 0.1439,
"step": 1623
},
{
"epoch": 43.89,
"learning_rate": 0.0001954285714285714,
"loss": 0.1332,
"step": 1624
},
{
"epoch": 43.92,
"learning_rate": 0.00019457142857142855,
"loss": 0.2402,
"step": 1625
},
{
"epoch": 43.95,
"learning_rate": 0.0001937142857142857,
"loss": 0.0895,
"step": 1626
},
{
"epoch": 43.97,
"learning_rate": 0.00019285714285714286,
"loss": 0.0458,
"step": 1627
},
{
"epoch": 44.0,
"learning_rate": 0.00019199999999999998,
"loss": 0.1726,
"step": 1628
},
{
"epoch": 44.03,
"learning_rate": 0.00019114285714285712,
"loss": 0.0508,
"step": 1629
},
{
"epoch": 44.05,
"learning_rate": 0.00019028571428571426,
"loss": 0.22,
"step": 1630
},
{
"epoch": 44.08,
"learning_rate": 0.00018942857142857143,
"loss": 0.0987,
"step": 1631
},
{
"epoch": 44.11,
"learning_rate": 0.00018857142857142854,
"loss": 0.0986,
"step": 1632
},
{
"epoch": 44.14,
"learning_rate": 0.00018771428571428568,
"loss": 0.16,
"step": 1633
},
{
"epoch": 44.16,
"learning_rate": 0.00018685714285714285,
"loss": 0.2314,
"step": 1634
},
{
"epoch": 44.19,
"learning_rate": 0.000186,
"loss": 0.1361,
"step": 1635
},
{
"epoch": 44.22,
"learning_rate": 0.0001851428571428571,
"loss": 0.0599,
"step": 1636
},
{
"epoch": 44.24,
"learning_rate": 0.00018428571428571428,
"loss": 0.251,
"step": 1637
},
{
"epoch": 44.27,
"learning_rate": 0.00018342857142857142,
"loss": 0.1844,
"step": 1638
},
{
"epoch": 44.3,
"learning_rate": 0.00018257142857142853,
"loss": 0.0761,
"step": 1639
},
{
"epoch": 44.32,
"learning_rate": 0.0001817142857142857,
"loss": 0.1035,
"step": 1640
},
{
"epoch": 44.35,
"learning_rate": 0.00018085714285714284,
"loss": 0.1906,
"step": 1641
},
{
"epoch": 44.38,
"learning_rate": 0.00017999999999999998,
"loss": 0.1309,
"step": 1642
},
{
"epoch": 44.41,
"learning_rate": 0.00017914285714285715,
"loss": 0.0374,
"step": 1643
},
{
"epoch": 44.43,
"learning_rate": 0.00017828571428571427,
"loss": 0.0079,
"step": 1644
},
{
"epoch": 44.46,
"learning_rate": 0.0001774285714285714,
"loss": 0.032,
"step": 1645
},
{
"epoch": 44.49,
"learning_rate": 0.00017657142857142855,
"loss": 0.0879,
"step": 1646
},
{
"epoch": 44.51,
"learning_rate": 0.00017571428571428572,
"loss": 0.0346,
"step": 1647
},
{
"epoch": 44.54,
"learning_rate": 0.00017485714285714283,
"loss": 0.042,
"step": 1648
},
{
"epoch": 44.57,
"learning_rate": 0.00017399999999999997,
"loss": 0.1421,
"step": 1649
},
{
"epoch": 44.59,
"learning_rate": 0.00017314285714285714,
"loss": 0.1587,
"step": 1650
},
{
"epoch": 44.62,
"learning_rate": 0.00017228571428571428,
"loss": 0.0408,
"step": 1651
},
{
"epoch": 44.65,
"learning_rate": 0.0001714285714285714,
"loss": 0.1064,
"step": 1652
},
{
"epoch": 44.68,
"learning_rate": 0.00017057142857142857,
"loss": 0.231,
"step": 1653
},
{
"epoch": 44.7,
"learning_rate": 0.0001697142857142857,
"loss": 0.323,
"step": 1654
},
{
"epoch": 44.73,
"learning_rate": 0.00016885714285714282,
"loss": 0.2473,
"step": 1655
},
{
"epoch": 44.76,
"learning_rate": 0.000168,
"loss": 0.3009,
"step": 1656
},
{
"epoch": 44.78,
"learning_rate": 0.00016714285714285713,
"loss": 0.0899,
"step": 1657
},
{
"epoch": 44.81,
"learning_rate": 0.00016628571428571427,
"loss": 0.3443,
"step": 1658
},
{
"epoch": 44.84,
"learning_rate": 0.0001654285714285714,
"loss": 0.0393,
"step": 1659
},
{
"epoch": 44.86,
"learning_rate": 0.00016457142857142856,
"loss": 0.0226,
"step": 1660
},
{
"epoch": 44.89,
"learning_rate": 0.0001637142857142857,
"loss": 0.0651,
"step": 1661
},
{
"epoch": 44.92,
"learning_rate": 0.00016285714285714284,
"loss": 0.0491,
"step": 1662
},
{
"epoch": 44.95,
"learning_rate": 0.000162,
"loss": 0.2542,
"step": 1663
},
{
"epoch": 44.97,
"learning_rate": 0.00016114285714285712,
"loss": 0.3146,
"step": 1664
},
{
"epoch": 45.0,
"learning_rate": 0.00016028571428571426,
"loss": 0.2383,
"step": 1665
},
{
"epoch": 45.03,
"learning_rate": 0.00015942857142857143,
"loss": 0.1795,
"step": 1666
},
{
"epoch": 45.05,
"learning_rate": 0.00015857142857142857,
"loss": 0.0426,
"step": 1667
},
{
"epoch": 45.08,
"learning_rate": 0.0001577142857142857,
"loss": 0.0131,
"step": 1668
},
{
"epoch": 45.11,
"learning_rate": 0.00015685714285714286,
"loss": 0.1698,
"step": 1669
},
{
"epoch": 45.14,
"learning_rate": 0.000156,
"loss": 0.3096,
"step": 1670
},
{
"epoch": 45.16,
"learning_rate": 0.0001551428571428571,
"loss": 0.0188,
"step": 1671
},
{
"epoch": 45.19,
"learning_rate": 0.00015428571428571425,
"loss": 0.0455,
"step": 1672
},
{
"epoch": 45.22,
"learning_rate": 0.00015342857142857142,
"loss": 0.0791,
"step": 1673
},
{
"epoch": 45.24,
"learning_rate": 0.00015257142857142856,
"loss": 0.0248,
"step": 1674
},
{
"epoch": 45.27,
"learning_rate": 0.00015171428571428568,
"loss": 0.0797,
"step": 1675
},
{
"epoch": 45.3,
"learning_rate": 0.00015085714285714285,
"loss": 0.045,
"step": 1676
},
{
"epoch": 45.32,
"learning_rate": 0.00015,
"loss": 0.0271,
"step": 1677
},
{
"epoch": 45.35,
"learning_rate": 0.00014914285714285713,
"loss": 0.0223,
"step": 1678
},
{
"epoch": 45.38,
"learning_rate": 0.00014828571428571427,
"loss": 0.0213,
"step": 1679
},
{
"epoch": 45.41,
"learning_rate": 0.0001474285714285714,
"loss": 0.0204,
"step": 1680
},
{
"epoch": 45.43,
"learning_rate": 0.00014657142857142855,
"loss": 0.0633,
"step": 1681
},
{
"epoch": 45.46,
"learning_rate": 0.0001457142857142857,
"loss": 0.0525,
"step": 1682
},
{
"epoch": 45.49,
"learning_rate": 0.00014485714285714286,
"loss": 0.1086,
"step": 1683
},
{
"epoch": 45.51,
"learning_rate": 0.00014399999999999998,
"loss": 0.1499,
"step": 1684
},
{
"epoch": 45.54,
"learning_rate": 0.00014314285714285712,
"loss": 0.0787,
"step": 1685
},
{
"epoch": 45.57,
"learning_rate": 0.00014228571428571429,
"loss": 0.0551,
"step": 1686
},
{
"epoch": 45.59,
"learning_rate": 0.0001414285714285714,
"loss": 0.1821,
"step": 1687
},
{
"epoch": 45.62,
"learning_rate": 0.00014057142857142857,
"loss": 0.0611,
"step": 1688
},
{
"epoch": 45.65,
"learning_rate": 0.00013971428571428568,
"loss": 0.0096,
"step": 1689
},
{
"epoch": 45.68,
"learning_rate": 0.00013885714285714285,
"loss": 0.0624,
"step": 1690
},
{
"epoch": 45.7,
"learning_rate": 0.000138,
"loss": 0.1372,
"step": 1691
},
{
"epoch": 45.73,
"learning_rate": 0.00013714285714285713,
"loss": 0.1189,
"step": 1692
},
{
"epoch": 45.76,
"learning_rate": 0.00013628571428571428,
"loss": 0.0148,
"step": 1693
},
{
"epoch": 45.78,
"learning_rate": 0.00013542857142857142,
"loss": 0.0464,
"step": 1694
},
{
"epoch": 45.81,
"learning_rate": 0.00013457142857142856,
"loss": 0.0666,
"step": 1695
},
{
"epoch": 45.84,
"learning_rate": 0.0001337142857142857,
"loss": 0.1203,
"step": 1696
},
{
"epoch": 45.86,
"learning_rate": 0.00013285714285714284,
"loss": 0.0538,
"step": 1697
},
{
"epoch": 45.89,
"learning_rate": 0.00013199999999999998,
"loss": 0.1381,
"step": 1698
},
{
"epoch": 45.92,
"learning_rate": 0.00013114285714285715,
"loss": 0.1174,
"step": 1699
},
{
"epoch": 45.95,
"learning_rate": 0.00013028571428571427,
"loss": 0.1471,
"step": 1700
},
{
"epoch": 45.97,
"learning_rate": 0.0001294285714285714,
"loss": 0.101,
"step": 1701
},
{
"epoch": 46.0,
"learning_rate": 0.00012857142857142855,
"loss": 0.0095,
"step": 1702
},
{
"epoch": 46.03,
"learning_rate": 0.0001277142857142857,
"loss": 0.0295,
"step": 1703
},
{
"epoch": 46.05,
"learning_rate": 0.00012685714285714286,
"loss": 0.0189,
"step": 1704
},
{
"epoch": 46.08,
"learning_rate": 0.00012599999999999997,
"loss": 0.2608,
"step": 1705
},
{
"epoch": 46.11,
"learning_rate": 0.00012514285714285714,
"loss": 0.1727,
"step": 1706
},
{
"epoch": 46.14,
"learning_rate": 0.00012428571428571428,
"loss": 0.1127,
"step": 1707
},
{
"epoch": 46.16,
"learning_rate": 0.00012342857142857142,
"loss": 0.2989,
"step": 1708
},
{
"epoch": 46.19,
"learning_rate": 0.00012257142857142857,
"loss": 0.0245,
"step": 1709
},
{
"epoch": 46.22,
"learning_rate": 0.00012171428571428569,
"loss": 0.1563,
"step": 1710
},
{
"epoch": 46.24,
"learning_rate": 0.00012085714285714285,
"loss": 0.0039,
"step": 1711
},
{
"epoch": 46.27,
"learning_rate": 0.00011999999999999999,
"loss": 0.174,
"step": 1712
},
{
"epoch": 46.3,
"learning_rate": 0.00011914285714285713,
"loss": 0.0356,
"step": 1713
},
{
"epoch": 46.32,
"learning_rate": 0.00011828571428571427,
"loss": 0.0504,
"step": 1714
},
{
"epoch": 46.35,
"learning_rate": 0.00011742857142857141,
"loss": 0.0071,
"step": 1715
},
{
"epoch": 46.38,
"learning_rate": 0.00011657142857142856,
"loss": 0.0366,
"step": 1716
},
{
"epoch": 46.41,
"learning_rate": 0.00011571428571428571,
"loss": 0.0054,
"step": 1717
},
{
"epoch": 46.43,
"learning_rate": 0.00011485714285714284,
"loss": 0.0078,
"step": 1718
},
{
"epoch": 46.46,
"learning_rate": 0.00011399999999999999,
"loss": 0.0645,
"step": 1719
},
{
"epoch": 46.49,
"learning_rate": 0.00011314285714285713,
"loss": 0.131,
"step": 1720
},
{
"epoch": 46.51,
"learning_rate": 0.00011228571428571428,
"loss": 0.0095,
"step": 1721
},
{
"epoch": 46.54,
"learning_rate": 0.00011142857142857142,
"loss": 0.0446,
"step": 1722
},
{
"epoch": 46.57,
"learning_rate": 0.00011057142857142856,
"loss": 0.0276,
"step": 1723
},
{
"epoch": 46.59,
"learning_rate": 0.0001097142857142857,
"loss": 0.0741,
"step": 1724
},
{
"epoch": 46.62,
"learning_rate": 0.00010885714285714285,
"loss": 0.1191,
"step": 1725
},
{
"epoch": 46.65,
"learning_rate": 0.00010799999999999998,
"loss": 0.1225,
"step": 1726
},
{
"epoch": 46.68,
"learning_rate": 0.00010714285714285714,
"loss": 0.0115,
"step": 1727
},
{
"epoch": 46.7,
"learning_rate": 0.00010628571428571427,
"loss": 0.1836,
"step": 1728
},
{
"epoch": 46.73,
"learning_rate": 0.00010542857142857142,
"loss": 0.0141,
"step": 1729
},
{
"epoch": 46.76,
"learning_rate": 0.00010457142857142856,
"loss": 0.0497,
"step": 1730
},
{
"epoch": 46.78,
"learning_rate": 0.0001037142857142857,
"loss": 0.0104,
"step": 1731
},
{
"epoch": 46.81,
"learning_rate": 0.00010285714285714284,
"loss": 0.0094,
"step": 1732
},
{
"epoch": 46.84,
"learning_rate": 0.000102,
"loss": 0.0735,
"step": 1733
},
{
"epoch": 46.86,
"learning_rate": 0.00010114285714285713,
"loss": 0.0048,
"step": 1734
},
{
"epoch": 46.89,
"learning_rate": 0.00010028571428571428,
"loss": 0.125,
"step": 1735
},
{
"epoch": 46.92,
"learning_rate": 9.942857142857141e-05,
"loss": 0.0046,
"step": 1736
},
{
"epoch": 46.95,
"learning_rate": 9.857142857142856e-05,
"loss": 0.0275,
"step": 1737
},
{
"epoch": 46.97,
"learning_rate": 9.77142857142857e-05,
"loss": 0.003,
"step": 1738
},
{
"epoch": 47.0,
"learning_rate": 9.685714285714285e-05,
"loss": 0.1494,
"step": 1739
},
{
"epoch": 47.03,
"learning_rate": 9.599999999999999e-05,
"loss": 0.0106,
"step": 1740
},
{
"epoch": 47.05,
"learning_rate": 9.514285714285713e-05,
"loss": 0.1743,
"step": 1741
},
{
"epoch": 47.08,
"learning_rate": 9.428571428571427e-05,
"loss": 0.0366,
"step": 1742
},
{
"epoch": 47.11,
"learning_rate": 9.342857142857143e-05,
"loss": 0.0393,
"step": 1743
},
{
"epoch": 47.14,
"learning_rate": 9.257142857142855e-05,
"loss": 0.0031,
"step": 1744
},
{
"epoch": 47.16,
"learning_rate": 9.171428571428571e-05,
"loss": 0.0132,
"step": 1745
},
{
"epoch": 47.19,
"learning_rate": 9.085714285714285e-05,
"loss": 0.086,
"step": 1746
},
{
"epoch": 47.22,
"learning_rate": 8.999999999999999e-05,
"loss": 0.0524,
"step": 1747
},
{
"epoch": 47.24,
"learning_rate": 8.914285714285713e-05,
"loss": 0.1834,
"step": 1748
},
{
"epoch": 47.27,
"learning_rate": 8.828571428571427e-05,
"loss": 0.0187,
"step": 1749
},
{
"epoch": 47.3,
"learning_rate": 8.742857142857142e-05,
"loss": 0.0042,
"step": 1750
},
{
"epoch": 47.32,
"learning_rate": 8.657142857142857e-05,
"loss": 0.1823,
"step": 1751
},
{
"epoch": 47.35,
"learning_rate": 8.57142857142857e-05,
"loss": 0.1087,
"step": 1752
},
{
"epoch": 47.38,
"learning_rate": 8.485714285714285e-05,
"loss": 0.0886,
"step": 1753
},
{
"epoch": 47.41,
"learning_rate": 8.4e-05,
"loss": 0.0873,
"step": 1754
},
{
"epoch": 47.43,
"learning_rate": 8.314285714285714e-05,
"loss": 0.0051,
"step": 1755
},
{
"epoch": 47.46,
"learning_rate": 8.228571428571428e-05,
"loss": 0.0243,
"step": 1756
},
{
"epoch": 47.49,
"learning_rate": 8.142857142857142e-05,
"loss": 0.1264,
"step": 1757
},
{
"epoch": 47.51,
"learning_rate": 8.057142857142856e-05,
"loss": 0.1129,
"step": 1758
},
{
"epoch": 47.54,
"learning_rate": 7.971428571428572e-05,
"loss": 0.0771,
"step": 1759
},
{
"epoch": 47.57,
"learning_rate": 7.885714285714284e-05,
"loss": 0.102,
"step": 1760
},
{
"epoch": 47.59,
"learning_rate": 7.8e-05,
"loss": 0.0106,
"step": 1761
},
{
"epoch": 47.62,
"learning_rate": 7.714285714285713e-05,
"loss": 0.1018,
"step": 1762
},
{
"epoch": 47.65,
"learning_rate": 7.628571428571428e-05,
"loss": 0.0148,
"step": 1763
},
{
"epoch": 47.68,
"learning_rate": 7.542857142857142e-05,
"loss": 0.1169,
"step": 1764
},
{
"epoch": 47.7,
"learning_rate": 7.457142857142856e-05,
"loss": 0.0946,
"step": 1765
},
{
"epoch": 47.73,
"learning_rate": 7.37142857142857e-05,
"loss": 0.0465,
"step": 1766
},
{
"epoch": 47.76,
"learning_rate": 7.285714285714285e-05,
"loss": 0.0227,
"step": 1767
},
{
"epoch": 47.78,
"learning_rate": 7.199999999999999e-05,
"loss": 0.0085,
"step": 1768
},
{
"epoch": 47.81,
"learning_rate": 7.114285714285714e-05,
"loss": 0.0181,
"step": 1769
},
{
"epoch": 47.84,
"learning_rate": 7.028571428571428e-05,
"loss": 0.0199,
"step": 1770
},
{
"epoch": 47.86,
"learning_rate": 6.942857142857143e-05,
"loss": 0.0361,
"step": 1771
},
{
"epoch": 47.89,
"learning_rate": 6.857142857142857e-05,
"loss": 0.0867,
"step": 1772
},
{
"epoch": 47.92,
"learning_rate": 6.771428571428571e-05,
"loss": 0.1125,
"step": 1773
},
{
"epoch": 47.95,
"learning_rate": 6.685714285714285e-05,
"loss": 0.1302,
"step": 1774
},
{
"epoch": 47.97,
"learning_rate": 6.599999999999999e-05,
"loss": 0.0043,
"step": 1775
},
{
"epoch": 48.0,
"learning_rate": 6.514285714285713e-05,
"loss": 0.0605,
"step": 1776
},
{
"epoch": 48.03,
"learning_rate": 6.428571428571427e-05,
"loss": 0.0149,
"step": 1777
},
{
"epoch": 48.05,
"learning_rate": 6.342857142857143e-05,
"loss": 0.0035,
"step": 1778
},
{
"epoch": 48.08,
"learning_rate": 6.257142857142857e-05,
"loss": 0.0064,
"step": 1779
},
{
"epoch": 48.11,
"learning_rate": 6.171428571428571e-05,
"loss": 0.1075,
"step": 1780
},
{
"epoch": 48.14,
"learning_rate": 6.0857142857142847e-05,
"loss": 0.0248,
"step": 1781
},
{
"epoch": 48.16,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.0484,
"step": 1782
},
{
"epoch": 48.19,
"learning_rate": 5.9142857142857136e-05,
"loss": 0.0467,
"step": 1783
},
{
"epoch": 48.22,
"learning_rate": 5.828571428571428e-05,
"loss": 0.0056,
"step": 1784
},
{
"epoch": 48.24,
"learning_rate": 5.742857142857142e-05,
"loss": 0.0327,
"step": 1785
},
{
"epoch": 48.27,
"learning_rate": 5.657142857142857e-05,
"loss": 0.0338,
"step": 1786
},
{
"epoch": 48.3,
"learning_rate": 5.571428571428571e-05,
"loss": 0.1408,
"step": 1787
},
{
"epoch": 48.32,
"learning_rate": 5.485714285714285e-05,
"loss": 0.0056,
"step": 1788
},
{
"epoch": 48.35,
"learning_rate": 5.399999999999999e-05,
"loss": 0.0051,
"step": 1789
},
{
"epoch": 48.38,
"learning_rate": 5.314285714285713e-05,
"loss": 0.0137,
"step": 1790
},
{
"epoch": 48.41,
"learning_rate": 5.228571428571428e-05,
"loss": 0.0274,
"step": 1791
},
{
"epoch": 48.43,
"learning_rate": 5.142857142857142e-05,
"loss": 0.0102,
"step": 1792
},
{
"epoch": 48.46,
"learning_rate": 5.0571428571428564e-05,
"loss": 0.007,
"step": 1793
},
{
"epoch": 48.49,
"learning_rate": 4.9714285714285705e-05,
"loss": 0.0201,
"step": 1794
},
{
"epoch": 48.51,
"learning_rate": 4.885714285714285e-05,
"loss": 0.0178,
"step": 1795
},
{
"epoch": 48.54,
"learning_rate": 4.7999999999999994e-05,
"loss": 0.0054,
"step": 1796
},
{
"epoch": 48.57,
"learning_rate": 4.7142857142857136e-05,
"loss": 0.0046,
"step": 1797
},
{
"epoch": 48.59,
"learning_rate": 4.628571428571428e-05,
"loss": 0.0045,
"step": 1798
},
{
"epoch": 48.62,
"learning_rate": 4.5428571428571425e-05,
"loss": 0.0143,
"step": 1799
},
{
"epoch": 48.65,
"learning_rate": 4.457142857142857e-05,
"loss": 0.0052,
"step": 1800
},
{
"epoch": 48.65,
"eval_accuracy": 0.9103111653447223,
"eval_f1": 0.9105892955087709,
"eval_loss": 0.38255956768989563,
"eval_runtime": 136.264,
"eval_samples_per_second": 12.028,
"eval_steps_per_second": 0.756,
"step": 1800
},
{
"epoch": 48.68,
"learning_rate": 4.371428571428571e-05,
"loss": 0.0028,
"step": 1801
},
{
"epoch": 48.7,
"learning_rate": 4.285714285714285e-05,
"loss": 0.1272,
"step": 1802
},
{
"epoch": 48.73,
"learning_rate": 4.2e-05,
"loss": 0.0462,
"step": 1803
},
{
"epoch": 48.76,
"learning_rate": 4.114285714285714e-05,
"loss": 0.0035,
"step": 1804
},
{
"epoch": 48.78,
"learning_rate": 4.028571428571428e-05,
"loss": 0.0102,
"step": 1805
},
{
"epoch": 48.81,
"learning_rate": 3.942857142857142e-05,
"loss": 0.1071,
"step": 1806
},
{
"epoch": 48.84,
"learning_rate": 3.857142857142856e-05,
"loss": 0.0295,
"step": 1807
},
{
"epoch": 48.86,
"learning_rate": 3.771428571428571e-05,
"loss": 0.0526,
"step": 1808
},
{
"epoch": 48.89,
"learning_rate": 3.685714285714285e-05,
"loss": 0.0653,
"step": 1809
},
{
"epoch": 48.92,
"learning_rate": 3.5999999999999994e-05,
"loss": 0.0085,
"step": 1810
},
{
"epoch": 48.95,
"learning_rate": 3.514285714285714e-05,
"loss": 0.0471,
"step": 1811
},
{
"epoch": 48.97,
"learning_rate": 3.4285714285714284e-05,
"loss": 0.0031,
"step": 1812
},
{
"epoch": 49.0,
"learning_rate": 3.3428571428571425e-05,
"loss": 0.0024,
"step": 1813
},
{
"epoch": 49.03,
"learning_rate": 3.2571428571428566e-05,
"loss": 0.022,
"step": 1814
},
{
"epoch": 49.05,
"learning_rate": 3.1714285714285715e-05,
"loss": 0.004,
"step": 1815
},
{
"epoch": 49.08,
"learning_rate": 3.0857142857142856e-05,
"loss": 0.0029,
"step": 1816
},
{
"epoch": 49.11,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.0032,
"step": 1817
},
{
"epoch": 49.14,
"learning_rate": 2.914285714285714e-05,
"loss": 0.0056,
"step": 1818
},
{
"epoch": 49.16,
"learning_rate": 2.8285714285714284e-05,
"loss": 0.0471,
"step": 1819
},
{
"epoch": 49.19,
"learning_rate": 2.7428571428571425e-05,
"loss": 0.0079,
"step": 1820
},
{
"epoch": 49.22,
"learning_rate": 2.6571428571428566e-05,
"loss": 0.0033,
"step": 1821
},
{
"epoch": 49.24,
"learning_rate": 2.571428571428571e-05,
"loss": 0.054,
"step": 1822
},
{
"epoch": 49.27,
"learning_rate": 2.4857142857142852e-05,
"loss": 0.0292,
"step": 1823
},
{
"epoch": 49.3,
"learning_rate": 2.3999999999999997e-05,
"loss": 0.0035,
"step": 1824
},
{
"epoch": 49.32,
"learning_rate": 2.314285714285714e-05,
"loss": 0.0041,
"step": 1825
},
{
"epoch": 49.35,
"learning_rate": 2.2285714285714283e-05,
"loss": 0.0117,
"step": 1826
},
{
"epoch": 49.38,
"learning_rate": 2.1428571428571425e-05,
"loss": 0.007,
"step": 1827
},
{
"epoch": 49.41,
"learning_rate": 2.057142857142857e-05,
"loss": 0.0039,
"step": 1828
},
{
"epoch": 49.43,
"learning_rate": 1.971428571428571e-05,
"loss": 0.0267,
"step": 1829
},
{
"epoch": 49.46,
"learning_rate": 1.8857142857142856e-05,
"loss": 0.0183,
"step": 1830
},
{
"epoch": 49.49,
"learning_rate": 1.7999999999999997e-05,
"loss": 0.0065,
"step": 1831
},
{
"epoch": 49.51,
"learning_rate": 1.7142857142857142e-05,
"loss": 0.0529,
"step": 1832
},
{
"epoch": 49.54,
"learning_rate": 1.6285714285714283e-05,
"loss": 0.1062,
"step": 1833
},
{
"epoch": 49.57,
"learning_rate": 1.5428571428571428e-05,
"loss": 0.0407,
"step": 1834
},
{
"epoch": 49.59,
"learning_rate": 1.457142857142857e-05,
"loss": 0.0029,
"step": 1835
},
{
"epoch": 49.62,
"learning_rate": 1.3714285714285712e-05,
"loss": 0.0715,
"step": 1836
},
{
"epoch": 49.65,
"learning_rate": 1.2857142857142856e-05,
"loss": 0.0042,
"step": 1837
},
{
"epoch": 49.68,
"learning_rate": 1.1999999999999999e-05,
"loss": 0.0135,
"step": 1838
},
{
"epoch": 49.7,
"learning_rate": 1.1142857142857142e-05,
"loss": 0.0037,
"step": 1839
},
{
"epoch": 49.73,
"learning_rate": 1.0285714285714285e-05,
"loss": 0.003,
"step": 1840
},
{
"epoch": 49.76,
"learning_rate": 9.428571428571428e-06,
"loss": 0.0142,
"step": 1841
},
{
"epoch": 49.78,
"learning_rate": 8.571428571428571e-06,
"loss": 0.016,
"step": 1842
},
{
"epoch": 49.81,
"learning_rate": 7.714285714285714e-06,
"loss": 0.0875,
"step": 1843
},
{
"epoch": 49.84,
"learning_rate": 6.857142857142856e-06,
"loss": 0.0031,
"step": 1844
},
{
"epoch": 49.86,
"learning_rate": 5.999999999999999e-06,
"loss": 0.0162,
"step": 1845
},
{
"epoch": 49.89,
"learning_rate": 5.142857142857142e-06,
"loss": 0.0024,
"step": 1846
},
{
"epoch": 49.92,
"learning_rate": 4.2857142857142855e-06,
"loss": 0.0911,
"step": 1847
},
{
"epoch": 49.95,
"learning_rate": 3.428571428571428e-06,
"loss": 0.0219,
"step": 1848
},
{
"epoch": 49.97,
"learning_rate": 2.571428571428571e-06,
"loss": 0.0062,
"step": 1849
},
{
"epoch": 50.0,
"learning_rate": 1.714285714285714e-06,
"loss": 0.0061,
"step": 1850
},
{
"epoch": 50.0,
"step": 1850,
"total_flos": 3.932245658559316e+19,
"train_loss": 0.772748684147247,
"train_runtime": 10461.0184,
"train_samples_per_second": 11.094,
"train_steps_per_second": 0.177
}
],
"max_steps": 1850,
"num_train_epochs": 50,
"total_flos": 3.932245658559316e+19,
"trial_name": null,
"trial_params": null
}