Leyley-13B-Lora / checkpoint-1040 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 32.0,
"eval_steps": 80,
"global_step": 1040,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 2.5e-07,
"loss": 1.8362,
"step": 1
},
{
"epoch": 0.03,
"eval_loss": 1.7487908601760864,
"eval_runtime": 1.7201,
"eval_samples_per_second": 4.07,
"eval_steps_per_second": 2.326,
"step": 1
},
{
"epoch": 0.06,
"learning_rate": 2.5e-07,
"loss": 1.7387,
"step": 2
},
{
"epoch": 0.09,
"learning_rate": 2.5e-07,
"loss": 1.9194,
"step": 3
},
{
"epoch": 0.12,
"learning_rate": 2.5e-07,
"loss": 1.8898,
"step": 4
},
{
"epoch": 0.15,
"learning_rate": 2.5e-07,
"loss": 1.7892,
"step": 5
},
{
"epoch": 0.18,
"learning_rate": 2.5e-07,
"loss": 1.9383,
"step": 6
},
{
"epoch": 0.22,
"learning_rate": 2.5e-07,
"loss": 1.6433,
"step": 7
},
{
"epoch": 0.25,
"learning_rate": 2.5e-07,
"loss": 1.9912,
"step": 8
},
{
"epoch": 0.28,
"learning_rate": 2.5e-07,
"loss": 1.967,
"step": 9
},
{
"epoch": 0.31,
"learning_rate": 2.5e-07,
"loss": 1.8385,
"step": 10
},
{
"epoch": 0.34,
"learning_rate": 2.5e-07,
"loss": 2.0755,
"step": 11
},
{
"epoch": 0.37,
"learning_rate": 2.5e-07,
"loss": 1.852,
"step": 12
},
{
"epoch": 0.4,
"learning_rate": 2.5e-07,
"loss": 2.0098,
"step": 13
},
{
"epoch": 0.43,
"learning_rate": 2.5e-07,
"loss": 1.8534,
"step": 14
},
{
"epoch": 0.46,
"learning_rate": 2.5e-07,
"loss": 2.2001,
"step": 15
},
{
"epoch": 0.49,
"learning_rate": 2.5e-07,
"loss": 1.7698,
"step": 16
},
{
"epoch": 0.52,
"learning_rate": 2.5e-07,
"loss": 1.9215,
"step": 17
},
{
"epoch": 0.55,
"learning_rate": 2.5e-07,
"loss": 1.8275,
"step": 18
},
{
"epoch": 0.58,
"learning_rate": 2.5e-07,
"loss": 1.8526,
"step": 19
},
{
"epoch": 0.62,
"learning_rate": 2.5e-07,
"loss": 1.8421,
"step": 20
},
{
"epoch": 0.65,
"learning_rate": 2.5e-07,
"loss": 1.7406,
"step": 21
},
{
"epoch": 0.68,
"learning_rate": 2.5e-07,
"loss": 1.8343,
"step": 22
},
{
"epoch": 0.71,
"learning_rate": 2.5e-07,
"loss": 1.8616,
"step": 23
},
{
"epoch": 0.74,
"learning_rate": 2.5e-07,
"loss": 1.6493,
"step": 24
},
{
"epoch": 0.77,
"learning_rate": 2.5e-07,
"loss": 1.931,
"step": 25
},
{
"epoch": 0.8,
"learning_rate": 2.5e-07,
"loss": 1.9538,
"step": 26
},
{
"epoch": 0.83,
"learning_rate": 2.5e-07,
"loss": 2.013,
"step": 27
},
{
"epoch": 0.86,
"learning_rate": 2.5e-07,
"loss": 2.0858,
"step": 28
},
{
"epoch": 0.89,
"learning_rate": 2.5e-07,
"loss": 2.0456,
"step": 29
},
{
"epoch": 0.92,
"learning_rate": 2.5e-07,
"loss": 1.7464,
"step": 30
},
{
"epoch": 0.95,
"learning_rate": 2.5e-07,
"loss": 1.8876,
"step": 31
},
{
"epoch": 0.98,
"learning_rate": 2.5e-07,
"loss": 1.7079,
"step": 32
},
{
"epoch": 1.02,
"learning_rate": 2.5e-07,
"loss": 1.8358,
"step": 33
},
{
"epoch": 1.05,
"learning_rate": 2.5e-07,
"loss": 2.0894,
"step": 34
},
{
"epoch": 1.08,
"learning_rate": 2.5e-07,
"loss": 1.8563,
"step": 35
},
{
"epoch": 1.11,
"learning_rate": 2.5e-07,
"loss": 1.893,
"step": 36
},
{
"epoch": 1.14,
"learning_rate": 2.5e-07,
"loss": 1.9089,
"step": 37
},
{
"epoch": 1.17,
"learning_rate": 2.5e-07,
"loss": 1.7537,
"step": 38
},
{
"epoch": 1.2,
"learning_rate": 2.5e-07,
"loss": 1.8376,
"step": 39
},
{
"epoch": 1.23,
"learning_rate": 2.5e-07,
"loss": 1.7036,
"step": 40
},
{
"epoch": 1.26,
"learning_rate": 2.5e-07,
"loss": 1.8212,
"step": 41
},
{
"epoch": 1.29,
"learning_rate": 2.5e-07,
"loss": 1.8071,
"step": 42
},
{
"epoch": 1.32,
"learning_rate": 2.5e-07,
"loss": 1.8911,
"step": 43
},
{
"epoch": 1.35,
"learning_rate": 2.5e-07,
"loss": 1.88,
"step": 44
},
{
"epoch": 1.38,
"learning_rate": 2.5e-07,
"loss": 1.7136,
"step": 45
},
{
"epoch": 1.42,
"learning_rate": 2.5e-07,
"loss": 1.6818,
"step": 46
},
{
"epoch": 1.45,
"learning_rate": 2.5e-07,
"loss": 1.9645,
"step": 47
},
{
"epoch": 1.48,
"learning_rate": 2.5e-07,
"loss": 1.9903,
"step": 48
},
{
"epoch": 1.51,
"learning_rate": 2.5e-07,
"loss": 1.8368,
"step": 49
},
{
"epoch": 1.54,
"learning_rate": 2.5e-07,
"loss": 1.7862,
"step": 50
},
{
"epoch": 1.57,
"learning_rate": 2.5e-07,
"loss": 1.9217,
"step": 51
},
{
"epoch": 1.6,
"learning_rate": 2.5e-07,
"loss": 1.8417,
"step": 52
},
{
"epoch": 1.63,
"learning_rate": 2.5e-07,
"loss": 1.8408,
"step": 53
},
{
"epoch": 1.66,
"learning_rate": 2.5e-07,
"loss": 1.8922,
"step": 54
},
{
"epoch": 1.69,
"learning_rate": 2.5e-07,
"loss": 1.7651,
"step": 55
},
{
"epoch": 1.72,
"learning_rate": 2.5e-07,
"loss": 1.7586,
"step": 56
},
{
"epoch": 1.75,
"learning_rate": 2.5e-07,
"loss": 1.9452,
"step": 57
},
{
"epoch": 1.78,
"learning_rate": 2.5e-07,
"loss": 1.7795,
"step": 58
},
{
"epoch": 1.82,
"learning_rate": 2.5e-07,
"loss": 1.875,
"step": 59
},
{
"epoch": 1.85,
"learning_rate": 2.5e-07,
"loss": 1.8503,
"step": 60
},
{
"epoch": 1.88,
"learning_rate": 2.5e-07,
"loss": 1.7522,
"step": 61
},
{
"epoch": 1.91,
"learning_rate": 2.5e-07,
"loss": 1.7527,
"step": 62
},
{
"epoch": 1.94,
"learning_rate": 2.5e-07,
"loss": 1.8297,
"step": 63
},
{
"epoch": 1.97,
"learning_rate": 2.5e-07,
"loss": 1.7751,
"step": 64
},
{
"epoch": 2.0,
"learning_rate": 2.5e-07,
"loss": 1.9007,
"step": 65
},
{
"epoch": 2.03,
"learning_rate": 2.5e-07,
"loss": 1.829,
"step": 66
},
{
"epoch": 2.06,
"learning_rate": 2.5e-07,
"loss": 1.7726,
"step": 67
},
{
"epoch": 2.09,
"learning_rate": 2.5e-07,
"loss": 1.8103,
"step": 68
},
{
"epoch": 2.12,
"learning_rate": 2.5e-07,
"loss": 1.8264,
"step": 69
},
{
"epoch": 2.15,
"learning_rate": 2.5e-07,
"loss": 1.7188,
"step": 70
},
{
"epoch": 2.18,
"learning_rate": 2.5e-07,
"loss": 1.8933,
"step": 71
},
{
"epoch": 2.22,
"learning_rate": 2.5e-07,
"loss": 1.6881,
"step": 72
},
{
"epoch": 2.25,
"learning_rate": 2.5e-07,
"loss": 1.6726,
"step": 73
},
{
"epoch": 2.28,
"learning_rate": 2.5e-07,
"loss": 1.8541,
"step": 74
},
{
"epoch": 2.31,
"learning_rate": 2.5e-07,
"loss": 1.8042,
"step": 75
},
{
"epoch": 2.34,
"learning_rate": 2.5e-07,
"loss": 1.7049,
"step": 76
},
{
"epoch": 2.37,
"learning_rate": 2.5e-07,
"loss": 1.6737,
"step": 77
},
{
"epoch": 2.4,
"learning_rate": 2.5e-07,
"loss": 1.6985,
"step": 78
},
{
"epoch": 2.43,
"learning_rate": 2.5e-07,
"loss": 1.7261,
"step": 79
},
{
"epoch": 2.46,
"learning_rate": 2.5e-07,
"loss": 2.035,
"step": 80
},
{
"epoch": 2.46,
"eval_loss": 1.646209955215454,
"eval_runtime": 1.7493,
"eval_samples_per_second": 4.002,
"eval_steps_per_second": 2.287,
"step": 80
},
{
"epoch": 2.49,
"learning_rate": 2.5e-07,
"loss": 1.7559,
"step": 81
},
{
"epoch": 2.52,
"learning_rate": 2.5e-07,
"loss": 1.6864,
"step": 82
},
{
"epoch": 2.55,
"learning_rate": 2.5e-07,
"loss": 1.8974,
"step": 83
},
{
"epoch": 2.58,
"learning_rate": 2.5e-07,
"loss": 1.822,
"step": 84
},
{
"epoch": 2.62,
"learning_rate": 2.5e-07,
"loss": 1.6783,
"step": 85
},
{
"epoch": 2.65,
"learning_rate": 2.5e-07,
"loss": 1.8375,
"step": 86
},
{
"epoch": 2.68,
"learning_rate": 2.5e-07,
"loss": 1.7302,
"step": 87
},
{
"epoch": 2.71,
"learning_rate": 2.5e-07,
"loss": 1.7164,
"step": 88
},
{
"epoch": 2.74,
"learning_rate": 2.5e-07,
"loss": 1.6118,
"step": 89
},
{
"epoch": 2.77,
"learning_rate": 2.5e-07,
"loss": 1.7528,
"step": 90
},
{
"epoch": 2.8,
"learning_rate": 2.5e-07,
"loss": 1.9012,
"step": 91
},
{
"epoch": 2.83,
"learning_rate": 2.5e-07,
"loss": 1.6869,
"step": 92
},
{
"epoch": 2.86,
"learning_rate": 2.5e-07,
"loss": 1.7648,
"step": 93
},
{
"epoch": 2.89,
"learning_rate": 2.5e-07,
"loss": 1.6151,
"step": 94
},
{
"epoch": 2.92,
"learning_rate": 2.5e-07,
"loss": 1.7853,
"step": 95
},
{
"epoch": 2.95,
"learning_rate": 2.5e-07,
"loss": 1.7481,
"step": 96
},
{
"epoch": 2.98,
"learning_rate": 2.5e-07,
"loss": 1.7218,
"step": 97
},
{
"epoch": 3.02,
"learning_rate": 2.5e-07,
"loss": 1.8767,
"step": 98
},
{
"epoch": 3.05,
"learning_rate": 2.5e-07,
"loss": 1.7954,
"step": 99
},
{
"epoch": 3.08,
"learning_rate": 2.5e-07,
"loss": 1.6822,
"step": 100
},
{
"epoch": 3.11,
"learning_rate": 2.5e-07,
"loss": 1.821,
"step": 101
},
{
"epoch": 3.14,
"learning_rate": 2.5e-07,
"loss": 1.6984,
"step": 102
},
{
"epoch": 3.17,
"learning_rate": 2.5e-07,
"loss": 1.7225,
"step": 103
},
{
"epoch": 3.2,
"learning_rate": 2.5e-07,
"loss": 1.6312,
"step": 104
},
{
"epoch": 3.23,
"learning_rate": 2.5e-07,
"loss": 1.7467,
"step": 105
},
{
"epoch": 3.26,
"learning_rate": 2.5e-07,
"loss": 1.679,
"step": 106
},
{
"epoch": 3.29,
"learning_rate": 2.5e-07,
"loss": 1.791,
"step": 107
},
{
"epoch": 3.32,
"learning_rate": 2.5e-07,
"loss": 1.7233,
"step": 108
},
{
"epoch": 3.35,
"learning_rate": 2.5e-07,
"loss": 1.8198,
"step": 109
},
{
"epoch": 3.38,
"learning_rate": 2.5e-07,
"loss": 1.6536,
"step": 110
},
{
"epoch": 3.42,
"learning_rate": 2.5e-07,
"loss": 1.6672,
"step": 111
},
{
"epoch": 3.45,
"learning_rate": 2.5e-07,
"loss": 1.8259,
"step": 112
},
{
"epoch": 3.48,
"learning_rate": 2.5e-07,
"loss": 1.7671,
"step": 113
},
{
"epoch": 3.51,
"learning_rate": 2.5e-07,
"loss": 1.5717,
"step": 114
},
{
"epoch": 3.54,
"learning_rate": 2.5e-07,
"loss": 1.7491,
"step": 115
},
{
"epoch": 3.57,
"learning_rate": 2.5e-07,
"loss": 1.4921,
"step": 116
},
{
"epoch": 3.6,
"learning_rate": 2.5e-07,
"loss": 1.7512,
"step": 117
},
{
"epoch": 3.63,
"learning_rate": 2.5e-07,
"loss": 1.5921,
"step": 118
},
{
"epoch": 3.66,
"learning_rate": 2.5e-07,
"loss": 1.6179,
"step": 119
},
{
"epoch": 3.69,
"learning_rate": 2.5e-07,
"loss": 1.7484,
"step": 120
},
{
"epoch": 3.72,
"learning_rate": 2.5e-07,
"loss": 1.6551,
"step": 121
},
{
"epoch": 3.75,
"learning_rate": 2.5e-07,
"loss": 1.6753,
"step": 122
},
{
"epoch": 3.78,
"learning_rate": 2.5e-07,
"loss": 1.816,
"step": 123
},
{
"epoch": 3.82,
"learning_rate": 2.5e-07,
"loss": 1.7982,
"step": 124
},
{
"epoch": 3.85,
"learning_rate": 2.5e-07,
"loss": 1.5347,
"step": 125
},
{
"epoch": 3.88,
"learning_rate": 2.5e-07,
"loss": 1.6895,
"step": 126
},
{
"epoch": 3.91,
"learning_rate": 2.5e-07,
"loss": 1.6968,
"step": 127
},
{
"epoch": 3.94,
"learning_rate": 2.5e-07,
"loss": 1.6039,
"step": 128
},
{
"epoch": 3.97,
"learning_rate": 2.5e-07,
"loss": 1.6148,
"step": 129
},
{
"epoch": 4.0,
"learning_rate": 2.5e-07,
"loss": 1.6244,
"step": 130
},
{
"epoch": 4.03,
"learning_rate": 2.5e-07,
"loss": 1.5743,
"step": 131
},
{
"epoch": 4.06,
"learning_rate": 2.5e-07,
"loss": 1.5876,
"step": 132
},
{
"epoch": 4.09,
"learning_rate": 2.5e-07,
"loss": 1.7865,
"step": 133
},
{
"epoch": 4.12,
"learning_rate": 2.5e-07,
"loss": 1.731,
"step": 134
},
{
"epoch": 4.15,
"learning_rate": 2.5e-07,
"loss": 1.6661,
"step": 135
},
{
"epoch": 4.18,
"learning_rate": 2.5e-07,
"loss": 1.6231,
"step": 136
},
{
"epoch": 4.22,
"learning_rate": 2.5e-07,
"loss": 1.5479,
"step": 137
},
{
"epoch": 4.25,
"learning_rate": 2.5e-07,
"loss": 1.7758,
"step": 138
},
{
"epoch": 4.28,
"learning_rate": 2.5e-07,
"loss": 1.6865,
"step": 139
},
{
"epoch": 4.31,
"learning_rate": 2.5e-07,
"loss": 1.6355,
"step": 140
},
{
"epoch": 4.34,
"learning_rate": 2.5e-07,
"loss": 1.6922,
"step": 141
},
{
"epoch": 4.37,
"learning_rate": 2.5e-07,
"loss": 1.5235,
"step": 142
},
{
"epoch": 4.4,
"learning_rate": 2.5e-07,
"loss": 1.6388,
"step": 143
},
{
"epoch": 4.43,
"learning_rate": 2.5e-07,
"loss": 1.536,
"step": 144
},
{
"epoch": 4.46,
"learning_rate": 2.5e-07,
"loss": 1.6105,
"step": 145
},
{
"epoch": 4.49,
"learning_rate": 2.5e-07,
"loss": 1.6795,
"step": 146
},
{
"epoch": 4.52,
"learning_rate": 2.5e-07,
"loss": 1.5493,
"step": 147
},
{
"epoch": 4.55,
"learning_rate": 2.5e-07,
"loss": 1.6987,
"step": 148
},
{
"epoch": 4.58,
"learning_rate": 2.5e-07,
"loss": 1.529,
"step": 149
},
{
"epoch": 4.62,
"learning_rate": 2.5e-07,
"loss": 1.5458,
"step": 150
},
{
"epoch": 4.65,
"learning_rate": 2.5e-07,
"loss": 1.5496,
"step": 151
},
{
"epoch": 4.68,
"learning_rate": 2.5e-07,
"loss": 1.6442,
"step": 152
},
{
"epoch": 4.71,
"learning_rate": 2.5e-07,
"loss": 1.727,
"step": 153
},
{
"epoch": 4.74,
"learning_rate": 2.5e-07,
"loss": 1.6884,
"step": 154
},
{
"epoch": 4.77,
"learning_rate": 2.5e-07,
"loss": 1.5979,
"step": 155
},
{
"epoch": 4.8,
"learning_rate": 2.5e-07,
"loss": 1.5635,
"step": 156
},
{
"epoch": 4.83,
"learning_rate": 2.5e-07,
"loss": 1.805,
"step": 157
},
{
"epoch": 4.86,
"learning_rate": 2.5e-07,
"loss": 1.565,
"step": 158
},
{
"epoch": 4.89,
"learning_rate": 2.5e-07,
"loss": 1.5836,
"step": 159
},
{
"epoch": 4.92,
"learning_rate": 2.5e-07,
"loss": 1.5489,
"step": 160
},
{
"epoch": 4.92,
"eval_loss": 1.490054965019226,
"eval_runtime": 1.7397,
"eval_samples_per_second": 4.024,
"eval_steps_per_second": 2.299,
"step": 160
},
{
"epoch": 4.95,
"learning_rate": 2.5e-07,
"loss": 1.6744,
"step": 161
},
{
"epoch": 4.98,
"learning_rate": 2.5e-07,
"loss": 1.5578,
"step": 162
},
{
"epoch": 5.02,
"learning_rate": 2.5e-07,
"loss": 1.5033,
"step": 163
},
{
"epoch": 5.05,
"learning_rate": 2.5e-07,
"loss": 1.701,
"step": 164
},
{
"epoch": 5.08,
"learning_rate": 2.5e-07,
"loss": 1.5297,
"step": 165
},
{
"epoch": 5.11,
"learning_rate": 2.5e-07,
"loss": 1.5086,
"step": 166
},
{
"epoch": 5.14,
"learning_rate": 2.5e-07,
"loss": 1.5426,
"step": 167
},
{
"epoch": 5.17,
"learning_rate": 2.5e-07,
"loss": 1.5224,
"step": 168
},
{
"epoch": 5.2,
"learning_rate": 2.5e-07,
"loss": 1.4174,
"step": 169
},
{
"epoch": 5.23,
"learning_rate": 2.5e-07,
"loss": 1.5665,
"step": 170
},
{
"epoch": 5.26,
"learning_rate": 2.5e-07,
"loss": 1.5093,
"step": 171
},
{
"epoch": 5.29,
"learning_rate": 2.5e-07,
"loss": 1.4894,
"step": 172
},
{
"epoch": 5.32,
"learning_rate": 2.5e-07,
"loss": 1.7075,
"step": 173
},
{
"epoch": 5.35,
"learning_rate": 2.5e-07,
"loss": 1.5987,
"step": 174
},
{
"epoch": 5.38,
"learning_rate": 2.5e-07,
"loss": 1.5064,
"step": 175
},
{
"epoch": 5.42,
"learning_rate": 2.5e-07,
"loss": 1.5896,
"step": 176
},
{
"epoch": 5.45,
"learning_rate": 2.5e-07,
"loss": 1.5435,
"step": 177
},
{
"epoch": 5.48,
"learning_rate": 2.5e-07,
"loss": 1.5764,
"step": 178
},
{
"epoch": 5.51,
"learning_rate": 2.5e-07,
"loss": 1.6604,
"step": 179
},
{
"epoch": 5.54,
"learning_rate": 2.5e-07,
"loss": 1.6184,
"step": 180
},
{
"epoch": 5.57,
"learning_rate": 2.5e-07,
"loss": 1.6953,
"step": 181
},
{
"epoch": 5.6,
"learning_rate": 2.5e-07,
"loss": 1.5764,
"step": 182
},
{
"epoch": 5.63,
"learning_rate": 2.5e-07,
"loss": 1.4756,
"step": 183
},
{
"epoch": 5.66,
"learning_rate": 2.5e-07,
"loss": 1.6062,
"step": 184
},
{
"epoch": 5.69,
"learning_rate": 2.5e-07,
"loss": 1.5978,
"step": 185
},
{
"epoch": 5.72,
"learning_rate": 2.5e-07,
"loss": 1.4222,
"step": 186
},
{
"epoch": 5.75,
"learning_rate": 2.5e-07,
"loss": 1.5142,
"step": 187
},
{
"epoch": 5.78,
"learning_rate": 2.5e-07,
"loss": 1.4466,
"step": 188
},
{
"epoch": 5.82,
"learning_rate": 2.5e-07,
"loss": 1.5513,
"step": 189
},
{
"epoch": 5.85,
"learning_rate": 2.5e-07,
"loss": 1.4656,
"step": 190
},
{
"epoch": 5.88,
"learning_rate": 2.5e-07,
"loss": 1.5972,
"step": 191
},
{
"epoch": 5.91,
"learning_rate": 2.5e-07,
"loss": 1.4809,
"step": 192
},
{
"epoch": 5.94,
"learning_rate": 2.5e-07,
"loss": 1.6644,
"step": 193
},
{
"epoch": 5.97,
"learning_rate": 2.5e-07,
"loss": 1.5554,
"step": 194
},
{
"epoch": 6.0,
"learning_rate": 2.5e-07,
"loss": 1.5099,
"step": 195
},
{
"epoch": 6.03,
"learning_rate": 2.5e-07,
"loss": 1.5775,
"step": 196
},
{
"epoch": 6.06,
"learning_rate": 2.5e-07,
"loss": 1.4642,
"step": 197
},
{
"epoch": 6.09,
"learning_rate": 2.5e-07,
"loss": 1.4549,
"step": 198
},
{
"epoch": 6.12,
"learning_rate": 2.5e-07,
"loss": 1.4262,
"step": 199
},
{
"epoch": 6.15,
"learning_rate": 2.5e-07,
"loss": 1.3087,
"step": 200
},
{
"epoch": 6.18,
"learning_rate": 2.5e-07,
"loss": 1.4069,
"step": 201
},
{
"epoch": 6.22,
"learning_rate": 2.5e-07,
"loss": 1.5301,
"step": 202
},
{
"epoch": 6.25,
"learning_rate": 2.5e-07,
"loss": 1.4505,
"step": 203
},
{
"epoch": 6.28,
"learning_rate": 2.5e-07,
"loss": 1.5708,
"step": 204
},
{
"epoch": 6.31,
"learning_rate": 2.5e-07,
"loss": 1.5237,
"step": 205
},
{
"epoch": 6.34,
"learning_rate": 2.5e-07,
"loss": 1.6189,
"step": 206
},
{
"epoch": 6.37,
"learning_rate": 2.5e-07,
"loss": 1.3563,
"step": 207
},
{
"epoch": 6.4,
"learning_rate": 2.5e-07,
"loss": 1.6897,
"step": 208
},
{
"epoch": 6.43,
"learning_rate": 2.5e-07,
"loss": 1.4593,
"step": 209
},
{
"epoch": 6.46,
"learning_rate": 2.5e-07,
"loss": 1.6024,
"step": 210
},
{
"epoch": 6.49,
"learning_rate": 2.5e-07,
"loss": 1.4543,
"step": 211
},
{
"epoch": 6.52,
"learning_rate": 2.5e-07,
"loss": 1.5193,
"step": 212
},
{
"epoch": 6.55,
"learning_rate": 2.5e-07,
"loss": 1.3936,
"step": 213
},
{
"epoch": 6.58,
"learning_rate": 2.5e-07,
"loss": 1.5552,
"step": 214
},
{
"epoch": 6.62,
"learning_rate": 2.5e-07,
"loss": 1.453,
"step": 215
},
{
"epoch": 6.65,
"learning_rate": 2.5e-07,
"loss": 1.4554,
"step": 216
},
{
"epoch": 6.68,
"learning_rate": 2.5e-07,
"loss": 1.6442,
"step": 217
},
{
"epoch": 6.71,
"learning_rate": 2.5e-07,
"loss": 1.439,
"step": 218
},
{
"epoch": 6.74,
"learning_rate": 2.5e-07,
"loss": 1.4309,
"step": 219
},
{
"epoch": 6.77,
"learning_rate": 2.5e-07,
"loss": 1.4857,
"step": 220
},
{
"epoch": 6.8,
"learning_rate": 2.5e-07,
"loss": 1.5154,
"step": 221
},
{
"epoch": 6.83,
"learning_rate": 2.5e-07,
"loss": 1.3941,
"step": 222
},
{
"epoch": 6.86,
"learning_rate": 2.5e-07,
"loss": 1.5596,
"step": 223
},
{
"epoch": 6.89,
"learning_rate": 2.5e-07,
"loss": 1.4859,
"step": 224
},
{
"epoch": 6.92,
"learning_rate": 2.5e-07,
"loss": 1.4801,
"step": 225
},
{
"epoch": 6.95,
"learning_rate": 2.5e-07,
"loss": 1.5035,
"step": 226
},
{
"epoch": 6.98,
"learning_rate": 2.5e-07,
"loss": 1.6068,
"step": 227
},
{
"epoch": 7.02,
"learning_rate": 2.5e-07,
"loss": 1.4447,
"step": 228
},
{
"epoch": 7.05,
"learning_rate": 2.5e-07,
"loss": 1.5094,
"step": 229
},
{
"epoch": 7.08,
"learning_rate": 2.5e-07,
"loss": 1.5474,
"step": 230
},
{
"epoch": 7.11,
"learning_rate": 2.5e-07,
"loss": 1.4545,
"step": 231
},
{
"epoch": 7.14,
"learning_rate": 2.5e-07,
"loss": 1.4672,
"step": 232
},
{
"epoch": 7.17,
"learning_rate": 2.5e-07,
"loss": 1.6396,
"step": 233
},
{
"epoch": 7.2,
"learning_rate": 2.5e-07,
"loss": 1.374,
"step": 234
},
{
"epoch": 7.23,
"learning_rate": 2.5e-07,
"loss": 1.4521,
"step": 235
},
{
"epoch": 7.26,
"learning_rate": 2.5e-07,
"loss": 1.2984,
"step": 236
},
{
"epoch": 7.29,
"learning_rate": 2.5e-07,
"loss": 1.4511,
"step": 237
},
{
"epoch": 7.32,
"learning_rate": 2.5e-07,
"loss": 1.405,
"step": 238
},
{
"epoch": 7.35,
"learning_rate": 2.5e-07,
"loss": 1.296,
"step": 239
},
{
"epoch": 7.38,
"learning_rate": 2.5e-07,
"loss": 1.4392,
"step": 240
},
{
"epoch": 7.38,
"eval_loss": 1.3567150831222534,
"eval_runtime": 1.7364,
"eval_samples_per_second": 4.031,
"eval_steps_per_second": 2.304,
"step": 240
},
{
"epoch": 7.42,
"learning_rate": 2.5e-07,
"loss": 1.5595,
"step": 241
},
{
"epoch": 7.45,
"learning_rate": 2.5e-07,
"loss": 1.5671,
"step": 242
},
{
"epoch": 7.48,
"learning_rate": 2.5e-07,
"loss": 1.4832,
"step": 243
},
{
"epoch": 7.51,
"learning_rate": 2.5e-07,
"loss": 1.4415,
"step": 244
},
{
"epoch": 7.54,
"learning_rate": 2.5e-07,
"loss": 1.4649,
"step": 245
},
{
"epoch": 7.57,
"learning_rate": 2.5e-07,
"loss": 1.5053,
"step": 246
},
{
"epoch": 7.6,
"learning_rate": 2.5e-07,
"loss": 1.5084,
"step": 247
},
{
"epoch": 7.63,
"learning_rate": 2.5e-07,
"loss": 1.4543,
"step": 248
},
{
"epoch": 7.66,
"learning_rate": 2.5e-07,
"loss": 1.3008,
"step": 249
},
{
"epoch": 7.69,
"learning_rate": 2.5e-07,
"loss": 1.324,
"step": 250
},
{
"epoch": 7.72,
"learning_rate": 2.5e-07,
"loss": 1.5586,
"step": 251
},
{
"epoch": 7.75,
"learning_rate": 2.5e-07,
"loss": 1.4245,
"step": 252
},
{
"epoch": 7.78,
"learning_rate": 2.5e-07,
"loss": 1.4361,
"step": 253
},
{
"epoch": 7.82,
"learning_rate": 2.5e-07,
"loss": 1.4301,
"step": 254
},
{
"epoch": 7.85,
"learning_rate": 2.5e-07,
"loss": 1.5183,
"step": 255
},
{
"epoch": 7.88,
"learning_rate": 2.5e-07,
"loss": 1.46,
"step": 256
},
{
"epoch": 7.91,
"learning_rate": 2.5e-07,
"loss": 1.2602,
"step": 257
},
{
"epoch": 7.94,
"learning_rate": 2.5e-07,
"loss": 1.358,
"step": 258
},
{
"epoch": 7.97,
"learning_rate": 2.5e-07,
"loss": 1.2598,
"step": 259
},
{
"epoch": 8.0,
"learning_rate": 2.5e-07,
"loss": 1.3058,
"step": 260
},
{
"epoch": 8.03,
"learning_rate": 2.5e-07,
"loss": 1.4428,
"step": 261
},
{
"epoch": 8.06,
"learning_rate": 2.5e-07,
"loss": 1.4506,
"step": 262
},
{
"epoch": 8.09,
"learning_rate": 2.5e-07,
"loss": 1.4627,
"step": 263
},
{
"epoch": 8.12,
"learning_rate": 2.5e-07,
"loss": 1.4584,
"step": 264
},
{
"epoch": 8.15,
"learning_rate": 2.5e-07,
"loss": 1.356,
"step": 265
},
{
"epoch": 8.18,
"learning_rate": 2.5e-07,
"loss": 1.4304,
"step": 266
},
{
"epoch": 8.22,
"learning_rate": 2.5e-07,
"loss": 1.2296,
"step": 267
},
{
"epoch": 8.25,
"learning_rate": 2.5e-07,
"loss": 1.4255,
"step": 268
},
{
"epoch": 8.28,
"learning_rate": 2.5e-07,
"loss": 1.4978,
"step": 269
},
{
"epoch": 8.31,
"learning_rate": 2.5e-07,
"loss": 1.4115,
"step": 270
},
{
"epoch": 8.34,
"learning_rate": 2.5e-07,
"loss": 1.4366,
"step": 271
},
{
"epoch": 8.37,
"learning_rate": 2.5e-07,
"loss": 1.2477,
"step": 272
},
{
"epoch": 8.4,
"learning_rate": 2.5e-07,
"loss": 1.453,
"step": 273
},
{
"epoch": 8.43,
"learning_rate": 2.5e-07,
"loss": 1.3008,
"step": 274
},
{
"epoch": 8.46,
"learning_rate": 2.5e-07,
"loss": 1.2511,
"step": 275
},
{
"epoch": 8.49,
"learning_rate": 2.5e-07,
"loss": 1.4864,
"step": 276
},
{
"epoch": 8.52,
"learning_rate": 2.5e-07,
"loss": 1.4733,
"step": 277
},
{
"epoch": 8.55,
"learning_rate": 2.5e-07,
"loss": 1.4998,
"step": 278
},
{
"epoch": 8.58,
"learning_rate": 2.5e-07,
"loss": 1.4771,
"step": 279
},
{
"epoch": 8.62,
"learning_rate": 2.5e-07,
"loss": 1.4164,
"step": 280
},
{
"epoch": 8.65,
"learning_rate": 2.5e-07,
"loss": 1.2803,
"step": 281
},
{
"epoch": 8.68,
"learning_rate": 2.5e-07,
"loss": 1.3673,
"step": 282
},
{
"epoch": 8.71,
"learning_rate": 2.5e-07,
"loss": 1.3849,
"step": 283
},
{
"epoch": 8.74,
"learning_rate": 2.5e-07,
"loss": 1.4484,
"step": 284
},
{
"epoch": 8.77,
"learning_rate": 2.5e-07,
"loss": 1.397,
"step": 285
},
{
"epoch": 8.8,
"learning_rate": 2.5e-07,
"loss": 1.5398,
"step": 286
},
{
"epoch": 8.83,
"learning_rate": 2.5e-07,
"loss": 1.2841,
"step": 287
},
{
"epoch": 8.86,
"learning_rate": 2.5e-07,
"loss": 1.2991,
"step": 288
},
{
"epoch": 8.89,
"learning_rate": 2.5e-07,
"loss": 1.3,
"step": 289
},
{
"epoch": 8.92,
"learning_rate": 2.5e-07,
"loss": 1.413,
"step": 290
},
{
"epoch": 8.95,
"learning_rate": 2.5e-07,
"loss": 1.3346,
"step": 291
},
{
"epoch": 8.98,
"learning_rate": 2.5e-07,
"loss": 1.3362,
"step": 292
},
{
"epoch": 9.02,
"learning_rate": 2.5e-07,
"loss": 1.1674,
"step": 293
},
{
"epoch": 9.05,
"learning_rate": 2.5e-07,
"loss": 1.4128,
"step": 294
},
{
"epoch": 9.08,
"learning_rate": 2.5e-07,
"loss": 1.461,
"step": 295
},
{
"epoch": 9.11,
"learning_rate": 2.5e-07,
"loss": 1.3092,
"step": 296
},
{
"epoch": 9.14,
"learning_rate": 2.5e-07,
"loss": 1.2157,
"step": 297
},
{
"epoch": 9.17,
"learning_rate": 2.5e-07,
"loss": 1.3718,
"step": 298
},
{
"epoch": 9.2,
"learning_rate": 2.5e-07,
"loss": 1.4823,
"step": 299
},
{
"epoch": 9.23,
"learning_rate": 2.5e-07,
"loss": 1.4398,
"step": 300
},
{
"epoch": 9.26,
"learning_rate": 2.5e-07,
"loss": 1.3343,
"step": 301
},
{
"epoch": 9.29,
"learning_rate": 2.5e-07,
"loss": 1.1481,
"step": 302
},
{
"epoch": 9.32,
"learning_rate": 2.5e-07,
"loss": 1.3867,
"step": 303
},
{
"epoch": 9.35,
"learning_rate": 2.5e-07,
"loss": 1.3983,
"step": 304
},
{
"epoch": 9.38,
"learning_rate": 2.5e-07,
"loss": 1.3382,
"step": 305
},
{
"epoch": 9.42,
"learning_rate": 2.5e-07,
"loss": 1.4251,
"step": 306
},
{
"epoch": 9.45,
"learning_rate": 2.5e-07,
"loss": 1.2907,
"step": 307
},
{
"epoch": 9.48,
"learning_rate": 2.5e-07,
"loss": 1.2584,
"step": 308
},
{
"epoch": 9.51,
"learning_rate": 2.5e-07,
"loss": 1.3281,
"step": 309
},
{
"epoch": 9.54,
"learning_rate": 2.5e-07,
"loss": 1.4022,
"step": 310
},
{
"epoch": 9.57,
"learning_rate": 2.5e-07,
"loss": 1.3523,
"step": 311
},
{
"epoch": 9.6,
"learning_rate": 2.5e-07,
"loss": 1.5241,
"step": 312
},
{
"epoch": 9.63,
"learning_rate": 2.5e-07,
"loss": 1.1701,
"step": 313
},
{
"epoch": 9.66,
"learning_rate": 2.5e-07,
"loss": 1.194,
"step": 314
},
{
"epoch": 9.69,
"learning_rate": 2.5e-07,
"loss": 1.4622,
"step": 315
},
{
"epoch": 9.72,
"learning_rate": 2.5e-07,
"loss": 1.1747,
"step": 316
},
{
"epoch": 9.75,
"learning_rate": 2.5e-07,
"loss": 1.4286,
"step": 317
},
{
"epoch": 9.78,
"learning_rate": 2.5e-07,
"loss": 1.3895,
"step": 318
},
{
"epoch": 9.82,
"learning_rate": 2.5e-07,
"loss": 1.3746,
"step": 319
},
{
"epoch": 9.85,
"learning_rate": 2.5e-07,
"loss": 1.2196,
"step": 320
},
{
"epoch": 9.85,
"eval_loss": 1.2475242614746094,
"eval_runtime": 1.7503,
"eval_samples_per_second": 3.999,
"eval_steps_per_second": 2.285,
"step": 320
},
{
"epoch": 9.88,
"learning_rate": 2.5e-07,
"loss": 1.2543,
"step": 321
},
{
"epoch": 9.91,
"learning_rate": 2.5e-07,
"loss": 1.239,
"step": 322
},
{
"epoch": 9.94,
"learning_rate": 2.5e-07,
"loss": 1.3088,
"step": 323
},
{
"epoch": 9.97,
"learning_rate": 2.5e-07,
"loss": 1.299,
"step": 324
},
{
"epoch": 10.0,
"learning_rate": 2.5e-07,
"loss": 1.3273,
"step": 325
},
{
"epoch": 10.03,
"learning_rate": 2.5e-07,
"loss": 1.3335,
"step": 326
},
{
"epoch": 10.06,
"learning_rate": 2.5e-07,
"loss": 1.317,
"step": 327
},
{
"epoch": 10.09,
"learning_rate": 2.5e-07,
"loss": 1.3048,
"step": 328
},
{
"epoch": 10.12,
"learning_rate": 2.5e-07,
"loss": 1.429,
"step": 329
},
{
"epoch": 10.15,
"learning_rate": 2.5e-07,
"loss": 1.2001,
"step": 330
},
{
"epoch": 10.18,
"learning_rate": 2.5e-07,
"loss": 1.3512,
"step": 331
},
{
"epoch": 10.22,
"learning_rate": 2.5e-07,
"loss": 1.2161,
"step": 332
},
{
"epoch": 10.25,
"learning_rate": 2.5e-07,
"loss": 1.2098,
"step": 333
},
{
"epoch": 10.28,
"learning_rate": 2.5e-07,
"loss": 1.3637,
"step": 334
},
{
"epoch": 10.31,
"learning_rate": 2.5e-07,
"loss": 1.3788,
"step": 335
},
{
"epoch": 10.34,
"learning_rate": 2.5e-07,
"loss": 1.2368,
"step": 336
},
{
"epoch": 10.37,
"learning_rate": 2.5e-07,
"loss": 1.348,
"step": 337
},
{
"epoch": 10.4,
"learning_rate": 2.5e-07,
"loss": 1.0986,
"step": 338
},
{
"epoch": 10.43,
"learning_rate": 2.5e-07,
"loss": 1.3233,
"step": 339
},
{
"epoch": 10.46,
"learning_rate": 2.5e-07,
"loss": 1.3613,
"step": 340
},
{
"epoch": 10.49,
"learning_rate": 2.5e-07,
"loss": 1.3211,
"step": 341
},
{
"epoch": 10.52,
"learning_rate": 2.5e-07,
"loss": 1.5044,
"step": 342
},
{
"epoch": 10.55,
"learning_rate": 2.5e-07,
"loss": 1.3033,
"step": 343
},
{
"epoch": 10.58,
"learning_rate": 2.5e-07,
"loss": 1.4222,
"step": 344
},
{
"epoch": 10.62,
"learning_rate": 2.5e-07,
"loss": 1.4241,
"step": 345
},
{
"epoch": 10.65,
"learning_rate": 2.5e-07,
"loss": 1.3264,
"step": 346
},
{
"epoch": 10.68,
"learning_rate": 2.5e-07,
"loss": 1.4957,
"step": 347
},
{
"epoch": 10.71,
"learning_rate": 2.5e-07,
"loss": 1.1016,
"step": 348
},
{
"epoch": 10.74,
"learning_rate": 2.5e-07,
"loss": 1.2492,
"step": 349
},
{
"epoch": 10.77,
"learning_rate": 2.5e-07,
"loss": 1.1237,
"step": 350
},
{
"epoch": 10.8,
"learning_rate": 2.5e-07,
"loss": 1.4371,
"step": 351
},
{
"epoch": 10.83,
"learning_rate": 2.5e-07,
"loss": 1.438,
"step": 352
},
{
"epoch": 10.86,
"learning_rate": 2.5e-07,
"loss": 1.2182,
"step": 353
},
{
"epoch": 10.89,
"learning_rate": 2.5e-07,
"loss": 1.2577,
"step": 354
},
{
"epoch": 10.92,
"learning_rate": 2.5e-07,
"loss": 1.2687,
"step": 355
},
{
"epoch": 10.95,
"learning_rate": 2.5e-07,
"loss": 1.3387,
"step": 356
},
{
"epoch": 10.98,
"learning_rate": 2.5e-07,
"loss": 1.3571,
"step": 357
},
{
"epoch": 11.02,
"learning_rate": 2.5e-07,
"loss": 1.2289,
"step": 358
},
{
"epoch": 11.05,
"learning_rate": 2.5e-07,
"loss": 1.2925,
"step": 359
},
{
"epoch": 11.08,
"learning_rate": 2.5e-07,
"loss": 1.3187,
"step": 360
},
{
"epoch": 11.11,
"learning_rate": 2.5e-07,
"loss": 1.3628,
"step": 361
},
{
"epoch": 11.14,
"learning_rate": 2.5e-07,
"loss": 1.2547,
"step": 362
},
{
"epoch": 11.17,
"learning_rate": 2.5e-07,
"loss": 1.3169,
"step": 363
},
{
"epoch": 11.2,
"learning_rate": 2.5e-07,
"loss": 1.2921,
"step": 364
},
{
"epoch": 11.23,
"learning_rate": 2.5e-07,
"loss": 1.3426,
"step": 365
},
{
"epoch": 11.26,
"learning_rate": 2.5e-07,
"loss": 1.4212,
"step": 366
},
{
"epoch": 11.29,
"learning_rate": 2.5e-07,
"loss": 1.2466,
"step": 367
},
{
"epoch": 11.32,
"learning_rate": 2.5e-07,
"loss": 1.3367,
"step": 368
},
{
"epoch": 11.35,
"learning_rate": 2.5e-07,
"loss": 1.3179,
"step": 369
},
{
"epoch": 11.38,
"learning_rate": 2.5e-07,
"loss": 1.2568,
"step": 370
},
{
"epoch": 11.42,
"learning_rate": 2.5e-07,
"loss": 1.1769,
"step": 371
},
{
"epoch": 11.45,
"learning_rate": 2.5e-07,
"loss": 1.3068,
"step": 372
},
{
"epoch": 11.48,
"learning_rate": 2.5e-07,
"loss": 1.1623,
"step": 373
},
{
"epoch": 11.51,
"learning_rate": 2.5e-07,
"loss": 1.2147,
"step": 374
},
{
"epoch": 11.54,
"learning_rate": 2.5e-07,
"loss": 1.4786,
"step": 375
},
{
"epoch": 11.57,
"learning_rate": 2.5e-07,
"loss": 1.3387,
"step": 376
},
{
"epoch": 11.6,
"learning_rate": 2.5e-07,
"loss": 1.261,
"step": 377
},
{
"epoch": 11.63,
"learning_rate": 2.5e-07,
"loss": 1.2758,
"step": 378
},
{
"epoch": 11.66,
"learning_rate": 2.5e-07,
"loss": 1.2917,
"step": 379
},
{
"epoch": 11.69,
"learning_rate": 2.5e-07,
"loss": 1.3428,
"step": 380
},
{
"epoch": 11.72,
"learning_rate": 2.5e-07,
"loss": 1.4322,
"step": 381
},
{
"epoch": 11.75,
"learning_rate": 2.5e-07,
"loss": 1.2796,
"step": 382
},
{
"epoch": 11.78,
"learning_rate": 2.5e-07,
"loss": 1.318,
"step": 383
},
{
"epoch": 11.82,
"learning_rate": 2.5e-07,
"loss": 1.2229,
"step": 384
},
{
"epoch": 11.85,
"learning_rate": 2.5e-07,
"loss": 1.1542,
"step": 385
},
{
"epoch": 11.88,
"learning_rate": 2.5e-07,
"loss": 1.3305,
"step": 386
},
{
"epoch": 11.91,
"learning_rate": 2.5e-07,
"loss": 1.3448,
"step": 387
},
{
"epoch": 11.94,
"learning_rate": 2.5e-07,
"loss": 1.2508,
"step": 388
},
{
"epoch": 11.97,
"learning_rate": 2.5e-07,
"loss": 1.3033,
"step": 389
},
{
"epoch": 12.0,
"learning_rate": 2.5e-07,
"loss": 1.1879,
"step": 390
},
{
"epoch": 12.03,
"learning_rate": 2.5e-07,
"loss": 1.3695,
"step": 391
},
{
"epoch": 12.06,
"learning_rate": 2.5e-07,
"loss": 1.278,
"step": 392
},
{
"epoch": 12.09,
"learning_rate": 2.5e-07,
"loss": 1.4726,
"step": 393
},
{
"epoch": 12.12,
"learning_rate": 2.5e-07,
"loss": 1.142,
"step": 394
},
{
"epoch": 12.15,
"learning_rate": 2.5e-07,
"loss": 1.2075,
"step": 395
},
{
"epoch": 12.18,
"learning_rate": 2.5e-07,
"loss": 1.4716,
"step": 396
},
{
"epoch": 12.22,
"learning_rate": 2.5e-07,
"loss": 1.336,
"step": 397
},
{
"epoch": 12.25,
"learning_rate": 2.5e-07,
"loss": 1.0818,
"step": 398
},
{
"epoch": 12.28,
"learning_rate": 2.5e-07,
"loss": 1.3366,
"step": 399
},
{
"epoch": 12.31,
"learning_rate": 2.5e-07,
"loss": 1.3219,
"step": 400
},
{
"epoch": 12.31,
"eval_loss": 1.2088563442230225,
"eval_runtime": 1.7541,
"eval_samples_per_second": 3.991,
"eval_steps_per_second": 2.28,
"step": 400
},
{
"epoch": 12.34,
"learning_rate": 2.5e-07,
"loss": 1.4101,
"step": 401
},
{
"epoch": 12.37,
"learning_rate": 2.5e-07,
"loss": 1.2773,
"step": 402
},
{
"epoch": 12.4,
"learning_rate": 2.5e-07,
"loss": 1.3575,
"step": 403
},
{
"epoch": 12.43,
"learning_rate": 2.5e-07,
"loss": 1.089,
"step": 404
},
{
"epoch": 12.46,
"learning_rate": 2.5e-07,
"loss": 1.4164,
"step": 405
},
{
"epoch": 12.49,
"learning_rate": 2.5e-07,
"loss": 1.3292,
"step": 406
},
{
"epoch": 12.52,
"learning_rate": 2.5e-07,
"loss": 1.3447,
"step": 407
},
{
"epoch": 12.55,
"learning_rate": 2.5e-07,
"loss": 1.4455,
"step": 408
},
{
"epoch": 12.58,
"learning_rate": 2.5e-07,
"loss": 1.3716,
"step": 409
},
{
"epoch": 12.62,
"learning_rate": 2.5e-07,
"loss": 1.338,
"step": 410
},
{
"epoch": 12.65,
"learning_rate": 2.5e-07,
"loss": 1.2842,
"step": 411
},
{
"epoch": 12.68,
"learning_rate": 2.5e-07,
"loss": 1.0655,
"step": 412
},
{
"epoch": 12.71,
"learning_rate": 2.5e-07,
"loss": 1.1405,
"step": 413
},
{
"epoch": 12.74,
"learning_rate": 2.5e-07,
"loss": 1.3488,
"step": 414
},
{
"epoch": 12.77,
"learning_rate": 2.5e-07,
"loss": 1.1401,
"step": 415
},
{
"epoch": 12.8,
"learning_rate": 2.5e-07,
"loss": 1.2261,
"step": 416
},
{
"epoch": 12.83,
"learning_rate": 2.5e-07,
"loss": 1.2211,
"step": 417
},
{
"epoch": 12.86,
"learning_rate": 2.5e-07,
"loss": 1.235,
"step": 418
},
{
"epoch": 12.89,
"learning_rate": 2.5e-07,
"loss": 1.2779,
"step": 419
},
{
"epoch": 12.92,
"learning_rate": 2.5e-07,
"loss": 1.228,
"step": 420
},
{
"epoch": 12.95,
"learning_rate": 2.5e-07,
"loss": 1.3058,
"step": 421
},
{
"epoch": 12.98,
"learning_rate": 2.5e-07,
"loss": 1.2325,
"step": 422
},
{
"epoch": 13.02,
"learning_rate": 2.5e-07,
"loss": 1.3127,
"step": 423
},
{
"epoch": 13.05,
"learning_rate": 2.5e-07,
"loss": 1.382,
"step": 424
},
{
"epoch": 13.08,
"learning_rate": 2.5e-07,
"loss": 1.3625,
"step": 425
},
{
"epoch": 13.11,
"learning_rate": 2.5e-07,
"loss": 1.3533,
"step": 426
},
{
"epoch": 13.14,
"learning_rate": 2.5e-07,
"loss": 1.2046,
"step": 427
},
{
"epoch": 13.17,
"learning_rate": 2.5e-07,
"loss": 1.2991,
"step": 428
},
{
"epoch": 13.2,
"learning_rate": 2.5e-07,
"loss": 1.1785,
"step": 429
},
{
"epoch": 13.23,
"learning_rate": 2.5e-07,
"loss": 1.2795,
"step": 430
},
{
"epoch": 13.26,
"learning_rate": 2.5e-07,
"loss": 1.248,
"step": 431
},
{
"epoch": 13.29,
"learning_rate": 2.5e-07,
"loss": 1.3476,
"step": 432
},
{
"epoch": 13.32,
"learning_rate": 2.5e-07,
"loss": 1.277,
"step": 433
},
{
"epoch": 13.35,
"learning_rate": 2.5e-07,
"loss": 1.2044,
"step": 434
},
{
"epoch": 13.38,
"learning_rate": 2.5e-07,
"loss": 1.1909,
"step": 435
},
{
"epoch": 13.42,
"learning_rate": 2.5e-07,
"loss": 1.2544,
"step": 436
},
{
"epoch": 13.45,
"learning_rate": 2.5e-07,
"loss": 1.2036,
"step": 437
},
{
"epoch": 13.48,
"learning_rate": 2.5e-07,
"loss": 1.2141,
"step": 438
},
{
"epoch": 13.51,
"learning_rate": 2.5e-07,
"loss": 1.2086,
"step": 439
},
{
"epoch": 13.54,
"learning_rate": 2.5e-07,
"loss": 1.3863,
"step": 440
},
{
"epoch": 13.57,
"learning_rate": 2.5e-07,
"loss": 1.2435,
"step": 441
},
{
"epoch": 13.6,
"learning_rate": 2.5e-07,
"loss": 1.3655,
"step": 442
},
{
"epoch": 13.63,
"learning_rate": 2.5e-07,
"loss": 1.3562,
"step": 443
},
{
"epoch": 13.66,
"learning_rate": 2.5e-07,
"loss": 1.3441,
"step": 444
},
{
"epoch": 13.69,
"learning_rate": 2.5e-07,
"loss": 1.342,
"step": 445
},
{
"epoch": 13.72,
"learning_rate": 2.5e-07,
"loss": 1.0318,
"step": 446
},
{
"epoch": 13.75,
"learning_rate": 2.5e-07,
"loss": 1.3068,
"step": 447
},
{
"epoch": 13.78,
"learning_rate": 2.5e-07,
"loss": 1.4054,
"step": 448
},
{
"epoch": 13.82,
"learning_rate": 2.5e-07,
"loss": 1.1914,
"step": 449
},
{
"epoch": 13.85,
"learning_rate": 2.5e-07,
"loss": 1.3734,
"step": 450
},
{
"epoch": 13.88,
"learning_rate": 2.5e-07,
"loss": 1.3705,
"step": 451
},
{
"epoch": 13.91,
"learning_rate": 2.5e-07,
"loss": 1.2954,
"step": 452
},
{
"epoch": 13.94,
"learning_rate": 2.5e-07,
"loss": 1.2625,
"step": 453
},
{
"epoch": 13.97,
"learning_rate": 2.5e-07,
"loss": 1.0147,
"step": 454
},
{
"epoch": 14.0,
"learning_rate": 2.5e-07,
"loss": 1.2888,
"step": 455
},
{
"epoch": 14.03,
"learning_rate": 2.5e-07,
"loss": 1.1156,
"step": 456
},
{
"epoch": 14.06,
"learning_rate": 2.5e-07,
"loss": 1.421,
"step": 457
},
{
"epoch": 14.09,
"learning_rate": 2.5e-07,
"loss": 1.2285,
"step": 458
},
{
"epoch": 14.12,
"learning_rate": 2.5e-07,
"loss": 1.2881,
"step": 459
},
{
"epoch": 14.15,
"learning_rate": 2.5e-07,
"loss": 1.2028,
"step": 460
},
{
"epoch": 14.18,
"learning_rate": 2.5e-07,
"loss": 1.2213,
"step": 461
},
{
"epoch": 14.22,
"learning_rate": 2.5e-07,
"loss": 1.2455,
"step": 462
},
{
"epoch": 14.25,
"learning_rate": 2.5e-07,
"loss": 1.2788,
"step": 463
},
{
"epoch": 14.28,
"learning_rate": 2.5e-07,
"loss": 1.1349,
"step": 464
},
{
"epoch": 14.31,
"learning_rate": 2.5e-07,
"loss": 1.3234,
"step": 465
},
{
"epoch": 14.34,
"learning_rate": 2.5e-07,
"loss": 1.2652,
"step": 466
},
{
"epoch": 14.37,
"learning_rate": 2.5e-07,
"loss": 1.3123,
"step": 467
},
{
"epoch": 14.4,
"learning_rate": 2.5e-07,
"loss": 1.3447,
"step": 468
},
{
"epoch": 14.43,
"learning_rate": 2.5e-07,
"loss": 1.2818,
"step": 469
},
{
"epoch": 14.46,
"learning_rate": 2.5e-07,
"loss": 1.1331,
"step": 470
},
{
"epoch": 14.49,
"learning_rate": 2.5e-07,
"loss": 1.2309,
"step": 471
},
{
"epoch": 14.52,
"learning_rate": 2.5e-07,
"loss": 1.2867,
"step": 472
},
{
"epoch": 14.55,
"learning_rate": 2.5e-07,
"loss": 1.3339,
"step": 473
},
{
"epoch": 14.58,
"learning_rate": 2.5e-07,
"loss": 1.4158,
"step": 474
},
{
"epoch": 14.62,
"learning_rate": 2.5e-07,
"loss": 1.529,
"step": 475
},
{
"epoch": 14.65,
"learning_rate": 2.5e-07,
"loss": 1.258,
"step": 476
},
{
"epoch": 14.68,
"learning_rate": 2.5e-07,
"loss": 1.3033,
"step": 477
},
{
"epoch": 14.71,
"learning_rate": 2.5e-07,
"loss": 1.147,
"step": 478
},
{
"epoch": 14.74,
"learning_rate": 2.5e-07,
"loss": 1.3166,
"step": 479
},
{
"epoch": 14.77,
"learning_rate": 2.5e-07,
"loss": 1.2171,
"step": 480
},
{
"epoch": 14.77,
"eval_loss": 1.1870461702346802,
"eval_runtime": 1.7297,
"eval_samples_per_second": 4.047,
"eval_steps_per_second": 2.312,
"step": 480
},
{
"epoch": 14.8,
"learning_rate": 2.5e-07,
"loss": 1.18,
"step": 481
},
{
"epoch": 14.83,
"learning_rate": 2.5e-07,
"loss": 1.2117,
"step": 482
},
{
"epoch": 14.86,
"learning_rate": 2.5e-07,
"loss": 1.148,
"step": 483
},
{
"epoch": 14.89,
"learning_rate": 2.5e-07,
"loss": 1.3961,
"step": 484
},
{
"epoch": 14.92,
"learning_rate": 2.5e-07,
"loss": 1.3331,
"step": 485
},
{
"epoch": 14.95,
"learning_rate": 2.5e-07,
"loss": 1.3485,
"step": 486
},
{
"epoch": 14.98,
"learning_rate": 2.5e-07,
"loss": 1.0999,
"step": 487
},
{
"epoch": 15.02,
"learning_rate": 2.5e-07,
"loss": 1.2521,
"step": 488
},
{
"epoch": 15.05,
"learning_rate": 2.5e-07,
"loss": 1.2097,
"step": 489
},
{
"epoch": 15.08,
"learning_rate": 2.5e-07,
"loss": 1.2557,
"step": 490
},
{
"epoch": 15.11,
"learning_rate": 2.5e-07,
"loss": 1.2638,
"step": 491
},
{
"epoch": 15.14,
"learning_rate": 2.5e-07,
"loss": 1.3549,
"step": 492
},
{
"epoch": 15.17,
"learning_rate": 2.5e-07,
"loss": 1.2417,
"step": 493
},
{
"epoch": 15.2,
"learning_rate": 2.5e-07,
"loss": 1.2861,
"step": 494
},
{
"epoch": 15.23,
"learning_rate": 2.5e-07,
"loss": 1.2428,
"step": 495
},
{
"epoch": 15.26,
"learning_rate": 2.5e-07,
"loss": 1.1561,
"step": 496
},
{
"epoch": 15.29,
"learning_rate": 2.5e-07,
"loss": 1.2641,
"step": 497
},
{
"epoch": 15.32,
"learning_rate": 2.5e-07,
"loss": 1.3073,
"step": 498
},
{
"epoch": 15.35,
"learning_rate": 2.5e-07,
"loss": 1.2227,
"step": 499
},
{
"epoch": 15.38,
"learning_rate": 2.5e-07,
"loss": 1.3311,
"step": 500
},
{
"epoch": 15.42,
"learning_rate": 2.5e-07,
"loss": 1.2581,
"step": 501
},
{
"epoch": 15.45,
"learning_rate": 2.5e-07,
"loss": 1.2751,
"step": 502
},
{
"epoch": 15.48,
"learning_rate": 2.5e-07,
"loss": 1.4222,
"step": 503
},
{
"epoch": 15.51,
"learning_rate": 2.5e-07,
"loss": 1.3263,
"step": 504
},
{
"epoch": 15.54,
"learning_rate": 2.5e-07,
"loss": 1.2236,
"step": 505
},
{
"epoch": 15.57,
"learning_rate": 2.5e-07,
"loss": 1.3125,
"step": 506
},
{
"epoch": 15.6,
"learning_rate": 2.5e-07,
"loss": 1.424,
"step": 507
},
{
"epoch": 15.63,
"learning_rate": 2.5e-07,
"loss": 1.2231,
"step": 508
},
{
"epoch": 15.66,
"learning_rate": 2.5e-07,
"loss": 1.2089,
"step": 509
},
{
"epoch": 15.69,
"learning_rate": 2.5e-07,
"loss": 1.4663,
"step": 510
},
{
"epoch": 15.72,
"learning_rate": 2.5e-07,
"loss": 1.3236,
"step": 511
},
{
"epoch": 15.75,
"learning_rate": 2.5e-07,
"loss": 1.2133,
"step": 512
},
{
"epoch": 15.78,
"learning_rate": 2.5e-07,
"loss": 1.1598,
"step": 513
},
{
"epoch": 15.82,
"learning_rate": 2.5e-07,
"loss": 1.3023,
"step": 514
},
{
"epoch": 15.85,
"learning_rate": 2.5e-07,
"loss": 1.139,
"step": 515
},
{
"epoch": 15.88,
"learning_rate": 2.5e-07,
"loss": 1.1881,
"step": 516
},
{
"epoch": 15.91,
"learning_rate": 2.5e-07,
"loss": 1.1448,
"step": 517
},
{
"epoch": 15.94,
"learning_rate": 2.5e-07,
"loss": 1.2321,
"step": 518
},
{
"epoch": 15.97,
"learning_rate": 2.5e-07,
"loss": 1.2134,
"step": 519
},
{
"epoch": 16.0,
"learning_rate": 2.5e-07,
"loss": 1.3268,
"step": 520
},
{
"epoch": 16.03,
"learning_rate": 2.5e-07,
"loss": 1.3858,
"step": 521
},
{
"epoch": 16.06,
"learning_rate": 2.5e-07,
"loss": 1.3358,
"step": 522
},
{
"epoch": 16.09,
"learning_rate": 2.5e-07,
"loss": 1.2051,
"step": 523
},
{
"epoch": 16.12,
"learning_rate": 2.5e-07,
"loss": 1.1431,
"step": 524
},
{
"epoch": 16.15,
"learning_rate": 2.5e-07,
"loss": 1.2539,
"step": 525
},
{
"epoch": 16.18,
"learning_rate": 2.5e-07,
"loss": 1.3096,
"step": 526
},
{
"epoch": 16.22,
"learning_rate": 2.5e-07,
"loss": 1.1879,
"step": 527
},
{
"epoch": 16.25,
"learning_rate": 2.5e-07,
"loss": 1.3215,
"step": 528
},
{
"epoch": 16.28,
"learning_rate": 2.5e-07,
"loss": 1.2835,
"step": 529
},
{
"epoch": 16.31,
"learning_rate": 2.5e-07,
"loss": 1.2596,
"step": 530
},
{
"epoch": 16.34,
"learning_rate": 2.5e-07,
"loss": 1.2635,
"step": 531
},
{
"epoch": 16.37,
"learning_rate": 2.5e-07,
"loss": 1.4138,
"step": 532
},
{
"epoch": 16.4,
"learning_rate": 2.5e-07,
"loss": 1.2552,
"step": 533
},
{
"epoch": 16.43,
"learning_rate": 2.5e-07,
"loss": 1.337,
"step": 534
},
{
"epoch": 16.46,
"learning_rate": 2.5e-07,
"loss": 1.1408,
"step": 535
},
{
"epoch": 16.49,
"learning_rate": 2.5e-07,
"loss": 1.2962,
"step": 536
},
{
"epoch": 16.52,
"learning_rate": 2.5e-07,
"loss": 1.3547,
"step": 537
},
{
"epoch": 16.55,
"learning_rate": 2.5e-07,
"loss": 1.2199,
"step": 538
},
{
"epoch": 16.58,
"learning_rate": 2.5e-07,
"loss": 1.2924,
"step": 539
},
{
"epoch": 16.62,
"learning_rate": 2.5e-07,
"loss": 1.1342,
"step": 540
},
{
"epoch": 16.65,
"learning_rate": 2.5e-07,
"loss": 1.2628,
"step": 541
},
{
"epoch": 16.68,
"learning_rate": 2.5e-07,
"loss": 0.9506,
"step": 542
},
{
"epoch": 16.71,
"learning_rate": 2.5e-07,
"loss": 1.3052,
"step": 543
},
{
"epoch": 16.74,
"learning_rate": 2.5e-07,
"loss": 1.3089,
"step": 544
},
{
"epoch": 16.77,
"learning_rate": 2.5e-07,
"loss": 1.3776,
"step": 545
},
{
"epoch": 16.8,
"learning_rate": 2.5e-07,
"loss": 1.0516,
"step": 546
},
{
"epoch": 16.83,
"learning_rate": 2.5e-07,
"loss": 1.1433,
"step": 547
},
{
"epoch": 16.86,
"learning_rate": 2.5e-07,
"loss": 1.3056,
"step": 548
},
{
"epoch": 16.89,
"learning_rate": 2.5e-07,
"loss": 1.2652,
"step": 549
},
{
"epoch": 16.92,
"learning_rate": 2.5e-07,
"loss": 1.2002,
"step": 550
},
{
"epoch": 16.95,
"learning_rate": 2.5e-07,
"loss": 1.181,
"step": 551
},
{
"epoch": 16.98,
"learning_rate": 2.5e-07,
"loss": 1.0902,
"step": 552
},
{
"epoch": 17.02,
"learning_rate": 2.5e-07,
"loss": 1.2845,
"step": 553
},
{
"epoch": 17.05,
"learning_rate": 2.5e-07,
"loss": 1.2646,
"step": 554
},
{
"epoch": 17.08,
"learning_rate": 2.5e-07,
"loss": 1.0982,
"step": 555
},
{
"epoch": 17.11,
"learning_rate": 2.5e-07,
"loss": 1.1109,
"step": 556
},
{
"epoch": 17.14,
"learning_rate": 2.5e-07,
"loss": 1.2508,
"step": 557
},
{
"epoch": 17.17,
"learning_rate": 2.5e-07,
"loss": 1.2859,
"step": 558
},
{
"epoch": 17.2,
"learning_rate": 2.5e-07,
"loss": 0.9845,
"step": 559
},
{
"epoch": 17.23,
"learning_rate": 2.5e-07,
"loss": 1.1686,
"step": 560
},
{
"epoch": 17.23,
"eval_loss": 1.173031210899353,
"eval_runtime": 1.7371,
"eval_samples_per_second": 4.03,
"eval_steps_per_second": 2.303,
"step": 560
},
{
"epoch": 17.26,
"learning_rate": 2.5e-07,
"loss": 1.2483,
"step": 561
},
{
"epoch": 17.29,
"learning_rate": 2.5e-07,
"loss": 1.3438,
"step": 562
},
{
"epoch": 17.32,
"learning_rate": 2.5e-07,
"loss": 1.2862,
"step": 563
},
{
"epoch": 17.35,
"learning_rate": 2.5e-07,
"loss": 1.2464,
"step": 564
},
{
"epoch": 17.38,
"learning_rate": 2.5e-07,
"loss": 1.3084,
"step": 565
},
{
"epoch": 17.42,
"learning_rate": 2.5e-07,
"loss": 1.2382,
"step": 566
},
{
"epoch": 17.45,
"learning_rate": 2.5e-07,
"loss": 1.2763,
"step": 567
},
{
"epoch": 17.48,
"learning_rate": 2.5e-07,
"loss": 1.1901,
"step": 568
},
{
"epoch": 17.51,
"learning_rate": 2.5e-07,
"loss": 1.0708,
"step": 569
},
{
"epoch": 17.54,
"learning_rate": 2.5e-07,
"loss": 1.2879,
"step": 570
},
{
"epoch": 17.57,
"learning_rate": 2.5e-07,
"loss": 1.2257,
"step": 571
},
{
"epoch": 17.6,
"learning_rate": 2.5e-07,
"loss": 1.06,
"step": 572
},
{
"epoch": 17.63,
"learning_rate": 2.5e-07,
"loss": 1.3583,
"step": 573
},
{
"epoch": 17.66,
"learning_rate": 2.5e-07,
"loss": 1.2269,
"step": 574
},
{
"epoch": 17.69,
"learning_rate": 2.5e-07,
"loss": 1.3294,
"step": 575
},
{
"epoch": 17.72,
"learning_rate": 2.5e-07,
"loss": 1.3498,
"step": 576
},
{
"epoch": 17.75,
"learning_rate": 2.5e-07,
"loss": 1.0898,
"step": 577
},
{
"epoch": 17.78,
"learning_rate": 2.5e-07,
"loss": 1.2676,
"step": 578
},
{
"epoch": 17.82,
"learning_rate": 2.5e-07,
"loss": 1.3162,
"step": 579
},
{
"epoch": 17.85,
"learning_rate": 2.5e-07,
"loss": 1.3982,
"step": 580
},
{
"epoch": 17.88,
"learning_rate": 2.5e-07,
"loss": 1.3926,
"step": 581
},
{
"epoch": 17.91,
"learning_rate": 2.5e-07,
"loss": 1.1394,
"step": 582
},
{
"epoch": 17.94,
"learning_rate": 2.5e-07,
"loss": 1.2473,
"step": 583
},
{
"epoch": 17.97,
"learning_rate": 2.5e-07,
"loss": 1.1476,
"step": 584
},
{
"epoch": 18.0,
"learning_rate": 2.5e-07,
"loss": 1.226,
"step": 585
},
{
"epoch": 18.03,
"learning_rate": 2.5e-07,
"loss": 1.2704,
"step": 586
},
{
"epoch": 18.06,
"learning_rate": 2.5e-07,
"loss": 0.9477,
"step": 587
},
{
"epoch": 18.09,
"learning_rate": 2.5e-07,
"loss": 1.3465,
"step": 588
},
{
"epoch": 18.12,
"learning_rate": 2.5e-07,
"loss": 1.3294,
"step": 589
},
{
"epoch": 18.15,
"learning_rate": 2.5e-07,
"loss": 1.057,
"step": 590
},
{
"epoch": 18.18,
"learning_rate": 2.5e-07,
"loss": 1.2434,
"step": 591
},
{
"epoch": 18.22,
"learning_rate": 2.5e-07,
"loss": 1.2729,
"step": 592
},
{
"epoch": 18.25,
"learning_rate": 2.5e-07,
"loss": 1.1356,
"step": 593
},
{
"epoch": 18.28,
"learning_rate": 2.5e-07,
"loss": 1.1587,
"step": 594
},
{
"epoch": 18.31,
"learning_rate": 2.5e-07,
"loss": 1.1197,
"step": 595
},
{
"epoch": 18.34,
"learning_rate": 2.5e-07,
"loss": 1.1778,
"step": 596
},
{
"epoch": 18.37,
"learning_rate": 2.5e-07,
"loss": 1.1198,
"step": 597
},
{
"epoch": 18.4,
"learning_rate": 2.5e-07,
"loss": 1.3865,
"step": 598
},
{
"epoch": 18.43,
"learning_rate": 2.5e-07,
"loss": 1.2218,
"step": 599
},
{
"epoch": 18.46,
"learning_rate": 2.5e-07,
"loss": 1.0191,
"step": 600
},
{
"epoch": 18.49,
"learning_rate": 2.5e-07,
"loss": 1.3666,
"step": 601
},
{
"epoch": 18.52,
"learning_rate": 2.5e-07,
"loss": 1.3141,
"step": 602
},
{
"epoch": 18.55,
"learning_rate": 2.5e-07,
"loss": 1.279,
"step": 603
},
{
"epoch": 18.58,
"learning_rate": 2.5e-07,
"loss": 1.32,
"step": 604
},
{
"epoch": 18.62,
"learning_rate": 2.5e-07,
"loss": 1.3216,
"step": 605
},
{
"epoch": 18.65,
"learning_rate": 2.5e-07,
"loss": 1.1378,
"step": 606
},
{
"epoch": 18.68,
"learning_rate": 2.5e-07,
"loss": 1.174,
"step": 607
},
{
"epoch": 18.71,
"learning_rate": 2.5e-07,
"loss": 1.1897,
"step": 608
},
{
"epoch": 18.74,
"learning_rate": 2.5e-07,
"loss": 1.2227,
"step": 609
},
{
"epoch": 18.77,
"learning_rate": 2.5e-07,
"loss": 1.2315,
"step": 610
},
{
"epoch": 18.8,
"learning_rate": 2.5e-07,
"loss": 1.1637,
"step": 611
},
{
"epoch": 18.83,
"learning_rate": 2.5e-07,
"loss": 1.2978,
"step": 612
},
{
"epoch": 18.86,
"learning_rate": 2.5e-07,
"loss": 1.2103,
"step": 613
},
{
"epoch": 18.89,
"learning_rate": 2.5e-07,
"loss": 1.3145,
"step": 614
},
{
"epoch": 18.92,
"learning_rate": 2.5e-07,
"loss": 1.2807,
"step": 615
},
{
"epoch": 18.95,
"learning_rate": 2.5e-07,
"loss": 1.2668,
"step": 616
},
{
"epoch": 18.98,
"learning_rate": 2.5e-07,
"loss": 1.2986,
"step": 617
},
{
"epoch": 19.02,
"learning_rate": 2.5e-07,
"loss": 1.1996,
"step": 618
},
{
"epoch": 19.05,
"learning_rate": 2.5e-07,
"loss": 0.9905,
"step": 619
},
{
"epoch": 19.08,
"learning_rate": 2.5e-07,
"loss": 1.3338,
"step": 620
},
{
"epoch": 19.11,
"learning_rate": 2.5e-07,
"loss": 1.0955,
"step": 621
},
{
"epoch": 19.14,
"learning_rate": 2.5e-07,
"loss": 1.1721,
"step": 622
},
{
"epoch": 19.17,
"learning_rate": 2.5e-07,
"loss": 1.2088,
"step": 623
},
{
"epoch": 19.2,
"learning_rate": 2.5e-07,
"loss": 1.3623,
"step": 624
},
{
"epoch": 19.23,
"learning_rate": 2.5e-07,
"loss": 1.0578,
"step": 625
},
{
"epoch": 19.26,
"learning_rate": 2.5e-07,
"loss": 1.0765,
"step": 626
},
{
"epoch": 19.29,
"learning_rate": 2.5e-07,
"loss": 1.3624,
"step": 627
},
{
"epoch": 19.32,
"learning_rate": 2.5e-07,
"loss": 1.1778,
"step": 628
},
{
"epoch": 19.35,
"learning_rate": 2.5e-07,
"loss": 1.2267,
"step": 629
},
{
"epoch": 19.38,
"learning_rate": 2.5e-07,
"loss": 1.2311,
"step": 630
},
{
"epoch": 19.42,
"learning_rate": 2.5e-07,
"loss": 1.3226,
"step": 631
},
{
"epoch": 19.45,
"learning_rate": 2.5e-07,
"loss": 1.2294,
"step": 632
},
{
"epoch": 19.48,
"learning_rate": 2.5e-07,
"loss": 1.0192,
"step": 633
},
{
"epoch": 19.51,
"learning_rate": 2.5e-07,
"loss": 1.2559,
"step": 634
},
{
"epoch": 19.54,
"learning_rate": 2.5e-07,
"loss": 1.2497,
"step": 635
},
{
"epoch": 19.57,
"learning_rate": 2.5e-07,
"loss": 1.4026,
"step": 636
},
{
"epoch": 19.6,
"learning_rate": 2.5e-07,
"loss": 1.2551,
"step": 637
},
{
"epoch": 19.63,
"learning_rate": 2.5e-07,
"loss": 1.2695,
"step": 638
},
{
"epoch": 19.66,
"learning_rate": 2.5e-07,
"loss": 1.2047,
"step": 639
},
{
"epoch": 19.69,
"learning_rate": 2.5e-07,
"loss": 1.1506,
"step": 640
},
{
"epoch": 19.69,
"eval_loss": 1.161481261253357,
"eval_runtime": 1.7425,
"eval_samples_per_second": 4.017,
"eval_steps_per_second": 2.296,
"step": 640
},
{
"epoch": 19.72,
"learning_rate": 2.5e-07,
"loss": 1.2597,
"step": 641
},
{
"epoch": 19.75,
"learning_rate": 2.5e-07,
"loss": 1.1908,
"step": 642
},
{
"epoch": 19.78,
"learning_rate": 2.5e-07,
"loss": 1.2338,
"step": 643
},
{
"epoch": 19.82,
"learning_rate": 2.5e-07,
"loss": 1.31,
"step": 644
},
{
"epoch": 19.85,
"learning_rate": 2.5e-07,
"loss": 1.0713,
"step": 645
},
{
"epoch": 19.88,
"learning_rate": 2.5e-07,
"loss": 1.2727,
"step": 646
},
{
"epoch": 19.91,
"learning_rate": 2.5e-07,
"loss": 1.2029,
"step": 647
},
{
"epoch": 19.94,
"learning_rate": 2.5e-07,
"loss": 1.3007,
"step": 648
},
{
"epoch": 19.97,
"learning_rate": 2.5e-07,
"loss": 1.3173,
"step": 649
},
{
"epoch": 20.0,
"learning_rate": 2.5e-07,
"loss": 1.2024,
"step": 650
},
{
"epoch": 20.03,
"learning_rate": 2.5e-07,
"loss": 1.1753,
"step": 651
},
{
"epoch": 20.06,
"learning_rate": 2.5e-07,
"loss": 1.273,
"step": 652
},
{
"epoch": 20.09,
"learning_rate": 2.5e-07,
"loss": 1.2252,
"step": 653
},
{
"epoch": 20.12,
"learning_rate": 2.5e-07,
"loss": 1.2582,
"step": 654
},
{
"epoch": 20.15,
"learning_rate": 2.5e-07,
"loss": 1.2634,
"step": 655
},
{
"epoch": 20.18,
"learning_rate": 2.5e-07,
"loss": 1.2168,
"step": 656
},
{
"epoch": 20.22,
"learning_rate": 2.5e-07,
"loss": 1.2868,
"step": 657
},
{
"epoch": 20.25,
"learning_rate": 2.5e-07,
"loss": 1.2229,
"step": 658
},
{
"epoch": 20.28,
"learning_rate": 2.5e-07,
"loss": 1.0799,
"step": 659
},
{
"epoch": 20.31,
"learning_rate": 2.5e-07,
"loss": 1.187,
"step": 660
},
{
"epoch": 20.34,
"learning_rate": 2.5e-07,
"loss": 1.3681,
"step": 661
},
{
"epoch": 20.37,
"learning_rate": 2.5e-07,
"loss": 1.1951,
"step": 662
},
{
"epoch": 20.4,
"learning_rate": 2.5e-07,
"loss": 1.0643,
"step": 663
},
{
"epoch": 20.43,
"learning_rate": 2.5e-07,
"loss": 1.2073,
"step": 664
},
{
"epoch": 20.46,
"learning_rate": 2.5e-07,
"loss": 1.2797,
"step": 665
},
{
"epoch": 20.49,
"learning_rate": 2.5e-07,
"loss": 1.3999,
"step": 666
},
{
"epoch": 20.52,
"learning_rate": 2.5e-07,
"loss": 1.2501,
"step": 667
},
{
"epoch": 20.55,
"learning_rate": 2.5e-07,
"loss": 1.2163,
"step": 668
},
{
"epoch": 20.58,
"learning_rate": 2.5e-07,
"loss": 1.3067,
"step": 669
},
{
"epoch": 20.62,
"learning_rate": 2.5e-07,
"loss": 1.1854,
"step": 670
},
{
"epoch": 20.65,
"learning_rate": 2.5e-07,
"loss": 1.1188,
"step": 671
},
{
"epoch": 20.68,
"learning_rate": 2.5e-07,
"loss": 1.2344,
"step": 672
},
{
"epoch": 20.71,
"learning_rate": 2.5e-07,
"loss": 1.2431,
"step": 673
},
{
"epoch": 20.74,
"learning_rate": 2.5e-07,
"loss": 1.1848,
"step": 674
},
{
"epoch": 20.77,
"learning_rate": 2.5e-07,
"loss": 1.3042,
"step": 675
},
{
"epoch": 20.8,
"learning_rate": 2.5e-07,
"loss": 1.121,
"step": 676
},
{
"epoch": 20.83,
"learning_rate": 2.5e-07,
"loss": 1.1777,
"step": 677
},
{
"epoch": 20.86,
"learning_rate": 2.5e-07,
"loss": 1.2183,
"step": 678
},
{
"epoch": 20.89,
"learning_rate": 2.5e-07,
"loss": 1.1327,
"step": 679
},
{
"epoch": 20.92,
"learning_rate": 2.5e-07,
"loss": 1.1136,
"step": 680
},
{
"epoch": 20.95,
"learning_rate": 2.5e-07,
"loss": 1.2761,
"step": 681
},
{
"epoch": 20.98,
"learning_rate": 2.5e-07,
"loss": 1.079,
"step": 682
},
{
"epoch": 21.02,
"learning_rate": 2.5e-07,
"loss": 1.2263,
"step": 683
},
{
"epoch": 21.05,
"learning_rate": 2.5e-07,
"loss": 1.2507,
"step": 684
},
{
"epoch": 21.08,
"learning_rate": 2.5e-07,
"loss": 1.1574,
"step": 685
},
{
"epoch": 21.11,
"learning_rate": 2.5e-07,
"loss": 1.0829,
"step": 686
},
{
"epoch": 21.14,
"learning_rate": 2.5e-07,
"loss": 1.2645,
"step": 687
},
{
"epoch": 21.17,
"learning_rate": 2.5e-07,
"loss": 1.2632,
"step": 688
},
{
"epoch": 21.2,
"learning_rate": 2.5e-07,
"loss": 1.2332,
"step": 689
},
{
"epoch": 21.23,
"learning_rate": 2.5e-07,
"loss": 1.2069,
"step": 690
},
{
"epoch": 21.26,
"learning_rate": 2.5e-07,
"loss": 1.2087,
"step": 691
},
{
"epoch": 21.29,
"learning_rate": 2.5e-07,
"loss": 1.1768,
"step": 692
},
{
"epoch": 21.32,
"learning_rate": 2.5e-07,
"loss": 1.0126,
"step": 693
},
{
"epoch": 21.35,
"learning_rate": 2.5e-07,
"loss": 1.3168,
"step": 694
},
{
"epoch": 21.38,
"learning_rate": 2.5e-07,
"loss": 1.0629,
"step": 695
},
{
"epoch": 21.42,
"learning_rate": 2.5e-07,
"loss": 1.1156,
"step": 696
},
{
"epoch": 21.45,
"learning_rate": 2.5e-07,
"loss": 1.2308,
"step": 697
},
{
"epoch": 21.48,
"learning_rate": 2.5e-07,
"loss": 1.2688,
"step": 698
},
{
"epoch": 21.51,
"learning_rate": 2.5e-07,
"loss": 1.1114,
"step": 699
},
{
"epoch": 21.54,
"learning_rate": 2.5e-07,
"loss": 1.0432,
"step": 700
},
{
"epoch": 21.57,
"learning_rate": 2.5e-07,
"loss": 1.2938,
"step": 701
},
{
"epoch": 21.6,
"learning_rate": 2.5e-07,
"loss": 1.2673,
"step": 702
},
{
"epoch": 21.63,
"learning_rate": 2.5e-07,
"loss": 1.2795,
"step": 703
},
{
"epoch": 21.66,
"learning_rate": 2.5e-07,
"loss": 1.1992,
"step": 704
},
{
"epoch": 21.69,
"learning_rate": 2.5e-07,
"loss": 1.2114,
"step": 705
},
{
"epoch": 21.72,
"learning_rate": 2.5e-07,
"loss": 1.1559,
"step": 706
},
{
"epoch": 21.75,
"learning_rate": 2.5e-07,
"loss": 1.1923,
"step": 707
},
{
"epoch": 21.78,
"learning_rate": 2.5e-07,
"loss": 1.135,
"step": 708
},
{
"epoch": 21.82,
"learning_rate": 2.5e-07,
"loss": 1.1937,
"step": 709
},
{
"epoch": 21.85,
"learning_rate": 2.5e-07,
"loss": 1.3328,
"step": 710
},
{
"epoch": 21.88,
"learning_rate": 2.5e-07,
"loss": 1.2513,
"step": 711
},
{
"epoch": 21.91,
"learning_rate": 2.5e-07,
"loss": 1.197,
"step": 712
},
{
"epoch": 21.94,
"learning_rate": 2.5e-07,
"loss": 1.0483,
"step": 713
},
{
"epoch": 21.97,
"learning_rate": 2.5e-07,
"loss": 1.3072,
"step": 714
},
{
"epoch": 22.0,
"learning_rate": 2.5e-07,
"loss": 1.2929,
"step": 715
},
{
"epoch": 22.03,
"learning_rate": 2.5e-07,
"loss": 1.2504,
"step": 716
},
{
"epoch": 22.06,
"learning_rate": 2.5e-07,
"loss": 1.0944,
"step": 717
},
{
"epoch": 22.09,
"learning_rate": 2.5e-07,
"loss": 1.2897,
"step": 718
},
{
"epoch": 22.12,
"learning_rate": 2.5e-07,
"loss": 1.1668,
"step": 719
},
{
"epoch": 22.15,
"learning_rate": 2.5e-07,
"loss": 1.1829,
"step": 720
},
{
"epoch": 22.15,
"eval_loss": 1.151276707649231,
"eval_runtime": 1.757,
"eval_samples_per_second": 3.984,
"eval_steps_per_second": 2.277,
"step": 720
},
{
"epoch": 22.18,
"learning_rate": 2.5e-07,
"loss": 1.1646,
"step": 721
},
{
"epoch": 22.22,
"learning_rate": 2.5e-07,
"loss": 1.2432,
"step": 722
},
{
"epoch": 22.25,
"learning_rate": 2.5e-07,
"loss": 1.3598,
"step": 723
},
{
"epoch": 22.28,
"learning_rate": 2.5e-07,
"loss": 0.9391,
"step": 724
},
{
"epoch": 22.31,
"learning_rate": 2.5e-07,
"loss": 1.2974,
"step": 725
},
{
"epoch": 22.34,
"learning_rate": 2.5e-07,
"loss": 1.2225,
"step": 726
},
{
"epoch": 22.37,
"learning_rate": 2.5e-07,
"loss": 1.2099,
"step": 727
},
{
"epoch": 22.4,
"learning_rate": 2.5e-07,
"loss": 1.1805,
"step": 728
},
{
"epoch": 22.43,
"learning_rate": 2.5e-07,
"loss": 1.1195,
"step": 729
},
{
"epoch": 22.46,
"learning_rate": 2.5e-07,
"loss": 1.1393,
"step": 730
},
{
"epoch": 22.49,
"learning_rate": 2.5e-07,
"loss": 1.0755,
"step": 731
},
{
"epoch": 22.52,
"learning_rate": 2.5e-07,
"loss": 1.299,
"step": 732
},
{
"epoch": 22.55,
"learning_rate": 2.5e-07,
"loss": 1.2319,
"step": 733
},
{
"epoch": 22.58,
"learning_rate": 2.5e-07,
"loss": 1.1846,
"step": 734
},
{
"epoch": 22.62,
"learning_rate": 2.5e-07,
"loss": 1.3545,
"step": 735
},
{
"epoch": 22.65,
"learning_rate": 2.5e-07,
"loss": 1.1044,
"step": 736
},
{
"epoch": 22.68,
"learning_rate": 2.5e-07,
"loss": 1.1402,
"step": 737
},
{
"epoch": 22.71,
"learning_rate": 2.5e-07,
"loss": 1.2412,
"step": 738
},
{
"epoch": 22.74,
"learning_rate": 2.5e-07,
"loss": 1.2066,
"step": 739
},
{
"epoch": 22.77,
"learning_rate": 2.5e-07,
"loss": 1.243,
"step": 740
},
{
"epoch": 22.8,
"learning_rate": 2.5e-07,
"loss": 1.037,
"step": 741
},
{
"epoch": 22.83,
"learning_rate": 2.5e-07,
"loss": 1.1585,
"step": 742
},
{
"epoch": 22.86,
"learning_rate": 2.5e-07,
"loss": 1.2563,
"step": 743
},
{
"epoch": 22.89,
"learning_rate": 2.5e-07,
"loss": 1.2223,
"step": 744
},
{
"epoch": 22.92,
"learning_rate": 2.5e-07,
"loss": 1.1915,
"step": 745
},
{
"epoch": 22.95,
"learning_rate": 2.5e-07,
"loss": 1.2192,
"step": 746
},
{
"epoch": 22.98,
"learning_rate": 2.5e-07,
"loss": 1.0116,
"step": 747
},
{
"epoch": 23.02,
"learning_rate": 2.5e-07,
"loss": 1.3601,
"step": 748
},
{
"epoch": 23.05,
"learning_rate": 2.5e-07,
"loss": 0.9849,
"step": 749
},
{
"epoch": 23.08,
"learning_rate": 2.5e-07,
"loss": 1.238,
"step": 750
},
{
"epoch": 23.11,
"learning_rate": 2.5e-07,
"loss": 1.1157,
"step": 751
},
{
"epoch": 23.14,
"learning_rate": 2.5e-07,
"loss": 1.1531,
"step": 752
},
{
"epoch": 23.17,
"learning_rate": 2.5e-07,
"loss": 1.2322,
"step": 753
},
{
"epoch": 23.2,
"learning_rate": 2.5e-07,
"loss": 1.1656,
"step": 754
},
{
"epoch": 23.23,
"learning_rate": 2.5e-07,
"loss": 1.1579,
"step": 755
},
{
"epoch": 23.26,
"learning_rate": 2.5e-07,
"loss": 1.1663,
"step": 756
},
{
"epoch": 23.29,
"learning_rate": 2.5e-07,
"loss": 1.2587,
"step": 757
},
{
"epoch": 23.32,
"learning_rate": 2.5e-07,
"loss": 1.2768,
"step": 758
},
{
"epoch": 23.35,
"learning_rate": 2.5e-07,
"loss": 1.1416,
"step": 759
},
{
"epoch": 23.38,
"learning_rate": 2.5e-07,
"loss": 1.2223,
"step": 760
},
{
"epoch": 23.42,
"learning_rate": 2.5e-07,
"loss": 1.2657,
"step": 761
},
{
"epoch": 23.45,
"learning_rate": 2.5e-07,
"loss": 1.0958,
"step": 762
},
{
"epoch": 23.48,
"learning_rate": 2.5e-07,
"loss": 1.1521,
"step": 763
},
{
"epoch": 23.51,
"learning_rate": 2.5e-07,
"loss": 1.0966,
"step": 764
},
{
"epoch": 23.54,
"learning_rate": 2.5e-07,
"loss": 1.1623,
"step": 765
},
{
"epoch": 23.57,
"learning_rate": 2.5e-07,
"loss": 1.2151,
"step": 766
},
{
"epoch": 23.6,
"learning_rate": 2.5e-07,
"loss": 1.163,
"step": 767
},
{
"epoch": 23.63,
"learning_rate": 2.5e-07,
"loss": 1.1975,
"step": 768
},
{
"epoch": 23.66,
"learning_rate": 2.5e-07,
"loss": 1.3573,
"step": 769
},
{
"epoch": 23.69,
"learning_rate": 2.5e-07,
"loss": 1.2025,
"step": 770
},
{
"epoch": 23.72,
"learning_rate": 2.5e-07,
"loss": 1.2213,
"step": 771
},
{
"epoch": 23.75,
"learning_rate": 2.5e-07,
"loss": 1.3908,
"step": 772
},
{
"epoch": 23.78,
"learning_rate": 2.5e-07,
"loss": 1.2604,
"step": 773
},
{
"epoch": 23.82,
"learning_rate": 2.5e-07,
"loss": 1.1152,
"step": 774
},
{
"epoch": 23.85,
"learning_rate": 2.5e-07,
"loss": 1.2275,
"step": 775
},
{
"epoch": 23.88,
"learning_rate": 2.5e-07,
"loss": 1.0982,
"step": 776
},
{
"epoch": 23.91,
"learning_rate": 2.5e-07,
"loss": 1.2497,
"step": 777
},
{
"epoch": 23.94,
"learning_rate": 2.5e-07,
"loss": 1.2903,
"step": 778
},
{
"epoch": 23.97,
"learning_rate": 2.5e-07,
"loss": 1.3096,
"step": 779
},
{
"epoch": 24.0,
"learning_rate": 2.5e-07,
"loss": 1.2009,
"step": 780
},
{
"epoch": 24.03,
"learning_rate": 2.5e-07,
"loss": 1.2357,
"step": 781
},
{
"epoch": 24.06,
"learning_rate": 2.5e-07,
"loss": 1.2579,
"step": 782
},
{
"epoch": 24.09,
"learning_rate": 2.5e-07,
"loss": 1.2434,
"step": 783
},
{
"epoch": 24.12,
"learning_rate": 2.5e-07,
"loss": 1.198,
"step": 784
},
{
"epoch": 24.15,
"learning_rate": 2.5e-07,
"loss": 1.1289,
"step": 785
},
{
"epoch": 24.18,
"learning_rate": 2.5e-07,
"loss": 1.2248,
"step": 786
},
{
"epoch": 24.22,
"learning_rate": 2.5e-07,
"loss": 1.2352,
"step": 787
},
{
"epoch": 24.25,
"learning_rate": 2.5e-07,
"loss": 1.1982,
"step": 788
},
{
"epoch": 24.28,
"learning_rate": 2.5e-07,
"loss": 1.2262,
"step": 789
},
{
"epoch": 24.31,
"learning_rate": 2.5e-07,
"loss": 1.2906,
"step": 790
},
{
"epoch": 24.34,
"learning_rate": 2.5e-07,
"loss": 1.1845,
"step": 791
},
{
"epoch": 24.37,
"learning_rate": 2.5e-07,
"loss": 1.2187,
"step": 792
},
{
"epoch": 24.4,
"learning_rate": 2.5e-07,
"loss": 1.1024,
"step": 793
},
{
"epoch": 24.43,
"learning_rate": 2.5e-07,
"loss": 1.3153,
"step": 794
},
{
"epoch": 24.46,
"learning_rate": 2.5e-07,
"loss": 1.0233,
"step": 795
},
{
"epoch": 24.49,
"learning_rate": 2.5e-07,
"loss": 1.1543,
"step": 796
},
{
"epoch": 24.52,
"learning_rate": 2.5e-07,
"loss": 1.1003,
"step": 797
},
{
"epoch": 24.55,
"learning_rate": 2.5e-07,
"loss": 1.212,
"step": 798
},
{
"epoch": 24.58,
"learning_rate": 2.5e-07,
"loss": 1.1479,
"step": 799
},
{
"epoch": 24.62,
"learning_rate": 2.5e-07,
"loss": 1.267,
"step": 800
},
{
"epoch": 24.62,
"eval_loss": 1.1453821659088135,
"eval_runtime": 1.7358,
"eval_samples_per_second": 4.033,
"eval_steps_per_second": 2.304,
"step": 800
},
{
"epoch": 24.65,
"learning_rate": 2.5e-07,
"loss": 1.0811,
"step": 801
},
{
"epoch": 24.68,
"learning_rate": 2.5e-07,
"loss": 1.318,
"step": 802
},
{
"epoch": 24.71,
"learning_rate": 2.5e-07,
"loss": 1.2086,
"step": 803
},
{
"epoch": 24.74,
"learning_rate": 2.5e-07,
"loss": 1.288,
"step": 804
},
{
"epoch": 24.77,
"learning_rate": 2.5e-07,
"loss": 1.1407,
"step": 805
},
{
"epoch": 24.8,
"learning_rate": 2.5e-07,
"loss": 0.9671,
"step": 806
},
{
"epoch": 24.83,
"learning_rate": 2.5e-07,
"loss": 1.1035,
"step": 807
},
{
"epoch": 24.86,
"learning_rate": 2.5e-07,
"loss": 1.0981,
"step": 808
},
{
"epoch": 24.89,
"learning_rate": 2.5e-07,
"loss": 1.3622,
"step": 809
},
{
"epoch": 24.92,
"learning_rate": 2.5e-07,
"loss": 1.2952,
"step": 810
},
{
"epoch": 24.95,
"learning_rate": 2.5e-07,
"loss": 1.3048,
"step": 811
},
{
"epoch": 24.98,
"learning_rate": 2.5e-07,
"loss": 1.258,
"step": 812
},
{
"epoch": 25.02,
"learning_rate": 2.5e-07,
"loss": 1.0539,
"step": 813
},
{
"epoch": 25.05,
"learning_rate": 2.5e-07,
"loss": 1.2292,
"step": 814
},
{
"epoch": 25.08,
"learning_rate": 2.5e-07,
"loss": 1.174,
"step": 815
},
{
"epoch": 25.11,
"learning_rate": 2.5e-07,
"loss": 1.2923,
"step": 816
},
{
"epoch": 25.14,
"learning_rate": 2.5e-07,
"loss": 1.0353,
"step": 817
},
{
"epoch": 25.17,
"learning_rate": 2.5e-07,
"loss": 1.2647,
"step": 818
},
{
"epoch": 25.2,
"learning_rate": 2.5e-07,
"loss": 1.2257,
"step": 819
},
{
"epoch": 25.23,
"learning_rate": 2.5e-07,
"loss": 1.125,
"step": 820
},
{
"epoch": 25.26,
"learning_rate": 2.5e-07,
"loss": 1.2659,
"step": 821
},
{
"epoch": 25.29,
"learning_rate": 2.5e-07,
"loss": 1.0275,
"step": 822
},
{
"epoch": 25.32,
"learning_rate": 2.5e-07,
"loss": 1.2631,
"step": 823
},
{
"epoch": 25.35,
"learning_rate": 2.5e-07,
"loss": 1.1905,
"step": 824
},
{
"epoch": 25.38,
"learning_rate": 2.5e-07,
"loss": 1.1871,
"step": 825
},
{
"epoch": 25.42,
"learning_rate": 2.5e-07,
"loss": 1.0288,
"step": 826
},
{
"epoch": 25.45,
"learning_rate": 2.5e-07,
"loss": 1.0998,
"step": 827
},
{
"epoch": 25.48,
"learning_rate": 2.5e-07,
"loss": 1.1128,
"step": 828
},
{
"epoch": 25.51,
"learning_rate": 2.5e-07,
"loss": 1.1058,
"step": 829
},
{
"epoch": 25.54,
"learning_rate": 2.5e-07,
"loss": 0.9362,
"step": 830
},
{
"epoch": 25.57,
"learning_rate": 2.5e-07,
"loss": 1.1587,
"step": 831
},
{
"epoch": 25.6,
"learning_rate": 2.5e-07,
"loss": 1.1345,
"step": 832
},
{
"epoch": 25.63,
"learning_rate": 2.5e-07,
"loss": 1.269,
"step": 833
},
{
"epoch": 25.66,
"learning_rate": 2.5e-07,
"loss": 1.1619,
"step": 834
},
{
"epoch": 25.69,
"learning_rate": 2.5e-07,
"loss": 1.2199,
"step": 835
},
{
"epoch": 25.72,
"learning_rate": 2.5e-07,
"loss": 1.2374,
"step": 836
},
{
"epoch": 25.75,
"learning_rate": 2.5e-07,
"loss": 1.2568,
"step": 837
},
{
"epoch": 25.78,
"learning_rate": 2.5e-07,
"loss": 1.1374,
"step": 838
},
{
"epoch": 25.82,
"learning_rate": 2.5e-07,
"loss": 1.32,
"step": 839
},
{
"epoch": 25.85,
"learning_rate": 2.5e-07,
"loss": 1.2681,
"step": 840
},
{
"epoch": 25.88,
"learning_rate": 2.5e-07,
"loss": 1.0749,
"step": 841
},
{
"epoch": 25.91,
"learning_rate": 2.5e-07,
"loss": 1.325,
"step": 842
},
{
"epoch": 25.94,
"learning_rate": 2.5e-07,
"loss": 1.2718,
"step": 843
},
{
"epoch": 25.97,
"learning_rate": 2.5e-07,
"loss": 1.2822,
"step": 844
},
{
"epoch": 26.0,
"learning_rate": 2.5e-07,
"loss": 1.2542,
"step": 845
},
{
"epoch": 26.03,
"learning_rate": 2.5e-07,
"loss": 1.094,
"step": 846
},
{
"epoch": 26.06,
"learning_rate": 2.5e-07,
"loss": 1.1741,
"step": 847
},
{
"epoch": 26.09,
"learning_rate": 2.5e-07,
"loss": 0.9948,
"step": 848
},
{
"epoch": 26.12,
"learning_rate": 2.5e-07,
"loss": 0.9381,
"step": 849
},
{
"epoch": 26.15,
"learning_rate": 2.5e-07,
"loss": 1.2547,
"step": 850
},
{
"epoch": 26.18,
"learning_rate": 2.5e-07,
"loss": 1.3896,
"step": 851
},
{
"epoch": 26.22,
"learning_rate": 2.5e-07,
"loss": 1.2587,
"step": 852
},
{
"epoch": 26.25,
"learning_rate": 2.5e-07,
"loss": 1.2572,
"step": 853
},
{
"epoch": 26.28,
"learning_rate": 2.5e-07,
"loss": 1.1927,
"step": 854
},
{
"epoch": 26.31,
"learning_rate": 2.5e-07,
"loss": 0.9622,
"step": 855
},
{
"epoch": 26.34,
"learning_rate": 2.5e-07,
"loss": 1.192,
"step": 856
},
{
"epoch": 26.37,
"learning_rate": 2.5e-07,
"loss": 0.9601,
"step": 857
},
{
"epoch": 26.4,
"learning_rate": 2.5e-07,
"loss": 1.1648,
"step": 858
},
{
"epoch": 26.43,
"learning_rate": 2.5e-07,
"loss": 1.2387,
"step": 859
},
{
"epoch": 26.46,
"learning_rate": 2.5e-07,
"loss": 1.2444,
"step": 860
},
{
"epoch": 26.49,
"learning_rate": 2.5e-07,
"loss": 1.1718,
"step": 861
},
{
"epoch": 26.52,
"learning_rate": 2.5e-07,
"loss": 1.3051,
"step": 862
},
{
"epoch": 26.55,
"learning_rate": 2.5e-07,
"loss": 1.3384,
"step": 863
},
{
"epoch": 26.58,
"learning_rate": 2.5e-07,
"loss": 1.1536,
"step": 864
},
{
"epoch": 26.62,
"learning_rate": 2.5e-07,
"loss": 1.1541,
"step": 865
},
{
"epoch": 26.65,
"learning_rate": 2.5e-07,
"loss": 1.2604,
"step": 866
},
{
"epoch": 26.68,
"learning_rate": 2.5e-07,
"loss": 1.1687,
"step": 867
},
{
"epoch": 26.71,
"learning_rate": 2.5e-07,
"loss": 1.0095,
"step": 868
},
{
"epoch": 26.74,
"learning_rate": 2.5e-07,
"loss": 1.2004,
"step": 869
},
{
"epoch": 26.77,
"learning_rate": 2.5e-07,
"loss": 1.3289,
"step": 870
},
{
"epoch": 26.8,
"learning_rate": 2.5e-07,
"loss": 1.2065,
"step": 871
},
{
"epoch": 26.83,
"learning_rate": 2.5e-07,
"loss": 1.1295,
"step": 872
},
{
"epoch": 26.86,
"learning_rate": 2.5e-07,
"loss": 1.2577,
"step": 873
},
{
"epoch": 26.89,
"learning_rate": 2.5e-07,
"loss": 1.1807,
"step": 874
},
{
"epoch": 26.92,
"learning_rate": 2.5e-07,
"loss": 1.1881,
"step": 875
},
{
"epoch": 26.95,
"learning_rate": 2.5e-07,
"loss": 1.2618,
"step": 876
},
{
"epoch": 26.98,
"learning_rate": 2.5e-07,
"loss": 1.3067,
"step": 877
},
{
"epoch": 27.02,
"learning_rate": 2.5e-07,
"loss": 0.8701,
"step": 878
},
{
"epoch": 27.05,
"learning_rate": 2.5e-07,
"loss": 1.1478,
"step": 879
},
{
"epoch": 27.08,
"learning_rate": 2.5e-07,
"loss": 1.0857,
"step": 880
},
{
"epoch": 27.08,
"eval_loss": 1.1367146968841553,
"eval_runtime": 1.7401,
"eval_samples_per_second": 4.023,
"eval_steps_per_second": 2.299,
"step": 880
},
{
"epoch": 27.11,
"learning_rate": 2.5e-07,
"loss": 1.2706,
"step": 881
},
{
"epoch": 27.14,
"learning_rate": 2.5e-07,
"loss": 1.151,
"step": 882
},
{
"epoch": 27.17,
"learning_rate": 2.5e-07,
"loss": 1.2352,
"step": 883
},
{
"epoch": 27.2,
"learning_rate": 2.5e-07,
"loss": 1.0755,
"step": 884
},
{
"epoch": 27.23,
"learning_rate": 2.5e-07,
"loss": 0.9987,
"step": 885
},
{
"epoch": 27.26,
"learning_rate": 2.5e-07,
"loss": 1.3896,
"step": 886
},
{
"epoch": 27.29,
"learning_rate": 2.5e-07,
"loss": 1.047,
"step": 887
},
{
"epoch": 27.32,
"learning_rate": 2.5e-07,
"loss": 1.2163,
"step": 888
},
{
"epoch": 27.35,
"learning_rate": 2.5e-07,
"loss": 1.1826,
"step": 889
},
{
"epoch": 27.38,
"learning_rate": 2.5e-07,
"loss": 1.0939,
"step": 890
},
{
"epoch": 27.42,
"learning_rate": 2.5e-07,
"loss": 1.0561,
"step": 891
},
{
"epoch": 27.45,
"learning_rate": 2.5e-07,
"loss": 1.1888,
"step": 892
},
{
"epoch": 27.48,
"learning_rate": 2.5e-07,
"loss": 1.2385,
"step": 893
},
{
"epoch": 27.51,
"learning_rate": 2.5e-07,
"loss": 1.1719,
"step": 894
},
{
"epoch": 27.54,
"learning_rate": 2.5e-07,
"loss": 1.2768,
"step": 895
},
{
"epoch": 27.57,
"learning_rate": 2.5e-07,
"loss": 1.2474,
"step": 896
},
{
"epoch": 27.6,
"learning_rate": 2.5e-07,
"loss": 1.3374,
"step": 897
},
{
"epoch": 27.63,
"learning_rate": 2.5e-07,
"loss": 1.2982,
"step": 898
},
{
"epoch": 27.66,
"learning_rate": 2.5e-07,
"loss": 1.0727,
"step": 899
},
{
"epoch": 27.69,
"learning_rate": 2.5e-07,
"loss": 1.2495,
"step": 900
},
{
"epoch": 27.72,
"learning_rate": 2.5e-07,
"loss": 1.1306,
"step": 901
},
{
"epoch": 27.75,
"learning_rate": 2.5e-07,
"loss": 1.2784,
"step": 902
},
{
"epoch": 27.78,
"learning_rate": 2.5e-07,
"loss": 1.1225,
"step": 903
},
{
"epoch": 27.82,
"learning_rate": 2.5e-07,
"loss": 1.2542,
"step": 904
},
{
"epoch": 27.85,
"learning_rate": 2.5e-07,
"loss": 1.1575,
"step": 905
},
{
"epoch": 27.88,
"learning_rate": 2.5e-07,
"loss": 1.1425,
"step": 906
},
{
"epoch": 27.91,
"learning_rate": 2.5e-07,
"loss": 1.1282,
"step": 907
},
{
"epoch": 27.94,
"learning_rate": 2.5e-07,
"loss": 1.223,
"step": 908
},
{
"epoch": 27.97,
"learning_rate": 2.5e-07,
"loss": 1.1326,
"step": 909
},
{
"epoch": 28.0,
"learning_rate": 2.5e-07,
"loss": 1.2255,
"step": 910
},
{
"epoch": 28.03,
"learning_rate": 2.5e-07,
"loss": 1.1362,
"step": 911
},
{
"epoch": 28.06,
"learning_rate": 2.5e-07,
"loss": 1.1001,
"step": 912
},
{
"epoch": 28.09,
"learning_rate": 2.5e-07,
"loss": 1.2152,
"step": 913
},
{
"epoch": 28.12,
"learning_rate": 2.5e-07,
"loss": 1.1168,
"step": 914
},
{
"epoch": 28.15,
"learning_rate": 2.5e-07,
"loss": 1.2479,
"step": 915
},
{
"epoch": 28.18,
"learning_rate": 2.5e-07,
"loss": 1.1956,
"step": 916
},
{
"epoch": 28.22,
"learning_rate": 2.5e-07,
"loss": 1.1865,
"step": 917
},
{
"epoch": 28.25,
"learning_rate": 2.5e-07,
"loss": 1.1982,
"step": 918
},
{
"epoch": 28.28,
"learning_rate": 2.5e-07,
"loss": 1.0791,
"step": 919
},
{
"epoch": 28.31,
"learning_rate": 2.5e-07,
"loss": 1.1691,
"step": 920
},
{
"epoch": 28.34,
"learning_rate": 2.5e-07,
"loss": 1.2283,
"step": 921
},
{
"epoch": 28.37,
"learning_rate": 2.5e-07,
"loss": 1.2687,
"step": 922
},
{
"epoch": 28.4,
"learning_rate": 2.5e-07,
"loss": 0.9531,
"step": 923
},
{
"epoch": 28.43,
"learning_rate": 2.5e-07,
"loss": 1.0041,
"step": 924
},
{
"epoch": 28.46,
"learning_rate": 2.5e-07,
"loss": 0.9897,
"step": 925
},
{
"epoch": 28.49,
"learning_rate": 2.5e-07,
"loss": 1.1019,
"step": 926
},
{
"epoch": 28.52,
"learning_rate": 2.5e-07,
"loss": 1.2045,
"step": 927
},
{
"epoch": 28.55,
"learning_rate": 2.5e-07,
"loss": 1.1838,
"step": 928
},
{
"epoch": 28.58,
"learning_rate": 2.5e-07,
"loss": 1.1894,
"step": 929
},
{
"epoch": 28.62,
"learning_rate": 2.5e-07,
"loss": 1.1054,
"step": 930
},
{
"epoch": 28.65,
"learning_rate": 2.5e-07,
"loss": 1.3175,
"step": 931
},
{
"epoch": 28.68,
"learning_rate": 2.5e-07,
"loss": 1.2877,
"step": 932
},
{
"epoch": 28.71,
"learning_rate": 2.5e-07,
"loss": 1.2615,
"step": 933
},
{
"epoch": 28.74,
"learning_rate": 2.5e-07,
"loss": 1.1079,
"step": 934
},
{
"epoch": 28.77,
"learning_rate": 2.5e-07,
"loss": 1.264,
"step": 935
},
{
"epoch": 28.8,
"learning_rate": 2.5e-07,
"loss": 1.3003,
"step": 936
},
{
"epoch": 28.83,
"learning_rate": 2.5e-07,
"loss": 1.0865,
"step": 937
},
{
"epoch": 28.86,
"learning_rate": 2.5e-07,
"loss": 1.1714,
"step": 938
},
{
"epoch": 28.89,
"learning_rate": 2.5e-07,
"loss": 1.0339,
"step": 939
},
{
"epoch": 28.92,
"learning_rate": 2.5e-07,
"loss": 1.3118,
"step": 940
},
{
"epoch": 28.95,
"learning_rate": 2.5e-07,
"loss": 1.2253,
"step": 941
},
{
"epoch": 28.98,
"learning_rate": 2.5e-07,
"loss": 1.1665,
"step": 942
},
{
"epoch": 29.02,
"learning_rate": 2.5e-07,
"loss": 1.3285,
"step": 943
},
{
"epoch": 29.05,
"learning_rate": 2.5e-07,
"loss": 1.2553,
"step": 944
},
{
"epoch": 29.08,
"learning_rate": 2.5e-07,
"loss": 1.2341,
"step": 945
},
{
"epoch": 29.11,
"learning_rate": 2.5e-07,
"loss": 1.257,
"step": 946
},
{
"epoch": 29.14,
"learning_rate": 2.5e-07,
"loss": 1.1492,
"step": 947
},
{
"epoch": 29.17,
"learning_rate": 2.5e-07,
"loss": 1.0962,
"step": 948
},
{
"epoch": 29.2,
"learning_rate": 2.5e-07,
"loss": 1.1208,
"step": 949
},
{
"epoch": 29.23,
"learning_rate": 2.5e-07,
"loss": 1.2373,
"step": 950
},
{
"epoch": 29.26,
"learning_rate": 2.5e-07,
"loss": 1.2133,
"step": 951
},
{
"epoch": 29.29,
"learning_rate": 2.5e-07,
"loss": 1.1271,
"step": 952
},
{
"epoch": 29.32,
"learning_rate": 2.5e-07,
"loss": 1.2082,
"step": 953
},
{
"epoch": 29.35,
"learning_rate": 2.5e-07,
"loss": 1.1464,
"step": 954
},
{
"epoch": 29.38,
"learning_rate": 2.5e-07,
"loss": 1.2751,
"step": 955
},
{
"epoch": 29.42,
"learning_rate": 2.5e-07,
"loss": 1.1938,
"step": 956
},
{
"epoch": 29.45,
"learning_rate": 2.5e-07,
"loss": 0.8369,
"step": 957
},
{
"epoch": 29.48,
"learning_rate": 2.5e-07,
"loss": 1.0691,
"step": 958
},
{
"epoch": 29.51,
"learning_rate": 2.5e-07,
"loss": 1.2111,
"step": 959
},
{
"epoch": 29.54,
"learning_rate": 2.5e-07,
"loss": 1.0795,
"step": 960
},
{
"epoch": 29.54,
"eval_loss": 1.1345065832138062,
"eval_runtime": 1.7373,
"eval_samples_per_second": 4.029,
"eval_steps_per_second": 2.302,
"step": 960
},
{
"epoch": 29.57,
"learning_rate": 2.5e-07,
"loss": 1.1129,
"step": 961
},
{
"epoch": 29.6,
"learning_rate": 2.5e-07,
"loss": 1.1189,
"step": 962
},
{
"epoch": 29.63,
"learning_rate": 2.5e-07,
"loss": 0.9281,
"step": 963
},
{
"epoch": 29.66,
"learning_rate": 2.5e-07,
"loss": 1.3352,
"step": 964
},
{
"epoch": 29.69,
"learning_rate": 2.5e-07,
"loss": 1.2835,
"step": 965
},
{
"epoch": 29.72,
"learning_rate": 2.5e-07,
"loss": 1.1693,
"step": 966
},
{
"epoch": 29.75,
"learning_rate": 2.5e-07,
"loss": 1.18,
"step": 967
},
{
"epoch": 29.78,
"learning_rate": 2.5e-07,
"loss": 1.3506,
"step": 968
},
{
"epoch": 29.82,
"learning_rate": 2.5e-07,
"loss": 1.2388,
"step": 969
},
{
"epoch": 29.85,
"learning_rate": 2.5e-07,
"loss": 1.1186,
"step": 970
},
{
"epoch": 29.88,
"learning_rate": 2.5e-07,
"loss": 1.0852,
"step": 971
},
{
"epoch": 29.91,
"learning_rate": 2.5e-07,
"loss": 1.1784,
"step": 972
},
{
"epoch": 29.94,
"learning_rate": 2.5e-07,
"loss": 1.1734,
"step": 973
},
{
"epoch": 29.97,
"learning_rate": 2.5e-07,
"loss": 1.1523,
"step": 974
},
{
"epoch": 30.0,
"learning_rate": 2.5e-07,
"loss": 1.2161,
"step": 975
},
{
"epoch": 30.03,
"learning_rate": 2.5e-07,
"loss": 1.1988,
"step": 976
},
{
"epoch": 30.06,
"learning_rate": 2.5e-07,
"loss": 1.142,
"step": 977
},
{
"epoch": 30.09,
"learning_rate": 2.5e-07,
"loss": 1.1912,
"step": 978
},
{
"epoch": 30.12,
"learning_rate": 2.5e-07,
"loss": 1.2097,
"step": 979
},
{
"epoch": 30.15,
"learning_rate": 2.5e-07,
"loss": 1.0034,
"step": 980
},
{
"epoch": 30.18,
"learning_rate": 2.5e-07,
"loss": 1.1134,
"step": 981
},
{
"epoch": 30.22,
"learning_rate": 2.5e-07,
"loss": 1.0878,
"step": 982
},
{
"epoch": 30.25,
"learning_rate": 2.5e-07,
"loss": 1.2026,
"step": 983
},
{
"epoch": 30.28,
"learning_rate": 2.5e-07,
"loss": 1.3019,
"step": 984
},
{
"epoch": 30.31,
"learning_rate": 2.5e-07,
"loss": 1.3384,
"step": 985
},
{
"epoch": 30.34,
"learning_rate": 2.5e-07,
"loss": 1.1569,
"step": 986
},
{
"epoch": 30.37,
"learning_rate": 2.5e-07,
"loss": 1.0049,
"step": 987
},
{
"epoch": 30.4,
"learning_rate": 2.5e-07,
"loss": 1.2734,
"step": 988
},
{
"epoch": 30.43,
"learning_rate": 2.5e-07,
"loss": 1.1678,
"step": 989
},
{
"epoch": 30.46,
"learning_rate": 2.5e-07,
"loss": 1.3041,
"step": 990
},
{
"epoch": 30.49,
"learning_rate": 2.5e-07,
"loss": 1.0953,
"step": 991
},
{
"epoch": 30.52,
"learning_rate": 2.5e-07,
"loss": 1.2766,
"step": 992
},
{
"epoch": 30.55,
"learning_rate": 2.5e-07,
"loss": 1.1537,
"step": 993
},
{
"epoch": 30.58,
"learning_rate": 2.5e-07,
"loss": 1.2511,
"step": 994
},
{
"epoch": 30.62,
"learning_rate": 2.5e-07,
"loss": 1.0633,
"step": 995
},
{
"epoch": 30.65,
"learning_rate": 2.5e-07,
"loss": 1.1738,
"step": 996
},
{
"epoch": 30.68,
"learning_rate": 2.5e-07,
"loss": 1.2442,
"step": 997
},
{
"epoch": 30.71,
"learning_rate": 2.5e-07,
"loss": 1.1332,
"step": 998
},
{
"epoch": 30.74,
"learning_rate": 2.5e-07,
"loss": 1.2319,
"step": 999
},
{
"epoch": 30.77,
"learning_rate": 2.5e-07,
"loss": 1.0908,
"step": 1000
},
{
"epoch": 30.8,
"learning_rate": 2.5e-07,
"loss": 1.1898,
"step": 1001
},
{
"epoch": 30.83,
"learning_rate": 2.5e-07,
"loss": 0.9774,
"step": 1002
},
{
"epoch": 30.86,
"learning_rate": 2.5e-07,
"loss": 1.0241,
"step": 1003
},
{
"epoch": 30.89,
"learning_rate": 2.5e-07,
"loss": 1.2131,
"step": 1004
},
{
"epoch": 30.92,
"learning_rate": 2.5e-07,
"loss": 1.183,
"step": 1005
},
{
"epoch": 30.95,
"learning_rate": 2.5e-07,
"loss": 1.1281,
"step": 1006
},
{
"epoch": 30.98,
"learning_rate": 2.5e-07,
"loss": 1.2502,
"step": 1007
},
{
"epoch": 31.02,
"learning_rate": 2.5e-07,
"loss": 1.2496,
"step": 1008
},
{
"epoch": 31.05,
"learning_rate": 2.5e-07,
"loss": 1.1432,
"step": 1009
},
{
"epoch": 31.08,
"learning_rate": 2.5e-07,
"loss": 1.1291,
"step": 1010
},
{
"epoch": 31.11,
"learning_rate": 2.5e-07,
"loss": 1.0778,
"step": 1011
},
{
"epoch": 31.14,
"learning_rate": 2.5e-07,
"loss": 1.204,
"step": 1012
},
{
"epoch": 31.17,
"learning_rate": 2.5e-07,
"loss": 1.0836,
"step": 1013
},
{
"epoch": 31.2,
"learning_rate": 2.5e-07,
"loss": 1.1495,
"step": 1014
},
{
"epoch": 31.23,
"learning_rate": 2.5e-07,
"loss": 1.2303,
"step": 1015
},
{
"epoch": 31.26,
"learning_rate": 2.5e-07,
"loss": 1.1503,
"step": 1016
},
{
"epoch": 31.29,
"learning_rate": 2.5e-07,
"loss": 0.9329,
"step": 1017
},
{
"epoch": 31.32,
"learning_rate": 2.5e-07,
"loss": 1.223,
"step": 1018
},
{
"epoch": 31.35,
"learning_rate": 2.5e-07,
"loss": 1.1437,
"step": 1019
},
{
"epoch": 31.38,
"learning_rate": 2.5e-07,
"loss": 1.1518,
"step": 1020
},
{
"epoch": 31.42,
"learning_rate": 2.5e-07,
"loss": 1.2181,
"step": 1021
},
{
"epoch": 31.45,
"learning_rate": 2.5e-07,
"loss": 1.3273,
"step": 1022
},
{
"epoch": 31.48,
"learning_rate": 2.5e-07,
"loss": 1.3224,
"step": 1023
},
{
"epoch": 31.51,
"learning_rate": 2.5e-07,
"loss": 1.2281,
"step": 1024
},
{
"epoch": 31.54,
"learning_rate": 2.5e-07,
"loss": 1.2175,
"step": 1025
},
{
"epoch": 31.57,
"learning_rate": 2.5e-07,
"loss": 1.065,
"step": 1026
},
{
"epoch": 31.6,
"learning_rate": 2.5e-07,
"loss": 1.1645,
"step": 1027
},
{
"epoch": 31.63,
"learning_rate": 2.5e-07,
"loss": 1.2845,
"step": 1028
},
{
"epoch": 31.66,
"learning_rate": 2.5e-07,
"loss": 0.9343,
"step": 1029
},
{
"epoch": 31.69,
"learning_rate": 2.5e-07,
"loss": 1.1228,
"step": 1030
},
{
"epoch": 31.72,
"learning_rate": 2.5e-07,
"loss": 1.2667,
"step": 1031
},
{
"epoch": 31.75,
"learning_rate": 2.5e-07,
"loss": 1.3187,
"step": 1032
},
{
"epoch": 31.78,
"learning_rate": 2.5e-07,
"loss": 1.2145,
"step": 1033
},
{
"epoch": 31.82,
"learning_rate": 2.5e-07,
"loss": 1.1731,
"step": 1034
},
{
"epoch": 31.85,
"learning_rate": 2.5e-07,
"loss": 1.0191,
"step": 1035
},
{
"epoch": 31.88,
"learning_rate": 2.5e-07,
"loss": 1.1871,
"step": 1036
},
{
"epoch": 31.91,
"learning_rate": 2.5e-07,
"loss": 1.0823,
"step": 1037
},
{
"epoch": 31.94,
"learning_rate": 2.5e-07,
"loss": 1.2527,
"step": 1038
},
{
"epoch": 31.97,
"learning_rate": 2.5e-07,
"loss": 1.1156,
"step": 1039
},
{
"epoch": 32.0,
"learning_rate": 2.5e-07,
"loss": 1.0453,
"step": 1040
},
{
"epoch": 32.0,
"eval_loss": 1.1317499876022339,
"eval_runtime": 1.75,
"eval_samples_per_second": 4.0,
"eval_steps_per_second": 2.286,
"step": 1040
}
],
"logging_steps": 1,
"max_steps": 1600,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 65,
"total_flos": 1.6494302424465408e+17,
"trial_name": null,
"trial_params": null
}