Leyley-13B-Lora / checkpoint-715 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 22.0,
"eval_steps": 80,
"global_step": 715,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 2.5e-07,
"loss": 1.8362,
"step": 1
},
{
"epoch": 0.03,
"eval_loss": 1.7487908601760864,
"eval_runtime": 1.7201,
"eval_samples_per_second": 4.07,
"eval_steps_per_second": 2.326,
"step": 1
},
{
"epoch": 0.06,
"learning_rate": 2.5e-07,
"loss": 1.7387,
"step": 2
},
{
"epoch": 0.09,
"learning_rate": 2.5e-07,
"loss": 1.9194,
"step": 3
},
{
"epoch": 0.12,
"learning_rate": 2.5e-07,
"loss": 1.8898,
"step": 4
},
{
"epoch": 0.15,
"learning_rate": 2.5e-07,
"loss": 1.7892,
"step": 5
},
{
"epoch": 0.18,
"learning_rate": 2.5e-07,
"loss": 1.9383,
"step": 6
},
{
"epoch": 0.22,
"learning_rate": 2.5e-07,
"loss": 1.6433,
"step": 7
},
{
"epoch": 0.25,
"learning_rate": 2.5e-07,
"loss": 1.9912,
"step": 8
},
{
"epoch": 0.28,
"learning_rate": 2.5e-07,
"loss": 1.967,
"step": 9
},
{
"epoch": 0.31,
"learning_rate": 2.5e-07,
"loss": 1.8385,
"step": 10
},
{
"epoch": 0.34,
"learning_rate": 2.5e-07,
"loss": 2.0755,
"step": 11
},
{
"epoch": 0.37,
"learning_rate": 2.5e-07,
"loss": 1.852,
"step": 12
},
{
"epoch": 0.4,
"learning_rate": 2.5e-07,
"loss": 2.0098,
"step": 13
},
{
"epoch": 0.43,
"learning_rate": 2.5e-07,
"loss": 1.8534,
"step": 14
},
{
"epoch": 0.46,
"learning_rate": 2.5e-07,
"loss": 2.2001,
"step": 15
},
{
"epoch": 0.49,
"learning_rate": 2.5e-07,
"loss": 1.7698,
"step": 16
},
{
"epoch": 0.52,
"learning_rate": 2.5e-07,
"loss": 1.9215,
"step": 17
},
{
"epoch": 0.55,
"learning_rate": 2.5e-07,
"loss": 1.8275,
"step": 18
},
{
"epoch": 0.58,
"learning_rate": 2.5e-07,
"loss": 1.8526,
"step": 19
},
{
"epoch": 0.62,
"learning_rate": 2.5e-07,
"loss": 1.8421,
"step": 20
},
{
"epoch": 0.65,
"learning_rate": 2.5e-07,
"loss": 1.7406,
"step": 21
},
{
"epoch": 0.68,
"learning_rate": 2.5e-07,
"loss": 1.8343,
"step": 22
},
{
"epoch": 0.71,
"learning_rate": 2.5e-07,
"loss": 1.8616,
"step": 23
},
{
"epoch": 0.74,
"learning_rate": 2.5e-07,
"loss": 1.6493,
"step": 24
},
{
"epoch": 0.77,
"learning_rate": 2.5e-07,
"loss": 1.931,
"step": 25
},
{
"epoch": 0.8,
"learning_rate": 2.5e-07,
"loss": 1.9538,
"step": 26
},
{
"epoch": 0.83,
"learning_rate": 2.5e-07,
"loss": 2.013,
"step": 27
},
{
"epoch": 0.86,
"learning_rate": 2.5e-07,
"loss": 2.0858,
"step": 28
},
{
"epoch": 0.89,
"learning_rate": 2.5e-07,
"loss": 2.0456,
"step": 29
},
{
"epoch": 0.92,
"learning_rate": 2.5e-07,
"loss": 1.7464,
"step": 30
},
{
"epoch": 0.95,
"learning_rate": 2.5e-07,
"loss": 1.8876,
"step": 31
},
{
"epoch": 0.98,
"learning_rate": 2.5e-07,
"loss": 1.7079,
"step": 32
},
{
"epoch": 1.02,
"learning_rate": 2.5e-07,
"loss": 1.8358,
"step": 33
},
{
"epoch": 1.05,
"learning_rate": 2.5e-07,
"loss": 2.0894,
"step": 34
},
{
"epoch": 1.08,
"learning_rate": 2.5e-07,
"loss": 1.8563,
"step": 35
},
{
"epoch": 1.11,
"learning_rate": 2.5e-07,
"loss": 1.893,
"step": 36
},
{
"epoch": 1.14,
"learning_rate": 2.5e-07,
"loss": 1.9089,
"step": 37
},
{
"epoch": 1.17,
"learning_rate": 2.5e-07,
"loss": 1.7537,
"step": 38
},
{
"epoch": 1.2,
"learning_rate": 2.5e-07,
"loss": 1.8376,
"step": 39
},
{
"epoch": 1.23,
"learning_rate": 2.5e-07,
"loss": 1.7036,
"step": 40
},
{
"epoch": 1.26,
"learning_rate": 2.5e-07,
"loss": 1.8212,
"step": 41
},
{
"epoch": 1.29,
"learning_rate": 2.5e-07,
"loss": 1.8071,
"step": 42
},
{
"epoch": 1.32,
"learning_rate": 2.5e-07,
"loss": 1.8911,
"step": 43
},
{
"epoch": 1.35,
"learning_rate": 2.5e-07,
"loss": 1.88,
"step": 44
},
{
"epoch": 1.38,
"learning_rate": 2.5e-07,
"loss": 1.7136,
"step": 45
},
{
"epoch": 1.42,
"learning_rate": 2.5e-07,
"loss": 1.6818,
"step": 46
},
{
"epoch": 1.45,
"learning_rate": 2.5e-07,
"loss": 1.9645,
"step": 47
},
{
"epoch": 1.48,
"learning_rate": 2.5e-07,
"loss": 1.9903,
"step": 48
},
{
"epoch": 1.51,
"learning_rate": 2.5e-07,
"loss": 1.8368,
"step": 49
},
{
"epoch": 1.54,
"learning_rate": 2.5e-07,
"loss": 1.7862,
"step": 50
},
{
"epoch": 1.57,
"learning_rate": 2.5e-07,
"loss": 1.9217,
"step": 51
},
{
"epoch": 1.6,
"learning_rate": 2.5e-07,
"loss": 1.8417,
"step": 52
},
{
"epoch": 1.63,
"learning_rate": 2.5e-07,
"loss": 1.8408,
"step": 53
},
{
"epoch": 1.66,
"learning_rate": 2.5e-07,
"loss": 1.8922,
"step": 54
},
{
"epoch": 1.69,
"learning_rate": 2.5e-07,
"loss": 1.7651,
"step": 55
},
{
"epoch": 1.72,
"learning_rate": 2.5e-07,
"loss": 1.7586,
"step": 56
},
{
"epoch": 1.75,
"learning_rate": 2.5e-07,
"loss": 1.9452,
"step": 57
},
{
"epoch": 1.78,
"learning_rate": 2.5e-07,
"loss": 1.7795,
"step": 58
},
{
"epoch": 1.82,
"learning_rate": 2.5e-07,
"loss": 1.875,
"step": 59
},
{
"epoch": 1.85,
"learning_rate": 2.5e-07,
"loss": 1.8503,
"step": 60
},
{
"epoch": 1.88,
"learning_rate": 2.5e-07,
"loss": 1.7522,
"step": 61
},
{
"epoch": 1.91,
"learning_rate": 2.5e-07,
"loss": 1.7527,
"step": 62
},
{
"epoch": 1.94,
"learning_rate": 2.5e-07,
"loss": 1.8297,
"step": 63
},
{
"epoch": 1.97,
"learning_rate": 2.5e-07,
"loss": 1.7751,
"step": 64
},
{
"epoch": 2.0,
"learning_rate": 2.5e-07,
"loss": 1.9007,
"step": 65
},
{
"epoch": 2.03,
"learning_rate": 2.5e-07,
"loss": 1.829,
"step": 66
},
{
"epoch": 2.06,
"learning_rate": 2.5e-07,
"loss": 1.7726,
"step": 67
},
{
"epoch": 2.09,
"learning_rate": 2.5e-07,
"loss": 1.8103,
"step": 68
},
{
"epoch": 2.12,
"learning_rate": 2.5e-07,
"loss": 1.8264,
"step": 69
},
{
"epoch": 2.15,
"learning_rate": 2.5e-07,
"loss": 1.7188,
"step": 70
},
{
"epoch": 2.18,
"learning_rate": 2.5e-07,
"loss": 1.8933,
"step": 71
},
{
"epoch": 2.22,
"learning_rate": 2.5e-07,
"loss": 1.6881,
"step": 72
},
{
"epoch": 2.25,
"learning_rate": 2.5e-07,
"loss": 1.6726,
"step": 73
},
{
"epoch": 2.28,
"learning_rate": 2.5e-07,
"loss": 1.8541,
"step": 74
},
{
"epoch": 2.31,
"learning_rate": 2.5e-07,
"loss": 1.8042,
"step": 75
},
{
"epoch": 2.34,
"learning_rate": 2.5e-07,
"loss": 1.7049,
"step": 76
},
{
"epoch": 2.37,
"learning_rate": 2.5e-07,
"loss": 1.6737,
"step": 77
},
{
"epoch": 2.4,
"learning_rate": 2.5e-07,
"loss": 1.6985,
"step": 78
},
{
"epoch": 2.43,
"learning_rate": 2.5e-07,
"loss": 1.7261,
"step": 79
},
{
"epoch": 2.46,
"learning_rate": 2.5e-07,
"loss": 2.035,
"step": 80
},
{
"epoch": 2.46,
"eval_loss": 1.646209955215454,
"eval_runtime": 1.7493,
"eval_samples_per_second": 4.002,
"eval_steps_per_second": 2.287,
"step": 80
},
{
"epoch": 2.49,
"learning_rate": 2.5e-07,
"loss": 1.7559,
"step": 81
},
{
"epoch": 2.52,
"learning_rate": 2.5e-07,
"loss": 1.6864,
"step": 82
},
{
"epoch": 2.55,
"learning_rate": 2.5e-07,
"loss": 1.8974,
"step": 83
},
{
"epoch": 2.58,
"learning_rate": 2.5e-07,
"loss": 1.822,
"step": 84
},
{
"epoch": 2.62,
"learning_rate": 2.5e-07,
"loss": 1.6783,
"step": 85
},
{
"epoch": 2.65,
"learning_rate": 2.5e-07,
"loss": 1.8375,
"step": 86
},
{
"epoch": 2.68,
"learning_rate": 2.5e-07,
"loss": 1.7302,
"step": 87
},
{
"epoch": 2.71,
"learning_rate": 2.5e-07,
"loss": 1.7164,
"step": 88
},
{
"epoch": 2.74,
"learning_rate": 2.5e-07,
"loss": 1.6118,
"step": 89
},
{
"epoch": 2.77,
"learning_rate": 2.5e-07,
"loss": 1.7528,
"step": 90
},
{
"epoch": 2.8,
"learning_rate": 2.5e-07,
"loss": 1.9012,
"step": 91
},
{
"epoch": 2.83,
"learning_rate": 2.5e-07,
"loss": 1.6869,
"step": 92
},
{
"epoch": 2.86,
"learning_rate": 2.5e-07,
"loss": 1.7648,
"step": 93
},
{
"epoch": 2.89,
"learning_rate": 2.5e-07,
"loss": 1.6151,
"step": 94
},
{
"epoch": 2.92,
"learning_rate": 2.5e-07,
"loss": 1.7853,
"step": 95
},
{
"epoch": 2.95,
"learning_rate": 2.5e-07,
"loss": 1.7481,
"step": 96
},
{
"epoch": 2.98,
"learning_rate": 2.5e-07,
"loss": 1.7218,
"step": 97
},
{
"epoch": 3.02,
"learning_rate": 2.5e-07,
"loss": 1.8767,
"step": 98
},
{
"epoch": 3.05,
"learning_rate": 2.5e-07,
"loss": 1.7954,
"step": 99
},
{
"epoch": 3.08,
"learning_rate": 2.5e-07,
"loss": 1.6822,
"step": 100
},
{
"epoch": 3.11,
"learning_rate": 2.5e-07,
"loss": 1.821,
"step": 101
},
{
"epoch": 3.14,
"learning_rate": 2.5e-07,
"loss": 1.6984,
"step": 102
},
{
"epoch": 3.17,
"learning_rate": 2.5e-07,
"loss": 1.7225,
"step": 103
},
{
"epoch": 3.2,
"learning_rate": 2.5e-07,
"loss": 1.6312,
"step": 104
},
{
"epoch": 3.23,
"learning_rate": 2.5e-07,
"loss": 1.7467,
"step": 105
},
{
"epoch": 3.26,
"learning_rate": 2.5e-07,
"loss": 1.679,
"step": 106
},
{
"epoch": 3.29,
"learning_rate": 2.5e-07,
"loss": 1.791,
"step": 107
},
{
"epoch": 3.32,
"learning_rate": 2.5e-07,
"loss": 1.7233,
"step": 108
},
{
"epoch": 3.35,
"learning_rate": 2.5e-07,
"loss": 1.8198,
"step": 109
},
{
"epoch": 3.38,
"learning_rate": 2.5e-07,
"loss": 1.6536,
"step": 110
},
{
"epoch": 3.42,
"learning_rate": 2.5e-07,
"loss": 1.6672,
"step": 111
},
{
"epoch": 3.45,
"learning_rate": 2.5e-07,
"loss": 1.8259,
"step": 112
},
{
"epoch": 3.48,
"learning_rate": 2.5e-07,
"loss": 1.7671,
"step": 113
},
{
"epoch": 3.51,
"learning_rate": 2.5e-07,
"loss": 1.5717,
"step": 114
},
{
"epoch": 3.54,
"learning_rate": 2.5e-07,
"loss": 1.7491,
"step": 115
},
{
"epoch": 3.57,
"learning_rate": 2.5e-07,
"loss": 1.4921,
"step": 116
},
{
"epoch": 3.6,
"learning_rate": 2.5e-07,
"loss": 1.7512,
"step": 117
},
{
"epoch": 3.63,
"learning_rate": 2.5e-07,
"loss": 1.5921,
"step": 118
},
{
"epoch": 3.66,
"learning_rate": 2.5e-07,
"loss": 1.6179,
"step": 119
},
{
"epoch": 3.69,
"learning_rate": 2.5e-07,
"loss": 1.7484,
"step": 120
},
{
"epoch": 3.72,
"learning_rate": 2.5e-07,
"loss": 1.6551,
"step": 121
},
{
"epoch": 3.75,
"learning_rate": 2.5e-07,
"loss": 1.6753,
"step": 122
},
{
"epoch": 3.78,
"learning_rate": 2.5e-07,
"loss": 1.816,
"step": 123
},
{
"epoch": 3.82,
"learning_rate": 2.5e-07,
"loss": 1.7982,
"step": 124
},
{
"epoch": 3.85,
"learning_rate": 2.5e-07,
"loss": 1.5347,
"step": 125
},
{
"epoch": 3.88,
"learning_rate": 2.5e-07,
"loss": 1.6895,
"step": 126
},
{
"epoch": 3.91,
"learning_rate": 2.5e-07,
"loss": 1.6968,
"step": 127
},
{
"epoch": 3.94,
"learning_rate": 2.5e-07,
"loss": 1.6039,
"step": 128
},
{
"epoch": 3.97,
"learning_rate": 2.5e-07,
"loss": 1.6148,
"step": 129
},
{
"epoch": 4.0,
"learning_rate": 2.5e-07,
"loss": 1.6244,
"step": 130
},
{
"epoch": 4.03,
"learning_rate": 2.5e-07,
"loss": 1.5743,
"step": 131
},
{
"epoch": 4.06,
"learning_rate": 2.5e-07,
"loss": 1.5876,
"step": 132
},
{
"epoch": 4.09,
"learning_rate": 2.5e-07,
"loss": 1.7865,
"step": 133
},
{
"epoch": 4.12,
"learning_rate": 2.5e-07,
"loss": 1.731,
"step": 134
},
{
"epoch": 4.15,
"learning_rate": 2.5e-07,
"loss": 1.6661,
"step": 135
},
{
"epoch": 4.18,
"learning_rate": 2.5e-07,
"loss": 1.6231,
"step": 136
},
{
"epoch": 4.22,
"learning_rate": 2.5e-07,
"loss": 1.5479,
"step": 137
},
{
"epoch": 4.25,
"learning_rate": 2.5e-07,
"loss": 1.7758,
"step": 138
},
{
"epoch": 4.28,
"learning_rate": 2.5e-07,
"loss": 1.6865,
"step": 139
},
{
"epoch": 4.31,
"learning_rate": 2.5e-07,
"loss": 1.6355,
"step": 140
},
{
"epoch": 4.34,
"learning_rate": 2.5e-07,
"loss": 1.6922,
"step": 141
},
{
"epoch": 4.37,
"learning_rate": 2.5e-07,
"loss": 1.5235,
"step": 142
},
{
"epoch": 4.4,
"learning_rate": 2.5e-07,
"loss": 1.6388,
"step": 143
},
{
"epoch": 4.43,
"learning_rate": 2.5e-07,
"loss": 1.536,
"step": 144
},
{
"epoch": 4.46,
"learning_rate": 2.5e-07,
"loss": 1.6105,
"step": 145
},
{
"epoch": 4.49,
"learning_rate": 2.5e-07,
"loss": 1.6795,
"step": 146
},
{
"epoch": 4.52,
"learning_rate": 2.5e-07,
"loss": 1.5493,
"step": 147
},
{
"epoch": 4.55,
"learning_rate": 2.5e-07,
"loss": 1.6987,
"step": 148
},
{
"epoch": 4.58,
"learning_rate": 2.5e-07,
"loss": 1.529,
"step": 149
},
{
"epoch": 4.62,
"learning_rate": 2.5e-07,
"loss": 1.5458,
"step": 150
},
{
"epoch": 4.65,
"learning_rate": 2.5e-07,
"loss": 1.5496,
"step": 151
},
{
"epoch": 4.68,
"learning_rate": 2.5e-07,
"loss": 1.6442,
"step": 152
},
{
"epoch": 4.71,
"learning_rate": 2.5e-07,
"loss": 1.727,
"step": 153
},
{
"epoch": 4.74,
"learning_rate": 2.5e-07,
"loss": 1.6884,
"step": 154
},
{
"epoch": 4.77,
"learning_rate": 2.5e-07,
"loss": 1.5979,
"step": 155
},
{
"epoch": 4.8,
"learning_rate": 2.5e-07,
"loss": 1.5635,
"step": 156
},
{
"epoch": 4.83,
"learning_rate": 2.5e-07,
"loss": 1.805,
"step": 157
},
{
"epoch": 4.86,
"learning_rate": 2.5e-07,
"loss": 1.565,
"step": 158
},
{
"epoch": 4.89,
"learning_rate": 2.5e-07,
"loss": 1.5836,
"step": 159
},
{
"epoch": 4.92,
"learning_rate": 2.5e-07,
"loss": 1.5489,
"step": 160
},
{
"epoch": 4.92,
"eval_loss": 1.490054965019226,
"eval_runtime": 1.7397,
"eval_samples_per_second": 4.024,
"eval_steps_per_second": 2.299,
"step": 160
},
{
"epoch": 4.95,
"learning_rate": 2.5e-07,
"loss": 1.6744,
"step": 161
},
{
"epoch": 4.98,
"learning_rate": 2.5e-07,
"loss": 1.5578,
"step": 162
},
{
"epoch": 5.02,
"learning_rate": 2.5e-07,
"loss": 1.5033,
"step": 163
},
{
"epoch": 5.05,
"learning_rate": 2.5e-07,
"loss": 1.701,
"step": 164
},
{
"epoch": 5.08,
"learning_rate": 2.5e-07,
"loss": 1.5297,
"step": 165
},
{
"epoch": 5.11,
"learning_rate": 2.5e-07,
"loss": 1.5086,
"step": 166
},
{
"epoch": 5.14,
"learning_rate": 2.5e-07,
"loss": 1.5426,
"step": 167
},
{
"epoch": 5.17,
"learning_rate": 2.5e-07,
"loss": 1.5224,
"step": 168
},
{
"epoch": 5.2,
"learning_rate": 2.5e-07,
"loss": 1.4174,
"step": 169
},
{
"epoch": 5.23,
"learning_rate": 2.5e-07,
"loss": 1.5665,
"step": 170
},
{
"epoch": 5.26,
"learning_rate": 2.5e-07,
"loss": 1.5093,
"step": 171
},
{
"epoch": 5.29,
"learning_rate": 2.5e-07,
"loss": 1.4894,
"step": 172
},
{
"epoch": 5.32,
"learning_rate": 2.5e-07,
"loss": 1.7075,
"step": 173
},
{
"epoch": 5.35,
"learning_rate": 2.5e-07,
"loss": 1.5987,
"step": 174
},
{
"epoch": 5.38,
"learning_rate": 2.5e-07,
"loss": 1.5064,
"step": 175
},
{
"epoch": 5.42,
"learning_rate": 2.5e-07,
"loss": 1.5896,
"step": 176
},
{
"epoch": 5.45,
"learning_rate": 2.5e-07,
"loss": 1.5435,
"step": 177
},
{
"epoch": 5.48,
"learning_rate": 2.5e-07,
"loss": 1.5764,
"step": 178
},
{
"epoch": 5.51,
"learning_rate": 2.5e-07,
"loss": 1.6604,
"step": 179
},
{
"epoch": 5.54,
"learning_rate": 2.5e-07,
"loss": 1.6184,
"step": 180
},
{
"epoch": 5.57,
"learning_rate": 2.5e-07,
"loss": 1.6953,
"step": 181
},
{
"epoch": 5.6,
"learning_rate": 2.5e-07,
"loss": 1.5764,
"step": 182
},
{
"epoch": 5.63,
"learning_rate": 2.5e-07,
"loss": 1.4756,
"step": 183
},
{
"epoch": 5.66,
"learning_rate": 2.5e-07,
"loss": 1.6062,
"step": 184
},
{
"epoch": 5.69,
"learning_rate": 2.5e-07,
"loss": 1.5978,
"step": 185
},
{
"epoch": 5.72,
"learning_rate": 2.5e-07,
"loss": 1.4222,
"step": 186
},
{
"epoch": 5.75,
"learning_rate": 2.5e-07,
"loss": 1.5142,
"step": 187
},
{
"epoch": 5.78,
"learning_rate": 2.5e-07,
"loss": 1.4466,
"step": 188
},
{
"epoch": 5.82,
"learning_rate": 2.5e-07,
"loss": 1.5513,
"step": 189
},
{
"epoch": 5.85,
"learning_rate": 2.5e-07,
"loss": 1.4656,
"step": 190
},
{
"epoch": 5.88,
"learning_rate": 2.5e-07,
"loss": 1.5972,
"step": 191
},
{
"epoch": 5.91,
"learning_rate": 2.5e-07,
"loss": 1.4809,
"step": 192
},
{
"epoch": 5.94,
"learning_rate": 2.5e-07,
"loss": 1.6644,
"step": 193
},
{
"epoch": 5.97,
"learning_rate": 2.5e-07,
"loss": 1.5554,
"step": 194
},
{
"epoch": 6.0,
"learning_rate": 2.5e-07,
"loss": 1.5099,
"step": 195
},
{
"epoch": 6.03,
"learning_rate": 2.5e-07,
"loss": 1.5775,
"step": 196
},
{
"epoch": 6.06,
"learning_rate": 2.5e-07,
"loss": 1.4642,
"step": 197
},
{
"epoch": 6.09,
"learning_rate": 2.5e-07,
"loss": 1.4549,
"step": 198
},
{
"epoch": 6.12,
"learning_rate": 2.5e-07,
"loss": 1.4262,
"step": 199
},
{
"epoch": 6.15,
"learning_rate": 2.5e-07,
"loss": 1.3087,
"step": 200
},
{
"epoch": 6.18,
"learning_rate": 2.5e-07,
"loss": 1.4069,
"step": 201
},
{
"epoch": 6.22,
"learning_rate": 2.5e-07,
"loss": 1.5301,
"step": 202
},
{
"epoch": 6.25,
"learning_rate": 2.5e-07,
"loss": 1.4505,
"step": 203
},
{
"epoch": 6.28,
"learning_rate": 2.5e-07,
"loss": 1.5708,
"step": 204
},
{
"epoch": 6.31,
"learning_rate": 2.5e-07,
"loss": 1.5237,
"step": 205
},
{
"epoch": 6.34,
"learning_rate": 2.5e-07,
"loss": 1.6189,
"step": 206
},
{
"epoch": 6.37,
"learning_rate": 2.5e-07,
"loss": 1.3563,
"step": 207
},
{
"epoch": 6.4,
"learning_rate": 2.5e-07,
"loss": 1.6897,
"step": 208
},
{
"epoch": 6.43,
"learning_rate": 2.5e-07,
"loss": 1.4593,
"step": 209
},
{
"epoch": 6.46,
"learning_rate": 2.5e-07,
"loss": 1.6024,
"step": 210
},
{
"epoch": 6.49,
"learning_rate": 2.5e-07,
"loss": 1.4543,
"step": 211
},
{
"epoch": 6.52,
"learning_rate": 2.5e-07,
"loss": 1.5193,
"step": 212
},
{
"epoch": 6.55,
"learning_rate": 2.5e-07,
"loss": 1.3936,
"step": 213
},
{
"epoch": 6.58,
"learning_rate": 2.5e-07,
"loss": 1.5552,
"step": 214
},
{
"epoch": 6.62,
"learning_rate": 2.5e-07,
"loss": 1.453,
"step": 215
},
{
"epoch": 6.65,
"learning_rate": 2.5e-07,
"loss": 1.4554,
"step": 216
},
{
"epoch": 6.68,
"learning_rate": 2.5e-07,
"loss": 1.6442,
"step": 217
},
{
"epoch": 6.71,
"learning_rate": 2.5e-07,
"loss": 1.439,
"step": 218
},
{
"epoch": 6.74,
"learning_rate": 2.5e-07,
"loss": 1.4309,
"step": 219
},
{
"epoch": 6.77,
"learning_rate": 2.5e-07,
"loss": 1.4857,
"step": 220
},
{
"epoch": 6.8,
"learning_rate": 2.5e-07,
"loss": 1.5154,
"step": 221
},
{
"epoch": 6.83,
"learning_rate": 2.5e-07,
"loss": 1.3941,
"step": 222
},
{
"epoch": 6.86,
"learning_rate": 2.5e-07,
"loss": 1.5596,
"step": 223
},
{
"epoch": 6.89,
"learning_rate": 2.5e-07,
"loss": 1.4859,
"step": 224
},
{
"epoch": 6.92,
"learning_rate": 2.5e-07,
"loss": 1.4801,
"step": 225
},
{
"epoch": 6.95,
"learning_rate": 2.5e-07,
"loss": 1.5035,
"step": 226
},
{
"epoch": 6.98,
"learning_rate": 2.5e-07,
"loss": 1.6068,
"step": 227
},
{
"epoch": 7.02,
"learning_rate": 2.5e-07,
"loss": 1.4447,
"step": 228
},
{
"epoch": 7.05,
"learning_rate": 2.5e-07,
"loss": 1.5094,
"step": 229
},
{
"epoch": 7.08,
"learning_rate": 2.5e-07,
"loss": 1.5474,
"step": 230
},
{
"epoch": 7.11,
"learning_rate": 2.5e-07,
"loss": 1.4545,
"step": 231
},
{
"epoch": 7.14,
"learning_rate": 2.5e-07,
"loss": 1.4672,
"step": 232
},
{
"epoch": 7.17,
"learning_rate": 2.5e-07,
"loss": 1.6396,
"step": 233
},
{
"epoch": 7.2,
"learning_rate": 2.5e-07,
"loss": 1.374,
"step": 234
},
{
"epoch": 7.23,
"learning_rate": 2.5e-07,
"loss": 1.4521,
"step": 235
},
{
"epoch": 7.26,
"learning_rate": 2.5e-07,
"loss": 1.2984,
"step": 236
},
{
"epoch": 7.29,
"learning_rate": 2.5e-07,
"loss": 1.4511,
"step": 237
},
{
"epoch": 7.32,
"learning_rate": 2.5e-07,
"loss": 1.405,
"step": 238
},
{
"epoch": 7.35,
"learning_rate": 2.5e-07,
"loss": 1.296,
"step": 239
},
{
"epoch": 7.38,
"learning_rate": 2.5e-07,
"loss": 1.4392,
"step": 240
},
{
"epoch": 7.38,
"eval_loss": 1.3567150831222534,
"eval_runtime": 1.7364,
"eval_samples_per_second": 4.031,
"eval_steps_per_second": 2.304,
"step": 240
},
{
"epoch": 7.42,
"learning_rate": 2.5e-07,
"loss": 1.5595,
"step": 241
},
{
"epoch": 7.45,
"learning_rate": 2.5e-07,
"loss": 1.5671,
"step": 242
},
{
"epoch": 7.48,
"learning_rate": 2.5e-07,
"loss": 1.4832,
"step": 243
},
{
"epoch": 7.51,
"learning_rate": 2.5e-07,
"loss": 1.4415,
"step": 244
},
{
"epoch": 7.54,
"learning_rate": 2.5e-07,
"loss": 1.4649,
"step": 245
},
{
"epoch": 7.57,
"learning_rate": 2.5e-07,
"loss": 1.5053,
"step": 246
},
{
"epoch": 7.6,
"learning_rate": 2.5e-07,
"loss": 1.5084,
"step": 247
},
{
"epoch": 7.63,
"learning_rate": 2.5e-07,
"loss": 1.4543,
"step": 248
},
{
"epoch": 7.66,
"learning_rate": 2.5e-07,
"loss": 1.3008,
"step": 249
},
{
"epoch": 7.69,
"learning_rate": 2.5e-07,
"loss": 1.324,
"step": 250
},
{
"epoch": 7.72,
"learning_rate": 2.5e-07,
"loss": 1.5586,
"step": 251
},
{
"epoch": 7.75,
"learning_rate": 2.5e-07,
"loss": 1.4245,
"step": 252
},
{
"epoch": 7.78,
"learning_rate": 2.5e-07,
"loss": 1.4361,
"step": 253
},
{
"epoch": 7.82,
"learning_rate": 2.5e-07,
"loss": 1.4301,
"step": 254
},
{
"epoch": 7.85,
"learning_rate": 2.5e-07,
"loss": 1.5183,
"step": 255
},
{
"epoch": 7.88,
"learning_rate": 2.5e-07,
"loss": 1.46,
"step": 256
},
{
"epoch": 7.91,
"learning_rate": 2.5e-07,
"loss": 1.2602,
"step": 257
},
{
"epoch": 7.94,
"learning_rate": 2.5e-07,
"loss": 1.358,
"step": 258
},
{
"epoch": 7.97,
"learning_rate": 2.5e-07,
"loss": 1.2598,
"step": 259
},
{
"epoch": 8.0,
"learning_rate": 2.5e-07,
"loss": 1.3058,
"step": 260
},
{
"epoch": 8.03,
"learning_rate": 2.5e-07,
"loss": 1.4428,
"step": 261
},
{
"epoch": 8.06,
"learning_rate": 2.5e-07,
"loss": 1.4506,
"step": 262
},
{
"epoch": 8.09,
"learning_rate": 2.5e-07,
"loss": 1.4627,
"step": 263
},
{
"epoch": 8.12,
"learning_rate": 2.5e-07,
"loss": 1.4584,
"step": 264
},
{
"epoch": 8.15,
"learning_rate": 2.5e-07,
"loss": 1.356,
"step": 265
},
{
"epoch": 8.18,
"learning_rate": 2.5e-07,
"loss": 1.4304,
"step": 266
},
{
"epoch": 8.22,
"learning_rate": 2.5e-07,
"loss": 1.2296,
"step": 267
},
{
"epoch": 8.25,
"learning_rate": 2.5e-07,
"loss": 1.4255,
"step": 268
},
{
"epoch": 8.28,
"learning_rate": 2.5e-07,
"loss": 1.4978,
"step": 269
},
{
"epoch": 8.31,
"learning_rate": 2.5e-07,
"loss": 1.4115,
"step": 270
},
{
"epoch": 8.34,
"learning_rate": 2.5e-07,
"loss": 1.4366,
"step": 271
},
{
"epoch": 8.37,
"learning_rate": 2.5e-07,
"loss": 1.2477,
"step": 272
},
{
"epoch": 8.4,
"learning_rate": 2.5e-07,
"loss": 1.453,
"step": 273
},
{
"epoch": 8.43,
"learning_rate": 2.5e-07,
"loss": 1.3008,
"step": 274
},
{
"epoch": 8.46,
"learning_rate": 2.5e-07,
"loss": 1.2511,
"step": 275
},
{
"epoch": 8.49,
"learning_rate": 2.5e-07,
"loss": 1.4864,
"step": 276
},
{
"epoch": 8.52,
"learning_rate": 2.5e-07,
"loss": 1.4733,
"step": 277
},
{
"epoch": 8.55,
"learning_rate": 2.5e-07,
"loss": 1.4998,
"step": 278
},
{
"epoch": 8.58,
"learning_rate": 2.5e-07,
"loss": 1.4771,
"step": 279
},
{
"epoch": 8.62,
"learning_rate": 2.5e-07,
"loss": 1.4164,
"step": 280
},
{
"epoch": 8.65,
"learning_rate": 2.5e-07,
"loss": 1.2803,
"step": 281
},
{
"epoch": 8.68,
"learning_rate": 2.5e-07,
"loss": 1.3673,
"step": 282
},
{
"epoch": 8.71,
"learning_rate": 2.5e-07,
"loss": 1.3849,
"step": 283
},
{
"epoch": 8.74,
"learning_rate": 2.5e-07,
"loss": 1.4484,
"step": 284
},
{
"epoch": 8.77,
"learning_rate": 2.5e-07,
"loss": 1.397,
"step": 285
},
{
"epoch": 8.8,
"learning_rate": 2.5e-07,
"loss": 1.5398,
"step": 286
},
{
"epoch": 8.83,
"learning_rate": 2.5e-07,
"loss": 1.2841,
"step": 287
},
{
"epoch": 8.86,
"learning_rate": 2.5e-07,
"loss": 1.2991,
"step": 288
},
{
"epoch": 8.89,
"learning_rate": 2.5e-07,
"loss": 1.3,
"step": 289
},
{
"epoch": 8.92,
"learning_rate": 2.5e-07,
"loss": 1.413,
"step": 290
},
{
"epoch": 8.95,
"learning_rate": 2.5e-07,
"loss": 1.3346,
"step": 291
},
{
"epoch": 8.98,
"learning_rate": 2.5e-07,
"loss": 1.3362,
"step": 292
},
{
"epoch": 9.02,
"learning_rate": 2.5e-07,
"loss": 1.1674,
"step": 293
},
{
"epoch": 9.05,
"learning_rate": 2.5e-07,
"loss": 1.4128,
"step": 294
},
{
"epoch": 9.08,
"learning_rate": 2.5e-07,
"loss": 1.461,
"step": 295
},
{
"epoch": 9.11,
"learning_rate": 2.5e-07,
"loss": 1.3092,
"step": 296
},
{
"epoch": 9.14,
"learning_rate": 2.5e-07,
"loss": 1.2157,
"step": 297
},
{
"epoch": 9.17,
"learning_rate": 2.5e-07,
"loss": 1.3718,
"step": 298
},
{
"epoch": 9.2,
"learning_rate": 2.5e-07,
"loss": 1.4823,
"step": 299
},
{
"epoch": 9.23,
"learning_rate": 2.5e-07,
"loss": 1.4398,
"step": 300
},
{
"epoch": 9.26,
"learning_rate": 2.5e-07,
"loss": 1.3343,
"step": 301
},
{
"epoch": 9.29,
"learning_rate": 2.5e-07,
"loss": 1.1481,
"step": 302
},
{
"epoch": 9.32,
"learning_rate": 2.5e-07,
"loss": 1.3867,
"step": 303
},
{
"epoch": 9.35,
"learning_rate": 2.5e-07,
"loss": 1.3983,
"step": 304
},
{
"epoch": 9.38,
"learning_rate": 2.5e-07,
"loss": 1.3382,
"step": 305
},
{
"epoch": 9.42,
"learning_rate": 2.5e-07,
"loss": 1.4251,
"step": 306
},
{
"epoch": 9.45,
"learning_rate": 2.5e-07,
"loss": 1.2907,
"step": 307
},
{
"epoch": 9.48,
"learning_rate": 2.5e-07,
"loss": 1.2584,
"step": 308
},
{
"epoch": 9.51,
"learning_rate": 2.5e-07,
"loss": 1.3281,
"step": 309
},
{
"epoch": 9.54,
"learning_rate": 2.5e-07,
"loss": 1.4022,
"step": 310
},
{
"epoch": 9.57,
"learning_rate": 2.5e-07,
"loss": 1.3523,
"step": 311
},
{
"epoch": 9.6,
"learning_rate": 2.5e-07,
"loss": 1.5241,
"step": 312
},
{
"epoch": 9.63,
"learning_rate": 2.5e-07,
"loss": 1.1701,
"step": 313
},
{
"epoch": 9.66,
"learning_rate": 2.5e-07,
"loss": 1.194,
"step": 314
},
{
"epoch": 9.69,
"learning_rate": 2.5e-07,
"loss": 1.4622,
"step": 315
},
{
"epoch": 9.72,
"learning_rate": 2.5e-07,
"loss": 1.1747,
"step": 316
},
{
"epoch": 9.75,
"learning_rate": 2.5e-07,
"loss": 1.4286,
"step": 317
},
{
"epoch": 9.78,
"learning_rate": 2.5e-07,
"loss": 1.3895,
"step": 318
},
{
"epoch": 9.82,
"learning_rate": 2.5e-07,
"loss": 1.3746,
"step": 319
},
{
"epoch": 9.85,
"learning_rate": 2.5e-07,
"loss": 1.2196,
"step": 320
},
{
"epoch": 9.85,
"eval_loss": 1.2475242614746094,
"eval_runtime": 1.7503,
"eval_samples_per_second": 3.999,
"eval_steps_per_second": 2.285,
"step": 320
},
{
"epoch": 9.88,
"learning_rate": 2.5e-07,
"loss": 1.2543,
"step": 321
},
{
"epoch": 9.91,
"learning_rate": 2.5e-07,
"loss": 1.239,
"step": 322
},
{
"epoch": 9.94,
"learning_rate": 2.5e-07,
"loss": 1.3088,
"step": 323
},
{
"epoch": 9.97,
"learning_rate": 2.5e-07,
"loss": 1.299,
"step": 324
},
{
"epoch": 10.0,
"learning_rate": 2.5e-07,
"loss": 1.3273,
"step": 325
},
{
"epoch": 10.03,
"learning_rate": 2.5e-07,
"loss": 1.3335,
"step": 326
},
{
"epoch": 10.06,
"learning_rate": 2.5e-07,
"loss": 1.317,
"step": 327
},
{
"epoch": 10.09,
"learning_rate": 2.5e-07,
"loss": 1.3048,
"step": 328
},
{
"epoch": 10.12,
"learning_rate": 2.5e-07,
"loss": 1.429,
"step": 329
},
{
"epoch": 10.15,
"learning_rate": 2.5e-07,
"loss": 1.2001,
"step": 330
},
{
"epoch": 10.18,
"learning_rate": 2.5e-07,
"loss": 1.3512,
"step": 331
},
{
"epoch": 10.22,
"learning_rate": 2.5e-07,
"loss": 1.2161,
"step": 332
},
{
"epoch": 10.25,
"learning_rate": 2.5e-07,
"loss": 1.2098,
"step": 333
},
{
"epoch": 10.28,
"learning_rate": 2.5e-07,
"loss": 1.3637,
"step": 334
},
{
"epoch": 10.31,
"learning_rate": 2.5e-07,
"loss": 1.3788,
"step": 335
},
{
"epoch": 10.34,
"learning_rate": 2.5e-07,
"loss": 1.2368,
"step": 336
},
{
"epoch": 10.37,
"learning_rate": 2.5e-07,
"loss": 1.348,
"step": 337
},
{
"epoch": 10.4,
"learning_rate": 2.5e-07,
"loss": 1.0986,
"step": 338
},
{
"epoch": 10.43,
"learning_rate": 2.5e-07,
"loss": 1.3233,
"step": 339
},
{
"epoch": 10.46,
"learning_rate": 2.5e-07,
"loss": 1.3613,
"step": 340
},
{
"epoch": 10.49,
"learning_rate": 2.5e-07,
"loss": 1.3211,
"step": 341
},
{
"epoch": 10.52,
"learning_rate": 2.5e-07,
"loss": 1.5044,
"step": 342
},
{
"epoch": 10.55,
"learning_rate": 2.5e-07,
"loss": 1.3033,
"step": 343
},
{
"epoch": 10.58,
"learning_rate": 2.5e-07,
"loss": 1.4222,
"step": 344
},
{
"epoch": 10.62,
"learning_rate": 2.5e-07,
"loss": 1.4241,
"step": 345
},
{
"epoch": 10.65,
"learning_rate": 2.5e-07,
"loss": 1.3264,
"step": 346
},
{
"epoch": 10.68,
"learning_rate": 2.5e-07,
"loss": 1.4957,
"step": 347
},
{
"epoch": 10.71,
"learning_rate": 2.5e-07,
"loss": 1.1016,
"step": 348
},
{
"epoch": 10.74,
"learning_rate": 2.5e-07,
"loss": 1.2492,
"step": 349
},
{
"epoch": 10.77,
"learning_rate": 2.5e-07,
"loss": 1.1237,
"step": 350
},
{
"epoch": 10.8,
"learning_rate": 2.5e-07,
"loss": 1.4371,
"step": 351
},
{
"epoch": 10.83,
"learning_rate": 2.5e-07,
"loss": 1.438,
"step": 352
},
{
"epoch": 10.86,
"learning_rate": 2.5e-07,
"loss": 1.2182,
"step": 353
},
{
"epoch": 10.89,
"learning_rate": 2.5e-07,
"loss": 1.2577,
"step": 354
},
{
"epoch": 10.92,
"learning_rate": 2.5e-07,
"loss": 1.2687,
"step": 355
},
{
"epoch": 10.95,
"learning_rate": 2.5e-07,
"loss": 1.3387,
"step": 356
},
{
"epoch": 10.98,
"learning_rate": 2.5e-07,
"loss": 1.3571,
"step": 357
},
{
"epoch": 11.02,
"learning_rate": 2.5e-07,
"loss": 1.2289,
"step": 358
},
{
"epoch": 11.05,
"learning_rate": 2.5e-07,
"loss": 1.2925,
"step": 359
},
{
"epoch": 11.08,
"learning_rate": 2.5e-07,
"loss": 1.3187,
"step": 360
},
{
"epoch": 11.11,
"learning_rate": 2.5e-07,
"loss": 1.3628,
"step": 361
},
{
"epoch": 11.14,
"learning_rate": 2.5e-07,
"loss": 1.2547,
"step": 362
},
{
"epoch": 11.17,
"learning_rate": 2.5e-07,
"loss": 1.3169,
"step": 363
},
{
"epoch": 11.2,
"learning_rate": 2.5e-07,
"loss": 1.2921,
"step": 364
},
{
"epoch": 11.23,
"learning_rate": 2.5e-07,
"loss": 1.3426,
"step": 365
},
{
"epoch": 11.26,
"learning_rate": 2.5e-07,
"loss": 1.4212,
"step": 366
},
{
"epoch": 11.29,
"learning_rate": 2.5e-07,
"loss": 1.2466,
"step": 367
},
{
"epoch": 11.32,
"learning_rate": 2.5e-07,
"loss": 1.3367,
"step": 368
},
{
"epoch": 11.35,
"learning_rate": 2.5e-07,
"loss": 1.3179,
"step": 369
},
{
"epoch": 11.38,
"learning_rate": 2.5e-07,
"loss": 1.2568,
"step": 370
},
{
"epoch": 11.42,
"learning_rate": 2.5e-07,
"loss": 1.1769,
"step": 371
},
{
"epoch": 11.45,
"learning_rate": 2.5e-07,
"loss": 1.3068,
"step": 372
},
{
"epoch": 11.48,
"learning_rate": 2.5e-07,
"loss": 1.1623,
"step": 373
},
{
"epoch": 11.51,
"learning_rate": 2.5e-07,
"loss": 1.2147,
"step": 374
},
{
"epoch": 11.54,
"learning_rate": 2.5e-07,
"loss": 1.4786,
"step": 375
},
{
"epoch": 11.57,
"learning_rate": 2.5e-07,
"loss": 1.3387,
"step": 376
},
{
"epoch": 11.6,
"learning_rate": 2.5e-07,
"loss": 1.261,
"step": 377
},
{
"epoch": 11.63,
"learning_rate": 2.5e-07,
"loss": 1.2758,
"step": 378
},
{
"epoch": 11.66,
"learning_rate": 2.5e-07,
"loss": 1.2917,
"step": 379
},
{
"epoch": 11.69,
"learning_rate": 2.5e-07,
"loss": 1.3428,
"step": 380
},
{
"epoch": 11.72,
"learning_rate": 2.5e-07,
"loss": 1.4322,
"step": 381
},
{
"epoch": 11.75,
"learning_rate": 2.5e-07,
"loss": 1.2796,
"step": 382
},
{
"epoch": 11.78,
"learning_rate": 2.5e-07,
"loss": 1.318,
"step": 383
},
{
"epoch": 11.82,
"learning_rate": 2.5e-07,
"loss": 1.2229,
"step": 384
},
{
"epoch": 11.85,
"learning_rate": 2.5e-07,
"loss": 1.1542,
"step": 385
},
{
"epoch": 11.88,
"learning_rate": 2.5e-07,
"loss": 1.3305,
"step": 386
},
{
"epoch": 11.91,
"learning_rate": 2.5e-07,
"loss": 1.3448,
"step": 387
},
{
"epoch": 11.94,
"learning_rate": 2.5e-07,
"loss": 1.2508,
"step": 388
},
{
"epoch": 11.97,
"learning_rate": 2.5e-07,
"loss": 1.3033,
"step": 389
},
{
"epoch": 12.0,
"learning_rate": 2.5e-07,
"loss": 1.1879,
"step": 390
},
{
"epoch": 12.03,
"learning_rate": 2.5e-07,
"loss": 1.3695,
"step": 391
},
{
"epoch": 12.06,
"learning_rate": 2.5e-07,
"loss": 1.278,
"step": 392
},
{
"epoch": 12.09,
"learning_rate": 2.5e-07,
"loss": 1.4726,
"step": 393
},
{
"epoch": 12.12,
"learning_rate": 2.5e-07,
"loss": 1.142,
"step": 394
},
{
"epoch": 12.15,
"learning_rate": 2.5e-07,
"loss": 1.2075,
"step": 395
},
{
"epoch": 12.18,
"learning_rate": 2.5e-07,
"loss": 1.4716,
"step": 396
},
{
"epoch": 12.22,
"learning_rate": 2.5e-07,
"loss": 1.336,
"step": 397
},
{
"epoch": 12.25,
"learning_rate": 2.5e-07,
"loss": 1.0818,
"step": 398
},
{
"epoch": 12.28,
"learning_rate": 2.5e-07,
"loss": 1.3366,
"step": 399
},
{
"epoch": 12.31,
"learning_rate": 2.5e-07,
"loss": 1.3219,
"step": 400
},
{
"epoch": 12.31,
"eval_loss": 1.2088563442230225,
"eval_runtime": 1.7541,
"eval_samples_per_second": 3.991,
"eval_steps_per_second": 2.28,
"step": 400
},
{
"epoch": 12.34,
"learning_rate": 2.5e-07,
"loss": 1.4101,
"step": 401
},
{
"epoch": 12.37,
"learning_rate": 2.5e-07,
"loss": 1.2773,
"step": 402
},
{
"epoch": 12.4,
"learning_rate": 2.5e-07,
"loss": 1.3575,
"step": 403
},
{
"epoch": 12.43,
"learning_rate": 2.5e-07,
"loss": 1.089,
"step": 404
},
{
"epoch": 12.46,
"learning_rate": 2.5e-07,
"loss": 1.4164,
"step": 405
},
{
"epoch": 12.49,
"learning_rate": 2.5e-07,
"loss": 1.3292,
"step": 406
},
{
"epoch": 12.52,
"learning_rate": 2.5e-07,
"loss": 1.3447,
"step": 407
},
{
"epoch": 12.55,
"learning_rate": 2.5e-07,
"loss": 1.4455,
"step": 408
},
{
"epoch": 12.58,
"learning_rate": 2.5e-07,
"loss": 1.3716,
"step": 409
},
{
"epoch": 12.62,
"learning_rate": 2.5e-07,
"loss": 1.338,
"step": 410
},
{
"epoch": 12.65,
"learning_rate": 2.5e-07,
"loss": 1.2842,
"step": 411
},
{
"epoch": 12.68,
"learning_rate": 2.5e-07,
"loss": 1.0655,
"step": 412
},
{
"epoch": 12.71,
"learning_rate": 2.5e-07,
"loss": 1.1405,
"step": 413
},
{
"epoch": 12.74,
"learning_rate": 2.5e-07,
"loss": 1.3488,
"step": 414
},
{
"epoch": 12.77,
"learning_rate": 2.5e-07,
"loss": 1.1401,
"step": 415
},
{
"epoch": 12.8,
"learning_rate": 2.5e-07,
"loss": 1.2261,
"step": 416
},
{
"epoch": 12.83,
"learning_rate": 2.5e-07,
"loss": 1.2211,
"step": 417
},
{
"epoch": 12.86,
"learning_rate": 2.5e-07,
"loss": 1.235,
"step": 418
},
{
"epoch": 12.89,
"learning_rate": 2.5e-07,
"loss": 1.2779,
"step": 419
},
{
"epoch": 12.92,
"learning_rate": 2.5e-07,
"loss": 1.228,
"step": 420
},
{
"epoch": 12.95,
"learning_rate": 2.5e-07,
"loss": 1.3058,
"step": 421
},
{
"epoch": 12.98,
"learning_rate": 2.5e-07,
"loss": 1.2325,
"step": 422
},
{
"epoch": 13.02,
"learning_rate": 2.5e-07,
"loss": 1.3127,
"step": 423
},
{
"epoch": 13.05,
"learning_rate": 2.5e-07,
"loss": 1.382,
"step": 424
},
{
"epoch": 13.08,
"learning_rate": 2.5e-07,
"loss": 1.3625,
"step": 425
},
{
"epoch": 13.11,
"learning_rate": 2.5e-07,
"loss": 1.3533,
"step": 426
},
{
"epoch": 13.14,
"learning_rate": 2.5e-07,
"loss": 1.2046,
"step": 427
},
{
"epoch": 13.17,
"learning_rate": 2.5e-07,
"loss": 1.2991,
"step": 428
},
{
"epoch": 13.2,
"learning_rate": 2.5e-07,
"loss": 1.1785,
"step": 429
},
{
"epoch": 13.23,
"learning_rate": 2.5e-07,
"loss": 1.2795,
"step": 430
},
{
"epoch": 13.26,
"learning_rate": 2.5e-07,
"loss": 1.248,
"step": 431
},
{
"epoch": 13.29,
"learning_rate": 2.5e-07,
"loss": 1.3476,
"step": 432
},
{
"epoch": 13.32,
"learning_rate": 2.5e-07,
"loss": 1.277,
"step": 433
},
{
"epoch": 13.35,
"learning_rate": 2.5e-07,
"loss": 1.2044,
"step": 434
},
{
"epoch": 13.38,
"learning_rate": 2.5e-07,
"loss": 1.1909,
"step": 435
},
{
"epoch": 13.42,
"learning_rate": 2.5e-07,
"loss": 1.2544,
"step": 436
},
{
"epoch": 13.45,
"learning_rate": 2.5e-07,
"loss": 1.2036,
"step": 437
},
{
"epoch": 13.48,
"learning_rate": 2.5e-07,
"loss": 1.2141,
"step": 438
},
{
"epoch": 13.51,
"learning_rate": 2.5e-07,
"loss": 1.2086,
"step": 439
},
{
"epoch": 13.54,
"learning_rate": 2.5e-07,
"loss": 1.3863,
"step": 440
},
{
"epoch": 13.57,
"learning_rate": 2.5e-07,
"loss": 1.2435,
"step": 441
},
{
"epoch": 13.6,
"learning_rate": 2.5e-07,
"loss": 1.3655,
"step": 442
},
{
"epoch": 13.63,
"learning_rate": 2.5e-07,
"loss": 1.3562,
"step": 443
},
{
"epoch": 13.66,
"learning_rate": 2.5e-07,
"loss": 1.3441,
"step": 444
},
{
"epoch": 13.69,
"learning_rate": 2.5e-07,
"loss": 1.342,
"step": 445
},
{
"epoch": 13.72,
"learning_rate": 2.5e-07,
"loss": 1.0318,
"step": 446
},
{
"epoch": 13.75,
"learning_rate": 2.5e-07,
"loss": 1.3068,
"step": 447
},
{
"epoch": 13.78,
"learning_rate": 2.5e-07,
"loss": 1.4054,
"step": 448
},
{
"epoch": 13.82,
"learning_rate": 2.5e-07,
"loss": 1.1914,
"step": 449
},
{
"epoch": 13.85,
"learning_rate": 2.5e-07,
"loss": 1.3734,
"step": 450
},
{
"epoch": 13.88,
"learning_rate": 2.5e-07,
"loss": 1.3705,
"step": 451
},
{
"epoch": 13.91,
"learning_rate": 2.5e-07,
"loss": 1.2954,
"step": 452
},
{
"epoch": 13.94,
"learning_rate": 2.5e-07,
"loss": 1.2625,
"step": 453
},
{
"epoch": 13.97,
"learning_rate": 2.5e-07,
"loss": 1.0147,
"step": 454
},
{
"epoch": 14.0,
"learning_rate": 2.5e-07,
"loss": 1.2888,
"step": 455
},
{
"epoch": 14.03,
"learning_rate": 2.5e-07,
"loss": 1.1156,
"step": 456
},
{
"epoch": 14.06,
"learning_rate": 2.5e-07,
"loss": 1.421,
"step": 457
},
{
"epoch": 14.09,
"learning_rate": 2.5e-07,
"loss": 1.2285,
"step": 458
},
{
"epoch": 14.12,
"learning_rate": 2.5e-07,
"loss": 1.2881,
"step": 459
},
{
"epoch": 14.15,
"learning_rate": 2.5e-07,
"loss": 1.2028,
"step": 460
},
{
"epoch": 14.18,
"learning_rate": 2.5e-07,
"loss": 1.2213,
"step": 461
},
{
"epoch": 14.22,
"learning_rate": 2.5e-07,
"loss": 1.2455,
"step": 462
},
{
"epoch": 14.25,
"learning_rate": 2.5e-07,
"loss": 1.2788,
"step": 463
},
{
"epoch": 14.28,
"learning_rate": 2.5e-07,
"loss": 1.1349,
"step": 464
},
{
"epoch": 14.31,
"learning_rate": 2.5e-07,
"loss": 1.3234,
"step": 465
},
{
"epoch": 14.34,
"learning_rate": 2.5e-07,
"loss": 1.2652,
"step": 466
},
{
"epoch": 14.37,
"learning_rate": 2.5e-07,
"loss": 1.3123,
"step": 467
},
{
"epoch": 14.4,
"learning_rate": 2.5e-07,
"loss": 1.3447,
"step": 468
},
{
"epoch": 14.43,
"learning_rate": 2.5e-07,
"loss": 1.2818,
"step": 469
},
{
"epoch": 14.46,
"learning_rate": 2.5e-07,
"loss": 1.1331,
"step": 470
},
{
"epoch": 14.49,
"learning_rate": 2.5e-07,
"loss": 1.2309,
"step": 471
},
{
"epoch": 14.52,
"learning_rate": 2.5e-07,
"loss": 1.2867,
"step": 472
},
{
"epoch": 14.55,
"learning_rate": 2.5e-07,
"loss": 1.3339,
"step": 473
},
{
"epoch": 14.58,
"learning_rate": 2.5e-07,
"loss": 1.4158,
"step": 474
},
{
"epoch": 14.62,
"learning_rate": 2.5e-07,
"loss": 1.529,
"step": 475
},
{
"epoch": 14.65,
"learning_rate": 2.5e-07,
"loss": 1.258,
"step": 476
},
{
"epoch": 14.68,
"learning_rate": 2.5e-07,
"loss": 1.3033,
"step": 477
},
{
"epoch": 14.71,
"learning_rate": 2.5e-07,
"loss": 1.147,
"step": 478
},
{
"epoch": 14.74,
"learning_rate": 2.5e-07,
"loss": 1.3166,
"step": 479
},
{
"epoch": 14.77,
"learning_rate": 2.5e-07,
"loss": 1.2171,
"step": 480
},
{
"epoch": 14.77,
"eval_loss": 1.1870461702346802,
"eval_runtime": 1.7297,
"eval_samples_per_second": 4.047,
"eval_steps_per_second": 2.312,
"step": 480
},
{
"epoch": 14.8,
"learning_rate": 2.5e-07,
"loss": 1.18,
"step": 481
},
{
"epoch": 14.83,
"learning_rate": 2.5e-07,
"loss": 1.2117,
"step": 482
},
{
"epoch": 14.86,
"learning_rate": 2.5e-07,
"loss": 1.148,
"step": 483
},
{
"epoch": 14.89,
"learning_rate": 2.5e-07,
"loss": 1.3961,
"step": 484
},
{
"epoch": 14.92,
"learning_rate": 2.5e-07,
"loss": 1.3331,
"step": 485
},
{
"epoch": 14.95,
"learning_rate": 2.5e-07,
"loss": 1.3485,
"step": 486
},
{
"epoch": 14.98,
"learning_rate": 2.5e-07,
"loss": 1.0999,
"step": 487
},
{
"epoch": 15.02,
"learning_rate": 2.5e-07,
"loss": 1.2521,
"step": 488
},
{
"epoch": 15.05,
"learning_rate": 2.5e-07,
"loss": 1.2097,
"step": 489
},
{
"epoch": 15.08,
"learning_rate": 2.5e-07,
"loss": 1.2557,
"step": 490
},
{
"epoch": 15.11,
"learning_rate": 2.5e-07,
"loss": 1.2638,
"step": 491
},
{
"epoch": 15.14,
"learning_rate": 2.5e-07,
"loss": 1.3549,
"step": 492
},
{
"epoch": 15.17,
"learning_rate": 2.5e-07,
"loss": 1.2417,
"step": 493
},
{
"epoch": 15.2,
"learning_rate": 2.5e-07,
"loss": 1.2861,
"step": 494
},
{
"epoch": 15.23,
"learning_rate": 2.5e-07,
"loss": 1.2428,
"step": 495
},
{
"epoch": 15.26,
"learning_rate": 2.5e-07,
"loss": 1.1561,
"step": 496
},
{
"epoch": 15.29,
"learning_rate": 2.5e-07,
"loss": 1.2641,
"step": 497
},
{
"epoch": 15.32,
"learning_rate": 2.5e-07,
"loss": 1.3073,
"step": 498
},
{
"epoch": 15.35,
"learning_rate": 2.5e-07,
"loss": 1.2227,
"step": 499
},
{
"epoch": 15.38,
"learning_rate": 2.5e-07,
"loss": 1.3311,
"step": 500
},
{
"epoch": 15.42,
"learning_rate": 2.5e-07,
"loss": 1.2581,
"step": 501
},
{
"epoch": 15.45,
"learning_rate": 2.5e-07,
"loss": 1.2751,
"step": 502
},
{
"epoch": 15.48,
"learning_rate": 2.5e-07,
"loss": 1.4222,
"step": 503
},
{
"epoch": 15.51,
"learning_rate": 2.5e-07,
"loss": 1.3263,
"step": 504
},
{
"epoch": 15.54,
"learning_rate": 2.5e-07,
"loss": 1.2236,
"step": 505
},
{
"epoch": 15.57,
"learning_rate": 2.5e-07,
"loss": 1.3125,
"step": 506
},
{
"epoch": 15.6,
"learning_rate": 2.5e-07,
"loss": 1.424,
"step": 507
},
{
"epoch": 15.63,
"learning_rate": 2.5e-07,
"loss": 1.2231,
"step": 508
},
{
"epoch": 15.66,
"learning_rate": 2.5e-07,
"loss": 1.2089,
"step": 509
},
{
"epoch": 15.69,
"learning_rate": 2.5e-07,
"loss": 1.4663,
"step": 510
},
{
"epoch": 15.72,
"learning_rate": 2.5e-07,
"loss": 1.3236,
"step": 511
},
{
"epoch": 15.75,
"learning_rate": 2.5e-07,
"loss": 1.2133,
"step": 512
},
{
"epoch": 15.78,
"learning_rate": 2.5e-07,
"loss": 1.1598,
"step": 513
},
{
"epoch": 15.82,
"learning_rate": 2.5e-07,
"loss": 1.3023,
"step": 514
},
{
"epoch": 15.85,
"learning_rate": 2.5e-07,
"loss": 1.139,
"step": 515
},
{
"epoch": 15.88,
"learning_rate": 2.5e-07,
"loss": 1.1881,
"step": 516
},
{
"epoch": 15.91,
"learning_rate": 2.5e-07,
"loss": 1.1448,
"step": 517
},
{
"epoch": 15.94,
"learning_rate": 2.5e-07,
"loss": 1.2321,
"step": 518
},
{
"epoch": 15.97,
"learning_rate": 2.5e-07,
"loss": 1.2134,
"step": 519
},
{
"epoch": 16.0,
"learning_rate": 2.5e-07,
"loss": 1.3268,
"step": 520
},
{
"epoch": 16.03,
"learning_rate": 2.5e-07,
"loss": 1.3858,
"step": 521
},
{
"epoch": 16.06,
"learning_rate": 2.5e-07,
"loss": 1.3358,
"step": 522
},
{
"epoch": 16.09,
"learning_rate": 2.5e-07,
"loss": 1.2051,
"step": 523
},
{
"epoch": 16.12,
"learning_rate": 2.5e-07,
"loss": 1.1431,
"step": 524
},
{
"epoch": 16.15,
"learning_rate": 2.5e-07,
"loss": 1.2539,
"step": 525
},
{
"epoch": 16.18,
"learning_rate": 2.5e-07,
"loss": 1.3096,
"step": 526
},
{
"epoch": 16.22,
"learning_rate": 2.5e-07,
"loss": 1.1879,
"step": 527
},
{
"epoch": 16.25,
"learning_rate": 2.5e-07,
"loss": 1.3215,
"step": 528
},
{
"epoch": 16.28,
"learning_rate": 2.5e-07,
"loss": 1.2835,
"step": 529
},
{
"epoch": 16.31,
"learning_rate": 2.5e-07,
"loss": 1.2596,
"step": 530
},
{
"epoch": 16.34,
"learning_rate": 2.5e-07,
"loss": 1.2635,
"step": 531
},
{
"epoch": 16.37,
"learning_rate": 2.5e-07,
"loss": 1.4138,
"step": 532
},
{
"epoch": 16.4,
"learning_rate": 2.5e-07,
"loss": 1.2552,
"step": 533
},
{
"epoch": 16.43,
"learning_rate": 2.5e-07,
"loss": 1.337,
"step": 534
},
{
"epoch": 16.46,
"learning_rate": 2.5e-07,
"loss": 1.1408,
"step": 535
},
{
"epoch": 16.49,
"learning_rate": 2.5e-07,
"loss": 1.2962,
"step": 536
},
{
"epoch": 16.52,
"learning_rate": 2.5e-07,
"loss": 1.3547,
"step": 537
},
{
"epoch": 16.55,
"learning_rate": 2.5e-07,
"loss": 1.2199,
"step": 538
},
{
"epoch": 16.58,
"learning_rate": 2.5e-07,
"loss": 1.2924,
"step": 539
},
{
"epoch": 16.62,
"learning_rate": 2.5e-07,
"loss": 1.1342,
"step": 540
},
{
"epoch": 16.65,
"learning_rate": 2.5e-07,
"loss": 1.2628,
"step": 541
},
{
"epoch": 16.68,
"learning_rate": 2.5e-07,
"loss": 0.9506,
"step": 542
},
{
"epoch": 16.71,
"learning_rate": 2.5e-07,
"loss": 1.3052,
"step": 543
},
{
"epoch": 16.74,
"learning_rate": 2.5e-07,
"loss": 1.3089,
"step": 544
},
{
"epoch": 16.77,
"learning_rate": 2.5e-07,
"loss": 1.3776,
"step": 545
},
{
"epoch": 16.8,
"learning_rate": 2.5e-07,
"loss": 1.0516,
"step": 546
},
{
"epoch": 16.83,
"learning_rate": 2.5e-07,
"loss": 1.1433,
"step": 547
},
{
"epoch": 16.86,
"learning_rate": 2.5e-07,
"loss": 1.3056,
"step": 548
},
{
"epoch": 16.89,
"learning_rate": 2.5e-07,
"loss": 1.2652,
"step": 549
},
{
"epoch": 16.92,
"learning_rate": 2.5e-07,
"loss": 1.2002,
"step": 550
},
{
"epoch": 16.95,
"learning_rate": 2.5e-07,
"loss": 1.181,
"step": 551
},
{
"epoch": 16.98,
"learning_rate": 2.5e-07,
"loss": 1.0902,
"step": 552
},
{
"epoch": 17.02,
"learning_rate": 2.5e-07,
"loss": 1.2845,
"step": 553
},
{
"epoch": 17.05,
"learning_rate": 2.5e-07,
"loss": 1.2646,
"step": 554
},
{
"epoch": 17.08,
"learning_rate": 2.5e-07,
"loss": 1.0982,
"step": 555
},
{
"epoch": 17.11,
"learning_rate": 2.5e-07,
"loss": 1.1109,
"step": 556
},
{
"epoch": 17.14,
"learning_rate": 2.5e-07,
"loss": 1.2508,
"step": 557
},
{
"epoch": 17.17,
"learning_rate": 2.5e-07,
"loss": 1.2859,
"step": 558
},
{
"epoch": 17.2,
"learning_rate": 2.5e-07,
"loss": 0.9845,
"step": 559
},
{
"epoch": 17.23,
"learning_rate": 2.5e-07,
"loss": 1.1686,
"step": 560
},
{
"epoch": 17.23,
"eval_loss": 1.173031210899353,
"eval_runtime": 1.7371,
"eval_samples_per_second": 4.03,
"eval_steps_per_second": 2.303,
"step": 560
},
{
"epoch": 17.26,
"learning_rate": 2.5e-07,
"loss": 1.2483,
"step": 561
},
{
"epoch": 17.29,
"learning_rate": 2.5e-07,
"loss": 1.3438,
"step": 562
},
{
"epoch": 17.32,
"learning_rate": 2.5e-07,
"loss": 1.2862,
"step": 563
},
{
"epoch": 17.35,
"learning_rate": 2.5e-07,
"loss": 1.2464,
"step": 564
},
{
"epoch": 17.38,
"learning_rate": 2.5e-07,
"loss": 1.3084,
"step": 565
},
{
"epoch": 17.42,
"learning_rate": 2.5e-07,
"loss": 1.2382,
"step": 566
},
{
"epoch": 17.45,
"learning_rate": 2.5e-07,
"loss": 1.2763,
"step": 567
},
{
"epoch": 17.48,
"learning_rate": 2.5e-07,
"loss": 1.1901,
"step": 568
},
{
"epoch": 17.51,
"learning_rate": 2.5e-07,
"loss": 1.0708,
"step": 569
},
{
"epoch": 17.54,
"learning_rate": 2.5e-07,
"loss": 1.2879,
"step": 570
},
{
"epoch": 17.57,
"learning_rate": 2.5e-07,
"loss": 1.2257,
"step": 571
},
{
"epoch": 17.6,
"learning_rate": 2.5e-07,
"loss": 1.06,
"step": 572
},
{
"epoch": 17.63,
"learning_rate": 2.5e-07,
"loss": 1.3583,
"step": 573
},
{
"epoch": 17.66,
"learning_rate": 2.5e-07,
"loss": 1.2269,
"step": 574
},
{
"epoch": 17.69,
"learning_rate": 2.5e-07,
"loss": 1.3294,
"step": 575
},
{
"epoch": 17.72,
"learning_rate": 2.5e-07,
"loss": 1.3498,
"step": 576
},
{
"epoch": 17.75,
"learning_rate": 2.5e-07,
"loss": 1.0898,
"step": 577
},
{
"epoch": 17.78,
"learning_rate": 2.5e-07,
"loss": 1.2676,
"step": 578
},
{
"epoch": 17.82,
"learning_rate": 2.5e-07,
"loss": 1.3162,
"step": 579
},
{
"epoch": 17.85,
"learning_rate": 2.5e-07,
"loss": 1.3982,
"step": 580
},
{
"epoch": 17.88,
"learning_rate": 2.5e-07,
"loss": 1.3926,
"step": 581
},
{
"epoch": 17.91,
"learning_rate": 2.5e-07,
"loss": 1.1394,
"step": 582
},
{
"epoch": 17.94,
"learning_rate": 2.5e-07,
"loss": 1.2473,
"step": 583
},
{
"epoch": 17.97,
"learning_rate": 2.5e-07,
"loss": 1.1476,
"step": 584
},
{
"epoch": 18.0,
"learning_rate": 2.5e-07,
"loss": 1.226,
"step": 585
},
{
"epoch": 18.03,
"learning_rate": 2.5e-07,
"loss": 1.2704,
"step": 586
},
{
"epoch": 18.06,
"learning_rate": 2.5e-07,
"loss": 0.9477,
"step": 587
},
{
"epoch": 18.09,
"learning_rate": 2.5e-07,
"loss": 1.3465,
"step": 588
},
{
"epoch": 18.12,
"learning_rate": 2.5e-07,
"loss": 1.3294,
"step": 589
},
{
"epoch": 18.15,
"learning_rate": 2.5e-07,
"loss": 1.057,
"step": 590
},
{
"epoch": 18.18,
"learning_rate": 2.5e-07,
"loss": 1.2434,
"step": 591
},
{
"epoch": 18.22,
"learning_rate": 2.5e-07,
"loss": 1.2729,
"step": 592
},
{
"epoch": 18.25,
"learning_rate": 2.5e-07,
"loss": 1.1356,
"step": 593
},
{
"epoch": 18.28,
"learning_rate": 2.5e-07,
"loss": 1.1587,
"step": 594
},
{
"epoch": 18.31,
"learning_rate": 2.5e-07,
"loss": 1.1197,
"step": 595
},
{
"epoch": 18.34,
"learning_rate": 2.5e-07,
"loss": 1.1778,
"step": 596
},
{
"epoch": 18.37,
"learning_rate": 2.5e-07,
"loss": 1.1198,
"step": 597
},
{
"epoch": 18.4,
"learning_rate": 2.5e-07,
"loss": 1.3865,
"step": 598
},
{
"epoch": 18.43,
"learning_rate": 2.5e-07,
"loss": 1.2218,
"step": 599
},
{
"epoch": 18.46,
"learning_rate": 2.5e-07,
"loss": 1.0191,
"step": 600
},
{
"epoch": 18.49,
"learning_rate": 2.5e-07,
"loss": 1.3666,
"step": 601
},
{
"epoch": 18.52,
"learning_rate": 2.5e-07,
"loss": 1.3141,
"step": 602
},
{
"epoch": 18.55,
"learning_rate": 2.5e-07,
"loss": 1.279,
"step": 603
},
{
"epoch": 18.58,
"learning_rate": 2.5e-07,
"loss": 1.32,
"step": 604
},
{
"epoch": 18.62,
"learning_rate": 2.5e-07,
"loss": 1.3216,
"step": 605
},
{
"epoch": 18.65,
"learning_rate": 2.5e-07,
"loss": 1.1378,
"step": 606
},
{
"epoch": 18.68,
"learning_rate": 2.5e-07,
"loss": 1.174,
"step": 607
},
{
"epoch": 18.71,
"learning_rate": 2.5e-07,
"loss": 1.1897,
"step": 608
},
{
"epoch": 18.74,
"learning_rate": 2.5e-07,
"loss": 1.2227,
"step": 609
},
{
"epoch": 18.77,
"learning_rate": 2.5e-07,
"loss": 1.2315,
"step": 610
},
{
"epoch": 18.8,
"learning_rate": 2.5e-07,
"loss": 1.1637,
"step": 611
},
{
"epoch": 18.83,
"learning_rate": 2.5e-07,
"loss": 1.2978,
"step": 612
},
{
"epoch": 18.86,
"learning_rate": 2.5e-07,
"loss": 1.2103,
"step": 613
},
{
"epoch": 18.89,
"learning_rate": 2.5e-07,
"loss": 1.3145,
"step": 614
},
{
"epoch": 18.92,
"learning_rate": 2.5e-07,
"loss": 1.2807,
"step": 615
},
{
"epoch": 18.95,
"learning_rate": 2.5e-07,
"loss": 1.2668,
"step": 616
},
{
"epoch": 18.98,
"learning_rate": 2.5e-07,
"loss": 1.2986,
"step": 617
},
{
"epoch": 19.02,
"learning_rate": 2.5e-07,
"loss": 1.1996,
"step": 618
},
{
"epoch": 19.05,
"learning_rate": 2.5e-07,
"loss": 0.9905,
"step": 619
},
{
"epoch": 19.08,
"learning_rate": 2.5e-07,
"loss": 1.3338,
"step": 620
},
{
"epoch": 19.11,
"learning_rate": 2.5e-07,
"loss": 1.0955,
"step": 621
},
{
"epoch": 19.14,
"learning_rate": 2.5e-07,
"loss": 1.1721,
"step": 622
},
{
"epoch": 19.17,
"learning_rate": 2.5e-07,
"loss": 1.2088,
"step": 623
},
{
"epoch": 19.2,
"learning_rate": 2.5e-07,
"loss": 1.3623,
"step": 624
},
{
"epoch": 19.23,
"learning_rate": 2.5e-07,
"loss": 1.0578,
"step": 625
},
{
"epoch": 19.26,
"learning_rate": 2.5e-07,
"loss": 1.0765,
"step": 626
},
{
"epoch": 19.29,
"learning_rate": 2.5e-07,
"loss": 1.3624,
"step": 627
},
{
"epoch": 19.32,
"learning_rate": 2.5e-07,
"loss": 1.1778,
"step": 628
},
{
"epoch": 19.35,
"learning_rate": 2.5e-07,
"loss": 1.2267,
"step": 629
},
{
"epoch": 19.38,
"learning_rate": 2.5e-07,
"loss": 1.2311,
"step": 630
},
{
"epoch": 19.42,
"learning_rate": 2.5e-07,
"loss": 1.3226,
"step": 631
},
{
"epoch": 19.45,
"learning_rate": 2.5e-07,
"loss": 1.2294,
"step": 632
},
{
"epoch": 19.48,
"learning_rate": 2.5e-07,
"loss": 1.0192,
"step": 633
},
{
"epoch": 19.51,
"learning_rate": 2.5e-07,
"loss": 1.2559,
"step": 634
},
{
"epoch": 19.54,
"learning_rate": 2.5e-07,
"loss": 1.2497,
"step": 635
},
{
"epoch": 19.57,
"learning_rate": 2.5e-07,
"loss": 1.4026,
"step": 636
},
{
"epoch": 19.6,
"learning_rate": 2.5e-07,
"loss": 1.2551,
"step": 637
},
{
"epoch": 19.63,
"learning_rate": 2.5e-07,
"loss": 1.2695,
"step": 638
},
{
"epoch": 19.66,
"learning_rate": 2.5e-07,
"loss": 1.2047,
"step": 639
},
{
"epoch": 19.69,
"learning_rate": 2.5e-07,
"loss": 1.1506,
"step": 640
},
{
"epoch": 19.69,
"eval_loss": 1.161481261253357,
"eval_runtime": 1.7425,
"eval_samples_per_second": 4.017,
"eval_steps_per_second": 2.296,
"step": 640
},
{
"epoch": 19.72,
"learning_rate": 2.5e-07,
"loss": 1.2597,
"step": 641
},
{
"epoch": 19.75,
"learning_rate": 2.5e-07,
"loss": 1.1908,
"step": 642
},
{
"epoch": 19.78,
"learning_rate": 2.5e-07,
"loss": 1.2338,
"step": 643
},
{
"epoch": 19.82,
"learning_rate": 2.5e-07,
"loss": 1.31,
"step": 644
},
{
"epoch": 19.85,
"learning_rate": 2.5e-07,
"loss": 1.0713,
"step": 645
},
{
"epoch": 19.88,
"learning_rate": 2.5e-07,
"loss": 1.2727,
"step": 646
},
{
"epoch": 19.91,
"learning_rate": 2.5e-07,
"loss": 1.2029,
"step": 647
},
{
"epoch": 19.94,
"learning_rate": 2.5e-07,
"loss": 1.3007,
"step": 648
},
{
"epoch": 19.97,
"learning_rate": 2.5e-07,
"loss": 1.3173,
"step": 649
},
{
"epoch": 20.0,
"learning_rate": 2.5e-07,
"loss": 1.2024,
"step": 650
},
{
"epoch": 20.03,
"learning_rate": 2.5e-07,
"loss": 1.1753,
"step": 651
},
{
"epoch": 20.06,
"learning_rate": 2.5e-07,
"loss": 1.273,
"step": 652
},
{
"epoch": 20.09,
"learning_rate": 2.5e-07,
"loss": 1.2252,
"step": 653
},
{
"epoch": 20.12,
"learning_rate": 2.5e-07,
"loss": 1.2582,
"step": 654
},
{
"epoch": 20.15,
"learning_rate": 2.5e-07,
"loss": 1.2634,
"step": 655
},
{
"epoch": 20.18,
"learning_rate": 2.5e-07,
"loss": 1.2168,
"step": 656
},
{
"epoch": 20.22,
"learning_rate": 2.5e-07,
"loss": 1.2868,
"step": 657
},
{
"epoch": 20.25,
"learning_rate": 2.5e-07,
"loss": 1.2229,
"step": 658
},
{
"epoch": 20.28,
"learning_rate": 2.5e-07,
"loss": 1.0799,
"step": 659
},
{
"epoch": 20.31,
"learning_rate": 2.5e-07,
"loss": 1.187,
"step": 660
},
{
"epoch": 20.34,
"learning_rate": 2.5e-07,
"loss": 1.3681,
"step": 661
},
{
"epoch": 20.37,
"learning_rate": 2.5e-07,
"loss": 1.1951,
"step": 662
},
{
"epoch": 20.4,
"learning_rate": 2.5e-07,
"loss": 1.0643,
"step": 663
},
{
"epoch": 20.43,
"learning_rate": 2.5e-07,
"loss": 1.2073,
"step": 664
},
{
"epoch": 20.46,
"learning_rate": 2.5e-07,
"loss": 1.2797,
"step": 665
},
{
"epoch": 20.49,
"learning_rate": 2.5e-07,
"loss": 1.3999,
"step": 666
},
{
"epoch": 20.52,
"learning_rate": 2.5e-07,
"loss": 1.2501,
"step": 667
},
{
"epoch": 20.55,
"learning_rate": 2.5e-07,
"loss": 1.2163,
"step": 668
},
{
"epoch": 20.58,
"learning_rate": 2.5e-07,
"loss": 1.3067,
"step": 669
},
{
"epoch": 20.62,
"learning_rate": 2.5e-07,
"loss": 1.1854,
"step": 670
},
{
"epoch": 20.65,
"learning_rate": 2.5e-07,
"loss": 1.1188,
"step": 671
},
{
"epoch": 20.68,
"learning_rate": 2.5e-07,
"loss": 1.2344,
"step": 672
},
{
"epoch": 20.71,
"learning_rate": 2.5e-07,
"loss": 1.2431,
"step": 673
},
{
"epoch": 20.74,
"learning_rate": 2.5e-07,
"loss": 1.1848,
"step": 674
},
{
"epoch": 20.77,
"learning_rate": 2.5e-07,
"loss": 1.3042,
"step": 675
},
{
"epoch": 20.8,
"learning_rate": 2.5e-07,
"loss": 1.121,
"step": 676
},
{
"epoch": 20.83,
"learning_rate": 2.5e-07,
"loss": 1.1777,
"step": 677
},
{
"epoch": 20.86,
"learning_rate": 2.5e-07,
"loss": 1.2183,
"step": 678
},
{
"epoch": 20.89,
"learning_rate": 2.5e-07,
"loss": 1.1327,
"step": 679
},
{
"epoch": 20.92,
"learning_rate": 2.5e-07,
"loss": 1.1136,
"step": 680
},
{
"epoch": 20.95,
"learning_rate": 2.5e-07,
"loss": 1.2761,
"step": 681
},
{
"epoch": 20.98,
"learning_rate": 2.5e-07,
"loss": 1.079,
"step": 682
},
{
"epoch": 21.02,
"learning_rate": 2.5e-07,
"loss": 1.2263,
"step": 683
},
{
"epoch": 21.05,
"learning_rate": 2.5e-07,
"loss": 1.2507,
"step": 684
},
{
"epoch": 21.08,
"learning_rate": 2.5e-07,
"loss": 1.1574,
"step": 685
},
{
"epoch": 21.11,
"learning_rate": 2.5e-07,
"loss": 1.0829,
"step": 686
},
{
"epoch": 21.14,
"learning_rate": 2.5e-07,
"loss": 1.2645,
"step": 687
},
{
"epoch": 21.17,
"learning_rate": 2.5e-07,
"loss": 1.2632,
"step": 688
},
{
"epoch": 21.2,
"learning_rate": 2.5e-07,
"loss": 1.2332,
"step": 689
},
{
"epoch": 21.23,
"learning_rate": 2.5e-07,
"loss": 1.2069,
"step": 690
},
{
"epoch": 21.26,
"learning_rate": 2.5e-07,
"loss": 1.2087,
"step": 691
},
{
"epoch": 21.29,
"learning_rate": 2.5e-07,
"loss": 1.1768,
"step": 692
},
{
"epoch": 21.32,
"learning_rate": 2.5e-07,
"loss": 1.0126,
"step": 693
},
{
"epoch": 21.35,
"learning_rate": 2.5e-07,
"loss": 1.3168,
"step": 694
},
{
"epoch": 21.38,
"learning_rate": 2.5e-07,
"loss": 1.0629,
"step": 695
},
{
"epoch": 21.42,
"learning_rate": 2.5e-07,
"loss": 1.1156,
"step": 696
},
{
"epoch": 21.45,
"learning_rate": 2.5e-07,
"loss": 1.2308,
"step": 697
},
{
"epoch": 21.48,
"learning_rate": 2.5e-07,
"loss": 1.2688,
"step": 698
},
{
"epoch": 21.51,
"learning_rate": 2.5e-07,
"loss": 1.1114,
"step": 699
},
{
"epoch": 21.54,
"learning_rate": 2.5e-07,
"loss": 1.0432,
"step": 700
},
{
"epoch": 21.57,
"learning_rate": 2.5e-07,
"loss": 1.2938,
"step": 701
},
{
"epoch": 21.6,
"learning_rate": 2.5e-07,
"loss": 1.2673,
"step": 702
},
{
"epoch": 21.63,
"learning_rate": 2.5e-07,
"loss": 1.2795,
"step": 703
},
{
"epoch": 21.66,
"learning_rate": 2.5e-07,
"loss": 1.1992,
"step": 704
},
{
"epoch": 21.69,
"learning_rate": 2.5e-07,
"loss": 1.2114,
"step": 705
},
{
"epoch": 21.72,
"learning_rate": 2.5e-07,
"loss": 1.1559,
"step": 706
},
{
"epoch": 21.75,
"learning_rate": 2.5e-07,
"loss": 1.1923,
"step": 707
},
{
"epoch": 21.78,
"learning_rate": 2.5e-07,
"loss": 1.135,
"step": 708
},
{
"epoch": 21.82,
"learning_rate": 2.5e-07,
"loss": 1.1937,
"step": 709
},
{
"epoch": 21.85,
"learning_rate": 2.5e-07,
"loss": 1.3328,
"step": 710
},
{
"epoch": 21.88,
"learning_rate": 2.5e-07,
"loss": 1.2513,
"step": 711
},
{
"epoch": 21.91,
"learning_rate": 2.5e-07,
"loss": 1.197,
"step": 712
},
{
"epoch": 21.94,
"learning_rate": 2.5e-07,
"loss": 1.0483,
"step": 713
},
{
"epoch": 21.97,
"learning_rate": 2.5e-07,
"loss": 1.3072,
"step": 714
},
{
"epoch": 22.0,
"learning_rate": 2.5e-07,
"loss": 1.2929,
"step": 715
}
],
"logging_steps": 1,
"max_steps": 1600,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 65,
"total_flos": 1.1332584807727104e+17,
"trial_name": null,
"trial_params": null
}
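
For reference, a minimal sketch of how the log above could be inspected, assuming the file is saved locally as `trainer_state.json` and matplotlib is available; the file name and plotting choices are illustrative and not part of the upload itself:

```python
# Minimal sketch (assumptions: file saved as trainer_state.json, matplotlib installed).
# Loads the Trainer log_history and plots training loss vs. eval loss by global step.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train_points), label="train loss")
plt.plot(*zip(*eval_points), marker="o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")
```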