{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.965045592705167,
"eval_steps": 500,
"global_step": 328,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 1e-05,
"loss": 1.897,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 2e-05,
"loss": 1.9202,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 3e-05,
"loss": 1.9071,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 4e-05,
"loss": 1.9712,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 5e-05,
"loss": 2.0125,
"step": 5
},
{
"epoch": 0.04,
"learning_rate": 6e-05,
"loss": 1.8839,
"step": 6
},
{
"epoch": 0.04,
"learning_rate": 7e-05,
"loss": 1.9586,
"step": 7
},
{
"epoch": 0.05,
"learning_rate": 8e-05,
"loss": 1.9625,
"step": 8
},
{
"epoch": 0.05,
"learning_rate": 9e-05,
"loss": 1.9269,
"step": 9
},
{
"epoch": 0.06,
"learning_rate": 0.0001,
"loss": 1.9005,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 9.999756004407229e-05,
"loss": 1.857,
"step": 11
},
{
"epoch": 0.07,
"learning_rate": 9.999024041442456e-05,
"loss": 1.9072,
"step": 12
},
{
"epoch": 0.08,
"learning_rate": 9.997804182543973e-05,
"loss": 1.7945,
"step": 13
},
{
"epoch": 0.09,
"learning_rate": 9.99609654676786e-05,
"loss": 1.8496,
"step": 14
},
{
"epoch": 0.09,
"learning_rate": 9.993901300776359e-05,
"loss": 1.8275,
"step": 15
},
{
"epoch": 0.1,
"learning_rate": 9.991218658821608e-05,
"loss": 1.8701,
"step": 16
},
{
"epoch": 0.1,
"learning_rate": 9.988048882724732e-05,
"loss": 1.8594,
"step": 17
},
{
"epoch": 0.11,
"learning_rate": 9.984392281850293e-05,
"loss": 1.8669,
"step": 18
},
{
"epoch": 0.12,
"learning_rate": 9.980249213076084e-05,
"loss": 1.8022,
"step": 19
},
{
"epoch": 0.12,
"learning_rate": 9.97562008075832e-05,
"loss": 1.8139,
"step": 20
},
{
"epoch": 0.13,
"learning_rate": 9.970505336692153e-05,
"loss": 1.8775,
"step": 21
},
{
"epoch": 0.13,
"learning_rate": 9.964905480067586e-05,
"loss": 1.7862,
"step": 22
},
{
"epoch": 0.14,
"learning_rate": 9.958821057420754e-05,
"loss": 1.8414,
"step": 23
},
{
"epoch": 0.15,
"learning_rate": 9.952252662580579e-05,
"loss": 1.7446,
"step": 24
},
{
"epoch": 0.15,
"learning_rate": 9.94520093661082e-05,
"loss": 1.8705,
"step": 25
},
{
"epoch": 0.16,
"learning_rate": 9.937666567747501e-05,
"loss": 1.822,
"step": 26
},
{
"epoch": 0.16,
"learning_rate": 9.92965029133174e-05,
"loss": 1.9357,
"step": 27
},
{
"epoch": 0.17,
"learning_rate": 9.921152889737984e-05,
"loss": 1.7958,
"step": 28
},
{
"epoch": 0.18,
"learning_rate": 9.912175192297648e-05,
"loss": 1.7557,
"step": 29
},
{
"epoch": 0.18,
"learning_rate": 9.902718075218176e-05,
"loss": 1.8138,
"step": 30
},
{
"epoch": 0.19,
"learning_rate": 9.89278246149752e-05,
"loss": 1.7865,
"step": 31
},
{
"epoch": 0.19,
"learning_rate": 9.882369320834069e-05,
"loss": 1.7997,
"step": 32
},
{
"epoch": 0.2,
"learning_rate": 9.87147966953199e-05,
"loss": 1.7534,
"step": 33
},
{
"epoch": 0.21,
"learning_rate": 9.860114570402054e-05,
"loss": 1.7092,
"step": 34
},
{
"epoch": 0.21,
"learning_rate": 9.848275132657903e-05,
"loss": 1.7261,
"step": 35
},
{
"epoch": 0.22,
"learning_rate": 9.835962511807786e-05,
"loss": 1.7827,
"step": 36
},
{
"epoch": 0.22,
"learning_rate": 9.823177909541794e-05,
"loss": 1.7948,
"step": 37
},
{
"epoch": 0.23,
"learning_rate": 9.809922573614569e-05,
"loss": 1.8275,
"step": 38
},
{
"epoch": 0.24,
"learning_rate": 9.796197797723532e-05,
"loss": 1.7533,
"step": 39
},
{
"epoch": 0.24,
"learning_rate": 9.782004921382612e-05,
"loss": 1.7555,
"step": 40
},
{
"epoch": 0.25,
"learning_rate": 9.767345329791522e-05,
"loss": 1.8018,
"step": 41
},
{
"epoch": 0.26,
"learning_rate": 9.752220453700556e-05,
"loss": 1.8107,
"step": 42
},
{
"epoch": 0.26,
"learning_rate": 9.736631769270957e-05,
"loss": 1.7941,
"step": 43
},
{
"epoch": 0.27,
"learning_rate": 9.720580797930845e-05,
"loss": 1.8829,
"step": 44
},
{
"epoch": 0.27,
"learning_rate": 9.704069106226727e-05,
"loss": 1.8041,
"step": 45
},
{
"epoch": 0.28,
"learning_rate": 9.687098305670605e-05,
"loss": 1.77,
"step": 46
},
{
"epoch": 0.29,
"learning_rate": 9.669670052582695e-05,
"loss": 1.7547,
"step": 47
},
{
"epoch": 0.29,
"learning_rate": 9.651786047929773e-05,
"loss": 1.7594,
"step": 48
},
{
"epoch": 0.3,
"learning_rate": 9.633448037159167e-05,
"loss": 1.7076,
"step": 49
},
{
"epoch": 0.3,
"learning_rate": 9.614657810028402e-05,
"loss": 1.786,
"step": 50
},
{
"epoch": 0.31,
"learning_rate": 9.595417200430516e-05,
"loss": 1.7076,
"step": 51
},
{
"epoch": 0.32,
"learning_rate": 9.575728086215092e-05,
"loss": 1.7508,
"step": 52
},
{
"epoch": 0.32,
"learning_rate": 9.555592389004966e-05,
"loss": 1.7979,
"step": 53
},
{
"epoch": 0.33,
"learning_rate": 9.535012074008687e-05,
"loss": 1.7075,
"step": 54
},
{
"epoch": 0.33,
"learning_rate": 9.513989149828718e-05,
"loss": 1.7828,
"step": 55
},
{
"epoch": 0.34,
"learning_rate": 9.492525668265399e-05,
"loss": 1.8179,
"step": 56
},
{
"epoch": 0.35,
"learning_rate": 9.470623724116692e-05,
"loss": 1.7802,
"step": 57
},
{
"epoch": 0.35,
"learning_rate": 9.448285454973738e-05,
"loss": 1.7873,
"step": 58
},
{
"epoch": 0.36,
"learning_rate": 9.425513041012219e-05,
"loss": 1.7315,
"step": 59
},
{
"epoch": 0.36,
"learning_rate": 9.402308704779599e-05,
"loss": 1.7953,
"step": 60
},
{
"epoch": 0.37,
"learning_rate": 9.378674710978185e-05,
"loss": 1.7946,
"step": 61
},
{
"epoch": 0.38,
"learning_rate": 9.354613366244108e-05,
"loss": 1.7543,
"step": 62
},
{
"epoch": 0.38,
"learning_rate": 9.330127018922194e-05,
"loss": 1.7302,
"step": 63
},
{
"epoch": 0.39,
"learning_rate": 9.305218058836778e-05,
"loss": 1.8224,
"step": 64
},
{
"epoch": 0.4,
"learning_rate": 9.279888917058452e-05,
"loss": 1.757,
"step": 65
},
{
"epoch": 0.4,
"learning_rate": 9.254142065666801e-05,
"loss": 1.7506,
"step": 66
},
{
"epoch": 0.41,
"learning_rate": 9.22798001750913e-05,
"loss": 1.7523,
"step": 67
},
{
"epoch": 0.41,
"learning_rate": 9.201405325955221e-05,
"loss": 1.7923,
"step": 68
},
{
"epoch": 0.42,
"learning_rate": 9.174420584648123e-05,
"loss": 1.7417,
"step": 69
},
{
"epoch": 0.43,
"learning_rate": 9.14702842725101e-05,
"loss": 1.8008,
"step": 70
},
{
"epoch": 0.43,
"learning_rate": 9.119231527190158e-05,
"loss": 1.7204,
"step": 71
},
{
"epoch": 0.44,
"learning_rate": 9.091032597394012e-05,
"loss": 1.7863,
"step": 72
},
{
"epoch": 0.44,
"learning_rate": 9.062434390028407e-05,
"loss": 1.7512,
"step": 73
},
{
"epoch": 0.45,
"learning_rate": 9.033439696227965e-05,
"loss": 1.8159,
"step": 74
},
{
"epoch": 0.46,
"learning_rate": 9.004051345823689e-05,
"loss": 1.7654,
"step": 75
},
{
"epoch": 0.46,
"learning_rate": 8.974272207066767e-05,
"loss": 1.712,
"step": 76
},
{
"epoch": 0.47,
"learning_rate": 8.944105186348646e-05,
"loss": 1.7975,
"step": 77
},
{
"epoch": 0.47,
"learning_rate": 8.913553227917367e-05,
"loss": 1.7364,
"step": 78
},
{
"epoch": 0.48,
"learning_rate": 8.882619313590212e-05,
"loss": 1.7615,
"step": 79
},
{
"epoch": 0.49,
"learning_rate": 8.851306462462688e-05,
"loss": 1.6968,
"step": 80
},
{
"epoch": 0.49,
"learning_rate": 8.819617730613862e-05,
"loss": 1.7455,
"step": 81
},
{
"epoch": 0.5,
"learning_rate": 8.787556210808101e-05,
"loss": 1.8118,
"step": 82
},
{
"epoch": 0.5,
"learning_rate": 8.755125032193214e-05,
"loss": 1.7766,
"step": 83
},
{
"epoch": 0.51,
"learning_rate": 8.722327359995064e-05,
"loss": 1.7388,
"step": 84
},
{
"epoch": 0.52,
"learning_rate": 8.689166395208636e-05,
"loss": 1.6951,
"step": 85
},
{
"epoch": 0.52,
"learning_rate": 8.655645374285637e-05,
"loss": 1.8524,
"step": 86
},
{
"epoch": 0.53,
"learning_rate": 8.621767568818613e-05,
"loss": 1.8439,
"step": 87
},
{
"epoch": 0.53,
"learning_rate": 8.587536285221656e-05,
"loss": 1.7917,
"step": 88
},
{
"epoch": 0.54,
"learning_rate": 8.552954864407699e-05,
"loss": 1.7381,
"step": 89
},
{
"epoch": 0.55,
"learning_rate": 8.518026681462448e-05,
"loss": 1.7198,
"step": 90
},
{
"epoch": 0.55,
"learning_rate": 8.482755145314986e-05,
"loss": 1.7388,
"step": 91
},
{
"epoch": 0.56,
"learning_rate": 8.44714369840506e-05,
"loss": 1.7147,
"step": 92
},
{
"epoch": 0.57,
"learning_rate": 8.41119581634711e-05,
"loss": 1.7247,
"step": 93
},
{
"epoch": 0.57,
"learning_rate": 8.374915007591053e-05,
"loss": 1.7975,
"step": 94
},
{
"epoch": 0.58,
"learning_rate": 8.338304813079865e-05,
"loss": 1.6963,
"step": 95
},
{
"epoch": 0.58,
"learning_rate": 8.301368805903988e-05,
"loss": 1.7466,
"step": 96
},
{
"epoch": 0.59,
"learning_rate": 8.264110590952609e-05,
"loss": 1.8162,
"step": 97
},
{
"epoch": 0.6,
"learning_rate": 8.226533804561827e-05,
"loss": 1.8318,
"step": 98
},
{
"epoch": 0.6,
"learning_rate": 8.188642114159747e-05,
"loss": 1.8107,
"step": 99
},
{
"epoch": 0.61,
"learning_rate": 8.150439217908556e-05,
"loss": 1.7286,
"step": 100
},
{
"epoch": 0.61,
"learning_rate": 8.11192884434358e-05,
"loss": 1.8096,
"step": 101
},
{
"epoch": 0.62,
"learning_rate": 8.073114752009387e-05,
"loss": 1.7905,
"step": 102
},
{
"epoch": 0.63,
"learning_rate": 8.034000729092968e-05,
"loss": 1.8094,
"step": 103
},
{
"epoch": 0.63,
"learning_rate": 7.994590593054001e-05,
"loss": 1.7906,
"step": 104
},
{
"epoch": 0.64,
"learning_rate": 7.954888190252292e-05,
"loss": 1.8019,
"step": 105
},
{
"epoch": 0.64,
"learning_rate": 7.91489739557236e-05,
"loss": 1.7756,
"step": 106
},
{
"epoch": 0.65,
"learning_rate": 7.874622112045269e-05,
"loss": 1.7668,
"step": 107
},
{
"epoch": 0.66,
"learning_rate": 7.83406627046769e-05,
"loss": 1.7509,
"step": 108
},
{
"epoch": 0.66,
"learning_rate": 7.793233829018262e-05,
"loss": 1.8472,
"step": 109
},
{
"epoch": 0.67,
"learning_rate": 7.752128772871292e-05,
"loss": 1.7917,
"step": 110
},
{
"epoch": 0.67,
"learning_rate": 7.710755113807794e-05,
"loss": 1.7408,
"step": 111
},
{
"epoch": 0.68,
"learning_rate": 7.669116889823955e-05,
"loss": 1.6748,
"step": 112
},
{
"epoch": 0.69,
"learning_rate": 7.627218164737031e-05,
"loss": 1.8016,
"step": 113
},
{
"epoch": 0.69,
"learning_rate": 7.585063027788731e-05,
"loss": 1.7302,
"step": 114
},
{
"epoch": 0.7,
"learning_rate": 7.542655593246103e-05,
"loss": 1.735,
"step": 115
},
{
"epoch": 0.71,
"learning_rate": 7.500000000000001e-05,
"loss": 1.777,
"step": 116
},
{
"epoch": 0.71,
"learning_rate": 7.457100411161128e-05,
"loss": 1.7049,
"step": 117
},
{
"epoch": 0.72,
"learning_rate": 7.413961013653726e-05,
"loss": 1.826,
"step": 118
},
{
"epoch": 0.72,
"learning_rate": 7.370586017806942e-05,
"loss": 1.7539,
"step": 119
},
{
"epoch": 0.73,
"learning_rate": 7.326979656943906e-05,
"loss": 1.6597,
"step": 120
},
{
"epoch": 0.74,
"learning_rate": 7.283146186968565e-05,
"loss": 1.7977,
"step": 121
},
{
"epoch": 0.74,
"learning_rate": 7.239089885950316e-05,
"loss": 1.7501,
"step": 122
},
{
"epoch": 0.75,
"learning_rate": 7.19481505370647e-05,
"loss": 1.7521,
"step": 123
},
{
"epoch": 0.75,
"learning_rate": 7.150326011382604e-05,
"loss": 1.7778,
"step": 124
},
{
"epoch": 0.76,
"learning_rate": 7.105627101030817e-05,
"loss": 1.7793,
"step": 125
},
{
"epoch": 0.77,
"learning_rate": 7.060722685185961e-05,
"loss": 1.8148,
"step": 126
},
{
"epoch": 0.77,
"learning_rate": 7.015617146439863e-05,
"loss": 1.7838,
"step": 127
},
{
"epoch": 0.78,
"learning_rate": 6.970314887013584e-05,
"loss": 1.796,
"step": 128
},
{
"epoch": 0.78,
"learning_rate": 6.924820328327786e-05,
"loss": 1.832,
"step": 129
},
{
"epoch": 0.79,
"learning_rate": 6.879137910571191e-05,
"loss": 1.7494,
"step": 130
},
{
"epoch": 0.8,
"learning_rate": 6.833272092267241e-05,
"loss": 1.7762,
"step": 131
},
{
"epoch": 0.8,
"learning_rate": 6.787227349838947e-05,
"loss": 1.7136,
"step": 132
},
{
"epoch": 0.81,
"learning_rate": 6.741008177171995e-05,
"loss": 1.7987,
"step": 133
},
{
"epoch": 0.81,
"learning_rate": 6.694619085176159e-05,
"loss": 1.7855,
"step": 134
},
{
"epoch": 0.82,
"learning_rate": 6.64806460134504e-05,
"loss": 1.801,
"step": 135
},
{
"epoch": 0.83,
"learning_rate": 6.601349269314188e-05,
"loss": 1.6862,
"step": 136
},
{
"epoch": 0.83,
"learning_rate": 6.554477648417657e-05,
"loss": 1.7124,
"step": 137
},
{
"epoch": 0.84,
"learning_rate": 6.507454313243015e-05,
"loss": 1.7362,
"step": 138
},
{
"epoch": 0.84,
"learning_rate": 6.460283853184879e-05,
"loss": 1.6903,
"step": 139
},
{
"epoch": 0.85,
"learning_rate": 6.412970871996995e-05,
"loss": 1.7153,
"step": 140
},
{
"epoch": 0.86,
"learning_rate": 6.365519987342917e-05,
"loss": 1.7335,
"step": 141
},
{
"epoch": 0.86,
"learning_rate": 6.317935830345338e-05,
"loss": 1.7567,
"step": 142
},
{
"epoch": 0.87,
"learning_rate": 6.270223045134096e-05,
"loss": 1.7885,
"step": 143
},
{
"epoch": 0.88,
"learning_rate": 6.222386288392913e-05,
"loss": 1.8316,
"step": 144
},
{
"epoch": 0.88,
"learning_rate": 6.174430228904919e-05,
"loss": 1.7411,
"step": 145
},
{
"epoch": 0.89,
"learning_rate": 6.126359547096975e-05,
"loss": 1.7384,
"step": 146
},
{
"epoch": 0.89,
"learning_rate": 6.078178934582885e-05,
"loss": 1.7717,
"step": 147
},
{
"epoch": 0.9,
"learning_rate": 6.029893093705492e-05,
"loss": 1.7984,
"step": 148
},
{
"epoch": 0.91,
"learning_rate": 5.981506737077744e-05,
"loss": 1.7875,
"step": 149
},
{
"epoch": 0.91,
"learning_rate": 5.9330245871227454e-05,
"loss": 1.762,
"step": 150
},
{
"epoch": 0.92,
"learning_rate": 5.884451375612865e-05,
"loss": 1.7517,
"step": 151
},
{
"epoch": 0.92,
"learning_rate": 5.835791843207916e-05,
"loss": 1.7745,
"step": 152
},
{
"epoch": 0.93,
"learning_rate": 5.787050738992482e-05,
"loss": 1.6965,
"step": 153
},
{
"epoch": 0.94,
"learning_rate": 5.738232820012407e-05,
"loss": 1.7892,
"step": 154
},
{
"epoch": 0.94,
"learning_rate": 5.6893428508105225e-05,
"loss": 1.7803,
"step": 155
},
{
"epoch": 0.95,
"learning_rate": 5.640385602961634e-05,
"loss": 1.7592,
"step": 156
},
{
"epoch": 0.95,
"learning_rate": 5.5913658546068295e-05,
"loss": 1.8219,
"step": 157
},
{
"epoch": 0.96,
"learning_rate": 5.5422883899871284e-05,
"loss": 1.8626,
"step": 158
},
{
"epoch": 0.97,
"learning_rate": 5.493157998976559e-05,
"loss": 1.7252,
"step": 159
},
{
"epoch": 0.97,
"learning_rate": 5.4439794766146746e-05,
"loss": 1.6911,
"step": 160
},
{
"epoch": 0.98,
"learning_rate": 5.39475762263856e-05,
"loss": 1.8046,
"step": 161
},
{
"epoch": 0.98,
"learning_rate": 5.34549724101439e-05,
"loss": 1.7559,
"step": 162
},
{
"epoch": 0.99,
"learning_rate": 5.296203139468572e-05,
"loss": 1.8943,
"step": 163
},
{
"epoch": 1.0,
"learning_rate": 5.246880129018516e-05,
"loss": 1.7772,
"step": 164
},
{
"epoch": 1.0,
"learning_rate": 5.197533023503089e-05,
"loss": 1.7057,
"step": 165
},
{
"epoch": 1.01,
"learning_rate": 5.148166639112799e-05,
"loss": 1.6976,
"step": 166
},
{
"epoch": 1.02,
"learning_rate": 5.0987857939197324e-05,
"loss": 1.8151,
"step": 167
},
{
"epoch": 1.02,
"learning_rate": 5.049395307407329e-05,
"loss": 1.7286,
"step": 168
},
{
"epoch": 1.03,
"learning_rate": 5e-05,
"loss": 1.782,
"step": 169
},
{
"epoch": 1.0,
"learning_rate": 4.950604692592672e-05,
"loss": 1.7561,
"step": 170
},
{
"epoch": 1.01,
"learning_rate": 4.901214206080269e-05,
"loss": 1.6331,
"step": 171
},
{
"epoch": 1.02,
"learning_rate": 4.851833360887201e-05,
"loss": 1.6803,
"step": 172
},
{
"epoch": 1.02,
"learning_rate": 4.802466976496911e-05,
"loss": 1.7244,
"step": 173
},
{
"epoch": 1.03,
"learning_rate": 4.7531198709814854e-05,
"loss": 1.7924,
"step": 174
},
{
"epoch": 1.03,
"learning_rate": 4.703796860531429e-05,
"loss": 1.7374,
"step": 175
},
{
"epoch": 1.04,
"learning_rate": 4.654502758985611e-05,
"loss": 1.6847,
"step": 176
},
{
"epoch": 1.05,
"learning_rate": 4.6052423773614404e-05,
"loss": 1.7498,
"step": 177
},
{
"epoch": 1.05,
"learning_rate": 4.5560205233853266e-05,
"loss": 1.8711,
"step": 178
},
{
"epoch": 1.06,
"learning_rate": 4.506842001023442e-05,
"loss": 1.7231,
"step": 179
},
{
"epoch": 1.07,
"learning_rate": 4.4577116100128735e-05,
"loss": 1.7341,
"step": 180
},
{
"epoch": 1.07,
"learning_rate": 4.4086341453931716e-05,
"loss": 1.7211,
"step": 181
},
{
"epoch": 1.08,
"learning_rate": 4.3596143970383664e-05,
"loss": 1.6533,
"step": 182
},
{
"epoch": 1.08,
"learning_rate": 4.3106571491894786e-05,
"loss": 1.8128,
"step": 183
},
{
"epoch": 1.09,
"learning_rate": 4.2617671799875944e-05,
"loss": 1.7778,
"step": 184
},
{
"epoch": 1.1,
"learning_rate": 4.212949261007519e-05,
"loss": 1.6615,
"step": 185
},
{
"epoch": 1.1,
"learning_rate": 4.1642081567920846e-05,
"loss": 1.8061,
"step": 186
},
{
"epoch": 1.11,
"learning_rate": 4.115548624387137e-05,
"loss": 1.78,
"step": 187
},
{
"epoch": 1.11,
"learning_rate": 4.066975412877255e-05,
"loss": 1.7801,
"step": 188
},
{
"epoch": 1.12,
"learning_rate": 4.0184932629222575e-05,
"loss": 1.7524,
"step": 189
},
{
"epoch": 1.13,
"learning_rate": 3.970106906294509e-05,
"loss": 1.7623,
"step": 190
},
{
"epoch": 1.13,
"learning_rate": 3.921821065417116e-05,
"loss": 1.7811,
"step": 191
},
{
"epoch": 1.14,
"learning_rate": 3.873640452903026e-05,
"loss": 1.7596,
"step": 192
},
{
"epoch": 1.14,
"learning_rate": 3.825569771095082e-05,
"loss": 1.7775,
"step": 193
},
{
"epoch": 1.15,
"learning_rate": 3.777613711607087e-05,
"loss": 1.7089,
"step": 194
},
{
"epoch": 1.16,
"learning_rate": 3.729776954865905e-05,
"loss": 1.7586,
"step": 195
},
{
"epoch": 1.16,
"learning_rate": 3.682064169654663e-05,
"loss": 1.7708,
"step": 196
},
{
"epoch": 1.17,
"learning_rate": 3.6344800126570844e-05,
"loss": 1.7585,
"step": 197
},
{
"epoch": 1.17,
"learning_rate": 3.587029128003006e-05,
"loss": 1.8535,
"step": 198
},
{
"epoch": 1.18,
"learning_rate": 3.539716146815122e-05,
"loss": 1.7022,
"step": 199
},
{
"epoch": 1.19,
"learning_rate": 3.492545686756986e-05,
"loss": 1.8033,
"step": 200
},
{
"epoch": 1.19,
"learning_rate": 3.4455223515823446e-05,
"loss": 1.7032,
"step": 201
},
{
"epoch": 1.2,
"learning_rate": 3.3986507306858125e-05,
"loss": 1.7893,
"step": 202
},
{
"epoch": 1.21,
"learning_rate": 3.351935398654961e-05,
"loss": 1.8007,
"step": 203
},
{
"epoch": 1.21,
"learning_rate": 3.3053809148238426e-05,
"loss": 1.6865,
"step": 204
},
{
"epoch": 1.22,
"learning_rate": 3.258991822828007e-05,
"loss": 1.7272,
"step": 205
},
{
"epoch": 1.22,
"learning_rate": 3.212772650161056e-05,
"loss": 1.6851,
"step": 206
},
{
"epoch": 1.23,
"learning_rate": 3.16672790773276e-05,
"loss": 1.6703,
"step": 207
},
{
"epoch": 1.24,
"learning_rate": 3.12086208942881e-05,
"loss": 1.7194,
"step": 208
},
{
"epoch": 1.24,
"learning_rate": 3.075179671672216e-05,
"loss": 1.6898,
"step": 209
},
{
"epoch": 1.25,
"learning_rate": 3.0296851129864168e-05,
"loss": 1.6836,
"step": 210
},
{
"epoch": 1.25,
"learning_rate": 2.98438285356014e-05,
"loss": 1.6886,
"step": 211
},
{
"epoch": 1.26,
"learning_rate": 2.9392773148140408e-05,
"loss": 1.6708,
"step": 212
},
{
"epoch": 1.27,
"learning_rate": 2.894372898969186e-05,
"loss": 1.6964,
"step": 213
},
{
"epoch": 1.27,
"learning_rate": 2.8496739886173995e-05,
"loss": 1.639,
"step": 214
},
{
"epoch": 1.28,
"learning_rate": 2.805184946293532e-05,
"loss": 1.742,
"step": 215
},
{
"epoch": 1.28,
"learning_rate": 2.7609101140496863e-05,
"loss": 1.758,
"step": 216
},
{
"epoch": 1.29,
"learning_rate": 2.716853813031435e-05,
"loss": 1.6614,
"step": 217
},
{
"epoch": 1.3,
"learning_rate": 2.6730203430560947e-05,
"loss": 1.7814,
"step": 218
},
{
"epoch": 1.3,
"learning_rate": 2.6294139821930597e-05,
"loss": 1.7335,
"step": 219
},
{
"epoch": 1.31,
"learning_rate": 2.5860389863462765e-05,
"loss": 1.7071,
"step": 220
},
{
"epoch": 1.31,
"learning_rate": 2.542899588838875e-05,
"loss": 1.7433,
"step": 221
},
{
"epoch": 1.32,
"learning_rate": 2.500000000000001e-05,
"loss": 1.7727,
"step": 222
},
{
"epoch": 1.33,
"learning_rate": 2.4573444067538986e-05,
"loss": 1.6221,
"step": 223
},
{
"epoch": 1.33,
"learning_rate": 2.414936972211272e-05,
"loss": 1.7824,
"step": 224
},
{
"epoch": 1.34,
"learning_rate": 2.3727818352629712e-05,
"loss": 1.832,
"step": 225
},
{
"epoch": 1.34,
"learning_rate": 2.3308831101760486e-05,
"loss": 1.6692,
"step": 226
},
{
"epoch": 1.35,
"learning_rate": 2.289244886192207e-05,
"loss": 1.7652,
"step": 227
},
{
"epoch": 1.36,
"learning_rate": 2.247871227128709e-05,
"loss": 1.7668,
"step": 228
},
{
"epoch": 1.36,
"learning_rate": 2.2067661709817383e-05,
"loss": 1.7603,
"step": 229
},
{
"epoch": 1.37,
"learning_rate": 2.1659337295323118e-05,
"loss": 1.7214,
"step": 230
},
{
"epoch": 1.38,
"learning_rate": 2.125377887954732e-05,
"loss": 1.664,
"step": 231
},
{
"epoch": 1.38,
"learning_rate": 2.0851026044276406e-05,
"loss": 1.7438,
"step": 232
},
{
"epoch": 1.39,
"learning_rate": 2.0451118097477094e-05,
"loss": 1.7077,
"step": 233
},
{
"epoch": 1.39,
"learning_rate": 2.005409406946e-05,
"loss": 1.6965,
"step": 234
},
{
"epoch": 1.4,
"learning_rate": 1.9659992709070345e-05,
"loss": 1.7149,
"step": 235
},
{
"epoch": 1.41,
"learning_rate": 1.9268852479906147e-05,
"loss": 1.7308,
"step": 236
},
{
"epoch": 1.41,
"learning_rate": 1.888071155656421e-05,
"loss": 1.8064,
"step": 237
},
{
"epoch": 1.42,
"learning_rate": 1.849560782091445e-05,
"loss": 1.7866,
"step": 238
},
{
"epoch": 1.42,
"learning_rate": 1.811357885840254e-05,
"loss": 1.7135,
"step": 239
},
{
"epoch": 1.43,
"learning_rate": 1.7734661954381754e-05,
"loss": 1.7349,
"step": 240
},
{
"epoch": 1.44,
"learning_rate": 1.7358894090473925e-05,
"loss": 1.7935,
"step": 241
},
{
"epoch": 1.44,
"learning_rate": 1.6986311940960147e-05,
"loss": 1.693,
"step": 242
},
{
"epoch": 1.45,
"learning_rate": 1.661695186920138e-05,
"loss": 1.8604,
"step": 243
},
{
"epoch": 1.45,
"learning_rate": 1.6250849924089484e-05,
"loss": 1.741,
"step": 244
},
{
"epoch": 1.46,
"learning_rate": 1.5888041836528915e-05,
"loss": 1.6668,
"step": 245
},
{
"epoch": 1.47,
"learning_rate": 1.552856301594942e-05,
"loss": 1.6838,
"step": 246
},
{
"epoch": 1.47,
"learning_rate": 1.5172448546850165e-05,
"loss": 1.7143,
"step": 247
},
{
"epoch": 1.48,
"learning_rate": 1.4819733185375534e-05,
"loss": 1.6719,
"step": 248
},
{
"epoch": 1.48,
"learning_rate": 1.4470451355923027e-05,
"loss": 1.7879,
"step": 249
},
{
"epoch": 1.49,
"learning_rate": 1.4124637147783432e-05,
"loss": 1.6943,
"step": 250
},
{
"epoch": 1.5,
"learning_rate": 1.378232431181386e-05,
"loss": 1.7382,
"step": 251
},
{
"epoch": 1.5,
"learning_rate": 1.3443546257143624e-05,
"loss": 1.7818,
"step": 252
},
{
"epoch": 1.51,
"learning_rate": 1.3108336047913633e-05,
"loss": 1.6781,
"step": 253
},
{
"epoch": 1.52,
"learning_rate": 1.277672640004936e-05,
"loss": 1.6687,
"step": 254
},
{
"epoch": 1.52,
"learning_rate": 1.2448749678067856e-05,
"loss": 1.7702,
"step": 255
},
{
"epoch": 1.53,
"learning_rate": 1.2124437891918993e-05,
"loss": 1.7324,
"step": 256
},
{
"epoch": 1.53,
"learning_rate": 1.1803822693861378e-05,
"loss": 1.7849,
"step": 257
},
{
"epoch": 1.54,
"learning_rate": 1.1486935375373126e-05,
"loss": 1.6662,
"step": 258
},
{
"epoch": 1.55,
"learning_rate": 1.1173806864097886e-05,
"loss": 1.7473,
"step": 259
},
{
"epoch": 1.55,
"learning_rate": 1.0864467720826343e-05,
"loss": 1.7529,
"step": 260
},
{
"epoch": 1.56,
"learning_rate": 1.0558948136513535e-05,
"loss": 1.7526,
"step": 261
},
{
"epoch": 1.56,
"learning_rate": 1.0257277929332332e-05,
"loss": 1.721,
"step": 262
},
{
"epoch": 1.57,
"learning_rate": 9.959486541763119e-06,
"loss": 1.7076,
"step": 263
},
{
"epoch": 1.58,
"learning_rate": 9.66560303772035e-06,
"loss": 1.7822,
"step": 264
},
{
"epoch": 1.58,
"learning_rate": 9.375656099715934e-06,
"loss": 1.7664,
"step": 265
},
{
"epoch": 1.59,
"learning_rate": 9.08967402605988e-06,
"loss": 1.6733,
"step": 266
},
{
"epoch": 1.59,
"learning_rate": 8.80768472809842e-06,
"loss": 1.717,
"step": 267
},
{
"epoch": 1.6,
"learning_rate": 8.529715727489912e-06,
"loss": 1.7967,
"step": 268
},
{
"epoch": 1.61,
"learning_rate": 8.255794153518798e-06,
"loss": 1.5902,
"step": 269
},
{
"epoch": 1.61,
"learning_rate": 7.985946740447791e-06,
"loss": 1.785,
"step": 270
},
{
"epoch": 1.62,
"learning_rate": 7.720199824908692e-06,
"loss": 1.7182,
"step": 271
},
{
"epoch": 1.62,
"learning_rate": 7.458579343331995e-06,
"loss": 1.7056,
"step": 272
},
{
"epoch": 1.63,
"learning_rate": 7.2011108294154804e-06,
"loss": 1.7588,
"step": 273
},
{
"epoch": 1.64,
"learning_rate": 6.947819411632223e-06,
"loss": 1.6806,
"step": 274
},
{
"epoch": 1.64,
"learning_rate": 6.698729810778065e-06,
"loss": 1.6865,
"step": 275
},
{
"epoch": 1.65,
"learning_rate": 6.45386633755894e-06,
"loss": 1.7063,
"step": 276
},
{
"epoch": 1.66,
"learning_rate": 6.213252890218163e-06,
"loss": 1.6924,
"step": 277
},
{
"epoch": 1.66,
"learning_rate": 5.976912952204017e-06,
"loss": 1.7508,
"step": 278
},
{
"epoch": 1.67,
"learning_rate": 5.74486958987781e-06,
"loss": 1.7623,
"step": 279
},
{
"epoch": 1.67,
"learning_rate": 5.51714545026264e-06,
"loss": 1.7495,
"step": 280
},
{
"epoch": 1.68,
"learning_rate": 5.293762758833071e-06,
"loss": 1.7111,
"step": 281
},
{
"epoch": 1.69,
"learning_rate": 5.074743317346009e-06,
"loss": 1.7635,
"step": 282
},
{
"epoch": 1.69,
"learning_rate": 4.860108501712824e-06,
"loss": 1.6867,
"step": 283
},
{
"epoch": 1.7,
"learning_rate": 4.649879259913137e-06,
"loss": 1.7256,
"step": 284
},
{
"epoch": 1.7,
"learning_rate": 4.4440761099503455e-06,
"loss": 1.7276,
"step": 285
},
{
"epoch": 1.71,
"learning_rate": 4.242719137849077e-06,
"loss": 1.7283,
"step": 286
},
{
"epoch": 1.72,
"learning_rate": 4.045827995694834e-06,
"loss": 1.7619,
"step": 287
},
{
"epoch": 1.72,
"learning_rate": 3.853421899715992e-06,
"loss": 1.7451,
"step": 288
},
{
"epoch": 1.73,
"learning_rate": 3.6655196284083317e-06,
"loss": 1.7501,
"step": 289
},
{
"epoch": 1.73,
"learning_rate": 3.4821395207022766e-06,
"loss": 1.8164,
"step": 290
},
{
"epoch": 1.74,
"learning_rate": 3.303299474173066e-06,
"loss": 1.6923,
"step": 291
},
{
"epoch": 1.75,
"learning_rate": 3.1290169432939553e-06,
"loss": 1.823,
"step": 292
},
{
"epoch": 1.75,
"learning_rate": 2.9593089377327245e-06,
"loss": 1.7789,
"step": 293
},
{
"epoch": 1.76,
"learning_rate": 2.794192020691544e-06,
"loss": 1.732,
"step": 294
},
{
"epoch": 1.76,
"learning_rate": 2.6336823072904304e-06,
"loss": 1.6913,
"step": 295
},
{
"epoch": 1.77,
"learning_rate": 2.4777954629944477e-06,
"loss": 1.8344,
"step": 296
},
{
"epoch": 1.78,
"learning_rate": 2.3265467020847866e-06,
"loss": 1.7282,
"step": 297
},
{
"epoch": 1.78,
"learning_rate": 2.179950786173879e-06,
"loss": 1.715,
"step": 298
},
{
"epoch": 1.79,
"learning_rate": 2.038022022764685e-06,
"loss": 1.6707,
"step": 299
},
{
"epoch": 1.79,
"learning_rate": 1.9007742638543102e-06,
"loss": 1.7191,
"step": 300
},
{
"epoch": 1.8,
"learning_rate": 1.7682209045820686e-06,
"loss": 1.6994,
"step": 301
},
{
"epoch": 1.81,
"learning_rate": 1.6403748819221466e-06,
"loss": 1.7857,
"step": 302
},
{
"epoch": 1.81,
"learning_rate": 1.5172486734209789e-06,
"loss": 1.8173,
"step": 303
},
{
"epoch": 1.82,
"learning_rate": 1.3988542959794627e-06,
"loss": 1.8064,
"step": 304
},
{
"epoch": 1.83,
"learning_rate": 1.2852033046801104e-06,
"loss": 1.741,
"step": 305
},
{
"epoch": 1.83,
"learning_rate": 1.1763067916593262e-06,
"loss": 1.7858,
"step": 306
},
{
"epoch": 1.84,
"learning_rate": 1.0721753850247984e-06,
"loss": 1.6811,
"step": 307
},
{
"epoch": 1.84,
"learning_rate": 9.728192478182574e-07,
"loss": 1.8126,
"step": 308
},
{
"epoch": 1.85,
"learning_rate": 8.782480770235247e-07,
"loss": 1.8005,
"step": 309
},
{
"epoch": 1.86,
"learning_rate": 7.884711026201585e-07,
"loss": 1.8307,
"step": 310
},
{
"epoch": 1.86,
"learning_rate": 7.034970866825974e-07,
"loss": 1.6815,
"step": 311
},
{
"epoch": 1.87,
"learning_rate": 6.233343225249933e-07,
"loss": 1.6576,
"step": 312
},
{
"epoch": 1.87,
"learning_rate": 5.479906338917984e-07,
"loss": 1.647,
"step": 313
},
{
"epoch": 1.88,
"learning_rate": 4.774733741942206e-07,
"loss": 1.698,
"step": 314
},
{
"epoch": 1.89,
"learning_rate": 4.1178942579248036e-07,
"loss": 1.722,
"step": 315
},
{
"epoch": 1.89,
"learning_rate": 3.5094519932415417e-07,
"loss": 1.7361,
"step": 316
},
{
"epoch": 1.9,
"learning_rate": 2.9494663307847447e-07,
"loss": 1.7881,
"step": 317
},
{
"epoch": 1.9,
"learning_rate": 2.437991924167937e-07,
"loss": 1.7061,
"step": 318
},
{
"epoch": 1.91,
"learning_rate": 1.975078692391552e-07,
"loss": 1.8235,
"step": 319
},
{
"epoch": 1.92,
"learning_rate": 1.560771814970885e-07,
"loss": 1.7096,
"step": 320
},
{
"epoch": 1.92,
"learning_rate": 1.195111727526843e-07,
"loss": 1.7408,
"step": 321
},
{
"epoch": 1.93,
"learning_rate": 8.781341178393244e-08,
"loss": 1.6738,
"step": 322
},
{
"epoch": 1.93,
"learning_rate": 6.098699223641702e-08,
"loss": 1.7742,
"step": 323
},
{
"epoch": 1.94,
"learning_rate": 3.9034532321408076e-08,
"loss": 1.6591,
"step": 324
},
{
"epoch": 1.95,
"learning_rate": 2.1958174560282595e-08,
"loss": 1.841,
"step": 325
},
{
"epoch": 1.95,
"learning_rate": 9.75958557545842e-09,
"loss": 1.7205,
"step": 326
},
{
"epoch": 1.96,
"learning_rate": 2.4399559277132888e-09,
"loss": 1.7067,
"step": 327
},
{
"epoch": 1.97,
"learning_rate": 0.0,
"loss": 1.7346,
"step": 328
}
],
"logging_steps": 1,
"max_steps": 328,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 8.91586250599406e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}