croissant_mmlu/checkpoint-275/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 69,
"global_step": 275,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.000000000000001e-06,
"loss": 2.5429,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 2.5242362022399902,
"eval_runtime": 44.0094,
"eval_samples_per_second": 25.676,
"eval_steps_per_second": 1.613,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 8.000000000000001e-06,
"loss": 2.5186,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 1.2e-05,
"loss": 2.5088,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.5423,
"step": 4
},
{
"epoch": 0.02,
"learning_rate": 2e-05,
"loss": 2.5137,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 2.4e-05,
"loss": 2.4817,
"step": 6
},
{
"epoch": 0.03,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.4889,
"step": 7
},
{
"epoch": 0.03,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.4334,
"step": 8
},
{
"epoch": 0.03,
"learning_rate": 3.6e-05,
"loss": 2.4707,
"step": 9
},
{
"epoch": 0.04,
"learning_rate": 4e-05,
"loss": 2.4251,
"step": 10
},
{
"epoch": 0.04,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.4167,
"step": 11
},
{
"epoch": 0.04,
"learning_rate": 4.8e-05,
"loss": 2.4235,
"step": 12
},
{
"epoch": 0.05,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.411,
"step": 13
},
{
"epoch": 0.05,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.4181,
"step": 14
},
{
"epoch": 0.05,
"learning_rate": 6e-05,
"loss": 2.3847,
"step": 15
},
{
"epoch": 0.06,
"learning_rate": 6.400000000000001e-05,
"loss": 2.407,
"step": 16
},
{
"epoch": 0.06,
"learning_rate": 6.800000000000001e-05,
"loss": 2.3764,
"step": 17
},
{
"epoch": 0.07,
"learning_rate": 7.2e-05,
"loss": 2.3801,
"step": 18
},
{
"epoch": 0.07,
"learning_rate": 7.6e-05,
"loss": 2.387,
"step": 19
},
{
"epoch": 0.07,
"learning_rate": 8e-05,
"loss": 2.3782,
"step": 20
},
{
"epoch": 0.08,
"learning_rate": 8.4e-05,
"loss": 2.3684,
"step": 21
},
{
"epoch": 0.08,
"learning_rate": 8.800000000000001e-05,
"loss": 2.381,
"step": 22
},
{
"epoch": 0.08,
"learning_rate": 9.200000000000001e-05,
"loss": 2.381,
"step": 23
},
{
"epoch": 0.09,
"learning_rate": 9.6e-05,
"loss": 2.34,
"step": 24
},
{
"epoch": 0.09,
"learning_rate": 0.0001,
"loss": 2.3467,
"step": 25
},
{
"epoch": 0.09,
"learning_rate": 0.00010400000000000001,
"loss": 2.335,
"step": 26
},
{
"epoch": 0.1,
"learning_rate": 0.00010800000000000001,
"loss": 2.3741,
"step": 27
},
{
"epoch": 0.1,
"learning_rate": 0.00011200000000000001,
"loss": 2.3716,
"step": 28
},
{
"epoch": 0.11,
"learning_rate": 0.000116,
"loss": 2.3168,
"step": 29
},
{
"epoch": 0.11,
"learning_rate": 0.00012,
"loss": 2.3438,
"step": 30
},
{
"epoch": 0.11,
"learning_rate": 0.000124,
"loss": 2.3625,
"step": 31
},
{
"epoch": 0.12,
"learning_rate": 0.00012800000000000002,
"loss": 2.3581,
"step": 32
},
{
"epoch": 0.12,
"learning_rate": 0.000132,
"loss": 2.3283,
"step": 33
},
{
"epoch": 0.12,
"learning_rate": 0.00013600000000000003,
"loss": 2.3691,
"step": 34
},
{
"epoch": 0.13,
"learning_rate": 0.00014,
"loss": 2.3538,
"step": 35
},
{
"epoch": 0.13,
"learning_rate": 0.000144,
"loss": 2.337,
"step": 36
},
{
"epoch": 0.13,
"learning_rate": 0.000148,
"loss": 2.2913,
"step": 37
},
{
"epoch": 0.14,
"learning_rate": 0.000152,
"loss": 2.2872,
"step": 38
},
{
"epoch": 0.14,
"learning_rate": 0.00015600000000000002,
"loss": 2.319,
"step": 39
},
{
"epoch": 0.15,
"learning_rate": 0.00016,
"loss": 2.3445,
"step": 40
},
{
"epoch": 0.15,
"learning_rate": 0.000164,
"loss": 2.3293,
"step": 41
},
{
"epoch": 0.15,
"learning_rate": 0.000168,
"loss": 2.3142,
"step": 42
},
{
"epoch": 0.16,
"learning_rate": 0.000172,
"loss": 2.3172,
"step": 43
},
{
"epoch": 0.16,
"learning_rate": 0.00017600000000000002,
"loss": 2.3179,
"step": 44
},
{
"epoch": 0.16,
"learning_rate": 0.00018,
"loss": 2.3018,
"step": 45
},
{
"epoch": 0.17,
"learning_rate": 0.00018400000000000003,
"loss": 2.3266,
"step": 46
},
{
"epoch": 0.17,
"learning_rate": 0.000188,
"loss": 2.3131,
"step": 47
},
{
"epoch": 0.17,
"learning_rate": 0.000192,
"loss": 2.3235,
"step": 48
},
{
"epoch": 0.18,
"learning_rate": 0.000196,
"loss": 2.3281,
"step": 49
},
{
"epoch": 0.18,
"learning_rate": 0.0002,
"loss": 2.3332,
"step": 50
},
{
"epoch": 0.19,
"learning_rate": 0.0001999980260856137,
"loss": 2.3148,
"step": 51
},
{
"epoch": 0.19,
"learning_rate": 0.00019999210442038162,
"loss": 2.2979,
"step": 52
},
{
"epoch": 0.19,
"learning_rate": 0.0001999822352380809,
"loss": 2.3257,
"step": 53
},
{
"epoch": 0.2,
"learning_rate": 0.00019996841892833,
"loss": 2.2947,
"step": 54
},
{
"epoch": 0.2,
"learning_rate": 0.00019995065603657316,
"loss": 2.3266,
"step": 55
},
{
"epoch": 0.2,
"learning_rate": 0.00019992894726405893,
"loss": 2.3385,
"step": 56
},
{
"epoch": 0.21,
"learning_rate": 0.0001999032934678125,
"loss": 2.3289,
"step": 57
},
{
"epoch": 0.21,
"learning_rate": 0.00019987369566060176,
"loss": 2.2741,
"step": 58
},
{
"epoch": 0.21,
"learning_rate": 0.00019984015501089752,
"loss": 2.2867,
"step": 59
},
{
"epoch": 0.22,
"learning_rate": 0.00019980267284282717,
"loss": 2.2836,
"step": 60
},
{
"epoch": 0.22,
"learning_rate": 0.00019976125063612252,
"loss": 2.3015,
"step": 61
},
{
"epoch": 0.23,
"learning_rate": 0.0001997158900260614,
"loss": 2.3233,
"step": 62
},
{
"epoch": 0.23,
"learning_rate": 0.00019966659280340297,
"loss": 2.2758,
"step": 63
},
{
"epoch": 0.23,
"learning_rate": 0.00019961336091431727,
"loss": 2.2709,
"step": 64
},
{
"epoch": 0.24,
"learning_rate": 0.00019955619646030802,
"loss": 2.2797,
"step": 65
},
{
"epoch": 0.24,
"learning_rate": 0.00019949510169813003,
"loss": 2.304,
"step": 66
},
{
"epoch": 0.24,
"learning_rate": 0.0001994300790396999,
"loss": 2.276,
"step": 67
},
{
"epoch": 0.25,
"learning_rate": 0.00019936113105200085,
"loss": 2.2041,
"step": 68
},
{
"epoch": 0.25,
"learning_rate": 0.00019928826045698136,
"loss": 2.2283,
"step": 69
},
{
"epoch": 0.25,
"eval_loss": 2.2513792514801025,
"eval_runtime": 44.1028,
"eval_samples_per_second": 25.622,
"eval_steps_per_second": 1.61,
"step": 69
},
{
"epoch": 0.25,
"learning_rate": 0.0001992114701314478,
"loss": 2.2609,
"step": 70
},
{
"epoch": 0.26,
"learning_rate": 0.00019913076310695068,
"loss": 2.2827,
"step": 71
},
{
"epoch": 0.26,
"learning_rate": 0.00019904614256966512,
"loss": 2.209,
"step": 72
},
{
"epoch": 0.27,
"learning_rate": 0.0001989576118602651,
"loss": 2.2145,
"step": 73
},
{
"epoch": 0.27,
"learning_rate": 0.0001988651744737914,
"loss": 2.2473,
"step": 74
},
{
"epoch": 0.27,
"learning_rate": 0.00019876883405951377,
"loss": 2.2453,
"step": 75
},
{
"epoch": 0.28,
"learning_rate": 0.0001986685944207868,
"loss": 2.211,
"step": 76
},
{
"epoch": 0.28,
"learning_rate": 0.00019856445951489982,
"loss": 2.2569,
"step": 77
},
{
"epoch": 0.28,
"learning_rate": 0.00019845643345292054,
"loss": 2.2212,
"step": 78
},
{
"epoch": 0.29,
"learning_rate": 0.00019834452049953297,
"loss": 2.1689,
"step": 79
},
{
"epoch": 0.29,
"learning_rate": 0.0001982287250728689,
"loss": 2.2705,
"step": 80
},
{
"epoch": 0.29,
"learning_rate": 0.0001981090517443334,
"loss": 2.1772,
"step": 81
},
{
"epoch": 0.3,
"learning_rate": 0.0001979855052384247,
"loss": 2.1771,
"step": 82
},
{
"epoch": 0.3,
"learning_rate": 0.00019785809043254722,
"loss": 2.2058,
"step": 83
},
{
"epoch": 0.31,
"learning_rate": 0.00019772681235681936,
"loss": 2.1685,
"step": 84
},
{
"epoch": 0.31,
"learning_rate": 0.00019759167619387476,
"loss": 2.1862,
"step": 85
},
{
"epoch": 0.31,
"learning_rate": 0.00019745268727865774,
"loss": 2.1707,
"step": 86
},
{
"epoch": 0.32,
"learning_rate": 0.00019730985109821266,
"loss": 2.1632,
"step": 87
},
{
"epoch": 0.32,
"learning_rate": 0.0001971631732914674,
"loss": 2.1792,
"step": 88
},
{
"epoch": 0.32,
"learning_rate": 0.0001970126596490106,
"loss": 2.1885,
"step": 89
},
{
"epoch": 0.33,
"learning_rate": 0.0001968583161128631,
"loss": 2.2058,
"step": 90
},
{
"epoch": 0.33,
"learning_rate": 0.00019670014877624353,
"loss": 2.1616,
"step": 91
},
{
"epoch": 0.33,
"learning_rate": 0.0001965381638833274,
"loss": 2.1764,
"step": 92
},
{
"epoch": 0.34,
"learning_rate": 0.000196372367829001,
"loss": 2.1524,
"step": 93
},
{
"epoch": 0.34,
"learning_rate": 0.0001962027671586086,
"loss": 2.1466,
"step": 94
},
{
"epoch": 0.35,
"learning_rate": 0.0001960293685676943,
"loss": 2.1066,
"step": 95
},
{
"epoch": 0.35,
"learning_rate": 0.0001958521789017376,
"loss": 2.1546,
"step": 96
},
{
"epoch": 0.35,
"learning_rate": 0.00019567120515588308,
"loss": 2.1259,
"step": 97
},
{
"epoch": 0.36,
"learning_rate": 0.00019548645447466431,
"loss": 2.1833,
"step": 98
},
{
"epoch": 0.36,
"learning_rate": 0.00019529793415172192,
"loss": 2.1349,
"step": 99
},
{
"epoch": 0.36,
"learning_rate": 0.00019510565162951537,
"loss": 2.1688,
"step": 100
},
{
"epoch": 0.37,
"learning_rate": 0.00019490961449902946,
"loss": 2.1241,
"step": 101
},
{
"epoch": 0.37,
"learning_rate": 0.00019470983049947444,
"loss": 2.0855,
"step": 102
},
{
"epoch": 0.37,
"learning_rate": 0.00019450630751798048,
"loss": 2.1153,
"step": 103
},
{
"epoch": 0.38,
"learning_rate": 0.00019429905358928646,
"loss": 2.134,
"step": 104
},
{
"epoch": 0.38,
"learning_rate": 0.00019408807689542257,
"loss": 2.097,
"step": 105
},
{
"epoch": 0.39,
"learning_rate": 0.00019387338576538744,
"loss": 2.0723,
"step": 106
},
{
"epoch": 0.39,
"learning_rate": 0.00019365498867481923,
"loss": 2.1019,
"step": 107
},
{
"epoch": 0.39,
"learning_rate": 0.00019343289424566122,
"loss": 2.1268,
"step": 108
},
{
"epoch": 0.4,
"learning_rate": 0.0001932071112458211,
"loss": 2.0742,
"step": 109
},
{
"epoch": 0.4,
"learning_rate": 0.00019297764858882514,
"loss": 2.0667,
"step": 110
},
{
"epoch": 0.4,
"learning_rate": 0.00019274451533346615,
"loss": 2.066,
"step": 111
},
{
"epoch": 0.41,
"learning_rate": 0.0001925077206834458,
"loss": 2.0845,
"step": 112
},
{
"epoch": 0.41,
"learning_rate": 0.0001922672739870115,
"loss": 2.112,
"step": 113
},
{
"epoch": 0.41,
"learning_rate": 0.00019202318473658705,
"loss": 2.1757,
"step": 114
},
{
"epoch": 0.42,
"learning_rate": 0.00019177546256839812,
"loss": 2.12,
"step": 115
},
{
"epoch": 0.42,
"learning_rate": 0.00019152411726209176,
"loss": 2.0802,
"step": 116
},
{
"epoch": 0.43,
"learning_rate": 0.0001912691587403503,
"loss": 2.0203,
"step": 117
},
{
"epoch": 0.43,
"learning_rate": 0.00019101059706849957,
"loss": 2.078,
"step": 118
},
{
"epoch": 0.43,
"learning_rate": 0.0001907484424541117,
"loss": 2.0773,
"step": 119
},
{
"epoch": 0.44,
"learning_rate": 0.00019048270524660196,
"loss": 2.0601,
"step": 120
},
{
"epoch": 0.44,
"learning_rate": 0.00019021339593682028,
"loss": 2.0711,
"step": 121
},
{
"epoch": 0.44,
"learning_rate": 0.0001899405251566371,
"loss": 2.0607,
"step": 122
},
{
"epoch": 0.45,
"learning_rate": 0.00018966410367852362,
"loss": 2.0505,
"step": 123
},
{
"epoch": 0.45,
"learning_rate": 0.0001893841424151264,
"loss": 2.0371,
"step": 124
},
{
"epoch": 0.45,
"learning_rate": 0.0001891006524188368,
"loss": 2.0507,
"step": 125
},
{
"epoch": 0.46,
"learning_rate": 0.00018881364488135448,
"loss": 2.0541,
"step": 126
},
{
"epoch": 0.46,
"learning_rate": 0.00018852313113324552,
"loss": 2.0362,
"step": 127
},
{
"epoch": 0.47,
"learning_rate": 0.00018822912264349534,
"loss": 2.0668,
"step": 128
},
{
"epoch": 0.47,
"learning_rate": 0.00018793163101905563,
"loss": 3.3779,
"step": 129
},
{
"epoch": 0.47,
"learning_rate": 0.00018763066800438636,
"loss": 2.0519,
"step": 130
},
{
"epoch": 0.48,
"learning_rate": 0.00018732624548099204,
"loss": 2.0925,
"step": 131
},
{
"epoch": 0.48,
"learning_rate": 0.0001870183754669526,
"loss": 2.0931,
"step": 132
},
{
"epoch": 0.48,
"learning_rate": 0.000186707070116449,
"loss": 2.0339,
"step": 133
},
{
"epoch": 0.49,
"learning_rate": 0.00018639234171928353,
"loss": 1.9732,
"step": 134
},
{
"epoch": 0.49,
"learning_rate": 0.0001860742027003944,
"loss": 2.0093,
"step": 135
},
{
"epoch": 0.49,
"learning_rate": 0.00018575266561936523,
"loss": 1.9884,
"step": 136
},
{
"epoch": 0.5,
"learning_rate": 0.0001854277431699295,
"loss": 1.9744,
"step": 137
},
{
"epoch": 0.5,
"learning_rate": 0.00018509944817946922,
"loss": 1.9539,
"step": 138
},
{
"epoch": 0.5,
"eval_loss": 2.0380747318267822,
"eval_runtime": 44.126,
"eval_samples_per_second": 25.608,
"eval_steps_per_second": 1.609,
"step": 138
},
{
"epoch": 0.51,
"learning_rate": 0.00018476779360850832,
"loss": 2.0191,
"step": 139
},
{
"epoch": 0.51,
"learning_rate": 0.00018443279255020152,
"loss": 2.0294,
"step": 140
},
{
"epoch": 0.51,
"learning_rate": 0.00018409445822981693,
"loss": 2.021,
"step": 141
},
{
"epoch": 0.52,
"learning_rate": 0.0001837528040042142,
"loss": 2.0358,
"step": 142
},
{
"epoch": 0.52,
"learning_rate": 0.00018340784336131713,
"loss": 1.9937,
"step": 143
},
{
"epoch": 0.52,
"learning_rate": 0.00018305958991958127,
"loss": 2.0256,
"step": 144
},
{
"epoch": 0.53,
"learning_rate": 0.00018270805742745617,
"loss": 2.0468,
"step": 145
},
{
"epoch": 0.53,
"learning_rate": 0.00018235325976284275,
"loss": 1.9277,
"step": 146
},
{
"epoch": 0.53,
"learning_rate": 0.00018199521093254523,
"loss": 2.047,
"step": 147
},
{
"epoch": 0.54,
"learning_rate": 0.00018163392507171842,
"loss": 2.0202,
"step": 148
},
{
"epoch": 0.54,
"learning_rate": 0.0001812694164433094,
"loss": 1.9753,
"step": 149
},
{
"epoch": 0.55,
"learning_rate": 0.00018090169943749476,
"loss": 1.9769,
"step": 150
},
{
"epoch": 0.55,
"learning_rate": 0.0001805307885711122,
"loss": 1.9002,
"step": 151
},
{
"epoch": 0.55,
"learning_rate": 0.00018015669848708767,
"loss": 1.8904,
"step": 152
},
{
"epoch": 0.56,
"learning_rate": 0.0001797794439538571,
"loss": 1.8454,
"step": 153
},
{
"epoch": 0.56,
"learning_rate": 0.00017939903986478355,
"loss": 1.9393,
"step": 154
},
{
"epoch": 0.56,
"learning_rate": 0.00017901550123756906,
"loss": 1.9692,
"step": 155
},
{
"epoch": 0.57,
"learning_rate": 0.00017862884321366188,
"loss": 1.8821,
"step": 156
},
{
"epoch": 0.57,
"learning_rate": 0.0001782390810576588,
"loss": 1.9025,
"step": 157
},
{
"epoch": 0.57,
"learning_rate": 0.00017784623015670238,
"loss": 1.8977,
"step": 158
},
{
"epoch": 0.58,
"learning_rate": 0.00017745030601987337,
"loss": 1.9179,
"step": 159
},
{
"epoch": 0.58,
"learning_rate": 0.00017705132427757895,
"loss": 1.8895,
"step": 160
},
{
"epoch": 0.59,
"learning_rate": 0.00017664930068093498,
"loss": 1.8531,
"step": 161
},
{
"epoch": 0.59,
"learning_rate": 0.0001762442511011448,
"loss": 1.9298,
"step": 162
},
{
"epoch": 0.59,
"learning_rate": 0.0001758361915288722,
"loss": 1.862,
"step": 163
},
{
"epoch": 0.6,
"learning_rate": 0.00017542513807361037,
"loss": 1.9015,
"step": 164
},
{
"epoch": 0.6,
"learning_rate": 0.00017501110696304596,
"loss": 1.8509,
"step": 165
},
{
"epoch": 0.6,
"learning_rate": 0.00017459411454241822,
"loss": 1.8403,
"step": 166
},
{
"epoch": 0.61,
"learning_rate": 0.00017417417727387394,
"loss": 1.9279,
"step": 167
},
{
"epoch": 0.61,
"learning_rate": 0.0001737513117358174,
"loss": 1.7867,
"step": 168
},
{
"epoch": 0.61,
"learning_rate": 0.00017332553462225602,
"loss": 1.8755,
"step": 169
},
{
"epoch": 0.62,
"learning_rate": 0.00017289686274214118,
"loss": 1.8639,
"step": 170
},
{
"epoch": 0.62,
"learning_rate": 0.0001724653130187047,
"loss": 1.8795,
"step": 171
},
{
"epoch": 0.63,
"learning_rate": 0.0001720309024887907,
"loss": 1.8081,
"step": 172
},
{
"epoch": 0.63,
"learning_rate": 0.00017159364830218312,
"loss": 1.8725,
"step": 173
},
{
"epoch": 0.63,
"learning_rate": 0.00017115356772092857,
"loss": 1.8081,
"step": 174
},
{
"epoch": 0.64,
"learning_rate": 0.00017071067811865476,
"loss": 1.8939,
"step": 175
},
{
"epoch": 0.64,
"learning_rate": 0.00017026499697988493,
"loss": 1.819,
"step": 176
},
{
"epoch": 0.64,
"learning_rate": 0.00016981654189934727,
"loss": 1.8051,
"step": 177
},
{
"epoch": 0.65,
"learning_rate": 0.0001693653305812805,
"loss": 1.8504,
"step": 178
},
{
"epoch": 0.65,
"learning_rate": 0.00016891138083873487,
"loss": 1.7902,
"step": 179
},
{
"epoch": 0.65,
"learning_rate": 0.00016845471059286887,
"loss": 1.787,
"step": 180
},
{
"epoch": 0.66,
"learning_rate": 0.00016799533787224192,
"loss": 1.8533,
"step": 181
},
{
"epoch": 0.66,
"learning_rate": 0.00016753328081210245,
"loss": 1.831,
"step": 182
},
{
"epoch": 0.67,
"learning_rate": 0.000167068557653672,
"loss": 1.8204,
"step": 183
},
{
"epoch": 0.67,
"learning_rate": 0.00016660118674342517,
"loss": 1.8043,
"step": 184
},
{
"epoch": 0.67,
"learning_rate": 0.00016613118653236518,
"loss": 1.778,
"step": 185
},
{
"epoch": 0.68,
"learning_rate": 0.00016565857557529566,
"loss": 1.7868,
"step": 186
},
{
"epoch": 0.68,
"learning_rate": 0.0001651833725300879,
"loss": 1.8423,
"step": 187
},
{
"epoch": 0.68,
"learning_rate": 0.00016470559615694446,
"loss": 1.8157,
"step": 188
},
{
"epoch": 0.69,
"learning_rate": 0.00016422526531765846,
"loss": 1.7796,
"step": 189
},
{
"epoch": 0.69,
"learning_rate": 0.000163742398974869,
"loss": 1.705,
"step": 190
},
{
"epoch": 0.69,
"learning_rate": 0.00016325701619131246,
"loss": 1.7537,
"step": 191
},
{
"epoch": 0.7,
"learning_rate": 0.00016276913612907007,
"loss": 1.7629,
"step": 192
},
{
"epoch": 0.7,
"learning_rate": 0.00016227877804881127,
"loss": 1.7667,
"step": 193
},
{
"epoch": 0.71,
"learning_rate": 0.00016178596130903344,
"loss": 1.7376,
"step": 194
},
{
"epoch": 0.71,
"learning_rate": 0.00016129070536529766,
"loss": 1.7776,
"step": 195
},
{
"epoch": 0.71,
"learning_rate": 0.00016079302976946055,
"loss": 1.6759,
"step": 196
},
{
"epoch": 0.72,
"learning_rate": 0.00016029295416890248,
"loss": 1.7559,
"step": 197
},
{
"epoch": 0.72,
"learning_rate": 0.0001597904983057519,
"loss": 1.76,
"step": 198
},
{
"epoch": 0.72,
"learning_rate": 0.00015928568201610595,
"loss": 1.7412,
"step": 199
},
{
"epoch": 0.73,
"learning_rate": 0.00015877852522924732,
"loss": 1.6685,
"step": 200
},
{
"epoch": 0.73,
"learning_rate": 0.00015826904796685762,
"loss": 1.7587,
"step": 201
},
{
"epoch": 0.73,
"learning_rate": 0.00015775727034222675,
"loss": 1.7417,
"step": 202
},
{
"epoch": 0.74,
"learning_rate": 0.0001572432125594591,
"loss": 1.6513,
"step": 203
},
{
"epoch": 0.74,
"learning_rate": 0.00015672689491267567,
"loss": 1.6968,
"step": 204
},
{
"epoch": 0.75,
"learning_rate": 0.00015620833778521307,
"loss": 1.6509,
"step": 205
},
{
"epoch": 0.75,
"learning_rate": 0.00015568756164881882,
"loss": 1.6245,
"step": 206
},
{
"epoch": 0.75,
"learning_rate": 0.00015516458706284303,
"loss": 1.6608,
"step": 207
},
{
"epoch": 0.75,
"eval_loss": 1.6871771812438965,
"eval_runtime": 44.071,
"eval_samples_per_second": 25.64,
"eval_steps_per_second": 1.611,
"step": 207
},
{
"epoch": 0.76,
"learning_rate": 0.00015463943467342693,
"loss": 1.6572,
"step": 208
},
{
"epoch": 0.76,
"learning_rate": 0.00015411212521268758,
"loss": 1.5938,
"step": 209
},
{
"epoch": 0.76,
"learning_rate": 0.00015358267949789966,
"loss": 1.6421,
"step": 210
},
{
"epoch": 0.77,
"learning_rate": 0.0001530511184306734,
"loss": 1.7423,
"step": 211
},
{
"epoch": 0.77,
"learning_rate": 0.0001525174629961296,
"loss": 1.6636,
"step": 212
},
{
"epoch": 0.77,
"learning_rate": 0.00015198173426207094,
"loss": 1.7339,
"step": 213
},
{
"epoch": 0.78,
"learning_rate": 0.00015144395337815064,
"loss": 1.6729,
"step": 214
},
{
"epoch": 0.78,
"learning_rate": 0.00015090414157503714,
"loss": 1.6939,
"step": 215
},
{
"epoch": 0.79,
"learning_rate": 0.0001503623201635761,
"loss": 1.6394,
"step": 216
},
{
"epoch": 0.79,
"learning_rate": 0.0001498185105339491,
"loss": 1.6196,
"step": 217
},
{
"epoch": 0.79,
"learning_rate": 0.00014927273415482915,
"loss": 1.6304,
"step": 218
},
{
"epoch": 0.8,
"learning_rate": 0.00014872501257253323,
"loss": 1.6124,
"step": 219
},
{
"epoch": 0.8,
"learning_rate": 0.00014817536741017152,
"loss": 1.6011,
"step": 220
},
{
"epoch": 0.8,
"learning_rate": 0.0001476238203667939,
"loss": 1.5667,
"step": 221
},
{
"epoch": 0.81,
"learning_rate": 0.0001470703932165333,
"loss": 1.6536,
"step": 222
},
{
"epoch": 0.81,
"learning_rate": 0.00014651510780774583,
"loss": 1.5861,
"step": 223
},
{
"epoch": 0.81,
"learning_rate": 0.00014595798606214882,
"loss": 1.5339,
"step": 224
},
{
"epoch": 0.82,
"learning_rate": 0.00014539904997395468,
"loss": 1.6058,
"step": 225
},
{
"epoch": 0.82,
"learning_rate": 0.00014483832160900326,
"loss": 1.605,
"step": 226
},
{
"epoch": 0.83,
"learning_rate": 0.0001442758231038902,
"loss": 1.5731,
"step": 227
},
{
"epoch": 0.83,
"learning_rate": 0.0001437115766650933,
"loss": 1.5876,
"step": 228
},
{
"epoch": 0.83,
"learning_rate": 0.0001431456045680959,
"loss": 1.6278,
"step": 229
},
{
"epoch": 0.84,
"learning_rate": 0.00014257792915650728,
"loss": 1.5877,
"step": 230
},
{
"epoch": 0.84,
"learning_rate": 0.00014200857284118066,
"loss": 1.5985,
"step": 231
},
{
"epoch": 0.84,
"learning_rate": 0.00014143755809932845,
"loss": 1.5359,
"step": 232
},
{
"epoch": 0.85,
"learning_rate": 0.00014086490747363493,
"loss": 1.4969,
"step": 233
},
{
"epoch": 0.85,
"learning_rate": 0.00014029064357136628,
"loss": 1.5615,
"step": 234
},
{
"epoch": 0.85,
"learning_rate": 0.00013971478906347806,
"loss": 1.5356,
"step": 235
},
{
"epoch": 0.86,
"learning_rate": 0.00013913736668372026,
"loss": 1.5306,
"step": 236
},
{
"epoch": 0.86,
"learning_rate": 0.00013855839922773968,
"loss": 1.4575,
"step": 237
},
{
"epoch": 0.87,
"learning_rate": 0.00013797790955218014,
"loss": 1.5216,
"step": 238
},
{
"epoch": 0.87,
"learning_rate": 0.00013739592057378003,
"loss": 1.5153,
"step": 239
},
{
"epoch": 0.87,
"learning_rate": 0.00013681245526846783,
"loss": 1.4623,
"step": 240
},
{
"epoch": 0.88,
"learning_rate": 0.00013622753667045457,
"loss": 1.4995,
"step": 241
},
{
"epoch": 0.88,
"learning_rate": 0.00013564118787132506,
"loss": 1.5363,
"step": 242
},
{
"epoch": 0.88,
"learning_rate": 0.0001350534320191259,
"loss": 1.425,
"step": 243
},
{
"epoch": 0.89,
"learning_rate": 0.0001344642923174517,
"loss": 1.4145,
"step": 244
},
{
"epoch": 0.89,
"learning_rate": 0.00013387379202452917,
"loss": 1.4994,
"step": 245
},
{
"epoch": 0.89,
"learning_rate": 0.00013328195445229868,
"loss": 1.4758,
"step": 246
},
{
"epoch": 0.9,
"learning_rate": 0.00013268880296549425,
"loss": 1.5288,
"step": 247
},
{
"epoch": 0.9,
"learning_rate": 0.00013209436098072095,
"loss": 1.4809,
"step": 248
},
{
"epoch": 0.91,
"learning_rate": 0.0001314986519655305,
"loss": 1.4916,
"step": 249
},
{
"epoch": 0.91,
"learning_rate": 0.00013090169943749476,
"loss": 1.5181,
"step": 250
},
{
"epoch": 0.91,
"learning_rate": 0.00013030352696327742,
"loss": 1.4987,
"step": 251
},
{
"epoch": 0.92,
"learning_rate": 0.0001297041581577035,
"loss": 1.4526,
"step": 252
},
{
"epoch": 0.92,
"learning_rate": 0.00012910361668282719,
"loss": 1.4375,
"step": 253
},
{
"epoch": 0.92,
"learning_rate": 0.0001285019262469976,
"loss": 1.4934,
"step": 254
},
{
"epoch": 0.93,
"learning_rate": 0.00012789911060392294,
"loss": 1.443,
"step": 255
},
{
"epoch": 0.93,
"learning_rate": 0.00012729519355173254,
"loss": 1.359,
"step": 256
},
{
"epoch": 0.93,
"learning_rate": 0.00012669019893203759,
"loss": 1.4471,
"step": 257
},
{
"epoch": 0.94,
"learning_rate": 0.00012608415062898972,
"loss": 1.4615,
"step": 258
},
{
"epoch": 0.94,
"learning_rate": 0.00012547707256833823,
"loss": 1.4249,
"step": 259
},
{
"epoch": 0.95,
"learning_rate": 0.0001248689887164855,
"loss": 1.4157,
"step": 260
},
{
"epoch": 0.95,
"learning_rate": 0.00012425992307954075,
"loss": 1.4213,
"step": 261
},
{
"epoch": 0.95,
"learning_rate": 0.00012364989970237248,
"loss": 1.3552,
"step": 262
},
{
"epoch": 0.96,
"learning_rate": 0.00012303894266765908,
"loss": 1.3715,
"step": 263
},
{
"epoch": 0.96,
"learning_rate": 0.00012242707609493814,
"loss": 1.3484,
"step": 264
},
{
"epoch": 0.96,
"learning_rate": 0.00012181432413965428,
"loss": 1.4268,
"step": 265
},
{
"epoch": 0.97,
"learning_rate": 0.00012120071099220549,
"loss": 1.3612,
"step": 266
},
{
"epoch": 0.97,
"learning_rate": 0.00012058626087698814,
"loss": 1.341,
"step": 267
},
{
"epoch": 0.97,
"learning_rate": 0.00011997099805144069,
"loss": 1.3221,
"step": 268
},
{
"epoch": 0.98,
"learning_rate": 0.00011935494680508606,
"loss": 1.3872,
"step": 269
},
{
"epoch": 0.98,
"learning_rate": 0.00011873813145857249,
"loss": 1.4136,
"step": 270
},
{
"epoch": 0.99,
"learning_rate": 0.00011812057636271374,
"loss": 1.3503,
"step": 271
},
{
"epoch": 0.99,
"learning_rate": 0.00011750230589752762,
"loss": 1.4157,
"step": 272
},
{
"epoch": 0.99,
"learning_rate": 0.00011688334447127338,
"loss": 1.3828,
"step": 273
},
{
"epoch": 1.0,
"learning_rate": 0.00011626371651948838,
"loss": 1.3542,
"step": 274
},
{
"epoch": 1.0,
"learning_rate": 0.0001156434465040231,
"loss": 1.3105,
"step": 275
}
],
"logging_steps": 1,
"max_steps": 550,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 275,
"total_flos": 2.76799703482368e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
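
A minimal sketch (not part of the checkpoint itself) of how the state above can be inspected with only the standard library, assuming the file is saved locally as trainer_state.json. Entries in log_history that carry "loss" are per-step training records; entries that carry "eval_loss" are the periodic evaluations run every eval_steps steps.

import json

# Assumption: the JSON above has been saved as ./trainer_state.json
with open("trainer_state.json") as f:
    state = json.load(f)

# Split the log into training-step records and evaluation records.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"{len(train_logs)} training steps logged, "
      f"{state['global_step']}/{state['max_steps']} steps completed, "
      f"eval every {state['eval_steps']} steps")

# Print the evaluation-loss trajectory recorded in this checkpoint.
for e in eval_logs:
    print(f"step {e['step']:>3} (epoch {e['epoch']}): eval_loss = {e['eval_loss']:.4f}")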