t5-base-kw2email-v4 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9962100703844072,
"global_step": 460,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.00014285714285714284,
"loss": 1.8415,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 0.0002857142857142857,
"loss": 1.9512,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 0.00042857142857142855,
"loss": 2.0021,
"step": 6
},
{
"epoch": 0.03,
"learning_rate": 0.0005714285714285714,
"loss": 2.0064,
"step": 8
},
{
"epoch": 0.04,
"learning_rate": 0.0007142857142857143,
"loss": 1.9143,
"step": 10
},
{
"epoch": 0.05,
"learning_rate": 0.0008571428571428571,
"loss": 1.8679,
"step": 12
},
{
"epoch": 0.06,
"learning_rate": 0.001,
"loss": 1.9015,
"step": 14
},
{
"epoch": 0.07,
"learning_rate": 0.0009999503838747563,
"loss": 1.9679,
"step": 16
},
{
"epoch": 0.08,
"learning_rate": 0.000999801545346065,
"loss": 1.96,
"step": 18
},
{
"epoch": 0.09,
"learning_rate": 0.0009995535139530904,
"loss": 2.019,
"step": 20
},
{
"epoch": 0.1,
"learning_rate": 0.000999206338921259,
"loss": 1.9169,
"step": 22
},
{
"epoch": 0.1,
"learning_rate": 0.00099876008915249,
"loss": 2.002,
"step": 24
},
{
"epoch": 0.11,
"learning_rate": 0.0009982148532115217,
"loss": 1.966,
"step": 26
},
{
"epoch": 0.12,
"learning_rate": 0.0009975707393083326,
"loss": 1.9922,
"step": 28
},
{
"epoch": 0.13,
"learning_rate": 0.0009968278752766672,
"loss": 1.8925,
"step": 30
},
{
"epoch": 0.14,
"learning_rate": 0.0009959864085486647,
"loss": 1.9948,
"step": 32
},
{
"epoch": 0.15,
"learning_rate": 0.0009950465061255996,
"loss": 2.0015,
"step": 34
},
{
"epoch": 0.16,
"learning_rate": 0.0009940083545447369,
"loss": 1.9682,
"step": 36
},
{
"epoch": 0.16,
"learning_rate": 0.0009928721598423125,
"loss": 1.9633,
"step": 38
},
{
"epoch": 0.17,
"learning_rate": 0.0009916381475126405,
"loss": 2.0015,
"step": 40
},
{
"epoch": 0.18,
"learning_rate": 0.0009903065624633626,
"loss": 1.8838,
"step": 42
},
{
"epoch": 0.19,
"learning_rate": 0.000988877668966841,
"loss": 1.966,
"step": 44
},
{
"epoch": 0.2,
"learning_rate": 0.0009873517506077101,
"loss": 1.8983,
"step": 46
},
{
"epoch": 0.21,
"learning_rate": 0.000985729110226596,
"loss": 1.9624,
"step": 48
},
{
"epoch": 0.22,
"learning_rate": 0.0009840100698600119,
"loss": 1.9347,
"step": 50
},
{
"epoch": 0.23,
"learning_rate": 0.0009821949706764462,
"loss": 1.9835,
"step": 52
},
{
"epoch": 0.23,
"learning_rate": 0.000980284172908653,
"loss": 2.0104,
"step": 54
},
{
"epoch": 0.24,
"learning_rate": 0.0009782780557821575,
"loss": 1.885,
"step": 56
},
{
"epoch": 0.25,
"learning_rate": 0.0009761770174399942,
"loss": 1.964,
"step": 58
},
{
"epoch": 0.26,
"learning_rate": 0.000973981474863689,
"loss": 1.932,
"step": 60
},
{
"epoch": 0.27,
"learning_rate": 0.0009716918637905041,
"loss": 1.9233,
"step": 62
},
{
"epoch": 0.28,
"learning_rate": 0.0009693086386269581,
"loss": 1.8307,
"step": 64
},
{
"epoch": 0.29,
"learning_rate": 0.000966832272358644,
"loss": 1.9262,
"step": 66
},
{
"epoch": 0.29,
"learning_rate": 0.0009642632564563575,
"loss": 1.9522,
"step": 68
},
{
"epoch": 0.3,
"learning_rate": 0.0009616021007785576,
"loss": 1.9949,
"step": 70
},
{
"epoch": 0.31,
"learning_rate": 0.0009588493334701778,
"loss": 1.9875,
"step": 72
},
{
"epoch": 0.32,
"learning_rate": 0.0009560055008578085,
"loss": 2.0109,
"step": 74
},
{
"epoch": 0.33,
"learning_rate": 0.0009530711673412697,
"loss": 1.9399,
"step": 76
},
{
"epoch": 0.34,
"learning_rate": 0.0009500469152815987,
"loss": 1.9279,
"step": 78
},
{
"epoch": 0.35,
"learning_rate": 0.0009469333448854713,
"loss": 1.9739,
"step": 80
},
{
"epoch": 0.36,
"learning_rate": 0.0009437310740860822,
"loss": 1.9563,
"step": 82
},
{
"epoch": 0.36,
"learning_rate": 0.0009404407384205079,
"loss": 1.9654,
"step": 84
},
{
"epoch": 0.37,
"learning_rate": 0.0009370629909035739,
"loss": 2.0422,
"step": 86
},
{
"epoch": 0.38,
"learning_rate": 0.0009335985018982559,
"loss": 1.9137,
"step": 88
},
{
"epoch": 0.39,
"learning_rate": 0.0009300479589826354,
"loss": 1.9886,
"step": 90
},
{
"epoch": 0.4,
"learning_rate": 0.0009264120668134404,
"loss": 1.9525,
"step": 92
},
{
"epoch": 0.41,
"learning_rate": 0.0009226915469861956,
"loss": 1.9942,
"step": 94
},
{
"epoch": 0.42,
"learning_rate": 0.0009188871378920122,
"loss": 1.9555,
"step": 96
},
{
"epoch": 0.42,
"learning_rate": 0.0009149995945710424,
"loss": 1.9369,
"step": 98
},
{
"epoch": 0.43,
"learning_rate": 0.0009110296885626314,
"loss": 1.9038,
"step": 100
},
{
"epoch": 0.44,
"learning_rate": 0.0009069782077521942,
"loss": 1.9615,
"step": 102
},
{
"epoch": 0.45,
"learning_rate": 0.0009028459562148479,
"loss": 2.0167,
"step": 104
},
{
"epoch": 0.46,
"learning_rate": 0.0008986337540558318,
"loss": 1.8892,
"step": 106
},
{
"epoch": 0.47,
"learning_rate": 0.0008943424372477454,
"loss": 1.9468,
"step": 108
},
{
"epoch": 0.48,
"learning_rate": 0.0008899728574646376,
"loss": 1.9827,
"step": 110
},
{
"epoch": 0.49,
"learning_rate": 0.0008855258819129796,
"loss": 1.8949,
"step": 112
},
{
"epoch": 0.49,
"learning_rate": 0.0008810023931595549,
"loss": 1.9341,
"step": 114
},
{
"epoch": 0.5,
"learning_rate": 0.0008764032889563016,
"loss": 1.9982,
"step": 116
},
{
"epoch": 0.51,
"learning_rate": 0.0008717294820621407,
"loss": 1.982,
"step": 118
},
{
"epoch": 0.52,
"learning_rate": 0.0008669819000618246,
"loss": 1.8524,
"step": 120
},
{
"epoch": 0.53,
"learning_rate": 0.0008621614851818462,
"loss": 1.891,
"step": 122
},
{
"epoch": 0.54,
"learning_rate": 0.000857269194103439,
"loss": 1.9186,
"step": 124
},
{
"epoch": 0.55,
"learning_rate": 0.0008523059977727103,
"loss": 1.8855,
"step": 126
},
{
"epoch": 0.55,
"learning_rate": 0.0008472728812079436,
"loss": 1.9325,
"step": 128
},
{
"epoch": 0.56,
"learning_rate": 0.0008421708433041057,
"loss": 1.9798,
"step": 130
},
{
"epoch": 0.57,
"learning_rate": 0.0008370008966346037,
"loss": 1.9994,
"step": 132
},
{
"epoch": 0.58,
"learning_rate": 0.0008317640672503231,
"loss": 1.9706,
"step": 134
},
{
"epoch": 0.59,
"learning_rate": 0.0008264613944759942,
"loss": 1.9126,
"step": 136
},
{
"epoch": 0.6,
"learning_rate": 0.0008210939307039234,
"loss": 1.921,
"step": 138
},
{
"epoch": 0.61,
"learning_rate": 0.0008156627411851295,
"loss": 1.8735,
"step": 140
},
{
"epoch": 0.62,
"learning_rate": 0.00081016890381793,
"loss": 1.8251,
"step": 142
},
{
"epoch": 0.62,
"learning_rate": 0.0008046135089340164,
"loss": 1.9081,
"step": 144
},
{
"epoch": 0.63,
"learning_rate": 0.0007989976590820622,
"loss": 1.9156,
"step": 146
},
{
"epoch": 0.64,
"learning_rate": 0.0007933224688089058,
"loss": 1.8971,
"step": 148
},
{
"epoch": 0.65,
"learning_rate": 0.0007875890644383525,
"loss": 1.968,
"step": 150
},
{
"epoch": 0.66,
"learning_rate": 0.0007817985838476398,
"loss": 1.9105,
"step": 152
},
{
"epoch": 0.67,
"learning_rate": 0.0007759521762416084,
"loss": 1.9104,
"step": 154
},
{
"epoch": 0.68,
"learning_rate": 0.0007700510019246265,
"loss": 1.9086,
"step": 156
},
{
"epoch": 0.68,
"learning_rate": 0.0007640962320703099,
"loss": 1.8637,
"step": 158
},
{
"epoch": 0.69,
"learning_rate": 0.0007580890484890863,
"loss": 2.0117,
"step": 160
},
{
"epoch": 0.7,
"learning_rate": 0.0007520306433936473,
"loss": 1.9229,
"step": 162
},
{
"epoch": 0.71,
"learning_rate": 0.0007459222191623368,
"loss": 1.9669,
"step": 164
},
{
"epoch": 0.72,
"learning_rate": 0.0007397649881005219,
"loss": 1.9291,
"step": 166
},
{
"epoch": 0.73,
"learning_rate": 0.0007335601721999923,
"loss": 1.9108,
"step": 168
},
{
"epoch": 0.74,
"learning_rate": 0.0007273090028964396,
"loss": 1.9027,
"step": 170
},
{
"epoch": 0.74,
"learning_rate": 0.0007210127208250598,
"loss": 1.8513,
"step": 172
},
{
"epoch": 0.75,
"learning_rate": 0.0007146725755743328,
"loss": 1.9157,
"step": 174
},
{
"epoch": 0.76,
"learning_rate": 0.0007082898254380215,
"loss": 1.9181,
"step": 176
},
{
"epoch": 0.77,
"learning_rate": 0.0007018657371654464,
"loss": 1.8859,
"step": 178
},
{
"epoch": 0.78,
"learning_rate": 0.000695401585710081,
"loss": 1.8927,
"step": 180
},
{
"epoch": 0.79,
"learning_rate": 0.0006888986539765181,
"loss": 1.908,
"step": 182
},
{
"epoch": 0.8,
"learning_rate": 0.0006823582325658588,
"loss": 2.0016,
"step": 184
},
{
"epoch": 0.81,
"learning_rate": 0.0006757816195195749,
"loss": 2.0238,
"step": 186
},
{
"epoch": 0.81,
"learning_rate": 0.0006691701200618925,
"loss": 1.9548,
"step": 188
},
{
"epoch": 0.82,
"learning_rate": 0.0006625250463407523,
"loss": 1.9494,
"step": 190
},
{
"epoch": 0.83,
"learning_rate": 0.0006558477171673941,
"loss": 1.8334,
"step": 192
},
{
"epoch": 0.84,
"learning_rate": 0.0006491394577546203,
"loss": 1.9507,
"step": 194
},
{
"epoch": 0.85,
"learning_rate": 0.0006424015994537876,
"loss": 1.9014,
"step": 196
},
{
"epoch": 0.86,
"learning_rate": 0.0006356354794905814,
"loss": 1.9175,
"step": 198
},
{
"epoch": 0.87,
"learning_rate": 0.0006288424406996237,
"loss": 1.9221,
"step": 200
},
{
"epoch": 0.87,
"learning_rate": 0.0006220238312579681,
"loss": 1.9813,
"step": 202
},
{
"epoch": 0.88,
"learning_rate": 0.000615181004417535,
"loss": 1.9541,
"step": 204
},
{
"epoch": 0.89,
"learning_rate": 0.0006083153182365383,
"loss": 1.8112,
"step": 206
},
{
"epoch": 0.9,
"learning_rate": 0.00060142813530996,
"loss": 1.902,
"step": 208
},
{
"epoch": 0.91,
"learning_rate": 0.0005945208224991225,
"loss": 1.9269,
"step": 210
},
{
"epoch": 0.92,
"learning_rate": 0.0005875947506604162,
"loss": 1.949,
"step": 212
},
{
"epoch": 0.93,
"learning_rate": 0.000580651294373232,
"loss": 1.9206,
"step": 214
},
{
"epoch": 0.94,
"learning_rate": 0.0005736918316671572,
"loss": 1.8971,
"step": 216
},
{
"epoch": 0.94,
"learning_rate": 0.0005667177437484845,
"loss": 1.9171,
"step": 218
},
{
"epoch": 0.95,
"learning_rate": 0.0005597304147260927,
"loss": 1.8511,
"step": 220
},
{
"epoch": 0.96,
"learning_rate": 0.0005527312313367492,
"loss": 1.8867,
"step": 222
},
{
"epoch": 0.97,
"learning_rate": 0.0005457215826698928,
"loss": 1.9239,
"step": 224
},
{
"epoch": 0.98,
"learning_rate": 0.0005387028598919481,
"loss": 1.8722,
"step": 226
},
{
"epoch": 0.99,
"learning_rate": 0.0005316764559702285,
"loss": 1.8539,
"step": 228
},
{
"epoch": 1.0,
"learning_rate": 0.0005246437653964822,
"loss": 1.9282,
"step": 230
},
{
"epoch": 1.01,
"learning_rate": 0.0005176061839101343,
"loss": 2.6089,
"step": 232
},
{
"epoch": 1.02,
"learning_rate": 0.0005105651082212828,
"loss": 1.8528,
"step": 234
},
{
"epoch": 1.03,
"learning_rate": 0.0005035219357335002,
"loss": 1.916,
"step": 236
},
{
"epoch": 1.03,
"learning_rate": 0.0004964780642664998,
"loss": 1.9557,
"step": 238
},
{
"epoch": 1.04,
"learning_rate": 0.0004894348917787173,
"loss": 1.8235,
"step": 240
},
{
"epoch": 1.05,
"learning_rate": 0.0004823938160898657,
"loss": 1.9001,
"step": 242
},
{
"epoch": 1.06,
"learning_rate": 0.0004753562346035178,
"loss": 1.8572,
"step": 244
},
{
"epoch": 1.07,
"learning_rate": 0.0004683235440297717,
"loss": 1.8058,
"step": 246
},
{
"epoch": 1.08,
"learning_rate": 0.00046129714010805206,
"loss": 1.8738,
"step": 248
},
{
"epoch": 1.09,
"learning_rate": 0.00045427841733010723,
"loss": 1.7835,
"step": 250
},
{
"epoch": 1.1,
"learning_rate": 0.00044726876866325085,
"loss": 1.8462,
"step": 252
},
{
"epoch": 1.1,
"learning_rate": 0.0004402695852739074,
"loss": 1.8028,
"step": 254
},
{
"epoch": 1.11,
"learning_rate": 0.0004332822562515155,
"loss": 1.8254,
"step": 256
},
{
"epoch": 1.12,
"learning_rate": 0.00042630816833284286,
"loss": 1.846,
"step": 258
},
{
"epoch": 1.13,
"learning_rate": 0.0004193487056267679,
"loss": 1.7496,
"step": 260
},
{
"epoch": 1.14,
"learning_rate": 0.0004124052493395838,
"loss": 1.7965,
"step": 262
},
{
"epoch": 1.15,
"learning_rate": 0.0004054791775008775,
"loss": 1.7148,
"step": 264
},
{
"epoch": 1.16,
"learning_rate": 0.0003985718646900401,
"loss": 1.8289,
"step": 266
},
{
"epoch": 1.16,
"learning_rate": 0.00039168468176346176,
"loss": 1.795,
"step": 268
},
{
"epoch": 1.17,
"learning_rate": 0.00038481899558246514,
"loss": 1.8347,
"step": 270
},
{
"epoch": 1.18,
"learning_rate": 0.00037797616874203193,
"loss": 1.8465,
"step": 272
},
{
"epoch": 1.19,
"learning_rate": 0.0003711575593003763,
"loss": 1.8523,
"step": 274
},
{
"epoch": 1.2,
"learning_rate": 0.00036436452050941866,
"loss": 1.7976,
"step": 276
},
{
"epoch": 1.21,
"learning_rate": 0.0003575984005462124,
"loss": 1.8306,
"step": 278
},
{
"epoch": 1.22,
"learning_rate": 0.0003508605422453799,
"loss": 1.8349,
"step": 280
},
{
"epoch": 1.23,
"learning_rate": 0.0003441522828326061,
"loss": 1.8249,
"step": 282
},
{
"epoch": 1.23,
"learning_rate": 0.00033747495365924786,
"loss": 1.822,
"step": 284
},
{
"epoch": 1.24,
"learning_rate": 0.00033082987993810755,
"loss": 1.7984,
"step": 286
},
{
"epoch": 1.25,
"learning_rate": 0.00032421838048042514,
"loss": 1.8706,
"step": 288
},
{
"epoch": 1.26,
"learning_rate": 0.0003176417674341412,
"loss": 1.8664,
"step": 290
},
{
"epoch": 1.27,
"learning_rate": 0.000311101346023482,
"loss": 1.7938,
"step": 292
},
{
"epoch": 1.28,
"learning_rate": 0.000304598414289919,
"loss": 1.8195,
"step": 294
},
{
"epoch": 1.29,
"learning_rate": 0.0002981342628345535,
"loss": 1.8177,
"step": 296
},
{
"epoch": 1.29,
"learning_rate": 0.0002917101745619787,
"loss": 1.7852,
"step": 298
},
{
"epoch": 1.3,
"learning_rate": 0.00028532742442566734,
"loss": 1.8172,
"step": 300
},
{
"epoch": 1.31,
"learning_rate": 0.00027898727917494,
"loss": 1.8626,
"step": 302
},
{
"epoch": 1.32,
"learning_rate": 0.00027269099710356055,
"loss": 1.8716,
"step": 304
},
{
"epoch": 1.33,
"learning_rate": 0.0002664398278000076,
"loss": 1.7199,
"step": 306
},
{
"epoch": 1.34,
"learning_rate": 0.0002602350118994782,
"loss": 1.8076,
"step": 308
},
{
"epoch": 1.35,
"learning_rate": 0.0002540777808376632,
"loss": 1.8665,
"step": 310
},
{
"epoch": 1.36,
"learning_rate": 0.0002479693566063527,
"loss": 1.9296,
"step": 312
},
{
"epoch": 1.36,
"learning_rate": 0.0002419109515109137,
"loss": 1.8659,
"step": 314
},
{
"epoch": 1.37,
"learning_rate": 0.0002359037679296901,
"loss": 1.7887,
"step": 316
},
{
"epoch": 1.38,
"learning_rate": 0.00022994899807537357,
"loss": 1.6962,
"step": 318
},
{
"epoch": 1.39,
"learning_rate": 0.0002240478237583915,
"loss": 1.8846,
"step": 320
},
{
"epoch": 1.4,
"learning_rate": 0.00021820141615236022,
"loss": 1.8557,
"step": 322
},
{
"epoch": 1.41,
"learning_rate": 0.00021241093556164758,
"loss": 1.8232,
"step": 324
},
{
"epoch": 1.42,
"learning_rate": 0.00020667753119109423,
"loss": 1.8227,
"step": 326
},
{
"epoch": 1.42,
"learning_rate": 0.0002010023409179378,
"loss": 1.9507,
"step": 328
},
{
"epoch": 1.43,
"learning_rate": 0.0001953864910659835,
"loss": 1.7959,
"step": 330
},
{
"epoch": 1.44,
"learning_rate": 0.00018983109618207018,
"loss": 1.777,
"step": 332
},
{
"epoch": 1.45,
"learning_rate": 0.00018433725881487057,
"loss": 1.8493,
"step": 334
},
{
"epoch": 1.46,
"learning_rate": 0.00017890606929607666,
"loss": 1.8469,
"step": 336
},
{
"epoch": 1.47,
"learning_rate": 0.00017353860552400585,
"loss": 1.926,
"step": 338
},
{
"epoch": 1.48,
"learning_rate": 0.00016823593274967703,
"loss": 1.8596,
"step": 340
},
{
"epoch": 1.49,
"learning_rate": 0.00016299910336539642,
"loss": 1.9387,
"step": 342
},
{
"epoch": 1.49,
"learning_rate": 0.0001578291566958942,
"loss": 1.8431,
"step": 344
},
{
"epoch": 1.5,
"learning_rate": 0.00015272711879205648,
"loss": 1.7386,
"step": 346
},
{
"epoch": 1.51,
"learning_rate": 0.00014769400222728975,
"loss": 1.8143,
"step": 348
},
{
"epoch": 1.52,
"learning_rate": 0.00014273080589656125,
"loss": 1.8075,
"step": 350
},
{
"epoch": 1.53,
"learning_rate": 0.00013783851481815403,
"loss": 1.8118,
"step": 352
},
{
"epoch": 1.54,
"learning_rate": 0.00013301809993817543,
"loss": 1.8411,
"step": 354
},
{
"epoch": 1.55,
"learning_rate": 0.00012827051793785942,
"loss": 1.8274,
"step": 356
},
{
"epoch": 1.55,
"learning_rate": 0.00012359671104369846,
"loss": 1.912,
"step": 358
},
{
"epoch": 1.56,
"learning_rate": 0.00011899760684044514,
"loss": 1.7547,
"step": 360
},
{
"epoch": 1.57,
"learning_rate": 0.00011447411808702052,
"loss": 1.9167,
"step": 362
},
{
"epoch": 1.58,
"learning_rate": 0.00011002714253536233,
"loss": 1.7957,
"step": 364
},
{
"epoch": 1.59,
"learning_rate": 0.00010565756275225474,
"loss": 1.8266,
"step": 366
},
{
"epoch": 1.6,
"learning_rate": 0.00010136624594416826,
"loss": 1.8285,
"step": 368
},
{
"epoch": 1.61,
"learning_rate": 9.715404378515225e-05,
"loss": 1.8656,
"step": 370
},
{
"epoch": 1.62,
"learning_rate": 9.302179224780604e-05,
"loss": 1.8691,
"step": 372
},
{
"epoch": 1.62,
"learning_rate": 8.897031143736866e-05,
"loss": 1.7859,
"step": 374
},
{
"epoch": 1.63,
"learning_rate": 8.50004054289577e-05,
"loss": 1.822,
"step": 376
},
{
"epoch": 1.64,
"learning_rate": 8.11128621079878e-05,
"loss": 1.9343,
"step": 378
},
{
"epoch": 1.65,
"learning_rate": 7.73084530138044e-05,
"loss": 1.8374,
"step": 380
},
{
"epoch": 1.66,
"learning_rate": 7.358793318655971e-05,
"loss": 1.8533,
"step": 382
},
{
"epoch": 1.67,
"learning_rate": 6.995204101736469e-05,
"loss": 1.9424,
"step": 384
},
{
"epoch": 1.68,
"learning_rate": 6.640149810174423e-05,
"loss": 1.8204,
"step": 386
},
{
"epoch": 1.68,
"learning_rate": 6.293700909642619e-05,
"loss": 1.9254,
"step": 388
},
{
"epoch": 1.69,
"learning_rate": 5.9559261579492316e-05,
"loss": 1.795,
"step": 390
},
{
"epoch": 1.7,
"learning_rate": 5.6268925913917855e-05,
"loss": 1.8497,
"step": 392
},
{
"epoch": 1.71,
"learning_rate": 5.30666551145288e-05,
"loss": 1.8333,
"step": 394
},
{
"epoch": 1.72,
"learning_rate": 4.995308471840132e-05,
"loss": 1.783,
"step": 396
},
{
"epoch": 1.73,
"learning_rate": 4.692883265873027e-05,
"loss": 1.9231,
"step": 398
},
{
"epoch": 1.74,
"learning_rate": 4.399449914219167e-05,
"loss": 1.83,
"step": 400
},
{
"epoch": 1.74,
"learning_rate": 4.1150666529822245e-05,
"loss": 1.773,
"step": 402
},
{
"epoch": 1.75,
"learning_rate": 3.8397899221442545e-05,
"loss": 1.8058,
"step": 404
},
{
"epoch": 1.76,
"learning_rate": 3.5736743543642534e-05,
"loss": 1.8111,
"step": 406
},
{
"epoch": 1.77,
"learning_rate": 3.316772764135606e-05,
"loss": 1.7742,
"step": 408
},
{
"epoch": 1.78,
"learning_rate": 3.069136137304201e-05,
"loss": 1.8001,
"step": 410
},
{
"epoch": 1.79,
"learning_rate": 2.830813620949596e-05,
"loss": 1.7632,
"step": 412
},
{
"epoch": 1.8,
"learning_rate": 2.6018525136310922e-05,
"loss": 1.8808,
"step": 414
},
{
"epoch": 1.81,
"learning_rate": 2.3822982560005814e-05,
"loss": 1.8566,
"step": 416
},
{
"epoch": 1.81,
"learning_rate": 2.1721944217842515e-05,
"loss": 1.7656,
"step": 418
},
{
"epoch": 1.82,
"learning_rate": 1.9715827091347003e-05,
"loss": 1.9143,
"step": 420
},
{
"epoch": 1.83,
"learning_rate": 1.7805029323553768e-05,
"loss": 1.9037,
"step": 422
},
{
"epoch": 1.84,
"learning_rate": 1.5989930139988284e-05,
"loss": 1.786,
"step": 424
},
{
"epoch": 1.85,
"learning_rate": 1.4270889773404128e-05,
"loss": 1.9072,
"step": 426
},
{
"epoch": 1.86,
"learning_rate": 1.2648249392289924e-05,
"loss": 1.7677,
"step": 428
},
{
"epoch": 1.87,
"learning_rate": 1.1122331033159072e-05,
"loss": 1.9154,
"step": 430
},
{
"epoch": 1.87,
"learning_rate": 9.693437536637318e-06,
"loss": 1.8175,
"step": 432
},
{
"epoch": 1.88,
"learning_rate": 8.361852487359379e-06,
"loss": 1.7268,
"step": 434
},
{
"epoch": 1.89,
"learning_rate": 7.127840157687582e-06,
"loss": 1.8565,
"step": 436
},
{
"epoch": 1.9,
"learning_rate": 5.99164545526304e-06,
"loss": 1.9316,
"step": 438
},
{
"epoch": 1.91,
"learning_rate": 4.953493874400472e-06,
"loss": 1.792,
"step": 440
},
{
"epoch": 1.92,
"learning_rate": 4.013591451335286e-06,
"loss": 1.8061,
"step": 442
},
{
"epoch": 1.93,
"learning_rate": 3.1721247233328366e-06,
"loss": 1.8494,
"step": 444
},
{
"epoch": 1.94,
"learning_rate": 2.4292606916674344e-06,
"loss": 1.8836,
"step": 446
},
{
"epoch": 1.94,
"learning_rate": 1.7851467884782979e-06,
"loss": 1.7925,
"step": 448
},
{
"epoch": 1.95,
"learning_rate": 1.2399108475099063e-06,
"loss": 1.8539,
"step": 450
},
{
"epoch": 1.96,
"learning_rate": 7.936610787411258e-07,
"loss": 1.8058,
"step": 452
},
{
"epoch": 1.97,
"learning_rate": 4.4648604690966656e-07,
"loss": 1.8023,
"step": 454
},
{
"epoch": 1.98,
"learning_rate": 1.984546539349763e-07,
"loss": 1.777,
"step": 456
},
{
"epoch": 1.99,
"learning_rate": 4.9616125243623354e-08,
"loss": 1.8195,
"step": 458
},
{
"epoch": 2.0,
"learning_rate": 0.0,
"loss": 1.8233,
"step": 460
},
{
"epoch": 2.0,
"step": 460,
"total_flos": 4.03948633545769e+16,
"train_loss": 1.8878583851067916,
"train_runtime": 22914.0021,
"train_samples_per_second": 5.159,
"train_steps_per_second": 0.02
}
],
"max_steps": 460,
"num_train_epochs": 2,
"total_flos": 4.03948633545769e+16,
"trial_name": null,
"trial_params": null
}