{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9953917050691244,
"global_step": 162,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.000000000000001e-06,
"loss": 1.4312,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 8.000000000000001e-06,
"loss": 1.4237,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 1.2e-05,
"loss": 1.3934,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.3338,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 2e-05,
"loss": 1.3131,
"step": 5
},
{
"epoch": 0.04,
"learning_rate": 1.9997998037428528e-05,
"loss": 1.2994,
"step": 6
},
{
"epoch": 0.04,
"learning_rate": 1.999199295128493e-05,
"loss": 1.2578,
"step": 7
},
{
"epoch": 0.05,
"learning_rate": 1.998198714596076e-05,
"loss": 1.2713,
"step": 8
},
{
"epoch": 0.06,
"learning_rate": 1.996798462770555e-05,
"loss": 1.2291,
"step": 9
},
{
"epoch": 0.06,
"learning_rate": 1.994999100302281e-05,
"loss": 1.2212,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 1.992801347642515e-05,
"loss": 1.2206,
"step": 11
},
{
"epoch": 0.07,
"learning_rate": 1.9902060847549716e-05,
"loss": 1.2123,
"step": 12
},
{
"epoch": 0.08,
"learning_rate": 1.987214350763483e-05,
"loss": 1.2006,
"step": 13
},
{
"epoch": 0.09,
"learning_rate": 1.9838273435359447e-05,
"loss": 1.1977,
"step": 14
},
{
"epoch": 0.09,
"learning_rate": 1.9800464192046956e-05,
"loss": 1.1972,
"step": 15
},
{
"epoch": 0.1,
"learning_rate": 1.9758730916235356e-05,
"loss": 1.201,
"step": 16
},
{
"epoch": 0.1,
"learning_rate": 1.9713090317615877e-05,
"loss": 1.1958,
"step": 17
},
{
"epoch": 0.11,
"learning_rate": 1.966356067034256e-05,
"loss": 1.1706,
"step": 18
},
{
"epoch": 0.12,
"learning_rate": 1.96101618057154e-05,
"loss": 1.1926,
"step": 19
},
{
"epoch": 0.12,
"learning_rate": 1.9552915104240067e-05,
"loss": 1.192,
"step": 20
},
{
"epoch": 0.13,
"learning_rate": 1.9491843487067305e-05,
"loss": 1.1897,
"step": 21
},
{
"epoch": 0.14,
"learning_rate": 1.9426971406815464e-05,
"loss": 1.1891,
"step": 22
},
{
"epoch": 0.14,
"learning_rate": 1.9358324837779864e-05,
"loss": 1.186,
"step": 23
},
{
"epoch": 0.15,
"learning_rate": 1.9285931265532875e-05,
"loss": 1.1704,
"step": 24
},
{
"epoch": 0.15,
"learning_rate": 1.920981967591891e-05,
"loss": 1.1822,
"step": 25
},
{
"epoch": 0.16,
"learning_rate": 1.9130020543448705e-05,
"loss": 1.1682,
"step": 26
},
{
"epoch": 0.17,
"learning_rate": 1.9046565819097546e-05,
"loss": 1.181,
"step": 27
},
{
"epoch": 0.17,
"learning_rate": 1.895948891751234e-05,
"loss": 1.169,
"step": 28
},
{
"epoch": 0.18,
"learning_rate": 1.8868824703632658e-05,
"loss": 1.1743,
"step": 29
},
{
"epoch": 0.18,
"learning_rate": 1.8774609478731048e-05,
"loss": 1.1678,
"step": 30
},
{
"epoch": 0.19,
"learning_rate": 1.8676880965878293e-05,
"loss": 1.1762,
"step": 31
},
{
"epoch": 0.2,
"learning_rate": 1.857567829483937e-05,
"loss": 1.1637,
"step": 32
},
{
"epoch": 0.2,
"learning_rate": 1.84710419864062e-05,
"loss": 1.1644,
"step": 33
},
{
"epoch": 0.21,
"learning_rate": 1.8363013936173393e-05,
"loss": 1.1644,
"step": 34
},
{
"epoch": 0.22,
"learning_rate": 1.8251637397763597e-05,
"loss": 1.1557,
"step": 35
},
{
"epoch": 0.22,
"learning_rate": 1.8136956965509064e-05,
"loss": 1.161,
"step": 36
},
{
"epoch": 0.23,
"learning_rate": 1.8019018556596402e-05,
"loss": 1.15,
"step": 37
},
{
"epoch": 0.23,
"learning_rate": 1.7897869392681685e-05,
"loss": 1.1704,
"step": 38
},
{
"epoch": 0.24,
"learning_rate": 1.7773557980983264e-05,
"loss": 1.1521,
"step": 39
},
{
"epoch": 0.25,
"learning_rate": 1.7646134094859816e-05,
"loss": 1.1578,
"step": 40
},
{
"epoch": 0.25,
"learning_rate": 1.7515648753881495e-05,
"loss": 1.1578,
"step": 41
},
{
"epoch": 0.26,
"learning_rate": 1.738215420340205e-05,
"loss": 1.1615,
"step": 42
},
{
"epoch": 0.26,
"learning_rate": 1.7245703893640188e-05,
"loss": 1.157,
"step": 43
},
{
"epoch": 0.27,
"learning_rate": 1.7106352458278524e-05,
"loss": 1.14,
"step": 44
},
{
"epoch": 0.28,
"learning_rate": 1.696415569258862e-05,
"loss": 1.1552,
"step": 45
},
{
"epoch": 0.28,
"learning_rate": 1.6819170531091018e-05,
"loss": 1.1595,
"step": 46
},
{
"epoch": 0.29,
"learning_rate": 1.667145502475907e-05,
"loss": 1.1568,
"step": 47
},
{
"epoch": 0.29,
"learning_rate": 1.6521068317775756e-05,
"loss": 1.1489,
"step": 48
},
{
"epoch": 0.3,
"learning_rate": 1.6368070623852794e-05,
"loss": 1.1649,
"step": 49
},
{
"epoch": 0.31,
"learning_rate": 1.6212523202121547e-05,
"loss": 1.1506,
"step": 50
},
{
"epoch": 0.31,
"learning_rate": 1.6054488332605282e-05,
"loss": 1.1458,
"step": 51
},
{
"epoch": 0.32,
"learning_rate": 1.589402929128276e-05,
"loss": 1.1457,
"step": 52
},
{
"epoch": 0.33,
"learning_rate": 1.573121032475297e-05,
"loss": 1.1539,
"step": 53
},
{
"epoch": 0.33,
"learning_rate": 1.5566096624511306e-05,
"loss": 1.155,
"step": 54
},
{
"epoch": 0.34,
"learning_rate": 1.5398754300847346e-05,
"loss": 1.1385,
"step": 55
},
{
"epoch": 0.34,
"learning_rate": 1.5229250356374804e-05,
"loss": 1.1467,
"step": 56
},
{
"epoch": 0.35,
"learning_rate": 1.5057652659204198e-05,
"loss": 1.1619,
"step": 57
},
{
"epoch": 0.36,
"learning_rate": 1.4884029915768945e-05,
"loss": 1.1297,
"step": 58
},
{
"epoch": 0.36,
"learning_rate": 1.4708451643315827e-05,
"loss": 1.144,
"step": 59
},
{
"epoch": 0.37,
"learning_rate": 1.4530988142070802e-05,
"loss": 1.158,
"step": 60
},
{
"epoch": 0.37,
"learning_rate": 1.4351710467091337e-05,
"loss": 1.1486,
"step": 61
},
{
"epoch": 0.38,
"learning_rate": 1.4170690399816469e-05,
"loss": 1.1456,
"step": 62
},
{
"epoch": 0.39,
"learning_rate": 1.3988000419326073e-05,
"loss": 1.1473,
"step": 63
},
{
"epoch": 0.39,
"learning_rate": 1.3803713673320773e-05,
"loss": 1.143,
"step": 64
},
{
"epoch": 0.4,
"learning_rate": 1.3617903948834155e-05,
"loss": 1.1538,
"step": 65
},
{
"epoch": 0.41,
"learning_rate": 1.343064564268899e-05,
"loss": 1.1435,
"step": 66
},
{
"epoch": 0.41,
"learning_rate": 1.324201373170929e-05,
"loss": 1.1491,
"step": 67
},
{
"epoch": 0.42,
"learning_rate": 1.3052083742700172e-05,
"loss": 1.1334,
"step": 68
},
{
"epoch": 0.42,
"learning_rate": 1.286093172220748e-05,
"loss": 1.1417,
"step": 69
},
{
"epoch": 0.43,
"learning_rate": 1.2668634206069305e-05,
"loss": 1.1518,
"step": 70
},
{
"epoch": 0.44,
"learning_rate": 1.2475268188771628e-05,
"loss": 1.1494,
"step": 71
},
{
"epoch": 0.44,
"learning_rate": 1.2280911092620298e-05,
"loss": 1.1419,
"step": 72
},
{
"epoch": 0.45,
"learning_rate": 1.2085640736741708e-05,
"loss": 1.1357,
"step": 73
},
{
"epoch": 0.45,
"learning_rate": 1.1889535305924619e-05,
"loss": 1.1433,
"step": 74
},
{
"epoch": 0.46,
"learning_rate": 1.1692673319315541e-05,
"loss": 1.1513,
"step": 75
},
{
"epoch": 0.47,
"learning_rate": 1.1495133598980263e-05,
"loss": 1.1388,
"step": 76
},
{
"epoch": 0.47,
"learning_rate": 1.1296995238344084e-05,
"loss": 1.1534,
"step": 77
},
{
"epoch": 0.48,
"learning_rate": 1.1098337570523397e-05,
"loss": 1.1606,
"step": 78
},
{
"epoch": 0.49,
"learning_rate": 1.08992401365613e-05,
"loss": 1.1422,
"step": 79
},
{
"epoch": 0.49,
"learning_rate": 1.0699782653579973e-05,
"loss": 1.142,
"step": 80
},
{
"epoch": 0.5,
"learning_rate": 1.0500044982862519e-05,
"loss": 1.1407,
"step": 81
},
{
"epoch": 0.5,
"learning_rate": 1.0300107097877114e-05,
"loss": 1.143,
"step": 82
},
{
"epoch": 0.51,
"learning_rate": 1.0100049052256236e-05,
"loss": 1.1441,
"step": 83
},
{
"epoch": 0.52,
"learning_rate": 9.899950947743767e-06,
"loss": 1.1395,
"step": 84
},
{
"epoch": 0.52,
"learning_rate": 9.699892902122887e-06,
"loss": 1.1412,
"step": 85
},
{
"epoch": 0.53,
"learning_rate": 9.499955017137485e-06,
"loss": 1.1392,
"step": 86
},
{
"epoch": 0.53,
"learning_rate": 9.300217346420029e-06,
"loss": 1.1427,
"step": 87
},
{
"epoch": 0.54,
"learning_rate": 9.100759863438702e-06,
"loss": 1.1344,
"step": 88
},
{
"epoch": 0.55,
"learning_rate": 8.901662429476607e-06,
"loss": 1.1436,
"step": 89
},
{
"epoch": 0.55,
"learning_rate": 8.703004761655918e-06,
"loss": 1.1475,
"step": 90
},
{
"epoch": 0.56,
"learning_rate": 8.504866401019738e-06,
"loss": 1.1404,
"step": 91
},
{
"epoch": 0.57,
"learning_rate": 8.30732668068446e-06,
"loss": 1.1339,
"step": 92
},
{
"epoch": 0.57,
"learning_rate": 8.110464694075383e-06,
"loss": 1.1467,
"step": 93
},
{
"epoch": 0.58,
"learning_rate": 7.914359263258295e-06,
"loss": 1.1414,
"step": 94
},
{
"epoch": 0.58,
"learning_rate": 7.719088907379705e-06,
"loss": 1.1364,
"step": 95
},
{
"epoch": 0.59,
"learning_rate": 7.524731811228374e-06,
"loss": 1.1235,
"step": 96
},
{
"epoch": 0.6,
"learning_rate": 7.331365793930698e-06,
"loss": 1.1377,
"step": 97
},
{
"epoch": 0.6,
"learning_rate": 7.139068277792524e-06,
"loss": 1.1398,
"step": 98
},
{
"epoch": 0.61,
"learning_rate": 6.947916257299829e-06,
"loss": 1.1281,
"step": 99
},
{
"epoch": 0.61,
"learning_rate": 6.757986268290713e-06,
"loss": 1.1355,
"step": 100
},
{
"epoch": 0.62,
"learning_rate": 6.569354357311015e-06,
"loss": 1.1415,
"step": 101
},
{
"epoch": 0.63,
"learning_rate": 6.382096051165847e-06,
"loss": 1.1266,
"step": 102
},
{
"epoch": 0.63,
"learning_rate": 6.196286326679231e-06,
"loss": 1.1503,
"step": 103
},
{
"epoch": 0.64,
"learning_rate": 6.0119995806739316e-06,
"loss": 1.1375,
"step": 104
},
{
"epoch": 0.65,
"learning_rate": 5.829309600183536e-06,
"loss": 1.1273,
"step": 105
},
{
"epoch": 0.65,
"learning_rate": 5.648289532908666e-06,
"loss": 1.1294,
"step": 106
},
{
"epoch": 0.66,
"learning_rate": 5.469011857929202e-06,
"loss": 1.1322,
"step": 107
},
{
"epoch": 0.66,
"learning_rate": 5.291548356684177e-06,
"loss": 1.1385,
"step": 108
},
{
"epoch": 0.67,
"learning_rate": 5.115970084231059e-06,
"loss": 1.1479,
"step": 109
},
{
"epoch": 0.68,
"learning_rate": 4.942347340795803e-06,
"loss": 1.1376,
"step": 110
},
{
"epoch": 0.68,
"learning_rate": 4.7707496436252e-06,
"loss": 1.1315,
"step": 111
},
{
"epoch": 0.69,
"learning_rate": 4.601245699152659e-06,
"loss": 1.1318,
"step": 112
},
{
"epoch": 0.69,
"learning_rate": 4.4339033754886974e-06,
"loss": 1.1319,
"step": 113
},
{
"epoch": 0.7,
"learning_rate": 4.268789675247029e-06,
"loss": 1.1293,
"step": 114
},
{
"epoch": 0.71,
"learning_rate": 4.105970708717244e-06,
"loss": 1.1245,
"step": 115
},
{
"epoch": 0.71,
"learning_rate": 3.945511667394719e-06,
"loss": 1.1403,
"step": 116
},
{
"epoch": 0.72,
"learning_rate": 3.787476797878459e-06,
"loss": 1.141,
"step": 117
},
{
"epoch": 0.73,
"learning_rate": 3.6319293761472073e-06,
"loss": 1.1272,
"step": 118
},
{
"epoch": 0.73,
"learning_rate": 3.47893168222425e-06,
"loss": 1.1353,
"step": 119
},
{
"epoch": 0.74,
"learning_rate": 3.3285449752409315e-06,
"loss": 1.1352,
"step": 120
},
{
"epoch": 0.74,
"learning_rate": 3.1808294689089856e-06,
"loss": 1.1348,
"step": 121
},
{
"epoch": 0.75,
"learning_rate": 3.035844307411384e-06,
"loss": 1.1303,
"step": 122
},
{
"epoch": 0.76,
"learning_rate": 2.8936475417214795e-06,
"loss": 1.1252,
"step": 123
},
{
"epoch": 0.76,
"learning_rate": 2.754296106359811e-06,
"loss": 1.1362,
"step": 124
},
{
"epoch": 0.77,
"learning_rate": 2.6178457965979543e-06,
"loss": 1.1346,
"step": 125
},
{
"epoch": 0.77,
"learning_rate": 2.484351246118507e-06,
"loss": 1.1387,
"step": 126
},
{
"epoch": 0.78,
"learning_rate": 2.353865905140187e-06,
"loss": 1.1345,
"step": 127
},
{
"epoch": 0.79,
"learning_rate": 2.226442019016739e-06,
"loss": 1.1253,
"step": 128
},
{
"epoch": 0.79,
"learning_rate": 2.1021306073183166e-06,
"loss": 1.1274,
"step": 129
},
{
"epoch": 0.8,
"learning_rate": 1.9809814434036e-06,
"loss": 1.1356,
"step": 130
},
{
"epoch": 0.8,
"learning_rate": 1.8630430344909378e-06,
"loss": 1.1375,
"step": 131
},
{
"epoch": 0.81,
"learning_rate": 1.748362602236403e-06,
"loss": 1.1236,
"step": 132
},
{
"epoch": 0.82,
"learning_rate": 1.63698606382661e-06,
"loss": 1.1405,
"step": 133
},
{
"epoch": 0.82,
"learning_rate": 1.528958013593801e-06,
"loss": 1.1332,
"step": 134
},
{
"epoch": 0.83,
"learning_rate": 1.4243217051606285e-06,
"loss": 1.1283,
"step": 135
},
{
"epoch": 0.84,
"learning_rate": 1.3231190341217081e-06,
"loss": 1.1321,
"step": 136
},
{
"epoch": 0.84,
"learning_rate": 1.2253905212689554e-06,
"loss": 1.1269,
"step": 137
},
{
"epoch": 0.85,
"learning_rate": 1.1311752963673438e-06,
"loss": 1.1288,
"step": 138
},
{
"epoch": 0.85,
"learning_rate": 1.0405110824876619e-06,
"loss": 1.1348,
"step": 139
},
{
"epoch": 0.86,
"learning_rate": 9.534341809024583e-07,
"loss": 1.1371,
"step": 140
},
{
"epoch": 0.87,
"learning_rate": 8.699794565512976e-07,
"loss": 1.1294,
"step": 141
},
{
"epoch": 0.87,
"learning_rate": 7.901803240810901e-07,
"loss": 1.1365,
"step": 142
},
{
"epoch": 0.88,
"learning_rate": 7.140687344671282e-07,
"loss": 1.1381,
"step": 143
},
{
"epoch": 0.88,
"learning_rate": 6.416751622201389e-07,
"loss": 1.1416,
"step": 144
},
{
"epoch": 0.89,
"learning_rate": 5.730285931845381e-07,
"loss": 1.1299,
"step": 145
},
{
"epoch": 0.9,
"learning_rate": 5.08156512932696e-07,
"loss": 1.1346,
"step": 146
},
{
"epoch": 0.9,
"learning_rate": 4.4708489575993496e-07,
"loss": 1.139,
"step": 147
},
{
"epoch": 0.91,
"learning_rate": 3.8983819428460414e-07,
"loss": 1.119,
"step": 148
},
{
"epoch": 0.92,
"learning_rate": 3.364393296574453e-07,
"loss": 1.1309,
"step": 149
},
{
"epoch": 0.92,
"learning_rate": 2.8690968238412444e-07,
"loss": 1.1268,
"step": 150
},
{
"epoch": 0.93,
"learning_rate": 2.41269083764647e-07,
"loss": 1.1372,
"step": 151
},
{
"epoch": 0.93,
"learning_rate": 1.9953580795304628e-07,
"loss": 1.1429,
"step": 152
},
{
"epoch": 0.94,
"learning_rate": 1.6172656464055748e-07,
"loss": 1.1181,
"step": 153
},
{
"epoch": 0.95,
"learning_rate": 1.278564923651704e-07,
"loss": 1.1209,
"step": 154
},
{
"epoch": 0.95,
"learning_rate": 9.793915245028595e-08,
"loss": 1.123,
"step": 155
},
{
"epoch": 0.96,
"learning_rate": 7.19865235748507e-08,
"loss": 1.1397,
"step": 156
},
{
"epoch": 0.96,
"learning_rate": 5.000899697719552e-08,
"loss": 1.136,
"step": 157
},
{
"epoch": 0.97,
"learning_rate": 3.201537229445051e-08,
"loss": 1.137,
"step": 158
},
{
"epoch": 0.98,
"learning_rate": 1.8012854039244354e-08,
"loss": 1.1256,
"step": 159
},
{
"epoch": 0.98,
"learning_rate": 8.007048715068522e-09,
"loss": 1.1329,
"step": 160
},
{
"epoch": 0.99,
"learning_rate": 2.0019625714740032e-09,
"loss": 1.1339,
"step": 161
},
{
"epoch": 1.0,
"learning_rate": 0.0,
"loss": 1.1352,
"step": 162
},
{
"epoch": 1.0,
"step": 162,
"total_flos": 8.291234906646774e+17,
"train_loss": 1.1575636539930179,
"train_runtime": 7042.5737,
"train_samples_per_second": 2.956,
"train_steps_per_second": 0.023
}
],
"max_steps": 162,
"num_train_epochs": 1,
"total_flos": 8.291234906646774e+17,
"trial_name": null,
"trial_params": null
}