{
  "best_metric": 0.6451979106514778,
  "best_model_checkpoint": "./runtime-masked/bert_uncased_L-2_H-768_A-12-mlm-multi-emails-hq/checkpoint-705",
  "epoch": 4.994708994708994,
  "global_step": 705,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 2.4999999999999998e-05,
      "loss": 3.2694,
      "step": 3
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9999999999999996e-05,
      "loss": 3.2366,
      "step": 6
    },
    {
      "epoch": 0.06,
      "learning_rate": 7.5e-05,
      "loss": 3.2013,
      "step": 9
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.999999999999999e-05,
      "loss": 3.0257,
      "step": 12
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.000125,
      "loss": 3.0287,
      "step": 15
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00015,
      "loss": 2.9585,
      "step": 18
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.000175,
      "loss": 2.8967,
      "step": 21
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019999999999999998,
      "loss": 2.7588,
      "step": 24
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.000225,
      "loss": 2.7953,
      "step": 27
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00025,
      "loss": 2.752,
      "step": 30
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00027499999999999996,
      "loss": 2.6644,
      "step": 33
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0003,
      "loss": 2.736,
      "step": 36
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00029998511516242686,
      "loss": 2.5937,
      "step": 39
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00029994046360381946,
      "loss": 2.6411,
      "step": 42
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0002998660541859271,
      "loss": 2.587,
      "step": 45
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0002997619016763776,
      "loss": 2.5726,
      "step": 48
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.000299628026745747,
      "loss": 2.5472,
      "step": 51
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0002994644559634565,
      "loss": 2.5179,
      "step": 54
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00029927122179249977,
      "loss": 2.4876,
      "step": 57
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00029904836258300014,
      "loss": 2.5026,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002987959225645994,
      "loss": 2.4842,
      "step": 63
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00029851395183767983,
      "loss": 2.442,
      "step": 66
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00029820250636342104,
      "loss": 2.4273,
      "step": 69
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00029786164795269374,
      "loss": 2.4646,
      "step": 72
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00029749144425379216,
      "loss": 2.3918,
      "step": 75
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0002970919687390088,
      "loss": 2.4216,
      "step": 78
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00029666330069005225,
      "loss": 2.3679,
      "step": 81
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.000296205525182313,
      "loss": 2.4738,
      "step": 84
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00029571873306797876,
      "loss": 2.4424,
      "step": 87
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0002952030209580035,
      "loss": 2.4183,
      "step": 90
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00029465849120293385,
      "loss": 2.368,
      "step": 93
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0002940852518725959,
      "loss": 2.3876,
      "step": 96
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00029348341673464726,
      "loss": 2.3105,
      "step": 99
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00029285310523199823,
      "loss": 2.3397,
      "step": 102
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0002921944424591067,
      "loss": 2.3759,
      "step": 105
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0002915075591371512,
      "loss": 2.4231,
      "step": 108
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.0002907925915880874,
      "loss": 2.3697,
      "step": 111
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00029004968170759317,
      "loss": 2.309,
      "step": 114
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0002892789769369072,
      "loss": 2.2677,
      "step": 117
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00028848063023356724,
      "loss": 2.3125,
      "step": 120
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002876548000410533,
      "loss": 2.3249,
      "step": 123
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0002868016502573425,
      "loss": 2.276,
      "step": 126
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0002859213502023809,
      "loss": 2.3264,
      "step": 129
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.0002850140745844796,
      "loss": 2.2838,
      "step": 132
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002840800034656413,
      "loss": 2.268,
      "step": 135
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0002831193222258246,
      "loss": 2.2771,
      "step": 138
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00028213222152615234,
      "loss": 2.3053,
      "step": 141
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.6064227132633339,
      "eval_loss": 2.1757757663726807,
      "eval_runtime": 10.2199,
      "eval_samples_per_second": 197.361,
      "eval_steps_per_second": 98.729,
      "step": 141
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002811188972710721,
      "loss": 2.7816,
      "step": 144
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00028007955056947675,
      "loss": 2.2354,
      "step": 147
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00027901438769479056,
      "loss": 2.2302,
      "step": 150
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0002779236200440321,
      "loss": 2.2056,
      "step": 153
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00027680746409585865,
      "loss": 2.2774,
      "step": 156
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00027566614136760364,
      "loss": 2.2402,
      "step": 159
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0002744998783713127,
      "loss": 2.2697,
      "step": 162
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00027330890656878943,
      "loss": 2.2966,
      "step": 165
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.0002720934623256582,
      "loss": 2.2673,
      "step": 168
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00027085378686445433,
      "loss": 2.2083,
      "step": 171
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0002695901262167495,
      "loss": 2.1908,
      "step": 174
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0002683027311743236,
      "loss": 2.1983,
      "step": 177
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00026699185723939124,
      "loss": 2.1856,
      "step": 180
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.0002656577645738938,
      "loss": 2.232,
      "step": 183
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00026430071794786644,
      "loss": 2.2705,
      "step": 186
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00026292098668689043,
      "loss": 2.1916,
      "step": 189
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00026151884461864214,
      "loss": 2.2189,
      "step": 192
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002600945700185474,
      "loss": 2.2082,
      "step": 195
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.0002586484455545538,
      "loss": 2.1623,
      "step": 198
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00025718075823103165,
      "loss": 2.1774,
      "step": 201
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.0002556917993318131,
      "loss": 2.1522,
      "step": 204
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.000254181864362383,
      "loss": 2.1744,
      "step": 207
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.0002526512529912317,
      "loss": 2.2299,
      "step": 210
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.00025110026899038105,
      "loss": 2.2391,
      "step": 213
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.00024952922017509687,
      "loss": 2.1867,
      "step": 216
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00024793841834279824,
      "loss": 2.2197,
      "step": 219
    },
    {
      "epoch": 1.57,
      "learning_rate": 0.00024632817921117696,
      "loss": 2.1504,
      "step": 222
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.0002446988223555388,
      "loss": 2.1684,
      "step": 225
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.00024305067114537896,
      "loss": 2.1997,
      "step": 228
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.0002413840526802049,
      "loss": 2.1836,
      "step": 231
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00023969929772461865,
      "loss": 2.1628,
      "step": 234
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002379967406426717,
      "loss": 2.1886,
      "step": 237
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00023627671933150574,
      "loss": 2.1225,
      "step": 240
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00023453957515429192,
      "loss": 2.2524,
      "step": 243
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.0002327856528724825,
      "loss": 2.1975,
      "step": 246
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00023101530057738793,
      "loss": 2.1573,
      "step": 249
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00022922886962109296,
      "loss": 2.0817,
      "step": 252
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00022742671454672588,
      "loss": 2.1674,
      "step": 255
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00022560919301809414,
      "loss": 2.131,
      "step": 258
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00022377666574870103,
      "loss": 2.1338,
      "step": 261
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00022192949643015655,
      "loss": 2.1217,
      "step": 264
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00022006805165999764,
      "loss": 2.1887,
      "step": 267
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00021819270086893185,
      "loss": 2.1461,
      "step": 270
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00021630381624751795,
      "loss": 2.0757,
      "step": 273
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.00021440177267229984,
      "loss": 2.1336,
      "step": 276
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.0002124869476314064,
      "loss": 2.133,
      "step": 279
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.0002105597211496339,
      "loss": 2.1556,
      "step": 282
    },
    {
      "epoch": 1.99,
      "eval_accuracy": 0.6236541534592513,
      "eval_loss": 2.058671236038208,
      "eval_runtime": 10.2746,
      "eval_samples_per_second": 196.309,
      "eval_steps_per_second": 98.203,
      "step": 282
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.0002086204757130243,
      "loss": 2.6096,
      "step": 285
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.0002066695961929554,
      "loss": 2.1364,
      "step": 288
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.00020470746976975762,
      "loss": 2.0328,
      "step": 291
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.00020273448585587248,
      "loss": 2.1069,
      "step": 294
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00020075103601856773,
      "loss": 2.1156,
      "step": 297
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.00019875751390222566,
      "loss": 2.0617,
      "step": 300
    },
    {
      "epoch": 2.15,
      "learning_rate": 0.0001967543151502182,
      "loss": 2.1158,
      "step": 303
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.00019474183732638608,
      "loss": 2.0996,
      "step": 306
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.00019272047983613628,
      "loss": 2.1118,
      "step": 309
    },
    {
      "epoch": 2.21,
      "learning_rate": 0.00019069064384717442,
      "loss": 2.0661,
      "step": 312
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.0001886527322098871,
      "loss": 2.0963,
      "step": 315
    },
    {
      "epoch": 2.25,
      "learning_rate": 0.00018660714937739043,
      "loss": 2.039,
      "step": 318
    },
    {
      "epoch": 2.28,
      "learning_rate": 0.00018455430132526047,
      "loss": 2.1014,
      "step": 321
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.00018249459547096147,
      "loss": 2.1209,
      "step": 324
    },
    {
      "epoch": 2.32,
      "learning_rate": 0.00018042844059298798,
      "loss": 2.0982,
      "step": 327
    },
    {
      "epoch": 2.34,
      "learning_rate": 0.00017835624674973673,
      "loss": 2.0784,
      "step": 330
    },
    {
      "epoch": 2.36,
      "learning_rate": 0.00017627842519812485,
      "loss": 2.0276,
      "step": 333
    },
    {
      "epoch": 2.38,
      "learning_rate": 0.0001741953883119696,
      "loss": 2.0388,
      "step": 336
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.00017210754950014714,
      "loss": 2.0352,
      "step": 339
    },
    {
      "epoch": 2.42,
      "learning_rate": 0.00017001532312454533,
      "loss": 2.0596,
      "step": 342
    },
    {
      "epoch": 2.44,
      "learning_rate": 0.00016791912441782778,
      "loss": 2.0419,
      "step": 345
    },
    {
      "epoch": 2.47,
      "learning_rate": 0.00016581936940102474,
      "loss": 2.1015,
      "step": 348
    },
    {
      "epoch": 2.49,
      "learning_rate": 0.00016371647480096783,
      "loss": 2.0491,
      "step": 351
    },
    {
      "epoch": 2.51,
      "learning_rate": 0.00016161085796758442,
      "loss": 2.0816,
      "step": 354
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.00015950293679106852,
      "loss": 2.0958,
      "step": 357
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.00015739312961894465,
      "loss": 2.084,
      "step": 360
    },
    {
      "epoch": 2.57,
      "learning_rate": 0.00015528185517304027,
      "loss": 2.0794,
      "step": 363
    },
    {
      "epoch": 2.59,
      "learning_rate": 0.00015316953246638482,
      "loss": 2.0948,
      "step": 366
    },
    {
      "epoch": 2.61,
      "learning_rate": 0.00015105658072005004,
      "loss": 2.0494,
      "step": 369
    },
    {
      "epoch": 2.63,
      "learning_rate": 0.00014894341927994994,
      "loss": 2.0161,
      "step": 372
    },
    {
      "epoch": 2.66,
      "learning_rate": 0.0001468304675336152,
      "loss": 2.0325,
      "step": 375
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.00014471814482695968,
      "loss": 2.0537,
      "step": 378
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.00014260687038105532,
      "loss": 2.0511,
      "step": 381
    },
    {
      "epoch": 2.72,
      "learning_rate": 0.00014049706320893148,
      "loss": 2.069,
      "step": 384
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.0001383891420324156,
      "loss": 2.0702,
      "step": 387
    },
    {
      "epoch": 2.76,
      "learning_rate": 0.00013628352519903214,
      "loss": 2.0198,
      "step": 390
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.00013418063059897524,
      "loss": 2.0515,
      "step": 393
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.0001320808755821722,
      "loss": 2.0652,
      "step": 396
    },
    {
      "epoch": 2.83,
      "learning_rate": 0.00012998467687545465,
      "loss": 2.0401,
      "step": 399
    },
    {
      "epoch": 2.85,
      "learning_rate": 0.00012789245049985284,
      "loss": 1.9955,
      "step": 402
    },
    {
      "epoch": 2.87,
      "learning_rate": 0.00012580461168803036,
      "loss": 1.9932,
      "step": 405
    },
    {
      "epoch": 2.89,
      "learning_rate": 0.00012372157480187513,
      "loss": 1.9957,
      "step": 408
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.00012164375325026323,
      "loss": 2.0278,
      "step": 411
    },
    {
      "epoch": 2.93,
      "learning_rate": 0.00011957155940701203,
      "loss": 2.0055,
      "step": 414
    },
    {
      "epoch": 2.95,
      "learning_rate": 0.00011750540452903852,
      "loss": 2.1282,
      "step": 417
    },
    {
      "epoch": 2.97,
      "learning_rate": 0.00011544569867473953,
      "loss": 2.0752,
      "step": 420
    },
    {
      "epoch": 2.99,
      "learning_rate": 0.00011339285062260957,
      "loss": 2.0616,
      "step": 423
    },
    {
      "epoch": 2.99,
      "eval_accuracy": 0.6354889456059845,
      "eval_loss": 1.9779837131500244,
      "eval_runtime": 10.2076,
      "eval_samples_per_second": 197.597,
      "eval_steps_per_second": 98.848,
      "step": 423
    },
    {
      "epoch": 3.02,
      "learning_rate": 0.00011134726779011288,
      "loss": 2.5405,
      "step": 426
    },
    {
      "epoch": 3.04,
      "learning_rate": 0.00010930935615282558,
      "loss": 2.0141,
      "step": 429
    },
    {
      "epoch": 3.06,
      "learning_rate": 0.00010727952016386371,
      "loss": 2.039,
      "step": 432
    },
    {
      "epoch": 3.08,
      "learning_rate": 0.00010525816267361396,
      "loss": 2.0632,
      "step": 435
    },
    {
      "epoch": 3.11,
      "learning_rate": 0.00010324568484978182,
      "loss": 1.9622,
      "step": 438
    },
    {
      "epoch": 3.13,
      "learning_rate": 0.00010124248609777434,
      "loss": 2.0226,
      "step": 441
    },
    {
      "epoch": 3.15,
      "learning_rate": 9.924896398143224e-05,
      "loss": 2.0174,
      "step": 444
    },
    {
      "epoch": 3.17,
      "learning_rate": 9.726551414412753e-05,
      "loss": 1.9791,
      "step": 447
    },
    {
      "epoch": 3.19,
      "learning_rate": 9.529253023024234e-05,
      "loss": 2.0418,
      "step": 450
    },
    {
      "epoch": 3.21,
      "learning_rate": 9.33304038070446e-05,
      "loss": 1.9825,
      "step": 453
    },
    {
      "epoch": 3.23,
      "learning_rate": 9.137952428697568e-05,
      "loss": 2.0014,
      "step": 456
    },
    {
      "epoch": 3.25,
      "learning_rate": 8.944027885036605e-05,
      "loss": 1.9395,
      "step": 459
    },
    {
      "epoch": 3.28,
      "learning_rate": 8.751305236859359e-05,
      "loss": 2.0189,
      "step": 462
    },
    {
      "epoch": 3.3,
      "learning_rate": 8.559822732770019e-05,
      "loss": 1.9629,
      "step": 465
    },
    {
      "epoch": 3.32,
      "learning_rate": 8.3696183752482e-05,
      "loss": 2.0085,
      "step": 468
    },
    {
      "epoch": 3.34,
      "learning_rate": 8.180729913106815e-05,
      "loss": 1.9725,
      "step": 471
    },
    {
      "epoch": 3.36,
      "learning_rate": 7.993194834000229e-05,
      "loss": 1.9946,
      "step": 474
    },
    {
      "epoch": 3.38,
      "learning_rate": 7.807050356984345e-05,
      "loss": 2.0058,
      "step": 477
    },
    {
      "epoch": 3.4,
      "learning_rate": 7.622333425129895e-05,
      "loss": 2.0525,
      "step": 480
    },
    {
      "epoch": 3.42,
      "learning_rate": 7.439080698190579e-05,
      "loss": 1.9817,
      "step": 483
    },
    {
      "epoch": 3.44,
      "learning_rate": 7.25732854532741e-05,
      "loss": 2.0297,
      "step": 486
    },
    {
      "epoch": 3.47,
      "learning_rate": 7.077113037890702e-05,
      "loss": 2.0163,
      "step": 489
    },
    {
      "epoch": 3.49,
      "learning_rate": 6.898469942261206e-05,
      "loss": 2.0449,
      "step": 492
    },
    {
      "epoch": 3.51,
      "learning_rate": 6.721434712751744e-05,
      "loss": 1.9419,
      "step": 495
    },
    {
      "epoch": 3.53,
      "learning_rate": 6.546042484570807e-05,
      "loss": 2.0181,
      "step": 498
    },
    {
      "epoch": 3.55,
      "learning_rate": 6.372328066849426e-05,
      "loss": 1.9761,
      "step": 501
    },
    {
      "epoch": 3.57,
      "learning_rate": 6.200325935732827e-05,
      "loss": 1.9969,
      "step": 504
    },
    {
      "epoch": 3.59,
      "learning_rate": 6.030070227538133e-05,
      "loss": 1.9515,
      "step": 507
    },
    {
      "epoch": 3.61,
      "learning_rate": 5.8615947319795044e-05,
      "loss": 2.0222,
      "step": 510
    },
    {
      "epoch": 3.63,
      "learning_rate": 5.6949328854621045e-05,
      "loss": 2.0148,
      "step": 513
    },
    {
      "epoch": 3.66,
      "learning_rate": 5.5301177644461164e-05,
      "loss": 2.0086,
      "step": 516
    },
    {
      "epoch": 3.68,
      "learning_rate": 5.367182078882299e-05,
      "loss": 2.0048,
      "step": 519
    },
    {
      "epoch": 3.7,
      "learning_rate": 5.206158165720175e-05,
      "loss": 1.967,
      "step": 522
    },
    {
      "epoch": 3.72,
      "learning_rate": 5.04707798249031e-05,
      "loss": 1.9731,
      "step": 525
    },
    {
      "epoch": 3.74,
      "learning_rate": 4.889973100961892e-05,
      "loss": 1.961,
      "step": 528
    },
    {
      "epoch": 3.76,
      "learning_rate": 4.734874700876826e-05,
      "loss": 1.9674,
      "step": 531
    },
    {
      "epoch": 3.78,
      "learning_rate": 4.5818135637616936e-05,
      "loss": 1.9797,
      "step": 534
    },
    {
      "epoch": 3.8,
      "learning_rate": 4.430820066818692e-05,
      "loss": 2.0148,
      "step": 537
    },
    {
      "epoch": 3.83,
      "learning_rate": 4.281924176896837e-05,
      "loss": 1.9882,
      "step": 540
    },
    {
      "epoch": 3.85,
      "learning_rate": 4.13515544454462e-05,
      "loss": 1.9908,
      "step": 543
    },
    {
      "epoch": 3.87,
      "learning_rate": 3.990542998145262e-05,
      "loss": 1.9337,
      "step": 546
    },
    {
      "epoch": 3.89,
      "learning_rate": 3.8481155381357825e-05,
      "loss": 1.9709,
      "step": 549
    },
    {
      "epoch": 3.91,
      "learning_rate": 3.7079013313109535e-05,
      "loss": 1.9485,
      "step": 552
    },
    {
      "epoch": 3.93,
      "learning_rate": 3.569928205213354e-05,
      "loss": 1.955,
      "step": 555
    },
    {
      "epoch": 3.95,
      "learning_rate": 3.434223542610615e-05,
      "loss": 1.9797,
      "step": 558
    },
    {
      "epoch": 3.97,
      "learning_rate": 3.30081427606087e-05,
      "loss": 1.9471,
      "step": 561
    },
    {
      "epoch": 3.99,
      "learning_rate": 3.169726882567642e-05,
      "loss": 2.0084,
      "step": 564
    },
    {
      "epoch": 3.99,
      "eval_accuracy": 0.6422005133308889,
      "eval_loss": 1.9317268133163452,
      "eval_runtime": 10.2419,
      "eval_samples_per_second": 196.936,
      "eval_steps_per_second": 98.517,
      "step": 564
    },
    {
      "epoch": 4.02,
      "learning_rate": 3.0409873783250476e-05,
      "loss": 2.5298,
      "step": 567
    },
    {
      "epoch": 4.04,
      "learning_rate": 2.914621313554567e-05,
      "loss": 1.9335,
      "step": 570
    },
    {
      "epoch": 4.06,
      "learning_rate": 2.790653767434181e-05,
      "loss": 1.9663,
      "step": 573
    },
    {
      "epoch": 4.08,
      "learning_rate": 2.6691093431210596e-05,
      "loss": 1.9546,
      "step": 576
    },
    {
      "epoch": 4.11,
      "learning_rate": 2.550012162868731e-05,
      "loss": 1.9588,
      "step": 579
    },
    {
      "epoch": 4.13,
      "learning_rate": 2.4333858632396336e-05,
      "loss": 1.9079,
      "step": 582
    },
    {
      "epoch": 4.15,
      "learning_rate": 2.319253590414132e-05,
      "loss": 1.9706,
      "step": 585
    },
    {
      "epoch": 4.17,
      "learning_rate": 2.207637995596791e-05,
      "loss": 1.9244,
      "step": 588
    },
    {
      "epoch": 4.19,
      "learning_rate": 2.0985612305209404e-05,
      "loss": 1.9571,
      "step": 591
    },
    {
      "epoch": 4.21,
      "learning_rate": 1.9920449430523266e-05,
      "loss": 1.9935,
      "step": 594
    },
    {
      "epoch": 4.23,
      "learning_rate": 1.8881102728927854e-05,
      "loss": 1.965,
      "step": 597
    },
    {
      "epoch": 4.25,
      "learning_rate": 1.7867778473847695e-05,
      "loss": 1.9879,
      "step": 600
    },
    {
      "epoch": 4.28,
      "learning_rate": 1.6880677774175354e-05,
      "loss": 1.9765,
      "step": 603
    },
    {
      "epoch": 4.3,
      "learning_rate": 1.5919996534358635e-05,
      "loss": 1.9608,
      "step": 606
    },
    {
      "epoch": 4.32,
      "learning_rate": 1.4985925415520395e-05,
      "loss": 1.8867,
      "step": 609
    },
    {
      "epoch": 4.34,
      "learning_rate": 1.407864979761908e-05,
      "loss": 1.9532,
      "step": 612
    },
    {
      "epoch": 4.36,
      "learning_rate": 1.31983497426575e-05,
      "loss": 1.9672,
      "step": 615
    },
    {
      "epoch": 4.38,
      "learning_rate": 1.2345199958946673e-05,
      "loss": 1.9372,
      "step": 618
    },
    {
      "epoch": 4.4,
      "learning_rate": 1.1519369766432762e-05,
      "loss": 1.9676,
      "step": 621
    },
    {
      "epoch": 4.42,
      "learning_rate": 1.072102306309276e-05,
      "loss": 1.9726,
      "step": 624
    },
    {
      "epoch": 4.44,
      "learning_rate": 9.950318292406816e-06,
      "loss": 1.9565,
      "step": 627
    },
    {
      "epoch": 4.47,
      "learning_rate": 9.207408411912603e-06,
      "loss": 2.0322,
      "step": 630
    },
    {
      "epoch": 4.49,
      "learning_rate": 8.492440862848787e-06,
      "loss": 1.9868,
      "step": 633
    },
    {
      "epoch": 4.51,
      "learning_rate": 7.805557540893276e-06,
      "loss": 1.9788,
      "step": 636
    },
    {
      "epoch": 4.53,
      "learning_rate": 7.146894768001743e-06,
      "loss": 1.9226,
      "step": 639
    },
    {
      "epoch": 4.55,
      "learning_rate": 6.516583265352754e-06,
      "loss": 1.9214,
      "step": 642
    },
    {
      "epoch": 4.57,
      "learning_rate": 5.914748127404101e-06,
      "loss": 1.9815,
      "step": 645
    },
    {
      "epoch": 4.59,
      "learning_rate": 5.34150879706613e-06,
      "loss": 1.916,
      "step": 648
    },
    {
      "epoch": 4.61,
      "learning_rate": 4.796979041996485e-06,
      "loss": 1.9162,
      "step": 651
    },
    {
      "epoch": 4.63,
      "learning_rate": 4.2812669320212375e-06,
      "loss": 1.9955,
      "step": 654
    },
    {
      "epoch": 4.66,
      "learning_rate": 3.7944748176869766e-06,
      "loss": 1.926,
      "step": 657
    },
    {
      "epoch": 4.68,
      "learning_rate": 3.336699309947721e-06,
      "loss": 1.9637,
      "step": 660
    },
    {
      "epoch": 4.7,
      "learning_rate": 2.908031260991195e-06,
      "loss": 1.9602,
      "step": 663
    },
    {
      "epoch": 4.72,
      "learning_rate": 2.5085557462078134e-06,
      "loss": 2.0124,
      "step": 666
    },
    {
      "epoch": 4.74,
      "learning_rate": 2.1383520473062743e-06,
      "loss": 1.9393,
      "step": 669
    },
    {
      "epoch": 4.76,
      "learning_rate": 1.7974936365789116e-06,
      "loss": 1.8888,
      "step": 672
    },
    {
      "epoch": 4.78,
      "learning_rate": 1.4860481623201414e-06,
      "loss": 1.9409,
      "step": 675
    },
    {
      "epoch": 4.8,
      "learning_rate": 1.2040774354005856e-06,
      "loss": 1.9617,
      "step": 678
    },
    {
      "epoch": 4.83,
      "learning_rate": 9.516374169998508e-07,
      "loss": 1.9433,
      "step": 681
    },
    {
      "epoch": 4.85,
      "learning_rate": 7.287782075002302e-07,
      "loss": 2.0248,
      "step": 684
    },
    {
      "epoch": 4.87,
      "learning_rate": 5.355440365434893e-07,
      "loss": 1.9528,
      "step": 687
    },
    {
      "epoch": 4.89,
      "learning_rate": 3.7197325425297186e-07,
      "loss": 1.9823,
      "step": 690
    },
    {
      "epoch": 4.91,
      "learning_rate": 2.380983236223377e-07,
      "loss": 1.9716,
      "step": 693
    },
    {
      "epoch": 4.93,
      "learning_rate": 1.3394581407289996e-07,
      "loss": 1.9205,
      "step": 696
    },
    {
      "epoch": 4.95,
      "learning_rate": 5.9536396180492886e-08,
      "loss": 1.953,
      "step": 699
    },
    {
      "epoch": 4.97,
      "learning_rate": 1.4884837573087006e-08,
      "loss": 2.0213,
      "step": 702
    },
    {
      "epoch": 4.99,
      "learning_rate": 0.0,
      "loss": 1.9621,
      "step": 705
    },
    {
      "epoch": 4.99,
      "eval_accuracy": 0.6451979106514778,
      "eval_loss": 1.9133403301239014,
      "eval_runtime": 10.2297,
      "eval_samples_per_second": 197.171,
      "eval_steps_per_second": 98.634,
      "step": 705
    },
    {
      "epoch": 4.99,
      "step": 705,
      "total_flos": 4119345677467648.0,
      "train_loss": 2.1596999946215476,
      "train_runtime": 1037.2411,
      "train_samples_per_second": 87.439,
      "train_steps_per_second": 0.68
    }
  ],
  "max_steps": 705,
  "num_train_epochs": 5,
  "total_flos": 4119345677467648.0,
  "trial_name": null,
  "trial_params": null
}