{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.999841596705211,
"global_step": 3945,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 6.329113924050633e-05,
"loss": 5.9056,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 0.00012658227848101267,
"loss": 4.729,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 0.000189873417721519,
"loss": 4.2747,
"step": 15
},
{
"epoch": 0.03,
"learning_rate": 0.00025316455696202533,
"loss": 3.9938,
"step": 20
},
{
"epoch": 0.03,
"learning_rate": 0.00031645569620253165,
"loss": 3.7136,
"step": 25
},
{
"epoch": 0.04,
"learning_rate": 0.000379746835443038,
"loss": 3.6955,
"step": 30
},
{
"epoch": 0.04,
"learning_rate": 0.0004430379746835443,
"loss": 3.4653,
"step": 35
},
{
"epoch": 0.05,
"learning_rate": 0.0005063291139240507,
"loss": 3.3515,
"step": 40
},
{
"epoch": 0.06,
"learning_rate": 0.000569620253164557,
"loss": 3.2781,
"step": 45
},
{
"epoch": 0.06,
"learning_rate": 0.0006329113924050633,
"loss": 3.2616,
"step": 50
},
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0006962025316455697, |
|
"loss": 3.2314, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000759493670886076, |
|
"loss": 3.1758, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0008227848101265824, |
|
"loss": 3.1956, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0008860759493670886, |
|
"loss": 3.1717, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009493670886075949, |
|
"loss": 3.0096, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009999998349118006, |
|
"loss": 3.0798, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000999994056836268, |
|
"loss": 3.0737, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000999980024459781, |
|
"loss": 3.1079, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0009999577380139975, |
|
"loss": 2.9692, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0009999271978668395, |
|
"loss": 3.0226, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0009998884045224886, |
|
"loss": 2.8905, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0009998413586213759, |
|
"loss": 2.9354, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0009997860609401732, |
|
"loss": 2.9668, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000999722512391778, |
|
"loss": 2.939, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0009996507140253013, |
|
"loss": 2.9213, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000999570667026047, |
|
"loss": 2.9371, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0009994823727154955, |
|
"loss": 2.9326, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0009993858325512794, |
|
"loss": 2.9915, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000999281048127161, |
|
"loss": 2.9352, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0009991680211730051, |
|
"loss": 2.8284, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.000999046753554751, |
|
"loss": 2.8232, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.000998917247274381, |
|
"loss": 2.8925, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0009987795044698884, |
|
"loss": 2.8381, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0009986335274152413, |
|
"loss": 2.8939, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0009984793185203455, |
|
"loss": 2.9206, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0009983168803310042, |
|
"loss": 2.733, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0009981462155288774, |
|
"loss": 2.8812, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.000997967326931435, |
|
"loss": 2.7199, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0009977802174919133, |
|
"loss": 2.8825, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0009975848902992641, |
|
"loss": 2.8283, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0009973813485781044, |
|
"loss": 2.8447, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000997169595688664, |
|
"loss": 2.7746, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0009969496351267277, |
|
"loss": 2.6779, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000996721470523581, |
|
"loss": 2.8015, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0009964851056459465, |
|
"loss": 2.8746, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0009962405443959244, |
|
"loss": 2.7497, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0009959877908109274, |
|
"loss": 2.8197, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0009957268490636132, |
|
"loss": 2.7054, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000995457723461816, |
|
"loss": 2.7127, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0009951804184484766, |
|
"loss": 2.7058, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0009948949386015676, |
|
"loss": 2.7129, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0009946012886340176, |
|
"loss": 2.7425, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.000994299473393635, |
|
"loss": 2.7676, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0009939894978630267, |
|
"loss": 2.6728, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0009936713671595157, |
|
"loss": 2.7671, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0009933450865350583, |
|
"loss": 2.6662, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000993010661376155, |
|
"loss": 2.7334, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000992668097203763, |
|
"loss": 2.7489, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0009923173996732057, |
|
"loss": 2.6428, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0009919585745740776, |
|
"loss": 2.7059, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009915916278301496, |
|
"loss": 2.6961, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0009912165654992716, |
|
"loss": 2.8165, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0009908333937732718, |
|
"loss": 2.5846, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0009904421189778551, |
|
"loss": 2.6058, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000990042747572498, |
|
"loss": 2.6858, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0009896352861503426, |
|
"loss": 2.6797, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0009892197414380869, |
|
"loss": 2.7078, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0009887961202958747, |
|
"loss": 2.5945, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.000988364429717182, |
|
"loss": 2.6061, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0009879246768287019, |
|
"loss": 2.6557, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0009874768688902251, |
|
"loss": 2.6387, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009870210132945235, |
|
"loss": 2.6668, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0009865571175672243, |
|
"loss": 2.7257, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0009860851893666888, |
|
"loss": 2.7604, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0009856052364838846, |
|
"loss": 2.6524, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0009851172668422564, |
|
"loss": 2.5279, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0009846212884975969, |
|
"loss": 2.5394, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0009841173096379124, |
|
"loss": 2.6585, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.000983605338583288, |
|
"loss": 2.6494, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0009830853837857507, |
|
"loss": 2.6424, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0009825574538291292, |
|
"loss": 2.6346, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.000982021557428913, |
|
"loss": 2.6241, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0009814777034321067, |
|
"loss": 2.5922, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0009809259008170868, |
|
"loss": 2.6258, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0009803661586934513, |
|
"loss": 2.6141, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0009797984863018697, |
|
"loss": 2.624, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.000979222893013931, |
|
"loss": 2.6261, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0009786393883319884, |
|
"loss": 2.4675, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0009780479818890032, |
|
"loss": 2.6471, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0009774486834483848, |
|
"loss": 2.6596, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0009768415029038306, |
|
"loss": 2.6316, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0009762264502791609, |
|
"loss": 2.6561, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0009756035357281559, |
|
"loss": 2.6207, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0009749727695343859, |
|
"loss": 2.4761, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0009743341621110429, |
|
"loss": 2.5767, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0009736877240007677, |
|
"loss": 2.5302, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0009730334658754766, |
|
"loss": 2.5426, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0009723713985361853, |
|
"loss": 2.5384, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0009717015329128294, |
|
"loss": 2.4947, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0009710238800640854, |
|
"loss": 2.5495, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0009703384511771874, |
|
"loss": 2.5251, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0009696452575677426, |
|
"loss": 2.4983, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.0009689443106795442, |
|
"loss": 2.4939, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.000968235622084383, |
|
"loss": 2.3895, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0009675192034818561, |
|
"loss": 2.4947, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0009667950666991733, |
|
"loss": 2.4988, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0009660632236909627, |
|
"loss": 2.573, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0009653236865390729, |
|
"loss": 2.6162, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0009645764674523731, |
|
"loss": 2.613, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0009638215787665529, |
|
"loss": 2.508, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0009630590329439169, |
|
"loss": 2.5019, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0009622888425731801, |
|
"loss": 2.622, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0009615110203692601, |
|
"loss": 2.4998, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0009607255791730664, |
|
"loss": 2.467, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0009599325319512893, |
|
"loss": 2.6073, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.0009591318917961853, |
|
"loss": 2.6101, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.000958323671925361, |
|
"loss": 2.4277, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0009575078856815554, |
|
"loss": 2.5171, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0009566845465324185, |
|
"loss": 2.5376, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0009558536680702903, |
|
"loss": 2.5084, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0009550152640119756, |
|
"loss": 2.532, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0009541693481985178, |
|
"loss": 2.5169, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0009533159345949704, |
|
"loss": 2.6213, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.000952455037290166, |
|
"loss": 2.4695, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0009515866704964846, |
|
"loss": 2.4018, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0009507108485496185, |
|
"loss": 2.5084, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0009498275859083352, |
|
"loss": 2.5441, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0009489368971542393, |
|
"loss": 2.4815, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0009480387969915317, |
|
"loss": 2.468, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0009471333002467668, |
|
"loss": 2.5972, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0009462204218686074, |
|
"loss": 2.4825, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0009453001769275786, |
|
"loss": 2.5862, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0009443725806158182, |
|
"loss": 2.5174, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0009434376482468265, |
|
"loss": 2.3676, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0009424953952552133, |
|
"loss": 2.4715, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0009415458371964429, |
|
"loss": 2.5297, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0009405889897465778, |
|
"loss": 2.5442, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0009396248687020191, |
|
"loss": 2.5398, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.000938653489979247, |
|
"loss": 2.6478, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.000937674869614556, |
|
"loss": 2.4237, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.000936689023763793, |
|
"loss": 2.5133, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0009356959687020876, |
|
"loss": 2.3837, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0009346957208235855, |
|
"loss": 2.5308, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.000933688296641177, |
|
"loss": 2.4463, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0009326737127862249, |
|
"loss": 2.4815, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0009316519860082886, |
|
"loss": 2.5258, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0009306231331748496, |
|
"loss": 2.4161, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.0009295871712710317, |
|
"loss": 2.5333, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.0009285441173993206, |
|
"loss": 2.3897, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.0009274939887792823, |
|
"loss": 2.5545, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.0009264368027472784, |
|
"loss": 2.3463, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.0009253725767561793, |
|
"loss": 2.4669, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.0009243013283750773, |
|
"loss": 2.331, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.0009232230752889957, |
|
"loss": 2.2515, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.0009221378352985967, |
|
"loss": 2.462, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.0009210456263198886, |
|
"loss": 2.3609, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.0009199464663839289, |
|
"loss": 2.4393, |
|
"step": 785 |
|
}, |
|
{
"epoch": 1.0,
"eval_loss": 2.382097005844116,
"eval_runtime": 95.3367,
"eval_samples_per_second": 134.345,
"eval_steps_per_second": 16.793,
"step": 789
},
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.000918840373636527, |
|
"loss": 2.5088, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.0009177273663379448, |
|
"loss": 2.1884, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.0009166074628625952, |
|
"loss": 2.4646, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.0009154806816987386, |
|
"loss": 2.4983, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.0009143470414481776, |
|
"loss": 2.3381, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.0009132065608259505, |
|
"loss": 2.3715, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.0009120592586600216, |
|
"loss": 2.4095, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0009109051538909707, |
|
"loss": 2.3306, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0009097442655716803, |
|
"loss": 2.3049, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.0009085766128670217, |
|
"loss": 2.3163, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.0009074022150535374, |
|
"loss": 2.3609, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.000906221091519124, |
|
"loss": 2.1964, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.0009050332617627115, |
|
"loss": 2.3304, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.0009038387453939415, |
|
"loss": 2.3855, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.0009026375621328436, |
|
"loss": 2.2852, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.0009014297318095099, |
|
"loss": 2.2837, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.0009002152743637673, |
|
"loss": 2.2163, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.0008989942098448485, |
|
"loss": 2.2896, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0008977665584110612, |
|
"loss": 2.3854, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0008965323403294553, |
|
"loss": 2.3061, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.0008952915759754878, |
|
"loss": 2.3625, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.000894044285832687, |
|
"loss": 2.4091, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.0008927904904923142, |
|
"loss": 2.3056, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0008915302106530234, |
|
"loss": 2.3323, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0008902634671205201, |
|
"loss": 2.3386, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.0008889902808072176, |
|
"loss": 2.3661, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.0008877106727318918, |
|
"loss": 2.3923, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.0008864246640193341, |
|
"loss": 2.2784, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.0008851322759000026, |
|
"loss": 2.3131, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.0008838335297096721, |
|
"loss": 2.4255, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.0008825284468890808, |
|
"loss": 2.4198, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.0008812170489835783, |
|
"loss": 2.3279, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.0008798993576427671, |
|
"loss": 2.3672, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.0008785753946201483, |
|
"loss": 2.3138, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.0008772451817727598, |
|
"loss": 2.2753, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.0008759087410608169, |
|
"loss": 2.2821, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.0008745660945473495, |
|
"loss": 2.2495, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.0008732172643978382, |
|
"loss": 2.47, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.0008718622728798476, |
|
"loss": 2.3261, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.0008705011423626587, |
|
"loss": 2.3, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.0008691338953169009, |
|
"loss": 2.3209, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.0008677605543141796, |
|
"loss": 2.3039, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.000866381142026704, |
|
"loss": 2.2033, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.0008649956812269133, |
|
"loss": 2.3297, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.0008636041947871001, |
|
"loss": 2.273, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0008622067056790332, |
|
"loss": 2.2885, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0008608032369735781, |
|
"loss": 2.3532, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.0008593938118403164, |
|
"loss": 2.3094, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.0008579784535471631, |
|
"loss": 2.3238, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.0008565571854599825, |
|
"loss": 2.2605, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 0.0008551300310422027, |
|
"loss": 2.2964, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 0.0008536970138544277, |
|
"loss": 2.2659, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.0008522581575540491, |
|
"loss": 2.3661, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.0008508134858948553, |
|
"loss": 2.3099, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.0008493630227266387, |
|
"loss": 2.291, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.0008479067919948031, |
|
"loss": 2.4099, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.000846444817739968, |
|
"loss": 2.2842, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.0008449771240975707, |
|
"loss": 2.3229, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.0008435037352974696, |
|
"loss": 2.2471, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.000842024675663543, |
|
"loss": 2.3572, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.0008405399696132878, |
|
"loss": 2.1652, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.0008390496416574165, |
|
"loss": 2.3283, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.0008375537163994522, |
|
"loss": 2.2474, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.0008360522185353234, |
|
"loss": 2.3934, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.0008345451728529552, |
|
"loss": 2.2949, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.0008330326042318603, |
|
"loss": 2.2382, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.0008315145376427288, |
|
"loss": 2.356, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.0008299909981470158, |
|
"loss": 2.2622, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.0008284620108965271, |
|
"loss": 2.2402, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 0.0008269276011330047, |
|
"loss": 2.2855, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 0.0008253877941877095, |
|
"loss": 2.1968, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 0.0008238426154810035, |
|
"loss": 2.2679, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 0.0008222920905219302, |
|
"loss": 2.2169, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 0.0008207362449077932, |
|
"loss": 2.1951, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 0.0008191751043237336, |
|
"loss": 2.3119, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 0.000817608694542306, |
|
"loss": 2.408, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 0.0008160370414230535, |
|
"loss": 2.1998, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 0.00081446017091208, |
|
"loss": 2.3845, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 0.0008128781090416228, |
|
"loss": 2.2619, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 0.0008112908819296217, |
|
"loss": 2.3258, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 0.0008096985157792886, |
|
"loss": 2.3633, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 0.0008081010368786751, |
|
"loss": 2.2268, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 0.0008064984716002377, |
|
"loss": 2.2499, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 0.0008048908464004032, |
|
"loss": 2.258, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 0.0008032781878191313, |
|
"loss": 2.2347, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 0.0008016605224794773, |
|
"loss": 2.2717, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 0.0008000378770871515, |
|
"loss": 2.1741, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 0.0007984102784300793, |
|
"loss": 2.1593, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 0.0007967777533779582, |
|
"loss": 2.2707, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 0.0007951403288818149, |
|
"loss": 2.2641, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 0.0007934980319735599, |
|
"loss": 2.2677, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.000791850889765541, |
|
"loss": 2.2982, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.0007901989294500968, |
|
"loss": 2.1954, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 0.0007885421782991064, |
|
"loss": 2.2439, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 0.0007868806636635405, |
|
"loss": 2.2238, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 0.0007852144129730086, |
|
"loss": 2.234, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.0007835434537353067, |
|
"loss": 2.31, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 0.0007818678135359641, |
|
"loss": 2.2452, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 0.0007801875200377862, |
|
"loss": 2.1722, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 0.0007785026009803993, |
|
"loss": 2.1681, |
|
"step": 1285 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 0.0007768130841797919, |
|
"loss": 2.2128, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.0007751189975278561, |
|
"loss": 2.121, |
|
"step": 1295 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 0.0007734203689919261, |
|
"loss": 2.2649, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 0.0007717172266143178, |
|
"loss": 2.1976, |
|
"step": 1305 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 0.000770009598511865, |
|
"loss": 2.3699, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 0.0007682975128754548, |
|
"loss": 2.1847, |
|
"step": 1315 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 0.0007665809979695639, |
|
"loss": 2.2154, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 0.00076486008213179, |
|
"loss": 2.2729, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.0007631347937723856, |
|
"loss": 2.3637, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.0007614051613737878, |
|
"loss": 2.244, |
|
"step": 1335 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 0.0007596712134901487, |
|
"loss": 2.2387, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 0.0007579329787468638, |
|
"loss": 2.1745, |
|
"step": 1345 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 0.0007561904858400999, |
|
"loss": 2.1605, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 0.0007544437635363206, |
|
"loss": 2.2522, |
|
"step": 1355 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 0.0007526928406718115, |
|
"loss": 2.2604, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 0.0007509377461522049, |
|
"loss": 2.2935, |
|
"step": 1365 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 0.000749178508952002, |
|
"loss": 2.2513, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 0.0007474151581140947, |
|
"loss": 2.1089, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 0.0007456477227492858, |
|
"loss": 2.2211, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.0007438762320358088, |
|
"loss": 2.2009, |
|
"step": 1385 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.0007421007152188468, |
|
"loss": 2.1967, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 0.0007403212016100484, |
|
"loss": 2.3052, |
|
"step": 1395 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 0.0007385377205870442, |
|
"loss": 2.2927, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 0.0007367503015929626, |
|
"loss": 2.1667, |
|
"step": 1405 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 0.0007349589741359431, |
|
"loss": 2.2273, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 0.000733163767788649, |
|
"loss": 2.3456, |
|
"step": 1415 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 0.0007313647121877793, |
|
"loss": 2.279, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.0007295618370335799, |
|
"loss": 2.3189, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.0007277551720893528, |
|
"loss": 2.1636, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 0.0007259447471809651, |
|
"loss": 2.1341, |
|
"step": 1435 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 0.0007241305921963557, |
|
"loss": 2.2994, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 0.0007223127370850433, |
|
"loss": 2.2897, |
|
"step": 1445 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 0.000720491211857631, |
|
"loss": 2.2286, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 0.000718666046585311, |
|
"loss": 2.2796, |
|
"step": 1455 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 0.0007168372713993683, |
|
"loss": 2.2812, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.0007150049164906835, |
|
"loss": 2.1562, |
|
"step": 1465 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.0007131690121092336, |
|
"loss": 2.305, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 0.0007113295885635935, |
|
"loss": 2.2369, |
|
"step": 1475 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 0.0007094866762204354, |
|
"loss": 2.2044, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 0.0007076403055040271, |
|
"loss": 2.1396, |
|
"step": 1485 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.0007057905068957299, |
|
"loss": 2.3368, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.0007039373109334957, |
|
"loss": 2.2138, |
|
"step": 1495 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 0.0007020807482113627, |
|
"loss": 2.1798, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.00070022084937895, |
|
"loss": 2.1819, |
|
"step": 1505 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.0006983576451409517, |
|
"loss": 2.1694, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 0.0006964911662566308, |
|
"loss": 2.1964, |
|
"step": 1515 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 0.0006946214435393103, |
|
"loss": 2.0662, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 0.0006927485078558652, |
|
"loss": 2.1458, |
|
"step": 1525 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.0006908723901262126, |
|
"loss": 2.2772, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 0.0006889931213228015, |
|
"loss": 2.1833, |
|
"step": 1535 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 0.0006871107324701008, |
|
"loss": 2.2241, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 0.0006852252546440885, |
|
"loss": 2.1994, |
|
"step": 1545 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 0.0006833367189717372, |
|
"loss": 2.229, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 0.0006814451566305014, |
|
"loss": 2.2222, |
|
"step": 1555 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 0.0006795505988478015, |
|
"loss": 2.2849, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 0.0006776530769005098, |
|
"loss": 2.11, |
|
"step": 1565 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 0.0006757526221144329, |
|
"loss": 2.1437, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.0006738492658637956, |
|
"loss": 2.1549, |
|
"step": 1575 |
|
}, |
|
{
"epoch": 2.0,
"eval_loss": 2.1981911659240723,
"eval_runtime": 95.327,
"eval_samples_per_second": 134.359,
"eval_steps_per_second": 16.795,
"step": 1578
},
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.0006719430395707215, |
|
"loss": 2.3153, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 0.0006700339747047161, |
|
"loss": 2.0107, |
|
"step": 1585 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 0.0006681221027821458, |
|
"loss": 2.1278, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 0.000666207455365718, |
|
"loss": 2.0655, |
|
"step": 1595 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 0.0006642900640639608, |
|
"loss": 2.0664, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 0.0006623699605306999, |
|
"loss": 1.9791, |
|
"step": 1605 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 0.0006604471764645369, |
|
"loss": 2.0518, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 0.000658521743608326, |
|
"loss": 2.0644, |
|
"step": 1615 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 0.0006565936937486492, |
|
"loss": 2.0737, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 0.0006546630587152929, |
|
"loss": 2.0784, |
|
"step": 1625 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 0.000652729870380721, |
|
"loss": 2.1277, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 0.0006507941606595491, |
|
"loss": 2.1686, |
|
"step": 1635 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 0.0006488559615080188, |
|
"loss": 2.1398, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 0.0006469153049234683, |
|
"loss": 2.1453, |
|
"step": 1645 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 0.0006449722229438053, |
|
"loss": 2.0425, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 0.0006430267476469783, |
|
"loss": 2.0705, |
|
"step": 1655 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 0.0006410789111504461, |
|
"loss": 2.1157, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 0.0006391287456106483, |
|
"loss": 2.1243, |
|
"step": 1665 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 0.000637176283222474, |
|
"loss": 2.0428, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 0.0006352215562187306, |
|
"loss": 2.0638, |
|
"step": 1675 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 0.000633264596869612, |
|
"loss": 2.0722, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 0.0006313054374821646, |
|
"loss": 2.1089, |
|
"step": 1685 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 0.0006293441103997555, |
|
"loss": 2.0444, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 0.0006273806480015374, |
|
"loss": 2.0067, |
|
"step": 1695 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 0.0006254150827019148, |
|
"loss": 2.2493, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 0.0006234474469500085, |
|
"loss": 2.0435, |
|
"step": 1705 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 0.00062147777322912, |
|
"loss": 2.1426, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 0.0006195060940561949, |
|
"loss": 2.0512, |
|
"step": 1715 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 0.0006175324419812869, |
|
"loss": 2.1365, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 0.0006155568495870196, |
|
"loss": 2.137, |
|
"step": 1725 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 0.000613579349488049, |
|
"loss": 1.9907, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 0.0006115999743305252, |
|
"loss": 2.085, |
|
"step": 1735 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 0.0006096187567915528, |
|
"loss": 2.1276, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 0.0006076357295786525, |
|
"loss": 1.9774, |
|
"step": 1745 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 0.0006056509254292202, |
|
"loss": 2.0054, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 0.0006036643771099871, |
|
"loss": 2.0158, |
|
"step": 1755 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 0.000601676117416478, |
|
"loss": 1.9952, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 0.0005996861791724712, |
|
"loss": 2.0267, |
|
"step": 1765 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 0.0005976945952294553, |
|
"loss": 1.9985, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 0.0005957013984660875, |
|
"loss": 2.0334, |
|
"step": 1775 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 0.0005937066217876511, |
|
"loss": 2.0078, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 0.0005917102981255113, |
|
"loss": 2.0976, |
|
"step": 1785 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 0.0005897124604365727, |
|
"loss": 2.0843, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 0.0005877131417027343, |
|
"loss": 1.9916, |
|
"step": 1795 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 0.0005857123749303454, |
|
"loss": 2.0005, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 0.000583710193149661, |
|
"loss": 2.134, |
|
"step": 1805 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 0.0005817066294142957, |
|
"loss": 2.1503, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 0.0005797017168006791, |
|
"loss": 2.033, |
|
"step": 1815 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 0.0005776954884075085, |
|
"loss": 2.0627, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 0.0005756879773552036, |
|
"loss": 1.985, |
|
"step": 1825 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 0.000573679216785359, |
|
"loss": 2.0933, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 0.0005716692398601975, |
|
"loss": 2.1007, |
|
"step": 1835 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 0.0005696580797620226, |
|
"loss": 2.0164, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 0.0005676457696926703, |
|
"loss": 2.0588, |
|
"step": 1845 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 0.0005656323428729612, |
|
"loss": 2.0087, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 0.0005636178325421524, |
|
"loss": 2.0798, |
|
"step": 1855 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 0.000561602271957388, |
|
"loss": 2.1189, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 0.0005595856943931512, |
|
"loss": 2.0921, |
|
"step": 1865 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 0.0005575681331407137, |
|
"loss": 2.1177, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 0.0005555496215075871, |
|
"loss": 2.1912, |
|
"step": 1875 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 0.0005535301928169722, |
|
"loss": 2.1851, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 0.00055150988040721, |
|
"loss": 2.1525, |
|
"step": 1885 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 0.00054948871763123, |
|
"loss": 2.0721, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 0.0005474667378560006, |
|
"loss": 2.1164, |
|
"step": 1895 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 0.0005454439744619776, |
|
"loss": 2.1189, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 0.0005434204608425538, |
|
"loss": 2.0945, |
|
"step": 1905 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 0.0005413962304035068, |
|
"loss": 2.2036, |
|
"step": 1910 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 0.0005393713165624483, |
|
"loss": 2.1643, |
|
"step": 1915 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 0.0005373457527482725, |
|
"loss": 2.0757, |
|
"step": 1920 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 0.0005353195724006031, |
|
"loss": 2.0375, |
|
"step": 1925 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 0.0005332928089692425, |
|
"loss": 2.0028, |
|
"step": 1930 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 0.0005312654959136194, |
|
"loss": 2.08, |
|
"step": 1935 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 0.0005292376667022355, |
|
"loss": 2.0521, |
|
"step": 1940 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 0.0005272093548121141, |
|
"loss": 1.9639, |
|
"step": 1945 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 0.0005251805937282467, |
|
"loss": 2.1265, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 0.0005231514169430402, |
|
"loss": 2.0656, |
|
"step": 1955 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 0.000521121857955765, |
|
"loss": 2.1371, |
|
"step": 1960 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 0.0005190919502720005, |
|
"loss": 2.0298, |
|
"step": 1965 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 0.0005170617274030827, |
|
"loss": 2.166, |
|
"step": 1970 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 0.0005150312228655515, |
|
"loss": 2.0785, |
|
"step": 1975 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 0.0005130004701805963, |
|
"loss": 2.0836, |
|
"step": 1980 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 0.0005109695028735029, |
|
"loss": 2.1391, |
|
"step": 1985 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 0.0005089383544731012, |
|
"loss": 1.9789, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 0.0005069070585112097, |
|
"loss": 2.0498, |
|
"step": 1995 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 0.0005048756485220836, |
|
"loss": 2.1021, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 0.0005028441580418602, |
|
"loss": 2.0545, |
|
"step": 2005 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 0.0005008126206080059, |
|
"loss": 2.0408, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 0.0004987810697587617, |
|
"loss": 2.1183, |
|
"step": 2015 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 0.0004967495390325911, |
|
"loss": 2.1223, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 0.0004947180619676244, |
|
"loss": 1.9974, |
|
"step": 2025 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 0.0004926866721011064, |
|
"loss": 2.1279, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 0.0004906554029688425, |
|
"loss": 1.9737, |
|
"step": 2035 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 0.0004886242881046447, |
|
"loss": 1.9904, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 0.00048659336103977805, |
|
"loss": 2.0324, |
|
"step": 2045 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 0.00048456265530240784, |
|
"loss": 2.1492, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 0.00048253220441704507, |
|
"loss": 1.9715, |
|
"step": 2055 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 0.00048050204190399335, |
|
"loss": 2.0, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 0.00047847220127879606, |
|
"loss": 2.1284, |
|
"step": 2065 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 0.00047644271605168213, |
|
"loss": 2.0984, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 0.0004744136197270135, |
|
"loss": 2.057, |
|
"step": 2075 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 0.00047238494580273193, |
|
"loss": 2.0703, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 0.00047035672776980574, |
|
"loss": 2.154, |
|
"step": 2085 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 0.00046832899911167697, |
|
"loss": 2.154, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 0.00046630179330370866, |
|
"loss": 2.0377, |
|
"step": 2095 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 0.00046427514381263205, |
|
"loss": 2.1521, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 0.00046224908409599495, |
|
"loss": 2.159, |
|
"step": 2105 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 0.000460223647601608, |
|
"loss": 2.0178, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 0.0004581988677669935, |
|
"loss": 2.0769, |
|
"step": 2115 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 0.00045617477801883295, |
|
"loss": 2.1209, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 0.00045415141177241545, |
|
"loss": 1.9985, |
|
"step": 2125 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 0.0004521288024310856, |
|
"loss": 1.9867, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 0.00045010698338569333, |
|
"loss": 2.022, |
|
"step": 2135 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 0.0004480859880140408, |
|
"loss": 2.0363, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 0.00044606584968033265, |
|
"loss": 2.1197, |
|
"step": 2145 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 0.00044404660173462477, |
|
"loss": 2.0195, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 0.0004420282775122739, |
|
"loss": 2.0232, |
|
"step": 2155 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 0.00044001091033338735, |
|
"loss": 2.0646, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 0.0004379945335022726, |
|
"loss": 1.9648, |
|
"step": 2165 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 0.00043597918030688777, |
|
"loss": 2.1262, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 0.0004339648840182919, |
|
"loss": 2.0872, |
|
"step": 2175 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 0.0004319516778900957, |
|
"loss": 2.0648, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 0.0004299395951579126, |
|
"loss": 1.9803, |
|
"step": 2185 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 0.0004279286690388107, |
|
"loss": 2.0624, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 0.00042591893273076313, |
|
"loss": 1.989, |
|
"step": 2195 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 0.00042391041941210086, |
|
"loss": 2.0994, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 0.00042190316224096497, |
|
"loss": 2.1275, |
|
"step": 2205 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 0.00041989719435475865, |
|
"loss": 1.8627, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 0.0004178925488696012, |
|
"loss": 2.0948, |
|
"step": 2215 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 0.0004158892588797801, |
|
"loss": 2.1067, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 0.0004138873574572053, |
|
"loss": 2.0034, |
|
"step": 2225 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 0.0004118868776508633, |
|
"loss": 2.0439, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 0.0004098878524862715, |
|
"loss": 2.0353, |
|
"step": 2235 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 0.00040789031496493255, |
|
"loss": 2.0757, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 0.00040589429806379057, |
|
"loss": 2.0462, |
|
"step": 2245 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 0.0004038998347346854, |
|
"loss": 1.9894, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 0.0004019069579038096, |
|
"loss": 2.0345, |
|
"step": 2255 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 0.00039991570047116456, |
|
"loss": 2.044, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 0.0003979260953100169, |
|
"loss": 1.9671, |
|
"step": 2265 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 0.0003959381752663572, |
|
"loss": 1.9423, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 0.00039395197315835573, |
|
"loss": 1.9895, |
|
"step": 2275 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 0.0003919675217758224, |
|
"loss": 1.9851, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 0.00038998485387966425, |
|
"loss": 2.1416, |
|
"step": 2285 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 0.00038800400220134556, |
|
"loss": 2.0666, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 0.0003860249994423467, |
|
"loss": 2.0336, |
|
"step": 2295 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 0.00038404787827362477, |
|
"loss": 2.1442, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 0.0003820726713350742, |
|
"loss": 2.0041, |
|
"step": 2305 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 0.00038009941123498737, |
|
"loss": 2.0138, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 0.0003781281305495171, |
|
"loss": 2.1307, |
|
"step": 2315 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 0.00037615886182213826, |
|
"loss": 1.9921, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 0.0003741916375631105, |
|
"loss": 2.1046, |
|
"step": 2325 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 0.0003722264902489425, |
|
"loss": 2.0436, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 0.00037026345232185414, |
|
"loss": 1.9607, |
|
"step": 2335 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 0.00036830255618924265, |
|
"loss": 1.9253, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 0.0003663438342231462, |
|
"loss": 2.127, |
|
"step": 2345 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 0.00036438731875971034, |
|
"loss": 2.1264, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 0.00036243304209865405, |
|
"loss": 2.038, |
|
"step": 2355 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 0.0003604810365027362, |
|
"loss": 2.1015, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 0.00035853133419722316, |
|
"loss": 2.1424, |
|
"step": 2365 |
|
}, |
|
{
"epoch": 3.0,
"eval_loss": 2.1064605712890625,
"eval_runtime": 95.2849,
"eval_samples_per_second": 134.418,
"eval_steps_per_second": 16.802,
"step": 2367
},
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 0.0003565839673693566, |
|
"loss": 2.0409, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 0.0003546389681678224, |
|
"loss": 2.023, |
|
"step": 2375 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 0.0003526963687022198, |
|
"loss": 1.8723, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 0.0003507562010425312, |
|
"loss": 1.852, |
|
"step": 2385 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 0.000348818497218593, |
|
"loss": 2.0228, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 0.00034688328921956635, |
|
"loss": 1.8949, |
|
"step": 2395 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 0.0003449506089934095, |
|
"loss": 1.911, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 0.0003430204884463499, |
|
"loss": 1.9915, |
|
"step": 2405 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 0.0003410929594423585, |
|
"loss": 1.8748, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 0.0003391680538026224, |
|
"loss": 2.0819, |
|
"step": 2415 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 0.00033724580330502007, |
|
"loss": 1.9166, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 0.00033532623968359697, |
|
"loss": 1.9435, |
|
"step": 2425 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 0.0003334093946280417, |
|
"loss": 1.9159, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 0.0003314952997831618, |
|
"loss": 2.0227, |
|
"step": 2435 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 0.00032958398674836324, |
|
"loss": 1.9393, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 0.00032767548707712693, |
|
"loss": 1.9019, |
|
"step": 2445 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 0.0003257698322764886, |
|
"loss": 1.9114, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 0.00032386705380651874, |
|
"loss": 1.8678, |
|
"step": 2455 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 0.00032196718307980277, |
|
"loss": 1.9498, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 0.00032007025146092347, |
|
"loss": 1.9621, |
|
"step": 2465 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 0.00031817629026594164, |
|
"loss": 1.9599, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 0.0003162853307618805, |
|
"loss": 1.8853, |
|
"step": 2475 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 0.0003143974041662087, |
|
"loss": 1.9151, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 0.00031251254164632537, |
|
"loss": 1.8902, |
|
"step": 2485 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 0.0003106307743190452, |
|
"loss": 1.9937, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 0.0003087521332500854, |
|
"loss": 2.0179, |
|
"step": 2495 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 0.0003068766494535522, |
|
"loss": 2.0089, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 0.0003050043538914287, |
|
"loss": 1.9892, |
|
"step": 2505 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 0.00030313527747306444, |
|
"loss": 1.8901, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 0.00030126945105466487, |
|
"loss": 1.9044, |
|
"step": 2515 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 0.0002994069054387817, |
|
"loss": 1.949, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 0.00029754767137380426, |
|
"loss": 1.8189, |
|
"step": 2525 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 0.0002956917795534528, |
|
"loss": 1.988, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 0.00029383926061627054, |
|
"loss": 2.0487, |
|
"step": 2535 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 0.0002919901451451187, |
|
"loss": 1.9252, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 0.00029014446366667113, |
|
"loss": 1.9607, |
|
"step": 2545 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 0.00028830224665091106, |
|
"loss": 2.0455, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 0.0002864635245106272, |
|
"loss": 1.9129, |
|
"step": 2555 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 0.0002846283276009124, |
|
"loss": 2.0086, |
|
"step": 2560 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 0.0002827966862186616, |
|
"loss": 1.9384, |
|
"step": 2565 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 0.000280968630602073, |
|
"loss": 1.991, |
|
"step": 2570 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 0.00027914419093014734, |
|
"loss": 1.8516, |
|
"step": 2575 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 0.00027732339732219137, |
|
"loss": 1.966, |
|
"step": 2580 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 0.0002755062798373189, |
|
"loss": 1.9472, |
|
"step": 2585 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 0.0002736928684739558, |
|
"loss": 1.8879, |
|
"step": 2590 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 0.0002718831931693443, |
|
"loss": 1.9771, |
|
"step": 2595 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 0.00027007728379904814, |
|
"loss": 1.8985, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 0.0002682751701764615, |
|
"loss": 2.0464, |
|
"step": 2605 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 0.0002664768820523137, |
|
"loss": 1.8301, |
|
"step": 2610 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 0.0002646824491141807, |
|
"loss": 1.8646, |
|
"step": 2615 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 0.0002628919009859934, |
|
"loss": 1.9277, |
|
"step": 2620 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 0.0002611052672275495, |
|
"loss": 1.8923, |
|
"step": 2625 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 0.0002593225773340251, |
|
"loss": 1.8747, |
|
"step": 2630 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 0.00025754386073548776, |
|
"loss": 1.9835, |
|
"step": 2635 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 0.000255769146796411, |
|
"loss": 1.9034, |
|
"step": 2640 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 0.0002539984648151886, |
|
"loss": 1.9571, |
|
"step": 2645 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 0.00025223184402365224, |
|
"loss": 1.9356, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 0.0002504693135865875, |
|
"loss": 1.9416, |
|
"step": 2655 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 0.00024871090260125423, |
|
"loss": 2.0262, |
|
"step": 2660 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 0.000246956640096904, |
|
"loss": 2.0165, |
|
"step": 2665 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 0.0002452065550343026, |
|
"loss": 2.0138, |
|
"step": 2670 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 0.00024346067630525082, |
|
"loss": 2.0022, |
|
"step": 2675 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 0.00024171903273210828, |
|
"loss": 1.8902, |
|
"step": 2680 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 0.0002399816530673171, |
|
"loss": 2.0161, |
|
"step": 2685 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 0.00023824856599292762, |
|
"loss": 1.973, |
|
"step": 2690 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 0.00023651980012012454, |
|
"loss": 1.8932, |
|
"step": 2695 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 0.00023479538398875433, |
|
"loss": 1.9342, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 0.00023307534606685525, |
|
"loss": 1.8307, |
|
"step": 2705 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 0.00023135971475018557, |
|
"loss": 1.9087, |
|
"step": 2710 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 0.00022964851836175705, |
|
"loss": 1.8213, |
|
"step": 2715 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 0.00022794178515136494, |
|
"loss": 1.9313, |
|
"step": 2720 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 0.00022623954329512348, |
|
"loss": 2.0212, |
|
"step": 2725 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 0.00022454182089500004, |
|
"loss": 1.8266, |
|
"step": 2730 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 0.00022284864597835058, |
|
"loss": 1.9772, |
|
"step": 2735 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 0.00022116004649745826, |
|
"loss": 1.9601, |
|
"step": 2740 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 0.00021947605032907099, |
|
"loss": 1.8989, |
|
"step": 2745 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 0.00021779668527394187, |
|
"loss": 1.9805, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 0.0002161219790563691, |
|
"loss": 1.8144, |
|
"step": 2755 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 0.00021445195932373986, |
|
"loss": 1.9025, |
|
"step": 2760 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 0.0002127866536460727, |
|
"loss": 1.8811, |
|
"step": 2765 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 0.00021112608951556316, |
|
"loss": 1.998, |
|
"step": 2770 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 0.00020947029434612886, |
|
"loss": 1.8758, |
|
"step": 2775 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 0.00020781929547295815, |
|
"loss": 1.8745, |
|
"step": 2780 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 0.00020617312015205842, |
|
"loss": 1.8758, |
|
"step": 2785 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 0.00020453179555980533, |
|
"loss": 1.9658, |
|
"step": 2790 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 0.00020289534879249543, |
|
"loss": 1.7781, |
|
"step": 2795 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 0.00020126380686589807, |
|
"loss": 1.9071, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 0.0001996371967148098, |
|
"loss": 1.813, |
|
"step": 2805 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 0.00019801554519260884, |
|
"loss": 1.9647, |
|
"step": 2810 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 0.00019639887907081294, |
|
"loss": 1.9145, |
|
"step": 2815 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 0.00019478722503863706, |
|
"loss": 1.9741, |
|
"step": 2820 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 0.0001931806097025517, |
|
"loss": 1.9005, |
|
"step": 2825 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 0.00019157905958584582, |
|
"loss": 1.9434, |
|
"step": 2830 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 0.0001899826011281865, |
|
"loss": 1.8557, |
|
"step": 2835 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 0.00018839126068518448, |
|
"loss": 1.9641, |
|
"step": 2840 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 0.0001868050645279576, |
|
"loss": 1.9206, |
|
"step": 2845 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 0.00018522403884269813, |
|
"loss": 1.8437, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 0.00018364820973024, |
|
"loss": 1.9311, |
|
"step": 2855 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 0.00018207760320562794, |
|
"loss": 1.9442, |
|
"step": 2860 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 0.00018051224519768816, |
|
"loss": 1.8921, |
|
"step": 2865 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 0.00017895216154859966, |
|
"loss": 1.8873, |
|
"step": 2870 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 0.00017739737801346895, |
|
"loss": 1.9562, |
|
"step": 2875 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 0.00017584792025990287, |
|
"loss": 1.9437, |
|
"step": 2880 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 0.00017430381386758747, |
|
"loss": 1.8848, |
|
"step": 2885 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 0.00017276508432786304, |
|
"loss": 1.9044, |
|
"step": 2890 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 0.00017123175704330512, |
|
"loss": 1.8581, |
|
"step": 2895 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 0.00016970385732730414, |
|
"loss": 1.9017, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 0.00016818141040364815, |
|
"loss": 1.7544, |
|
"step": 2905 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 0.00016666444140610615, |
|
"loss": 1.92, |
|
"step": 2910 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 0.00016515297537801304, |
|
"loss": 1.8981, |
|
"step": 2915 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 0.00016364703727185664, |
|
"loss": 1.97, |
|
"step": 2920 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 0.00016214665194886474, |
|
"loss": 1.8704, |
|
"step": 2925 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 0.00016065184417859613, |
|
"loss": 1.9395, |
|
"step": 2930 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 0.00015916263863853, |
|
"loss": 1.9092, |
|
"step": 2935 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 0.0001576790599136607, |
|
"loss": 1.8171, |
|
"step": 2940 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 0.00015620113249608942, |
|
"loss": 1.8367, |
|
"step": 2945 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 0.00015472888078462172, |
|
"loss": 1.9402, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 0.00015326232908436405, |
|
"loss": 1.9839, |
|
"step": 2955 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 0.00015180150160632195, |
|
"loss": 1.8956, |
|
"step": 2960 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 0.00015034642246700204, |
|
"loss": 1.8861, |
|
"step": 2965 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 0.00014889711568801152, |
|
"loss": 2.0093, |
|
"step": 2970 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 0.00014745360519566382, |
|
"loss": 1.9242, |
|
"step": 2975 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 0.0001460159148205819, |
|
"loss": 1.9005, |
|
"step": 2980 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 0.0001445840682973062, |
|
"loss": 1.9462, |
|
"step": 2985 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 0.00014315808926390188, |
|
"loss": 2.0249, |
|
"step": 2990 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 0.00014173800126156917, |
|
"loss": 1.9317, |
|
"step": 2995 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 0.00014032382773425384, |
|
"loss": 2.0039, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 0.00013891559202826133, |
|
"loss": 1.9658, |
|
"step": 3005 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 0.0001375133173918709, |
|
"loss": 1.9448, |
|
"step": 3010 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 0.00013611702697495086, |
|
"loss": 1.8653, |
|
"step": 3015 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 0.0001347267438285786, |
|
"loss": 1.9222, |
|
"step": 3020 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 0.0001333424909046574, |
|
"loss": 1.8796, |
|
"step": 3025 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 0.0001319642910555396, |
|
"loss": 1.8942, |
|
"step": 3030 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 0.00013059216703364813, |
|
"loss": 1.9026, |
|
"step": 3035 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 0.00012922614149110152, |
|
"loss": 1.9212, |
|
"step": 3040 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 0.0001278662369793398, |
|
"loss": 1.9068, |
|
"step": 3045 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 0.000126512475948752, |
|
"loss": 1.9956, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 0.00012516488074830584, |
|
"loss": 1.8775, |
|
"step": 3055 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 0.00012382347362517815, |
|
"loss": 1.9005, |
|
"step": 3060 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 0.00012248827672438868, |
|
"loss": 1.9538, |
|
"step": 3065 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 0.00012115931208843318, |
|
"loss": 1.8309, |
|
"step": 3070 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 0.00011983660165692079, |
|
"loss": 2.0001, |
|
"step": 3075 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 0.00011852016726621096, |
|
"loss": 1.8293, |
|
"step": 3080 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 0.00011721003064905328, |
|
"loss": 1.9337, |
|
"step": 3085 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 0.0001159062134342289, |
|
"loss": 1.912, |
|
"step": 3090 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 0.00011460873714619274, |
|
"loss": 1.8585, |
|
"step": 3095 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 0.00011331762320471906, |
|
"loss": 1.9535, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 0.00011203289292454726, |
|
"loss": 1.9018, |
|
"step": 3105 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 0.00011075456751503043, |
|
"loss": 1.9418, |
|
"step": 3110 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 0.0001094826680797843, |
|
"loss": 1.9581, |
|
"step": 3115 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 0.00010821721561634024, |
|
"loss": 1.9045, |
|
"step": 3120 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 0.00010695823101579727, |
|
"loss": 1.8314, |
|
"step": 3125 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 0.00010570573506247811, |
|
"loss": 1.9717, |
|
"step": 3130 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 0.00010445974843358564, |
|
"loss": 1.9037, |
|
"step": 3135 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 0.0001032202916988616, |
|
"loss": 1.8119, |
|
"step": 3140 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 0.00010198738532024716, |
|
"loss": 1.8549, |
|
"step": 3145 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 0.00010076104965154459, |
|
"loss": 1.8318, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 9.9541304938082e-05, |
|
"loss": 1.9885, |
|
"step": 3155 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_loss": 2.0513949394226074, |
|
"eval_runtime": 95.3176, |
|
"eval_samples_per_second": 134.372, |
|
"eval_steps_per_second": 16.796, |
|
"step": 3156 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 9.832817131637867e-05, |
|
"loss": 1.9155, |
|
"step": 3160 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 9.71216688138128e-05, |
|
"loss": 1.8082, |
|
"step": 3165 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 9.592181734829036e-05, |
|
"loss": 1.8647, |
|
"step": 3170 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 9.472863672791721e-05, |
|
"loss": 1.8277, |
|
"step": 3175 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 9.35421466506714e-05, |
|
"loss": 1.9529, |
|
"step": 3180 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 9.236236670407771e-05, |
|
"loss": 1.8167, |
|
"step": 3185 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 9.11893163648857e-05, |
|
"loss": 1.9159, |
|
"step": 3190 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 9.002301499874621e-05, |
|
"loss": 1.7958, |
|
"step": 3195 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 8.886348185989334e-05, |
|
"loss": 1.865, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 8.771073609082531e-05, |
|
"loss": 1.859, |
|
"step": 3205 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 8.656479672198947e-05, |
|
"loss": 1.8602, |
|
"step": 3210 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 8.54256826714676e-05, |
|
"loss": 1.8442, |
|
"step": 3215 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 8.429341274466368e-05, |
|
"loss": 1.8399, |
|
"step": 3220 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 8.316800563399307e-05, |
|
"loss": 1.895, |
|
"step": 3225 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 8.204947991857486e-05, |
|
"loss": 1.8071, |
|
"step": 3230 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 8.093785406392429e-05, |
|
"loss": 1.9761, |
|
"step": 3235 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 7.983314642164786e-05, |
|
"loss": 1.8828, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 7.873537522914154e-05, |
|
"loss": 1.9077, |
|
"step": 3245 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 7.764455860928793e-05, |
|
"loss": 1.867, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 7.656071457015878e-05, |
|
"loss": 1.8226, |
|
"step": 3255 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 7.548386100471621e-05, |
|
"loss": 1.821, |
|
"step": 3260 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 7.441401569051847e-05, |
|
"loss": 1.8415, |
|
"step": 3265 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 7.335119628942594e-05, |
|
"loss": 1.8206, |
|
"step": 3270 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 7.229542034730952e-05, |
|
"loss": 1.8202, |
|
"step": 3275 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 7.124670529376126e-05, |
|
"loss": 1.9373, |
|
"step": 3280 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 7.020506844180608e-05, |
|
"loss": 1.8372, |
|
"step": 3285 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 6.917052698761666e-05, |
|
"loss": 1.8923, |
|
"step": 3290 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 6.814309801022872e-05, |
|
"loss": 1.8606, |
|
"step": 3295 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 6.712279847126035e-05, |
|
"loss": 1.8864, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 6.610964521463031e-05, |
|
"loss": 1.8178, |
|
"step": 3305 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 6.510365496628129e-05, |
|
"loss": 1.8438, |
|
"step": 3310 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 6.410484433390334e-05, |
|
"loss": 1.79, |
|
"step": 3315 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 6.311322980665934e-05, |
|
"loss": 1.8641, |
|
"step": 3320 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 6.212882775491352e-05, |
|
"loss": 1.8712, |
|
"step": 3325 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 6.115165442996056e-05, |
|
"loss": 1.863, |
|
"step": 3330 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 6.018172596375776e-05, |
|
"loss": 1.8744, |
|
"step": 3335 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 5.921905836865821e-05, |
|
"loss": 1.8524, |
|
"step": 3340 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 5.826366753714707e-05, |
|
"loss": 1.9031, |
|
"step": 3345 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 5.731556924157844e-05, |
|
"loss": 1.784, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 5.6374779133916035e-05, |
|
"loss": 1.8172, |
|
"step": 3355 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 5.544131274547365e-05, |
|
"loss": 1.8779, |
|
"step": 3360 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 5.4515185486659454e-05, |
|
"loss": 1.7402, |
|
"step": 3365 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 5.359641264672155e-05, |
|
"loss": 1.9493, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 5.268500939349513e-05, |
|
"loss": 1.8279, |
|
"step": 3375 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 5.178099077315257e-05, |
|
"loss": 1.7599, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 5.0884371709954804e-05, |
|
"loss": 1.7905, |
|
"step": 3385 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 4.999516700600504e-05, |
|
"loss": 1.8888, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 4.911339134100401e-05, |
|
"loss": 1.8539, |
|
"step": 3395 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 4.823905927200833e-05, |
|
"loss": 1.8406, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 4.737218523318965e-05, |
|
"loss": 1.8983, |
|
"step": 3405 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 4.65127835355964e-05, |
|
"loss": 1.7739, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 4.566086836691791e-05, |
|
"loss": 1.8877, |
|
"step": 3415 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 4.4816453791249436e-05, |
|
"loss": 1.8403, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 4.3979553748861035e-05, |
|
"loss": 1.9217, |
|
"step": 3425 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 4.3150182055966384e-05, |
|
"loss": 1.8122, |
|
"step": 3430 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 4.232835240449534e-05, |
|
"loss": 1.8977, |
|
"step": 3435 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 4.1514078361867826e-05, |
|
"loss": 1.9035, |
|
"step": 3440 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 4.070737337076963e-05, |
|
"loss": 1.8811, |
|
"step": 3445 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 3.990825074893056e-05, |
|
"loss": 1.9786, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 3.911672368890462e-05, |
|
"loss": 1.9312, |
|
"step": 3455 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 3.8332805257852374e-05, |
|
"loss": 1.8757, |
|
"step": 3460 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 3.755650839732489e-05, |
|
"loss": 1.8269, |
|
"step": 3465 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 3.67878459230504e-05, |
|
"loss": 1.7784, |
|
"step": 3470 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 3.6026830524722444e-05, |
|
"loss": 1.8365, |
|
"step": 3475 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 3.527347476579068e-05, |
|
"loss": 1.9105, |
|
"step": 3480 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 3.4527791083253154e-05, |
|
"loss": 1.8909, |
|
"step": 3485 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 3.3789791787451344e-05, |
|
"loss": 1.9213, |
|
"step": 3490 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 3.305948906186662e-05, |
|
"loss": 1.7467, |
|
"step": 3495 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 3.233689496291931e-05, |
|
"loss": 1.8005, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 3.162202141976955e-05, |
|
"loss": 1.9142, |
|
"step": 3505 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 3.09148802341202e-05, |
|
"loss": 1.8589, |
|
"step": 3510 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 3.0215483080022478e-05, |
|
"loss": 1.9262, |
|
"step": 3515 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 2.9523841503682757e-05, |
|
"loss": 1.8654, |
|
"step": 3520 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 2.8839966923272288e-05, |
|
"loss": 1.8914, |
|
"step": 3525 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 2.8163870628738375e-05, |
|
"loss": 1.8989, |
|
"step": 3530 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 2.7495563781618328e-05, |
|
"loss": 1.8157, |
|
"step": 3535 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 2.6835057414855036e-05, |
|
"loss": 1.8452, |
|
"step": 3540 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 2.6182362432614513e-05, |
|
"loss": 1.8583, |
|
"step": 3545 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 2.5537489610106824e-05, |
|
"loss": 1.7927, |
|
"step": 3550 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 2.4900449593406984e-05, |
|
"loss": 1.928, |
|
"step": 3555 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 2.4271252899280173e-05, |
|
"loss": 1.8615, |
|
"step": 3560 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 2.364990991500743e-05, |
|
"loss": 1.8917, |
|
"step": 3565 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 2.3036430898214832e-05, |
|
"loss": 1.7866, |
|
"step": 3570 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 2.2430825976703484e-05, |
|
"loss": 1.768, |
|
"step": 3575 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 2.1833105148282905e-05, |
|
"loss": 1.7868, |
|
"step": 3580 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 2.1243278280605515e-05, |
|
"loss": 1.8383, |
|
"step": 3585 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 2.0661355111004032e-05, |
|
"loss": 1.8518, |
|
"step": 3590 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 2.0087345246330713e-05, |
|
"loss": 1.939, |
|
"step": 3595 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 1.9521258162798263e-05, |
|
"loss": 1.8259, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 1.8963103205824395e-05, |
|
"loss": 1.9559, |
|
"step": 3605 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 1.8412889589876358e-05, |
|
"loss": 1.8916, |
|
"step": 3610 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 1.787062639831988e-05, |
|
"loss": 1.8576, |
|
"step": 3615 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 1.7336322583268336e-05, |
|
"loss": 1.8634, |
|
"step": 3620 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 1.6809986965435677e-05, |
|
"loss": 1.9129, |
|
"step": 3625 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 1.6291628233990387e-05, |
|
"loss": 1.7839, |
|
"step": 3630 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 1.5781254946412028e-05, |
|
"loss": 1.8129, |
|
"step": 3635 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 1.5278875528350277e-05, |
|
"loss": 1.8645, |
|
"step": 3640 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 1.4784498273485436e-05, |
|
"loss": 1.7878, |
|
"step": 3645 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 1.4298131343391918e-05, |
|
"loss": 1.8806, |
|
"step": 3650 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 1.3819782767403033e-05, |
|
"loss": 1.8544, |
|
"step": 3655 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 1.3349460442479089e-05, |
|
"loss": 1.9462, |
|
"step": 3660 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 1.2887172133076219e-05, |
|
"loss": 1.8129, |
|
"step": 3665 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 1.243292547101893e-05, |
|
"loss": 1.8055, |
|
"step": 3670 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 1.1986727955373588e-05, |
|
"loss": 1.8297, |
|
"step": 3675 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 1.1548586952324912e-05, |
|
"loss": 1.7961, |
|
"step": 3680 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 1.1118509695054235e-05, |
|
"loss": 1.9098, |
|
"step": 3685 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 1.0696503283620319e-05, |
|
"loss": 1.7803, |
|
"step": 3690 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 1.0282574684841784e-05, |
|
"loss": 1.9406, |
|
"step": 3695 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 9.876730732182203e-06, |
|
"loss": 1.8219, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 9.478978125637583e-06, |
|
"loss": 1.9694, |
|
"step": 3705 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 9.089323431625273e-06, |
|
"loss": 1.8647, |
|
"step": 3710 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 8.70777308287618e-06, |
|
"loss": 1.8981, |
|
"step": 3715 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 8.334333378327785e-06, |
|
"loss": 1.8218, |
|
"step": 3720 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 7.969010483020845e-06, |
|
"loss": 1.8954, |
|
"step": 3725 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 7.611810427997412e-06, |
|
"loss": 1.7638, |
|
"step": 3730 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 7.262739110200922e-06, |
|
"loss": 1.7664, |
|
"step": 3735 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 6.921802292379376e-06, |
|
"loss": 1.8825, |
|
"step": 3740 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 6.589005602989862e-06, |
|
"loss": 1.7941, |
|
"step": 3745 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 6.264354536105799e-06, |
|
"loss": 1.8809, |
|
"step": 3750 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 5.947854451326007e-06, |
|
"loss": 1.7973, |
|
"step": 3755 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 5.639510573686613e-06, |
|
"loss": 1.9447, |
|
"step": 3760 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 5.339327993574339e-06, |
|
"loss": 1.8651, |
|
"step": 3765 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 5.047311666642851e-06, |
|
"loss": 1.8057, |
|
"step": 3770 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 4.763466413730821e-06, |
|
"loss": 1.8872, |
|
"step": 3775 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 4.4877969207821615e-06, |
|
"loss": 1.7565, |
|
"step": 3780 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 4.2203077387688586e-06, |
|
"loss": 1.9462, |
|
"step": 3785 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 3.961003283615816e-06, |
|
"loss": 1.8327, |
|
"step": 3790 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 3.709887836128023e-06, |
|
"loss": 1.8064, |
|
"step": 3795 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 3.4669655419196087e-06, |
|
"loss": 1.8423, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 3.232240411345788e-06, |
|
"loss": 1.887, |
|
"step": 3805 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 3.0057163194363023e-06, |
|
"loss": 1.7267, |
|
"step": 3810 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 2.7873970058316934e-06, |
|
"loss": 1.8499, |
|
"step": 3815 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 2.5772860747214078e-06, |
|
"loss": 1.8719, |
|
"step": 3820 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 2.3753869947843455e-06, |
|
"loss": 1.8829, |
|
"step": 3825 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 2.1817030991317934e-06, |
|
"loss": 1.8648, |
|
"step": 3830 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 1.9962375852519698e-06, |
|
"loss": 1.8077, |
|
"step": 3835 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 1.8189935149576786e-06, |
|
"loss": 1.7893, |
|
"step": 3840 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 1.6499738143354593e-06, |
|
"loss": 1.8117, |
|
"step": 3845 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 1.4891812736974596e-06, |
|
"loss": 1.8163, |
|
"step": 3850 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 1.3366185475351955e-06, |
|
"loss": 1.8515, |
|
"step": 3855 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 1.192288154476029e-06, |
|
"loss": 1.8055, |
|
"step": 3860 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 1.0561924772412024e-06, |
|
"loss": 1.8637, |
|
"step": 3865 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 9.283337626068144e-07, |
|
"loss": 1.8176, |
|
"step": 3870 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 8.087141213665717e-07, |
|
"loss": 1.9019, |
|
"step": 3875 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 6.973355282969274e-07, |
|
"loss": 1.8888, |
|
"step": 3880 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 5.941998221247191e-07, |
|
"loss": 1.8405, |
|
"step": 3885 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 4.993087054963597e-07, |
|
"loss": 1.8748, |
|
"step": 3890 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 4.1266374495024795e-07, |
|
"loss": 1.7451, |
|
"step": 3895 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 3.342663708905125e-07, |
|
"loss": 1.8469, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 2.6411787756353e-07, |
|
"loss": 1.9005, |
|
"step": 3905 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 2.0221942303666474e-07, |
|
"loss": 1.8063, |
|
"step": 3910 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 1.4857202917900603e-07, |
|
"loss": 1.8288, |
|
"step": 3915 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 1.0317658164454846e-07, |
|
"loss": 1.8788, |
|
"step": 3920 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 6.603382985759243e-08, |
|
"loss": 1.937, |
|
"step": 3925 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"learning_rate": 3.714438700025413e-08, |
|
"loss": 1.7552, |
|
"step": 3930 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 1.650873000258457e-08, |
|
"loss": 1.803, |
|
"step": 3935 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 4.1271995344094316e-09, |
|
"loss": 1.8769, |
|
"step": 3940 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 0.0, |
|
"loss": 1.806, |
|
"step": 3945 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_loss": 2.0400655269622803, |
|
"eval_runtime": 95.5556, |
|
"eval_samples_per_second": 134.037, |
|
"eval_steps_per_second": 16.755, |
|
"step": 3945 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"step": 3945, |
|
"total_flos": 1.3195381386195763e+17, |
|
"train_loss": 2.171095475799986, |
|
"train_runtime": 13808.1422, |
|
"train_samples_per_second": 36.573, |
|
"train_steps_per_second": 0.286 |
|
} |
|
], |
|
"max_steps": 3945, |
|
"num_train_epochs": 5, |
|
"total_flos": 1.3195381386195763e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|
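For reference, a minimal sketch of reading this log back, assuming the file is saved under the name the Hugging Face Trainer normally uses (trainer_state.json); the filename and output path are assumptions, the printed values simply echo the final entries recorded above.

import json

# Assumed location: the Trainer typically writes this file as
# "trainer_state.json" inside the run's output directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training-loss entries and evaluation entries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("final train loss:", train_logs[-1]["loss"])      # 1.806 at step 3945
print("final eval loss:", eval_logs[-1]["eval_loss"])    # ~2.0401 at step 3945
print("epochs:", state["num_train_epochs"], "max steps:", state["max_steps"])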