{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.2,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0004993997599039616,
      "loss": 9.442,
      "step": 5
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0004987995198079231,
      "loss": 6.4069,
      "step": 10
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0004981992797118848,
      "loss": 5.2241,
      "step": 15
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0004975990396158463,
      "loss": 4.6629,
      "step": 20
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0004969987995198079,
      "loss": 4.4599,
      "step": 25
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0004963985594237695,
      "loss": 4.4699,
      "step": 30
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0004957983193277311,
      "loss": 4.328,
      "step": 35
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0004951980792316927,
      "loss": 4.2845,
      "step": 40
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0004945978391356542,
      "loss": 4.2205,
      "step": 45
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0004939975990396158,
      "loss": 4.1616,
      "step": 50
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0004933973589435775,
      "loss": 4.1001,
      "step": 55
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.000492797118847539,
      "loss": 4.051,
      "step": 60
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0004921968787515006,
      "loss": 3.9317,
      "step": 65
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0004915966386554621,
      "loss": 3.9395,
      "step": 70
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0004909963985594238,
      "loss": 3.892,
      "step": 75
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0004903961584633854,
      "loss": 3.7325,
      "step": 80
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0004897959183673469,
      "loss": 3.735,
      "step": 85
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0004891956782713085,
      "loss": 3.6928,
      "step": 90
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0004885954381752701,
      "loss": 3.7048,
      "step": 95
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0004879951980792317,
      "loss": 3.5304,
      "step": 100
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00048739495798319325,
      "loss": 3.565,
      "step": 105
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00048679471788715486,
      "loss": 3.6801,
      "step": 110
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00048619447779111647,
      "loss": 3.5079,
      "step": 115
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00048559423769507803,
      "loss": 3.5053,
      "step": 120
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00048499399759903964,
      "loss": 3.35,
      "step": 125
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0004843937575030012,
      "loss": 3.4045,
      "step": 130
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0004837935174069628,
      "loss": 3.3187,
      "step": 135
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00048319327731092437,
      "loss": 3.3906,
      "step": 140
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.000482593037214886,
      "loss": 3.3228,
      "step": 145
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00048199279711884753,
      "loss": 3.297,
      "step": 150
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00048139255702280915,
      "loss": 3.3078,
      "step": 155
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0004807923169267707,
      "loss": 3.312,
      "step": 160
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0004801920768307323,
      "loss": 3.3131,
      "step": 165
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00047959183673469387,
      "loss": 3.2012,
      "step": 170
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0004789915966386555,
      "loss": 3.2605,
      "step": 175
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00047839135654261704,
      "loss": 3.1821,
      "step": 180
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00047779111644657865,
      "loss": 3.1836,
      "step": 185
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0004771908763505402,
      "loss": 3.1439,
      "step": 190
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0004765906362545018,
      "loss": 3.1493,
      "step": 195
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0004759903961584634,
      "loss": 3.0728,
      "step": 200
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.000475390156062425,
      "loss": 3.1574,
      "step": 205
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00047478991596638654,
      "loss": 3.1272,
      "step": 210
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00047418967587034816,
      "loss": 3.0083,
      "step": 215
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00047358943577430977,
      "loss": 3.0536,
      "step": 220
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0004729891956782713,
      "loss": 3.0536,
      "step": 225
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0004723889555822329,
      "loss": 3.0091,
      "step": 230
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0004717887154861945,
      "loss": 3.0709,
      "step": 235
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0004711884753901561,
      "loss": 2.9084,
      "step": 240
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00047058823529411766,
      "loss": 2.9807,
      "step": 245
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0004699879951980792,
      "loss": 2.9563,
      "step": 250
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00046938775510204083,
      "loss": 2.9508,
      "step": 255
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00046878751500600244,
      "loss": 2.9896,
      "step": 260
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.000468187274909964,
      "loss": 2.8946,
      "step": 265
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00046758703481392555,
      "loss": 2.9081,
      "step": 270
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00046698679471788716,
      "loss": 2.8824,
      "step": 275
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0004663865546218488,
      "loss": 2.9004,
      "step": 280
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00046578631452581033,
      "loss": 2.8155,
      "step": 285
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0004651860744297719,
      "loss": 2.8921,
      "step": 290
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0004645858343337335,
      "loss": 2.8942,
      "step": 295
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0004639855942376951,
      "loss": 2.8331,
      "step": 300
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00046338535414165667,
      "loss": 2.9032,
      "step": 305
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0004627851140456182,
      "loss": 2.8443,
      "step": 310
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00046218487394957984,
      "loss": 2.9231,
      "step": 315
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00046158463385354145,
      "loss": 2.8204,
      "step": 320
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.000460984393757503,
      "loss": 2.8024,
      "step": 325
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00046038415366146456,
      "loss": 2.8511,
      "step": 330
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00045978391356542623,
      "loss": 2.7767,
      "step": 335
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0004591836734693878,
      "loss": 2.7563,
      "step": 340
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00045858343337334934,
      "loss": 2.8491,
      "step": 345
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0004579831932773109,
      "loss": 2.8621,
      "step": 350
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00045738295318127257,
      "loss": 2.7985,
      "step": 355
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0004567827130852341,
      "loss": 2.8162,
      "step": 360
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0004561824729891957,
      "loss": 2.7087,
      "step": 365
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00045558223289315724,
      "loss": 2.7947,
      "step": 370
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0004549819927971189,
      "loss": 2.7413,
      "step": 375
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00045438175270108046,
      "loss": 2.7319,
      "step": 380
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.000453781512605042,
      "loss": 2.8137,
      "step": 385
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00045318127250900357,
      "loss": 2.7292,
      "step": 390
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00045258103241296524,
      "loss": 2.8123,
      "step": 395
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0004519807923169268,
      "loss": 2.7721,
      "step": 400
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00045138055222088835,
      "loss": 2.7073,
      "step": 405
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0004507803121248499,
      "loss": 2.6707,
      "step": 410
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0004501800720288116,
      "loss": 2.6422,
      "step": 415
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00044957983193277313,
      "loss": 2.7066,
      "step": 420
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0004489795918367347,
      "loss": 2.6398,
      "step": 425
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0004483793517406963,
      "loss": 2.7323,
      "step": 430
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0004477791116446579,
      "loss": 2.695,
      "step": 435
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00044717887154861947,
      "loss": 2.6634,
      "step": 440
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.000446578631452581,
      "loss": 2.6284,
      "step": 445
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00044597839135654264,
      "loss": 2.7046,
      "step": 450
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00044537815126050425,
      "loss": 2.7038,
      "step": 455
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0004447779111644658,
      "loss": 2.6489,
      "step": 460
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00044417767106842736,
      "loss": 2.6162,
      "step": 465
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.000443577430972389,
      "loss": 2.6625,
      "step": 470
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00044297719087635053,
      "loss": 2.6046,
      "step": 475
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00044237695078031214,
      "loss": 2.6556,
      "step": 480
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0004417767106842737,
      "loss": 2.5762,
      "step": 485
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0004411764705882353,
      "loss": 2.6441,
      "step": 490
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00044057623049219687,
      "loss": 2.6731,
      "step": 495
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0004399759903961585,
      "loss": 2.5839,
      "step": 500
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00043937575030012003,
      "loss": 2.5429,
      "step": 505
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00043877551020408165,
      "loss": 2.5735,
      "step": 510
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0004381752701080432,
      "loss": 2.561,
      "step": 515
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0004375750300120048,
      "loss": 2.5553,
      "step": 520
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00043697478991596637,
      "loss": 2.5196,
      "step": 525
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.000436374549819928,
      "loss": 2.5371,
      "step": 530
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00043577430972388954,
      "loss": 2.4678,
      "step": 535
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00043517406962785115,
      "loss": 2.5223,
      "step": 540
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00043457382953181276,
      "loss": 2.5111,
      "step": 545
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0004339735894357743,
      "loss": 2.5562,
      "step": 550
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0004333733493397359,
      "loss": 2.5992,
      "step": 555
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0004327731092436975,
      "loss": 2.5308,
      "step": 560
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0004321728691476591,
      "loss": 2.5141,
      "step": 565
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00043157262905162066,
      "loss": 2.5755,
      "step": 570
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0004309723889555822,
      "loss": 2.5723,
      "step": 575
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0004303721488595438,
      "loss": 2.4098,
      "step": 580
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00042977190876350544,
      "loss": 2.5296,
      "step": 585
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.000429171668667467,
      "loss": 2.547,
      "step": 590
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00042857142857142855,
      "loss": 2.4059,
      "step": 595
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00042797118847539016,
      "loss": 2.4101,
      "step": 600
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00042737094837935177,
      "loss": 2.5301,
      "step": 605
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00042677070828331333,
      "loss": 2.4837,
      "step": 610
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0004261704681872749,
      "loss": 2.5095,
      "step": 615
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0004255702280912365,
      "loss": 2.4423,
      "step": 620
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0004249699879951981,
      "loss": 2.4299,
      "step": 625
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00042436974789915967,
      "loss": 2.487,
      "step": 630
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0004237695078031212,
      "loss": 2.4534,
      "step": 635
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0004231692677070829,
      "loss": 2.4706,
      "step": 640
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00042256902761104444,
      "loss": 2.4558,
      "step": 645
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.000421968787515006,
      "loss": 2.4226,
      "step": 650
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00042136854741896756,
      "loss": 2.4443,
      "step": 655
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0004207683073229292,
      "loss": 2.4487,
      "step": 660
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0004201680672268908,
      "loss": 2.4283,
      "step": 665
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00041956782713085234,
      "loss": 2.4918,
      "step": 670
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0004189675870348139,
      "loss": 2.4895,
      "step": 675
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00041836734693877556,
      "loss": 2.4296,
      "step": 680
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0004177671068427371,
      "loss": 2.4259,
      "step": 685
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0004171668667466987,
      "loss": 2.4956,
      "step": 690
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00041656662665066023,
      "loss": 2.4708,
      "step": 695
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0004159663865546219,
      "loss": 2.4078,
      "step": 700
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00041536614645858345,
      "loss": 2.4024,
      "step": 705
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.000414765906362545,
      "loss": 2.3847,
      "step": 710
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00041416566626650657,
      "loss": 2.361,
      "step": 715
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00041356542617046823,
      "loss": 2.4494,
      "step": 720
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0004129651860744298,
      "loss": 2.4495,
      "step": 725
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00041236494597839135,
      "loss": 2.431,
      "step": 730
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.0004117647058823529,
      "loss": 2.4143,
      "step": 735
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00041116446578631457,
      "loss": 2.3522,
      "step": 740
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00041056422569027613,
      "loss": 2.4095,
      "step": 745
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.0004099639855942377,
      "loss": 2.3399,
      "step": 750
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0004093637454981993,
      "loss": 2.3234,
      "step": 755
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0004087635054021609,
      "loss": 2.3904,
      "step": 760
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00040816326530612246,
      "loss": 2.3691,
      "step": 765
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.000407563025210084,
      "loss": 2.3312,
      "step": 770
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00040696278511404563,
      "loss": 2.3323,
      "step": 775
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00040636254501800724,
      "loss": 2.3701,
      "step": 780
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.0004057623049219688,
      "loss": 2.433,
      "step": 785
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00040516206482593036,
      "loss": 2.3172,
      "step": 790
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00040456182472989197,
      "loss": 2.3529,
      "step": 795
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0004039615846338536,
      "loss": 2.4238,
      "step": 800
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00040336134453781514,
      "loss": 2.3834,
      "step": 805
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0004027611044417767,
      "loss": 2.2364,
      "step": 810
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0004021608643457383,
      "loss": 2.3618,
      "step": 815
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0004015606242496999,
      "loss": 2.3188,
      "step": 820
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.0004009603841536615,
      "loss": 2.3659,
      "step": 825
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00040036014405762303,
      "loss": 2.3043,
      "step": 830
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00039975990396158464,
      "loss": 2.299,
      "step": 835
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00039915966386554625,
      "loss": 2.1782,
      "step": 840
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.0003985594237695078,
      "loss": 2.1026,
      "step": 845
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00039795918367346937,
      "loss": 2.1629,
      "step": 850
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.000397358943577431,
      "loss": 2.1208,
      "step": 855
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0003967587034813926,
      "loss": 2.0706,
      "step": 860
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00039615846338535415,
      "loss": 2.0824,
      "step": 865
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00039555822328931576,
      "loss": 2.1032,
      "step": 870
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.0003949579831932773,
      "loss": 2.1157,
      "step": 875
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.0003943577430972389,
      "loss": 2.072,
      "step": 880
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.0003937575030012005,
      "loss": 2.1275,
      "step": 885
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0003931572629051621,
      "loss": 2.0514,
      "step": 890
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00039255702280912365,
      "loss": 2.0643,
      "step": 895
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00039195678271308526,
      "loss": 2.1485,
      "step": 900
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0003913565426170468,
      "loss": 2.0773,
      "step": 905
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00039075630252100843,
      "loss": 2.0944,
      "step": 910
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00039015606242497,
      "loss": 2.1342,
      "step": 915
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.0003895558223289316,
      "loss": 2.0535,
      "step": 920
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00038895558223289316,
      "loss": 2.0487,
      "step": 925
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00038835534213685477,
      "loss": 2.0567,
      "step": 930
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0003877551020408163,
      "loss": 2.0674,
      "step": 935
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0003871548619447779,
      "loss": 2.103,
      "step": 940
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0003865546218487395,
      "loss": 2.0839,
      "step": 945
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.0003859543817527011,
      "loss": 2.1078,
      "step": 950
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00038535414165666266,
      "loss": 2.1217,
      "step": 955
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0003847539015606242,
      "loss": 2.1457,
      "step": 960
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0003841536614645859,
      "loss": 2.0889,
      "step": 965
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00038355342136854744,
      "loss": 2.0706,
      "step": 970
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.000382953181272509,
      "loss": 2.1443,
      "step": 975
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00038235294117647055,
      "loss": 2.1191,
      "step": 980
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.0003817527010804322,
      "loss": 2.0608,
      "step": 985
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.0003811524609843938,
      "loss": 2.0786,
      "step": 990
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00038055222088835533,
      "loss": 2.1455,
      "step": 995
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0003799519807923169,
      "loss": 1.9875,
      "step": 1000
    }
  ],
  "logging_steps": 5,
  "max_steps": 4165,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1.974401690333184e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|