|
{ |
|
"best_metric": 1.1288557052612305, |
|
"best_model_checkpoint": "data/tinyllama_moe_sft_ultrachat_v2_ep3/checkpoint-3400", |
|
"epoch": 2.9986859395532193, |
|
"eval_steps": 100, |
|
"global_step": 3423, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.6666666666666668e-07, |
|
"loss": 9.5404, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 8.333333333333333e-07, |
|
"loss": 9.5866, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.6666666666666667e-06, |
|
"loss": 9.5058, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.5e-06, |
|
"loss": 9.2314, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 8.5252, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 7.705, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 5e-06, |
|
"loss": 6.9818, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 5.833333333333334e-06, |
|
"loss": 6.2066, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 5.626, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 7.500000000000001e-06, |
|
"loss": 4.934, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 4.1589, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.166666666666666e-06, |
|
"loss": 3.2853, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1e-05, |
|
"loss": 2.665, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.0833333333333334e-05, |
|
"loss": 2.2781, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.1666666666666668e-05, |
|
"loss": 2.0534, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.25e-05, |
|
"loss": 1.856, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 1.737, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.416666666666667e-05, |
|
"loss": 1.6503, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.5000000000000002e-05, |
|
"loss": 1.5797, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.5833333333333333e-05, |
|
"loss": 1.496, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 1.4892, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"eval_loss": 1.4464516639709473, |
|
"eval_runtime": 422.999, |
|
"eval_samples_per_second": 38.218, |
|
"eval_steps_per_second": 1.196, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.7500000000000002e-05, |
|
"loss": 1.4327, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.8333333333333333e-05, |
|
"loss": 1.4035, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.916666666666667e-05, |
|
"loss": 1.3875, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 2e-05, |
|
"loss": 1.3497, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9999886918439637e-05, |
|
"loss": 1.3439, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9999547676316034e-05, |
|
"loss": 1.3132, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.99989822813016e-05, |
|
"loss": 1.3238, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.999819074618348e-05, |
|
"loss": 1.331, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.9997173088863285e-05, |
|
"loss": 1.3065, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.9995929332356666e-05, |
|
"loss": 1.3042, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.999445950479281e-05, |
|
"loss": 1.2943, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9992763639413796e-05, |
|
"loss": 1.2866, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9990841774573843e-05, |
|
"loss": 1.2793, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9988693953738446e-05, |
|
"loss": 1.2825, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9986320225483396e-05, |
|
"loss": 1.2906, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9983720643493665e-05, |
|
"loss": 1.2636, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9980895266562217e-05, |
|
"loss": 1.2626, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9977844158588655e-05, |
|
"loss": 1.269, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.997456738857779e-05, |
|
"loss": 1.2572, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9971065030638076e-05, |
|
"loss": 1.2729, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"eval_loss": 1.2643306255340576, |
|
"eval_runtime": 424.4121, |
|
"eval_samples_per_second": 38.09, |
|
"eval_steps_per_second": 1.192, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.996733716397993e-05, |
|
"loss": 1.2652, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.996338387291395e-05, |
|
"loss": 1.238, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.9959205246849e-05, |
|
"loss": 1.2698, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.9954801380290194e-05, |
|
"loss": 1.2448, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.995017237283675e-05, |
|
"loss": 1.2286, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.994531832917974e-05, |
|
"loss": 1.2492, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.994023935909974e-05, |
|
"loss": 1.257, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9934935577464312e-05, |
|
"loss": 1.2271, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9929407104225444e-05, |
|
"loss": 1.256, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.9923654064416813e-05, |
|
"loss": 1.2302, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.991767658815096e-05, |
|
"loss": 1.2399, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9911474810616348e-05, |
|
"loss": 1.2421, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9905048872074322e-05, |
|
"loss": 1.2231, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.989839891785591e-05, |
|
"loss": 1.2241, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.9891525098358553e-05, |
|
"loss": 1.2377, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.9884427569042693e-05, |
|
"loss": 1.213, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.9877106490428275e-05, |
|
"loss": 1.2462, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.9869562028091092e-05, |
|
"loss": 1.2121, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.986179435265906e-05, |
|
"loss": 1.2321, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9853803639808357e-05, |
|
"loss": 1.2286, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"eval_loss": 1.2280113697052002, |
|
"eval_runtime": 423.2939, |
|
"eval_samples_per_second": 38.191, |
|
"eval_steps_per_second": 1.195, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.984559007025943e-05, |
|
"loss": 1.2302, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.983715382977293e-05, |
|
"loss": 1.1996, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9828495109145516e-05, |
|
"loss": 1.2167, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9819614104205504e-05, |
|
"loss": 1.2126, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9810511015808477e-05, |
|
"loss": 1.2107, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.980118604983273e-05, |
|
"loss": 1.2049, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.979163941717459e-05, |
|
"loss": 1.2586, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.9781871333743695e-05, |
|
"loss": 1.209, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.9771882020458055e-05, |
|
"loss": 1.2133, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.9761671703239108e-05, |
|
"loss": 1.2079, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.9751240613006568e-05, |
|
"loss": 1.2088, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.9740588985673226e-05, |
|
"loss": 1.2134, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.9729717062139616e-05, |
|
"loss": 1.2089, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.9718625088288544e-05, |
|
"loss": 1.2009, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.970731331497956e-05, |
|
"loss": 1.1949, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.969578199804326e-05, |
|
"loss": 1.2096, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.96840313982755e-05, |
|
"loss": 1.1993, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.967206178143152e-05, |
|
"loss": 1.1927, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.96598734182199e-05, |
|
"loss": 1.1936, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.9647466584296474e-05, |
|
"loss": 1.2007, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"eval_loss": 1.2074954509735107, |
|
"eval_runtime": 425.7671, |
|
"eval_samples_per_second": 37.969, |
|
"eval_steps_per_second": 1.188, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.9634841560258063e-05, |
|
"loss": 1.2202, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.9621998631636156e-05, |
|
"loss": 1.2151, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.960893808889043e-05, |
|
"loss": 1.1997, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.9595660227402204e-05, |
|
"loss": 1.2009, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.958216534746773e-05, |
|
"loss": 1.1919, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.9568453754291424e-05, |
|
"loss": 1.2008, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.9554525757978958e-05, |
|
"loss": 1.186, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.9540381673530247e-05, |
|
"loss": 1.2016, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.952602182083231e-05, |
|
"loss": 1.1897, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.9511446524652062e-05, |
|
"loss": 1.2155, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.949665611462895e-05, |
|
"loss": 1.185, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.9481650925267506e-05, |
|
"loss": 1.1842, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.946643129592977e-05, |
|
"loss": 1.1974, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.945099757082763e-05, |
|
"loss": 1.1975, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.9435350099015028e-05, |
|
"loss": 1.1918, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.9419489234380077e-05, |
|
"loss": 1.1997, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.940341533563703e-05, |
|
"loss": 1.1652, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.9387128766318205e-05, |
|
"loss": 1.199, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.9370629894765737e-05, |
|
"loss": 1.1819, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.935391909412325e-05, |
|
"loss": 1.1688, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"eval_loss": 1.19331955909729, |
|
"eval_runtime": 424.0715, |
|
"eval_samples_per_second": 38.121, |
|
"eval_steps_per_second": 1.193, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.9336996742327424e-05, |
|
"loss": 1.1832, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.931986322209946e-05, |
|
"loss": 1.1862, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.930251892093638e-05, |
|
"loss": 1.1904, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.928496423110233e-05, |
|
"loss": 1.1653, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.9267199549619643e-05, |
|
"loss": 1.1834, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.92492252782599e-05, |
|
"loss": 1.1738, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.9231041823534835e-05, |
|
"loss": 1.1704, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.9212649596687136e-05, |
|
"loss": 1.1844, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.9194049013681134e-05, |
|
"loss": 1.1741, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.9175240495193433e-05, |
|
"loss": 1.1854, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.915622446660335e-05, |
|
"loss": 1.1866, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.9137001357983323e-05, |
|
"loss": 1.1928, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.9117571604089172e-05, |
|
"loss": 1.1908, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.9097935644350284e-05, |
|
"loss": 1.1806, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.9078093922859642e-05, |
|
"loss": 1.1821, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.9058046888363814e-05, |
|
"loss": 1.1668, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.9037794994252792e-05, |
|
"loss": 1.1862, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.901733869854973e-05, |
|
"loss": 1.1748, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.8996678463900596e-05, |
|
"loss": 1.1981, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.8975814757563707e-05, |
|
"loss": 1.1872, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"eval_loss": 1.1829816102981567, |
|
"eval_runtime": 424.3474, |
|
"eval_samples_per_second": 38.096, |
|
"eval_steps_per_second": 1.192, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.8954748051399153e-05, |
|
"loss": 1.1582, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.893347882185814e-05, |
|
"loss": 1.1702, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.891200754997219e-05, |
|
"loss": 1.1912, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.88903347213423e-05, |
|
"loss": 1.1668, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.886846082612792e-05, |
|
"loss": 1.1707, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.8846386359035892e-05, |
|
"loss": 1.1706, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.8824111819309256e-05, |
|
"loss": 1.1919, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.8801637710715945e-05, |
|
"loss": 1.1796, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.8778964541537422e-05, |
|
"loss": 1.1786, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.8756092824557148e-05, |
|
"loss": 1.1675, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.873302307704902e-05, |
|
"loss": 1.1742, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.870975582076564e-05, |
|
"loss": 1.1794, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.8686291581926546e-05, |
|
"loss": 1.1674, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.8662630891206276e-05, |
|
"loss": 1.1581, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.86387742837224e-05, |
|
"loss": 1.1793, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.86147222990234e-05, |
|
"loss": 1.1886, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.8590475481076468e-05, |
|
"loss": 1.1685, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.8566034378255198e-05, |
|
"loss": 1.1694, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.8541399543327206e-05, |
|
"loss": 1.1626, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.8516571533441606e-05, |
|
"loss": 1.1732, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"eval_loss": 1.1745516061782837, |
|
"eval_runtime": 424.2922, |
|
"eval_samples_per_second": 38.101, |
|
"eval_steps_per_second": 1.193, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.8491550910116415e-05, |
|
"loss": 1.1725, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.8466338239225862e-05, |
|
"loss": 1.1439, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.8440934090987576e-05, |
|
"loss": 1.1523, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.8415339039949702e-05, |
|
"loss": 1.1821, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.8389553664977905e-05, |
|
"loss": 1.1503, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.8363578549242266e-05, |
|
"loss": 1.191, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.8337414280204116e-05, |
|
"loss": 1.1758, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.8311061449602725e-05, |
|
"loss": 1.1663, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.8284520653441936e-05, |
|
"loss": 1.1779, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.8257792491976676e-05, |
|
"loss": 1.1638, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.8230877569699387e-05, |
|
"loss": 1.1777, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.8203776495326346e-05, |
|
"loss": 1.1646, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.8176489881783915e-05, |
|
"loss": 1.1499, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.8149018346194655e-05, |
|
"loss": 1.1506, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.8121362509863397e-05, |
|
"loss": 1.157, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.8093522998263154e-05, |
|
"loss": 1.1486, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.8065500441021018e-05, |
|
"loss": 1.1697, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.803729547190389e-05, |
|
"loss": 1.1435, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.800890872880414e-05, |
|
"loss": 1.1712, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.7980340853725223e-05, |
|
"loss": 1.1596, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"eval_loss": 1.1678704023361206, |
|
"eval_runtime": 427.5598, |
|
"eval_samples_per_second": 37.81, |
|
"eval_steps_per_second": 1.183, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.795159249276711e-05, |
|
"loss": 1.1437, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.792266429611171e-05, |
|
"loss": 1.1483, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.7893556918008136e-05, |
|
"loss": 1.1458, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.7864271016757942e-05, |
|
"loss": 1.1401, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.7834807254700212e-05, |
|
"loss": 1.161, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.7805166298196577e-05, |
|
"loss": 1.1569, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.7775348817616164e-05, |
|
"loss": 1.1807, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.7745355487320418e-05, |
|
"loss": 1.1602, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.7715186985647857e-05, |
|
"loss": 1.1728, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.768484399489873e-05, |
|
"loss": 1.1515, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.7654327201319584e-05, |
|
"loss": 1.1641, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.762363729508775e-05, |
|
"loss": 1.1588, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.7592774970295714e-05, |
|
"loss": 1.174, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.7561740924935456e-05, |
|
"loss": 1.1642, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.753053586088263e-05, |
|
"loss": 1.1461, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.7499160483880694e-05, |
|
"loss": 1.1597, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.7467615503524973e-05, |
|
"loss": 1.1449, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.7435901633246585e-05, |
|
"loss": 1.1453, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.740401959029632e-05, |
|
"loss": 1.1502, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.7371970095728408e-05, |
|
"loss": 1.1546, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"eval_loss": 1.1622368097305298, |
|
"eval_runtime": 421.0004, |
|
"eval_samples_per_second": 38.399, |
|
"eval_steps_per_second": 1.202, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.7339753874384218e-05, |
|
"loss": 1.161, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.730737165487587e-05, |
|
"loss": 1.1538, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.7274824169569747e-05, |
|
"loss": 1.1697, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.7242112154569928e-05, |
|
"loss": 1.1452, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.7209236349701553e-05, |
|
"loss": 1.1545, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.717619749849409e-05, |
|
"loss": 1.1459, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.7142996348164508e-05, |
|
"loss": 1.1694, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.710963364960038e-05, |
|
"loss": 1.1608, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.707611015734291e-05, |
|
"loss": 1.1595, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.704242662956986e-05, |
|
"loss": 1.1706, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.700858382807841e-05, |
|
"loss": 1.1671, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.6974582518267913e-05, |
|
"loss": 1.1478, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.694042346912261e-05, |
|
"loss": 1.1422, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.6906107453194207e-05, |
|
"loss": 1.1538, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.687163524658444e-05, |
|
"loss": 1.166, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.6837007628927483e-05, |
|
"loss": 1.1612, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.680222538337235e-05, |
|
"loss": 1.145, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.6767289296565155e-05, |
|
"loss": 1.1491, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.6732200158631343e-05, |
|
"loss": 1.1638, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.6696958763157808e-05, |
|
"loss": 1.1366, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"eval_loss": 1.1571835279464722, |
|
"eval_runtime": 422.1339, |
|
"eval_samples_per_second": 38.296, |
|
"eval_steps_per_second": 1.199, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.666156590717495e-05, |
|
"loss": 1.1385, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.6626022391138643e-05, |
|
"loss": 1.1455, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 1.6590329018912134e-05, |
|
"loss": 1.1397, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 1.655448659774787e-05, |
|
"loss": 1.1364, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 1.6518495938269242e-05, |
|
"loss": 1.1565, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 1.6482357854452223e-05, |
|
"loss": 1.1352, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 1.6446073163607e-05, |
|
"loss": 1.1456, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 1.6409642686359472e-05, |
|
"loss": 1.1564, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 1.637306724663267e-05, |
|
"loss": 1.1534, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 1.6336347671628162e-05, |
|
"loss": 1.1447, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 1.629948479180731e-05, |
|
"loss": 1.1406, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 1.6262479440872505e-05, |
|
"loss": 1.1845, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 1.622533245574832e-05, |
|
"loss": 1.153, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.618804467656256e-05, |
|
"loss": 1.1444, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.6150616946627272e-05, |
|
"loss": 1.1382, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 1.6113050112419683e-05, |
|
"loss": 1.1463, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 1.6075345023563035e-05, |
|
"loss": 1.1344, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 1.6037502532807382e-05, |
|
"loss": 1.1311, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 1.599952349601031e-05, |
|
"loss": 1.1476, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 1.5961408772117567e-05, |
|
"loss": 1.1606, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"eval_loss": 1.1527125835418701, |
|
"eval_runtime": 424.489, |
|
"eval_samples_per_second": 38.083, |
|
"eval_steps_per_second": 1.192, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 1.592315922314364e-05, |
|
"loss": 1.1548, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 1.588477571415226e-05, |
|
"loss": 1.1291, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 1.5846259113236855e-05, |
|
"loss": 1.1244, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 1.580761029150089e-05, |
|
"loss": 1.1407, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.5768830123038172e-05, |
|
"loss": 1.1329, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.57299194849131e-05, |
|
"loss": 1.1531, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 1.5690879257140804e-05, |
|
"loss": 1.1388, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.5651710322667262e-05, |
|
"loss": 1.1518, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.5612413567349314e-05, |
|
"loss": 1.1301, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 1.557298987993465e-05, |
|
"loss": 1.0788, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 1.553344015204168e-05, |
|
"loss": 1.1098, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 1.5493765278139397e-05, |
|
"loss": 1.1177, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 1.5453966155527133e-05, |
|
"loss": 1.1023, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 1.541404368431426e-05, |
|
"loss": 1.1175, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 1.537399876739985e-05, |
|
"loss": 1.1121, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 1.5333832310452232e-05, |
|
"loss": 1.1135, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 1.5293545221888542e-05, |
|
"loss": 1.1125, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 1.525313841285414e-05, |
|
"loss": 1.1106, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 1.5212612797202033e-05, |
|
"loss": 1.1221, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 1.517196929147219e-05, |
|
"loss": 1.0967, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"eval_loss": 1.1505464315414429, |
|
"eval_runtime": 422.2168, |
|
"eval_samples_per_second": 38.288, |
|
"eval_steps_per_second": 1.198, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 1.5131208814870822e-05, |
|
"loss": 1.1237, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 1.5090332289249586e-05, |
|
"loss": 1.09, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 1.5049340639084742e-05, |
|
"loss": 1.1109, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 1.5008234791456242e-05, |
|
"loss": 1.1049, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 1.4967015676026768e-05, |
|
"loss": 1.1148, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 1.4925684225020694e-05, |
|
"loss": 1.0966, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 1.4884241373203014e-05, |
|
"loss": 1.0958, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 1.4842688057858203e-05, |
|
"loss": 1.1076, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 1.4801025218769001e-05, |
|
"loss": 1.1061, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 1.4759253798195183e-05, |
|
"loss": 1.1071, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 1.4717374740852236e-05, |
|
"loss": 1.1233, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 1.467538899388998e-05, |
|
"loss": 1.1128, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 1.463329750687118e-05, |
|
"loss": 1.0963, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 1.459110123175004e-05, |
|
"loss": 1.087, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 1.4548801122850682e-05, |
|
"loss": 1.1035, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 1.450639813684558e-05, |
|
"loss": 1.1179, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 1.4463893232733886e-05, |
|
"loss": 1.109, |
|
"step": 1285 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 1.4421287371819781e-05, |
|
"loss": 1.1117, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 1.4378581517690711e-05, |
|
"loss": 1.0962, |
|
"step": 1295 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 1.4335776636195605e-05, |
|
"loss": 1.099, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"eval_loss": 1.147972822189331, |
|
"eval_runtime": 424.4957, |
|
"eval_samples_per_second": 38.083, |
|
"eval_steps_per_second": 1.192, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 1.4292873695423012e-05, |
|
"loss": 1.1054, |
|
"step": 1305 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 1.4249873665679241e-05, |
|
"loss": 1.0936, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 1.4206777519466375e-05, |
|
"loss": 1.11, |
|
"step": 1315 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 1.4163586231460307e-05, |
|
"loss": 1.0954, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 1.4120300778488687e-05, |
|
"loss": 1.1073, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 1.4076922139508828e-05, |
|
"loss": 1.0925, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 1.4033451295585565e-05, |
|
"loss": 1.1018, |
|
"step": 1335 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 1.3989889229869071e-05, |
|
"loss": 1.1036, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 1.394623692757262e-05, |
|
"loss": 1.1293, |
|
"step": 1345 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 1.3902495375950303e-05, |
|
"loss": 1.0867, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 1.3858665564274699e-05, |
|
"loss": 1.1107, |
|
"step": 1355 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 1.3814748483814511e-05, |
|
"loss": 1.0888, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 1.3770745127812134e-05, |
|
"loss": 1.1151, |
|
"step": 1365 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 1.3726656491461196e-05, |
|
"loss": 1.1036, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 1.3682483571884064e-05, |
|
"loss": 1.1072, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 1.3638227368109268e-05, |
|
"loss": 1.1061, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 1.3593888881048922e-05, |
|
"loss": 1.0988, |
|
"step": 1385 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 1.3549469113476087e-05, |
|
"loss": 1.1013, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 1.3504969070002091e-05, |
|
"loss": 1.1016, |
|
"step": 1395 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 1.3460389757053802e-05, |
|
"loss": 1.1099, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"eval_loss": 1.1452659368515015, |
|
"eval_runtime": 422.7346, |
|
"eval_samples_per_second": 38.241, |
|
"eval_steps_per_second": 1.197, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 1.3415732182850873e-05, |
|
"loss": 1.0989, |
|
"step": 1405 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 1.3370997357382943e-05, |
|
"loss": 1.1355, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 1.3326186292386778e-05, |
|
"loss": 1.1075, |
|
"step": 1415 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 1.3281300001323416e-05, |
|
"loss": 1.1091, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 1.3236339499355217e-05, |
|
"loss": 1.1098, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 1.3191305803322929e-05, |
|
"loss": 1.0924, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 1.3146199931722674e-05, |
|
"loss": 1.1132, |
|
"step": 1435 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 1.3101022904682918e-05, |
|
"loss": 1.0933, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 1.3055775743941409e-05, |
|
"loss": 1.1083, |
|
"step": 1445 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 1.3010459472822046e-05, |
|
"loss": 1.0861, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 1.2965075116211769e-05, |
|
"loss": 1.1021, |
|
"step": 1455 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 1.2919623700537342e-05, |
|
"loss": 1.1097, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 1.287410625374217e-05, |
|
"loss": 1.1021, |
|
"step": 1465 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 1.282852380526303e-05, |
|
"loss": 1.0934, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 1.2782877386006807e-05, |
|
"loss": 1.1073, |
|
"step": 1475 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 1.2737168028327163e-05, |
|
"loss": 1.0894, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 1.2691396766001192e-05, |
|
"loss": 1.0826, |
|
"step": 1485 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 1.2645564634206054e-05, |
|
"loss": 1.0951, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 1.2599672669495537e-05, |
|
"loss": 1.1073, |
|
"step": 1495 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 1.2553721909776644e-05, |
|
"loss": 1.1015, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"eval_loss": 1.1432281732559204, |
|
"eval_runtime": 426.611, |
|
"eval_samples_per_second": 37.894, |
|
"eval_steps_per_second": 1.186, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 1.2507713394286088e-05, |
|
"loss": 1.0925, |
|
"step": 1505 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 1.246164816356682e-05, |
|
"loss": 1.111, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 1.2415527259444471e-05, |
|
"loss": 1.1127, |
|
"step": 1515 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 1.2369351725003802e-05, |
|
"loss": 1.1235, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 1.232312260456511e-05, |
|
"loss": 1.0993, |
|
"step": 1525 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 1.2276840943660613e-05, |
|
"loss": 1.1048, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 1.2230507789010792e-05, |
|
"loss": 1.1052, |
|
"step": 1535 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 1.2184124188500735e-05, |
|
"loss": 1.0963, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 1.2137691191156425e-05, |
|
"loss": 1.0877, |
|
"step": 1545 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 1.209120984712102e-05, |
|
"loss": 1.0987, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 1.2044681207631104e-05, |
|
"loss": 1.0996, |
|
"step": 1555 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 1.1998106324992906e-05, |
|
"loss": 1.1174, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 1.1951486252558508e-05, |
|
"loss": 1.1, |
|
"step": 1565 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 1.1904822044702017e-05, |
|
"loss": 1.1037, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 1.1858114756795718e-05, |
|
"loss": 1.1123, |
|
"step": 1575 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 1.1811365445186213e-05, |
|
"loss": 1.1118, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 1.1764575167170525e-05, |
|
"loss": 1.0929, |
|
"step": 1585 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 1.1717744980972178e-05, |
|
"loss": 1.1119, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 1.1670875945717282e-05, |
|
"loss": 1.1026, |
|
"step": 1595 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 1.1623969121410563e-05, |
|
"loss": 1.104, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"eval_loss": 1.1408323049545288, |
|
"eval_runtime": 428.7587, |
|
"eval_samples_per_second": 37.704, |
|
"eval_steps_per_second": 1.18, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 1.1577025568911395e-05, |
|
"loss": 1.109, |
|
"step": 1605 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 1.1530046349909816e-05, |
|
"loss": 1.0963, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 1.1483032526902502e-05, |
|
"loss": 1.1037, |
|
"step": 1615 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 1.1435985163168745e-05, |
|
"loss": 1.09, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 1.1388905322746406e-05, |
|
"loss": 1.1086, |
|
"step": 1625 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 1.1341794070407847e-05, |
|
"loss": 1.1021, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 1.1294652471635857e-05, |
|
"loss": 1.0956, |
|
"step": 1635 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 1.124748159259954e-05, |
|
"loss": 1.0962, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 1.1200282500130222e-05, |
|
"loss": 1.0974, |
|
"step": 1645 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 1.1153056261697303e-05, |
|
"loss": 1.1021, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 1.1105803945384134e-05, |
|
"loss": 1.0901, |
|
"step": 1655 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 1.1058526619863846e-05, |
|
"loss": 1.0945, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 1.1011225354375184e-05, |
|
"loss": 1.115, |
|
"step": 1665 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 1.0963901218698331e-05, |
|
"loss": 1.0992, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 1.0916555283130714e-05, |
|
"loss": 1.0961, |
|
"step": 1675 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 1.0869188618462778e-05, |
|
"loss": 1.1053, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 1.0821802295953795e-05, |
|
"loss": 1.0811, |
|
"step": 1685 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 1.0774397387307628e-05, |
|
"loss": 1.0967, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 1.0726974964648478e-05, |
|
"loss": 1.0845, |
|
"step": 1695 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 1.0679536100496661e-05, |
|
"loss": 1.0998, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"eval_loss": 1.138985514640808, |
|
"eval_runtime": 426.7875, |
|
"eval_samples_per_second": 37.878, |
|
"eval_steps_per_second": 1.186, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 1.063208186774433e-05, |
|
"loss": 1.095, |
|
"step": 1705 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 1.0584613339631222e-05, |
|
"loss": 1.103, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 1.0537131589720387e-05, |
|
"loss": 1.0867, |
|
"step": 1715 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 1.0489637691873889e-05, |
|
"loss": 1.1142, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 1.0442132720228551e-05, |
|
"loss": 1.109, |
|
"step": 1725 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 1.0394617749171636e-05, |
|
"loss": 1.1037, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 1.0347093853316555e-05, |
|
"loss": 1.0866, |
|
"step": 1735 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 1.0299562107478569e-05, |
|
"loss": 1.0862, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 1.0252023586650476e-05, |
|
"loss": 1.1113, |
|
"step": 1745 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 1.0204479365978298e-05, |
|
"loss": 1.0826, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 1.0156930520736965e-05, |
|
"loss": 1.1014, |
|
"step": 1755 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 1.0109378126306002e-05, |
|
"loss": 1.1119, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 1.00618232581452e-05, |
|
"loss": 1.1, |
|
"step": 1765 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 1.0014266991770299e-05, |
|
"loss": 1.0855, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 9.966710402728658e-06, |
|
"loss": 1.1033, |
|
"step": 1775 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 9.919154566574942e-06, |
|
"loss": 1.1084, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 9.871600558846772e-06, |
|
"loss": 1.1048, |
|
"step": 1785 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 9.82404945504044e-06, |
|
"loss": 1.0908, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 9.776502330586535e-06, |
|
"loss": 1.0956, |
|
"step": 1795 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 9.728960260825675e-06, |
|
"loss": 1.0829, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"eval_loss": 1.1368587017059326, |
|
"eval_runtime": 427.258, |
|
"eval_samples_per_second": 37.837, |
|
"eval_steps_per_second": 1.184, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 9.681424320984136e-06, |
|
"loss": 1.0987, |
|
"step": 1805 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 9.633895586149575e-06, |
|
"loss": 1.0796, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 9.586375131246688e-06, |
|
"loss": 1.0841, |
|
"step": 1815 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 9.538864031012913e-06, |
|
"loss": 1.1152, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 9.491363359974121e-06, |
|
"loss": 1.0881, |
|
"step": 1825 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 9.443874192420312e-06, |
|
"loss": 1.081, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 9.396397602381318e-06, |
|
"loss": 1.1145, |
|
"step": 1835 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 9.34893466360252e-06, |
|
"loss": 1.0914, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 9.301486449520543e-06, |
|
"loss": 1.1071, |
|
"step": 1845 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 9.254054033239017e-06, |
|
"loss": 1.0771, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 9.206638487504265e-06, |
|
"loss": 1.0867, |
|
"step": 1855 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 9.15924088468106e-06, |
|
"loss": 1.0867, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 9.11186229672839e-06, |
|
"loss": 1.0955, |
|
"step": 1865 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 9.064503795175175e-06, |
|
"loss": 1.0969, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 9.017166451096077e-06, |
|
"loss": 1.084, |
|
"step": 1875 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 8.969851335087233e-06, |
|
"loss": 1.0977, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 8.922559517242078e-06, |
|
"loss": 1.0774, |
|
"step": 1885 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 8.87529206712712e-06, |
|
"loss": 1.0949, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 8.828050053757764e-06, |
|
"loss": 1.0808, |
|
"step": 1895 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 8.780834545574122e-06, |
|
"loss": 1.1052, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"eval_loss": 1.1353213787078857, |
|
"eval_runtime": 426.8906, |
|
"eval_samples_per_second": 37.869, |
|
"eval_steps_per_second": 1.185, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 8.73364661041687e-06, |
|
"loss": 1.1061, |
|
"step": 1905 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 8.686487315503066e-06, |
|
"loss": 1.0696, |
|
"step": 1910 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 8.63935772740205e-06, |
|
"loss": 1.0771, |
|
"step": 1915 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 8.59225891201129e-06, |
|
"loss": 1.0824, |
|
"step": 1920 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 8.545191934532294e-06, |
|
"loss": 1.0977, |
|
"step": 1925 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 8.498157859446512e-06, |
|
"loss": 1.0753, |
|
"step": 1930 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 8.451157750491265e-06, |
|
"loss": 1.1013, |
|
"step": 1935 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 8.404192670635683e-06, |
|
"loss": 1.1199, |
|
"step": 1940 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 8.35726368205667e-06, |
|
"loss": 1.0938, |
|
"step": 1945 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 8.310371846114875e-06, |
|
"loss": 1.0801, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 8.263518223330698e-06, |
|
"loss": 1.0972, |
|
"step": 1955 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 8.216703873360292e-06, |
|
"loss": 1.0819, |
|
"step": 1960 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 8.169929854971598e-06, |
|
"loss": 1.1023, |
|
"step": 1965 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 8.123197226020426e-06, |
|
"loss": 1.0719, |
|
"step": 1970 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 8.076507043426482e-06, |
|
"loss": 1.1115, |
|
"step": 1975 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 8.02986036314952e-06, |
|
"loss": 1.0833, |
|
"step": 1980 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 7.983258240165406e-06, |
|
"loss": 1.1003, |
|
"step": 1985 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 7.936701728442308e-06, |
|
"loss": 1.1022, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 7.890191880916813e-06, |
|
"loss": 1.0957, |
|
"step": 1995 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 7.84372974947016e-06, |
|
"loss": 1.1082, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"eval_loss": 1.133617877960205, |
|
"eval_runtime": 425.3584, |
|
"eval_samples_per_second": 38.006, |
|
"eval_steps_per_second": 1.19, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 7.797316384904402e-06, |
|
"loss": 1.0909, |
|
"step": 2005 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 7.750952836918679e-06, |
|
"loss": 1.0927, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 7.704640154085466e-06, |
|
"loss": 1.0977, |
|
"step": 2015 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 7.658379383826841e-06, |
|
"loss": 1.1009, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 7.612171572390834e-06, |
|
"loss": 1.0988, |
|
"step": 2025 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 7.566017764827717e-06, |
|
"loss": 1.0999, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 7.519919004966414e-06, |
|
"loss": 1.0966, |
|
"step": 2035 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 7.473876335390857e-06, |
|
"loss": 1.0763, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 7.427890797416435e-06, |
|
"loss": 1.092, |
|
"step": 2045 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 7.3819634310664224e-06, |
|
"loss": 1.078, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 7.336095275048474e-06, |
|
"loss": 1.0982, |
|
"step": 2055 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 7.29028736673111e-06, |
|
"loss": 1.0983, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 7.244540742120294e-06, |
|
"loss": 1.0694, |
|
"step": 2065 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 7.1988564358359566e-06, |
|
"loss": 1.0895, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 7.153235481088624e-06, |
|
"loss": 1.1083, |
|
"step": 2075 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 7.107678909656052e-06, |
|
"loss": 1.1023, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 7.062187751859868e-06, |
|
"loss": 1.1044, |
|
"step": 2085 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 7.016763036542305e-06, |
|
"loss": 1.0947, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 6.971405791042889e-06, |
|
"loss": 1.0862, |
|
"step": 2095 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 6.92611704117525e-06, |
|
"loss": 1.0948, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"eval_loss": 1.1320316791534424, |
|
"eval_runtime": 425.6577, |
|
"eval_samples_per_second": 37.979, |
|
"eval_steps_per_second": 1.189, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 6.880897811203877e-06, |
|
"loss": 1.101, |
|
"step": 2105 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 6.835749123820997e-06, |
|
"loss": 1.0866, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 6.790672000123405e-06, |
|
"loss": 1.1166, |
|
"step": 2115 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 6.7456674595894065e-06, |
|
"loss": 1.0838, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 6.700736520055725e-06, |
|
"loss": 1.0798, |
|
"step": 2125 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 6.6558801976945206e-06, |
|
"loss": 1.0926, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 6.611099506990372e-06, |
|
"loss": 1.0993, |
|
"step": 2135 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 6.566395460717356e-06, |
|
"loss": 1.0906, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 6.521769069916136e-06, |
|
"loss": 1.0807, |
|
"step": 2145 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 6.477221343871088e-06, |
|
"loss": 1.0955, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 6.4327532900874945e-06, |
|
"loss": 1.1005, |
|
"step": 2155 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 6.38836591426873e-06, |
|
"loss": 1.0752, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 6.344060220293542e-06, |
|
"loss": 1.0944, |
|
"step": 2165 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 6.299837210193331e-06, |
|
"loss": 1.0875, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 6.255697884129495e-06, |
|
"loss": 1.1115, |
|
"step": 2175 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 6.2116432403708015e-06, |
|
"loss": 1.0987, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 6.167674275270832e-06, |
|
"loss": 1.0972, |
|
"step": 2185 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 6.123791983245411e-06, |
|
"loss": 1.0875, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 6.0799973567501616e-06, |
|
"loss": 1.0981, |
|
"step": 2195 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 6.036291386258013e-06, |
|
"loss": 1.0682, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"eval_loss": 1.1308259963989258, |
|
"eval_runtime": 424.8533, |
|
"eval_samples_per_second": 38.051, |
|
"eval_steps_per_second": 1.191, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 5.992675060236841e-06, |
|
"loss": 1.1107, |
|
"step": 2205 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 5.94914936512708e-06, |
|
"loss": 1.0855, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 5.905715285319442e-06, |
|
"loss": 1.0945, |
|
"step": 2215 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 5.862373803132625e-06, |
|
"loss": 1.1054, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 5.819125898791115e-06, |
|
"loss": 1.088, |
|
"step": 2225 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 5.775972550403015e-06, |
|
"loss": 1.0931, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 5.732914733937917e-06, |
|
"loss": 1.0895, |
|
"step": 2235 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 5.6899534232048395e-06, |
|
"loss": 1.1147, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 5.647089589830186e-06, |
|
"loss": 1.098, |
|
"step": 2245 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 5.604324203235798e-06, |
|
"loss": 1.0913, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 5.561658230616997e-06, |
|
"loss": 1.1042, |
|
"step": 2255 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 5.519092636920741e-06, |
|
"loss": 1.087, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 5.476628384823773e-06, |
|
"loss": 1.1111, |
|
"step": 2265 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 5.434266434710879e-06, |
|
"loss": 1.0931, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 5.392007744653134e-06, |
|
"loss": 1.0838, |
|
"step": 2275 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 5.3498532703862685e-06, |
|
"loss": 1.0867, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 5.307803965289023e-06, |
|
"loss": 1.0827, |
|
"step": 2285 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 5.265860780361602e-06, |
|
"loss": 1.0477, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 5.2240246642041705e-06, |
|
"loss": 1.0477, |
|
"step": 2295 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 5.182296562995383e-06, |
|
"loss": 1.0688, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"eval_loss": 1.1318334341049194, |
|
"eval_runtime": 424.2756, |
|
"eval_samples_per_second": 38.103, |
|
"eval_steps_per_second": 1.193, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 5.140677420471003e-06, |
|
"loss": 1.0613, |
|
"step": 2305 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 5.099168177902539e-06, |
|
"loss": 1.0643, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 5.057769774075985e-06, |
|
"loss": 1.0693, |
|
"step": 2315 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 5.0164831452705494e-06, |
|
"loss": 1.0705, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 4.9753092252375245e-06, |
|
"loss": 1.0634, |
|
"step": 2325 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 4.934248945179127e-06, |
|
"loss": 1.0667, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 4.893303233727472e-06, |
|
"loss": 1.055, |
|
"step": 2335 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 4.8524730169235404e-06, |
|
"loss": 1.0741, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 4.811759218196262e-06, |
|
"loss": 1.0634, |
|
"step": 2345 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 4.771162758341612e-06, |
|
"loss": 1.0618, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 4.730684555501799e-06, |
|
"loss": 1.0691, |
|
"step": 2355 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 4.690325525144488e-06, |
|
"loss": 1.0636, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 4.6500865800421015e-06, |
|
"loss": 1.0647, |
|
"step": 2365 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 4.609968630251187e-06, |
|
"loss": 1.061, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 4.569972583091807e-06, |
|
"loss": 1.0512, |
|
"step": 2375 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 4.5300993431270565e-06, |
|
"loss": 1.0709, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 4.490349812142564e-06, |
|
"loss": 1.077, |
|
"step": 2385 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 4.450724889126135e-06, |
|
"loss": 1.0653, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.411225470247387e-06, |
|
"loss": 1.0504, |
|
"step": 2395 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.371852448837511e-06, |
|
"loss": 1.0754, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"eval_loss": 1.1316897869110107, |
|
"eval_runtime": 426.2864, |
|
"eval_samples_per_second": 37.923, |
|
"eval_steps_per_second": 1.187, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 4.332606715369041e-06, |
|
"loss": 1.0568, |
|
"step": 2405 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 4.2934891574357375e-06, |
|
"loss": 1.049, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 4.254500659732496e-06, |
|
"loss": 1.0706, |
|
"step": 2415 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 4.2156421040353435e-06, |
|
"loss": 1.0755, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 4.1769143691815095e-06, |
|
"loss": 1.0672, |
|
"step": 2425 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 4.138318331049525e-06, |
|
"loss": 1.0697, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 4.09985486253944e-06, |
|
"loss": 1.0501, |
|
"step": 2435 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 4.061524833553058e-06, |
|
"loss": 1.0667, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 4.0233291109742726e-06, |
|
"loss": 1.0629, |
|
"step": 2445 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 3.985268558649472e-06, |
|
"loss": 1.0451, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 3.947344037367983e-06, |
|
"loss": 1.0625, |
|
"step": 2455 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 3.909556404842609e-06, |
|
"loss": 1.0517, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 3.871906515690249e-06, |
|
"loss": 1.0807, |
|
"step": 2465 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 3.834395221412537e-06, |
|
"loss": 1.0579, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 3.797023370376618e-06, |
|
"loss": 1.0735, |
|
"step": 2475 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 3.7597918077959306e-06, |
|
"loss": 1.0517, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 3.7227013757111197e-06, |
|
"loss": 1.0747, |
|
"step": 2485 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 3.6857529129709655e-06, |
|
"loss": 1.0447, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 3.64894725521344e-06, |
|
"loss": 1.0592, |
|
"step": 2495 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 3.61228523484678e-06, |
|
"loss": 1.0646, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"eval_loss": 1.1311019659042358, |
|
"eval_runtime": 426.4079, |
|
"eval_samples_per_second": 37.912, |
|
"eval_steps_per_second": 1.187, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 3.5757676810306775e-06, |
|
"loss": 1.0573, |
|
"step": 2505 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 3.539395419657531e-06, |
|
"loss": 1.0672, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 3.5031692733337475e-06, |
|
"loss": 1.0573, |
|
"step": 2515 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 3.4670900613611656e-06, |
|
"loss": 1.0503, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 3.431158599718496e-06, |
|
"loss": 1.0727, |
|
"step": 2525 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 3.3953757010428946e-06, |
|
"loss": 1.0788, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 3.359742174611558e-06, |
|
"loss": 1.0614, |
|
"step": 2535 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 3.3242588263234467e-06, |
|
"loss": 1.0555, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 3.2889264586810323e-06, |
|
"loss": 1.0472, |
|
"step": 2545 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 3.2537458707721735e-06, |
|
"loss": 1.062, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 3.2187178582520206e-06, |
|
"loss": 1.0742, |
|
"step": 2555 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 3.183843213325042e-06, |
|
"loss": 1.0398, |
|
"step": 2560 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 3.149122724727083e-06, |
|
"loss": 1.0733, |
|
"step": 2565 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 3.1145571777075577e-06, |
|
"loss": 1.0564, |
|
"step": 2570 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 3.080147354011659e-06, |
|
"loss": 1.0453, |
|
"step": 2575 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 3.0458940318626963e-06, |
|
"loss": 1.0627, |
|
"step": 2580 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 3.011797985944499e-06, |
|
"loss": 1.0731, |
|
"step": 2585 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 2.977859987383874e-06, |
|
"loss": 1.061, |
|
"step": 2590 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 2.944080803733197e-06, |
|
"loss": 1.0406, |
|
"step": 2595 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 2.9104611989530196e-06, |
|
"loss": 1.058, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"eval_loss": 1.1305283308029175, |
|
"eval_runtime": 426.5628, |
|
"eval_samples_per_second": 37.898, |
|
"eval_steps_per_second": 1.186, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 2.8770019333948197e-06, |
|
"loss": 1.0686, |
|
"step": 2605 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 2.843703763783785e-06, |
|
"loss": 1.0622, |
|
"step": 2610 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 2.810567443201717e-06, |
|
"loss": 1.0579, |
|
"step": 2615 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 2.7775937210699754e-06, |
|
"loss": 1.0625, |
|
"step": 2620 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 2.7447833431325566e-06, |
|
"loss": 1.0313, |
|
"step": 2625 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 2.712137051439202e-06, |
|
"loss": 1.0708, |
|
"step": 2630 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 2.6796555843286375e-06, |
|
"loss": 1.0625, |
|
"step": 2635 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 2.6473396764118575e-06, |
|
"loss": 1.0642, |
|
"step": 2640 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 2.6151900585555178e-06, |
|
"loss": 1.0606, |
|
"step": 2645 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 2.583207457865413e-06, |
|
"loss": 1.0547, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 2.5513925976700217e-06, |
|
"loss": 1.0587, |
|
"step": 2655 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 2.519746197504144e-06, |
|
"loss": 1.071, |
|
"step": 2660 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 2.488268973092649e-06, |
|
"loss": 1.0637, |
|
"step": 2665 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 2.456961636334265e-06, |
|
"loss": 1.0849, |
|
"step": 2670 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 2.425824895285488e-06, |
|
"loss": 1.0552, |
|
"step": 2675 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 2.3948594541445735e-06, |
|
"loss": 1.0659, |
|
"step": 2680 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 2.3640660132356e-06, |
|
"loss": 1.0469, |
|
"step": 2685 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 2.333445268992639e-06, |
|
"loss": 1.0558, |
|
"step": 2690 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 2.302997913943994e-06, |
|
"loss": 1.0693, |
|
"step": 2695 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 2.272724636696555e-06, |
|
"loss": 1.0553, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"eval_loss": 1.1301469802856445, |
|
"eval_runtime": 426.3468, |
|
"eval_samples_per_second": 37.917, |
|
"eval_steps_per_second": 1.187, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 2.2426261219202006e-06, |
|
"loss": 1.0552, |
|
"step": 2705 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 2.21270305033234e-06, |
|
"loss": 1.0682, |
|
"step": 2710 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 2.1829560986824937e-06, |
|
"loss": 1.0714, |
|
"step": 2715 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 2.1533859397370084e-06, |
|
"loss": 1.0615, |
|
"step": 2720 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 2.1239932422638234e-06, |
|
"loss": 1.0724, |
|
"step": 2725 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 2.0947786710173545e-06, |
|
"loss": 1.0641, |
|
"step": 2730 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 2.06574288672347e-06, |
|
"loss": 1.0627, |
|
"step": 2735 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 2.0368865460645202e-06, |
|
"loss": 1.046, |
|
"step": 2740 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 2.008210301664518e-06, |
|
"loss": 1.0686, |
|
"step": 2745 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 1.9797148020743496e-06, |
|
"loss": 1.0586, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 1.951400691757133e-06, |
|
"loss": 1.0579, |
|
"step": 2755 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 1.9232686110736165e-06, |
|
"loss": 1.0625, |
|
"step": 2760 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 1.895319196267722e-06, |
|
"loss": 1.0722, |
|
"step": 2765 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 1.8675530794521312e-06, |
|
"loss": 1.0521, |
|
"step": 2770 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 1.8399708885940136e-06, |
|
"loss": 1.0627, |
|
"step": 2775 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 1.8125732475007983e-06, |
|
"loss": 1.0666, |
|
"step": 2780 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 1.785360775806093e-06, |
|
"loss": 1.0626, |
|
"step": 2785 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 1.7583340889556456e-06, |
|
"loss": 1.047, |
|
"step": 2790 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 1.7314937981934399e-06, |
|
"loss": 1.0551, |
|
"step": 2795 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 1.7048405105478717e-06, |
|
"loss": 1.0607, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"eval_loss": 1.1297959089279175, |
|
"eval_runtime": 426.8933, |
|
"eval_samples_per_second": 37.869, |
|
"eval_steps_per_second": 1.185, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 1.6783748288180058e-06, |
|
"loss": 1.0603, |
|
"step": 2805 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 1.652097351559967e-06, |
|
"loss": 1.0649, |
|
"step": 2810 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 1.6260086730733749e-06, |
|
"loss": 1.0693, |
|
"step": 2815 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 1.6001093833879288e-06, |
|
"loss": 1.0584, |
|
"step": 2820 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 1.5744000682500426e-06, |
|
"loss": 1.0495, |
|
"step": 2825 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 1.5488813091096145e-06, |
|
"loss": 1.0708, |
|
"step": 2830 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 1.523553683106861e-06, |
|
"loss": 1.0802, |
|
"step": 2835 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 1.49841776305928e-06, |
|
"loss": 1.072, |
|
"step": 2840 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 1.4734741174486788e-06, |
|
"loss": 1.0534, |
|
"step": 2845 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 1.4487233104083354e-06, |
|
"loss": 1.0652, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 1.424165901710224e-06, |
|
"loss": 1.0715, |
|
"step": 2855 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 1.3998024467523596e-06, |
|
"loss": 1.0464, |
|
"step": 2860 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 1.3756334965462502e-06, |
|
"loss": 1.0829, |
|
"step": 2865 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 1.3516595977044112e-06, |
|
"loss": 1.06, |
|
"step": 2870 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 1.3278812924280192e-06, |
|
"loss": 1.0429, |
|
"step": 2875 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 1.304299118494652e-06, |
|
"loss": 1.0592, |
|
"step": 2880 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 1.2809136092461084e-06, |
|
"loss": 1.0758, |
|
"step": 2885 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 1.2577252935763695e-06, |
|
"loss": 1.0818, |
|
"step": 2890 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 1.234734695919616e-06, |
|
"loss": 1.0652, |
|
"step": 2895 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 1.2119423362383776e-06, |
|
"loss": 1.0669, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"eval_loss": 1.1293786764144897, |
|
"eval_runtime": 427.2638, |
|
"eval_samples_per_second": 37.836, |
|
"eval_steps_per_second": 1.184, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 1.189348730011778e-06, |
|
"loss": 1.0469, |
|
"step": 2905 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 1.166954388223862e-06, |
|
"loss": 1.0572, |
|
"step": 2910 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 1.1447598173520558e-06, |
|
"loss": 1.0636, |
|
"step": 2915 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 1.1227655193556973e-06, |
|
"loss": 1.0599, |
|
"step": 2920 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 1.1009719916646977e-06, |
|
"loss": 1.0613, |
|
"step": 2925 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 1.079379727168276e-06, |
|
"loss": 1.056, |
|
"step": 2930 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 1.0579892142038284e-06, |
|
"loss": 1.0726, |
|
"step": 2935 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 1.0368009365458697e-06, |
|
"loss": 1.059, |
|
"step": 2940 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 1.0158153733950981e-06, |
|
"loss": 1.0583, |
|
"step": 2945 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 9.950329993675623e-07, |
|
"loss": 1.0466, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 9.744542844839145e-07, |
|
"loss": 1.0614, |
|
"step": 2955 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 9.540796941587983e-07, |
|
"loss": 1.0493, |
|
"step": 2960 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 9.33909689190301e-07, |
|
"loss": 1.0612, |
|
"step": 2965 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 9.139447257495537e-07, |
|
"loss": 1.077, |
|
"step": 2970 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 8.941852553703966e-07, |
|
"loss": 1.0723, |
|
"step": 2975 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 8.746317249391834e-07, |
|
"loss": 1.0589, |
|
"step": 2980 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 8.55284576684654e-07, |
|
"loss": 1.0724, |
|
"step": 2985 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 8.361442481679561e-07, |
|
"loss": 1.0607, |
|
"step": 2990 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 8.172111722727294e-07, |
|
"loss": 1.0663, |
|
"step": 2995 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 7.984857771953303e-07, |
|
"loss": 1.0476, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"eval_loss": 1.1292005777359009, |
|
"eval_runtime": 426.0151, |
|
"eval_samples_per_second": 37.947, |
|
"eval_steps_per_second": 1.188, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 7.799684864351342e-07, |
|
"loss": 1.0608, |
|
"step": 3005 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 7.616597187849683e-07, |
|
"loss": 1.0502, |
|
"step": 3010 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 7.435598883216377e-07, |
|
"loss": 1.0575, |
|
"step": 3015 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 7.256694043965528e-07, |
|
"loss": 1.0571, |
|
"step": 3020 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 7.07988671626485e-07, |
|
"loss": 1.0744, |
|
"step": 3025 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 6.905180898844022e-07, |
|
"loss": 1.0507, |
|
"step": 3030 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 6.732580542904343e-07, |
|
"loss": 1.0436, |
|
"step": 3035 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 6.562089552029305e-07, |
|
"loss": 1.0638, |
|
"step": 3040 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 6.39371178209639e-07, |
|
"loss": 1.06, |
|
"step": 3045 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 6.227451041189759e-07, |
|
"loss": 1.0574, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 6.063311089514256e-07, |
|
"loss": 1.0706, |
|
"step": 3055 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 5.901295639310212e-07, |
|
"loss": 1.0676, |
|
"step": 3060 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 5.74140835476964e-07, |
|
"loss": 1.0787, |
|
"step": 3065 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 5.583652851953225e-07, |
|
"loss": 1.0548, |
|
"step": 3070 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 5.428032698708696e-07, |
|
"loss": 1.0513, |
|
"step": 3075 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 5.274551414589979e-07, |
|
"loss": 1.0633, |
|
"step": 3080 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 5.123212470777684e-07, |
|
"loss": 1.0769, |
|
"step": 3085 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.97401929000062e-07, |
|
"loss": 1.0609, |
|
"step": 3090 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.826975246458299e-07, |
|
"loss": 1.0626, |
|
"step": 3095 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 4.6820836657446964e-07, |
|
"loss": 1.0688, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"eval_loss": 1.1291499137878418, |
|
"eval_runtime": 428.0351, |
|
"eval_samples_per_second": 37.768, |
|
"eval_steps_per_second": 1.182, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 4.5393478247730436e-07, |
|
"loss": 1.0725, |
|
"step": 3105 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 4.398770951701647e-07, |
|
"loss": 1.0589, |
|
"step": 3110 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.2603562258609176e-07, |
|
"loss": 1.0578, |
|
"step": 3115 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.124106777681536e-07, |
|
"loss": 1.0502, |
|
"step": 3120 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 3.9900256886235e-07, |
|
"loss": 1.0527, |
|
"step": 3125 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 3.8581159911065926e-07, |
|
"loss": 1.0679, |
|
"step": 3130 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 3.7283806684416777e-07, |
|
"loss": 1.0714, |
|
"step": 3135 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 3.600822654763314e-07, |
|
"loss": 1.0473, |
|
"step": 3140 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 3.4754448349633374e-07, |
|
"loss": 1.0492, |
|
"step": 3145 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 3.35225004462566e-07, |
|
"loss": 1.0679, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 3.2312410699620986e-07, |
|
"loss": 1.0691, |
|
"step": 3155 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 3.11242064774937e-07, |
|
"loss": 1.0479, |
|
"step": 3160 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 2.99579146526725e-07, |
|
"loss": 1.0476, |
|
"step": 3165 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 2.8813561602377025e-07, |
|
"loss": 1.0622, |
|
"step": 3170 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 2.7691173207653355e-07, |
|
"loss": 1.067, |
|
"step": 3175 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 2.659077485278716e-07, |
|
"loss": 1.0667, |
|
"step": 3180 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 2.551239142473161e-07, |
|
"loss": 1.0557, |
|
"step": 3185 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 2.4456047312542365e-07, |
|
"loss": 1.0689, |
|
"step": 3190 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 2.3421766406827807e-07, |
|
"loss": 1.0639, |
|
"step": 3195 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 2.2409572099207576e-07, |
|
"loss": 1.0583, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"eval_loss": 1.128919243812561, |
|
"eval_runtime": 428.0437, |
|
"eval_samples_per_second": 37.767, |
|
"eval_steps_per_second": 1.182, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 2.1419487281784002e-07, |
|
"loss": 1.0453, |
|
"step": 3205 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 2.045153434662428e-07, |
|
"loss": 1.0579, |
|
"step": 3210 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 1.9505735185254226e-07, |
|
"loss": 1.0513, |
|
"step": 3215 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 1.8582111188162555e-07, |
|
"loss": 1.0592, |
|
"step": 3220 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 1.7680683244318154e-07, |
|
"loss": 1.0558, |
|
"step": 3225 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 1.6801471740696462e-07, |
|
"loss": 1.0664, |
|
"step": 3230 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 1.594449656181918e-07, |
|
"loss": 1.0567, |
|
"step": 3235 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 1.510977708930461e-07, |
|
"loss": 1.0719, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 1.4297332201428703e-07, |
|
"loss": 1.0455, |
|
"step": 3245 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 1.3507180272698594e-07, |
|
"loss": 1.0817, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 1.2739339173436838e-07, |
|
"loss": 1.06, |
|
"step": 3255 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 1.1993826269377506e-07, |
|
"loss": 1.0707, |
|
"step": 3260 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 1.1270658421273062e-07, |
|
"loss": 1.0514, |
|
"step": 3265 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 1.0569851984513102e-07, |
|
"loss": 1.0555, |
|
"step": 3270 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 9.89142280875477e-08, |
|
"loss": 1.063, |
|
"step": 3275 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 9.235386237564148e-08, |
|
"loss": 1.066, |
|
"step": 3280 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 8.601757108068876e-08, |
|
"loss": 1.0488, |
|
"step": 3285 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 7.990549750623189e-08, |
|
"loss": 1.0568, |
|
"step": 3290 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 7.401777988483406e-08, |
|
"loss": 1.0687, |
|
"step": 3295 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 6.835455137495395e-08, |
|
"loss": 1.0545, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"eval_loss": 1.1288610696792603, |
|
"eval_runtime": 428.9651, |
|
"eval_samples_per_second": 37.686, |
|
"eval_steps_per_second": 1.18, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 6.2915940057936e-08, |
|
"loss": 1.0434, |
|
"step": 3305 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 5.7702068935109324e-08, |
|
"loss": 1.0357, |
|
"step": 3310 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 5.271305592501108e-08, |
|
"loss": 1.0492, |
|
"step": 3315 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.794901386071749e-08, |
|
"loss": 1.0609, |
|
"step": 3320 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.341005048728919e-08, |
|
"loss": 1.0617, |
|
"step": 3325 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 3.9096268459339893e-08, |
|
"loss": 1.0605, |
|
"step": 3330 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 3.50077653387082e-08, |
|
"loss": 1.0646, |
|
"step": 3335 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 3.114463359225717e-08, |
|
"loss": 1.0615, |
|
"step": 3340 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 2.7506960589781527e-08, |
|
"loss": 1.0613, |
|
"step": 3345 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 2.4094828602027052e-08, |
|
"loss": 1.0479, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 2.0908314798836483e-08, |
|
"loss": 1.0627, |
|
"step": 3355 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 1.7947491247399808e-08, |
|
"loss": 1.0723, |
|
"step": 3360 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 1.5212424910627797e-08, |
|
"loss": 1.0669, |
|
"step": 3365 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 1.2703177645634335e-08, |
|
"loss": 1.0596, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 1.0419806202336403e-08, |
|
"loss": 1.0596, |
|
"step": 3375 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 8.362362222177345e-09, |
|
"loss": 1.0628, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 6.530892236951136e-09, |
|
"loss": 1.0481, |
|
"step": 3385 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 4.925437667755439e-09, |
|
"loss": 1.061, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 3.5460348240501376e-09, |
|
"loss": 1.0437, |
|
"step": 3395 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 2.39271490284243e-09, |
|
"loss": 1.0744, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"eval_loss": 1.1288557052612305, |
|
"eval_runtime": 426.2375, |
|
"eval_samples_per_second": 37.927, |
|
"eval_steps_per_second": 1.187, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 1.4655039879740706e-09, |
|
"loss": 1.065, |
|
"step": 3405 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 7.644230495373884e-10, |
|
"loss": 1.0557, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 2.8948794339789255e-10, |
|
"loss": 1.0572, |
|
"step": 3415 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.07094108345607e-11, |
|
"loss": 1.0649, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"step": 3423, |
|
"total_flos": 3.426180505951378e+19, |
|
"train_loss": 1.2174325642654014, |
|
"train_runtime": 53375.7024, |
|
"train_samples_per_second": 8.209, |
|
"train_steps_per_second": 0.064 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 3423, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 100, |
|
"total_flos": 3.426180505951378e+19, |
|
"train_batch_size": 16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
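
A minimal sketch, assuming the log above is saved as trainer_state.json (the filename the Hugging Face Trainer writes into each checkpoint directory), of how to split log_history into the per-step training losses and the periodic eval records; the path and variable names here are illustrative, not part of the original file:

import json

# Load the trainer state (assumed path: ./trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# Step-level training records carry a "loss" key; evaluation records carry
# "eval_loss" (every 100 steps in this run). The closing summary record holds
# "train_loss"/"train_runtime" instead, so it matches neither filter.
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

last_step, last_eval = eval_curve[-1]
print(f"{len(train_curve)} training points, {len(eval_curve)} evals; "
      f"final eval_loss {last_eval} at step {last_step}")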