|
{ |
|
"best_metric": 0.7278521656990051, |
|
"best_model_checkpoint": "checkpoints/bart/checkpoint-620000", |
|
"epoch": 20.18886356235754, |
|
"global_step": 620000, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 5.000000000000001e-07, |
|
"loss": 10.4661, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.0000000000000002e-06, |
|
"loss": 10.2325, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.5e-06, |
|
"loss": 9.8216, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 2.0000000000000003e-06, |
|
"loss": 9.4694, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 2.5e-06, |
|
"loss": 9.1591, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3e-06, |
|
"loss": 8.9458, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 3.5e-06, |
|
"loss": 8.8337, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.000000000000001e-06, |
|
"loss": 8.7867, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.5e-06, |
|
"loss": 8.7671, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 5e-06, |
|
"loss": 8.7524, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 5.500000000000001e-06, |
|
"loss": 8.7339, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 6e-06, |
|
"loss": 8.6581, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 6.5000000000000004e-06, |
|
"loss": 8.5433, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 7e-06, |
|
"loss": 8.4347, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 7.500000000000001e-06, |
|
"loss": 8.2994, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 8.000000000000001e-06, |
|
"loss": 8.1404, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 8.5e-06, |
|
"loss": 7.9371, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 9e-06, |
|
"loss": 7.7451, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 9.5e-06, |
|
"loss": 7.5965, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1e-05, |
|
"loss": 7.482, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"eval_loss": 7.40041971206665, |
|
"eval_runtime": 144.9465, |
|
"eval_samples_per_second": 547.871, |
|
"eval_steps_per_second": 2.146, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 9.994949494949497e-06, |
|
"loss": 7.385, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 9.989898989898991e-06, |
|
"loss": 7.3015, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 9.984848484848485e-06, |
|
"loss": 7.2289, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 9.97979797979798e-06, |
|
"loss": 7.1624, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 9.974747474747476e-06, |
|
"loss": 7.1031, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 9.96969696969697e-06, |
|
"loss": 7.0448, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 9.964646464646466e-06, |
|
"loss": 6.9899, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 9.95959595959596e-06, |
|
"loss": 6.9354, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 9.954545454545456e-06, |
|
"loss": 6.8847, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 9.94949494949495e-06, |
|
"loss": 6.8338, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 9.944444444444445e-06, |
|
"loss": 6.7857, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 9.939393939393939e-06, |
|
"loss": 6.7382, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 9.934343434343435e-06, |
|
"loss": 6.6942, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 9.92929292929293e-06, |
|
"loss": 6.6523, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 9.924242424242425e-06, |
|
"loss": 6.6065, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 9.91919191919192e-06, |
|
"loss": 6.5534, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 9.914141414141416e-06, |
|
"loss": 6.4647, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 9.90909090909091e-06, |
|
"loss": 6.3421, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 9.904040404040404e-06, |
|
"loss": 6.2265, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 9.8989898989899e-06, |
|
"loss": 6.1141, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"eval_loss": 5.996727466583252, |
|
"eval_runtime": 102.6556, |
|
"eval_samples_per_second": 773.577, |
|
"eval_steps_per_second": 3.03, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 9.893939393939395e-06, |
|
"loss": 6.0157, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 9.88888888888889e-06, |
|
"loss": 5.925, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 9.883838383838385e-06, |
|
"loss": 5.841, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 9.87878787878788e-06, |
|
"loss": 5.7651, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 9.873737373737373e-06, |
|
"loss": 5.6874, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 9.86868686868687e-06, |
|
"loss": 5.6163, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 9.863636363636364e-06, |
|
"loss": 5.5472, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 9.85858585858586e-06, |
|
"loss": 5.4828, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 9.853535353535354e-06, |
|
"loss": 5.4214, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 9.84848484848485e-06, |
|
"loss": 5.3606, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 9.843434343434344e-06, |
|
"loss": 5.302, |
|
"step": 25500 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 9.838383838383839e-06, |
|
"loss": 5.2467, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 9.833333333333333e-06, |
|
"loss": 5.19, |
|
"step": 26500 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 9.828282828282829e-06, |
|
"loss": 5.1389, |
|
"step": 27000 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 9.823232323232325e-06, |
|
"loss": 5.0893, |
|
"step": 27500 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 9.81818181818182e-06, |
|
"loss": 5.036, |
|
"step": 28000 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 9.813131313131315e-06, |
|
"loss": 4.9913, |
|
"step": 28500 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 9.80808080808081e-06, |
|
"loss": 4.9422, |
|
"step": 29000 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 9.803030303030304e-06, |
|
"loss": 4.9003, |
|
"step": 29500 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 9.797979797979798e-06, |
|
"loss": 4.8521, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"eval_loss": 4.616042137145996, |
|
"eval_runtime": 89.4823, |
|
"eval_samples_per_second": 887.46, |
|
"eval_steps_per_second": 3.476, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 9.792929292929294e-06, |
|
"loss": 4.8057, |
|
"step": 30500 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 9.787878787878788e-06, |
|
"loss": 4.7651, |
|
"step": 31000 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 9.782828282828284e-06, |
|
"loss": 4.7198, |
|
"step": 31500 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 9.777777777777779e-06, |
|
"loss": 4.6846, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 9.772727272727273e-06, |
|
"loss": 4.6463, |
|
"step": 32500 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 9.767676767676767e-06, |
|
"loss": 4.6111, |
|
"step": 33000 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 9.762626262626263e-06, |
|
"loss": 4.5757, |
|
"step": 33500 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 9.757575757575758e-06, |
|
"loss": 4.5376, |
|
"step": 34000 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 9.752525252525254e-06, |
|
"loss": 4.5036, |
|
"step": 34500 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 9.747474747474748e-06, |
|
"loss": 4.4697, |
|
"step": 35000 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 9.742424242424244e-06, |
|
"loss": 4.4346, |
|
"step": 35500 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 9.737373737373738e-06, |
|
"loss": 4.3974, |
|
"step": 36000 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 9.732323232323232e-06, |
|
"loss": 4.3679, |
|
"step": 36500 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 9.727272727272728e-06, |
|
"loss": 4.3366, |
|
"step": 37000 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 9.722222222222223e-06, |
|
"loss": 4.3081, |
|
"step": 37500 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 9.717171717171719e-06, |
|
"loss": 4.2805, |
|
"step": 38000 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 9.712121212121213e-06, |
|
"loss": 4.2505, |
|
"step": 38500 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 9.707070707070709e-06, |
|
"loss": 4.2206, |
|
"step": 39000 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 9.702020202020203e-06, |
|
"loss": 4.1958, |
|
"step": 39500 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 9.696969696969698e-06, |
|
"loss": 4.1595, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"eval_loss": 4.127486705780029, |
|
"eval_runtime": 88.4336, |
|
"eval_samples_per_second": 897.985, |
|
"eval_steps_per_second": 3.517, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 9.691919191919192e-06, |
|
"loss": 4.1409, |
|
"step": 40500 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 9.686868686868688e-06, |
|
"loss": 4.1087, |
|
"step": 41000 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 9.681818181818182e-06, |
|
"loss": 4.0735, |
|
"step": 41500 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 9.676767676767678e-06, |
|
"loss": 4.0509, |
|
"step": 42000 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 9.671717171717172e-06, |
|
"loss": 4.0182, |
|
"step": 42500 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 9.666666666666667e-06, |
|
"loss": 3.9825, |
|
"step": 43000 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 9.661616161616163e-06, |
|
"loss": 3.9548, |
|
"step": 43500 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 9.656565656565657e-06, |
|
"loss": 3.9287, |
|
"step": 44000 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 9.651515151515153e-06, |
|
"loss": 3.8958, |
|
"step": 44500 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 9.646464646464647e-06, |
|
"loss": 3.867, |
|
"step": 45000 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 9.641414141414143e-06, |
|
"loss": 3.8407, |
|
"step": 45500 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 9.636363636363638e-06, |
|
"loss": 3.813, |
|
"step": 46000 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 9.631313131313132e-06, |
|
"loss": 3.7928, |
|
"step": 46500 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 9.626262626262626e-06, |
|
"loss": 3.7673, |
|
"step": 47000 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 9.621212121212122e-06, |
|
"loss": 3.7444, |
|
"step": 47500 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 9.616161616161616e-06, |
|
"loss": 3.7203, |
|
"step": 48000 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 9.611111111111112e-06, |
|
"loss": 3.7023, |
|
"step": 48500 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 9.606060606060607e-06, |
|
"loss": 3.6724, |
|
"step": 49000 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 9.601010101010103e-06, |
|
"loss": 3.653, |
|
"step": 49500 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 9.595959595959597e-06, |
|
"loss": 3.6342, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"eval_loss": 3.4967825412750244, |
|
"eval_runtime": 91.2654, |
|
"eval_samples_per_second": 870.122, |
|
"eval_steps_per_second": 3.408, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 9.590909090909091e-06, |
|
"loss": 3.6095, |
|
"step": 50500 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 9.585858585858586e-06, |
|
"loss": 3.5816, |
|
"step": 51000 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 9.580808080808082e-06, |
|
"loss": 3.5653, |
|
"step": 51500 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 9.575757575757576e-06, |
|
"loss": 3.5455, |
|
"step": 52000 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 9.570707070707072e-06, |
|
"loss": 3.5238, |
|
"step": 52500 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 9.565656565656566e-06, |
|
"loss": 3.4987, |
|
"step": 53000 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 9.56060606060606e-06, |
|
"loss": 3.4826, |
|
"step": 53500 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 9.555555555555556e-06, |
|
"loss": 3.4585, |
|
"step": 54000 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 9.55050505050505e-06, |
|
"loss": 3.442, |
|
"step": 54500 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 9.545454545454547e-06, |
|
"loss": 3.4201, |
|
"step": 55000 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 9.540404040404041e-06, |
|
"loss": 3.4026, |
|
"step": 55500 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 9.535353535353537e-06, |
|
"loss": 3.385, |
|
"step": 56000 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 9.530303030303031e-06, |
|
"loss": 3.3663, |
|
"step": 56500 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 9.525252525252526e-06, |
|
"loss": 3.3497, |
|
"step": 57000 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 9.52020202020202e-06, |
|
"loss": 3.3303, |
|
"step": 57500 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 9.515151515151516e-06, |
|
"loss": 3.3123, |
|
"step": 58000 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 9.51010101010101e-06, |
|
"loss": 3.2963, |
|
"step": 58500 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 9.505050505050506e-06, |
|
"loss": 3.2779, |
|
"step": 59000 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 9.5e-06, |
|
"loss": 3.2636, |
|
"step": 59500 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 9.494949494949497e-06, |
|
"loss": 3.2404, |
|
"step": 60000 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"eval_loss": 2.9165635108947754, |
|
"eval_runtime": 91.1781, |
|
"eval_samples_per_second": 870.955, |
|
"eval_steps_per_second": 3.411, |
|
"step": 60000 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 9.48989898989899e-06, |
|
"loss": 3.2228, |
|
"step": 60500 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 9.484848484848485e-06, |
|
"loss": 3.2067, |
|
"step": 61000 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 9.479797979797981e-06, |
|
"loss": 3.1899, |
|
"step": 61500 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 9.474747474747475e-06, |
|
"loss": 3.1685, |
|
"step": 62000 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 9.469696969696971e-06, |
|
"loss": 3.1507, |
|
"step": 62500 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 9.464646464646466e-06, |
|
"loss": 3.1322, |
|
"step": 63000 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 9.45959595959596e-06, |
|
"loss": 3.119, |
|
"step": 63500 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 9.454545454545456e-06, |
|
"loss": 3.0955, |
|
"step": 64000 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 9.44949494949495e-06, |
|
"loss": 3.0801, |
|
"step": 64500 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 9.444444444444445e-06, |
|
"loss": 3.0589, |
|
"step": 65000 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 9.43939393939394e-06, |
|
"loss": 3.0396, |
|
"step": 65500 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 9.434343434343435e-06, |
|
"loss": 3.0281, |
|
"step": 66000 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 9.42929292929293e-06, |
|
"loss": 3.003, |
|
"step": 66500 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 9.424242424242425e-06, |
|
"loss": 2.9854, |
|
"step": 67000 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 9.41919191919192e-06, |
|
"loss": 2.9694, |
|
"step": 67500 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 9.414141414141414e-06, |
|
"loss": 2.9498, |
|
"step": 68000 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 9.40909090909091e-06, |
|
"loss": 2.9232, |
|
"step": 68500 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 9.404040404040404e-06, |
|
"loss": 2.9021, |
|
"step": 69000 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 9.3989898989899e-06, |
|
"loss": 2.8785, |
|
"step": 69500 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 9.393939393939396e-06, |
|
"loss": 2.8581, |
|
"step": 70000 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"eval_loss": 2.5783917903900146, |
|
"eval_runtime": 91.27, |
|
"eval_samples_per_second": 870.077, |
|
"eval_steps_per_second": 3.407, |
|
"step": 70000 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 9.38888888888889e-06, |
|
"loss": 2.8342, |
|
"step": 70500 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 9.383838383838385e-06, |
|
"loss": 2.8058, |
|
"step": 71000 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 9.378787878787879e-06, |
|
"loss": 2.7748, |
|
"step": 71500 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 9.373737373737375e-06, |
|
"loss": 2.7429, |
|
"step": 72000 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 9.36868686868687e-06, |
|
"loss": 2.7156, |
|
"step": 72500 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 9.363636363636365e-06, |
|
"loss": 2.6877, |
|
"step": 73000 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 9.35858585858586e-06, |
|
"loss": 2.6559, |
|
"step": 73500 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 9.353535353535354e-06, |
|
"loss": 2.6292, |
|
"step": 74000 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 9.34848484848485e-06, |
|
"loss": 2.6008, |
|
"step": 74500 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 9.343434343434344e-06, |
|
"loss": 2.5744, |
|
"step": 75000 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 9.338383838383838e-06, |
|
"loss": 2.5463, |
|
"step": 75500 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 9.333333333333334e-06, |
|
"loss": 2.5264, |
|
"step": 76000 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 9.328282828282829e-06, |
|
"loss": 2.4965, |
|
"step": 76500 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 9.323232323232325e-06, |
|
"loss": 2.4692, |
|
"step": 77000 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 9.318181818181819e-06, |
|
"loss": 2.4472, |
|
"step": 77500 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 9.313131313131313e-06, |
|
"loss": 2.426, |
|
"step": 78000 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 9.30808080808081e-06, |
|
"loss": 2.4064, |
|
"step": 78500 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 9.303030303030303e-06, |
|
"loss": 2.3843, |
|
"step": 79000 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 9.2979797979798e-06, |
|
"loss": 2.3624, |
|
"step": 79500 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 9.292929292929294e-06, |
|
"loss": 2.3427, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"eval_loss": 1.944496750831604, |
|
"eval_runtime": 90.6503, |
|
"eval_samples_per_second": 876.026, |
|
"eval_steps_per_second": 3.431, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 9.28787878787879e-06, |
|
"loss": 2.3235, |
|
"step": 80500 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 9.282828282828284e-06, |
|
"loss": 2.3018, |
|
"step": 81000 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 9.277777777777778e-06, |
|
"loss": 2.285, |
|
"step": 81500 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 9.272727272727273e-06, |
|
"loss": 2.2638, |
|
"step": 82000 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 9.267676767676769e-06, |
|
"loss": 2.2429, |
|
"step": 82500 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 9.262626262626263e-06, |
|
"loss": 2.2254, |
|
"step": 83000 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 9.257575757575759e-06, |
|
"loss": 2.2084, |
|
"step": 83500 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 9.252525252525253e-06, |
|
"loss": 2.1904, |
|
"step": 84000 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 9.24747474747475e-06, |
|
"loss": 2.1747, |
|
"step": 84500 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 9.242424242424244e-06, |
|
"loss": 2.1589, |
|
"step": 85000 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 9.237373737373738e-06, |
|
"loss": 2.1424, |
|
"step": 85500 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 9.232323232323232e-06, |
|
"loss": 2.1265, |
|
"step": 86000 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 9.227272727272728e-06, |
|
"loss": 2.1108, |
|
"step": 86500 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 9.222222222222224e-06, |
|
"loss": 2.0934, |
|
"step": 87000 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 9.217171717171718e-06, |
|
"loss": 2.0784, |
|
"step": 87500 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 9.212121212121213e-06, |
|
"loss": 2.0634, |
|
"step": 88000 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 9.207070707070707e-06, |
|
"loss": 2.0508, |
|
"step": 88500 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 9.202020202020203e-06, |
|
"loss": 2.0325, |
|
"step": 89000 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 9.196969696969697e-06, |
|
"loss": 2.0171, |
|
"step": 89500 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 9.191919191919193e-06, |
|
"loss": 2.0014, |
|
"step": 90000 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"eval_loss": 1.6067554950714111, |
|
"eval_runtime": 95.1881, |
|
"eval_samples_per_second": 834.264, |
|
"eval_steps_per_second": 3.267, |
|
"step": 90000 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 9.186868686868688e-06, |
|
"loss": 1.9869, |
|
"step": 90500 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 9.181818181818184e-06, |
|
"loss": 1.9747, |
|
"step": 91000 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 9.176767676767678e-06, |
|
"loss": 1.9606, |
|
"step": 91500 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 9.171717171717172e-06, |
|
"loss": 1.9454, |
|
"step": 92000 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 9.166666666666666e-06, |
|
"loss": 1.9341, |
|
"step": 92500 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 9.161616161616162e-06, |
|
"loss": 1.9222, |
|
"step": 93000 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 9.156565656565657e-06, |
|
"loss": 1.9068, |
|
"step": 93500 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 9.151515151515153e-06, |
|
"loss": 1.8961, |
|
"step": 94000 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 9.146464646464647e-06, |
|
"loss": 1.8837, |
|
"step": 94500 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 9.141414141414143e-06, |
|
"loss": 1.872, |
|
"step": 95000 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 9.136363636363637e-06, |
|
"loss": 1.8583, |
|
"step": 95500 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 9.131313131313132e-06, |
|
"loss": 1.8481, |
|
"step": 96000 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 9.126262626262628e-06, |
|
"loss": 1.8352, |
|
"step": 96500 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 9.121212121212122e-06, |
|
"loss": 1.8233, |
|
"step": 97000 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 9.116161616161618e-06, |
|
"loss": 1.814, |
|
"step": 97500 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 9.111111111111112e-06, |
|
"loss": 1.8015, |
|
"step": 98000 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 9.106060606060606e-06, |
|
"loss": 1.7941, |
|
"step": 98500 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 9.1010101010101e-06, |
|
"loss": 1.7809, |
|
"step": 99000 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 9.095959595959597e-06, |
|
"loss": 1.7673, |
|
"step": 99500 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 9.090909090909091e-06, |
|
"loss": 1.7589, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"eval_loss": 1.4153616428375244, |
|
"eval_runtime": 92.3909, |
|
"eval_samples_per_second": 859.522, |
|
"eval_steps_per_second": 3.366, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 9.085858585858587e-06, |
|
"loss": 1.7465, |
|
"step": 100500 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 9.080808080808081e-06, |
|
"loss": 1.7378, |
|
"step": 101000 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 9.075757575757577e-06, |
|
"loss": 1.7264, |
|
"step": 101500 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 9.070707070707072e-06, |
|
"loss": 1.7176, |
|
"step": 102000 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 9.065656565656566e-06, |
|
"loss": 1.7086, |
|
"step": 102500 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 9.06060606060606e-06, |
|
"loss": 1.6981, |
|
"step": 103000 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 9.055555555555556e-06, |
|
"loss": 1.6867, |
|
"step": 103500 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 9.050505050505052e-06, |
|
"loss": 1.6755, |
|
"step": 104000 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 9.045454545454546e-06, |
|
"loss": 1.6676, |
|
"step": 104500 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 9.040404040404042e-06, |
|
"loss": 1.6582, |
|
"step": 105000 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 9.035353535353537e-06, |
|
"loss": 1.6505, |
|
"step": 105500 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 9.030303030303031e-06, |
|
"loss": 1.6398, |
|
"step": 106000 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 9.025252525252525e-06, |
|
"loss": 1.6315, |
|
"step": 106500 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 9.020202020202021e-06, |
|
"loss": 1.6228, |
|
"step": 107000 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 9.015151515151516e-06, |
|
"loss": 1.6121, |
|
"step": 107500 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 9.010101010101012e-06, |
|
"loss": 1.606, |
|
"step": 108000 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 9.005050505050506e-06, |
|
"loss": 1.5963, |
|
"step": 108500 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 9e-06, |
|
"loss": 1.5873, |
|
"step": 109000 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 8.994949494949495e-06, |
|
"loss": 1.5805, |
|
"step": 109500 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 8.98989898989899e-06, |
|
"loss": 1.5718, |
|
"step": 110000 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"eval_loss": 1.3169385194778442, |
|
"eval_runtime": 89.9714, |
|
"eval_samples_per_second": 882.636, |
|
"eval_steps_per_second": 3.457, |
|
"step": 110000 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 8.984848484848485e-06, |
|
"loss": 1.5651, |
|
"step": 110500 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 8.97979797979798e-06, |
|
"loss": 1.5574, |
|
"step": 111000 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 8.974747474747475e-06, |
|
"loss": 1.5482, |
|
"step": 111500 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 8.969696969696971e-06, |
|
"loss": 1.5396, |
|
"step": 112000 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 8.964646464646465e-06, |
|
"loss": 1.5354, |
|
"step": 112500 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 8.95959595959596e-06, |
|
"loss": 1.5262, |
|
"step": 113000 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 8.954545454545456e-06, |
|
"loss": 1.5191, |
|
"step": 113500 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 8.94949494949495e-06, |
|
"loss": 1.51, |
|
"step": 114000 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 8.944444444444446e-06, |
|
"loss": 1.5044, |
|
"step": 114500 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 8.93939393939394e-06, |
|
"loss": 1.4982, |
|
"step": 115000 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 8.934343434343436e-06, |
|
"loss": 1.49, |
|
"step": 115500 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 8.92929292929293e-06, |
|
"loss": 1.484, |
|
"step": 116000 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 8.924242424242425e-06, |
|
"loss": 1.4765, |
|
"step": 116500 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 8.919191919191919e-06, |
|
"loss": 1.4701, |
|
"step": 117000 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 8.914141414141415e-06, |
|
"loss": 1.4645, |
|
"step": 117500 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 8.90909090909091e-06, |
|
"loss": 1.4558, |
|
"step": 118000 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 8.904040404040405e-06, |
|
"loss": 1.4492, |
|
"step": 118500 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 8.8989898989899e-06, |
|
"loss": 1.4444, |
|
"step": 119000 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 8.893939393939394e-06, |
|
"loss": 1.4393, |
|
"step": 119500 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 8.888888888888888e-06, |
|
"loss": 1.434, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"eval_loss": 1.213987946510315, |
|
"eval_runtime": 91.2226, |
|
"eval_samples_per_second": 870.53, |
|
"eval_steps_per_second": 3.409, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 8.883838383838384e-06, |
|
"loss": 1.4269, |
|
"step": 120500 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 8.87878787878788e-06, |
|
"loss": 1.42, |
|
"step": 121000 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 8.873737373737375e-06, |
|
"loss": 1.4156, |
|
"step": 121500 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 8.86868686868687e-06, |
|
"loss": 1.4107, |
|
"step": 122000 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 8.863636363636365e-06, |
|
"loss": 1.4045, |
|
"step": 122500 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 8.85858585858586e-06, |
|
"loss": 1.3989, |
|
"step": 123000 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 8.853535353535353e-06, |
|
"loss": 1.3944, |
|
"step": 123500 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 8.84848484848485e-06, |
|
"loss": 1.3868, |
|
"step": 124000 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 8.843434343434344e-06, |
|
"loss": 1.383, |
|
"step": 124500 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 8.83838383838384e-06, |
|
"loss": 1.3757, |
|
"step": 125000 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 8.833333333333334e-06, |
|
"loss": 1.3715, |
|
"step": 125500 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 8.82828282828283e-06, |
|
"loss": 1.3676, |
|
"step": 126000 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 8.823232323232324e-06, |
|
"loss": 1.362, |
|
"step": 126500 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 8.818181818181819e-06, |
|
"loss": 1.3577, |
|
"step": 127000 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 8.813131313131313e-06, |
|
"loss": 1.353, |
|
"step": 127500 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 8.808080808080809e-06, |
|
"loss": 1.3467, |
|
"step": 128000 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 8.803030303030303e-06, |
|
"loss": 1.3435, |
|
"step": 128500 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 8.7979797979798e-06, |
|
"loss": 1.3383, |
|
"step": 129000 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 8.792929292929293e-06, |
|
"loss": 1.3324, |
|
"step": 129500 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 8.787878787878788e-06, |
|
"loss": 1.3295, |
|
"step": 130000 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"eval_loss": 1.165347695350647, |
|
"eval_runtime": 94.4005, |
|
"eval_samples_per_second": 841.225, |
|
"eval_steps_per_second": 3.294, |
|
"step": 130000 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 8.782828282828284e-06, |
|
"loss": 1.3257, |
|
"step": 130500 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 8.777777777777778e-06, |
|
"loss": 1.3218, |
|
"step": 131000 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 8.772727272727274e-06, |
|
"loss": 1.317, |
|
"step": 131500 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 8.767676767676768e-06, |
|
"loss": 1.3128, |
|
"step": 132000 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 8.762626262626264e-06, |
|
"loss": 1.3093, |
|
"step": 132500 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 8.757575757575759e-06, |
|
"loss": 1.3051, |
|
"step": 133000 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 8.752525252525253e-06, |
|
"loss": 1.3019, |
|
"step": 133500 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 8.747474747474747e-06, |
|
"loss": 1.2969, |
|
"step": 134000 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 8.742424242424243e-06, |
|
"loss": 1.2921, |
|
"step": 134500 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 8.737373737373738e-06, |
|
"loss": 1.2898, |
|
"step": 135000 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 8.732323232323234e-06, |
|
"loss": 1.2858, |
|
"step": 135500 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 8.727272727272728e-06, |
|
"loss": 1.2819, |
|
"step": 136000 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 8.722222222222224e-06, |
|
"loss": 1.2785, |
|
"step": 136500 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 8.717171717171718e-06, |
|
"loss": 1.2751, |
|
"step": 137000 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 8.712121212121212e-06, |
|
"loss": 1.272, |
|
"step": 137500 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 8.707070707070707e-06, |
|
"loss": 1.2677, |
|
"step": 138000 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 8.702020202020203e-06, |
|
"loss": 1.2648, |
|
"step": 138500 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 8.696969696969699e-06, |
|
"loss": 1.2601, |
|
"step": 139000 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 8.691919191919193e-06, |
|
"loss": 1.2591, |
|
"step": 139500 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 8.686868686868687e-06, |
|
"loss": 1.2537, |
|
"step": 140000 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"eval_loss": 1.0970981121063232, |
|
"eval_runtime": 91.1052, |
|
"eval_samples_per_second": 871.651, |
|
"eval_steps_per_second": 3.414, |
|
"step": 140000 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 8.681818181818182e-06, |
|
"loss": 1.2513, |
|
"step": 140500 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 8.676767676767678e-06, |
|
"loss": 1.2472, |
|
"step": 141000 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 8.671717171717172e-06, |
|
"loss": 1.2447, |
|
"step": 141500 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 8.666666666666668e-06, |
|
"loss": 1.2418, |
|
"step": 142000 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 8.661616161616162e-06, |
|
"loss": 1.2394, |
|
"step": 142500 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 8.656565656565658e-06, |
|
"loss": 1.236, |
|
"step": 143000 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 8.651515151515152e-06, |
|
"loss": 1.2334, |
|
"step": 143500 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 8.646464646464647e-06, |
|
"loss": 1.2295, |
|
"step": 144000 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 8.641414141414141e-06, |
|
"loss": 1.2269, |
|
"step": 144500 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 8.636363636363637e-06, |
|
"loss": 1.2236, |
|
"step": 145000 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 8.631313131313131e-06, |
|
"loss": 1.22, |
|
"step": 145500 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 8.626262626262627e-06, |
|
"loss": 1.2184, |
|
"step": 146000 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 8.621212121212122e-06, |
|
"loss": 1.2156, |
|
"step": 146500 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 8.616161616161618e-06, |
|
"loss": 1.2129, |
|
"step": 147000 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 8.611111111111112e-06, |
|
"loss": 1.209, |
|
"step": 147500 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 8.606060606060606e-06, |
|
"loss": 1.2071, |
|
"step": 148000 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 8.601010101010102e-06, |
|
"loss": 1.2041, |
|
"step": 148500 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 8.595959595959596e-06, |
|
"loss": 1.2018, |
|
"step": 149000 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 8.590909090909092e-06, |
|
"loss": 1.1989, |
|
"step": 149500 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 8.585858585858587e-06, |
|
"loss": 1.1954, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"eval_loss": 1.0581802129745483, |
|
"eval_runtime": 89.9138, |
|
"eval_samples_per_second": 883.201, |
|
"eval_steps_per_second": 3.459, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 8.580808080808081e-06, |
|
"loss": 1.1939, |
|
"step": 150500 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 8.575757575757575e-06, |
|
"loss": 1.1918, |
|
"step": 151000 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 8.570707070707071e-06, |
|
"loss": 1.1882, |
|
"step": 151500 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 8.565656565656566e-06, |
|
"loss": 1.1868, |
|
"step": 152000 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 8.560606060606062e-06, |
|
"loss": 1.1855, |
|
"step": 152500 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"learning_rate": 8.555555555555556e-06, |
|
"loss": 1.1829, |
|
"step": 153000 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 8.550505050505052e-06, |
|
"loss": 1.1791, |
|
"step": 153500 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"learning_rate": 8.545454545454546e-06, |
|
"loss": 1.1762, |
|
"step": 154000 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"learning_rate": 8.54040404040404e-06, |
|
"loss": 1.1743, |
|
"step": 154500 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 8.535353535353535e-06, |
|
"loss": 1.1741, |
|
"step": 155000 |
|
}, |
|
{ |
|
"epoch": 5.06, |
|
"learning_rate": 8.53030303030303e-06, |
|
"loss": 1.1709, |
|
"step": 155500 |
|
}, |
|
{ |
|
"epoch": 5.08, |
|
"learning_rate": 8.525252525252527e-06, |
|
"loss": 1.1675, |
|
"step": 156000 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 8.520202020202021e-06, |
|
"loss": 1.166, |
|
"step": 156500 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 8.515151515151517e-06, |
|
"loss": 1.1651, |
|
"step": 157000 |
|
}, |
|
{ |
|
"epoch": 5.13, |
|
"learning_rate": 8.510101010101011e-06, |
|
"loss": 1.1622, |
|
"step": 157500 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 8.505050505050506e-06, |
|
"loss": 1.1614, |
|
"step": 158000 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"learning_rate": 8.5e-06, |
|
"loss": 1.1572, |
|
"step": 158500 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"learning_rate": 8.494949494949496e-06, |
|
"loss": 1.157, |
|
"step": 159000 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 8.48989898989899e-06, |
|
"loss": 1.1549, |
|
"step": 159500 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"learning_rate": 8.484848484848486e-06, |
|
"loss": 1.152, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"eval_loss": 1.0367275476455688, |
|
"eval_runtime": 89.0095, |
|
"eval_samples_per_second": 892.175, |
|
"eval_steps_per_second": 3.494, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"learning_rate": 8.47979797979798e-06, |
|
"loss": 1.1502, |
|
"step": 160500 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 8.474747474747475e-06, |
|
"loss": 1.1474, |
|
"step": 161000 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 8.46969696969697e-06, |
|
"loss": 1.1456, |
|
"step": 161500 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 8.464646464646465e-06, |
|
"loss": 1.1447, |
|
"step": 162000 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 8.45959595959596e-06, |
|
"loss": 1.1435, |
|
"step": 162500 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"learning_rate": 8.454545454545455e-06, |
|
"loss": 1.1408, |
|
"step": 163000 |
|
}, |
|
{ |
|
"epoch": 5.32, |
|
"learning_rate": 8.44949494949495e-06, |
|
"loss": 1.1382, |
|
"step": 163500 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 8.444444444444446e-06, |
|
"loss": 1.1363, |
|
"step": 164000 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 8.43939393939394e-06, |
|
"loss": 1.1343, |
|
"step": 164500 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 8.434343434343434e-06, |
|
"loss": 1.1329, |
|
"step": 165000 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 8.42929292929293e-06, |
|
"loss": 1.1325, |
|
"step": 165500 |
|
}, |
|
{ |
|
"epoch": 5.41, |
|
"learning_rate": 8.424242424242425e-06, |
|
"loss": 1.1296, |
|
"step": 166000 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 8.41919191919192e-06, |
|
"loss": 1.1274, |
|
"step": 166500 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"learning_rate": 8.414141414141415e-06, |
|
"loss": 1.1253, |
|
"step": 167000 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 8.40909090909091e-06, |
|
"loss": 1.1251, |
|
"step": 167500 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 8.404040404040405e-06, |
|
"loss": 1.1218, |
|
"step": 168000 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"learning_rate": 8.3989898989899e-06, |
|
"loss": 1.1217, |
|
"step": 168500 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 8.393939393939394e-06, |
|
"loss": 1.1199, |
|
"step": 169000 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"learning_rate": 8.38888888888889e-06, |
|
"loss": 1.1171, |
|
"step": 169500 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 8.383838383838384e-06, |
|
"loss": 1.1158, |
|
"step": 170000 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"eval_loss": 1.0161751508712769, |
|
"eval_runtime": 96.0727, |
|
"eval_samples_per_second": 826.582, |
|
"eval_steps_per_second": 3.237, |
|
"step": 170000 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 8.37878787878788e-06, |
|
"loss": 1.1143, |
|
"step": 170500 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 8.373737373737374e-06, |
|
"loss": 1.1134, |
|
"step": 171000 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 8.368686868686869e-06, |
|
"loss": 1.1119, |
|
"step": 171500 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 8.363636363636365e-06, |
|
"loss": 1.1093, |
|
"step": 172000 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 8.358585858585859e-06, |
|
"loss": 1.1083, |
|
"step": 172500 |
|
}, |
|
{ |
|
"epoch": 5.63, |
|
"learning_rate": 8.353535353535355e-06, |
|
"loss": 1.1076, |
|
"step": 173000 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 8.348484848484849e-06, |
|
"loss": 1.1078, |
|
"step": 173500 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 8.343434343434345e-06, |
|
"loss": 1.1039, |
|
"step": 174000 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"learning_rate": 8.33838383838384e-06, |
|
"loss": 1.1016, |
|
"step": 174500 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 1.1014, |
|
"step": 175000 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 8.328282828282828e-06, |
|
"loss": 1.0998, |
|
"step": 175500 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"learning_rate": 8.323232323232324e-06, |
|
"loss": 1.0973, |
|
"step": 176000 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 8.318181818181818e-06, |
|
"loss": 1.0977, |
|
"step": 176500 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"learning_rate": 8.313131313131314e-06, |
|
"loss": 1.0961, |
|
"step": 177000 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 8.308080808080809e-06, |
|
"loss": 1.0932, |
|
"step": 177500 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 8.303030303030305e-06, |
|
"loss": 1.091, |
|
"step": 178000 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"learning_rate": 8.297979797979799e-06, |
|
"loss": 1.091, |
|
"step": 178500 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 8.292929292929293e-06, |
|
"loss": 1.0885, |
|
"step": 179000 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 8.287878787878787e-06, |
|
"loss": 1.088, |
|
"step": 179500 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"learning_rate": 8.282828282828283e-06, |
|
"loss": 1.0873, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"eval_loss": 0.9905126094818115, |
|
"eval_runtime": 95.2304, |
|
"eval_samples_per_second": 833.893, |
|
"eval_steps_per_second": 3.266, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 8.277777777777778e-06, |
|
"loss": 1.0862, |
|
"step": 180500 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 8.272727272727274e-06, |
|
"loss": 1.0836, |
|
"step": 181000 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 8.267676767676768e-06, |
|
"loss": 1.0828, |
|
"step": 181500 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"learning_rate": 8.262626262626264e-06, |
|
"loss": 1.0814, |
|
"step": 182000 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"learning_rate": 8.257575757575758e-06, |
|
"loss": 1.0798, |
|
"step": 182500 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 8.252525252525253e-06, |
|
"loss": 1.0786, |
|
"step": 183000 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 8.247474747474749e-06, |
|
"loss": 1.0782, |
|
"step": 183500 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"learning_rate": 8.242424242424243e-06, |
|
"loss": 1.0769, |
|
"step": 184000 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 8.237373737373739e-06, |
|
"loss": 1.0758, |
|
"step": 184500 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"learning_rate": 8.232323232323233e-06, |
|
"loss": 1.0736, |
|
"step": 185000 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 8.227272727272728e-06, |
|
"loss": 1.0725, |
|
"step": 185500 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"learning_rate": 8.222222222222222e-06, |
|
"loss": 1.0714, |
|
"step": 186000 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 8.217171717171718e-06, |
|
"loss": 1.0703, |
|
"step": 186500 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"learning_rate": 8.212121212121212e-06, |
|
"loss": 1.0694, |
|
"step": 187000 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"learning_rate": 8.207070707070708e-06, |
|
"loss": 1.0684, |
|
"step": 187500 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 8.202020202020202e-06, |
|
"loss": 1.0667, |
|
"step": 188000 |
|
}, |
|
{ |
|
"epoch": 6.14, |
|
"learning_rate": 8.196969696969698e-06, |
|
"loss": 1.0655, |
|
"step": 188500 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 8.191919191919193e-06, |
|
"loss": 1.0649, |
|
"step": 189000 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 8.186868686868687e-06, |
|
"loss": 1.0625, |
|
"step": 189500 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"learning_rate": 8.181818181818183e-06, |
|
"loss": 1.0615, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"eval_loss": 0.9824332594871521, |
|
"eval_runtime": 88.8399, |
|
"eval_samples_per_second": 893.878, |
|
"eval_steps_per_second": 3.501, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 8.176767676767677e-06, |
|
"loss": 1.0608, |
|
"step": 190500 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 8.171717171717173e-06, |
|
"loss": 1.0598, |
|
"step": 191000 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"learning_rate": 8.166666666666668e-06, |
|
"loss": 1.0585, |
|
"step": 191500 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 8.161616161616162e-06, |
|
"loss": 1.0579, |
|
"step": 192000 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 8.156565656565658e-06, |
|
"loss": 1.0568, |
|
"step": 192500 |
|
}, |
|
{ |
|
"epoch": 6.28, |
|
"learning_rate": 8.151515151515152e-06, |
|
"loss": 1.0562, |
|
"step": 193000 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 8.146464646464646e-06, |
|
"loss": 1.054, |
|
"step": 193500 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"learning_rate": 8.141414141414142e-06, |
|
"loss": 1.0526, |
|
"step": 194000 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 8.136363636363637e-06, |
|
"loss": 1.0524, |
|
"step": 194500 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"learning_rate": 8.131313131313133e-06, |
|
"loss": 1.0513, |
|
"step": 195000 |
|
}, |
|
{ |
|
"epoch": 6.37, |
|
"learning_rate": 8.126262626262627e-06, |
|
"loss": 1.0498, |
|
"step": 195500 |
|
}, |
|
{ |
|
"epoch": 6.38, |
|
"learning_rate": 8.121212121212121e-06, |
|
"loss": 1.0479, |
|
"step": 196000 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 8.116161616161616e-06, |
|
"loss": 1.0476, |
|
"step": 196500 |
|
}, |
|
{ |
|
"epoch": 6.41, |
|
"learning_rate": 8.111111111111112e-06, |
|
"loss": 1.0482, |
|
"step": 197000 |
|
}, |
|
{ |
|
"epoch": 6.43, |
|
"learning_rate": 8.106060606060606e-06, |
|
"loss": 1.0463, |
|
"step": 197500 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 8.101010101010102e-06, |
|
"loss": 1.0453, |
|
"step": 198000 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"learning_rate": 8.095959595959598e-06, |
|
"loss": 1.0434, |
|
"step": 198500 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"learning_rate": 8.090909090909092e-06, |
|
"loss": 1.0425, |
|
"step": 199000 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 8.085858585858586e-06, |
|
"loss": 1.041, |
|
"step": 199500 |
|
}, |
|
{ |
|
"epoch": 6.51, |
|
"learning_rate": 8.08080808080808e-06, |
|
"loss": 1.0406, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 6.51, |
|
"eval_loss": 0.9657134413719177, |
|
"eval_runtime": 87.9909, |
|
"eval_samples_per_second": 902.502, |
|
"eval_steps_per_second": 3.534, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 8.075757575757577e-06, |
|
"loss": 1.0404, |
|
"step": 200500 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 8.070707070707071e-06, |
|
"loss": 1.0394, |
|
"step": 201000 |
|
}, |
|
{ |
|
"epoch": 6.56, |
|
"learning_rate": 8.065656565656567e-06, |
|
"loss": 1.0383, |
|
"step": 201500 |
|
}, |
|
{ |
|
"epoch": 6.58, |
|
"learning_rate": 8.060606060606061e-06, |
|
"loss": 1.0369, |
|
"step": 202000 |
|
}, |
|
{ |
|
"epoch": 6.59, |
|
"learning_rate": 8.055555555555557e-06, |
|
"loss": 1.0354, |
|
"step": 202500 |
|
}, |
|
{ |
|
"epoch": 6.61, |
|
"learning_rate": 8.050505050505052e-06, |
|
"loss": 1.0343, |
|
"step": 203000 |
|
}, |
|
{ |
|
"epoch": 6.63, |
|
"learning_rate": 8.045454545454546e-06, |
|
"loss": 1.0338, |
|
"step": 203500 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 8.04040404040404e-06, |
|
"loss": 1.0321, |
|
"step": 204000 |
|
}, |
|
{ |
|
"epoch": 6.66, |
|
"learning_rate": 8.035353535353536e-06, |
|
"loss": 1.0315, |
|
"step": 204500 |
|
}, |
|
{ |
|
"epoch": 6.68, |
|
"learning_rate": 8.03030303030303e-06, |
|
"loss": 1.0313, |
|
"step": 205000 |
|
}, |
|
{ |
|
"epoch": 6.69, |
|
"learning_rate": 8.025252525252526e-06, |
|
"loss": 1.0303, |
|
"step": 205500 |
|
}, |
|
{ |
|
"epoch": 6.71, |
|
"learning_rate": 8.02020202020202e-06, |
|
"loss": 1.0292, |
|
"step": 206000 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"learning_rate": 8.015151515151515e-06, |
|
"loss": 1.028, |
|
"step": 206500 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"learning_rate": 8.010101010101011e-06, |
|
"loss": 1.0273, |
|
"step": 207000 |
|
}, |
|
{ |
|
"epoch": 6.76, |
|
"learning_rate": 8.005050505050505e-06, |
|
"loss": 1.0264, |
|
"step": 207500 |
|
}, |
|
{ |
|
"epoch": 6.77, |
|
"learning_rate": 8.000000000000001e-06, |
|
"loss": 1.0248, |
|
"step": 208000 |
|
}, |
|
{ |
|
"epoch": 6.79, |
|
"learning_rate": 7.994949494949496e-06, |
|
"loss": 1.0231, |
|
"step": 208500 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"learning_rate": 7.989898989898992e-06, |
|
"loss": 1.0236, |
|
"step": 209000 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 7.984848484848486e-06, |
|
"loss": 1.0221, |
|
"step": 209500 |
|
}, |
|
{ |
|
"epoch": 6.84, |
|
"learning_rate": 7.97979797979798e-06, |
|
"loss": 1.0219, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 6.84, |
|
"eval_loss": 0.9520924091339111, |
|
"eval_runtime": 89.7334, |
|
"eval_samples_per_second": 884.977, |
|
"eval_steps_per_second": 3.466, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"learning_rate": 7.974747474747475e-06, |
|
"loss": 1.0195, |
|
"step": 210500 |
|
}, |
|
{ |
|
"epoch": 6.87, |
|
"learning_rate": 7.96969696969697e-06, |
|
"loss": 1.0197, |
|
"step": 211000 |
|
}, |
|
{ |
|
"epoch": 6.89, |
|
"learning_rate": 7.964646464646465e-06, |
|
"loss": 1.0178, |
|
"step": 211500 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 7.95959595959596e-06, |
|
"loss": 1.0177, |
|
"step": 212000 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"learning_rate": 7.954545454545455e-06, |
|
"loss": 1.0166, |
|
"step": 212500 |
|
}, |
|
{ |
|
"epoch": 6.94, |
|
"learning_rate": 7.949494949494951e-06, |
|
"loss": 1.0153, |
|
"step": 213000 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 7.944444444444445e-06, |
|
"loss": 1.0145, |
|
"step": 213500 |
|
}, |
|
{ |
|
"epoch": 6.97, |
|
"learning_rate": 7.93939393939394e-06, |
|
"loss": 1.0142, |
|
"step": 214000 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"learning_rate": 7.934343434343434e-06, |
|
"loss": 1.0136, |
|
"step": 214500 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 7.92929292929293e-06, |
|
"loss": 1.0124, |
|
"step": 215000 |
|
}, |
|
{ |
|
"epoch": 7.02, |
|
"learning_rate": 7.924242424242426e-06, |
|
"loss": 1.0116, |
|
"step": 215500 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"learning_rate": 7.91919191919192e-06, |
|
"loss": 1.0097, |
|
"step": 216000 |
|
}, |
|
{ |
|
"epoch": 7.05, |
|
"learning_rate": 7.914141414141415e-06, |
|
"loss": 1.0094, |
|
"step": 216500 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"learning_rate": 7.909090909090909e-06, |
|
"loss": 1.0078, |
|
"step": 217000 |
|
}, |
|
{ |
|
"epoch": 7.08, |
|
"learning_rate": 7.904040404040405e-06, |
|
"loss": 1.0069, |
|
"step": 217500 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 7.898989898989899e-06, |
|
"loss": 1.0076, |
|
"step": 218000 |
|
}, |
|
{ |
|
"epoch": 7.11, |
|
"learning_rate": 7.893939393939395e-06, |
|
"loss": 1.0061, |
|
"step": 218500 |
|
}, |
|
{ |
|
"epoch": 7.13, |
|
"learning_rate": 7.88888888888889e-06, |
|
"loss": 1.0054, |
|
"step": 219000 |
|
}, |
|
{ |
|
"epoch": 7.15, |
|
"learning_rate": 7.883838383838385e-06, |
|
"loss": 1.0034, |
|
"step": 219500 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 7.87878787878788e-06, |
|
"loss": 1.0043, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"eval_loss": 0.9399166107177734, |
|
"eval_runtime": 91.3908, |
|
"eval_samples_per_second": 868.928, |
|
"eval_steps_per_second": 3.403, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 7.18, |
|
"learning_rate": 7.873737373737374e-06, |
|
"loss": 1.0021, |
|
"step": 220500 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 7.868686868686868e-06, |
|
"loss": 1.0022, |
|
"step": 221000 |
|
}, |
|
{ |
|
"epoch": 7.21, |
|
"learning_rate": 7.863636363636364e-06, |
|
"loss": 1.0009, |
|
"step": 221500 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"learning_rate": 7.858585858585859e-06, |
|
"loss": 1.0004, |
|
"step": 222000 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"learning_rate": 7.853535353535355e-06, |
|
"loss": 0.9988, |
|
"step": 222500 |
|
}, |
|
{ |
|
"epoch": 7.26, |
|
"learning_rate": 7.848484848484849e-06, |
|
"loss": 0.9988, |
|
"step": 223000 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"learning_rate": 7.843434343434345e-06, |
|
"loss": 0.9983, |
|
"step": 223500 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"learning_rate": 7.838383838383839e-06, |
|
"loss": 0.9971, |
|
"step": 224000 |
|
}, |
|
{ |
|
"epoch": 7.31, |
|
"learning_rate": 7.833333333333333e-06, |
|
"loss": 0.9956, |
|
"step": 224500 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 7.82828282828283e-06, |
|
"loss": 0.9954, |
|
"step": 225000 |
|
}, |
|
{ |
|
"epoch": 7.34, |
|
"learning_rate": 7.823232323232324e-06, |
|
"loss": 0.9942, |
|
"step": 225500 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 7.81818181818182e-06, |
|
"loss": 0.9932, |
|
"step": 226000 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 7.813131313131314e-06, |
|
"loss": 0.9927, |
|
"step": 226500 |
|
}, |
|
{ |
|
"epoch": 7.39, |
|
"learning_rate": 7.808080808080808e-06, |
|
"loss": 0.9916, |
|
"step": 227000 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"learning_rate": 7.803030303030303e-06, |
|
"loss": 0.992, |
|
"step": 227500 |
|
}, |
|
{ |
|
"epoch": 7.42, |
|
"learning_rate": 7.797979797979799e-06, |
|
"loss": 0.9914, |
|
"step": 228000 |
|
}, |
|
{ |
|
"epoch": 7.44, |
|
"learning_rate": 7.792929292929293e-06, |
|
"loss": 0.9902, |
|
"step": 228500 |
|
}, |
|
{ |
|
"epoch": 7.46, |
|
"learning_rate": 7.787878787878789e-06, |
|
"loss": 0.989, |
|
"step": 229000 |
|
}, |
|
{ |
|
"epoch": 7.47, |
|
"learning_rate": 7.782828282828283e-06, |
|
"loss": 0.9896, |
|
"step": 229500 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"learning_rate": 7.77777777777778e-06, |
|
"loss": 0.9876, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"eval_loss": 0.9257983565330505, |
|
"eval_runtime": 89.899, |
|
"eval_samples_per_second": 883.347, |
|
"eval_steps_per_second": 3.459, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"learning_rate": 7.772727272727273e-06, |
|
"loss": 0.9873, |
|
"step": 230500 |
|
}, |
|
{ |
|
"epoch": 7.52, |
|
"learning_rate": 7.767676767676768e-06, |
|
"loss": 0.9866, |
|
"step": 231000 |
|
}, |
|
{ |
|
"epoch": 7.54, |
|
"learning_rate": 7.762626262626262e-06, |
|
"loss": 0.9857, |
|
"step": 231500 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 7.757575757575758e-06, |
|
"loss": 0.9855, |
|
"step": 232000 |
|
}, |
|
{ |
|
"epoch": 7.57, |
|
"learning_rate": 7.752525252525254e-06, |
|
"loss": 0.9837, |
|
"step": 232500 |
|
}, |
|
{ |
|
"epoch": 7.59, |
|
"learning_rate": 7.747474747474748e-06, |
|
"loss": 0.9834, |
|
"step": 233000 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"learning_rate": 7.742424242424244e-06, |
|
"loss": 0.9839, |
|
"step": 233500 |
|
}, |
|
{ |
|
"epoch": 7.62, |
|
"learning_rate": 7.737373737373739e-06, |
|
"loss": 0.9823, |
|
"step": 234000 |
|
}, |
|
{ |
|
"epoch": 7.64, |
|
"learning_rate": 7.732323232323233e-06, |
|
"loss": 0.9815, |
|
"step": 234500 |
|
}, |
|
{ |
|
"epoch": 7.65, |
|
"learning_rate": 7.727272727272727e-06, |
|
"loss": 0.9809, |
|
"step": 235000 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 7.722222222222223e-06, |
|
"loss": 0.9802, |
|
"step": 235500 |
|
}, |
|
{ |
|
"epoch": 7.68, |
|
"learning_rate": 7.717171717171717e-06, |
|
"loss": 0.98, |
|
"step": 236000 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"learning_rate": 7.712121212121213e-06, |
|
"loss": 0.9788, |
|
"step": 236500 |
|
}, |
|
{ |
|
"epoch": 7.72, |
|
"learning_rate": 7.707070707070708e-06, |
|
"loss": 0.9788, |
|
"step": 237000 |
|
}, |
|
{ |
|
"epoch": 7.73, |
|
"learning_rate": 7.702020202020202e-06, |
|
"loss": 0.9779, |
|
"step": 237500 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"learning_rate": 7.696969696969696e-06, |
|
"loss": 0.9769, |
|
"step": 238000 |
|
}, |
|
{ |
|
"epoch": 7.77, |
|
"learning_rate": 7.691919191919192e-06, |
|
"loss": 0.9762, |
|
"step": 238500 |
|
}, |
|
{ |
|
"epoch": 7.78, |
|
"learning_rate": 7.686868686868687e-06, |
|
"loss": 0.9757, |
|
"step": 239000 |
|
}, |
|
{ |
|
"epoch": 7.8, |
|
"learning_rate": 7.681818181818183e-06, |
|
"loss": 0.9753, |
|
"step": 239500 |
|
}, |
|
{ |
|
"epoch": 7.82, |
|
"learning_rate": 7.676767676767677e-06, |
|
"loss": 0.9744, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 7.82, |
|
"eval_loss": 0.9180858135223389, |
|
"eval_runtime": 88.4669, |
|
"eval_samples_per_second": 897.647, |
|
"eval_steps_per_second": 3.515, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"learning_rate": 7.671717171717173e-06, |
|
"loss": 0.9732, |
|
"step": 240500 |
|
}, |
|
{ |
|
"epoch": 7.85, |
|
"learning_rate": 7.666666666666667e-06, |
|
"loss": 0.9739, |
|
"step": 241000 |
|
}, |
|
{ |
|
"epoch": 7.86, |
|
"learning_rate": 7.661616161616162e-06, |
|
"loss": 0.9728, |
|
"step": 241500 |
|
}, |
|
{ |
|
"epoch": 7.88, |
|
"learning_rate": 7.656565656565658e-06, |
|
"loss": 0.9718, |
|
"step": 242000 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"learning_rate": 7.651515151515152e-06, |
|
"loss": 0.971, |
|
"step": 242500 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"learning_rate": 7.646464646464648e-06, |
|
"loss": 0.9707, |
|
"step": 243000 |
|
}, |
|
{ |
|
"epoch": 7.93, |
|
"learning_rate": 7.641414141414142e-06, |
|
"loss": 0.9698, |
|
"step": 243500 |
|
}, |
|
{ |
|
"epoch": 7.95, |
|
"learning_rate": 7.636363636363638e-06, |
|
"loss": 0.9701, |
|
"step": 244000 |
|
}, |
|
{ |
|
"epoch": 7.96, |
|
"learning_rate": 7.631313131313132e-06, |
|
"loss": 0.9689, |
|
"step": 244500 |
|
}, |
|
{ |
|
"epoch": 7.98, |
|
"learning_rate": 7.6262626262626275e-06, |
|
"loss": 0.9683, |
|
"step": 245000 |
|
}, |
|
{ |
|
"epoch": 7.99, |
|
"learning_rate": 7.621212121212122e-06, |
|
"loss": 0.967, |
|
"step": 245500 |
|
}, |
|
{ |
|
"epoch": 8.01, |
|
"learning_rate": 7.616161616161617e-06, |
|
"loss": 0.9667, |
|
"step": 246000 |
|
}, |
|
{ |
|
"epoch": 8.03, |
|
"learning_rate": 7.611111111111111e-06, |
|
"loss": 0.9668, |
|
"step": 246500 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"learning_rate": 7.606060606060606e-06, |
|
"loss": 0.9658, |
|
"step": 247000 |
|
}, |
|
{ |
|
"epoch": 8.06, |
|
"learning_rate": 7.6010101010101016e-06, |
|
"loss": 0.9655, |
|
"step": 247500 |
|
}, |
|
{ |
|
"epoch": 8.08, |
|
"learning_rate": 7.595959595959597e-06, |
|
"loss": 0.9648, |
|
"step": 248000 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"learning_rate": 7.590909090909091e-06, |
|
"loss": 0.9642, |
|
"step": 248500 |
|
}, |
|
{ |
|
"epoch": 8.11, |
|
"learning_rate": 7.585858585858586e-06, |
|
"loss": 0.9638, |
|
"step": 249000 |
|
}, |
|
{ |
|
"epoch": 8.12, |
|
"learning_rate": 7.580808080808082e-06, |
|
"loss": 0.9635, |
|
"step": 249500 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"learning_rate": 7.5757575757575764e-06, |
|
"loss": 0.9624, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"eval_loss": 0.9136332869529724, |
|
"eval_runtime": 88.8056, |
|
"eval_samples_per_second": 894.223, |
|
"eval_steps_per_second": 3.502, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 8.16, |
|
"learning_rate": 7.5707070707070716e-06, |
|
"loss": 0.9617, |
|
"step": 250500 |
|
}, |
|
{ |
|
"epoch": 8.17, |
|
"learning_rate": 7.565656565656566e-06, |
|
"loss": 0.9614, |
|
"step": 251000 |
|
}, |
|
{ |
|
"epoch": 8.19, |
|
"learning_rate": 7.560606060606062e-06, |
|
"loss": 0.9609, |
|
"step": 251500 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"learning_rate": 7.555555555555556e-06, |
|
"loss": 0.9604, |
|
"step": 252000 |
|
}, |
|
{ |
|
"epoch": 8.22, |
|
"learning_rate": 7.550505050505051e-06, |
|
"loss": 0.9603, |
|
"step": 252500 |
|
}, |
|
{ |
|
"epoch": 8.24, |
|
"learning_rate": 7.545454545454546e-06, |
|
"loss": 0.9592, |
|
"step": 253000 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"learning_rate": 7.540404040404042e-06, |
|
"loss": 0.9587, |
|
"step": 253500 |
|
}, |
|
{ |
|
"epoch": 8.27, |
|
"learning_rate": 7.535353535353536e-06, |
|
"loss": 0.9578, |
|
"step": 254000 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"learning_rate": 7.530303030303031e-06, |
|
"loss": 0.9575, |
|
"step": 254500 |
|
}, |
|
{ |
|
"epoch": 8.3, |
|
"learning_rate": 7.525252525252525e-06, |
|
"loss": 0.957, |
|
"step": 255000 |
|
}, |
|
{ |
|
"epoch": 8.32, |
|
"learning_rate": 7.520202020202021e-06, |
|
"loss": 0.9562, |
|
"step": 255500 |
|
}, |
|
{ |
|
"epoch": 8.34, |
|
"learning_rate": 7.515151515151516e-06, |
|
"loss": 0.9557, |
|
"step": 256000 |
|
}, |
|
{ |
|
"epoch": 8.35, |
|
"learning_rate": 7.510101010101011e-06, |
|
"loss": 0.955, |
|
"step": 256500 |
|
}, |
|
{ |
|
"epoch": 8.37, |
|
"learning_rate": 7.505050505050505e-06, |
|
"loss": 0.9548, |
|
"step": 257000 |
|
}, |
|
{ |
|
"epoch": 8.38, |
|
"learning_rate": 7.500000000000001e-06, |
|
"loss": 0.9542, |
|
"step": 257500 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"learning_rate": 7.494949494949496e-06, |
|
"loss": 0.9529, |
|
"step": 258000 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"learning_rate": 7.4898989898989905e-06, |
|
"loss": 0.9534, |
|
"step": 258500 |
|
}, |
|
{ |
|
"epoch": 8.43, |
|
"learning_rate": 7.484848484848486e-06, |
|
"loss": 0.9528, |
|
"step": 259000 |
|
}, |
|
{ |
|
"epoch": 8.45, |
|
"learning_rate": 7.47979797979798e-06, |
|
"loss": 0.9523, |
|
"step": 259500 |
|
}, |
|
{ |
|
"epoch": 8.47, |
|
"learning_rate": 7.474747474747476e-06, |
|
"loss": 0.9522, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 8.47, |
|
"eval_loss": 0.9024671316146851, |
|
"eval_runtime": 87.0285, |
|
"eval_samples_per_second": 912.483, |
|
"eval_steps_per_second": 3.574, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 8.48, |
|
"learning_rate": 7.46969696969697e-06, |
|
"loss": 0.9513, |
|
"step": 260500 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 7.464646464646465e-06, |
|
"loss": 0.9512, |
|
"step": 261000 |
|
}, |
|
{ |
|
"epoch": 8.52, |
|
"learning_rate": 7.45959595959596e-06, |
|
"loss": 0.9504, |
|
"step": 261500 |
|
}, |
|
{ |
|
"epoch": 8.53, |
|
"learning_rate": 7.454545454545456e-06, |
|
"loss": 0.9502, |
|
"step": 262000 |
|
}, |
|
{ |
|
"epoch": 8.55, |
|
"learning_rate": 7.44949494949495e-06, |
|
"loss": 0.9497, |
|
"step": 262500 |
|
}, |
|
{ |
|
"epoch": 8.56, |
|
"learning_rate": 7.444444444444445e-06, |
|
"loss": 0.9491, |
|
"step": 263000 |
|
}, |
|
{ |
|
"epoch": 8.58, |
|
"learning_rate": 7.439393939393939e-06, |
|
"loss": 0.9482, |
|
"step": 263500 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 7.434343434343435e-06, |
|
"loss": 0.9472, |
|
"step": 264000 |
|
}, |
|
{ |
|
"epoch": 8.61, |
|
"learning_rate": 7.42929292929293e-06, |
|
"loss": 0.9474, |
|
"step": 264500 |
|
}, |
|
{ |
|
"epoch": 8.63, |
|
"learning_rate": 7.424242424242425e-06, |
|
"loss": 0.9474, |
|
"step": 265000 |
|
}, |
|
{ |
|
"epoch": 8.65, |
|
"learning_rate": 7.419191919191919e-06, |
|
"loss": 0.9464, |
|
"step": 265500 |
|
}, |
|
{ |
|
"epoch": 8.66, |
|
"learning_rate": 7.414141414141415e-06, |
|
"loss": 0.9463, |
|
"step": 266000 |
|
}, |
|
{ |
|
"epoch": 8.68, |
|
"learning_rate": 7.40909090909091e-06, |
|
"loss": 0.9459, |
|
"step": 266500 |
|
}, |
|
{ |
|
"epoch": 8.69, |
|
"learning_rate": 7.4040404040404045e-06, |
|
"loss": 0.9462, |
|
"step": 267000 |
|
}, |
|
{ |
|
"epoch": 8.71, |
|
"learning_rate": 7.3989898989899e-06, |
|
"loss": 0.945, |
|
"step": 267500 |
|
}, |
|
{ |
|
"epoch": 8.73, |
|
"learning_rate": 7.393939393939395e-06, |
|
"loss": 0.9445, |
|
"step": 268000 |
|
}, |
|
{ |
|
"epoch": 8.74, |
|
"learning_rate": 7.38888888888889e-06, |
|
"loss": 0.9436, |
|
"step": 268500 |
|
}, |
|
{ |
|
"epoch": 8.76, |
|
"learning_rate": 7.383838383838384e-06, |
|
"loss": 0.9431, |
|
"step": 269000 |
|
}, |
|
{ |
|
"epoch": 8.78, |
|
"learning_rate": 7.378787878787879e-06, |
|
"loss": 0.943, |
|
"step": 269500 |
|
}, |
|
{ |
|
"epoch": 8.79, |
|
"learning_rate": 7.373737373737374e-06, |
|
"loss": 0.9421, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 8.79, |
|
"eval_loss": 0.8987886905670166, |
|
"eval_runtime": 91.9986, |
|
"eval_samples_per_second": 863.187, |
|
"eval_steps_per_second": 3.38, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 8.81, |
|
"learning_rate": 7.36868686868687e-06, |
|
"loss": 0.9422, |
|
"step": 270500 |
|
}, |
|
{ |
|
"epoch": 8.82, |
|
"learning_rate": 7.363636363636364e-06, |
|
"loss": 0.9417, |
|
"step": 271000 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"learning_rate": 7.358585858585859e-06, |
|
"loss": 0.9409, |
|
"step": 271500 |
|
}, |
|
{ |
|
"epoch": 8.86, |
|
"learning_rate": 7.353535353535353e-06, |
|
"loss": 0.9403, |
|
"step": 272000 |
|
}, |
|
{ |
|
"epoch": 8.87, |
|
"learning_rate": 7.348484848484849e-06, |
|
"loss": 0.9399, |
|
"step": 272500 |
|
}, |
|
{ |
|
"epoch": 8.89, |
|
"learning_rate": 7.343434343434344e-06, |
|
"loss": 0.9392, |
|
"step": 273000 |
|
}, |
|
{ |
|
"epoch": 8.91, |
|
"learning_rate": 7.338383838383839e-06, |
|
"loss": 0.9393, |
|
"step": 273500 |
|
}, |
|
{ |
|
"epoch": 8.92, |
|
"learning_rate": 7.333333333333333e-06, |
|
"loss": 0.9381, |
|
"step": 274000 |
|
}, |
|
{ |
|
"epoch": 8.94, |
|
"learning_rate": 7.328282828282829e-06, |
|
"loss": 0.9388, |
|
"step": 274500 |
|
}, |
|
{ |
|
"epoch": 8.95, |
|
"learning_rate": 7.323232323232324e-06, |
|
"loss": 0.9377, |
|
"step": 275000 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"learning_rate": 7.3181818181818186e-06, |
|
"loss": 0.9367, |
|
"step": 275500 |
|
}, |
|
{ |
|
"epoch": 8.99, |
|
"learning_rate": 7.3131313131313146e-06, |
|
"loss": 0.9372, |
|
"step": 276000 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 7.308080808080809e-06, |
|
"loss": 0.9372, |
|
"step": 276500 |
|
}, |
|
{ |
|
"epoch": 9.02, |
|
"learning_rate": 7.303030303030304e-06, |
|
"loss": 0.936, |
|
"step": 277000 |
|
}, |
|
{ |
|
"epoch": 9.04, |
|
"learning_rate": 7.297979797979798e-06, |
|
"loss": 0.9364, |
|
"step": 277500 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"learning_rate": 7.2929292929292934e-06, |
|
"loss": 0.9357, |
|
"step": 278000 |
|
}, |
|
{ |
|
"epoch": 9.07, |
|
"learning_rate": 7.287878787878789e-06, |
|
"loss": 0.9347, |
|
"step": 278500 |
|
}, |
|
{ |
|
"epoch": 9.08, |
|
"learning_rate": 7.282828282828284e-06, |
|
"loss": 0.9347, |
|
"step": 279000 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"learning_rate": 7.277777777777778e-06, |
|
"loss": 0.9344, |
|
"step": 279500 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"learning_rate": 7.272727272727273e-06, |
|
"loss": 0.9342, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"eval_loss": 0.8905976414680481, |
|
"eval_runtime": 91.0407, |
|
"eval_samples_per_second": 872.269, |
|
"eval_steps_per_second": 3.416, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 9.13, |
|
"learning_rate": 7.2676767676767675e-06, |
|
"loss": 0.9337, |
|
"step": 280500 |
|
}, |
|
{ |
|
"epoch": 9.15, |
|
"learning_rate": 7.2626262626262635e-06, |
|
"loss": 0.933, |
|
"step": 281000 |
|
}, |
|
{ |
|
"epoch": 9.17, |
|
"learning_rate": 7.257575757575758e-06, |
|
"loss": 0.9325, |
|
"step": 281500 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"learning_rate": 7.252525252525253e-06, |
|
"loss": 0.9321, |
|
"step": 282000 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 7.247474747474747e-06, |
|
"loss": 0.9319, |
|
"step": 282500 |
|
}, |
|
{ |
|
"epoch": 9.22, |
|
"learning_rate": 7.242424242424243e-06, |
|
"loss": 0.9319, |
|
"step": 283000 |
|
}, |
|
{ |
|
"epoch": 9.23, |
|
"learning_rate": 7.237373737373738e-06, |
|
"loss": 0.9306, |
|
"step": 283500 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"learning_rate": 7.232323232323233e-06, |
|
"loss": 0.931, |
|
"step": 284000 |
|
}, |
|
{ |
|
"epoch": 9.26, |
|
"learning_rate": 7.227272727272729e-06, |
|
"loss": 0.9292, |
|
"step": 284500 |
|
}, |
|
{ |
|
"epoch": 9.28, |
|
"learning_rate": 7.222222222222223e-06, |
|
"loss": 0.93, |
|
"step": 285000 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"learning_rate": 7.217171717171718e-06, |
|
"loss": 0.9296, |
|
"step": 285500 |
|
}, |
|
{ |
|
"epoch": 9.31, |
|
"learning_rate": 7.212121212121212e-06, |
|
"loss": 0.9287, |
|
"step": 286000 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"learning_rate": 7.207070707070708e-06, |
|
"loss": 0.9282, |
|
"step": 286500 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"learning_rate": 7.202020202020203e-06, |
|
"loss": 0.9281, |
|
"step": 287000 |
|
}, |
|
{ |
|
"epoch": 9.36, |
|
"learning_rate": 7.196969696969698e-06, |
|
"loss": 0.9277, |
|
"step": 287500 |
|
}, |
|
{ |
|
"epoch": 9.38, |
|
"learning_rate": 7.191919191919192e-06, |
|
"loss": 0.9276, |
|
"step": 288000 |
|
}, |
|
{ |
|
"epoch": 9.39, |
|
"learning_rate": 7.186868686868688e-06, |
|
"loss": 0.9269, |
|
"step": 288500 |
|
}, |
|
{ |
|
"epoch": 9.41, |
|
"learning_rate": 7.181818181818182e-06, |
|
"loss": 0.9262, |
|
"step": 289000 |
|
}, |
|
{ |
|
"epoch": 9.43, |
|
"learning_rate": 7.1767676767676775e-06, |
|
"loss": 0.926, |
|
"step": 289500 |
|
}, |
|
{ |
|
"epoch": 9.44, |
|
"learning_rate": 7.171717171717172e-06, |
|
"loss": 0.926, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 9.44, |
|
"eval_loss": 0.8822857737541199, |
|
"eval_runtime": 87.516, |
|
"eval_samples_per_second": 907.4, |
|
"eval_steps_per_second": 3.554, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 9.46, |
|
"learning_rate": 7.166666666666667e-06, |
|
"loss": 0.9252, |
|
"step": 290500 |
|
}, |
|
{ |
|
"epoch": 9.48, |
|
"learning_rate": 7.161616161616162e-06, |
|
"loss": 0.9248, |
|
"step": 291000 |
|
}, |
|
{ |
|
"epoch": 9.49, |
|
"learning_rate": 7.156565656565657e-06, |
|
"loss": 0.9253, |
|
"step": 291500 |
|
}, |
|
{ |
|
"epoch": 9.51, |
|
"learning_rate": 7.151515151515152e-06, |
|
"loss": 0.9242, |
|
"step": 292000 |
|
}, |
|
{ |
|
"epoch": 9.52, |
|
"learning_rate": 7.146464646464647e-06, |
|
"loss": 0.9234, |
|
"step": 292500 |
|
}, |
|
{ |
|
"epoch": 9.54, |
|
"learning_rate": 7.141414141414143e-06, |
|
"loss": 0.9238, |
|
"step": 293000 |
|
}, |
|
{ |
|
"epoch": 9.56, |
|
"learning_rate": 7.136363636363637e-06, |
|
"loss": 0.9238, |
|
"step": 293500 |
|
}, |
|
{ |
|
"epoch": 9.57, |
|
"learning_rate": 7.131313131313132e-06, |
|
"loss": 0.9228, |
|
"step": 294000 |
|
}, |
|
{ |
|
"epoch": 9.59, |
|
"learning_rate": 7.126262626262626e-06, |
|
"loss": 0.9216, |
|
"step": 294500 |
|
}, |
|
{ |
|
"epoch": 9.61, |
|
"learning_rate": 7.121212121212122e-06, |
|
"loss": 0.9216, |
|
"step": 295000 |
|
}, |
|
{ |
|
"epoch": 9.62, |
|
"learning_rate": 7.116161616161617e-06, |
|
"loss": 0.9215, |
|
"step": 295500 |
|
}, |
|
{ |
|
"epoch": 9.64, |
|
"learning_rate": 7.111111111111112e-06, |
|
"loss": 0.9208, |
|
"step": 296000 |
|
}, |
|
{ |
|
"epoch": 9.65, |
|
"learning_rate": 7.106060606060606e-06, |
|
"loss": 0.9213, |
|
"step": 296500 |
|
}, |
|
{ |
|
"epoch": 9.67, |
|
"learning_rate": 7.101010101010102e-06, |
|
"loss": 0.9207, |
|
"step": 297000 |
|
}, |
|
{ |
|
"epoch": 9.69, |
|
"learning_rate": 7.095959595959596e-06, |
|
"loss": 0.9206, |
|
"step": 297500 |
|
}, |
|
{ |
|
"epoch": 9.7, |
|
"learning_rate": 7.0909090909090916e-06, |
|
"loss": 0.92, |
|
"step": 298000 |
|
}, |
|
{ |
|
"epoch": 9.72, |
|
"learning_rate": 7.085858585858586e-06, |
|
"loss": 0.9194, |
|
"step": 298500 |
|
}, |
|
{ |
|
"epoch": 9.74, |
|
"learning_rate": 7.080808080808082e-06, |
|
"loss": 0.9191, |
|
"step": 299000 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"learning_rate": 7.075757575757576e-06, |
|
"loss": 0.9191, |
|
"step": 299500 |
|
}, |
|
{ |
|
"epoch": 9.77, |
|
"learning_rate": 7.070707070707071e-06, |
|
"loss": 0.9184, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 9.77, |
|
"eval_loss": 0.8790136575698853, |
|
"eval_runtime": 89.0584, |
|
"eval_samples_per_second": 891.684, |
|
"eval_steps_per_second": 3.492, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 9.79, |
|
"learning_rate": 7.065656565656566e-06, |
|
"loss": 0.9183, |
|
"step": 300500 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 7.060606060606061e-06, |
|
"loss": 0.918, |
|
"step": 301000 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"learning_rate": 7.055555555555557e-06, |
|
"loss": 0.9174, |
|
"step": 301500 |
|
}, |
|
{ |
|
"epoch": 9.83, |
|
"learning_rate": 7.050505050505051e-06, |
|
"loss": 0.9162, |
|
"step": 302000 |
|
}, |
|
{ |
|
"epoch": 9.85, |
|
"learning_rate": 7.045454545454546e-06, |
|
"loss": 0.9162, |
|
"step": 302500 |
|
}, |
|
{ |
|
"epoch": 9.87, |
|
"learning_rate": 7.0404040404040404e-06, |
|
"loss": 0.9159, |
|
"step": 303000 |
|
}, |
|
{ |
|
"epoch": 9.88, |
|
"learning_rate": 7.0353535353535364e-06, |
|
"loss": 0.9158, |
|
"step": 303500 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"learning_rate": 7.030303030303031e-06, |
|
"loss": 0.9154, |
|
"step": 304000 |
|
}, |
|
{ |
|
"epoch": 9.92, |
|
"learning_rate": 7.025252525252526e-06, |
|
"loss": 0.9154, |
|
"step": 304500 |
|
}, |
|
{ |
|
"epoch": 9.93, |
|
"learning_rate": 7.02020202020202e-06, |
|
"loss": 0.9144, |
|
"step": 305000 |
|
}, |
|
{ |
|
"epoch": 9.95, |
|
"learning_rate": 7.015151515151516e-06, |
|
"loss": 0.9151, |
|
"step": 305500 |
|
}, |
|
{ |
|
"epoch": 9.96, |
|
"learning_rate": 7.0101010101010105e-06, |
|
"loss": 0.9142, |
|
"step": 306000 |
|
}, |
|
{ |
|
"epoch": 9.98, |
|
"learning_rate": 7.005050505050506e-06, |
|
"loss": 0.9145, |
|
"step": 306500 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 7e-06, |
|
"loss": 0.9138, |
|
"step": 307000 |
|
}, |
|
{ |
|
"epoch": 10.01, |
|
"learning_rate": 6.994949494949496e-06, |
|
"loss": 0.9128, |
|
"step": 307500 |
|
}, |
|
{ |
|
"epoch": 10.03, |
|
"learning_rate": 6.98989898989899e-06, |
|
"loss": 0.9138, |
|
"step": 308000 |
|
}, |
|
{ |
|
"epoch": 10.05, |
|
"learning_rate": 6.984848484848485e-06, |
|
"loss": 0.913, |
|
"step": 308500 |
|
}, |
|
{ |
|
"epoch": 10.06, |
|
"learning_rate": 6.979797979797981e-06, |
|
"loss": 0.9125, |
|
"step": 309000 |
|
}, |
|
{ |
|
"epoch": 10.08, |
|
"learning_rate": 6.974747474747476e-06, |
|
"loss": 0.9117, |
|
"step": 309500 |
|
}, |
|
{ |
|
"epoch": 10.09, |
|
"learning_rate": 6.969696969696971e-06, |
|
"loss": 0.9114, |
|
"step": 310000 |
|
}, |
|
{ |
|
"epoch": 10.09, |
|
"eval_loss": 0.8732441067695618, |
|
"eval_runtime": 91.0609, |
|
"eval_samples_per_second": 872.076, |
|
"eval_steps_per_second": 3.415, |
|
"step": 310000 |
|
}, |
|
{ |
|
"epoch": 10.11, |
|
"learning_rate": 6.964646464646465e-06, |
|
"loss": 0.9112, |
|
"step": 310500 |
|
}, |
|
{ |
|
"epoch": 10.13, |
|
"learning_rate": 6.95959595959596e-06, |
|
"loss": 0.9108, |
|
"step": 311000 |
|
}, |
|
{ |
|
"epoch": 10.14, |
|
"learning_rate": 6.954545454545455e-06, |
|
"loss": 0.9102, |
|
"step": 311500 |
|
}, |
|
{ |
|
"epoch": 10.16, |
|
"learning_rate": 6.9494949494949505e-06, |
|
"loss": 0.9105, |
|
"step": 312000 |
|
}, |
|
{ |
|
"epoch": 10.18, |
|
"learning_rate": 6.944444444444445e-06, |
|
"loss": 0.9103, |
|
"step": 312500 |
|
}, |
|
{ |
|
"epoch": 10.19, |
|
"learning_rate": 6.93939393939394e-06, |
|
"loss": 0.9101, |
|
"step": 313000 |
|
}, |
|
{ |
|
"epoch": 10.21, |
|
"learning_rate": 6.934343434343434e-06, |
|
"loss": 0.9097, |
|
"step": 313500 |
|
}, |
|
{ |
|
"epoch": 10.22, |
|
"learning_rate": 6.92929292929293e-06, |
|
"loss": 0.9086, |
|
"step": 314000 |
|
}, |
|
{ |
|
"epoch": 10.24, |
|
"learning_rate": 6.9242424242424245e-06, |
|
"loss": 0.9089, |
|
"step": 314500 |
|
}, |
|
{ |
|
"epoch": 10.26, |
|
"learning_rate": 6.91919191919192e-06, |
|
"loss": 0.9082, |
|
"step": 315000 |
|
}, |
|
{ |
|
"epoch": 10.27, |
|
"learning_rate": 6.914141414141414e-06, |
|
"loss": 0.9082, |
|
"step": 315500 |
|
}, |
|
{ |
|
"epoch": 10.29, |
|
"learning_rate": 6.90909090909091e-06, |
|
"loss": 0.9077, |
|
"step": 316000 |
|
}, |
|
{ |
|
"epoch": 10.31, |
|
"learning_rate": 6.904040404040404e-06, |
|
"loss": 0.9072, |
|
"step": 316500 |
|
}, |
|
{ |
|
"epoch": 10.32, |
|
"learning_rate": 6.898989898989899e-06, |
|
"loss": 0.907, |
|
"step": 317000 |
|
}, |
|
{ |
|
"epoch": 10.34, |
|
"learning_rate": 6.893939393939395e-06, |
|
"loss": 0.9067, |
|
"step": 317500 |
|
}, |
|
{ |
|
"epoch": 10.35, |
|
"learning_rate": 6.88888888888889e-06, |
|
"loss": 0.9067, |
|
"step": 318000 |
|
}, |
|
{ |
|
"epoch": 10.37, |
|
"learning_rate": 6.883838383838385e-06, |
|
"loss": 0.9054, |
|
"step": 318500 |
|
}, |
|
{ |
|
"epoch": 10.39, |
|
"learning_rate": 6.878787878787879e-06, |
|
"loss": 0.9051, |
|
"step": 319000 |
|
}, |
|
{ |
|
"epoch": 10.4, |
|
"learning_rate": 6.873737373737375e-06, |
|
"loss": 0.905, |
|
"step": 319500 |
|
}, |
|
{ |
|
"epoch": 10.42, |
|
"learning_rate": 6.868686868686869e-06, |
|
"loss": 0.9047, |
|
"step": 320000 |
|
}, |
|
{ |
|
"epoch": 10.42, |
|
"eval_loss": 0.8707190155982971, |
|
"eval_runtime": 92.9604, |
|
"eval_samples_per_second": 854.256, |
|
"eval_steps_per_second": 3.346, |
|
"step": 320000 |
|
}, |
|
{ |
|
"epoch": 10.44, |
|
"learning_rate": 6.8636363636363645e-06, |
|
"loss": 0.9048, |
|
"step": 320500 |
|
}, |
|
{ |
|
"epoch": 10.45, |
|
"learning_rate": 6.858585858585859e-06, |
|
"loss": 0.9051, |
|
"step": 321000 |
|
}, |
|
{ |
|
"epoch": 10.47, |
|
"learning_rate": 6.853535353535354e-06, |
|
"loss": 0.9039, |
|
"step": 321500 |
|
}, |
|
{ |
|
"epoch": 10.49, |
|
"learning_rate": 6.848484848484849e-06, |
|
"loss": 0.9039, |
|
"step": 322000 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 6.843434343434344e-06, |
|
"loss": 0.9034, |
|
"step": 322500 |
|
}, |
|
{ |
|
"epoch": 10.52, |
|
"learning_rate": 6.8383838383838386e-06, |
|
"loss": 0.9028, |
|
"step": 323000 |
|
}, |
|
{ |
|
"epoch": 10.53, |
|
"learning_rate": 6.833333333333334e-06, |
|
"loss": 0.9025, |
|
"step": 323500 |
|
}, |
|
{ |
|
"epoch": 10.55, |
|
"learning_rate": 6.828282828282828e-06, |
|
"loss": 0.9027, |
|
"step": 324000 |
|
}, |
|
{ |
|
"epoch": 10.57, |
|
"learning_rate": 6.823232323232324e-06, |
|
"loss": 0.9017, |
|
"step": 324500 |
|
}, |
|
{ |
|
"epoch": 10.58, |
|
"learning_rate": 6.818181818181818e-06, |
|
"loss": 0.9026, |
|
"step": 325000 |
|
}, |
|
{ |
|
"epoch": 10.6, |
|
"learning_rate": 6.813131313131313e-06, |
|
"loss": 0.902, |
|
"step": 325500 |
|
}, |
|
{ |
|
"epoch": 10.62, |
|
"learning_rate": 6.808080808080809e-06, |
|
"loss": 0.9021, |
|
"step": 326000 |
|
}, |
|
{ |
|
"epoch": 10.63, |
|
"learning_rate": 6.803030303030304e-06, |
|
"loss": 0.9008, |
|
"step": 326500 |
|
}, |
|
{ |
|
"epoch": 10.65, |
|
"learning_rate": 6.797979797979799e-06, |
|
"loss": 0.9006, |
|
"step": 327000 |
|
}, |
|
{ |
|
"epoch": 10.66, |
|
"learning_rate": 6.792929292929293e-06, |
|
"loss": 0.9005, |
|
"step": 327500 |
|
}, |
|
{ |
|
"epoch": 10.68, |
|
"learning_rate": 6.787878787878789e-06, |
|
"loss": 0.8999, |
|
"step": 328000 |
|
}, |
|
{ |
|
"epoch": 10.7, |
|
"learning_rate": 6.7828282828282834e-06, |
|
"loss": 0.9, |
|
"step": 328500 |
|
}, |
|
{ |
|
"epoch": 10.71, |
|
"learning_rate": 6.777777777777779e-06, |
|
"loss": 0.9004, |
|
"step": 329000 |
|
}, |
|
{ |
|
"epoch": 10.73, |
|
"learning_rate": 6.772727272727273e-06, |
|
"loss": 0.8989, |
|
"step": 329500 |
|
}, |
|
{ |
|
"epoch": 10.75, |
|
"learning_rate": 6.767676767676769e-06, |
|
"loss": 0.8993, |
|
"step": 330000 |
|
}, |
|
{ |
|
"epoch": 10.75, |
|
"eval_loss": 0.8648310303688049, |
|
"eval_runtime": 90.2528, |
|
"eval_samples_per_second": 879.884, |
|
"eval_steps_per_second": 3.446, |
|
"step": 330000 |
|
}, |
|
{ |
|
"epoch": 10.76, |
|
"learning_rate": 6.762626262626263e-06, |
|
"loss": 0.8983, |
|
"step": 330500 |
|
}, |
|
{ |
|
"epoch": 10.78, |
|
"learning_rate": 6.757575757575758e-06, |
|
"loss": 0.8989, |
|
"step": 331000 |
|
}, |
|
{ |
|
"epoch": 10.79, |
|
"learning_rate": 6.752525252525253e-06, |
|
"loss": 0.8984, |
|
"step": 331500 |
|
}, |
|
{ |
|
"epoch": 10.81, |
|
"learning_rate": 6.747474747474749e-06, |
|
"loss": 0.898, |
|
"step": 332000 |
|
}, |
|
{ |
|
"epoch": 10.83, |
|
"learning_rate": 6.742424242424243e-06, |
|
"loss": 0.8975, |
|
"step": 332500 |
|
}, |
|
{ |
|
"epoch": 10.84, |
|
"learning_rate": 6.737373737373738e-06, |
|
"loss": 0.8973, |
|
"step": 333000 |
|
}, |
|
{ |
|
"epoch": 10.86, |
|
"learning_rate": 6.732323232323232e-06, |
|
"loss": 0.8971, |
|
"step": 333500 |
|
}, |
|
{ |
|
"epoch": 10.88, |
|
"learning_rate": 6.7272727272727275e-06, |
|
"loss": 0.8971, |
|
"step": 334000 |
|
}, |
|
{ |
|
"epoch": 10.89, |
|
"learning_rate": 6.7222222222222235e-06, |
|
"loss": 0.8962, |
|
"step": 334500 |
|
}, |
|
{ |
|
"epoch": 10.91, |
|
"learning_rate": 6.717171717171718e-06, |
|
"loss": 0.8963, |
|
"step": 335000 |
|
}, |
|
{ |
|
"epoch": 10.92, |
|
"learning_rate": 6.712121212121213e-06, |
|
"loss": 0.8961, |
|
"step": 335500 |
|
}, |
|
{ |
|
"epoch": 10.94, |
|
"learning_rate": 6.707070707070707e-06, |
|
"loss": 0.8961, |
|
"step": 336000 |
|
}, |
|
{ |
|
"epoch": 10.96, |
|
"learning_rate": 6.702020202020203e-06, |
|
"loss": 0.895, |
|
"step": 336500 |
|
}, |
|
{ |
|
"epoch": 10.97, |
|
"learning_rate": 6.6969696969696975e-06, |
|
"loss": 0.8949, |
|
"step": 337000 |
|
}, |
|
{ |
|
"epoch": 10.99, |
|
"learning_rate": 6.691919191919193e-06, |
|
"loss": 0.8944, |
|
"step": 337500 |
|
}, |
|
{ |
|
"epoch": 11.01, |
|
"learning_rate": 6.686868686868687e-06, |
|
"loss": 0.8942, |
|
"step": 338000 |
|
}, |
|
{ |
|
"epoch": 11.02, |
|
"learning_rate": 6.681818181818183e-06, |
|
"loss": 0.8942, |
|
"step": 338500 |
|
}, |
|
{ |
|
"epoch": 11.04, |
|
"learning_rate": 6.676767676767677e-06, |
|
"loss": 0.8944, |
|
"step": 339000 |
|
}, |
|
{ |
|
"epoch": 11.06, |
|
"learning_rate": 6.671717171717172e-06, |
|
"loss": 0.8938, |
|
"step": 339500 |
|
}, |
|
{ |
|
"epoch": 11.07, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 0.8938, |
|
"step": 340000 |
|
}, |
|
{ |
|
"epoch": 11.07, |
|
"eval_loss": 0.8565430641174316, |
|
"eval_runtime": 91.4844, |
|
"eval_samples_per_second": 868.039, |
|
"eval_steps_per_second": 3.399, |
|
"step": 340000 |
|
}, |
|
{ |
|
"epoch": 11.09, |
|
"learning_rate": 6.661616161616163e-06, |
|
"loss": 0.8931, |
|
"step": 340500 |
|
}, |
|
{ |
|
"epoch": 11.1, |
|
"learning_rate": 6.656565656565657e-06, |
|
"loss": 0.8921, |
|
"step": 341000 |
|
}, |
|
{ |
|
"epoch": 11.12, |
|
"learning_rate": 6.651515151515152e-06, |
|
"loss": 0.8925, |
|
"step": 341500 |
|
}, |
|
{ |
|
"epoch": 11.14, |
|
"learning_rate": 6.646464646464646e-06, |
|
"loss": 0.8921, |
|
"step": 342000 |
|
}, |
|
{ |
|
"epoch": 11.15, |
|
"learning_rate": 6.641414141414142e-06, |
|
"loss": 0.8919, |
|
"step": 342500 |
|
}, |
|
{ |
|
"epoch": 11.17, |
|
"learning_rate": 6.6363636363636375e-06, |
|
"loss": 0.8913, |
|
"step": 343000 |
|
}, |
|
{ |
|
"epoch": 11.19, |
|
"learning_rate": 6.631313131313132e-06, |
|
"loss": 0.8913, |
|
"step": 343500 |
|
}, |
|
{ |
|
"epoch": 11.2, |
|
"learning_rate": 6.626262626262627e-06, |
|
"loss": 0.8904, |
|
"step": 344000 |
|
}, |
|
{ |
|
"epoch": 11.22, |
|
"learning_rate": 6.621212121212121e-06, |
|
"loss": 0.8906, |
|
"step": 344500 |
|
}, |
|
{ |
|
"epoch": 11.23, |
|
"learning_rate": 6.616161616161617e-06, |
|
"loss": 0.8901, |
|
"step": 345000 |
|
}, |
|
{ |
|
"epoch": 11.25, |
|
"learning_rate": 6.6111111111111115e-06, |
|
"loss": 0.8903, |
|
"step": 345500 |
|
}, |
|
{ |
|
"epoch": 11.27, |
|
"learning_rate": 6.606060606060607e-06, |
|
"loss": 0.8897, |
|
"step": 346000 |
|
}, |
|
{ |
|
"epoch": 11.28, |
|
"learning_rate": 6.601010101010101e-06, |
|
"loss": 0.8892, |
|
"step": 346500 |
|
}, |
|
{ |
|
"epoch": 11.3, |
|
"learning_rate": 6.595959595959597e-06, |
|
"loss": 0.8893, |
|
"step": 347000 |
|
}, |
|
{ |
|
"epoch": 11.32, |
|
"learning_rate": 6.590909090909091e-06, |
|
"loss": 0.8893, |
|
"step": 347500 |
|
}, |
|
{ |
|
"epoch": 11.33, |
|
"learning_rate": 6.585858585858586e-06, |
|
"loss": 0.8889, |
|
"step": 348000 |
|
}, |
|
{ |
|
"epoch": 11.35, |
|
"learning_rate": 6.580808080808081e-06, |
|
"loss": 0.888, |
|
"step": 348500 |
|
}, |
|
{ |
|
"epoch": 11.36, |
|
"learning_rate": 6.575757575757577e-06, |
|
"loss": 0.888, |
|
"step": 349000 |
|
}, |
|
{ |
|
"epoch": 11.38, |
|
"learning_rate": 6.570707070707071e-06, |
|
"loss": 0.8875, |
|
"step": 349500 |
|
}, |
|
{ |
|
"epoch": 11.4, |
|
"learning_rate": 6.565656565656566e-06, |
|
"loss": 0.8867, |
|
"step": 350000 |
|
}, |
|
{ |
|
"epoch": 11.4, |
|
"eval_loss": 0.8537823557853699, |
|
"eval_runtime": 91.6919, |
|
"eval_samples_per_second": 866.075, |
|
"eval_steps_per_second": 3.392, |
|
"step": 350000 |
|
}, |
|
{ |
|
"epoch": 11.41, |
|
"learning_rate": 6.56060606060606e-06, |
|
"loss": 0.8865, |
|
"step": 350500 |
|
}, |
|
{ |
|
"epoch": 11.43, |
|
"learning_rate": 6.555555555555556e-06, |
|
"loss": 0.8867, |
|
"step": 351000 |
|
}, |
|
{ |
|
"epoch": 11.45, |
|
"learning_rate": 6.5505050505050516e-06, |
|
"loss": 0.8868, |
|
"step": 351500 |
|
}, |
|
{ |
|
"epoch": 11.46, |
|
"learning_rate": 6.545454545454546e-06, |
|
"loss": 0.8861, |
|
"step": 352000 |
|
}, |
|
{ |
|
"epoch": 11.48, |
|
"learning_rate": 6.540404040404042e-06, |
|
"loss": 0.8854, |
|
"step": 352500 |
|
}, |
|
{ |
|
"epoch": 11.49, |
|
"learning_rate": 6.535353535353536e-06, |
|
"loss": 0.8856, |
|
"step": 353000 |
|
}, |
|
{ |
|
"epoch": 11.51, |
|
"learning_rate": 6.530303030303031e-06, |
|
"loss": 0.8856, |
|
"step": 353500 |
|
}, |
|
{ |
|
"epoch": 11.53, |
|
"learning_rate": 6.525252525252526e-06, |
|
"loss": 0.8851, |
|
"step": 354000 |
|
}, |
|
{ |
|
"epoch": 11.54, |
|
"learning_rate": 6.520202020202021e-06, |
|
"loss": 0.8844, |
|
"step": 354500 |
|
}, |
|
{ |
|
"epoch": 11.56, |
|
"learning_rate": 6.515151515151516e-06, |
|
"loss": 0.8842, |
|
"step": 355000 |
|
}, |
|
{ |
|
"epoch": 11.58, |
|
"learning_rate": 6.510101010101011e-06, |
|
"loss": 0.8838, |
|
"step": 355500 |
|
}, |
|
{ |
|
"epoch": 11.59, |
|
"learning_rate": 6.505050505050505e-06, |
|
"loss": 0.8834, |
|
"step": 356000 |
|
}, |
|
{ |
|
"epoch": 11.61, |
|
"learning_rate": 6.5000000000000004e-06, |
|
"loss": 0.8828, |
|
"step": 356500 |
|
}, |
|
{ |
|
"epoch": 11.62, |
|
"learning_rate": 6.494949494949495e-06, |
|
"loss": 0.8829, |
|
"step": 357000 |
|
}, |
|
{ |
|
"epoch": 11.64, |
|
"learning_rate": 6.489898989898991e-06, |
|
"loss": 0.883, |
|
"step": 357500 |
|
}, |
|
{ |
|
"epoch": 11.66, |
|
"learning_rate": 6.484848484848485e-06, |
|
"loss": 0.8826, |
|
"step": 358000 |
|
}, |
|
{ |
|
"epoch": 11.67, |
|
"learning_rate": 6.47979797979798e-06, |
|
"loss": 0.8827, |
|
"step": 358500 |
|
}, |
|
{ |
|
"epoch": 11.69, |
|
"learning_rate": 6.4747474747474745e-06, |
|
"loss": 0.8824, |
|
"step": 359000 |
|
}, |
|
{ |
|
"epoch": 11.71, |
|
"learning_rate": 6.4696969696969705e-06, |
|
"loss": 0.8815, |
|
"step": 359500 |
|
}, |
|
{ |
|
"epoch": 11.72, |
|
"learning_rate": 6.464646464646466e-06, |
|
"loss": 0.881, |
|
"step": 360000 |
|
}, |
|
{ |
|
"epoch": 11.72, |
|
"eval_loss": 0.8482265472412109, |
|
"eval_runtime": 90.1088, |
|
"eval_samples_per_second": 881.29, |
|
"eval_steps_per_second": 3.451, |
|
"step": 360000 |
|
}, |
|
{ |
|
"epoch": 11.74, |
|
"learning_rate": 6.45959595959596e-06, |
|
"loss": 0.8809, |
|
"step": 360500 |
|
}, |
|
{ |
|
"epoch": 11.76, |
|
"learning_rate": 6.454545454545456e-06, |
|
"loss": 0.8808, |
|
"step": 361000 |
|
}, |
|
{ |
|
"epoch": 11.77, |
|
"learning_rate": 6.44949494949495e-06, |
|
"loss": 0.8804, |
|
"step": 361500 |
|
}, |
|
{ |
|
"epoch": 11.79, |
|
"learning_rate": 6.444444444444445e-06, |
|
"loss": 0.8803, |
|
"step": 362000 |
|
}, |
|
{ |
|
"epoch": 11.8, |
|
"learning_rate": 6.43939393939394e-06, |
|
"loss": 0.8799, |
|
"step": 362500 |
|
}, |
|
{ |
|
"epoch": 11.82, |
|
"learning_rate": 6.434343434343436e-06, |
|
"loss": 0.8795, |
|
"step": 363000 |
|
}, |
|
{ |
|
"epoch": 11.84, |
|
"learning_rate": 6.42929292929293e-06, |
|
"loss": 0.88, |
|
"step": 363500 |
|
}, |
|
{ |
|
"epoch": 11.85, |
|
"learning_rate": 6.424242424242425e-06, |
|
"loss": 0.8782, |
|
"step": 364000 |
|
}, |
|
{ |
|
"epoch": 11.87, |
|
"learning_rate": 6.419191919191919e-06, |
|
"loss": 0.8792, |
|
"step": 364500 |
|
}, |
|
{ |
|
"epoch": 11.89, |
|
"learning_rate": 6.4141414141414145e-06, |
|
"loss": 0.8784, |
|
"step": 365000 |
|
}, |
|
{ |
|
"epoch": 11.9, |
|
"learning_rate": 6.40909090909091e-06, |
|
"loss": 0.8781, |
|
"step": 365500 |
|
}, |
|
{ |
|
"epoch": 11.92, |
|
"learning_rate": 6.404040404040405e-06, |
|
"loss": 0.8786, |
|
"step": 366000 |
|
}, |
|
{ |
|
"epoch": 11.93, |
|
"learning_rate": 6.398989898989899e-06, |
|
"loss": 0.8776, |
|
"step": 366500 |
|
}, |
|
{ |
|
"epoch": 11.95, |
|
"learning_rate": 6.393939393939394e-06, |
|
"loss": 0.8779, |
|
"step": 367000 |
|
}, |
|
{ |
|
"epoch": 11.97, |
|
"learning_rate": 6.3888888888888885e-06, |
|
"loss": 0.877, |
|
"step": 367500 |
|
}, |
|
{ |
|
"epoch": 11.98, |
|
"learning_rate": 6.3838383838383845e-06, |
|
"loss": 0.8773, |
|
"step": 368000 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 6.37878787878788e-06, |
|
"loss": 0.8764, |
|
"step": 368500 |
|
}, |
|
{ |
|
"epoch": 12.02, |
|
"learning_rate": 6.373737373737374e-06, |
|
"loss": 0.8766, |
|
"step": 369000 |
|
}, |
|
{ |
|
"epoch": 12.03, |
|
"learning_rate": 6.36868686868687e-06, |
|
"loss": 0.8764, |
|
"step": 369500 |
|
}, |
|
{ |
|
"epoch": 12.05, |
|
"learning_rate": 6.363636363636364e-06, |
|
"loss": 0.8756, |
|
"step": 370000 |
|
}, |
|
{ |
|
"epoch": 12.05, |
|
"eval_loss": 0.842664361000061, |
|
"eval_runtime": 89.679, |
|
"eval_samples_per_second": 885.514, |
|
"eval_steps_per_second": 3.468, |
|
"step": 370000 |
|
}, |
|
{ |
|
"epoch": 12.06, |
|
"learning_rate": 6.358585858585859e-06, |
|
"loss": 0.8757, |
|
"step": 370500 |
|
}, |
|
{ |
|
"epoch": 12.08, |
|
"learning_rate": 6.353535353535354e-06, |
|
"loss": 0.8754, |
|
"step": 371000 |
|
}, |
|
{ |
|
"epoch": 12.1, |
|
"learning_rate": 6.34848484848485e-06, |
|
"loss": 0.8751, |
|
"step": 371500 |
|
}, |
|
{ |
|
"epoch": 12.11, |
|
"learning_rate": 6.343434343434344e-06, |
|
"loss": 0.8745, |
|
"step": 372000 |
|
}, |
|
{ |
|
"epoch": 12.13, |
|
"learning_rate": 6.338383838383839e-06, |
|
"loss": 0.8755, |
|
"step": 372500 |
|
}, |
|
{ |
|
"epoch": 12.15, |
|
"learning_rate": 6.333333333333333e-06, |
|
"loss": 0.8746, |
|
"step": 373000 |
|
}, |
|
{ |
|
"epoch": 12.16, |
|
"learning_rate": 6.328282828282829e-06, |
|
"loss": 0.8745, |
|
"step": 373500 |
|
}, |
|
{ |
|
"epoch": 12.18, |
|
"learning_rate": 6.323232323232324e-06, |
|
"loss": 0.8742, |
|
"step": 374000 |
|
}, |
|
{ |
|
"epoch": 12.19, |
|
"learning_rate": 6.318181818181819e-06, |
|
"loss": 0.8738, |
|
"step": 374500 |
|
}, |
|
{ |
|
"epoch": 12.21, |
|
"learning_rate": 6.313131313131313e-06, |
|
"loss": 0.8734, |
|
"step": 375000 |
|
}, |
|
{ |
|
"epoch": 12.23, |
|
"learning_rate": 6.308080808080809e-06, |
|
"loss": 0.873, |
|
"step": 375500 |
|
}, |
|
{ |
|
"epoch": 12.24, |
|
"learning_rate": 6.303030303030303e-06, |
|
"loss": 0.873, |
|
"step": 376000 |
|
}, |
|
{ |
|
"epoch": 12.26, |
|
"learning_rate": 6.2979797979797986e-06, |
|
"loss": 0.8729, |
|
"step": 376500 |
|
}, |
|
{ |
|
"epoch": 12.28, |
|
"learning_rate": 6.292929292929294e-06, |
|
"loss": 0.8722, |
|
"step": 377000 |
|
}, |
|
{ |
|
"epoch": 12.29, |
|
"learning_rate": 6.287878787878788e-06, |
|
"loss": 0.872, |
|
"step": 377500 |
|
}, |
|
{ |
|
"epoch": 12.31, |
|
"learning_rate": 6.282828282828284e-06, |
|
"loss": 0.872, |
|
"step": 378000 |
|
}, |
|
{ |
|
"epoch": 12.32, |
|
"learning_rate": 6.277777777777778e-06, |
|
"loss": 0.8715, |
|
"step": 378500 |
|
}, |
|
{ |
|
"epoch": 12.34, |
|
"learning_rate": 6.2727272727272734e-06, |
|
"loss": 0.8714, |
|
"step": 379000 |
|
}, |
|
{ |
|
"epoch": 12.36, |
|
"learning_rate": 6.267676767676768e-06, |
|
"loss": 0.8718, |
|
"step": 379500 |
|
}, |
|
{ |
|
"epoch": 12.37, |
|
"learning_rate": 6.262626262626264e-06, |
|
"loss": 0.8713, |
|
"step": 380000 |
|
}, |
|
{ |
|
"epoch": 12.37, |
|
"eval_loss": 0.8392786383628845, |
|
"eval_runtime": 91.281, |
|
"eval_samples_per_second": 869.973, |
|
"eval_steps_per_second": 3.407, |
|
"step": 380000 |
|
}, |
|
{ |
|
"epoch": 12.39, |
|
"learning_rate": 6.257575757575758e-06, |
|
"loss": 0.8704, |
|
"step": 380500 |
|
}, |
|
{ |
|
"epoch": 12.41, |
|
"learning_rate": 6.252525252525253e-06, |
|
"loss": 0.871, |
|
"step": 381000 |
|
}, |
|
{ |
|
"epoch": 12.42, |
|
"learning_rate": 6.2474747474747474e-06, |
|
"loss": 0.8705, |
|
"step": 381500 |
|
}, |
|
{ |
|
"epoch": 12.44, |
|
"learning_rate": 6.2424242424242434e-06, |
|
"loss": 0.8699, |
|
"step": 382000 |
|
}, |
|
{ |
|
"epoch": 12.46, |
|
"learning_rate": 6.237373737373738e-06, |
|
"loss": 0.8698, |
|
"step": 382500 |
|
}, |
|
{ |
|
"epoch": 12.47, |
|
"learning_rate": 6.232323232323233e-06, |
|
"loss": 0.87, |
|
"step": 383000 |
|
}, |
|
{ |
|
"epoch": 12.49, |
|
"learning_rate": 6.227272727272727e-06, |
|
"loss": 0.8696, |
|
"step": 383500 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 6.222222222222223e-06, |
|
"loss": 0.8691, |
|
"step": 384000 |
|
}, |
|
{ |
|
"epoch": 12.52, |
|
"learning_rate": 6.2171717171717175e-06, |
|
"loss": 0.8687, |
|
"step": 384500 |
|
}, |
|
{ |
|
"epoch": 12.54, |
|
"learning_rate": 6.212121212121213e-06, |
|
"loss": 0.8683, |
|
"step": 385000 |
|
}, |
|
{ |
|
"epoch": 12.55, |
|
"learning_rate": 6.207070707070707e-06, |
|
"loss": 0.8683, |
|
"step": 385500 |
|
}, |
|
{ |
|
"epoch": 12.57, |
|
"learning_rate": 6.202020202020203e-06, |
|
"loss": 0.8683, |
|
"step": 386000 |
|
}, |
|
{ |
|
"epoch": 12.59, |
|
"learning_rate": 6.196969696969698e-06, |
|
"loss": 0.8675, |
|
"step": 386500 |
|
}, |
|
{ |
|
"epoch": 12.6, |
|
"learning_rate": 6.191919191919192e-06, |
|
"loss": 0.8675, |
|
"step": 387000 |
|
}, |
|
{ |
|
"epoch": 12.62, |
|
"learning_rate": 6.1868686868686875e-06, |
|
"loss": 0.8671, |
|
"step": 387500 |
|
}, |
|
{ |
|
"epoch": 12.63, |
|
"learning_rate": 6.181818181818182e-06, |
|
"loss": 0.8675, |
|
"step": 388000 |
|
}, |
|
{ |
|
"epoch": 12.65, |
|
"learning_rate": 6.176767676767678e-06, |
|
"loss": 0.8675, |
|
"step": 388500 |
|
}, |
|
{ |
|
"epoch": 12.67, |
|
"learning_rate": 6.171717171717172e-06, |
|
"loss": 0.8672, |
|
"step": 389000 |
|
}, |
|
{ |
|
"epoch": 12.68, |
|
"learning_rate": 6.166666666666667e-06, |
|
"loss": 0.8665, |
|
"step": 389500 |
|
}, |
|
{ |
|
"epoch": 12.7, |
|
"learning_rate": 6.1616161616161615e-06, |
|
"loss": 0.8666, |
|
"step": 390000 |
|
}, |
|
{ |
|
"epoch": 12.7, |
|
"eval_loss": 0.8361514806747437, |
|
"eval_runtime": 88.2176, |
|
"eval_samples_per_second": 900.183, |
|
"eval_steps_per_second": 3.525, |
|
"step": 390000 |
|
}, |
|
{ |
|
"epoch": 12.72, |
|
"learning_rate": 6.1565656565656575e-06, |
|
"loss": 0.8666, |
|
"step": 390500 |
|
}, |
|
{ |
|
"epoch": 12.73, |
|
"learning_rate": 6.151515151515152e-06, |
|
"loss": 0.8662, |
|
"step": 391000 |
|
}, |
|
{ |
|
"epoch": 12.75, |
|
"learning_rate": 6.146464646464647e-06, |
|
"loss": 0.8659, |
|
"step": 391500 |
|
}, |
|
{ |
|
"epoch": 12.76, |
|
"learning_rate": 6.141414141414141e-06, |
|
"loss": 0.8662, |
|
"step": 392000 |
|
}, |
|
{ |
|
"epoch": 12.78, |
|
"learning_rate": 6.136363636363637e-06, |
|
"loss": 0.866, |
|
"step": 392500 |
|
}, |
|
{ |
|
"epoch": 12.8, |
|
"learning_rate": 6.1313131313131315e-06, |
|
"loss": 0.8651, |
|
"step": 393000 |
|
}, |
|
{ |
|
"epoch": 12.81, |
|
"learning_rate": 6.126262626262627e-06, |
|
"loss": 0.8647, |
|
"step": 393500 |
|
}, |
|
{ |
|
"epoch": 12.83, |
|
"learning_rate": 6.121212121212121e-06, |
|
"loss": 0.8647, |
|
"step": 394000 |
|
}, |
|
{ |
|
"epoch": 12.85, |
|
"learning_rate": 6.116161616161617e-06, |
|
"loss": 0.8649, |
|
"step": 394500 |
|
}, |
|
{ |
|
"epoch": 12.86, |
|
"learning_rate": 6.111111111111112e-06, |
|
"loss": 0.8641, |
|
"step": 395000 |
|
}, |
|
{ |
|
"epoch": 12.88, |
|
"learning_rate": 6.106060606060606e-06, |
|
"loss": 0.8637, |
|
"step": 395500 |
|
}, |
|
{ |
|
"epoch": 12.89, |
|
"learning_rate": 6.1010101010101015e-06, |
|
"loss": 0.8637, |
|
"step": 396000 |
|
}, |
|
{ |
|
"epoch": 12.91, |
|
"learning_rate": 6.095959595959597e-06, |
|
"loss": 0.8636, |
|
"step": 396500 |
|
}, |
|
{ |
|
"epoch": 12.93, |
|
"learning_rate": 6.090909090909092e-06, |
|
"loss": 0.8634, |
|
"step": 397000 |
|
}, |
|
{ |
|
"epoch": 12.94, |
|
"learning_rate": 6.085858585858586e-06, |
|
"loss": 0.8637, |
|
"step": 397500 |
|
}, |
|
{ |
|
"epoch": 12.96, |
|
"learning_rate": 6.080808080808081e-06, |
|
"loss": 0.863, |
|
"step": 398000 |
|
}, |
|
{ |
|
"epoch": 12.98, |
|
"learning_rate": 6.0757575757575755e-06, |
|
"loss": 0.8627, |
|
"step": 398500 |
|
}, |
|
{ |
|
"epoch": 12.99, |
|
"learning_rate": 6.0707070707070715e-06, |
|
"loss": 0.8623, |
|
"step": 399000 |
|
}, |
|
{ |
|
"epoch": 13.01, |
|
"learning_rate": 6.065656565656566e-06, |
|
"loss": 0.8623, |
|
"step": 399500 |
|
}, |
|
{ |
|
"epoch": 13.03, |
|
"learning_rate": 6.060606060606061e-06, |
|
"loss": 0.8623, |
|
"step": 400000 |
|
}, |
|
{ |
|
"epoch": 13.03, |
|
"eval_loss": 0.832267165184021, |
|
"eval_runtime": 90.9314, |
|
"eval_samples_per_second": 873.318, |
|
"eval_steps_per_second": 3.42, |
|
"step": 400000 |
|
}, |
|
{ |
|
"epoch": 13.04, |
|
"learning_rate": 6.055555555555555e-06, |
|
"loss": 0.8615, |
|
"step": 400500 |
|
}, |
|
{ |
|
"epoch": 13.06, |
|
"learning_rate": 6.050505050505051e-06, |
|
"loss": 0.8617, |
|
"step": 401000 |
|
}, |
|
{ |
|
"epoch": 13.07, |
|
"learning_rate": 6.0454545454545456e-06, |
|
"loss": 0.8615, |
|
"step": 401500 |
|
}, |
|
{ |
|
"epoch": 13.09, |
|
"learning_rate": 6.040404040404041e-06, |
|
"loss": 0.8618, |
|
"step": 402000 |
|
}, |
|
{ |
|
"epoch": 13.11, |
|
"learning_rate": 6.035353535353535e-06, |
|
"loss": 0.8611, |
|
"step": 402500 |
|
}, |
|
{ |
|
"epoch": 13.12, |
|
"learning_rate": 6.030303030303031e-06, |
|
"loss": 0.8611, |
|
"step": 403000 |
|
}, |
|
{ |
|
"epoch": 13.14, |
|
"learning_rate": 6.025252525252526e-06, |
|
"loss": 0.86, |
|
"step": 403500 |
|
}, |
|
{ |
|
"epoch": 13.16, |
|
"learning_rate": 6.0202020202020204e-06, |
|
"loss": 0.8604, |
|
"step": 404000 |
|
}, |
|
{ |
|
"epoch": 13.17, |
|
"learning_rate": 6.015151515151516e-06, |
|
"loss": 0.8607, |
|
"step": 404500 |
|
}, |
|
{ |
|
"epoch": 13.19, |
|
"learning_rate": 6.010101010101011e-06, |
|
"loss": 0.8601, |
|
"step": 405000 |
|
}, |
|
{ |
|
"epoch": 13.2, |
|
"learning_rate": 6.005050505050506e-06, |
|
"loss": 0.8596, |
|
"step": 405500 |
|
}, |
|
{ |
|
"epoch": 13.22, |
|
"learning_rate": 6e-06, |
|
"loss": 0.8596, |
|
"step": 406000 |
|
}, |
|
{ |
|
"epoch": 13.24, |
|
"learning_rate": 5.994949494949496e-06, |
|
"loss": 0.8595, |
|
"step": 406500 |
|
}, |
|
{ |
|
"epoch": 13.25, |
|
"learning_rate": 5.9898989898989904e-06, |
|
"loss": 0.8591, |
|
"step": 407000 |
|
}, |
|
{ |
|
"epoch": 13.27, |
|
"learning_rate": 5.984848484848486e-06, |
|
"loss": 0.8594, |
|
"step": 407500 |
|
}, |
|
{ |
|
"epoch": 13.29, |
|
"learning_rate": 5.97979797979798e-06, |
|
"loss": 0.8587, |
|
"step": 408000 |
|
}, |
|
{ |
|
"epoch": 13.3, |
|
"learning_rate": 5.974747474747475e-06, |
|
"loss": 0.8585, |
|
"step": 408500 |
|
}, |
|
{ |
|
"epoch": 13.32, |
|
"learning_rate": 5.96969696969697e-06, |
|
"loss": 0.8586, |
|
"step": 409000 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"learning_rate": 5.964646464646465e-06, |
|
"loss": 0.8582, |
|
"step": 409500 |
|
}, |
|
{ |
|
"epoch": 13.35, |
|
"learning_rate": 5.95959595959596e-06, |
|
"loss": 0.8578, |
|
"step": 410000 |
|
}, |
|
{ |
|
"epoch": 13.35, |
|
"eval_loss": 0.8293071389198303, |
|
"eval_runtime": 90.0727, |
|
"eval_samples_per_second": 881.643, |
|
"eval_steps_per_second": 3.453, |
|
"step": 410000 |
|
}, |
|
{ |
|
"epoch": 13.37, |
|
"learning_rate": 5.954545454545455e-06, |
|
"loss": 0.8582, |
|
"step": 410500 |
|
}, |
|
{ |
|
"epoch": 13.38, |
|
"learning_rate": 5.949494949494949e-06, |
|
"loss": 0.858, |
|
"step": 411000 |
|
}, |
|
{ |
|
"epoch": 13.4, |
|
"learning_rate": 5.944444444444445e-06, |
|
"loss": 0.8582, |
|
"step": 411500 |
|
}, |
|
{ |
|
"epoch": 13.42, |
|
"learning_rate": 5.93939393939394e-06, |
|
"loss": 0.8575, |
|
"step": 412000 |
|
}, |
|
{ |
|
"epoch": 13.43, |
|
"learning_rate": 5.9343434343434345e-06, |
|
"loss": 0.8575, |
|
"step": 412500 |
|
}, |
|
{ |
|
"epoch": 13.45, |
|
"learning_rate": 5.9292929292929305e-06, |
|
"loss": 0.8566, |
|
"step": 413000 |
|
}, |
|
{ |
|
"epoch": 13.46, |
|
"learning_rate": 5.924242424242425e-06, |
|
"loss": 0.8575, |
|
"step": 413500 |
|
}, |
|
{ |
|
"epoch": 13.48, |
|
"learning_rate": 5.91919191919192e-06, |
|
"loss": 0.8563, |
|
"step": 414000 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 5.914141414141414e-06, |
|
"loss": 0.8565, |
|
"step": 414500 |
|
}, |
|
{ |
|
"epoch": 13.51, |
|
"learning_rate": 5.90909090909091e-06, |
|
"loss": 0.8561, |
|
"step": 415000 |
|
}, |
|
{ |
|
"epoch": 13.53, |
|
"learning_rate": 5.9040404040404045e-06, |
|
"loss": 0.8554, |
|
"step": 415500 |
|
}, |
|
{ |
|
"epoch": 13.55, |
|
"learning_rate": 5.8989898989899e-06, |
|
"loss": 0.8558, |
|
"step": 416000 |
|
}, |
|
{ |
|
"epoch": 13.56, |
|
"learning_rate": 5.893939393939394e-06, |
|
"loss": 0.8555, |
|
"step": 416500 |
|
}, |
|
{ |
|
"epoch": 13.58, |
|
"learning_rate": 5.88888888888889e-06, |
|
"loss": 0.8558, |
|
"step": 417000 |
|
}, |
|
{ |
|
"epoch": 13.59, |
|
"learning_rate": 5.883838383838384e-06, |
|
"loss": 0.8552, |
|
"step": 417500 |
|
}, |
|
{ |
|
"epoch": 13.61, |
|
"learning_rate": 5.878787878787879e-06, |
|
"loss": 0.8546, |
|
"step": 418000 |
|
}, |
|
{ |
|
"epoch": 13.63, |
|
"learning_rate": 5.873737373737374e-06, |
|
"loss": 0.8551, |
|
"step": 418500 |
|
}, |
|
{ |
|
"epoch": 13.64, |
|
"learning_rate": 5.868686868686869e-06, |
|
"loss": 0.8545, |
|
"step": 419000 |
|
}, |
|
{ |
|
"epoch": 13.66, |
|
"learning_rate": 5.863636363636364e-06, |
|
"loss": 0.8549, |
|
"step": 419500 |
|
}, |
|
{ |
|
"epoch": 13.68, |
|
"learning_rate": 5.858585858585859e-06, |
|
"loss": 0.8536, |
|
"step": 420000 |
|
}, |
|
{ |
|
"epoch": 13.68, |
|
"eval_loss": 0.8251619935035706, |
|
"eval_runtime": 88.5613, |
|
"eval_samples_per_second": 896.69, |
|
"eval_steps_per_second": 3.512, |
|
"step": 420000 |
|
}, |
|
{ |
|
"epoch": 13.69, |
|
"learning_rate": 5.853535353535354e-06, |
|
"loss": 0.8547, |
|
"step": 420500 |
|
}, |
|
{ |
|
"epoch": 13.71, |
|
"learning_rate": 5.8484848484848485e-06, |
|
"loss": 0.8536, |
|
"step": 421000 |
|
}, |
|
{ |
|
"epoch": 13.73, |
|
"learning_rate": 5.8434343434343445e-06, |
|
"loss": 0.8538, |
|
"step": 421500 |
|
}, |
|
{ |
|
"epoch": 13.74, |
|
"learning_rate": 5.838383838383839e-06, |
|
"loss": 0.8532, |
|
"step": 422000 |
|
}, |
|
{ |
|
"epoch": 13.76, |
|
"learning_rate": 5.833333333333334e-06, |
|
"loss": 0.8535, |
|
"step": 422500 |
|
}, |
|
{ |
|
"epoch": 13.77, |
|
"learning_rate": 5.828282828282828e-06, |
|
"loss": 0.8528, |
|
"step": 423000 |
|
}, |
|
{ |
|
"epoch": 13.79, |
|
"learning_rate": 5.823232323232324e-06, |
|
"loss": 0.8527, |
|
"step": 423500 |
|
}, |
|
{ |
|
"epoch": 13.81, |
|
"learning_rate": 5.8181818181818185e-06, |
|
"loss": 0.8533, |
|
"step": 424000 |
|
}, |
|
{ |
|
"epoch": 13.82, |
|
"learning_rate": 5.813131313131314e-06, |
|
"loss": 0.8523, |
|
"step": 424500 |
|
}, |
|
{ |
|
"epoch": 13.84, |
|
"learning_rate": 5.808080808080808e-06, |
|
"loss": 0.8524, |
|
"step": 425000 |
|
}, |
|
{ |
|
"epoch": 13.86, |
|
"learning_rate": 5.803030303030304e-06, |
|
"loss": 0.8525, |
|
"step": 425500 |
|
}, |
|
{ |
|
"epoch": 13.87, |
|
"learning_rate": 5.797979797979798e-06, |
|
"loss": 0.8525, |
|
"step": 426000 |
|
}, |
|
{ |
|
"epoch": 13.89, |
|
"learning_rate": 5.792929292929293e-06, |
|
"loss": 0.8518, |
|
"step": 426500 |
|
}, |
|
{ |
|
"epoch": 13.9, |
|
"learning_rate": 5.787878787878788e-06, |
|
"loss": 0.8514, |
|
"step": 427000 |
|
}, |
|
{ |
|
"epoch": 13.92, |
|
"learning_rate": 5.782828282828284e-06, |
|
"loss": 0.8515, |
|
"step": 427500 |
|
}, |
|
{ |
|
"epoch": 13.94, |
|
"learning_rate": 5.777777777777778e-06, |
|
"loss": 0.8508, |
|
"step": 428000 |
|
}, |
|
{ |
|
"epoch": 13.95, |
|
"learning_rate": 5.772727272727273e-06, |
|
"loss": 0.8512, |
|
"step": 428500 |
|
}, |
|
{ |
|
"epoch": 13.97, |
|
"learning_rate": 5.767676767676768e-06, |
|
"loss": 0.8506, |
|
"step": 429000 |
|
}, |
|
{ |
|
"epoch": 13.99, |
|
"learning_rate": 5.762626262626263e-06, |
|
"loss": 0.8511, |
|
"step": 429500 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 5.7575757575757586e-06, |
|
"loss": 0.8505, |
|
"step": 430000 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"eval_loss": 0.8214622139930725, |
|
"eval_runtime": 91.6492, |
|
"eval_samples_per_second": 866.478, |
|
"eval_steps_per_second": 3.393, |
|
"step": 430000 |
|
}, |
|
{ |
|
"epoch": 14.02, |
|
"learning_rate": 5.752525252525253e-06, |
|
"loss": 0.8508, |
|
"step": 430500 |
|
}, |
|
{ |
|
"epoch": 14.03, |
|
"learning_rate": 5.747474747474748e-06, |
|
"loss": 0.8502, |
|
"step": 431000 |
|
}, |
|
{ |
|
"epoch": 14.05, |
|
"learning_rate": 5.742424242424242e-06, |
|
"loss": 0.8499, |
|
"step": 431500 |
|
}, |
|
{ |
|
"epoch": 14.07, |
|
"learning_rate": 5.737373737373738e-06, |
|
"loss": 0.8496, |
|
"step": 432000 |
|
}, |
|
{ |
|
"epoch": 14.08, |
|
"learning_rate": 5.732323232323233e-06, |
|
"loss": 0.8497, |
|
"step": 432500 |
|
}, |
|
{ |
|
"epoch": 14.1, |
|
"learning_rate": 5.727272727272728e-06, |
|
"loss": 0.8494, |
|
"step": 433000 |
|
}, |
|
{ |
|
"epoch": 14.12, |
|
"learning_rate": 5.722222222222222e-06, |
|
"loss": 0.8488, |
|
"step": 433500 |
|
}, |
|
{ |
|
"epoch": 14.13, |
|
"learning_rate": 5.717171717171718e-06, |
|
"loss": 0.8482, |
|
"step": 434000 |
|
}, |
|
{ |
|
"epoch": 14.15, |
|
"learning_rate": 5.712121212121212e-06, |
|
"loss": 0.8489, |
|
"step": 434500 |
|
}, |
|
{ |
|
"epoch": 14.16, |
|
"learning_rate": 5.7070707070707075e-06, |
|
"loss": 0.8489, |
|
"step": 435000 |
|
}, |
|
{ |
|
"epoch": 14.18, |
|
"learning_rate": 5.702020202020202e-06, |
|
"loss": 0.8484, |
|
"step": 435500 |
|
}, |
|
{ |
|
"epoch": 14.2, |
|
"learning_rate": 5.696969696969698e-06, |
|
"loss": 0.8482, |
|
"step": 436000 |
|
}, |
|
{ |
|
"epoch": 14.21, |
|
"learning_rate": 5.691919191919192e-06, |
|
"loss": 0.848, |
|
"step": 436500 |
|
}, |
|
{ |
|
"epoch": 14.23, |
|
"learning_rate": 5.686868686868687e-06, |
|
"loss": 0.8477, |
|
"step": 437000 |
|
}, |
|
{ |
|
"epoch": 14.25, |
|
"learning_rate": 5.681818181818183e-06, |
|
"loss": 0.8482, |
|
"step": 437500 |
|
}, |
|
{ |
|
"epoch": 14.26, |
|
"learning_rate": 5.6767676767676775e-06, |
|
"loss": 0.8478, |
|
"step": 438000 |
|
}, |
|
{ |
|
"epoch": 14.28, |
|
"learning_rate": 5.671717171717173e-06, |
|
"loss": 0.8472, |
|
"step": 438500 |
|
}, |
|
{ |
|
"epoch": 14.3, |
|
"learning_rate": 5.666666666666667e-06, |
|
"loss": 0.8471, |
|
"step": 439000 |
|
}, |
|
{ |
|
"epoch": 14.31, |
|
"learning_rate": 5.661616161616162e-06, |
|
"loss": 0.8474, |
|
"step": 439500 |
|
}, |
|
{ |
|
"epoch": 14.33, |
|
"learning_rate": 5.656565656565657e-06, |
|
"loss": 0.8468, |
|
"step": 440000 |
|
}, |
|
{ |
|
"epoch": 14.33, |
|
"eval_loss": 0.8191090226173401, |
|
"eval_runtime": 90.6354, |
|
"eval_samples_per_second": 876.169, |
|
"eval_steps_per_second": 3.431, |
|
"step": 440000 |
|
}, |
|
{ |
|
"epoch": 14.34, |
|
"learning_rate": 5.651515151515152e-06, |
|
"loss": 0.847, |
|
"step": 440500 |
|
}, |
|
{ |
|
"epoch": 14.36, |
|
"learning_rate": 5.646464646464647e-06, |
|
"loss": 0.8464, |
|
"step": 441000 |
|
}, |
|
{ |
|
"epoch": 14.38, |
|
"learning_rate": 5.641414141414142e-06, |
|
"loss": 0.8464, |
|
"step": 441500 |
|
}, |
|
{ |
|
"epoch": 14.39, |
|
"learning_rate": 5.636363636363636e-06, |
|
"loss": 0.8463, |
|
"step": 442000 |
|
}, |
|
{ |
|
"epoch": 14.41, |
|
"learning_rate": 5.631313131313132e-06, |
|
"loss": 0.8457, |
|
"step": 442500 |
|
}, |
|
{ |
|
"epoch": 14.43, |
|
"learning_rate": 5.626262626262626e-06, |
|
"loss": 0.846, |
|
"step": 443000 |
|
}, |
|
{ |
|
"epoch": 14.44, |
"learning_rate": 5.6212121212121215e-06,
"loss": 0.8456,
"step": 443500
},
{
"epoch": 14.46,
"learning_rate": 5.616161616161616e-06,
"loss": 0.8448,
"step": 444000
},
{
"epoch": 14.47,
"learning_rate": 5.611111111111112e-06,
"loss": 0.8457,
"step": 444500
},
{
"epoch": 14.49,
"learning_rate": 5.606060606060606e-06,
"loss": 0.8455,
"step": 445000
},
{
"epoch": 14.51,
"learning_rate": 5.601010101010101e-06,
"loss": 0.845,
"step": 445500
},
{
"epoch": 14.52,
"learning_rate": 5.595959595959597e-06,
"loss": 0.8447,
"step": 446000
},
{
"epoch": 14.54,
"learning_rate": 5.5909090909090915e-06,
"loss": 0.8451,
"step": 446500
},
{
"epoch": 14.56,
"learning_rate": 5.585858585858587e-06,
"loss": 0.8442,
"step": 447000
},
{
"epoch": 14.57,
"learning_rate": 5.580808080808081e-06,
"loss": 0.8443,
"step": 447500
},
{
"epoch": 14.59,
"learning_rate": 5.575757575757577e-06,
"loss": 0.8442,
"step": 448000
},
{
"epoch": 14.6,
"learning_rate": 5.570707070707071e-06,
"loss": 0.844,
"step": 448500
},
{
"epoch": 14.62,
"learning_rate": 5.565656565656566e-06,
"loss": 0.8441,
"step": 449000
},
{
"epoch": 14.64,
"learning_rate": 5.560606060606061e-06,
"loss": 0.8434,
"step": 449500
},
{
"epoch": 14.65,
"learning_rate": 5.555555555555557e-06,
"loss": 0.8437,
"step": 450000
},
{
"epoch": 14.65,
"eval_loss": 0.8145061135292053,
"eval_runtime": 90.5256,
"eval_samples_per_second": 877.233,
"eval_steps_per_second": 3.435,
"step": 450000
},
{
"epoch": 14.67,
"learning_rate": 5.550505050505051e-06,
"loss": 0.8433,
"step": 450500
},
{
"epoch": 14.69,
"learning_rate": 5.545454545454546e-06,
"loss": 0.8435,
"step": 451000
},
{
"epoch": 14.7,
"learning_rate": 5.54040404040404e-06,
"loss": 0.8427,
"step": 451500
},
{
"epoch": 14.72,
"learning_rate": 5.5353535353535355e-06,
"loss": 0.8424,
"step": 452000
},
{
"epoch": 14.73,
"learning_rate": 5.530303030303031e-06,
"loss": 0.8433,
"step": 452500
},
{
"epoch": 14.75,
"learning_rate": 5.525252525252526e-06,
"loss": 0.8424,
"step": 453000
},
{
"epoch": 14.77,
"learning_rate": 5.52020202020202e-06,
"loss": 0.8425,
"step": 453500
},
{
"epoch": 14.78,
"learning_rate": 5.515151515151515e-06,
"loss": 0.8418,
"step": 454000
},
{
"epoch": 14.8,
"learning_rate": 5.510101010101011e-06,
"loss": 0.8421,
"step": 454500
},
{
"epoch": 14.82,
"learning_rate": 5.5050505050505056e-06,
"loss": 0.8417,
"step": 455000
},
{
"epoch": 14.83,
"learning_rate": 5.500000000000001e-06,
"loss": 0.8412,
"step": 455500
},
{
"epoch": 14.85,
"learning_rate": 5.494949494949495e-06,
"loss": 0.8412,
"step": 456000
},
{
"epoch": 14.86,
"learning_rate": 5.489898989898991e-06,
"loss": 0.8417,
"step": 456500
},
{
"epoch": 14.88,
"learning_rate": 5.484848484848485e-06,
"loss": 0.8412,
"step": 457000
},
{
"epoch": 14.9,
"learning_rate": 5.4797979797979804e-06,
"loss": 0.8415,
"step": 457500
},
{
"epoch": 14.91,
"learning_rate": 5.474747474747475e-06,
"loss": 0.8407,
"step": 458000
},
{
"epoch": 14.93,
"learning_rate": 5.469696969696971e-06,
"loss": 0.8402,
"step": 458500
},
{
"epoch": 14.95,
"learning_rate": 5.464646464646465e-06,
"loss": 0.8405,
"step": 459000
},
{
"epoch": 14.96,
"learning_rate": 5.45959595959596e-06,
"loss": 0.8401,
"step": 459500
},
{
"epoch": 14.98,
"learning_rate": 5.4545454545454545e-06,
"loss": 0.8396,
"step": 460000
},
{
"epoch": 14.98,
"eval_loss": 0.8129719495773315,
"eval_runtime": 88.2583,
"eval_samples_per_second": 899.768,
"eval_steps_per_second": 3.524,
"step": 460000
},
{
"epoch": 15.0,
"learning_rate": 5.4494949494949504e-06,
"loss": 0.8402,
"step": 460500
},
{
"epoch": 15.01,
"learning_rate": 5.444444444444445e-06,
"loss": 0.8397,
"step": 461000
},
{
"epoch": 15.03,
"learning_rate": 5.43939393939394e-06,
"loss": 0.8401,
"step": 461500
},
{
"epoch": 15.04,
"learning_rate": 5.434343434343434e-06,
"loss": 0.8389,
"step": 462000
},
{
"epoch": 15.06,
"learning_rate": 5.429292929292929e-06,
"loss": 0.8388,
"step": 462500
},
{
"epoch": 15.08,
"learning_rate": 5.424242424242425e-06,
"loss": 0.839,
"step": 463000
},
{
"epoch": 15.09,
"learning_rate": 5.41919191919192e-06,
"loss": 0.8382,
"step": 463500
},
{
"epoch": 15.11,
"learning_rate": 5.414141414141415e-06,
"loss": 0.8387,
"step": 464000
},
{
"epoch": 15.13,
"learning_rate": 5.409090909090909e-06,
"loss": 0.8384,
"step": 464500
},
{
"epoch": 15.14,
"learning_rate": 5.404040404040405e-06,
"loss": 0.8385,
"step": 465000
},
{
"epoch": 15.16,
"learning_rate": 5.398989898989899e-06,
"loss": 0.8386,
"step": 465500
},
{
"epoch": 15.17,
"learning_rate": 5.3939393939393945e-06,
"loss": 0.8383,
"step": 466000
},
{
"epoch": 15.19,
"learning_rate": 5.388888888888889e-06,
"loss": 0.8376,
"step": 466500
},
{
"epoch": 15.21,
"learning_rate": 5.383838383838385e-06,
"loss": 0.8381,
"step": 467000
},
{
"epoch": 15.22,
"learning_rate": 5.378787878787879e-06,
"loss": 0.8379,
"step": 467500
},
{
"epoch": 15.24,
"learning_rate": 5.373737373737374e-06,
"loss": 0.8377,
"step": 468000
},
{
"epoch": 15.26,
"learning_rate": 5.3686868686868685e-06,
"loss": 0.8374,
"step": 468500
},
{
"epoch": 15.27,
"learning_rate": 5.3636363636363645e-06,
"loss": 0.837,
"step": 469000
},
{
"epoch": 15.29,
"learning_rate": 5.358585858585859e-06,
"loss": 0.8364,
"step": 469500
},
{
"epoch": 15.3,
"learning_rate": 5.353535353535354e-06,
"loss": 0.8371,
"step": 470000
},
{
"epoch": 15.3,
"eval_loss": 0.8082245588302612,
"eval_runtime": 89.0044,
"eval_samples_per_second": 892.226,
"eval_steps_per_second": 3.494,
"step": 470000
},
{
"epoch": 15.32,
"learning_rate": 5.348484848484848e-06,
"loss": 0.8364,
"step": 470500
},
{
"epoch": 15.34,
"learning_rate": 5.343434343434344e-06,
"loss": 0.8365,
"step": 471000
},
{
"epoch": 15.35,
"learning_rate": 5.338383838383839e-06,
"loss": 0.836,
"step": 471500
},
{
"epoch": 15.37,
"learning_rate": 5.333333333333334e-06,
"loss": 0.8365,
"step": 472000
},
{
"epoch": 15.39,
"learning_rate": 5.328282828282829e-06,
"loss": 0.8355,
"step": 472500
},
{
"epoch": 15.4,
"learning_rate": 5.323232323232324e-06,
"loss": 0.8358,
"step": 473000
},
{
"epoch": 15.42,
"learning_rate": 5.318181818181819e-06,
"loss": 0.8357,
"step": 473500
},
{
"epoch": 15.43,
"learning_rate": 5.313131313131313e-06,
"loss": 0.8348,
"step": 474000
},
{
"epoch": 15.45,
"learning_rate": 5.3080808080808085e-06,
"loss": 0.8353,
"step": 474500
},
{
"epoch": 15.47,
"learning_rate": 5.303030303030303e-06,
"loss": 0.8347,
"step": 475000
},
{
"epoch": 15.48,
"learning_rate": 5.297979797979799e-06,
"loss": 0.8347,
"step": 475500
},
{
"epoch": 15.5,
"learning_rate": 5.292929292929293e-06,
"loss": 0.8347,
"step": 476000
},
{
"epoch": 15.52,
"learning_rate": 5.287878787878788e-06,
"loss": 0.8348,
"step": 476500
},
{
"epoch": 15.53,
"learning_rate": 5.2828282828282825e-06,
"loss": 0.834,
"step": 477000
},
{
"epoch": 15.55,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.8344,
"step": 477500
},
{
"epoch": 15.56,
"learning_rate": 5.272727272727273e-06,
"loss": 0.8339,
"step": 478000
},
{
"epoch": 15.58,
"learning_rate": 5.267676767676768e-06,
"loss": 0.834,
"step": 478500
},
{
"epoch": 15.6,
"learning_rate": 5.262626262626262e-06,
"loss": 0.8343,
"step": 479000
},
{
"epoch": 15.61,
"learning_rate": 5.257575757575758e-06,
"loss": 0.8337,
"step": 479500
},
{
"epoch": 15.63,
"learning_rate": 5.252525252525253e-06,
"loss": 0.8329,
"step": 480000
},
{
"epoch": 15.63,
"eval_loss": 0.8069521188735962,
"eval_runtime": 92.7218,
"eval_samples_per_second": 856.454,
"eval_steps_per_second": 3.354,
"step": 480000
},
{
"epoch": 15.65,
"learning_rate": 5.247474747474748e-06,
"loss": 0.833,
"step": 480500
},
{
"epoch": 15.66,
"learning_rate": 5.242424242424244e-06,
"loss": 0.8334,
"step": 481000
},
{
"epoch": 15.68,
"learning_rate": 5.237373737373738e-06,
"loss": 0.8331,
"step": 481500
},
{
"epoch": 15.7,
"learning_rate": 5.232323232323233e-06,
"loss": 0.8327,
"step": 482000
},
{
"epoch": 15.71,
"learning_rate": 5.2272727272727274e-06,
"loss": 0.833,
"step": 482500
},
{
"epoch": 15.73,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.8325,
"step": 483000
},
{
"epoch": 15.74,
"learning_rate": 5.217171717171718e-06,
"loss": 0.8324,
"step": 483500
},
{
"epoch": 15.76,
"learning_rate": 5.212121212121213e-06,
"loss": 0.8318,
"step": 484000
},
{
"epoch": 15.78,
"learning_rate": 5.207070707070707e-06,
"loss": 0.8318,
"step": 484500
},
{
"epoch": 15.79,
"learning_rate": 5.202020202020202e-06,
"loss": 0.8325,
"step": 485000
},
{
"epoch": 15.81,
"learning_rate": 5.196969696969697e-06,
"loss": 0.8318,
"step": 485500
},
{
"epoch": 15.83,
"learning_rate": 5.191919191919193e-06,
"loss": 0.8316,
"step": 486000
},
{
"epoch": 15.84,
"learning_rate": 5.186868686868687e-06,
"loss": 0.8312,
"step": 486500
},
{
"epoch": 15.86,
"learning_rate": 5.181818181818182e-06,
"loss": 0.8309,
"step": 487000
},
{
"epoch": 15.87,
"learning_rate": 5.176767676767676e-06,
"loss": 0.8314,
"step": 487500
},
{
"epoch": 15.89,
"learning_rate": 5.171717171717172e-06,
"loss": 0.8309,
"step": 488000
},
{
"epoch": 15.91,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.8307,
"step": 488500
},
{
"epoch": 15.92,
"learning_rate": 5.161616161616162e-06,
"loss": 0.8306,
"step": 489000
},
{
"epoch": 15.94,
"learning_rate": 5.156565656565658e-06,
"loss": 0.8302,
"step": 489500
},
{
"epoch": 15.96,
"learning_rate": 5.151515151515152e-06,
"loss": 0.8301,
"step": 490000
},
{
"epoch": 15.96,
"eval_loss": 0.8032618761062622,
"eval_runtime": 92.9139,
"eval_samples_per_second": 854.684,
"eval_steps_per_second": 3.347,
"step": 490000
},
{
"epoch": 15.97,
"learning_rate": 5.146464646464647e-06,
"loss": 0.8298,
"step": 490500
},
{
"epoch": 15.99,
"learning_rate": 5.1414141414141415e-06,
"loss": 0.8299,
"step": 491000
},
{
"epoch": 16.0,
"learning_rate": 5.1363636363636375e-06,
"loss": 0.8295,
"step": 491500
},
{
"epoch": 16.02,
"learning_rate": 5.131313131313132e-06,
"loss": 0.8295,
"step": 492000
},
{
"epoch": 16.04,
"learning_rate": 5.126262626262627e-06,
"loss": 0.8287,
"step": 492500
},
{
"epoch": 16.05,
"learning_rate": 5.121212121212121e-06,
"loss": 0.8289,
"step": 493000
},
{
"epoch": 16.07,
"learning_rate": 5.116161616161617e-06,
"loss": 0.8283,
"step": 493500
},
{
"epoch": 16.09,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.829,
"step": 494000
},
{
"epoch": 16.1,
"learning_rate": 5.106060606060607e-06,
"loss": 0.828,
"step": 494500
},
{
"epoch": 16.12,
"learning_rate": 5.101010101010101e-06,
"loss": 0.8285,
"step": 495000
},
{
"epoch": 16.13,
"learning_rate": 5.095959595959596e-06,
"loss": 0.8283,
"step": 495500
},
{
"epoch": 16.15,
"learning_rate": 5.090909090909091e-06,
"loss": 0.8276,
"step": 496000
},
{
"epoch": 16.17,
"learning_rate": 5.085858585858586e-06,
"loss": 0.8281,
"step": 496500
},
{
"epoch": 16.18,
"learning_rate": 5.0808080808080815e-06,
"loss": 0.8273,
"step": 497000
},
{
"epoch": 16.2,
"learning_rate": 5.075757575757576e-06,
"loss": 0.8277,
"step": 497500
},
{
"epoch": 16.22,
"learning_rate": 5.070707070707072e-06,
"loss": 0.8271,
"step": 498000
},
{
"epoch": 16.23,
"learning_rate": 5.065656565656566e-06,
"loss": 0.827,
"step": 498500
},
{
"epoch": 16.25,
"learning_rate": 5.060606060606061e-06,
"loss": 0.8269,
"step": 499000
},
{
"epoch": 16.27,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.8271,
"step": 499500
},
{
"epoch": 16.28,
"learning_rate": 5.0505050505050515e-06,
"loss": 0.8264,
"step": 500000
},
{
"epoch": 16.28,
"eval_loss": 0.7977773547172546,
"eval_runtime": 89.9319,
"eval_samples_per_second": 883.023,
"eval_steps_per_second": 3.458,
"step": 500000
},
{
"epoch": 16.3,
"learning_rate": 5.045454545454546e-06,
"loss": 0.8265,
"step": 500500
},
{
"epoch": 16.31,
"learning_rate": 5.040404040404041e-06,
"loss": 0.8264,
"step": 501000
},
{
"epoch": 16.33,
"learning_rate": 5.035353535353535e-06,
"loss": 0.8255,
"step": 501500
},
{
"epoch": 16.35,
"learning_rate": 5.030303030303031e-06,
"loss": 0.8261,
"step": 502000
},
{
"epoch": 16.36,
"learning_rate": 5.0252525252525255e-06,
"loss": 0.8252,
"step": 502500
},
{
"epoch": 16.38,
"learning_rate": 5.020202020202021e-06,
"loss": 0.8253,
"step": 503000
},
{
"epoch": 16.4,
"learning_rate": 5.015151515151515e-06,
"loss": 0.8254,
"step": 503500
},
{
"epoch": 16.41,
"learning_rate": 5.010101010101011e-06,
"loss": 0.8252,
"step": 504000
},
{
"epoch": 16.43,
"learning_rate": 5.005050505050505e-06,
"loss": 0.8253,
"step": 504500
},
{
"epoch": 16.44,
"learning_rate": 5e-06,
"loss": 0.8254,
"step": 505000
},
{
"epoch": 16.46,
"learning_rate": 4.9949494949494956e-06,
"loss": 0.8245,
"step": 505500
},
{
"epoch": 16.48,
"learning_rate": 4.98989898989899e-06,
"loss": 0.8238,
"step": 506000
},
{
"epoch": 16.49,
"learning_rate": 4.984848484848485e-06,
"loss": 0.8241,
"step": 506500
},
{
"epoch": 16.51,
"learning_rate": 4.97979797979798e-06,
"loss": 0.8239,
"step": 507000
},
{
"epoch": 16.53,
"learning_rate": 4.974747474747475e-06,
"loss": 0.8242,
"step": 507500
},
{
"epoch": 16.54,
"learning_rate": 4.9696969696969696e-06,
"loss": 0.8239,
"step": 508000
},
{
"epoch": 16.56,
"learning_rate": 4.964646464646465e-06,
"loss": 0.8235,
"step": 508500
},
{
"epoch": 16.57,
"learning_rate": 4.95959595959596e-06,
"loss": 0.8235,
"step": 509000
},
{
"epoch": 16.59,
"learning_rate": 4.954545454545455e-06,
"loss": 0.8233,
"step": 509500
},
{
"epoch": 16.61,
"learning_rate": 4.94949494949495e-06,
"loss": 0.8233,
"step": 510000
},
{
"epoch": 16.61,
"eval_loss": 0.7961931824684143,
"eval_runtime": 87.3644,
"eval_samples_per_second": 908.975,
"eval_steps_per_second": 3.56,
"step": 510000
},
{
"epoch": 16.62,
"learning_rate": 4.944444444444445e-06,
"loss": 0.8221,
"step": 510500
},
{
"epoch": 16.64,
"learning_rate": 4.93939393939394e-06,
"loss": 0.8231,
"step": 511000
},
{
"epoch": 16.66,
"learning_rate": 4.934343434343435e-06,
"loss": 0.8224,
"step": 511500
},
{
"epoch": 16.67,
"learning_rate": 4.92929292929293e-06,
"loss": 0.8228,
"step": 512000
},
{
"epoch": 16.69,
"learning_rate": 4.924242424242425e-06,
"loss": 0.822,
"step": 512500
},
{
"epoch": 16.7,
"learning_rate": 4.919191919191919e-06,
"loss": 0.822,
"step": 513000
},
{
"epoch": 16.72,
"learning_rate": 4.9141414141414145e-06,
"loss": 0.8223,
"step": 513500
},
{
"epoch": 16.74,
"learning_rate": 4.90909090909091e-06,
"loss": 0.8216,
"step": 514000
},
{
"epoch": 16.75,
"learning_rate": 4.904040404040405e-06,
"loss": 0.8216,
"step": 514500
},
{
"epoch": 16.77,
"learning_rate": 4.898989898989899e-06,
"loss": 0.8214,
"step": 515000
},
{
"epoch": 16.79,
"learning_rate": 4.893939393939394e-06,
"loss": 0.8216,
"step": 515500
},
{
"epoch": 16.8,
"learning_rate": 4.888888888888889e-06,
"loss": 0.8211,
"step": 516000
},
{
"epoch": 16.82,
"learning_rate": 4.883838383838384e-06,
"loss": 0.8207,
"step": 516500
},
{
"epoch": 16.83,
"learning_rate": 4.878787878787879e-06,
"loss": 0.8211,
"step": 517000
},
{
"epoch": 16.85,
"learning_rate": 4.873737373737374e-06,
"loss": 0.8205,
"step": 517500
},
{
"epoch": 16.87,
"learning_rate": 4.868686868686869e-06,
"loss": 0.8203,
"step": 518000
},
{
"epoch": 16.88,
"learning_rate": 4.863636363636364e-06,
"loss": 0.82,
"step": 518500
},
{
"epoch": 16.9,
"learning_rate": 4.858585858585859e-06,
"loss": 0.8198,
"step": 519000
},
{
"epoch": 16.92,
"learning_rate": 4.8535353535353545e-06,
"loss": 0.8195,
"step": 519500
},
{
"epoch": 16.93,
"learning_rate": 4.848484848484849e-06,
"loss": 0.8196,
"step": 520000
},
{
"epoch": 16.93,
"eval_loss": 0.7930386662483215,
"eval_runtime": 89.4487,
"eval_samples_per_second": 887.794,
"eval_steps_per_second": 3.477,
"step": 520000
},
{
"epoch": 16.95,
"learning_rate": 4.843434343434344e-06,
"loss": 0.8193,
"step": 520500
},
{
"epoch": 16.97,
"learning_rate": 4.838383838383839e-06,
"loss": 0.8193,
"step": 521000
},
{
"epoch": 16.98,
"learning_rate": 4.833333333333333e-06,
"loss": 0.819,
"step": 521500
},
{
"epoch": 17.0,
"learning_rate": 4.8282828282828285e-06,
"loss": 0.8188,
"step": 522000
},
{
"epoch": 17.01,
"learning_rate": 4.823232323232324e-06,
"loss": 0.8185,
"step": 522500
},
{
"epoch": 17.03,
"learning_rate": 4.818181818181819e-06,
"loss": 0.8185,
"step": 523000
},
{
"epoch": 17.05,
"learning_rate": 4.813131313131313e-06,
"loss": 0.8178,
"step": 523500
},
{
"epoch": 17.06,
"learning_rate": 4.808080808080808e-06,
"loss": 0.8177,
"step": 524000
},
{
"epoch": 17.08,
"learning_rate": 4.803030303030303e-06,
"loss": 0.8182,
"step": 524500
},
{
"epoch": 17.1,
"learning_rate": 4.7979797979797985e-06,
"loss": 0.8173,
"step": 525000
},
{
"epoch": 17.11,
"learning_rate": 4.792929292929293e-06,
"loss": 0.8181,
"step": 525500
},
{
"epoch": 17.13,
"learning_rate": 4.787878787878788e-06,
"loss": 0.8172,
"step": 526000
},
{
"epoch": 17.14,
"learning_rate": 4.782828282828283e-06,
"loss": 0.8174,
"step": 526500
},
{
"epoch": 17.16,
"learning_rate": 4.777777777777778e-06,
"loss": 0.817,
"step": 527000
},
{
"epoch": 17.18,
"learning_rate": 4.772727272727273e-06,
"loss": 0.8164,
"step": 527500
},
{
"epoch": 17.19,
"learning_rate": 4.7676767676767685e-06,
"loss": 0.8169,
"step": 528000
},
{
"epoch": 17.21,
"learning_rate": 4.762626262626263e-06,
"loss": 0.8162,
"step": 528500
},
{
"epoch": 17.23,
"learning_rate": 4.757575757575758e-06,
"loss": 0.8166,
"step": 529000
},
{
"epoch": 17.24,
"learning_rate": 4.752525252525253e-06,
"loss": 0.8156,
"step": 529500
},
{
"epoch": 17.26,
"learning_rate": 4.747474747474748e-06,
"loss": 0.8154,
"step": 530000
},
{
"epoch": 17.26,
"eval_loss": 0.7897705435752869,
"eval_runtime": 89.4546,
"eval_samples_per_second": 887.735,
"eval_steps_per_second": 3.477,
"step": 530000
},
{
"epoch": 17.27,
"learning_rate": 4.7424242424242426e-06,
"loss": 0.815,
"step": 530500
},
{
"epoch": 17.29,
"learning_rate": 4.737373737373738e-06,
"loss": 0.8146,
"step": 531000
},
{
"epoch": 17.31,
"learning_rate": 4.732323232323233e-06,
"loss": 0.8152,
"step": 531500
},
{
"epoch": 17.32,
"learning_rate": 4.727272727272728e-06,
"loss": 0.815,
"step": 532000
},
{
"epoch": 17.34,
"learning_rate": 4.722222222222222e-06,
"loss": 0.8143,
"step": 532500
},
{
"epoch": 17.36,
"learning_rate": 4.717171717171717e-06,
"loss": 0.8143,
"step": 533000
},
{
"epoch": 17.37,
"learning_rate": 4.7121212121212126e-06,
"loss": 0.814,
"step": 533500
},
{
"epoch": 17.39,
"learning_rate": 4.707070707070707e-06,
"loss": 0.8141,
"step": 534000
},
{
"epoch": 17.4,
"learning_rate": 4.702020202020202e-06,
"loss": 0.8139,
"step": 534500
},
{
"epoch": 17.42,
"learning_rate": 4.696969696969698e-06,
"loss": 0.8132,
"step": 535000
},
{
"epoch": 17.44,
"learning_rate": 4.691919191919192e-06,
"loss": 0.8127,
"step": 535500
},
{
"epoch": 17.45,
"learning_rate": 4.6868686868686874e-06,
"loss": 0.8138,
"step": 536000
},
{
"epoch": 17.47,
"learning_rate": 4.681818181818183e-06,
"loss": 0.8128,
"step": 536500
},
{
"epoch": 17.49,
"learning_rate": 4.676767676767677e-06,
"loss": 0.8125,
"step": 537000
},
{
"epoch": 17.5,
"learning_rate": 4.671717171717172e-06,
"loss": 0.8125,
"step": 537500
},
{
"epoch": 17.52,
"learning_rate": 4.666666666666667e-06,
"loss": 0.8122,
"step": 538000
},
{
"epoch": 17.54,
"learning_rate": 4.661616161616162e-06,
"loss": 0.812,
"step": 538500
},
{
"epoch": 17.55,
"learning_rate": 4.656565656565657e-06,
"loss": 0.8116,
"step": 539000
},
{
"epoch": 17.57,
"learning_rate": 4.651515151515152e-06,
"loss": 0.8121,
"step": 539500
},
{
"epoch": 17.58,
"learning_rate": 4.646464646464647e-06,
"loss": 0.8114,
"step": 540000
},
{
"epoch": 17.58,
"eval_loss": 0.7859430313110352,
"eval_runtime": 87.9327,
"eval_samples_per_second": 903.1,
"eval_steps_per_second": 3.537,
"step": 540000
},
{
"epoch": 17.6,
"learning_rate": 4.641414141414142e-06,
"loss": 0.8116,
"step": 540500
},
{
"epoch": 17.62,
"learning_rate": 4.636363636363636e-06,
"loss": 0.8106,
"step": 541000
},
{
"epoch": 17.63,
"learning_rate": 4.6313131313131315e-06,
"loss": 0.8103,
"step": 541500
},
{
"epoch": 17.65,
"learning_rate": 4.626262626262627e-06,
"loss": 0.81,
"step": 542000
},
{
"epoch": 17.67,
"learning_rate": 4.621212121212122e-06,
"loss": 0.8101,
"step": 542500
},
{
"epoch": 17.68,
"learning_rate": 4.616161616161616e-06,
"loss": 0.8094,
"step": 543000
},
{
"epoch": 17.7,
"learning_rate": 4.611111111111112e-06,
"loss": 0.8095,
"step": 543500
},
{
"epoch": 17.71,
"learning_rate": 4.606060606060606e-06,
"loss": 0.8092,
"step": 544000
},
{
"epoch": 17.73,
"learning_rate": 4.6010101010101015e-06,
"loss": 0.8087,
"step": 544500
},
{
"epoch": 17.75,
"learning_rate": 4.595959595959597e-06,
"loss": 0.8085,
"step": 545000
},
{
"epoch": 17.76,
"learning_rate": 4.590909090909092e-06,
"loss": 0.8088,
"step": 545500
},
{
"epoch": 17.78,
"learning_rate": 4.585858585858586e-06,
"loss": 0.8081,
"step": 546000
},
{
"epoch": 17.8,
"learning_rate": 4.580808080808081e-06,
"loss": 0.8078,
"step": 546500
},
{
"epoch": 17.81,
"learning_rate": 4.575757575757576e-06,
"loss": 0.8073,
"step": 547000
},
{
"epoch": 17.83,
"learning_rate": 4.5707070707070715e-06,
"loss": 0.8075,
"step": 547500
},
{
"epoch": 17.84,
"learning_rate": 4.565656565656566e-06,
"loss": 0.8066,
"step": 548000
},
{
"epoch": 17.86,
"learning_rate": 4.560606060606061e-06,
"loss": 0.8064,
"step": 548500
},
{
"epoch": 17.88,
"learning_rate": 4.555555555555556e-06,
"loss": 0.806,
"step": 549000
},
{
"epoch": 17.89,
"learning_rate": 4.55050505050505e-06,
"loss": 0.8062,
"step": 549500
},
{
"epoch": 17.91,
"learning_rate": 4.5454545454545455e-06,
"loss": 0.8052,
"step": 550000
},
{
"epoch": 17.91,
"eval_loss": 0.7798945903778076,
"eval_runtime": 88.3824,
"eval_samples_per_second": 898.505,
"eval_steps_per_second": 3.519,
"step": 550000
},
{
"epoch": 17.93,
"learning_rate": 4.540404040404041e-06,
"loss": 0.8054,
"step": 550500
},
{
"epoch": 17.94,
"learning_rate": 4.535353535353536e-06,
"loss": 0.8051,
"step": 551000
},
{
"epoch": 17.96,
"learning_rate": 4.53030303030303e-06,
"loss": 0.8043,
"step": 551500
},
{
"epoch": 17.97,
"learning_rate": 4.525252525252526e-06,
"loss": 0.8051,
"step": 552000
},
{
"epoch": 17.99,
"learning_rate": 4.520202020202021e-06,
"loss": 0.8036,
"step": 552500
},
{
"epoch": 18.01,
"learning_rate": 4.5151515151515155e-06,
"loss": 0.8038,
"step": 553000
},
{
"epoch": 18.02,
"learning_rate": 4.510101010101011e-06,
"loss": 0.803,
"step": 553500
},
{
"epoch": 18.04,
"learning_rate": 4.505050505050506e-06,
"loss": 0.8026,
"step": 554000
},
{
"epoch": 18.06,
"learning_rate": 4.5e-06,
"loss": 0.8019,
"step": 554500
},
{
"epoch": 18.07,
"learning_rate": 4.494949494949495e-06,
"loss": 0.8017,
"step": 555000
},
{
"epoch": 18.09,
"learning_rate": 4.48989898989899e-06,
"loss": 0.801,
"step": 555500
},
{
"epoch": 18.1,
"learning_rate": 4.4848484848484855e-06,
"loss": 0.8016,
"step": 556000
},
{
"epoch": 18.12,
"learning_rate": 4.47979797979798e-06,
"loss": 0.8005,
"step": 556500
},
{
"epoch": 18.14,
"learning_rate": 4.474747474747475e-06,
"loss": 0.7995,
"step": 557000
},
{
"epoch": 18.15,
"learning_rate": 4.46969696969697e-06,
"loss": 0.7997,
"step": 557500
},
{
"epoch": 18.17,
"learning_rate": 4.464646464646465e-06,
"loss": 0.7994,
"step": 558000
},
{
"epoch": 18.19,
"learning_rate": 4.4595959595959596e-06,
"loss": 0.7986,
"step": 558500
},
{
"epoch": 18.2,
"learning_rate": 4.454545454545455e-06,
"loss": 0.7985,
"step": 559000
},
{
"epoch": 18.22,
"learning_rate": 4.44949494949495e-06,
"loss": 0.7975,
"step": 559500
},
{
"epoch": 18.24,
"learning_rate": 4.444444444444444e-06,
"loss": 0.7969,
"step": 560000
},
{
"epoch": 18.24,
"eval_loss": 0.7699345350265503,
"eval_runtime": 91.5062,
"eval_samples_per_second": 867.832,
"eval_steps_per_second": 3.399,
"step": 560000
},
{
"epoch": 18.25,
"learning_rate": 4.43939393939394e-06,
"loss": 0.7963,
"step": 560500
},
{
"epoch": 18.27,
"learning_rate": 4.434343434343435e-06,
"loss": 0.7959,
"step": 561000
},
{
"epoch": 18.28,
"learning_rate": 4.42929292929293e-06,
"loss": 0.7952,
"step": 561500
},
{
"epoch": 18.3,
"learning_rate": 4.424242424242425e-06,
"loss": 0.7947,
"step": 562000
},
{
"epoch": 18.32,
"learning_rate": 4.41919191919192e-06,
"loss": 0.794,
"step": 562500
},
{
"epoch": 18.33,
"learning_rate": 4.414141414141415e-06,
"loss": 0.7937,
"step": 563000
},
{
"epoch": 18.35,
"learning_rate": 4.409090909090909e-06,
"loss": 0.7929,
"step": 563500
},
{
"epoch": 18.37,
"learning_rate": 4.4040404040404044e-06,
"loss": 0.7926,
"step": 564000
},
{
"epoch": 18.38,
"learning_rate": 4.3989898989899e-06,
"loss": 0.7924,
"step": 564500
},
{
"epoch": 18.4,
"learning_rate": 4.393939393939394e-06,
"loss": 0.7908,
"step": 565000
},
{
"epoch": 18.41,
"learning_rate": 4.388888888888889e-06,
"loss": 0.7906,
"step": 565500
},
{
"epoch": 18.43,
"learning_rate": 4.383838383838384e-06,
"loss": 0.7902,
"step": 566000
},
{
"epoch": 18.45,
"learning_rate": 4.378787878787879e-06,
"loss": 0.7894,
"step": 566500
},
{
"epoch": 18.46,
"learning_rate": 4.373737373737374e-06,
"loss": 0.7884,
"step": 567000
},
{
"epoch": 18.48,
"learning_rate": 4.368686868686869e-06,
"loss": 0.7881,
"step": 567500
},
{
"epoch": 18.5,
"learning_rate": 4.363636363636364e-06,
"loss": 0.7869,
"step": 568000
},
{
"epoch": 18.51,
"learning_rate": 4.358585858585859e-06,
"loss": 0.7861,
"step": 568500
},
{
"epoch": 18.53,
"learning_rate": 4.353535353535353e-06,
"loss": 0.7858,
"step": 569000
},
{
"epoch": 18.54,
"learning_rate": 4.348484848484849e-06,
"loss": 0.7854,
"step": 569500
},
{
"epoch": 18.56,
"learning_rate": 4.343434343434344e-06,
"loss": 0.7846,
"step": 570000
},
{
"epoch": 18.56,
"eval_loss": 0.7586176991462708,
"eval_runtime": 88.5769,
"eval_samples_per_second": 896.531,
"eval_steps_per_second": 3.511,
"step": 570000
},
{
"epoch": 18.58,
"learning_rate": 4.338383838383839e-06,
"loss": 0.7837,
"step": 570500
},
{
"epoch": 18.59,
"learning_rate": 4.333333333333334e-06,
"loss": 0.7832,
"step": 571000
},
{
"epoch": 18.61,
"learning_rate": 4.328282828282829e-06,
"loss": 0.7826,
"step": 571500
},
{
"epoch": 18.63,
"learning_rate": 4.323232323232323e-06,
"loss": 0.7824,
"step": 572000
},
{
"epoch": 18.64,
"learning_rate": 4.3181818181818185e-06,
"loss": 0.7817,
"step": 572500
},
{
"epoch": 18.66,
"learning_rate": 4.313131313131314e-06,
"loss": 0.7812,
"step": 573000
},
{
"epoch": 18.67,
"learning_rate": 4.308080808080809e-06,
"loss": 0.7805,
"step": 573500
},
{
"epoch": 18.69,
"learning_rate": 4.303030303030303e-06,
"loss": 0.78,
"step": 574000
},
{
"epoch": 18.71,
"learning_rate": 4.297979797979798e-06,
"loss": 0.7789,
"step": 574500
},
{
"epoch": 18.72,
"learning_rate": 4.292929292929293e-06,
"loss": 0.7789,
"step": 575000
},
{
"epoch": 18.74,
"learning_rate": 4.287878787878788e-06,
"loss": 0.7782,
"step": 575500
},
{
"epoch": 18.76,
"learning_rate": 4.282828282828283e-06,
"loss": 0.778,
"step": 576000
},
{
"epoch": 18.77,
"learning_rate": 4.277777777777778e-06,
"loss": 0.777,
"step": 576500
},
{
"epoch": 18.79,
"learning_rate": 4.272727272727273e-06,
"loss": 0.7762,
"step": 577000
},
{
"epoch": 18.8,
"learning_rate": 4.267676767676767e-06,
"loss": 0.776,
"step": 577500
},
{
"epoch": 18.82,
"learning_rate": 4.262626262626263e-06,
"loss": 0.7755,
"step": 578000
},
{
"epoch": 18.84,
"learning_rate": 4.2575757575757585e-06,
"loss": 0.7748,
"step": 578500
},
{
"epoch": 18.85,
"learning_rate": 4.252525252525253e-06,
"loss": 0.7744,
"step": 579000
},
{
"epoch": 18.87,
"learning_rate": 4.247474747474748e-06,
"loss": 0.7742,
"step": 579500
},
{
"epoch": 18.89,
"learning_rate": 4.242424242424243e-06,
"loss": 0.7735,
"step": 580000
},
{
"epoch": 18.89,
"eval_loss": 0.7486483454704285,
"eval_runtime": 89.3916,
"eval_samples_per_second": 888.361,
"eval_steps_per_second": 3.479,
"step": 580000
},
{
"epoch": 18.9,
"learning_rate": 4.237373737373737e-06,
"loss": 0.7729,
"step": 580500
},
{
"epoch": 18.92,
"learning_rate": 4.2323232323232325e-06,
"loss": 0.7723,
"step": 581000
},
{
"epoch": 18.94,
"learning_rate": 4.227272727272728e-06,
"loss": 0.7725,
"step": 581500
},
{
"epoch": 18.95,
"learning_rate": 4.222222222222223e-06,
"loss": 0.7715,
"step": 582000
},
{
"epoch": 18.97,
"learning_rate": 4.217171717171717e-06,
"loss": 0.7712,
"step": 582500
},
{
"epoch": 18.98,
"learning_rate": 4.212121212121212e-06,
"loss": 0.7706,
"step": 583000
},
{
"epoch": 19.0,
"learning_rate": 4.207070707070707e-06,
"loss": 0.7705,
"step": 583500
},
{
"epoch": 19.02,
"learning_rate": 4.2020202020202026e-06,
"loss": 0.7698,
"step": 584000
},
{
"epoch": 19.03,
"learning_rate": 4.196969696969697e-06,
"loss": 0.769,
"step": 584500
},
{
"epoch": 19.05,
"learning_rate": 4.191919191919192e-06,
"loss": 0.7683,
"step": 585000
},
{
"epoch": 19.07,
"learning_rate": 4.186868686868687e-06,
"loss": 0.7689,
"step": 585500
},
{
"epoch": 19.08,
"learning_rate": 4.181818181818182e-06,
"loss": 0.7683,
"step": 586000
},
{
"epoch": 19.1,
"learning_rate": 4.1767676767676774e-06,
"loss": 0.767,
"step": 586500
},
{
"epoch": 19.11,
"learning_rate": 4.1717171717171726e-06,
"loss": 0.7677,
"step": 587000
},
{
"epoch": 19.13,
"learning_rate": 4.166666666666667e-06,
"loss": 0.7673,
"step": 587500
},
{
"epoch": 19.15,
"learning_rate": 4.161616161616162e-06,
"loss": 0.7668,
"step": 588000
},
{
"epoch": 19.16,
"learning_rate": 4.156565656565657e-06,
"loss": 0.7665,
"step": 588500
},
{
"epoch": 19.18,
"learning_rate": 4.151515151515152e-06,
"loss": 0.7663,
"step": 589000
},
{
"epoch": 19.2,
"learning_rate": 4.146464646464647e-06,
"loss": 0.7653,
"step": 589500
},
{
"epoch": 19.21,
"learning_rate": 4.141414141414142e-06,
"loss": 0.7648,
"step": 590000
},
{
"epoch": 19.21,
"eval_loss": 0.7404940724372864,
"eval_runtime": 90.2196,
"eval_samples_per_second": 880.208,
"eval_steps_per_second": 3.447,
"step": 590000
},
{
"epoch": 19.23,
"learning_rate": 4.136363636363637e-06,
"loss": 0.7649,
"step": 590500
},
{
"epoch": 19.24,
"learning_rate": 4.131313131313132e-06,
"loss": 0.7642,
"step": 591000
},
{
"epoch": 19.26,
"learning_rate": 4.126262626262626e-06,
"loss": 0.764,
"step": 591500
},
{
"epoch": 19.28,
"learning_rate": 4.1212121212121215e-06,
"loss": 0.7635,
"step": 592000
},
{
"epoch": 19.29,
"learning_rate": 4.116161616161617e-06,
"loss": 0.7635,
"step": 592500
},
{
"epoch": 19.31,
"learning_rate": 4.111111111111111e-06,
"loss": 0.7627,
"step": 593000
},
{
"epoch": 19.33,
"learning_rate": 4.106060606060606e-06,
"loss": 0.7625,
"step": 593500
},
{
"epoch": 19.34,
"learning_rate": 4.101010101010101e-06,
"loss": 0.7623,
"step": 594000
},
{
"epoch": 19.36,
"learning_rate": 4.095959595959596e-06,
"loss": 0.7613,
"step": 594500
},
{
"epoch": 19.37,
"learning_rate": 4.0909090909090915e-06,
"loss": 0.762,
"step": 595000
},
{
"epoch": 19.39,
"learning_rate": 4.085858585858587e-06,
"loss": 0.7613,
"step": 595500
},
{
"epoch": 19.41,
"learning_rate": 4.080808080808081e-06,
"loss": 0.7611,
"step": 596000
},
{
"epoch": 19.42,
"learning_rate": 4.075757575757576e-06,
"loss": 0.7599,
"step": 596500
},
{
"epoch": 19.44,
"learning_rate": 4.070707070707071e-06,
"loss": 0.7601,
"step": 597000
},
{
"epoch": 19.46,
"learning_rate": 4.065656565656566e-06,
"loss": 0.7604,
"step": 597500
},
{
"epoch": 19.47,
"learning_rate": 4.060606060606061e-06,
"loss": 0.7596,
"step": 598000
},
{
"epoch": 19.49,
"learning_rate": 4.055555555555556e-06,
"loss": 0.7597,
"step": 598500
},
{
"epoch": 19.51,
"learning_rate": 4.050505050505051e-06,
"loss": 0.7588,
"step": 599000
},
{
"epoch": 19.52,
"learning_rate": 4.045454545454546e-06,
"loss": 0.7588,
"step": 599500
},
{
"epoch": 19.54,
"learning_rate": 4.04040404040404e-06,
"loss": 0.7586,
"step": 600000
},
{
"epoch": 19.54,
"eval_loss": 0.7349938750267029,
"eval_runtime": 88.7424,
"eval_samples_per_second": 894.86,
"eval_steps_per_second": 3.505,
"step": 600000
},
{
"epoch": 19.55,
"learning_rate": 4.0353535353535355e-06,
"loss": 0.7584,
"step": 600500
},
{
"epoch": 19.57,
"learning_rate": 4.030303030303031e-06,
"loss": 0.7582,
"step": 601000
},
{
"epoch": 19.59,
"learning_rate": 4.025252525252526e-06,
"loss": 0.7574,
"step": 601500
},
{
"epoch": 19.6,
"learning_rate": 4.02020202020202e-06,
"loss": 0.7573,
"step": 602000
},
{
"epoch": 19.62,
"learning_rate": 4.015151515151515e-06,
"loss": 0.7572,
"step": 602500
},
{
"epoch": 19.64,
"learning_rate": 4.01010101010101e-06,
"loss": 0.7571,
"step": 603000
},
{
"epoch": 19.65,
"learning_rate": 4.0050505050505055e-06,
"loss": 0.7563,
"step": 603500
},
{
"epoch": 19.67,
"learning_rate": 4.000000000000001e-06,
"loss": 0.7561,
"step": 604000
},
{
"epoch": 19.68,
"learning_rate": 3.994949494949496e-06,
"loss": 0.7559,
"step": 604500
},
{
"epoch": 19.7,
"learning_rate": 3.98989898989899e-06,
"loss": 0.7559,
"step": 605000
},
{
"epoch": 19.72,
"learning_rate": 3.984848484848485e-06,
"loss": 0.7557,
"step": 605500
},
{
"epoch": 19.73,
"learning_rate": 3.97979797979798e-06,
"loss": 0.7557,
"step": 606000
},
{
"epoch": 19.75,
"learning_rate": 3.9747474747474755e-06,
"loss": 0.7552,
"step": 606500
},
{
"epoch": 19.77,
"learning_rate": 3.96969696969697e-06,
"loss": 0.7546,
"step": 607000
},
{
"epoch": 19.78,
"learning_rate": 3.964646464646465e-06,
"loss": 0.755,
"step": 607500
},
{
"epoch": 19.8,
"learning_rate": 3.95959595959596e-06,
"loss": 0.7547,
"step": 608000
},
{
"epoch": 19.81,
"learning_rate": 3.954545454545454e-06,
"loss": 0.7539,
"step": 608500
},
{
"epoch": 19.83,
"learning_rate": 3.9494949494949496e-06,
"loss": 0.7541,
"step": 609000
},
{
"epoch": 19.85,
"learning_rate": 3.944444444444445e-06,
"loss": 0.7541,
"step": 609500
},
{
"epoch": 19.86,
"learning_rate": 3.93939393939394e-06,
"loss": 0.7531,
"step": 610000
},
{
"epoch": 19.86,
"eval_loss": 0.7310336828231812,
"eval_runtime": 88.6702,
"eval_samples_per_second": 895.588,
"eval_steps_per_second": 3.507,
"step": 610000
},
{
"epoch": 19.88,
"learning_rate": 3.934343434343434e-06,
"loss": 0.7535,
"step": 610500
},
{
"epoch": 19.9,
"learning_rate": 3.929292929292929e-06,
"loss": 0.7533,
"step": 611000
},
{
"epoch": 19.91,
"learning_rate": 3.9242424242424244e-06,
"loss": 0.7529,
"step": 611500
},
{
"epoch": 19.93,
"learning_rate": 3.9191919191919196e-06,
"loss": 0.7529,
"step": 612000
},
{
"epoch": 19.94,
"learning_rate": 3.914141414141415e-06,
"loss": 0.7525,
"step": 612500
},
{
"epoch": 19.96,
"learning_rate": 3.90909090909091e-06,
"loss": 0.7522,
"step": 613000
},
{
"epoch": 19.98,
"learning_rate": 3.904040404040404e-06,
"loss": 0.7523,
"step": 613500
},
{
"epoch": 19.99,
"learning_rate": 3.898989898989899e-06,
"loss": 0.7517,
"step": 614000
},
{
"epoch": 20.01,
"learning_rate": 3.8939393939393944e-06,
"loss": 0.7513,
"step": 614500
},
{
"epoch": 20.03,
"learning_rate": 3.88888888888889e-06,
"loss": 0.7514,
"step": 615000
},
{
"epoch": 20.04,
"learning_rate": 3.883838383838384e-06,
"loss": 0.7512,
"step": 615500
},
{
"epoch": 20.06,
"learning_rate": 3.878787878787879e-06,
"loss": 0.7506,
"step": 616000
},
{
"epoch": 20.07,
"learning_rate": 3.873737373737374e-06,
"loss": 0.7507,
"step": 616500
},
{
"epoch": 20.09,
"learning_rate": 3.868686868686869e-06,
"loss": 0.7504,
"step": 617000
},
{
"epoch": 20.11,
"learning_rate": 3.863636363636364e-06,
"loss": 0.7504,
"step": 617500
},
{
"epoch": 20.12,
"learning_rate": 3.858585858585859e-06,
"loss": 0.7503,
"step": 618000
},
{
"epoch": 20.14,
"learning_rate": 3.853535353535354e-06,
"loss": 0.75,
"step": 618500
},
{
"epoch": 20.16,
"learning_rate": 3.848484848484848e-06,
"loss": 0.7501,
"step": 619000
},
{
"epoch": 20.17,
"learning_rate": 3.843434343434343e-06,
"loss": 0.7496,
"step": 619500
},
{
"epoch": 20.19,
"learning_rate": 3.8383838383838385e-06,
"loss": 0.7494,
"step": 620000
},
{
"epoch": 20.19,
"eval_loss": 0.7278521656990051,
"eval_runtime": 91.3349,
"eval_samples_per_second": 869.459,
"eval_steps_per_second": 3.405,
"step": 620000
}
],
"max_steps": 1000000,
"num_train_epochs": 33,
"total_flos": 4.911250179876717e+18,
"trial_name": null,
"trial_params": null
}