{
  "best_metric": 27.8224,
  "best_model_checkpoint": "./ko-en_mbartLarge_exp20p_batch/checkpoint-8000",
  "epoch": 11.138183083884442,
  "eval_steps": 4000,
  "global_step": 24000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23,
      "learning_rate": 1.25e-05,
      "loss": 1.5446,
      "step": 500
    },
    {
      "epoch": 0.46,
      "learning_rate": 2.5e-05,
      "loss": 1.343,
      "step": 1000
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.2767,
      "step": 1500
    },
    {
      "epoch": 0.93,
      "learning_rate": 5e-05,
      "loss": 1.242,
      "step": 2000
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.9995663025798074e-05,
      "loss": 1.1213,
      "step": 2500
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.998261883224581e-05,
      "loss": 1.0725,
      "step": 3000
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.996087194586878e-05,
      "loss": 1.0628,
      "step": 3500
    },
    {
      "epoch": 1.86,
      "learning_rate": 4.99304299421927e-05,
      "loss": 1.0603,
      "step": 4000
    },
    {
      "epoch": 1.86,
      "eval_bleu": 27.7215,
      "eval_gen_len": 18.8456,
      "eval_loss": 1.1473960876464844,
      "eval_runtime": 1471.0712,
      "eval_samples_per_second": 11.717,
      "eval_steps_per_second": 1.465,
      "step": 4000
    },
    {
      "epoch": 2.09,
      "learning_rate": 4.989130342568659e-05,
      "loss": 0.9588,
      "step": 4500
    },
    {
      "epoch": 2.32,
      "learning_rate": 4.984371446700271e-05,
      "loss": 0.8217,
      "step": 5000
    },
    {
      "epoch": 2.55,
      "learning_rate": 4.978741887280011e-05,
      "loss": 0.8436,
      "step": 5500
    },
    {
      "epoch": 2.78,
      "learning_rate": 4.972238441511973e-05,
      "loss": 0.8488,
      "step": 6000
    },
    {
      "epoch": 3.02,
      "learning_rate": 4.964873791724829e-05,
      "loss": 0.8471,
      "step": 6500
    },
    {
      "epoch": 3.25,
      "learning_rate": 4.956650503393613e-05,
      "loss": 0.6385,
      "step": 7000
    },
    {
      "epoch": 3.48,
      "learning_rate": 4.947571441100011e-05,
      "loss": 0.6568,
      "step": 7500
    },
    {
      "epoch": 3.71,
      "learning_rate": 4.937660479537447e-05,
      "loss": 0.6774,
      "step": 8000
    },
    {
      "epoch": 3.71,
      "eval_bleu": 27.8224,
      "eval_gen_len": 18.6267,
      "eval_loss": 1.2035210132598877,
      "eval_runtime": 1442.7545,
      "eval_samples_per_second": 11.947,
      "eval_steps_per_second": 1.494,
      "step": 8000
    },
    {
      "epoch": 3.94,
      "learning_rate": 4.9269037523258574e-05,
      "loss": 0.6888,
      "step": 8500
    },
    {
      "epoch": 4.18,
      "learning_rate": 4.915304991597116e-05,
      "loss": 0.5341,
      "step": 9000
    },
    {
      "epoch": 4.41,
      "learning_rate": 4.902842459717919e-05,
      "loss": 0.505,
      "step": 9500
    },
    {
      "epoch": 4.64,
      "learning_rate": 4.889570329351636e-05,
      "loss": 0.5258,
      "step": 10000
    },
    {
      "epoch": 4.87,
      "learning_rate": 4.8754400304796405e-05,
      "loss": 0.5402,
      "step": 10500
    },
    {
      "epoch": 5.11,
      "learning_rate": 4.860482247305548e-05,
      "loss": 0.4654,
      "step": 11000
    },
    {
      "epoch": 5.34,
      "learning_rate": 4.8447021903716625e-05,
      "loss": 0.3817,
      "step": 11500
    },
    {
      "epoch": 5.57,
      "learning_rate": 4.8281053566592745e-05,
      "loss": 0.4006,
      "step": 12000
    },
    {
      "epoch": 5.57,
      "eval_bleu": 27.049,
      "eval_gen_len": 18.5618,
      "eval_loss": 1.4320647716522217,
      "eval_runtime": 1443.7655,
      "eval_samples_per_second": 11.939,
      "eval_steps_per_second": 1.493,
      "step": 12000
    },
    {
      "epoch": 5.8,
      "learning_rate": 4.810697527673781e-05,
      "loss": 0.4168,
      "step": 12500
    },
    {
      "epoch": 6.03,
      "learning_rate": 4.792484767430706e-05,
      "loss": 0.4084,
      "step": 13000
    },
    {
      "epoch": 6.27,
      "learning_rate": 4.7734734203432976e-05,
      "loss": 0.2881,
      "step": 13500
    },
    {
      "epoch": 6.5,
      "learning_rate": 4.7537105014759254e-05,
      "loss": 0.3053,
      "step": 14000
    },
    {
      "epoch": 6.73,
      "learning_rate": 4.733123687447865e-05,
      "loss": 0.3213,
      "step": 14500
    },
    {
      "epoch": 6.96,
      "learning_rate": 4.711758965001871e-05,
      "loss": 0.3334,
      "step": 15000
    },
    {
      "epoch": 7.19,
      "learning_rate": 4.689623776536918e-05,
      "loss": 0.2383,
      "step": 15500
    },
    {
      "epoch": 7.43,
      "learning_rate": 4.66672583284374e-05,
      "loss": 0.2328,
      "step": 16000
    },
    {
      "epoch": 7.43,
      "eval_bleu": 25.8623,
      "eval_gen_len": 18.4608,
      "eval_loss": 1.73158597946167,
      "eval_runtime": 1439.3983,
      "eval_samples_per_second": 11.975,
      "eval_steps_per_second": 1.497,
      "step": 16000
    },
    {
      "epoch": 7.66,
      "learning_rate": 4.643073110418783e-05,
      "loss": 0.248,
      "step": 16500
    },
    {
      "epoch": 7.89,
      "learning_rate": 4.618673848685586e-05,
      "loss": 0.257,
      "step": 17000
    },
    {
      "epoch": 8.12,
      "learning_rate": 4.593536547124587e-05,
      "loss": 0.2129,
      "step": 17500
    },
    {
      "epoch": 8.35,
      "learning_rate": 4.567722417359549e-05,
      "loss": 0.179,
      "step": 18000
    },
    {
      "epoch": 8.59,
      "learning_rate": 4.541136991303665e-05,
      "loss": 0.1914,
      "step": 18500
    },
    {
      "epoch": 8.82,
      "learning_rate": 4.513840535376658e-05,
      "loss": 0.2002,
      "step": 19000
    },
    {
      "epoch": 9.05,
      "learning_rate": 4.485842558296277e-05,
      "loss": 0.1893,
      "step": 19500
    },
    {
      "epoch": 9.28,
      "learning_rate": 4.457152813155099e-05,
      "loss": 0.1369,
      "step": 20000
    },
    {
      "epoch": 9.28,
      "eval_bleu": 25.6902,
      "eval_gen_len": 18.3337,
      "eval_loss": 1.9367432594299316,
      "eval_runtime": 1429.8923,
      "eval_samples_per_second": 12.055,
      "eval_steps_per_second": 1.507,
      "step": 20000
    },
    {
      "epoch": 9.51,
      "learning_rate": 4.4277812940230354e-05,
      "loss": 0.1495,
      "step": 20500
    },
    {
      "epoch": 9.75,
      "learning_rate": 4.3977989818708614e-05,
      "loss": 0.1579,
      "step": 21000
    },
    {
      "epoch": 9.98,
      "learning_rate": 4.367096154941818e-05,
      "loss": 0.1631,
      "step": 21500
    },
    {
      "epoch": 10.21,
      "learning_rate": 4.3357429252499605e-05,
      "loss": 0.1143,
      "step": 22000
    },
    {
      "epoch": 10.44,
      "learning_rate": 4.3037502146897856e-05,
      "loss": 0.1177,
      "step": 22500
    },
    {
      "epoch": 10.67,
      "learning_rate": 4.2711291679188836e-05,
      "loss": 0.1267,
      "step": 23000
    },
    {
      "epoch": 10.91,
      "learning_rate": 4.2378911484757056e-05,
      "loss": 0.1315,
      "step": 23500
    },
    {
      "epoch": 11.14,
      "learning_rate": 4.204047734821069e-05,
      "loss": 0.1067,
      "step": 24000
    },
    {
      "epoch": 11.14,
      "eval_bleu": 26.0105,
      "eval_gen_len": 18.4579,
      "eval_loss": 2.086793899536133,
      "eval_runtime": 1436.0818,
      "eval_samples_per_second": 12.003,
      "eval_steps_per_second": 1.501,
      "step": 24000
    },
    {
      "epoch": 11.14,
      "step": 24000,
      "total_flos": 3.328711323549696e+18,
      "train_loss": 0.5267434423764547,
      "train_runtime": 55597.7197,
      "train_samples_per_second": 99.213,
      "train_steps_per_second": 1.55
    }
  ],
  "logging_steps": 500,
  "max_steps": 86160,
  "num_train_epochs": 40,
  "save_steps": 4000,
  "total_flos": 3.328711323549696e+18,
  "trial_name": null,
  "trial_params": null
}