{
  "best_metric": 27.8203,
  "best_model_checkpoint": "./ko-en_mbartLarge_exp20p_batch64_linear/checkpoint-8000",
  "epoch": 3.712727694628147,
  "eval_steps": 4000,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23,
      "learning_rate": 1.25e-05,
      "loss": 1.5426,
      "step": 500
    },
    {
      "epoch": 0.46,
      "learning_rate": 2.5e-05,
      "loss": 1.343,
      "step": 1000
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.2768,
      "step": 1500
    },
    {
      "epoch": 0.93,
      "learning_rate": 5e-05,
      "loss": 1.242,
      "step": 2000
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.970294676806084e-05,
      "loss": 1.1208,
      "step": 2500
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.940589353612168e-05,
      "loss": 1.0718,
      "step": 3000
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.910884030418251e-05,
      "loss": 1.061,
      "step": 3500
    },
    {
      "epoch": 1.86,
      "learning_rate": 4.881178707224335e-05,
      "loss": 1.0585,
      "step": 4000
    },
    {
      "epoch": 1.86,
      "eval_bleu": 27.494,
      "eval_gen_len": 18.9136,
      "eval_loss": 1.1434648036956787,
      "eval_runtime": 1314.8151,
      "eval_samples_per_second": 13.11,
      "eval_steps_per_second": 1.639,
      "step": 4000
    },
    {
      "epoch": 2.09,
      "learning_rate": 4.851473384030418e-05,
      "loss": 0.9558,
      "step": 4500
    },
    {
      "epoch": 2.32,
      "learning_rate": 4.8217680608365025e-05,
      "loss": 0.8186,
      "step": 5000
    },
    {
      "epoch": 2.55,
      "learning_rate": 4.792062737642586e-05,
      "loss": 0.8398,
      "step": 5500
    },
    {
      "epoch": 2.78,
      "learning_rate": 4.7623574144486695e-05,
      "loss": 0.8454,
      "step": 6000
    },
    {
      "epoch": 3.02,
      "learning_rate": 4.732652091254753e-05,
      "loss": 0.8416,
      "step": 6500
    },
    {
      "epoch": 3.25,
      "learning_rate": 4.702946768060837e-05,
      "loss": 0.6354,
      "step": 7000
    },
    {
      "epoch": 3.48,
      "learning_rate": 4.67324144486692e-05,
      "loss": 0.6545,
      "step": 7500
    },
    {
      "epoch": 3.71,
      "learning_rate": 4.643536121673004e-05,
      "loss": 0.6719,
      "step": 8000
    },
    {
      "epoch": 3.71,
      "eval_bleu": 27.8203,
      "eval_gen_len": 18.6815,
      "eval_loss": 1.204200267791748,
      "eval_runtime": 1295.2995,
      "eval_samples_per_second": 13.307,
      "eval_steps_per_second": 1.664,
      "step": 8000
    }
  ],
  "logging_steps": 500,
  "max_steps": 86160,
  "num_train_epochs": 40,
  "save_steps": 4000,
  "total_flos": 1.109570441183232e+18,
  "trial_name": null,
  "trial_params": null
}