{
  "best_metric": 1.523019790649414,
  "best_model_checkpoint": "./mbartLarge_mid_en-ko1/checkpoint-1500",
  "epoch": 5.609573672400898,
  "eval_steps": 1500,
  "global_step": 7500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.37,
      "learning_rate": 5e-05,
      "loss": 1.7061,
      "step": 500
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.952812382030955e-05,
      "loss": 1.619,
      "step": 1000
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.9056247640619104e-05,
      "loss": 1.4343,
      "step": 1500
    },
    {
      "epoch": 1.12,
      "eval_bleu": 11.2393,
      "eval_gen_len": 16.3612,
      "eval_loss": 1.523019790649414,
      "eval_runtime": 172.8656,
      "eval_samples_per_second": 15.457,
      "eval_steps_per_second": 0.966,
      "step": 1500
    },
    {
      "epoch": 1.5,
      "learning_rate": 4.8584371460928654e-05,
      "loss": 1.1601,
      "step": 2000
    },
    {
      "epoch": 1.87,
      "learning_rate": 4.8112495281238205e-05,
      "loss": 1.1956,
      "step": 2500
    },
    {
      "epoch": 2.24,
      "learning_rate": 4.7640619101547756e-05,
      "loss": 0.9153,
      "step": 3000
    },
    {
      "epoch": 2.24,
      "eval_bleu": 11.9955,
      "eval_gen_len": 16.2665,
      "eval_loss": 1.600303053855896,
      "eval_runtime": 166.8515,
      "eval_samples_per_second": 16.014,
      "eval_steps_per_second": 1.001,
      "step": 3000
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.7168742921857306e-05,
      "loss": 0.79,
      "step": 3500
    },
    {
      "epoch": 2.99,
      "learning_rate": 4.669686674216686e-05,
      "loss": 0.8315,
      "step": 4000
    },
    {
      "epoch": 3.37,
      "learning_rate": 4.622499056247641e-05,
      "loss": 0.4823,
      "step": 4500
    },
    {
      "epoch": 3.37,
      "eval_bleu": 11.5074,
      "eval_gen_len": 16.1329,
      "eval_loss": 1.7483373880386353,
      "eval_runtime": 163.6405,
      "eval_samples_per_second": 16.328,
      "eval_steps_per_second": 1.021,
      "step": 4500
    },
    {
      "epoch": 3.74,
      "learning_rate": 4.575311438278596e-05,
      "loss": 0.5193,
      "step": 5000
    },
    {
      "epoch": 4.11,
      "learning_rate": 4.528123820309551e-05,
      "loss": 0.4565,
      "step": 5500
    },
    {
      "epoch": 4.49,
      "learning_rate": 4.480936202340506e-05,
      "loss": 0.3021,
      "step": 6000
    },
    {
      "epoch": 4.49,
      "eval_bleu": 11.452,
      "eval_gen_len": 16.2631,
      "eval_loss": 1.888548731803894,
      "eval_runtime": 159.6603,
      "eval_samples_per_second": 16.736,
      "eval_steps_per_second": 1.046,
      "step": 6000
    },
    {
      "epoch": 4.86,
      "learning_rate": 4.433748584371461e-05,
      "loss": 0.3266,
      "step": 6500
    },
    {
      "epoch": 5.24,
      "learning_rate": 4.386560966402416e-05,
      "loss": 0.2338,
      "step": 7000
    },
    {
      "epoch": 5.61,
      "learning_rate": 4.339373348433372e-05,
      "loss": 0.1974,
      "step": 7500
    },
    {
      "epoch": 5.61,
      "eval_bleu": 12.065,
      "eval_gen_len": 16.0183,
      "eval_loss": 2.0004332065582275,
      "eval_runtime": 159.6772,
      "eval_samples_per_second": 16.734,
      "eval_steps_per_second": 1.046,
      "step": 7500
    }
  ],
  "logging_steps": 500,
  "max_steps": 53480,
  "num_train_epochs": 40,
  "save_steps": 1500,
  "total_flos": 2.6005557215232e+17,
  "trial_name": null,
  "trial_params": null
}