{
  "best_metric": 0.510191798210144,
  "best_model_checkpoint": "./Vit-GPT2-COCO2017Flickr-85k-09/checkpoint-4000",
  "epoch": 1.5863581226089392,
  "eval_steps": 500,
  "global_step": 8500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09330969487729775,
      "grad_norm": 1.1049598455429077,
      "learning_rate": 4.8444693293517484e-05,
      "loss": 0.2429,
      "step": 500
    },
    {
      "epoch": 0.09330969487729775,
      "eval_gen_len": 11.738,
      "eval_loss": 0.5351373553276062,
      "eval_rouge1": 39.4446,
      "eval_rouge2": 14.1599,
      "eval_rougeL": 35.6123,
      "eval_rougeLsum": 35.5846,
      "eval_runtime": 463.8252,
      "eval_samples_per_second": 8.624,
      "eval_steps_per_second": 2.156,
      "step": 500
    },
    {
      "epoch": 0.1866193897545955,
      "grad_norm": 1.1426328420639038,
      "learning_rate": 4.6889386587034965e-05,
      "loss": 0.2537,
      "step": 1000
    },
    {
      "epoch": 0.1866193897545955,
      "eval_gen_len": 12.34875,
      "eval_loss": 0.5301018357276917,
      "eval_rouge1": 39.5332,
      "eval_rouge2": 14.4745,
      "eval_rougeL": 35.644,
      "eval_rougeLsum": 35.6159,
      "eval_runtime": 451.0336,
      "eval_samples_per_second": 8.869,
      "eval_steps_per_second": 2.217,
      "step": 1000
    },
    {
      "epoch": 0.27992908463189325,
      "grad_norm": 1.017817735671997,
      "learning_rate": 4.5334079880552446e-05,
      "loss": 0.2564,
      "step": 1500
    },
    {
      "epoch": 0.27992908463189325,
      "eval_gen_len": 12.2455,
      "eval_loss": 0.5198264122009277,
      "eval_rouge1": 39.8297,
      "eval_rouge2": 14.555,
      "eval_rougeL": 35.8598,
      "eval_rougeLsum": 35.8344,
      "eval_runtime": 446.7054,
      "eval_samples_per_second": 8.954,
      "eval_steps_per_second": 2.239,
      "step": 1500
    },
    {
      "epoch": 0.373238779509191,
      "grad_norm": 0.9865152835845947,
      "learning_rate": 4.3778773174069934e-05,
      "loss": 0.2585,
      "step": 2000
    },
    {
      "epoch": 0.373238779509191,
      "eval_gen_len": 11.8575,
      "eval_loss": 0.5207428932189941,
      "eval_rouge1": 39.4558,
      "eval_rouge2": 14.0496,
      "eval_rougeL": 35.5597,
      "eval_rougeLsum": 35.526,
      "eval_runtime": 441.2453,
      "eval_samples_per_second": 9.065,
      "eval_steps_per_second": 2.266,
      "step": 2000
    },
    {
      "epoch": 0.4665484743864888,
      "grad_norm": 0.9201194047927856,
      "learning_rate": 4.222346646758741e-05,
      "loss": 0.2579,
      "step": 2500
    },
    {
      "epoch": 0.4665484743864888,
      "eval_gen_len": 11.97325,
      "eval_loss": 0.5187843441963196,
      "eval_rouge1": 39.1359,
      "eval_rouge2": 14.125,
      "eval_rougeL": 35.4068,
      "eval_rougeLsum": 35.3709,
      "eval_runtime": 456.9413,
      "eval_samples_per_second": 8.754,
      "eval_steps_per_second": 2.188,
      "step": 2500
    },
    {
      "epoch": 0.5598581692637865,
      "grad_norm": 1.011809229850769,
      "learning_rate": 4.066815976110489e-05,
      "loss": 0.2588,
      "step": 3000
    },
    {
      "epoch": 0.5598581692637865,
      "eval_gen_len": 12.278,
      "eval_loss": 0.5195733904838562,
      "eval_rouge1": 39.0831,
      "eval_rouge2": 14.0658,
      "eval_rougeL": 35.4608,
      "eval_rougeLsum": 35.4283,
      "eval_runtime": 453.7444,
      "eval_samples_per_second": 8.816,
      "eval_steps_per_second": 2.204,
      "step": 3000
    },
    {
      "epoch": 0.6531678641410843,
      "grad_norm": 0.9602178931236267,
      "learning_rate": 3.911285305462237e-05,
      "loss": 0.2618,
      "step": 3500
    },
    {
      "epoch": 0.6531678641410843,
      "eval_gen_len": 11.99425,
      "eval_loss": 0.5193876028060913,
      "eval_rouge1": 39.751,
      "eval_rouge2": 14.443,
      "eval_rougeL": 36.076,
      "eval_rougeLsum": 36.0475,
      "eval_runtime": 451.6054,
      "eval_samples_per_second": 8.857,
      "eval_steps_per_second": 2.214,
      "step": 3500
    },
    {
      "epoch": 0.746477559018382,
      "grad_norm": 1.0043072700500488,
      "learning_rate": 3.755754634813985e-05,
      "loss": 0.2579,
      "step": 4000
    },
    {
      "epoch": 0.746477559018382,
      "eval_gen_len": 12.05125,
      "eval_loss": 0.510191798210144,
      "eval_rouge1": 39.7601,
      "eval_rouge2": 14.5095,
      "eval_rougeL": 36.0252,
      "eval_rougeLsum": 35.9857,
      "eval_runtime": 450.2804,
      "eval_samples_per_second": 8.883,
      "eval_steps_per_second": 2.221,
      "step": 4000
    },
    {
      "epoch": 0.8397872538956798,
      "grad_norm": 0.9865091443061829,
      "learning_rate": 3.600223964165734e-05,
      "loss": 0.2569,
      "step": 4500
    },
    {
      "epoch": 0.8397872538956798,
      "eval_gen_len": 11.64825,
      "eval_loss": 0.5199217796325684,
      "eval_rouge1": 39.398,
      "eval_rouge2": 13.8871,
      "eval_rougeL": 35.7218,
      "eval_rougeLsum": 35.6911,
      "eval_runtime": 440.2487,
      "eval_samples_per_second": 9.086,
      "eval_steps_per_second": 2.271,
      "step": 4500
    },
    {
      "epoch": 0.9330969487729776,
      "grad_norm": 1.1134284734725952,
      "learning_rate": 3.444693293517482e-05,
      "loss": 0.253,
      "step": 5000
    },
    {
      "epoch": 0.9330969487729776,
      "eval_gen_len": 12.01975,
      "eval_loss": 0.5200353860855103,
      "eval_rouge1": 39.8951,
      "eval_rouge2": 14.4146,
      "eval_rougeL": 35.883,
      "eval_rougeLsum": 35.8507,
      "eval_runtime": 445.4978,
      "eval_samples_per_second": 8.979,
      "eval_steps_per_second": 2.245,
      "step": 5000
    },
    {
      "epoch": 1.0264066436502752,
      "grad_norm": 0.8591415286064148,
      "learning_rate": 3.28916262286923e-05,
      "loss": 0.2361,
      "step": 5500
    },
    {
      "epoch": 1.0264066436502752,
      "eval_gen_len": 12.183,
      "eval_loss": 0.560513973236084,
      "eval_rouge1": 39.3352,
      "eval_rouge2": 14.2234,
      "eval_rougeL": 35.3107,
      "eval_rougeLsum": 35.2772,
      "eval_runtime": 451.5632,
      "eval_samples_per_second": 8.858,
      "eval_steps_per_second": 2.215,
      "step": 5500
    },
    {
      "epoch": 1.119716338527573,
      "grad_norm": 0.9471074342727661,
      "learning_rate": 3.1336319522209783e-05,
      "loss": 0.2,
      "step": 6000
    },
    {
      "epoch": 1.119716338527573,
      "eval_gen_len": 11.85975,
      "eval_loss": 0.5701556205749512,
      "eval_rouge1": 39.2184,
      "eval_rouge2": 14.0096,
      "eval_rougeL": 35.5475,
      "eval_rougeLsum": 35.5042,
      "eval_runtime": 443.3267,
      "eval_samples_per_second": 9.023,
      "eval_steps_per_second": 2.256,
      "step": 6000
    },
    {
      "epoch": 1.2130260334048708,
      "grad_norm": 1.184536099433899,
      "learning_rate": 2.978101281572726e-05,
      "loss": 0.2034,
      "step": 6500
    },
    {
      "epoch": 1.2130260334048708,
      "eval_gen_len": 11.878,
      "eval_loss": 0.5543492436408997,
      "eval_rouge1": 39.7118,
      "eval_rouge2": 14.2757,
      "eval_rougeL": 35.7613,
      "eval_rougeLsum": 35.7316,
      "eval_runtime": 448.2166,
      "eval_samples_per_second": 8.924,
      "eval_steps_per_second": 2.231,
      "step": 6500
    },
    {
      "epoch": 1.3063357282821686,
      "grad_norm": 0.9648805260658264,
      "learning_rate": 2.8225706109244742e-05,
      "loss": 0.1968,
      "step": 7000
    },
    {
      "epoch": 1.3063357282821686,
      "eval_gen_len": 12.1725,
      "eval_loss": 0.5584425330162048,
      "eval_rouge1": 39.1847,
      "eval_rouge2": 13.9003,
      "eval_rougeL": 35.3962,
      "eval_rougeLsum": 35.3713,
      "eval_runtime": 447.4882,
      "eval_samples_per_second": 8.939,
      "eval_steps_per_second": 2.235,
      "step": 7000
    },
    {
      "epoch": 1.3996454231594662,
      "grad_norm": 1.0540575981140137,
      "learning_rate": 2.6670399402762224e-05,
      "loss": 0.1986,
      "step": 7500
    },
    {
      "epoch": 1.3996454231594662,
      "eval_gen_len": 11.8395,
      "eval_loss": 0.5572038292884827,
      "eval_rouge1": 39.4428,
      "eval_rouge2": 14.2672,
      "eval_rougeL": 35.7359,
      "eval_rougeLsum": 35.7093,
      "eval_runtime": 437.4159,
      "eval_samples_per_second": 9.145,
      "eval_steps_per_second": 2.286,
      "step": 7500
    },
    {
      "epoch": 1.4930484277316414,
      "grad_norm": 1.1180176734924316,
      "learning_rate": 2.5115092696279708e-05,
      "loss": 0.1988,
      "step": 8000
    },
    {
      "epoch": 1.4930484277316414,
      "eval_gen_len": 11.99325,
      "eval_loss": 0.555167019367218,
      "eval_rouge1": 39.2719,
      "eval_rouge2": 14.0411,
      "eval_rougeL": 35.482,
      "eval_rougeLsum": 35.4833,
      "eval_runtime": 466.4795,
      "eval_samples_per_second": 8.575,
      "eval_steps_per_second": 2.144,
      "step": 8000
    },
    {
      "epoch": 1.5863581226089392,
      "grad_norm": 0.9527018070220947,
      "learning_rate": 2.355978598979719e-05,
      "loss": 0.1971,
      "step": 8500
    },
    {
      "epoch": 1.5863581226089392,
      "eval_gen_len": 12.10025,
      "eval_loss": 0.5571706891059875,
      "eval_rouge1": 39.2681,
      "eval_rouge2": 14.1036,
      "eval_rougeL": 35.4466,
      "eval_rougeLsum": 35.4245,
      "eval_runtime": 436.9857,
      "eval_samples_per_second": 9.154,
      "eval_steps_per_second": 2.288,
      "step": 8500
    }
  ],
  "logging_steps": 500,
  "max_steps": 16074,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4542728264555168e+19,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}