{ "best_metric": 0.5301018357276917, "best_model_checkpoint": "./Vit-GPT2-COCO2017Flickr-85k-09/checkpoint-1000", "epoch": 0.1866193897545955, "eval_steps": 500, "global_step": 1000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.09330969487729775, "grad_norm": 1.1049598455429077, "learning_rate": 4.8444693293517484e-05, "loss": 0.2429, "step": 500 }, { "epoch": 0.09330969487729775, "eval_gen_len": 11.738, "eval_loss": 0.5351373553276062, "eval_rouge1": 39.4446, "eval_rouge2": 14.1599, "eval_rougeL": 35.6123, "eval_rougeLsum": 35.5846, "eval_runtime": 463.8252, "eval_samples_per_second": 8.624, "eval_steps_per_second": 2.156, "step": 500 }, { "epoch": 0.1866193897545955, "grad_norm": 1.1426328420639038, "learning_rate": 4.6889386587034965e-05, "loss": 0.2537, "step": 1000 }, { "epoch": 0.1866193897545955, "eval_gen_len": 12.34875, "eval_loss": 0.5301018357276917, "eval_rouge1": 39.5332, "eval_rouge2": 14.4745, "eval_rougeL": 35.644, "eval_rougeLsum": 35.6159, "eval_runtime": 451.0336, "eval_samples_per_second": 8.869, "eval_steps_per_second": 2.217, "step": 1000 } ], "logging_steps": 500, "max_steps": 16074, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 2.887422257922048e+18, "train_batch_size": 4, "trial_name": null, "trial_params": null }