{ "best_metric": 0.4729623794555664, "best_model_checkpoint": "./Vit-GPT2-COCO2017Flickr-80k-08/checkpoint-500", "epoch": 0.3, "eval_steps": 500, "global_step": 1500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.1, "grad_norm": 0.9981909990310669, "learning_rate": 4.8333333333333334e-05, "loss": 0.3691, "step": 500 }, { "epoch": 0.1, "eval_gen_len": 11.77575, "eval_loss": 0.4729623794555664, "eval_rouge1": 39.8086, "eval_rouge2": 14.7674, "eval_rougeL": 36.1546, "eval_rougeLsum": 36.1739, "eval_runtime": 459.5392, "eval_samples_per_second": 8.704, "eval_steps_per_second": 2.176, "step": 500 }, { "epoch": 0.2, "grad_norm": 0.8220647573471069, "learning_rate": 4.666666666666667e-05, "loss": 0.3706, "step": 1000 }, { "epoch": 0.2, "eval_gen_len": 11.59775, "eval_loss": 0.47387805581092834, "eval_rouge1": 39.8972, "eval_rouge2": 14.9064, "eval_rougeL": 36.1193, "eval_rougeLsum": 36.138, "eval_runtime": 446.0956, "eval_samples_per_second": 8.967, "eval_steps_per_second": 2.242, "step": 1000 }, { "epoch": 0.3, "grad_norm": 1.052058219909668, "learning_rate": 4.5e-05, "loss": 0.3709, "step": 1500 }, { "epoch": 0.3, "eval_gen_len": 11.71025, "eval_loss": 0.47592678666114807, "eval_rouge1": 39.9874, "eval_rouge2": 14.8528, "eval_rougeL": 36.3155, "eval_rougeLsum": 36.3317, "eval_runtime": 466.5502, "eval_samples_per_second": 8.574, "eval_steps_per_second": 2.143, "step": 1500 } ], "logging_steps": 500, "max_steps": 15000, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "total_flos": 4.331133386883072e+18, "train_batch_size": 4, "trial_name": null, "trial_params": null }