pegasus_xsum_gigaword / trainer_state.json
Marc Tomlinson
Pegasus XSUM model fine-tuned for 11,500 iterations on Gigaword to produce short, coherent summaries
bb2d7e8
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.012092661331875204,
"global_step": 11500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.9991237201933425e-05,
"loss": 3.5502,
"step": 500
},
{
"epoch": 0.0,
"learning_rate": 4.9982474403866854e-05,
"loss": 3.0742,
"step": 1000
},
{
"epoch": 0.0,
"learning_rate": 4.997371160580027e-05,
"loss": 2.9389,
"step": 1500
},
{
"epoch": 0.0,
"learning_rate": 4.99649488077337e-05,
"loss": 2.8079,
"step": 2000
},
{
"epoch": 0.0,
"learning_rate": 4.995618600966712e-05,
"loss": 2.7687,
"step": 2500
},
{
"epoch": 0.0,
"learning_rate": 4.9947423211600545e-05,
"loss": 2.7448,
"step": 3000
},
{
"epoch": 0.0,
"learning_rate": 4.993866041353397e-05,
"loss": 2.6388,
"step": 3500
},
{
"epoch": 0.0,
"learning_rate": 4.992989761546739e-05,
"loss": 2.6045,
"step": 4000
},
{
"epoch": 0.0,
"learning_rate": 4.992113481740082e-05,
"loss": 2.5276,
"step": 4500
},
{
"epoch": 0.01,
"learning_rate": 4.991237201933424e-05,
"loss": 2.5322,
"step": 5000
},
{
"epoch": 0.01,
"learning_rate": 4.9903609221267665e-05,
"loss": 2.4583,
"step": 5500
},
{
"epoch": 0.01,
"learning_rate": 4.989484642320109e-05,
"loss": 2.5075,
"step": 6000
},
{
"epoch": 0.01,
"learning_rate": 4.988608362513451e-05,
"loss": 2.4178,
"step": 6500
},
{
"epoch": 0.01,
"learning_rate": 4.987732082706794e-05,
"loss": 2.3881,
"step": 7000
},
{
"epoch": 0.01,
"learning_rate": 4.986855802900136e-05,
"loss": 2.3897,
"step": 7500
},
{
"epoch": 0.01,
"learning_rate": 4.985979523093478e-05,
"loss": 2.3356,
"step": 8000
},
{
"epoch": 0.01,
"learning_rate": 4.985103243286821e-05,
"loss": 2.3406,
"step": 8500
},
{
"epoch": 0.01,
"learning_rate": 4.984226963480163e-05,
"loss": 2.3413,
"step": 9000
},
{
"epoch": 0.01,
"learning_rate": 4.983350683673505e-05,
"loss": 2.3287,
"step": 9500
},
{
"epoch": 0.01,
"learning_rate": 4.9824744038668475e-05,
"loss": 2.3343,
"step": 10000
},
{
"epoch": 0.01,
"learning_rate": 4.98159812406019e-05,
"loss": 2.3305,
"step": 10500
},
{
"epoch": 0.01,
"learning_rate": 4.980721844253533e-05,
"loss": 2.3109,
"step": 11000
},
{
"epoch": 0.01,
"learning_rate": 4.979845564446875e-05,
"loss": 2.3059,
"step": 11500
}
],
"max_steps": 2852970,
"num_train_epochs": 3,
"total_flos": 1.082983380369408e+16,
"trial_name": null,
"trial_params": null
}