longt5-base-global-mediasum / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 5001,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3,
      "learning_rate": 4.99001996007984e-05,
      "loss": 3.6478,
      "step": 500
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.445555555555555e-05,
      "loss": 2.7964,
      "step": 1000
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.8900000000000004e-05,
      "loss": 2.66,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.064293622970581,
      "eval_runtime": 200.8568,
      "eval_samples_per_second": 9.957,
      "eval_steps_per_second": 0.622,
      "step": 1667
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.334444444444445e-05,
      "loss": 2.5197,
      "step": 2000
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.7788888888888892e-05,
      "loss": 2.4882,
      "step": 2500
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.2233333333333333e-05,
      "loss": 2.472,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.0240793228149414,
      "eval_runtime": 212.8772,
      "eval_samples_per_second": 9.395,
      "eval_steps_per_second": 0.587,
      "step": 3334
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.6677777777777777e-05,
      "loss": 2.4282,
      "step": 3500
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.1122222222222223e-05,
      "loss": 2.3806,
      "step": 4000
    },
    {
      "epoch": 2.7,
      "learning_rate": 5.566666666666667e-06,
      "loss": 2.3865,
      "step": 4500
    },
    {
      "epoch": 3.0,
      "learning_rate": 1.1111111111111112e-08,
      "loss": 2.3574,
      "step": 5000
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.012880325317383,
      "eval_runtime": 199.4135,
      "eval_samples_per_second": 10.029,
      "eval_steps_per_second": 0.627,
      "step": 5001
    },
    {
      "epoch": 3.0,
      "step": 5001,
      "total_flos": 1.8376153805625754e+17,
      "train_loss": 1.619314953175289,
      "train_runtime": 7201.217,
      "train_samples_per_second": 8.332,
      "train_steps_per_second": 0.694
    }
  ],
  "max_steps": 5001,
  "num_train_epochs": 3,
  "total_flos": 1.8376153805625754e+17,
  "trial_name": null,
  "trial_params": null
}