{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 5476,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18,
"learning_rate": 9.086924762600438e-05,
"loss": 1.4977,
"step": 500
},
{
"epoch": 0.37,
"learning_rate": 8.173849525200878e-05,
"loss": 1.2444,
"step": 1000
},
{
"epoch": 0.55,
"learning_rate": 7.260774287801315e-05,
"loss": 1.1926,
"step": 1500
},
{
"epoch": 0.73,
"learning_rate": 6.347699050401752e-05,
"loss": 1.1644,
"step": 2000
},
{
"epoch": 0.91,
"learning_rate": 5.434623813002192e-05,
"loss": 1.1414,
"step": 2500
},
{
"epoch": 1.1,
"learning_rate": 4.52154857560263e-05,
"loss": 1.0927,
"step": 3000
},
{
"epoch": 1.28,
"learning_rate": 3.6084733382030684e-05,
"loss": 1.0721,
"step": 3500
},
{
"epoch": 1.46,
"learning_rate": 2.695398100803506e-05,
"loss": 1.0598,
"step": 4000
},
{
"epoch": 1.64,
"learning_rate": 1.7823228634039447e-05,
"loss": 1.0517,
"step": 4500
},
{
"epoch": 1.83,
"learning_rate": 8.692476260043827e-06,
"loss": 1.0524,
"step": 5000
},
{
"epoch": 2.0,
"step": 5476,
"total_flos": 8.001615338274816e+16,
"train_loss": 1.1475538333198987,
"train_runtime": 10232.2752,
"train_samples_per_second": 17.122,
"train_steps_per_second": 0.535
}
],
"logging_steps": 500,
"max_steps": 5476,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 8.001615338274816e+16,
"trial_name": null,
"trial_params": null
}