lilt_ls_re_en / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 263.1578947368421,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 26.32,
      "learning_rate": 2.5e-05,
      "loss": 0.2555,
      "step": 500
    },
    {
      "epoch": 52.63,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.1603,
      "step": 1000
    },
    {
      "epoch": 78.95,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.1074,
      "step": 1500
    },
    {
      "epoch": 105.26,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.0881,
      "step": 2000
    },
    {
      "epoch": 131.58,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.0761,
      "step": 2500
    },
    {
      "epoch": 157.89,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.0678,
      "step": 3000
    },
    {
      "epoch": 184.21,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0619,
      "step": 3500
    },
    {
      "epoch": 210.53,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.0583,
      "step": 4000
    },
    {
      "epoch": 236.84,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.0549,
      "step": 4500
    },
    {
      "epoch": 263.16,
      "learning_rate": 0.0,
      "loss": 0.0543,
      "step": 5000
    },
    {
      "epoch": 263.16,
      "step": 5000,
      "total_flos": 3.53160818688e+16,
      "train_runtime": 2716.3773,
      "train_samples_per_second": 1.841
    }
  ],
  "max_steps": 5000,
  "num_train_epochs": 264,
  "total_flos": 3.53160818688e+16,
  "trial_name": null,
  "trial_params": null
}
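
The log_history entries above can be read back and summarized with a few lines of Python. The following is a minimal sketch, assuming the file is saved locally as trainer_state.json; the keys (log_history, step, epoch, learning_rate, loss, train_runtime, train_samples_per_second) come from the state dump above, while the printing format is purely illustrative.

import json

# Load the trainer state dump (assumes it is saved locally as trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with a "loss" key are periodic training logs; the final entry
# instead carries runtime summary fields.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:5d}  epoch {entry["epoch"]:7.2f}  '
              f'lr {entry["learning_rate"]:.2e}  loss {entry["loss"]:.4f}')

summary = state["log_history"][-1]
print("train_runtime (s):", summary.get("train_runtime"))
print("samples/sec:      ", summary.get("train_samples_per_second"))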