{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"global_step": 7382,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14,
"learning_rate": 2.7968030344080195e-05,
"loss": 1.3568,
"step": 500
},
{
"epoch": 0.27,
"learning_rate": 2.5936060688160393e-05,
"loss": 0.9714,
"step": 1000
},
{
"epoch": 0.41,
"learning_rate": 2.3904091032240584e-05,
"loss": 0.915,
"step": 1500
},
{
"epoch": 0.54,
"learning_rate": 2.187212137632078e-05,
"loss": 0.8459,
"step": 2000
},
{
"epoch": 0.68,
"learning_rate": 1.9840151720400976e-05,
"loss": 0.8315,
"step": 2500
},
{
"epoch": 0.81,
"learning_rate": 1.7808182064481173e-05,
"loss": 0.7995,
"step": 3000
},
{
"epoch": 0.95,
"learning_rate": 1.5776212408561364e-05,
"loss": 0.7776,
"step": 3500
},
{
"epoch": 1.08,
"learning_rate": 1.374424275264156e-05,
"loss": 0.6493,
"step": 4000
},
{
"epoch": 1.22,
"learning_rate": 1.1712273096721756e-05,
"loss": 0.5626,
"step": 4500
},
{
"epoch": 1.35,
"learning_rate": 9.68030344080195e-06,
"loss": 0.5631,
"step": 5000
},
{
"epoch": 1.49,
"learning_rate": 7.648333784882147e-06,
"loss": 0.564,
"step": 5500
},
{
"epoch": 1.63,
"learning_rate": 5.616364128962341e-06,
"loss": 0.5734,
"step": 6000
},
{
"epoch": 1.76,
"learning_rate": 3.584394473042536e-06,
"loss": 0.5533,
"step": 6500
},
{
"epoch": 1.9,
"learning_rate": 1.552424817122731e-06,
"loss": 0.5477,
"step": 7000
},
{
"epoch": 2.0,
"step": 7382,
"total_flos": 1.2338058008251597e+17,
"train_loss": 0.7406173908623277,
"train_runtime": 8011.1743,
"train_samples_per_second": 22.111,
"train_steps_per_second": 0.921
}
],
"max_steps": 7382,
"num_train_epochs": 2,
"total_flos": 1.2338058008251597e+17,
"trial_name": null,
"trial_params": null
}