roberta-large-squad / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 8241,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "learning_rate": 2.8179832544594102e-05,
      "loss": 1.3765,
      "step": 500
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.6359665089188204e-05,
      "loss": 0.903,
      "step": 1000
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.4539497633782306e-05,
      "loss": 0.8343,
      "step": 1500
    },
    {
      "epoch": 0.73,
      "learning_rate": 2.271933017837641e-05,
      "loss": 0.7909,
      "step": 2000
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.0899162722970516e-05,
      "loss": 0.7492,
      "step": 2500
    },
    {
      "epoch": 1.09,
      "learning_rate": 1.9078995267564617e-05,
      "loss": 0.6332,
      "step": 3000
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.725882781215872e-05,
      "loss": 0.5434,
      "step": 3500
    },
    {
      "epoch": 1.46,
      "learning_rate": 1.543866035675282e-05,
      "loss": 0.5355,
      "step": 4000
    },
    {
      "epoch": 1.64,
      "learning_rate": 1.3618492901346924e-05,
      "loss": 0.549,
      "step": 4500
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.1798325445941026e-05,
      "loss": 0.521,
      "step": 5000
    },
    {
      "epoch": 2.0,
      "learning_rate": 9.97815799053513e-06,
      "loss": 0.5263,
      "step": 5500
    },
    {
      "epoch": 2.18,
      "learning_rate": 8.157990535129232e-06,
      "loss": 0.3775,
      "step": 6000
    },
    {
      "epoch": 2.37,
      "learning_rate": 6.337823079723334e-06,
      "loss": 0.3725,
      "step": 6500
    },
    {
      "epoch": 2.55,
      "learning_rate": 4.517655624317437e-06,
      "loss": 0.363,
      "step": 7000
    },
    {
      "epoch": 2.73,
      "learning_rate": 2.69748816891154e-06,
      "loss": 0.3497,
      "step": 7500
    },
    {
      "epoch": 2.91,
      "learning_rate": 8.773207135056426e-07,
      "loss": 0.3415,
      "step": 8000
    },
    {
      "epoch": 3.0,
      "step": 8241,
      "total_flos": 2.7545611910240102e+17,
      "train_loss": 0.6024896357623918,
      "train_runtime": 2829.1187,
      "train_samples_per_second": 139.785,
      "train_steps_per_second": 2.913
    }
  ],
  "max_steps": 8241,
  "num_train_epochs": 3,
  "total_flos": 2.7545611910240102e+17,
  "trial_name": null,
  "trial_params": null
}
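
The learning-rate entries above match a linear decay from a peak of 3e-5 to 0 over the 8241 training steps, with no visible warmup; the peak value is inferred from the logged points, not stored in the file. A minimal Python sketch to check this, assuming the file is saved locally as trainer_state.json:

import json

# Load the state file written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

max_steps = state["max_steps"]  # 8241
peak_lr = 3e-5  # assumption: inferred from the logged values, not stored here

for entry in state["log_history"]:
    # The final entry carries summary stats (train_loss, runtime) and no LR.
    if "learning_rate" not in entry:
        continue
    expected = peak_lr * (1 - entry["step"] / max_steps)  # linear decay, no warmup
    print(f"step {entry['step']:5d}: logged {entry['learning_rate']:.6e} "
          f"vs. schedule {expected:.6e}")

For example, at step 8000 the schedule gives 3e-5 * 241 / 8241 ≈ 8.7732e-07, matching the logged 8.773207135056426e-07.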