{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.999120531199156,
"global_step": 11368,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18,
"learning_rate": 4.792839549612949e-05,
"loss": 0.659,
"step": 500
},
{
"epoch": 0.35,
"learning_rate": 4.572923997185081e-05,
"loss": 0.611,
"step": 1000
},
{
"epoch": 0.53,
"learning_rate": 4.3530084447572136e-05,
"loss": 0.6055,
"step": 1500
},
{
"epoch": 0.7,
"learning_rate": 4.1335327234342017e-05,
"loss": 0.6116,
"step": 2000
},
{
"epoch": 0.88,
"learning_rate": 3.9136171710063336e-05,
"loss": 0.6019,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.6733860994311155,
"eval_loss": 0.5970579385757446,
"eval_runtime": 112.0914,
"eval_samples_per_second": 360.688,
"eval_steps_per_second": 11.277,
"step": 2842
},
{
"epoch": 1.06,
"learning_rate": 3.693701618578466e-05,
"loss": 0.5947,
"step": 3000
},
{
"epoch": 1.23,
"learning_rate": 3.473786066150598e-05,
"loss": 0.5897,
"step": 3500
},
{
"epoch": 1.41,
"learning_rate": 3.253870513722731e-05,
"loss": 0.5881,
"step": 4000
},
{
"epoch": 1.58,
"learning_rate": 3.033954961294863e-05,
"loss": 0.5853,
"step": 4500
},
{
"epoch": 1.76,
"learning_rate": 2.814479239971851e-05,
"loss": 0.5839,
"step": 5000
},
{
"epoch": 1.93,
"learning_rate": 2.5945636875439833e-05,
"loss": 0.5849,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.6843185753153599,
"eval_loss": 0.5835733413696289,
"eval_runtime": 111.952,
"eval_samples_per_second": 361.137,
"eval_steps_per_second": 11.291,
"step": 5685
},
{
"epoch": 2.11,
"learning_rate": 2.3746481351161152e-05,
"loss": 0.5784,
"step": 6000
},
{
"epoch": 2.29,
"learning_rate": 2.154732582688248e-05,
"loss": 0.5803,
"step": 6500
},
{
"epoch": 2.46,
"learning_rate": 1.93481703026038e-05,
"loss": 0.58,
"step": 7000
},
{
"epoch": 2.64,
"learning_rate": 1.715341308937368e-05,
"loss": 0.5807,
"step": 7500
},
{
"epoch": 2.81,
"learning_rate": 1.4954257565095003e-05,
"loss": 0.5813,
"step": 8000
},
{
"epoch": 2.99,
"learning_rate": 1.2755102040816327e-05,
"loss": 0.5819,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.6855305466237942,
"eval_loss": 0.5815231204032898,
"eval_runtime": 111.9533,
"eval_samples_per_second": 361.133,
"eval_steps_per_second": 11.29,
"step": 8527
},
{
"epoch": 3.17,
"learning_rate": 1.055594651653765e-05,
"loss": 0.5771,
"step": 9000
},
{
"epoch": 3.34,
"learning_rate": 8.356790992258972e-06,
"loss": 0.5794,
"step": 9500
},
{
"epoch": 3.52,
"learning_rate": 6.162033779028853e-06,
"loss": 0.5802,
"step": 10000
},
{
"epoch": 3.69,
"learning_rate": 3.962878254750176e-06,
"loss": 0.5831,
"step": 10500
},
{
"epoch": 3.87,
"learning_rate": 1.7637227304714988e-06,
"loss": 0.5768,
"step": 11000
},
{
"epoch": 4.0,
"eval_accuracy": 0.6842443729903537,
"eval_loss": 0.5816319584846497,
"eval_runtime": 111.9533,
"eval_samples_per_second": 361.133,
"eval_steps_per_second": 11.29,
"step": 11368
},
{
"epoch": 4.0,
"step": 11368,
"total_flos": 5.063018043612856e+17,
"train_loss": 0.5902769758531864,
"train_runtime": 11488.0766,
"train_samples_per_second": 126.686,
"train_steps_per_second": 0.99
}
],
"max_steps": 11368,
"num_train_epochs": 4,
"total_flos": 5.063018043612856e+17,
"trial_name": null,
"trial_params": null
}