{
"best_metric": 0.7289605637620273,
"best_model_checkpoint": "D:/1_SyscoPY_D/NLP/Data/Transformers_Hug/checkpoint/roberta-base\\checkpoint-150",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 6.666666666666667e-06,
"loss": 1.8568,
"step": 10
},
{
"epoch": 0.13,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.7794,
"step": 20
},
{
"epoch": 0.2,
"learning_rate": 1.9999999999999998e-05,
"loss": 1.5937,
"step": 30
},
{
"epoch": 0.27,
"learning_rate": 2.6666666666666667e-05,
"loss": 1.557,
"step": 40
},
{
"epoch": 0.33,
"learning_rate": 2.962962962962963e-05,
"loss": 1.5075,
"step": 50
},
{
"epoch": 0.4,
"learning_rate": 2.8888888888888888e-05,
"loss": 1.4945,
"step": 60
},
{
"epoch": 0.47,
"learning_rate": 2.8148148148148147e-05,
"loss": 1.4764,
"step": 70
},
{
"epoch": 0.53,
"learning_rate": 2.7481481481481482e-05,
"loss": 1.4568,
"step": 80
},
{
"epoch": 0.6,
"learning_rate": 2.6740740740740743e-05,
"loss": 1.4276,
"step": 90
},
{
"epoch": 0.67,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.4126,
"step": 100
},
{
"epoch": 0.73,
"learning_rate": 2.525925925925926e-05,
"loss": 1.3712,
"step": 110
},
{
"epoch": 0.8,
"learning_rate": 2.451851851851852e-05,
"loss": 1.4503,
"step": 120
},
{
"epoch": 0.87,
"learning_rate": 2.377777777777778e-05,
"loss": 1.4199,
"step": 130
},
{
"epoch": 0.93,
"learning_rate": 2.303703703703704e-05,
"loss": 1.469,
"step": 140
},
{
"epoch": 1.0,
"learning_rate": 2.2296296296296297e-05,
"loss": 1.3784,
"step": 150
},
{
"epoch": 1.0,
"eval_accuracy": 0.7289605637620273,
"eval_loss": 1.282206416130066,
"eval_runtime": 14.3101,
"eval_samples_per_second": 34.661,
"eval_steps_per_second": 4.333,
"step": 150
},
{
"epoch": 1.07,
"learning_rate": 2.155555555555556e-05,
"loss": 1.3744,
"step": 160
},
{
"epoch": 1.13,
"learning_rate": 2.0814814814814817e-05,
"loss": 1.4279,
"step": 170
},
{
"epoch": 1.2,
"learning_rate": 2.014814814814815e-05,
"loss": 1.4016,
"step": 180
},
{
"epoch": 1.27,
"learning_rate": 1.9407407407407407e-05,
"loss": 1.3812,
"step": 190
},
{
"epoch": 1.33,
"learning_rate": 1.866666666666667e-05,
"loss": 1.415,
"step": 200
},
{
"epoch": 1.4,
"learning_rate": 1.7925925925925927e-05,
"loss": 1.3856,
"step": 210
},
{
"epoch": 1.47,
"learning_rate": 1.7185185185185185e-05,
"loss": 1.3874,
"step": 220
},
{
"epoch": 1.53,
"learning_rate": 1.6444444444444444e-05,
"loss": 1.3965,
"step": 230
},
{
"epoch": 1.6,
"learning_rate": 1.5703703703703705e-05,
"loss": 1.4211,
"step": 240
},
{
"epoch": 1.67,
"learning_rate": 1.4962962962962964e-05,
"loss": 1.3866,
"step": 250
},
{
"epoch": 1.73,
"learning_rate": 1.4222222222222224e-05,
"loss": 1.3759,
"step": 260
},
{
"epoch": 1.8,
"learning_rate": 1.3481481481481482e-05,
"loss": 1.4049,
"step": 270
},
{
"epoch": 1.87,
"learning_rate": 1.2740740740740742e-05,
"loss": 1.3919,
"step": 280
},
{
"epoch": 1.93,
"learning_rate": 1.2e-05,
"loss": 1.3781,
"step": 290
},
{
"epoch": 2.0,
"learning_rate": 1.125925925925926e-05,
"loss": 1.3804,
"step": 300
},
{
"epoch": 2.0,
"eval_accuracy": 0.7273364801078894,
"eval_loss": 1.275496482849121,
"eval_runtime": 14.199,
"eval_samples_per_second": 34.932,
"eval_steps_per_second": 4.366,
"step": 300
},
{
"epoch": 2.07,
"learning_rate": 1.051851851851852e-05,
"loss": 1.3812,
"step": 310
},
{
"epoch": 2.13,
"learning_rate": 9.777777777777779e-06,
"loss": 1.4056,
"step": 320
},
{
"epoch": 2.2,
"learning_rate": 9.037037037037039e-06,
"loss": 1.3889,
"step": 330
},
{
"epoch": 2.27,
"learning_rate": 8.296296296296295e-06,
"loss": 1.3811,
"step": 340
},
{
"epoch": 2.33,
"learning_rate": 7.555555555555555e-06,
"loss": 1.3451,
"step": 350
},
{
"epoch": 2.4,
"learning_rate": 6.814814814814815e-06,
"loss": 1.3535,
"step": 360
},
{
"epoch": 2.47,
"learning_rate": 6.0740740740740745e-06,
"loss": 1.3702,
"step": 370
},
{
"epoch": 2.53,
"learning_rate": 5.333333333333334e-06,
"loss": 1.3499,
"step": 380
},
{
"epoch": 2.6,
"learning_rate": 4.592592592592593e-06,
"loss": 1.3318,
"step": 390
},
{
"epoch": 2.67,
"learning_rate": 3.851851851851852e-06,
"loss": 1.3391,
"step": 400
},
{
"epoch": 2.73,
"learning_rate": 3.111111111111111e-06,
"loss": 1.3648,
"step": 410
},
{
"epoch": 2.8,
"learning_rate": 2.3703703703703703e-06,
"loss": 1.3147,
"step": 420
},
{
"epoch": 2.87,
"learning_rate": 1.6296296296296297e-06,
"loss": 1.3406,
"step": 430
},
{
"epoch": 2.93,
"learning_rate": 8.88888888888889e-07,
"loss": 1.3526,
"step": 440
},
{
"epoch": 3.0,
"learning_rate": 1.4814814814814815e-07,
"loss": 1.3586,
"step": 450
},
{
"epoch": 3.0,
"eval_accuracy": 0.7288328898061153,
"eval_loss": 1.2627531290054321,
"eval_runtime": 14.2737,
"eval_samples_per_second": 34.749,
"eval_steps_per_second": 4.344,
"step": 450
},
{
"epoch": 3.0,
"step": 450,
"total_flos": 3789443078682624.0,
"train_loss": 1.420832945505778,
"train_runtime": 1162.9561,
"train_samples_per_second": 12.377,
"train_steps_per_second": 0.387
}
],
"logging_steps": 10,
"max_steps": 450,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 3789443078682624.0,
"trial_name": null,
"trial_params": null
}