{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.23157894736842105,
"eval_steps": 500,
"global_step": 550,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1e-05,
"loss": 4.8744,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 2e-05,
"loss": 4.1114,
"step": 20
},
{
"epoch": 0.01,
"learning_rate": 3e-05,
"loss": 3.528,
"step": 30
},
{
"epoch": 0.02,
"learning_rate": 4e-05,
"loss": 3.2573,
"step": 40
},
{
"epoch": 0.02,
"learning_rate": 5e-05,
"loss": 3.1417,
"step": 50
},
{
"epoch": 0.03,
"learning_rate": 6e-05,
"loss": 3.0506,
"step": 60
},
{
"epoch": 0.03,
"learning_rate": 7e-05,
"loss": 2.9644,
"step": 70
},
{
"epoch": 0.03,
"learning_rate": 8e-05,
"loss": 2.8399,
"step": 80
},
{
"epoch": 0.04,
"learning_rate": 9e-05,
"loss": 2.6935,
"step": 90
},
{
"epoch": 0.04,
"learning_rate": 0.0001,
"loss": 2.5259,
"step": 100
},
{
"epoch": 0.05,
"learning_rate": 9.956043956043956e-05,
"loss": 2.3713,
"step": 110
},
{
"epoch": 0.05,
"learning_rate": 9.912087912087913e-05,
"loss": 2.2237,
"step": 120
},
{
"epoch": 0.05,
"learning_rate": 9.868131868131869e-05,
"loss": 2.0999,
"step": 130
},
{
"epoch": 0.06,
"learning_rate": 9.824175824175824e-05,
"loss": 2.0211,
"step": 140
},
{
"epoch": 0.06,
"learning_rate": 9.780219780219781e-05,
"loss": 1.949,
"step": 150
},
{
"epoch": 0.07,
"learning_rate": 9.736263736263737e-05,
"loss": 1.8819,
"step": 160
},
{
"epoch": 0.07,
"learning_rate": 9.692307692307692e-05,
"loss": 1.8244,
"step": 170
},
{
"epoch": 0.08,
"learning_rate": 9.64835164835165e-05,
"loss": 1.7849,
"step": 180
},
{
"epoch": 0.08,
"learning_rate": 9.604395604395605e-05,
"loss": 1.7288,
"step": 190
},
{
"epoch": 0.08,
"learning_rate": 9.560439560439561e-05,
"loss": 1.6914,
"step": 200
},
{
"epoch": 0.09,
"learning_rate": 9.516483516483517e-05,
"loss": 1.6534,
"step": 210
},
{
"epoch": 0.09,
"learning_rate": 9.472527472527473e-05,
"loss": 1.6332,
"step": 220
},
{
"epoch": 0.1,
"learning_rate": 9.428571428571429e-05,
"loss": 1.5944,
"step": 230
},
{
"epoch": 0.1,
"learning_rate": 9.384615384615386e-05,
"loss": 1.5789,
"step": 240
},
{
"epoch": 0.11,
"learning_rate": 9.340659340659341e-05,
"loss": 1.5583,
"step": 250
},
{
"epoch": 0.11,
"learning_rate": 9.296703296703297e-05,
"loss": 1.5485,
"step": 260
},
{
"epoch": 0.11,
"learning_rate": 9.252747252747253e-05,
"loss": 1.5124,
"step": 270
},
{
"epoch": 0.12,
"learning_rate": 9.208791208791209e-05,
"loss": 1.5004,
"step": 280
},
{
"epoch": 0.12,
"learning_rate": 9.164835164835165e-05,
"loss": 1.5042,
"step": 290
},
{
"epoch": 0.13,
"learning_rate": 9.12087912087912e-05,
"loss": 1.4991,
"step": 300
},
{
"epoch": 0.13,
"learning_rate": 9.076923076923078e-05,
"loss": 1.4692,
"step": 310
},
{
"epoch": 0.13,
"learning_rate": 9.032967032967033e-05,
"loss": 1.4628,
"step": 320
},
{
"epoch": 0.14,
"learning_rate": 8.989010989010989e-05,
"loss": 1.4481,
"step": 330
},
{
"epoch": 0.14,
"learning_rate": 8.945054945054946e-05,
"loss": 1.454,
"step": 340
},
{
"epoch": 0.15,
"learning_rate": 8.901098901098901e-05,
"loss": 1.4409,
"step": 350
},
{
"epoch": 0.15,
"learning_rate": 8.857142857142857e-05,
"loss": 1.4339,
"step": 360
},
{
"epoch": 0.16,
"learning_rate": 8.813186813186814e-05,
"loss": 1.4262,
"step": 370
},
{
"epoch": 0.16,
"learning_rate": 8.76923076923077e-05,
"loss": 1.4298,
"step": 380
},
{
"epoch": 0.16,
"learning_rate": 8.725274725274725e-05,
"loss": 1.4247,
"step": 390
},
{
"epoch": 0.17,
"learning_rate": 8.681318681318682e-05,
"loss": 1.4191,
"step": 400
},
{
"epoch": 0.17,
"learning_rate": 8.637362637362638e-05,
"loss": 1.4156,
"step": 410
},
{
"epoch": 0.18,
"learning_rate": 8.593406593406593e-05,
"loss": 1.4102,
"step": 420
},
{
"epoch": 0.18,
"learning_rate": 8.54945054945055e-05,
"loss": 1.4121,
"step": 430
},
{
"epoch": 0.19,
"learning_rate": 8.505494505494506e-05,
"loss": 1.4033,
"step": 440
},
{
"epoch": 0.19,
"learning_rate": 8.461538461538461e-05,
"loss": 1.3986,
"step": 450
},
{
"epoch": 0.19,
"learning_rate": 8.417582417582419e-05,
"loss": 1.3935,
"step": 460
},
{
"epoch": 0.2,
"learning_rate": 8.373626373626374e-05,
"loss": 1.399,
"step": 470
},
{
"epoch": 0.2,
"learning_rate": 8.32967032967033e-05,
"loss": 1.3888,
"step": 480
},
{
"epoch": 0.21,
"learning_rate": 8.285714285714287e-05,
"loss": 1.3958,
"step": 490
},
{
"epoch": 0.21,
"learning_rate": 8.241758241758242e-05,
"loss": 1.3758,
"step": 500
},
{
"epoch": 0.21,
"learning_rate": 8.197802197802198e-05,
"loss": 1.3779,
"step": 510
},
{
"epoch": 0.22,
"learning_rate": 8.153846153846155e-05,
"loss": 1.3754,
"step": 520
},
{
"epoch": 0.22,
"learning_rate": 8.10989010989011e-05,
"loss": 1.3754,
"step": 530
},
{
"epoch": 0.23,
"learning_rate": 8.065934065934066e-05,
"loss": 1.3854,
"step": 540
},
{
"epoch": 0.23,
"learning_rate": 8.021978021978022e-05,
"loss": 1.3579,
"step": 550
}
],
"logging_steps": 10,
"max_steps": 2375,
"num_train_epochs": 1,
"save_steps": 50,
"total_flos": 271592074027008.0,
"trial_name": null,
"trial_params": null
}