{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 21790,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23,
      "learning_rate": 4.885268471776045e-06,
      "loss": 1.4166,
      "step": 500
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.770536943552089e-06,
      "loss": 0.6545,
      "step": 1000
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.655805415328132e-06,
      "loss": 0.5315,
      "step": 1500
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.541073887104177e-06,
      "loss": 0.4715,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "exact_match": 67.5,
      "f1": 75.87103174603179,
      "step": 2179
    },
    {
      "epoch": 1.15,
      "learning_rate": 4.4263423588802205e-06,
      "loss": 0.3414,
      "step": 2500
    },
    {
      "epoch": 1.38,
      "learning_rate": 4.311610830656265e-06,
      "loss": 0.2945,
      "step": 3000
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.196879302432309e-06,
      "loss": 0.3019,
      "step": 3500
    },
    {
      "epoch": 1.84,
      "learning_rate": 4.0821477742083525e-06,
      "loss": 0.3033,
      "step": 4000
    },
    {
      "epoch": 2.0,
      "exact_match": 68.33333333333333,
      "f1": 76.34986772486776,
      "step": 4358
    },
    {
      "epoch": 2.07,
      "learning_rate": 3.967416245984397e-06,
      "loss": 0.2622,
      "step": 4500
    },
    {
      "epoch": 2.29,
      "learning_rate": 3.852684717760441e-06,
      "loss": 0.1801,
      "step": 5000
    },
    {
      "epoch": 2.52,
      "learning_rate": 3.7379531895364852e-06,
      "loss": 0.1697,
      "step": 5500
    },
    {
      "epoch": 2.75,
      "learning_rate": 3.623221661312529e-06,
      "loss": 0.2118,
      "step": 6000
    },
    {
      "epoch": 2.98,
      "learning_rate": 3.508490133088573e-06,
      "loss": 0.1559,
      "step": 6500
    },
    {
      "epoch": 3.0,
      "exact_match": 67.91666666666667,
      "f1": 76.26147001147002,
      "step": 6537
    },
    {
      "epoch": 3.21,
      "learning_rate": 3.393758604864617e-06,
      "loss": 0.1068,
      "step": 7000
    },
    {
      "epoch": 3.44,
      "learning_rate": 3.2790270766406612e-06,
      "loss": 0.0951,
      "step": 7500
    },
    {
      "epoch": 3.67,
      "learning_rate": 3.1642955484167054e-06,
      "loss": 0.0986,
      "step": 8000
    },
    {
      "epoch": 3.9,
      "learning_rate": 3.049564020192749e-06,
      "loss": 0.1122,
      "step": 8500
    },
    {
      "epoch": 4.0,
      "exact_match": 67.5,
      "f1": 74.74419793169795,
      "step": 8716
    },
    {
      "epoch": 4.13,
      "learning_rate": 2.934832491968793e-06,
      "loss": 0.0811,
      "step": 9000
    },
    {
      "epoch": 4.36,
      "learning_rate": 2.8201009637448373e-06,
      "loss": 0.0546,
      "step": 9500
    },
    {
      "epoch": 4.59,
      "learning_rate": 2.7053694355208814e-06,
      "loss": 0.056,
      "step": 10000
    },
    {
      "epoch": 4.82,
      "learning_rate": 2.5906379072969255e-06,
      "loss": 0.0731,
      "step": 10500
    },
    {
      "epoch": 5.0,
      "exact_match": 70.41666666666667,
      "f1": 77.59175084175087,
      "step": 10895
    },
    {
      "epoch": 5.05,
      "learning_rate": 2.4759063790729696e-06,
      "loss": 0.0687,
      "step": 11000
    },
    {
      "epoch": 5.28,
      "learning_rate": 2.3611748508490133e-06,
      "loss": 0.0471,
      "step": 11500
    },
    {
      "epoch": 5.51,
      "learning_rate": 2.2464433226250574e-06,
      "loss": 0.0426,
      "step": 12000
    },
    {
      "epoch": 5.74,
      "learning_rate": 2.1317117944011015e-06,
      "loss": 0.0437,
      "step": 12500
    },
    {
      "epoch": 5.97,
      "learning_rate": 2.0169802661771456e-06,
      "loss": 0.0299,
      "step": 13000
    },
    {
      "epoch": 6.0,
      "exact_match": 66.66666666666667,
      "f1": 74.826330804272,
      "step": 13074
    },
    {
      "epoch": 6.2,
      "learning_rate": 1.9022487379531897e-06,
      "loss": 0.0232,
      "step": 13500
    },
    {
      "epoch": 6.42,
      "learning_rate": 1.7875172097292336e-06,
      "loss": 0.0221,
      "step": 14000
    },
    {
      "epoch": 6.65,
      "learning_rate": 1.672785681505278e-06,
      "loss": 0.0242,
      "step": 14500
    },
    {
      "epoch": 6.88,
      "learning_rate": 1.5580541532813219e-06,
      "loss": 0.0327,
      "step": 15000
    },
    {
      "epoch": 7.0,
      "exact_match": 67.08333333333333,
      "f1": 75.72919876963996,
      "step": 15253
    },
    {
      "epoch": 7.11,
      "learning_rate": 1.443322625057366e-06,
      "loss": 0.0207,
      "step": 15500
    },
    {
      "epoch": 7.34,
      "learning_rate": 1.3285910968334099e-06,
      "loss": 0.0243,
      "step": 16000
    },
    {
      "epoch": 7.57,
      "learning_rate": 1.213859568609454e-06,
      "loss": 0.0127,
      "step": 16500
    },
    {
      "epoch": 7.8,
      "learning_rate": 1.099128040385498e-06,
      "loss": 0.0189,
      "step": 17000
    },
    {
      "epoch": 8.0,
      "exact_match": 66.25,
      "f1": 73.67448496492618,
      "step": 17432
    },
    {
      "epoch": 8.03,
      "learning_rate": 9.84396512161542e-07,
      "loss": 0.0234,
      "step": 17500
    },
    {
      "epoch": 8.26,
      "learning_rate": 8.696649839375861e-07,
      "loss": 0.0041,
      "step": 18000
    },
    {
      "epoch": 8.49,
      "learning_rate": 7.549334557136302e-07,
      "loss": 0.0115,
      "step": 18500
    },
    {
      "epoch": 8.72,
      "learning_rate": 6.402019274896742e-07,
      "loss": 0.0159,
      "step": 19000
    },
    {
      "epoch": 8.95,
      "learning_rate": 5.254703992657182e-07,
      "loss": 0.0116,
      "step": 19500
    },
    {
      "epoch": 9.0,
      "exact_match": 69.16666666666667,
      "f1": 77.33799506593625,
      "step": 19611
    },
    {
      "epoch": 9.18,
      "learning_rate": 4.107388710417623e-07,
      "loss": 0.0096,
      "step": 20000
    },
    {
      "epoch": 9.41,
      "learning_rate": 2.9600734281780635e-07,
      "loss": 0.0057,
      "step": 20500
    },
    {
      "epoch": 9.64,
      "learning_rate": 1.8127581459385043e-07,
      "loss": 0.005,
      "step": 21000
    },
    {
      "epoch": 9.87,
      "learning_rate": 6.654428636989445e-08,
      "loss": 0.0042,
      "step": 21500
    },
    {
      "epoch": 10.0,
      "exact_match": 66.66666666666667,
      "f1": 73.91331557875677,
      "step": 21790
    },
    {
      "epoch": 10.0,
      "step": 21790,
      "total_flos": 1.3475301465871872e+17,
      "train_runtime": 17438.5747,
      "train_samples_per_second": 1.25
    }
  ],
  "max_steps": 21790,
  "num_train_epochs": 10,
  "total_flos": 1.3475301465871872e+17,
  "trial_name": null,
  "trial_params": null
}