roberta-base-generics-mlm / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9372888823615804,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 1.9510451852939737e-05,
      "loss": 2.5338,
      "step": 500
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.9020903705879475e-05,
      "loss": 2.4335,
      "step": 1000
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.853135555881921e-05,
      "loss": 2.3284,
      "step": 1500
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.8041807411758948e-05,
      "loss": 2.3165,
      "step": 2000
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.7552259264698686e-05,
      "loss": 2.2819,
      "step": 2500
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.706271111763842e-05,
      "loss": 2.2629,
      "step": 3000
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.657316297057816e-05,
      "loss": 2.2099,
      "step": 3500
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.6083614823517894e-05,
      "loss": 2.2439,
      "step": 4000
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.5594066676457633e-05,
      "loss": 2.1858,
      "step": 4500
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.5104518529397367e-05,
      "loss": 2.1387,
      "step": 5000
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.4614970382337104e-05,
      "loss": 2.1852,
      "step": 5500
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.412542223527684e-05,
      "loss": 2.1869,
      "step": 6000
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.3635874088216576e-05,
      "loss": 2.0771,
      "step": 6500
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.0530622005462646,
      "eval_runtime": 23.6575,
      "eval_samples_per_second": 255.817,
      "step": 6809
    },
    {
      "epoch": 1.03,
      "learning_rate": 1.3146325941156315e-05,
      "loss": 2.1258,
      "step": 7000
    },
    {
      "epoch": 1.1,
      "learning_rate": 1.2656777794096052e-05,
      "loss": 2.0593,
      "step": 7500
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.2167229647035787e-05,
      "loss": 2.0118,
      "step": 8000
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.1677681499975524e-05,
      "loss": 1.9987,
      "step": 8500
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.118813335291526e-05,
      "loss": 2.0134,
      "step": 9000
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.0698585205854997e-05,
      "loss": 1.9785,
      "step": 9500
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.0209037058794733e-05,
      "loss": 1.928,
      "step": 10000
    },
    {
      "epoch": 1.54,
      "learning_rate": 9.71948891173447e-06,
      "loss": 1.9685,
      "step": 10500
    },
    {
      "epoch": 1.62,
      "learning_rate": 9.229940764674206e-06,
      "loss": 1.8942,
      "step": 11000
    },
    {
      "epoch": 1.69,
      "learning_rate": 8.740392617613943e-06,
      "loss": 1.9443,
      "step": 11500
    },
    {
      "epoch": 1.76,
      "learning_rate": 8.25084447055368e-06,
      "loss": 1.8947,
      "step": 12000
    },
    {
      "epoch": 1.84,
      "learning_rate": 7.761296323493416e-06,
      "loss": 1.9557,
      "step": 12500
    },
    {
      "epoch": 1.91,
      "learning_rate": 7.271748176433154e-06,
      "loss": 1.9083,
      "step": 13000
    },
    {
      "epoch": 1.98,
      "learning_rate": 6.782200029372889e-06,
      "loss": 1.966,
      "step": 13500
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.8375927209854126,
      "eval_runtime": 23.5457,
      "eval_samples_per_second": 257.032,
      "step": 13618
    },
    {
      "epoch": 2.06,
      "learning_rate": 6.292651882312626e-06,
      "loss": 1.943,
      "step": 14000
    },
    {
      "epoch": 2.13,
      "learning_rate": 5.8031037352523625e-06,
      "loss": 1.7706,
      "step": 14500
    },
    {
      "epoch": 2.2,
      "learning_rate": 5.313555588192098e-06,
      "loss": 1.8079,
      "step": 15000
    },
    {
      "epoch": 2.28,
      "learning_rate": 4.824007441131836e-06,
      "loss": 1.9021,
      "step": 15500
    },
    {
      "epoch": 2.35,
      "learning_rate": 4.334459294071572e-06,
      "loss": 1.8314,
      "step": 16000
    },
    {
      "epoch": 2.42,
      "learning_rate": 3.844911147011309e-06,
      "loss": 1.7882,
      "step": 16500
    },
    {
      "epoch": 2.5,
      "learning_rate": 3.3553629999510454e-06,
      "loss": 1.8472,
      "step": 17000
    },
    {
      "epoch": 2.57,
      "learning_rate": 2.865814852890782e-06,
      "loss": 1.8157,
      "step": 17500
    },
    {
      "epoch": 2.64,
      "learning_rate": 2.3762667058305186e-06,
      "loss": 1.8546,
      "step": 18000
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.8867185587702552e-06,
      "loss": 1.8019,
      "step": 18500
    },
    {
      "epoch": 2.79,
      "learning_rate": 1.3971704117099918e-06,
      "loss": 1.7841,
      "step": 19000
    },
    {
      "epoch": 2.86,
      "learning_rate": 9.076222646497284e-07,
      "loss": 1.7894,
      "step": 19500
    },
    {
      "epoch": 2.94,
      "learning_rate": 4.1807411758946493e-07,
      "loss": 1.8128,
      "step": 20000
    }
  ],
  "max_steps": 20427,
  "num_train_epochs": 3,
  "total_flos": 2165411575408158.0,
  "trial_name": null,
  "trial_params": null
}
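
A minimal sketch of how one might inspect this state file, assuming it has been downloaded locally as "trainer_state.json" (the filename and plotting-free summary are illustrative, not part of the repository):

import json

# Load the Hugging Face Trainer state and summarize the logged losses.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        # Training-loss entries are logged every 500 optimizer steps here.
        print(f"step {entry['step']:>6}: train loss {entry['loss']:.4f}")
    elif "eval_loss" in entry:
        # Evaluation entries appear once per epoch (steps 6809 and 13618 above).
        print(f"step {entry['step']:>6}: eval loss  {entry['eval_loss']:.4f} (epoch {entry['epoch']})")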