{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9639762881896945,
"global_step": 32500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 4.9240006079951365e-05,
"loss": 3.2149,
"step": 500
},
{
"epoch": 0.09,
"learning_rate": 4.848001215990272e-05,
"loss": 2.0552,
"step": 1000
},
{
"epoch": 0.14,
"learning_rate": 4.772001823985408e-05,
"loss": 1.7909,
"step": 1500
},
{
"epoch": 0.18,
"learning_rate": 4.6960024319805444e-05,
"loss": 1.5849,
"step": 2000
},
{
"epoch": 0.23,
"learning_rate": 4.620003039975681e-05,
"loss": 1.5404,
"step": 2500
},
{
"epoch": 0.27,
"learning_rate": 4.544003647970816e-05,
"loss": 1.4643,
"step": 3000
},
{
"epoch": 0.32,
"learning_rate": 4.4680042559659524e-05,
"loss": 1.4016,
"step": 3500
},
{
"epoch": 0.36,
"learning_rate": 4.3920048639610886e-05,
"loss": 1.3873,
"step": 4000
},
{
"epoch": 0.41,
"learning_rate": 4.316005471956224e-05,
"loss": 1.4027,
"step": 4500
},
{
"epoch": 0.46,
"learning_rate": 4.240006079951361e-05,
"loss": 1.3512,
"step": 5000
},
{
"epoch": 0.5,
"learning_rate": 4.1640066879464966e-05,
"loss": 1.3113,
"step": 5500
},
{
"epoch": 0.55,
"learning_rate": 4.088007295941633e-05,
"loss": 1.321,
"step": 6000
},
{
"epoch": 0.59,
"learning_rate": 4.0120079039367684e-05,
"loss": 1.2949,
"step": 6500
},
{
"epoch": 0.64,
"learning_rate": 3.9360085119319046e-05,
"loss": 1.3101,
"step": 7000
},
{
"epoch": 0.68,
"learning_rate": 3.860009119927041e-05,
"loss": 1.2456,
"step": 7500
},
{
"epoch": 0.73,
"learning_rate": 3.784009727922177e-05,
"loss": 1.273,
"step": 8000
},
{
"epoch": 0.78,
"learning_rate": 3.708010335917313e-05,
"loss": 1.2514,
"step": 8500
},
{
"epoch": 0.82,
"learning_rate": 3.632010943912449e-05,
"loss": 1.2244,
"step": 9000
},
{
"epoch": 0.87,
"learning_rate": 3.556011551907585e-05,
"loss": 1.2002,
"step": 9500
},
{
"epoch": 0.91,
"learning_rate": 3.4800121599027206e-05,
"loss": 1.1955,
"step": 10000
},
{
"epoch": 0.96,
"learning_rate": 3.404012767897857e-05,
"loss": 1.1896,
"step": 10500
},
{
"epoch": 1.0,
"learning_rate": 3.328013375892993e-05,
"loss": 1.1549,
"step": 11000
},
{
"epoch": 1.05,
"learning_rate": 3.252013983888129e-05,
"loss": 0.9814,
"step": 11500
},
{
"epoch": 1.09,
"learning_rate": 3.176014591883265e-05,
"loss": 0.9973,
"step": 12000
},
{
"epoch": 1.14,
"learning_rate": 3.100015199878401e-05,
"loss": 1.0072,
"step": 12500
},
{
"epoch": 1.19,
"learning_rate": 3.024015807873537e-05,
"loss": 0.9585,
"step": 13000
},
{
"epoch": 1.23,
"learning_rate": 2.9480164158686728e-05,
"loss": 1.0121,
"step": 13500
},
{
"epoch": 1.28,
"learning_rate": 2.8720170238638093e-05,
"loss": 0.984,
"step": 14000
},
{
"epoch": 1.32,
"learning_rate": 2.7960176318589452e-05,
"loss": 0.9659,
"step": 14500
},
{
"epoch": 1.37,
"learning_rate": 2.7200182398540814e-05,
"loss": 1.0413,
"step": 15000
},
{
"epoch": 1.41,
"learning_rate": 2.6440188478492173e-05,
"loss": 0.9652,
"step": 15500
},
{
"epoch": 1.46,
"learning_rate": 2.5680194558443532e-05,
"loss": 0.9588,
"step": 16000
},
{
"epoch": 1.5,
"learning_rate": 2.4920200638394894e-05,
"loss": 1.0316,
"step": 16500
},
{
"epoch": 1.55,
"learning_rate": 2.4160206718346253e-05,
"loss": 1.0073,
"step": 17000
},
{
"epoch": 1.6,
"learning_rate": 2.3400212798297615e-05,
"loss": 0.9782,
"step": 17500
},
{
"epoch": 1.64,
"learning_rate": 2.2640218878248974e-05,
"loss": 1.0236,
"step": 18000
},
{
"epoch": 1.69,
"learning_rate": 2.1880224958200336e-05,
"loss": 0.938,
"step": 18500
},
{
"epoch": 1.73,
"learning_rate": 2.11202310381517e-05,
"loss": 0.9681,
"step": 19000
},
{
"epoch": 1.78,
"learning_rate": 2.0360237118103057e-05,
"loss": 0.9608,
"step": 19500
},
{
"epoch": 1.82,
"learning_rate": 1.9600243198054416e-05,
"loss": 0.991,
"step": 20000
},
{
"epoch": 1.87,
"learning_rate": 1.884024927800578e-05,
"loss": 0.9445,
"step": 20500
},
{
"epoch": 1.92,
"learning_rate": 1.8080255357957137e-05,
"loss": 0.9633,
"step": 21000
},
{
"epoch": 1.96,
"learning_rate": 1.7320261437908496e-05,
"loss": 0.9731,
"step": 21500
},
{
"epoch": 2.01,
"learning_rate": 1.6560267517859858e-05,
"loss": 0.8811,
"step": 22000
},
{
"epoch": 2.05,
"learning_rate": 1.580027359781122e-05,
"loss": 0.8006,
"step": 22500
},
{
"epoch": 2.1,
"learning_rate": 1.5040279677762578e-05,
"loss": 0.7736,
"step": 23000
},
{
"epoch": 2.14,
"learning_rate": 1.4280285757713938e-05,
"loss": 0.7877,
"step": 23500
},
{
"epoch": 2.19,
"learning_rate": 1.35202918376653e-05,
"loss": 0.772,
"step": 24000
},
{
"epoch": 2.23,
"learning_rate": 1.2760297917616659e-05,
"loss": 0.7695,
"step": 24500
},
{
"epoch": 2.28,
"learning_rate": 1.200030399756802e-05,
"loss": 0.7893,
"step": 25000
},
{
"epoch": 2.33,
"learning_rate": 1.1240310077519382e-05,
"loss": 0.7996,
"step": 25500
},
{
"epoch": 2.37,
"learning_rate": 1.048031615747074e-05,
"loss": 0.7658,
"step": 26000
},
{
"epoch": 2.42,
"learning_rate": 9.720322237422101e-06,
"loss": 0.7911,
"step": 26500
},
{
"epoch": 2.46,
"learning_rate": 8.960328317373462e-06,
"loss": 0.7829,
"step": 27000
},
{
"epoch": 2.51,
"learning_rate": 8.200334397324822e-06,
"loss": 0.772,
"step": 27500
},
{
"epoch": 2.55,
"learning_rate": 7.440340477276183e-06,
"loss": 0.7764,
"step": 28000
},
{
"epoch": 2.6,
"learning_rate": 6.680346557227543e-06,
"loss": 0.7846,
"step": 28500
},
{
"epoch": 2.64,
"learning_rate": 5.920352637178903e-06,
"loss": 0.7781,
"step": 29000
},
{
"epoch": 2.69,
"learning_rate": 5.160358717130263e-06,
"loss": 0.7588,
"step": 29500
},
{
"epoch": 2.74,
"learning_rate": 4.400364797081624e-06,
"loss": 0.7829,
"step": 30000
},
{
"epoch": 2.78,
"learning_rate": 3.6403708770329835e-06,
"loss": 0.7754,
"step": 30500
},
{
"epoch": 2.83,
"learning_rate": 2.880376956984344e-06,
"loss": 0.781,
"step": 31000
},
{
"epoch": 2.87,
"learning_rate": 2.1203830369357045e-06,
"loss": 0.779,
"step": 31500
},
{
"epoch": 2.92,
"learning_rate": 1.3603891168870648e-06,
"loss": 0.7928,
"step": 32000
},
{
"epoch": 2.96,
"learning_rate": 6.003951968384252e-07,
"loss": 0.7543,
"step": 32500
}
],
"max_steps": 32895,
"num_train_epochs": 3,
"total_flos": 7596206619746304.0,
"trial_name": null,
"trial_params": null
}
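
For reference, the log_history entries above can be inspected programmatically. The following is a minimal sketch, assuming the JSON above is saved locally as "trainer_state.json" and that matplotlib is installed; neither the file path nor the plotting step is part of the uploaded checkpoint itself.

import json

import matplotlib.pyplot as plt

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry records epoch, learning_rate, loss, and step.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]

# Plot the training loss curve over the logged optimizer steps.
plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("ELECTRA-base SQuAD fine-tuning loss")
plt.show()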