bert-finetuned-squad / trainer_state.json
Complete Training. (commit a00f2c2)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"global_step": 33276,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 1.970188724606323e-05,
"loss": 2.6662,
"step": 500
},
{
"epoch": 0.09,
"learning_rate": 1.9401370357014065e-05,
"loss": 1.7064,
"step": 1000
},
{
"epoch": 0.14,
"learning_rate": 1.91008534679649e-05,
"loss": 1.5199,
"step": 1500
},
{
"epoch": 0.18,
"learning_rate": 1.8800937612693835e-05,
"loss": 1.3984,
"step": 2000
},
{
"epoch": 0.23,
"learning_rate": 1.850042072364467e-05,
"loss": 1.3264,
"step": 2500
},
{
"epoch": 0.27,
"learning_rate": 1.8199903834595505e-05,
"loss": 1.2529,
"step": 3000
},
{
"epoch": 0.32,
"learning_rate": 1.7899386945546344e-05,
"loss": 1.2787,
"step": 3500
},
{
"epoch": 0.36,
"learning_rate": 1.759887005649718e-05,
"loss": 1.232,
"step": 4000
},
{
"epoch": 0.41,
"learning_rate": 1.729835316744801e-05,
"loss": 1.2349,
"step": 4500
},
{
"epoch": 0.45,
"learning_rate": 1.6997836278398846e-05,
"loss": 1.1331,
"step": 5000
},
{
"epoch": 0.5,
"learning_rate": 1.669731938934968e-05,
"loss": 1.1264,
"step": 5500
},
{
"epoch": 0.54,
"learning_rate": 1.6396802500300516e-05,
"loss": 1.1499,
"step": 6000
},
{
"epoch": 0.59,
"learning_rate": 1.6096285611251354e-05,
"loss": 1.0859,
"step": 6500
},
{
"epoch": 0.63,
"learning_rate": 1.579576872220219e-05,
"loss": 1.1449,
"step": 7000
},
{
"epoch": 0.68,
"learning_rate": 1.5495251833153025e-05,
"loss": 1.1412,
"step": 7500
},
{
"epoch": 0.72,
"learning_rate": 1.5195335977881957e-05,
"loss": 1.1083,
"step": 8000
},
{
"epoch": 0.77,
"learning_rate": 1.4894819088832793e-05,
"loss": 1.0436,
"step": 8500
},
{
"epoch": 0.81,
"learning_rate": 1.459430219978363e-05,
"loss": 1.0928,
"step": 9000
},
{
"epoch": 0.86,
"learning_rate": 1.4293785310734465e-05,
"loss": 1.0547,
"step": 9500
},
{
"epoch": 0.9,
"learning_rate": 1.3993869455463399e-05,
"loss": 1.0276,
"step": 10000
},
{
"epoch": 0.95,
"learning_rate": 1.3693953600192331e-05,
"loss": 1.0272,
"step": 10500
},
{
"epoch": 0.99,
"learning_rate": 1.3393436711143167e-05,
"loss": 1.0437,
"step": 11000
},
{
"epoch": 1.04,
"learning_rate": 1.3092919822094003e-05,
"loss": 0.8151,
"step": 11500
},
{
"epoch": 1.08,
"learning_rate": 1.2792402933044839e-05,
"loss": 0.7652,
"step": 12000
},
{
"epoch": 1.13,
"learning_rate": 1.249308811155187e-05,
"loss": 0.7361,
"step": 12500
},
{
"epoch": 1.17,
"learning_rate": 1.2192571222502706e-05,
"loss": 0.7696,
"step": 13000
},
{
"epoch": 1.22,
"learning_rate": 1.1892054333453542e-05,
"loss": 0.7398,
"step": 13500
},
{
"epoch": 1.26,
"learning_rate": 1.1591537444404378e-05,
"loss": 0.7561,
"step": 14000
},
{
"epoch": 1.31,
"learning_rate": 1.1291020555355213e-05,
"loss": 0.77,
"step": 14500
},
{
"epoch": 1.35,
"learning_rate": 1.0990503666306048e-05,
"loss": 0.7927,
"step": 15000
},
{
"epoch": 1.4,
"learning_rate": 1.0689986777256883e-05,
"loss": 0.7343,
"step": 15500
},
{
"epoch": 1.44,
"learning_rate": 1.0389469888207716e-05,
"loss": 0.7739,
"step": 16000
},
{
"epoch": 1.49,
"learning_rate": 1.0088952999158555e-05,
"loss": 0.7592,
"step": 16500
},
{
"epoch": 1.53,
"learning_rate": 9.78843611010939e-06,
"loss": 0.7158,
"step": 17000
},
{
"epoch": 1.58,
"learning_rate": 9.487919221060224e-06,
"loss": 0.7616,
"step": 17500
},
{
"epoch": 1.62,
"learning_rate": 9.188003365789158e-06,
"loss": 0.7447,
"step": 18000
},
{
"epoch": 1.67,
"learning_rate": 8.887486476739993e-06,
"loss": 0.7607,
"step": 18500
},
{
"epoch": 1.71,
"learning_rate": 8.586969587690828e-06,
"loss": 0.7451,
"step": 19000
},
{
"epoch": 1.76,
"learning_rate": 8.286452698641663e-06,
"loss": 0.7547,
"step": 19500
},
{
"epoch": 1.8,
"learning_rate": 7.9859358095925e-06,
"loss": 0.7424,
"step": 20000
},
{
"epoch": 1.85,
"learning_rate": 7.685418920543335e-06,
"loss": 0.7562,
"step": 20500
},
{
"epoch": 1.89,
"learning_rate": 7.385503065272269e-06,
"loss": 0.7334,
"step": 21000
},
{
"epoch": 1.94,
"learning_rate": 7.084986176223104e-06,
"loss": 0.7318,
"step": 21500
},
{
"epoch": 1.98,
"learning_rate": 6.784469287173939e-06,
"loss": 0.7416,
"step": 22000
},
{
"epoch": 2.03,
"learning_rate": 6.483952398124775e-06,
"loss": 0.602,
"step": 22500
},
{
"epoch": 2.07,
"learning_rate": 6.18343550907561e-06,
"loss": 0.5129,
"step": 23000
},
{
"epoch": 2.12,
"learning_rate": 5.8829186200264455e-06,
"loss": 0.5119,
"step": 23500
},
{
"epoch": 2.16,
"learning_rate": 5.5824017309772815e-06,
"loss": 0.5449,
"step": 24000
},
{
"epoch": 2.21,
"learning_rate": 5.283086909484313e-06,
"loss": 0.5218,
"step": 24500
},
{
"epoch": 2.25,
"learning_rate": 4.982570020435149e-06,
"loss": 0.5353,
"step": 25000
},
{
"epoch": 2.3,
"learning_rate": 4.682053131385984e-06,
"loss": 0.5039,
"step": 25500
},
{
"epoch": 2.34,
"learning_rate": 4.382137276114918e-06,
"loss": 0.5112,
"step": 26000
},
{
"epoch": 2.39,
"learning_rate": 4.081620387065753e-06,
"loss": 0.5133,
"step": 26500
},
{
"epoch": 2.43,
"learning_rate": 3.781103498016589e-06,
"loss": 0.5225,
"step": 27000
},
{
"epoch": 2.48,
"learning_rate": 3.4805866089674246e-06,
"loss": 0.5131,
"step": 27500
},
{
"epoch": 2.52,
"learning_rate": 3.1800697199182593e-06,
"loss": 0.5408,
"step": 28000
},
{
"epoch": 2.57,
"learning_rate": 2.879552830869095e-06,
"loss": 0.4938,
"step": 28500
},
{
"epoch": 2.61,
"learning_rate": 2.5790359418199305e-06,
"loss": 0.5297,
"step": 29000
},
{
"epoch": 2.66,
"learning_rate": 2.278519052770766e-06,
"loss": 0.5052,
"step": 29500
},
{
"epoch": 2.7,
"learning_rate": 1.978002163721601e-06,
"loss": 0.4977,
"step": 30000
},
{
"epoch": 2.75,
"learning_rate": 1.6774852746724368e-06,
"loss": 0.5051,
"step": 30500
},
{
"epoch": 2.79,
"learning_rate": 1.3769683856232721e-06,
"loss": 0.5007,
"step": 31000
},
{
"epoch": 2.84,
"learning_rate": 1.0764514965741075e-06,
"loss": 0.5179,
"step": 31500
},
{
"epoch": 2.88,
"learning_rate": 7.75934607524943e-07,
"loss": 0.4834,
"step": 32000
},
{
"epoch": 2.93,
"learning_rate": 4.760187522538767e-07,
"loss": 0.4984,
"step": 32500
},
{
"epoch": 2.98,
"learning_rate": 1.7550186320471213e-07,
"loss": 0.4986,
"step": 33000
}
],
"max_steps": 33276,
"num_train_epochs": 3,
"total_flos": 5.216534983896422e+16,
"trial_name": null,
"trial_params": null
}
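
The log_history above records the training loss and the linearly decaying learning rate every 500 optimizer steps across the 33,276-step, 3-epoch run. A minimal sketch of how this file could be inspected locally, assuming it has been downloaded as trainer_state.json in the working directory and that matplotlib is installed (both are assumptions, not part of the repository itself):

import json

import matplotlib.pyplot as plt

# Hypothetical local path; adjust to wherever trainer_state.json was saved.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH, "r", encoding="utf-8") as f:
    state = json.load(f)

# Keep only the entries that carry a training loss (in this file, all of them);
# each one also records the step and the learning rate at that step.
history = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in history]
losses = [entry["loss"] for entry in history]
lrs = [entry["learning_rate"] for entry in history]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))

# Training loss drops from ~2.67 at step 500 to ~0.50 near step 33,000.
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_loss.set_title("Loss over %d steps" % state["global_step"])

# Learning rate decays linearly from ~2e-05 toward zero at max_steps.
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
ax_lr.set_title("Linear LR schedule")

fig.tight_layout()
plt.show()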