exKcBERT-kowiki / trainer_state.json
Commit dae012c: 1epoch ckpt
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9960159362549801,
"global_step": 51000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.9999511756893996e-05,
"loss": 10.8875,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 4.975587844699633e-05,
"loss": 5.458,
"step": 500
},
{
"epoch": 0.02,
"learning_rate": 4.951175689399266e-05,
"loss": 4.5892,
"step": 1000
},
{
"epoch": 0.03,
"learning_rate": 4.926763534098899e-05,
"loss": 4.4037,
"step": 1500
},
{
"epoch": 0.04,
"learning_rate": 4.9023513787985316e-05,
"loss": 4.2792,
"step": 2000
},
{
"epoch": 0.05,
"learning_rate": 4.8779392234981645e-05,
"loss": 4.201,
"step": 2500
},
{
"epoch": 0.06,
"learning_rate": 4.853527068197797e-05,
"loss": 4.1614,
"step": 3000
},
{
"epoch": 0.07,
"learning_rate": 4.82911491289743e-05,
"loss": 4.1349,
"step": 3500
},
{
"epoch": 0.08,
"learning_rate": 4.804702757597063e-05,
"loss": 4.0883,
"step": 4000
},
{
"epoch": 0.09,
"learning_rate": 4.780290602296696e-05,
"loss": 4.0732,
"step": 4500
},
{
"epoch": 0.1,
"learning_rate": 4.755878446996329e-05,
"loss": 4.048,
"step": 5000
},
{
"epoch": 0.11,
"learning_rate": 4.7314662916959616e-05,
"loss": 4.0258,
"step": 5500
},
{
"epoch": 0.12,
"learning_rate": 4.7070541363955944e-05,
"loss": 4.0192,
"step": 6000
},
{
"epoch": 0.13,
"learning_rate": 4.682641981095227e-05,
"loss": 4.0183,
"step": 6500
},
{
"epoch": 0.14,
"learning_rate": 4.65822982579486e-05,
"loss": 3.9832,
"step": 7000
},
{
"epoch": 0.15,
"learning_rate": 4.633817670494493e-05,
"loss": 3.9937,
"step": 7500
},
{
"epoch": 0.16,
"learning_rate": 4.609405515194126e-05,
"loss": 3.9712,
"step": 8000
},
{
"epoch": 0.17,
"learning_rate": 4.5849933598937586e-05,
"loss": 3.9645,
"step": 8500
},
{
"epoch": 0.18,
"learning_rate": 4.5605812045933915e-05,
"loss": 3.951,
"step": 9000
},
{
"epoch": 0.19,
"learning_rate": 4.536169049293024e-05,
"loss": 3.9571,
"step": 9500
},
{
"epoch": 0.2,
"learning_rate": 4.511756893992657e-05,
"loss": 3.9625,
"step": 10000
},
{
"epoch": 0.21,
"learning_rate": 4.48734473869229e-05,
"loss": 3.9393,
"step": 10500
},
{
"epoch": 0.21,
"learning_rate": 4.462932583391923e-05,
"loss": 3.9326,
"step": 11000
},
{
"epoch": 0.22,
"learning_rate": 4.438520428091556e-05,
"loss": 3.9223,
"step": 11500
},
{
"epoch": 0.23,
"learning_rate": 4.4141082727911886e-05,
"loss": 3.9397,
"step": 12000
},
{
"epoch": 0.24,
"learning_rate": 4.3896961174908214e-05,
"loss": 3.9159,
"step": 12500
},
{
"epoch": 0.25,
"learning_rate": 4.365283962190454e-05,
"loss": 3.905,
"step": 13000
},
{
"epoch": 0.26,
"learning_rate": 4.3408718068900864e-05,
"loss": 3.8848,
"step": 13500
},
{
"epoch": 0.27,
"learning_rate": 4.31645965158972e-05,
"loss": 3.887,
"step": 14000
},
{
"epoch": 0.28,
"learning_rate": 4.292047496289353e-05,
"loss": 3.8847,
"step": 14500
},
{
"epoch": 0.29,
"learning_rate": 4.267635340988985e-05,
"loss": 3.8797,
"step": 15000
},
{
"epoch": 0.3,
"learning_rate": 4.2432231856886185e-05,
"loss": 3.8924,
"step": 15500
},
{
"epoch": 0.31,
"learning_rate": 4.2188110303882513e-05,
"loss": 3.9005,
"step": 16000
},
{
"epoch": 0.32,
"learning_rate": 4.1943988750878835e-05,
"loss": 3.858,
"step": 16500
},
{
"epoch": 0.33,
"learning_rate": 4.169986719787517e-05,
"loss": 3.8763,
"step": 17000
},
{
"epoch": 0.34,
"learning_rate": 4.14557456448715e-05,
"loss": 3.8808,
"step": 17500
},
{
"epoch": 0.35,
"learning_rate": 4.121162409186782e-05,
"loss": 3.8721,
"step": 18000
},
{
"epoch": 0.36,
"learning_rate": 4.0967502538864156e-05,
"loss": 3.8733,
"step": 18500
},
{
"epoch": 0.37,
"learning_rate": 4.0723380985860484e-05,
"loss": 3.8694,
"step": 19000
},
{
"epoch": 0.38,
"learning_rate": 4.0479259432856806e-05,
"loss": 3.8685,
"step": 19500
},
{
"epoch": 0.39,
"learning_rate": 4.023513787985314e-05,
"loss": 3.8451,
"step": 20000
},
{
"epoch": 0.4,
"learning_rate": 3.999101632684947e-05,
"loss": 3.8267,
"step": 20500
},
{
"epoch": 0.41,
"learning_rate": 3.974689477384579e-05,
"loss": 3.8492,
"step": 21000
},
{
"epoch": 0.42,
"learning_rate": 3.950277322084212e-05,
"loss": 3.8322,
"step": 21500
},
{
"epoch": 0.43,
"learning_rate": 3.9258651667838455e-05,
"loss": 3.8824,
"step": 22000
},
{
"epoch": 0.44,
"learning_rate": 3.901453011483478e-05,
"loss": 3.8454,
"step": 22500
},
{
"epoch": 0.45,
"learning_rate": 3.8770408561831105e-05,
"loss": 3.8463,
"step": 23000
},
{
"epoch": 0.46,
"learning_rate": 3.852628700882744e-05,
"loss": 3.8457,
"step": 23500
},
{
"epoch": 0.47,
"learning_rate": 3.828216545582376e-05,
"loss": 3.8437,
"step": 24000
},
{
"epoch": 0.48,
"learning_rate": 3.803804390282009e-05,
"loss": 3.8046,
"step": 24500
},
{
"epoch": 0.49,
"learning_rate": 3.7793922349816426e-05,
"loss": 3.8114,
"step": 25000
},
{
"epoch": 0.5,
"learning_rate": 3.754980079681275e-05,
"loss": 3.8322,
"step": 25500
},
{
"epoch": 0.51,
"learning_rate": 3.7305679243809076e-05,
"loss": 3.797,
"step": 26000
},
{
"epoch": 0.52,
"learning_rate": 3.706155769080541e-05,
"loss": 3.855,
"step": 26500
},
{
"epoch": 0.53,
"learning_rate": 3.681743613780173e-05,
"loss": 3.8149,
"step": 27000
},
{
"epoch": 0.54,
"learning_rate": 3.657331458479806e-05,
"loss": 3.8168,
"step": 27500
},
{
"epoch": 0.55,
"learning_rate": 3.63291930317944e-05,
"loss": 3.8025,
"step": 28000
},
{
"epoch": 0.56,
"learning_rate": 3.6085071478790725e-05,
"loss": 3.8156,
"step": 28500
},
{
"epoch": 0.57,
"learning_rate": 3.584094992578705e-05,
"loss": 3.8241,
"step": 29000
},
{
"epoch": 0.58,
"learning_rate": 3.559682837278338e-05,
"loss": 3.7813,
"step": 29500
},
{
"epoch": 0.59,
"learning_rate": 3.535270681977971e-05,
"loss": 3.7708,
"step": 30000
},
{
"epoch": 0.6,
"learning_rate": 3.510858526677603e-05,
"loss": 3.7432,
"step": 30500
},
{
"epoch": 0.61,
"learning_rate": 3.486446371377236e-05,
"loss": 3.7315,
"step": 31000
},
{
"epoch": 0.62,
"learning_rate": 3.4620342160768696e-05,
"loss": 3.7491,
"step": 31500
},
{
"epoch": 0.62,
"learning_rate": 3.437622060776502e-05,
"loss": 3.7062,
"step": 32000
},
{
"epoch": 0.63,
"learning_rate": 3.4132099054761346e-05,
"loss": 3.7109,
"step": 32500
},
{
"epoch": 0.64,
"learning_rate": 3.388797750175768e-05,
"loss": 3.7077,
"step": 33000
},
{
"epoch": 0.65,
"learning_rate": 3.3643855948754e-05,
"loss": 3.6941,
"step": 33500
},
{
"epoch": 0.66,
"learning_rate": 3.339973439575033e-05,
"loss": 3.7062,
"step": 34000
},
{
"epoch": 0.67,
"learning_rate": 3.315561284274667e-05,
"loss": 3.6663,
"step": 34500
},
{
"epoch": 0.68,
"learning_rate": 3.291149128974299e-05,
"loss": 3.6651,
"step": 35000
},
{
"epoch": 0.69,
"learning_rate": 3.266736973673932e-05,
"loss": 3.6516,
"step": 35500
},
{
"epoch": 0.7,
"learning_rate": 3.242324818373565e-05,
"loss": 3.6591,
"step": 36000
},
{
"epoch": 0.71,
"learning_rate": 3.2179126630731974e-05,
"loss": 3.648,
"step": 36500
},
{
"epoch": 0.72,
"learning_rate": 3.19350050777283e-05,
"loss": 3.6457,
"step": 37000
},
{
"epoch": 0.73,
"learning_rate": 3.169088352472464e-05,
"loss": 3.6374,
"step": 37500
},
{
"epoch": 0.74,
"learning_rate": 3.144676197172096e-05,
"loss": 3.6312,
"step": 38000
},
{
"epoch": 0.75,
"learning_rate": 3.120264041871729e-05,
"loss": 3.6071,
"step": 38500
},
{
"epoch": 0.76,
"learning_rate": 3.0958518865713616e-05,
"loss": 3.5633,
"step": 39000
},
{
"epoch": 0.77,
"learning_rate": 3.0714397312709945e-05,
"loss": 3.5886,
"step": 39500
},
{
"epoch": 0.78,
"learning_rate": 3.0470275759706273e-05,
"loss": 3.5567,
"step": 40000
},
{
"epoch": 0.79,
"learning_rate": 3.02261542067026e-05,
"loss": 3.5192,
"step": 40500
},
{
"epoch": 0.8,
"learning_rate": 2.9982032653698933e-05,
"loss": 3.5032,
"step": 41000
},
{
"epoch": 0.81,
"learning_rate": 2.973791110069526e-05,
"loss": 3.4983,
"step": 41500
},
{
"epoch": 0.82,
"learning_rate": 2.9493789547691587e-05,
"loss": 3.485,
"step": 42000
},
{
"epoch": 0.83,
"learning_rate": 2.924966799468792e-05,
"loss": 3.4727,
"step": 42500
},
{
"epoch": 0.84,
"learning_rate": 2.9005546441684244e-05,
"loss": 3.44,
"step": 43000
},
{
"epoch": 0.85,
"learning_rate": 2.8761424888680572e-05,
"loss": 3.4306,
"step": 43500
},
{
"epoch": 0.86,
"learning_rate": 2.8517303335676904e-05,
"loss": 3.3684,
"step": 44000
},
{
"epoch": 0.87,
"learning_rate": 2.827318178267323e-05,
"loss": 3.354,
"step": 44500
},
{
"epoch": 0.88,
"learning_rate": 2.8029060229669558e-05,
"loss": 3.3174,
"step": 45000
},
{
"epoch": 0.89,
"learning_rate": 2.778493867666589e-05,
"loss": 3.2533,
"step": 45500
},
{
"epoch": 0.9,
"learning_rate": 2.7540817123662215e-05,
"loss": 3.2496,
"step": 46000
},
{
"epoch": 0.91,
"learning_rate": 2.7296695570658543e-05,
"loss": 3.2356,
"step": 46500
},
{
"epoch": 0.92,
"learning_rate": 2.7052574017654875e-05,
"loss": 3.2401,
"step": 47000
},
{
"epoch": 0.93,
"learning_rate": 2.68084524646512e-05,
"loss": 3.2198,
"step": 47500
},
{
"epoch": 0.94,
"learning_rate": 2.656433091164753e-05,
"loss": 3.174,
"step": 48000
},
{
"epoch": 0.95,
"learning_rate": 2.6320209358643854e-05,
"loss": 3.1457,
"step": 48500
},
{
"epoch": 0.96,
"learning_rate": 2.6076087805640186e-05,
"loss": 3.1575,
"step": 49000
},
{
"epoch": 0.97,
"learning_rate": 2.5831966252636514e-05,
"loss": 3.1276,
"step": 49500
},
{
"epoch": 0.98,
"learning_rate": 2.558784469963284e-05,
"loss": 3.0936,
"step": 50000
},
{
"epoch": 0.99,
"learning_rate": 2.534372314662917e-05,
"loss": 3.0638,
"step": 50500
},
{
"epoch": 1.0,
"learning_rate": 2.50996015936255e-05,
"loss": 3.0411,
"step": 51000
}
],
"max_steps": 102408,
"num_train_epochs": 2,
"total_flos": 2.469751052832e+17,
"trial_name": null,
"trial_params": null
}
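
The file above is the raw state dumped by the Hugging Face Trainer at this checkpoint: the loss falls from about 10.89 at step 1 to about 3.04 at step 51,000 (epoch ~0.996 of 2), and the logged learning rates are consistent with a linear decay from roughly 5e-5 toward 0 over max_steps (102,408). Below is a minimal Python sketch for inspecting such a file; the local path and the 5e-5 peak learning rate are assumptions inferred from the logged values, not settings recorded in this file.

import json

# Load the trainer state produced by the Hugging Face Trainer
# (the local path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
max_steps = state["max_steps"]

# Last logged entry in this checkpoint: step 51000, loss ~3.04.
last = history[-1]
print(f"step {last['step']}: loss={last['loss']}, lr={last['learning_rate']:.3e}")

# Check the hypothesis that the schedule is linear decay from a 5e-5 peak to 0.
peak_lr = 5e-5  # assumed peak, inferred from the logged values
for entry in history:
    expected = peak_lr * (1 - entry["step"] / max_steps)
    assert abs(expected - entry["learning_rate"]) < 1e-9, entry
print("learning rates match linear decay from ~5e-5 over max_steps")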