kcgpt2-dev / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7556620099821925,
"global_step": 70000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.96864889267889e-05,
"loss": 9.0385,
"step": 500
},
{
"epoch": 0.03,
"learning_rate": 4.937297785357779e-05,
"loss": 8.2693,
"step": 1000
},
{
"epoch": 0.04,
"learning_rate": 4.9059466780366684e-05,
"loss": 7.9544,
"step": 1500
},
{
"epoch": 0.05,
"learning_rate": 4.874595570715558e-05,
"loss": 7.7454,
"step": 2000
},
{
"epoch": 0.06,
"learning_rate": 4.843244463394448e-05,
"loss": 7.5746,
"step": 2500
},
{
"epoch": 0.08,
"learning_rate": 4.8118933560733366e-05,
"loss": 7.4312,
"step": 3000
},
{
"epoch": 0.09,
"learning_rate": 4.780542248752226e-05,
"loss": 7.3059,
"step": 3500
},
{
"epoch": 0.1,
"learning_rate": 4.749191141431116e-05,
"loss": 7.1942,
"step": 4000
},
{
"epoch": 0.11,
"learning_rate": 4.717840034110005e-05,
"loss": 7.0934,
"step": 4500
},
{
"epoch": 0.13,
"learning_rate": 4.6864889267888944e-05,
"loss": 7.0091,
"step": 5000
},
{
"epoch": 0.14,
"learning_rate": 4.6551378194677834e-05,
"loss": 6.9312,
"step": 5500
},
{
"epoch": 0.15,
"learning_rate": 4.623786712146673e-05,
"loss": 6.8625,
"step": 6000
},
{
"epoch": 0.16,
"learning_rate": 4.5924356048255626e-05,
"loss": 6.806,
"step": 6500
},
{
"epoch": 0.18,
"learning_rate": 4.561084497504452e-05,
"loss": 6.7588,
"step": 7000
},
{
"epoch": 0.19,
"learning_rate": 4.529733390183342e-05,
"loss": 6.7066,
"step": 7500
},
{
"epoch": 0.2,
"learning_rate": 4.498382282862231e-05,
"loss": 6.6641,
"step": 8000
},
{
"epoch": 0.21,
"learning_rate": 4.4670311755411204e-05,
"loss": 6.6211,
"step": 8500
},
{
"epoch": 0.23,
"learning_rate": 4.4356800682200094e-05,
"loss": 6.5876,
"step": 9000
},
{
"epoch": 0.24,
"learning_rate": 4.404328960898899e-05,
"loss": 6.5559,
"step": 9500
},
{
"epoch": 0.25,
"learning_rate": 4.3729778535777886e-05,
"loss": 6.524,
"step": 10000
},
{
"epoch": 0.26,
"learning_rate": 4.3416267462566776e-05,
"loss": 6.4988,
"step": 10500
},
{
"epoch": 0.28,
"learning_rate": 4.310275638935567e-05,
"loss": 6.4699,
"step": 11000
},
{
"epoch": 0.29,
"learning_rate": 4.278924531614457e-05,
"loss": 6.4434,
"step": 11500
},
{
"epoch": 0.3,
"learning_rate": 4.2475734242933464e-05,
"loss": 6.4212,
"step": 12000
},
{
"epoch": 0.31,
"learning_rate": 4.216222316972236e-05,
"loss": 6.3987,
"step": 12500
},
{
"epoch": 0.33,
"learning_rate": 4.184871209651125e-05,
"loss": 6.3757,
"step": 13000
},
{
"epoch": 0.34,
"learning_rate": 4.1535201023300146e-05,
"loss": 6.3612,
"step": 13500
},
{
"epoch": 0.35,
"learning_rate": 4.1221689950089036e-05,
"loss": 6.3416,
"step": 14000
},
{
"epoch": 0.36,
"learning_rate": 4.090817887687793e-05,
"loss": 6.3212,
"step": 14500
},
{
"epoch": 0.38,
"learning_rate": 4.059466780366683e-05,
"loss": 6.3015,
"step": 15000
},
{
"epoch": 0.39,
"learning_rate": 4.0281156730455724e-05,
"loss": 6.2876,
"step": 15500
},
{
"epoch": 0.4,
"learning_rate": 3.996764565724462e-05,
"loss": 6.2793,
"step": 16000
},
{
"epoch": 0.41,
"learning_rate": 3.965413458403351e-05,
"loss": 6.2627,
"step": 16500
},
{
"epoch": 0.43,
"learning_rate": 3.9340623510822406e-05,
"loss": 6.2455,
"step": 17000
},
{
"epoch": 0.44,
"learning_rate": 3.9027112437611296e-05,
"loss": 6.2309,
"step": 17500
},
{
"epoch": 0.45,
"learning_rate": 3.871360136440019e-05,
"loss": 6.2154,
"step": 18000
},
{
"epoch": 0.46,
"learning_rate": 3.840009029118909e-05,
"loss": 6.2052,
"step": 18500
},
{
"epoch": 0.48,
"learning_rate": 3.808657921797798e-05,
"loss": 6.1916,
"step": 19000
},
{
"epoch": 0.49,
"learning_rate": 3.7773068144766874e-05,
"loss": 6.1812,
"step": 19500
},
{
"epoch": 0.5,
"learning_rate": 3.745955707155577e-05,
"loss": 6.1724,
"step": 20000
},
{
"epoch": 0.51,
"learning_rate": 3.7146045998344666e-05,
"loss": 6.1634,
"step": 20500
},
{
"epoch": 0.53,
"learning_rate": 3.6832534925133556e-05,
"loss": 6.1548,
"step": 21000
},
{
"epoch": 0.54,
"learning_rate": 3.651902385192245e-05,
"loss": 6.1417,
"step": 21500
},
{
"epoch": 0.55,
"learning_rate": 3.620551277871135e-05,
"loss": 6.134,
"step": 22000
},
{
"epoch": 0.56,
"learning_rate": 3.589200170550024e-05,
"loss": 6.1222,
"step": 22500
},
{
"epoch": 0.58,
"learning_rate": 3.5578490632289134e-05,
"loss": 6.1157,
"step": 23000
},
{
"epoch": 0.59,
"learning_rate": 3.526497955907802e-05,
"loss": 6.1062,
"step": 23500
},
{
"epoch": 0.6,
"learning_rate": 3.4951468485866926e-05,
"loss": 6.0985,
"step": 24000
},
{
"epoch": 0.61,
"learning_rate": 3.463795741265582e-05,
"loss": 6.0918,
"step": 24500
},
{
"epoch": 0.63,
"learning_rate": 3.432444633944471e-05,
"loss": 6.0845,
"step": 25000
},
{
"epoch": 0.64,
"learning_rate": 3.401093526623361e-05,
"loss": 6.0771,
"step": 25500
},
{
"epoch": 0.65,
"learning_rate": 3.36974241930225e-05,
"loss": 6.0696,
"step": 26000
},
{
"epoch": 0.66,
"learning_rate": 3.3383913119811394e-05,
"loss": 6.0638,
"step": 26500
},
{
"epoch": 0.68,
"learning_rate": 3.307040204660028e-05,
"loss": 6.0535,
"step": 27000
},
{
"epoch": 0.69,
"learning_rate": 3.275689097338918e-05,
"loss": 6.0473,
"step": 27500
},
{
"epoch": 0.7,
"learning_rate": 3.2443379900178076e-05,
"loss": 6.042,
"step": 28000
},
{
"epoch": 0.71,
"learning_rate": 3.212986882696697e-05,
"loss": 6.0379,
"step": 28500
},
{
"epoch": 0.73,
"learning_rate": 3.181635775375587e-05,
"loss": 6.0337,
"step": 29000
},
{
"epoch": 0.74,
"learning_rate": 3.150284668054476e-05,
"loss": 6.0254,
"step": 29500
},
{
"epoch": 0.75,
"learning_rate": 3.1189335607333654e-05,
"loss": 6.0222,
"step": 30000
},
{
"epoch": 0.76,
"learning_rate": 3.087582453412255e-05,
"loss": 6.0072,
"step": 30500
},
{
"epoch": 0.78,
"learning_rate": 3.056231346091144e-05,
"loss": 6.005,
"step": 31000
},
{
"epoch": 0.79,
"learning_rate": 3.0248802387700336e-05,
"loss": 6.0058,
"step": 31500
},
{
"epoch": 0.8,
"learning_rate": 2.993529131448923e-05,
"loss": 5.9953,
"step": 32000
},
{
"epoch": 0.82,
"learning_rate": 2.9621780241278125e-05,
"loss": 5.993,
"step": 32500
},
{
"epoch": 0.83,
"learning_rate": 2.9308269168067014e-05,
"loss": 5.9817,
"step": 33000
},
{
"epoch": 0.84,
"learning_rate": 2.899475809485591e-05,
"loss": 5.978,
"step": 33500
},
{
"epoch": 0.85,
"learning_rate": 2.8681247021644807e-05,
"loss": 5.9807,
"step": 34000
},
{
"epoch": 0.87,
"learning_rate": 2.83677359484337e-05,
"loss": 5.9742,
"step": 34500
},
{
"epoch": 0.88,
"learning_rate": 2.8054224875222596e-05,
"loss": 5.9678,
"step": 35000
},
{
"epoch": 0.89,
"learning_rate": 2.7740713802011485e-05,
"loss": 5.9637,
"step": 35500
},
{
"epoch": 0.9,
"learning_rate": 2.7427202728800385e-05,
"loss": 5.962,
"step": 36000
},
{
"epoch": 0.92,
"learning_rate": 2.711369165558928e-05,
"loss": 5.9555,
"step": 36500
},
{
"epoch": 0.93,
"learning_rate": 2.680018058237817e-05,
"loss": 5.952,
"step": 37000
},
{
"epoch": 0.94,
"learning_rate": 2.6486669509167067e-05,
"loss": 5.9503,
"step": 37500
},
{
"epoch": 0.95,
"learning_rate": 2.617315843595596e-05,
"loss": 5.9442,
"step": 38000
},
{
"epoch": 0.97,
"learning_rate": 2.5859647362744856e-05,
"loss": 5.9362,
"step": 38500
},
{
"epoch": 0.98,
"learning_rate": 2.5546136289533745e-05,
"loss": 5.9394,
"step": 39000
},
{
"epoch": 0.99,
"learning_rate": 2.523262521632264e-05,
"loss": 5.9358,
"step": 39500
},
{
"epoch": 1.0,
"learning_rate": 2.4919114143111534e-05,
"loss": 5.931,
"step": 40000
},
{
"epoch": 1.02,
"learning_rate": 2.460560306990043e-05,
"loss": 5.9197,
"step": 40500
},
{
"epoch": 1.03,
"learning_rate": 2.4292091996689323e-05,
"loss": 5.914,
"step": 41000
},
{
"epoch": 1.04,
"learning_rate": 2.397858092347822e-05,
"loss": 5.9091,
"step": 41500
},
{
"epoch": 1.05,
"learning_rate": 2.3665069850267112e-05,
"loss": 5.911,
"step": 42000
},
{
"epoch": 1.07,
"learning_rate": 2.335155877705601e-05,
"loss": 5.9081,
"step": 42500
},
{
"epoch": 1.08,
"learning_rate": 2.30380477038449e-05,
"loss": 5.9025,
"step": 43000
},
{
"epoch": 1.09,
"learning_rate": 2.2724536630633794e-05,
"loss": 5.9001,
"step": 43500
},
{
"epoch": 1.1,
"learning_rate": 2.2411025557422687e-05,
"loss": 5.9017,
"step": 44000
},
{
"epoch": 1.12,
"learning_rate": 2.2097514484211583e-05,
"loss": 5.8982,
"step": 44500
},
{
"epoch": 1.13,
"learning_rate": 2.178400341100048e-05,
"loss": 5.8917,
"step": 45000
},
{
"epoch": 1.14,
"learning_rate": 2.1470492337789372e-05,
"loss": 5.8934,
"step": 45500
},
{
"epoch": 1.15,
"learning_rate": 2.1156981264578265e-05,
"loss": 5.8908,
"step": 46000
},
{
"epoch": 1.17,
"learning_rate": 2.0843470191367158e-05,
"loss": 5.891,
"step": 46500
},
{
"epoch": 1.18,
"learning_rate": 2.0529959118156054e-05,
"loss": 5.8866,
"step": 47000
},
{
"epoch": 1.19,
"learning_rate": 2.021644804494495e-05,
"loss": 5.8782,
"step": 47500
},
{
"epoch": 1.2,
"learning_rate": 1.9902936971733843e-05,
"loss": 5.8775,
"step": 48000
},
{
"epoch": 1.22,
"learning_rate": 1.9589425898522736e-05,
"loss": 5.8807,
"step": 48500
},
{
"epoch": 1.23,
"learning_rate": 1.9275914825311632e-05,
"loss": 5.8766,
"step": 49000
},
{
"epoch": 1.24,
"learning_rate": 1.8962403752100525e-05,
"loss": 5.87,
"step": 49500
},
{
"epoch": 1.25,
"learning_rate": 1.8648892678889418e-05,
"loss": 5.8691,
"step": 50000
},
{
"epoch": 1.27,
"learning_rate": 1.8335381605678314e-05,
"loss": 5.8686,
"step": 50500
},
{
"epoch": 1.28,
"learning_rate": 1.8021870532467207e-05,
"loss": 5.8658,
"step": 51000
},
{
"epoch": 1.29,
"learning_rate": 1.7708359459256103e-05,
"loss": 5.8609,
"step": 51500
},
{
"epoch": 1.3,
"learning_rate": 1.7394848386044996e-05,
"loss": 5.866,
"step": 52000
},
{
"epoch": 1.32,
"learning_rate": 1.708133731283389e-05,
"loss": 5.8587,
"step": 52500
},
{
"epoch": 1.33,
"learning_rate": 1.676782623962278e-05,
"loss": 5.8565,
"step": 53000
},
{
"epoch": 1.34,
"learning_rate": 1.645431516641168e-05,
"loss": 5.8542,
"step": 53500
},
{
"epoch": 1.35,
"learning_rate": 1.6140804093200574e-05,
"loss": 5.8546,
"step": 54000
},
{
"epoch": 1.37,
"learning_rate": 1.5827293019989467e-05,
"loss": 5.8478,
"step": 54500
},
{
"epoch": 1.38,
"learning_rate": 1.551378194677836e-05,
"loss": 5.8457,
"step": 55000
},
{
"epoch": 1.39,
"learning_rate": 1.5200270873567254e-05,
"loss": 5.8515,
"step": 55500
},
{
"epoch": 1.4,
"learning_rate": 1.4886759800356149e-05,
"loss": 5.8507,
"step": 56000
},
{
"epoch": 1.42,
"learning_rate": 1.4573248727145045e-05,
"loss": 5.8477,
"step": 56500
},
{
"epoch": 1.43,
"learning_rate": 1.4259737653933938e-05,
"loss": 5.8422,
"step": 57000
},
{
"epoch": 1.44,
"learning_rate": 1.3946226580722832e-05,
"loss": 5.8443,
"step": 57500
},
{
"epoch": 1.45,
"learning_rate": 1.3632715507511725e-05,
"loss": 5.8392,
"step": 58000
},
{
"epoch": 1.47,
"learning_rate": 1.331920443430062e-05,
"loss": 5.8372,
"step": 58500
},
{
"epoch": 1.48,
"learning_rate": 1.3005693361089513e-05,
"loss": 5.8368,
"step": 59000
},
{
"epoch": 1.49,
"learning_rate": 1.269218228787841e-05,
"loss": 5.8384,
"step": 59500
},
{
"epoch": 1.5,
"learning_rate": 1.2378671214667302e-05,
"loss": 5.8315,
"step": 60000
},
{
"epoch": 1.52,
"learning_rate": 1.2065160141456198e-05,
"loss": 5.8347,
"step": 60500
},
{
"epoch": 1.53,
"learning_rate": 1.175164906824509e-05,
"loss": 5.8279,
"step": 61000
},
{
"epoch": 1.54,
"learning_rate": 1.1438137995033985e-05,
"loss": 5.8289,
"step": 61500
},
{
"epoch": 1.56,
"learning_rate": 1.112462692182288e-05,
"loss": 5.829,
"step": 62000
},
{
"epoch": 1.57,
"learning_rate": 1.0811115848611773e-05,
"loss": 5.8283,
"step": 62500
},
{
"epoch": 1.58,
"learning_rate": 1.0497604775400667e-05,
"loss": 5.8224,
"step": 63000
},
{
"epoch": 1.59,
"learning_rate": 1.0184093702189562e-05,
"loss": 5.8251,
"step": 63500
},
{
"epoch": 1.61,
"learning_rate": 9.870582628978456e-06,
"loss": 5.823,
"step": 64000
},
{
"epoch": 1.62,
"learning_rate": 9.557071555767349e-06,
"loss": 5.8226,
"step": 64500
},
{
"epoch": 1.63,
"learning_rate": 9.243560482556245e-06,
"loss": 5.8251,
"step": 65000
},
{
"epoch": 1.64,
"learning_rate": 8.930049409345138e-06,
"loss": 5.8237,
"step": 65500
},
{
"epoch": 1.66,
"learning_rate": 8.616538336134033e-06,
"loss": 5.8247,
"step": 66000
},
{
"epoch": 1.67,
"learning_rate": 8.303027262922927e-06,
"loss": 5.8149,
"step": 66500
},
{
"epoch": 1.68,
"learning_rate": 7.989516189711822e-06,
"loss": 5.8161,
"step": 67000
},
{
"epoch": 1.69,
"learning_rate": 7.676005116500714e-06,
"loss": 5.8129,
"step": 67500
},
{
"epoch": 1.71,
"learning_rate": 7.36249404328961e-06,
"loss": 5.8167,
"step": 68000
},
{
"epoch": 1.72,
"learning_rate": 7.0489829700785035e-06,
"loss": 5.8165,
"step": 68500
},
{
"epoch": 1.73,
"learning_rate": 6.735471896867397e-06,
"loss": 5.8153,
"step": 69000
},
{
"epoch": 1.74,
"learning_rate": 6.4219608236562925e-06,
"loss": 5.8099,
"step": 69500
},
{
"epoch": 1.76,
"learning_rate": 6.108449750445186e-06,
"loss": 5.8079,
"step": 70000
}
],
"max_steps": 79742,
"num_train_epochs": 2,
"total_flos": 7.994200817664e+16,
"trial_name": null,
"trial_params": null
}
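
The structure above follows the Hugging Face Trainer checkpoint format: a list of log_history entries (epoch, learning_rate, loss, step) plus run-level fields such as global_step, max_steps, and num_train_epochs. As a minimal sketch, assuming the file is saved locally as trainer_state.json, the snippet below loads it and checks that the logged learning rates are consistent with a plain linear decay from 5e-5 to 0 over max_steps (79742), with no visible warmup; the file name and the 5e-5 base rate are inferred from the values above, not taken from the original training script.

import json

# Minimal sketch: parse this trainer_state.json and sanity-check the LR schedule.
with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]      # list of {epoch, learning_rate, loss, step}
max_steps = state["max_steps"]      # 79742 in this checkpoint
print(f"entries: {len(history)}, global_step: {state['global_step']}")

# The logged values match lr(step) ~= 5e-5 * (1 - step / max_steps).
for entry in history[::20]:
    expected = 5e-5 * (1 - entry["step"] / max_steps)
    print(entry["step"], entry["loss"], entry["learning_rate"], f"{expected:.6e}")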