{
  "best_metric": 0.6842610364683301,
  "best_model_checkpoint": "./checkpoints/skt_kobert-base-v1\\checkpoint-3126",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 4689,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03198976327575176,
      "grad_norm": 2.0306639671325684,
      "learning_rate": 1.9786734911494988e-05,
      "loss": 1.013,
      "step": 50
    },
    {
      "epoch": 0.06397952655150352,
      "grad_norm": 2.591745138168335,
      "learning_rate": 1.9573469822989978e-05,
      "loss": 0.9463,
      "step": 100
    },
    {
      "epoch": 0.09596928982725528,
      "grad_norm": 2.405771017074585,
      "learning_rate": 1.9360204734484968e-05,
      "loss": 0.912,
      "step": 150
    },
    {
      "epoch": 0.12795905310300704,
      "grad_norm": 3.699129104614258,
      "learning_rate": 1.9146939645979955e-05,
      "loss": 0.8349,
      "step": 200
    },
    {
      "epoch": 0.1599488163787588,
      "grad_norm": 3.0695576667785645,
      "learning_rate": 1.8933674557474945e-05,
      "loss": 0.8967,
      "step": 250
    },
    {
      "epoch": 0.19193857965451055,
      "grad_norm": 2.762423038482666,
      "learning_rate": 1.872040946896993e-05,
      "loss": 0.8609,
      "step": 300
    },
    {
      "epoch": 0.22392834293026231,
      "grad_norm": 5.626348495483398,
      "learning_rate": 1.8507144380464918e-05,
      "loss": 0.8619,
      "step": 350
    },
    {
      "epoch": 0.2559181062060141,
      "grad_norm": 2.3783187866210938,
      "learning_rate": 1.8293879291959908e-05,
      "loss": 0.847,
      "step": 400
    },
    {
      "epoch": 0.28790786948176583,
      "grad_norm": 2.9742441177368164,
      "learning_rate": 1.8080614203454897e-05,
      "loss": 0.8161,
      "step": 450
    },
    {
      "epoch": 0.3198976327575176,
      "grad_norm": 1.8092529773712158,
      "learning_rate": 1.7867349114949884e-05,
      "loss": 0.7752,
      "step": 500
    },
    {
      "epoch": 0.35188739603326935,
      "grad_norm": 2.72413969039917,
      "learning_rate": 1.7654084026444874e-05,
      "loss": 0.8281,
      "step": 550
    },
    {
      "epoch": 0.3838771593090211,
      "grad_norm": 3.9095618724823,
      "learning_rate": 1.744081893793986e-05,
      "loss": 0.7984,
      "step": 600
    },
    {
      "epoch": 0.41586692258477287,
      "grad_norm": 3.821829319000244,
      "learning_rate": 1.7227553849434847e-05,
      "loss": 0.845,
      "step": 650
    },
    {
      "epoch": 0.44785668586052463,
      "grad_norm": 6.882596492767334,
      "learning_rate": 1.7014288760929837e-05,
      "loss": 0.8218,
      "step": 700
    },
    {
      "epoch": 0.4798464491362764,
      "grad_norm": 3.819617986679077,
      "learning_rate": 1.6801023672424827e-05,
      "loss": 0.8316,
      "step": 750
    },
    {
      "epoch": 0.5118362124120281,
      "grad_norm": 2.8060302734375,
      "learning_rate": 1.6587758583919813e-05,
      "loss": 0.8009,
      "step": 800
    },
    {
      "epoch": 0.5438259756877799,
      "grad_norm": 6.112952709197998,
      "learning_rate": 1.6374493495414803e-05,
      "loss": 0.7959,
      "step": 850
    },
    {
      "epoch": 0.5758157389635317,
      "grad_norm": 6.098555564880371,
      "learning_rate": 1.616122840690979e-05,
      "loss": 0.822,
      "step": 900
    },
    {
      "epoch": 0.6078055022392834,
      "grad_norm": 8.28978157043457,
      "learning_rate": 1.5947963318404776e-05,
      "loss": 0.7781,
      "step": 950
    },
    {
      "epoch": 0.6397952655150352,
      "grad_norm": 3.776757001876831,
      "learning_rate": 1.5734698229899766e-05,
      "loss": 0.8017,
      "step": 1000
    },
    {
      "epoch": 0.6717850287907869,
      "grad_norm": 2.7059381008148193,
      "learning_rate": 1.5521433141394756e-05,
      "loss": 0.7822,
      "step": 1050
    },
    {
      "epoch": 0.7037747920665387,
      "grad_norm": 6.732589244842529,
      "learning_rate": 1.5308168052889743e-05,
      "loss": 0.7965,
      "step": 1100
    },
    {
      "epoch": 0.7357645553422905,
      "grad_norm": 8.803948402404785,
      "learning_rate": 1.5094902964384733e-05,
      "loss": 0.772,
      "step": 1150
    },
    {
      "epoch": 0.7677543186180422,
      "grad_norm": 3.7579758167266846,
      "learning_rate": 1.488163787587972e-05,
      "loss": 0.877,
      "step": 1200
    },
    {
      "epoch": 0.799744081893794,
      "grad_norm": 3.5114402770996094,
      "learning_rate": 1.4668372787374708e-05,
      "loss": 0.7834,
      "step": 1250
    },
    {
      "epoch": 0.8317338451695457,
      "grad_norm": 3.5781891345977783,
      "learning_rate": 1.4455107698869698e-05,
      "loss": 0.7746,
      "step": 1300
    },
    {
      "epoch": 0.8637236084452975,
      "grad_norm": 4.3330278396606445,
      "learning_rate": 1.4241842610364684e-05,
      "loss": 0.8138,
      "step": 1350
    },
    {
      "epoch": 0.8957133717210493,
      "grad_norm": 3.0052335262298584,
      "learning_rate": 1.4028577521859672e-05,
      "loss": 0.7651,
      "step": 1400
    },
    {
      "epoch": 0.927703134996801,
      "grad_norm": 3.8835694789886475,
      "learning_rate": 1.3815312433354662e-05,
      "loss": 0.7984,
      "step": 1450
    },
    {
      "epoch": 0.9596928982725528,
      "grad_norm": 5.093181133270264,
      "learning_rate": 1.3602047344849649e-05,
      "loss": 0.7943,
      "step": 1500
    },
    {
      "epoch": 0.9916826615483045,
      "grad_norm": 2.615415096282959,
      "learning_rate": 1.3388782256344637e-05,
      "loss": 0.7568,
      "step": 1550
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.673064619321817,
      "eval_f1": 0.5774602220992764,
      "eval_loss": 0.755181610584259,
      "eval_runtime": 146.1897,
      "eval_samples_per_second": 21.383,
      "eval_steps_per_second": 1.341,
      "step": 1563
    },
    {
      "epoch": 1.0236724248240563,
      "grad_norm": 3.9289746284484863,
      "learning_rate": 1.3175517167839627e-05,
      "loss": 0.7968,
      "step": 1600
    },
    {
      "epoch": 1.055662188099808,
      "grad_norm": 3.1284472942352295,
      "learning_rate": 1.2962252079334613e-05,
      "loss": 0.7693,
      "step": 1650
    },
    {
      "epoch": 1.0876519513755598,
      "grad_norm": 6.5303544998168945,
      "learning_rate": 1.2748986990829602e-05,
      "loss": 0.7763,
      "step": 1700
    },
    {
      "epoch": 1.1196417146513116,
      "grad_norm": 4.240009784698486,
      "learning_rate": 1.2535721902324592e-05,
      "loss": 0.7499,
      "step": 1750
    },
    {
      "epoch": 1.1516314779270633,
      "grad_norm": 3.231553554534912,
      "learning_rate": 1.2322456813819578e-05,
      "loss": 0.8081,
      "step": 1800
    },
    {
      "epoch": 1.183621241202815,
      "grad_norm": 3.248556613922119,
      "learning_rate": 1.2109191725314566e-05,
      "loss": 0.7372,
      "step": 1850
    },
    {
      "epoch": 1.2156110044785668,
      "grad_norm": 4.725924968719482,
      "learning_rate": 1.1895926636809556e-05,
      "loss": 0.7564,
      "step": 1900
    },
    {
      "epoch": 1.2476007677543186,
      "grad_norm": 7.154216766357422,
      "learning_rate": 1.1682661548304543e-05,
      "loss": 0.7358,
      "step": 1950
    },
    {
      "epoch": 1.2795905310300704,
      "grad_norm": 5.529367923736572,
      "learning_rate": 1.1469396459799531e-05,
      "loss": 0.7921,
      "step": 2000
    },
    {
      "epoch": 1.3115802943058221,
      "grad_norm": 2.6566271781921387,
      "learning_rate": 1.1256131371294521e-05,
      "loss": 0.7684,
      "step": 2050
    },
    {
      "epoch": 1.3435700575815739,
      "grad_norm": 3.7145371437072754,
      "learning_rate": 1.1042866282789508e-05,
      "loss": 0.7612,
      "step": 2100
    },
    {
      "epoch": 1.3755598208573256,
      "grad_norm": 4.2554707527160645,
      "learning_rate": 1.0829601194284496e-05,
      "loss": 0.7622,
      "step": 2150
    },
    {
      "epoch": 1.4075495841330774,
      "grad_norm": 3.207913398742676,
      "learning_rate": 1.0616336105779486e-05,
      "loss": 0.7331,
      "step": 2200
    },
    {
      "epoch": 1.4395393474088292,
      "grad_norm": 7.0538153648376465,
      "learning_rate": 1.0403071017274472e-05,
      "loss": 0.7578,
      "step": 2250
    },
    {
      "epoch": 1.471529110684581,
      "grad_norm": 4.054543972015381,
      "learning_rate": 1.018980592876946e-05,
      "loss": 0.7711,
      "step": 2300
    },
    {
      "epoch": 1.5035188739603327,
      "grad_norm": 5.265899658203125,
      "learning_rate": 9.976540840264449e-06,
      "loss": 0.742,
      "step": 2350
    },
    {
      "epoch": 1.5355086372360844,
      "grad_norm": 5.515292167663574,
      "learning_rate": 9.763275751759437e-06,
      "loss": 0.763,
      "step": 2400
    },
    {
      "epoch": 1.5674984005118362,
      "grad_norm": 3.4068896770477295,
      "learning_rate": 9.550010663254427e-06,
      "loss": 0.7787,
      "step": 2450
    },
    {
      "epoch": 1.599488163787588,
      "grad_norm": 3.8786113262176514,
      "learning_rate": 9.336745574749414e-06,
      "loss": 0.7665,
      "step": 2500
    },
    {
      "epoch": 1.6314779270633397,
      "grad_norm": 7.389112949371338,
      "learning_rate": 9.123480486244403e-06,
      "loss": 0.7324,
      "step": 2550
    },
    {
      "epoch": 1.6634676903390915,
      "grad_norm": 3.790463924407959,
      "learning_rate": 8.910215397739392e-06,
      "loss": 0.7479,
      "step": 2600
    },
    {
      "epoch": 1.6954574536148432,
      "grad_norm": 3.296069622039795,
      "learning_rate": 8.696950309234378e-06,
      "loss": 0.6861,
      "step": 2650
    },
    {
      "epoch": 1.727447216890595,
      "grad_norm": 3.9884438514709473,
      "learning_rate": 8.483685220729368e-06,
      "loss": 0.7621,
      "step": 2700
    },
    {
      "epoch": 1.7594369801663468,
      "grad_norm": 4.0090532302856445,
      "learning_rate": 8.270420132224356e-06,
      "loss": 0.7455,
      "step": 2750
    },
    {
      "epoch": 1.7914267434420985,
      "grad_norm": 5.72037935256958,
      "learning_rate": 8.057155043719343e-06,
      "loss": 0.7615,
      "step": 2800
    },
    {
      "epoch": 1.8234165067178503,
      "grad_norm": 5.367597579956055,
      "learning_rate": 7.843889955214333e-06,
      "loss": 0.7478,
      "step": 2850
    },
    {
      "epoch": 1.855406269993602,
      "grad_norm": 4.492830753326416,
      "learning_rate": 7.630624866709321e-06,
      "loss": 0.7644,
      "step": 2900
    },
    {
      "epoch": 1.8873960332693538,
      "grad_norm": 4.395884990692139,
      "learning_rate": 7.4173597782043085e-06,
      "loss": 0.726,
      "step": 2950
    },
    {
      "epoch": 1.9193857965451055,
      "grad_norm": 5.388432025909424,
      "learning_rate": 7.204094689699297e-06,
      "loss": 0.7305,
      "step": 3000
    },
    {
      "epoch": 1.9513755598208573,
      "grad_norm": 3.6554412841796875,
      "learning_rate": 6.990829601194286e-06,
      "loss": 0.7312,
      "step": 3050
    },
    {
      "epoch": 1.983365323096609,
      "grad_norm": 2.9544596672058105,
      "learning_rate": 6.777564512689273e-06,
      "loss": 0.7535,
      "step": 3100
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6842610364683301,
      "eval_f1": 0.5914024960556081,
      "eval_loss": 0.71613609790802,
      "eval_runtime": 145.4665,
      "eval_samples_per_second": 21.489,
      "eval_steps_per_second": 1.347,
      "step": 3126
    },
    {
      "epoch": 2.015355086372361,
      "grad_norm": 2.3898167610168457,
      "learning_rate": 6.5642994241842614e-06,
      "loss": 0.7209,
      "step": 3150
    },
    {
      "epoch": 2.0473448496481126,
      "grad_norm": 3.216728448867798,
      "learning_rate": 6.3510343356792505e-06,
      "loss": 0.7328,
      "step": 3200
    },
    {
      "epoch": 2.0793346129238643,
      "grad_norm": 5.2315673828125,
      "learning_rate": 6.137769247174238e-06,
      "loss": 0.6979,
      "step": 3250
    },
    {
      "epoch": 2.111324376199616,
      "grad_norm": 5.086841583251953,
      "learning_rate": 5.924504158669226e-06,
      "loss": 0.7401,
      "step": 3300
    },
    {
      "epoch": 2.143314139475368,
      "grad_norm": 3.975651502609253,
      "learning_rate": 5.711239070164215e-06,
      "loss": 0.7695,
      "step": 3350
    },
    {
      "epoch": 2.1753039027511196,
      "grad_norm": 5.14952278137207,
      "learning_rate": 5.497973981659203e-06,
      "loss": 0.7259,
      "step": 3400
    },
    {
      "epoch": 2.2072936660268714,
      "grad_norm": 10.398428916931152,
      "learning_rate": 5.284708893154191e-06,
      "loss": 0.7099,
      "step": 3450
    },
    {
      "epoch": 2.239283429302623,
      "grad_norm": 5.051026821136475,
      "learning_rate": 5.07144380464918e-06,
      "loss": 0.7256,
      "step": 3500
    },
    {
      "epoch": 2.271273192578375,
      "grad_norm": 5.900022506713867,
      "learning_rate": 4.858178716144167e-06,
      "loss": 0.6803,
      "step": 3550
    },
    {
      "epoch": 2.3032629558541267,
      "grad_norm": 3.650153636932373,
      "learning_rate": 4.644913627639156e-06,
      "loss": 0.6936,
      "step": 3600
    },
    {
      "epoch": 2.3352527191298784,
      "grad_norm": 3.192567825317383,
      "learning_rate": 4.431648539134144e-06,
      "loss": 0.7388,
      "step": 3650
    },
    {
      "epoch": 2.36724248240563,
      "grad_norm": 2.792283058166504,
      "learning_rate": 4.218383450629132e-06,
      "loss": 0.7199,
      "step": 3700
    },
    {
      "epoch": 2.399232245681382,
      "grad_norm": 4.723822593688965,
      "learning_rate": 4.005118362124121e-06,
      "loss": 0.7536,
      "step": 3750
    },
    {
      "epoch": 2.4312220089571337,
      "grad_norm": 6.793713092803955,
      "learning_rate": 3.791853273619109e-06,
      "loss": 0.7043,
      "step": 3800
    },
    {
      "epoch": 2.4632117722328855,
      "grad_norm": 4.855713367462158,
      "learning_rate": 3.5785881851140968e-06,
      "loss": 0.7587,
      "step": 3850
    },
    {
      "epoch": 2.495201535508637,
      "grad_norm": 3.8809781074523926,
      "learning_rate": 3.3653230966090854e-06,
      "loss": 0.7007,
      "step": 3900
    },
    {
      "epoch": 2.527191298784389,
      "grad_norm": 3.591956377029419,
      "learning_rate": 3.1520580081040737e-06,
      "loss": 0.6805,
      "step": 3950
    },
    {
      "epoch": 2.5591810620601407,
      "grad_norm": 4.135153293609619,
      "learning_rate": 2.9387929195990615e-06,
      "loss": 0.7628,
      "step": 4000
    },
    {
      "epoch": 2.5911708253358925,
      "grad_norm": 4.887539386749268,
      "learning_rate": 2.72552783109405e-06,
      "loss": 0.7203,
      "step": 4050
    },
    {
      "epoch": 2.6231605886116443,
      "grad_norm": 5.000283241271973,
      "learning_rate": 2.5122627425890384e-06,
      "loss": 0.709,
      "step": 4100
    },
    {
      "epoch": 2.655150351887396,
      "grad_norm": 5.802310943603516,
      "learning_rate": 2.2989976540840266e-06,
      "loss": 0.6938,
      "step": 4150
    },
    {
      "epoch": 2.6871401151631478,
      "grad_norm": 2.909243583679199,
      "learning_rate": 2.085732565579015e-06,
      "loss": 0.7236,
      "step": 4200
    },
    {
      "epoch": 2.7191298784388995,
      "grad_norm": 3.879182815551758,
      "learning_rate": 1.872467477074003e-06,
      "loss": 0.7327,
      "step": 4250
    },
    {
      "epoch": 2.7511196417146513,
      "grad_norm": 5.3820295333862305,
      "learning_rate": 1.6592023885689915e-06,
      "loss": 0.6762,
      "step": 4300
    },
    {
      "epoch": 2.783109404990403,
      "grad_norm": 4.583397388458252,
      "learning_rate": 1.4459373000639796e-06,
      "loss": 0.7111,
      "step": 4350
    },
    {
      "epoch": 2.815099168266155,
      "grad_norm": 3.990649938583374,
      "learning_rate": 1.2326722115589678e-06,
      "loss": 0.7177,
      "step": 4400
    },
    {
      "epoch": 2.8470889315419066,
      "grad_norm": 5.5702433586120605,
      "learning_rate": 1.019407123053956e-06,
      "loss": 0.7068,
      "step": 4450
    },
    {
      "epoch": 2.8790786948176583,
      "grad_norm": 4.175040245056152,
      "learning_rate": 8.061420345489445e-07,
      "loss": 0.7092,
      "step": 4500
    },
    {
      "epoch": 2.91106845809341,
      "grad_norm": 4.912069797515869,
      "learning_rate": 5.928769460439326e-07,
      "loss": 0.7022,
      "step": 4550
    },
    {
      "epoch": 2.943058221369162,
      "grad_norm": 4.595305442810059,
      "learning_rate": 3.796118575389209e-07,
      "loss": 0.7228,
      "step": 4600
    },
    {
      "epoch": 2.9750479846449136,
      "grad_norm": 3.835669755935669,
      "learning_rate": 1.6634676903390917e-07,
      "loss": 0.6946,
      "step": 4650
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6829814459373,
      "eval_f1": 0.5863851194804445,
      "eval_loss": 0.7020198106765747,
      "eval_runtime": 144.2948,
      "eval_samples_per_second": 21.664,
      "eval_steps_per_second": 1.358,
      "step": 4689
    }
  ],
  "logging_steps": 50,
  "max_steps": 4689,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1757912647073562.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}