{
  "best_metric": 0.21237443387508392,
  "best_model_checkpoint": "finetuned_entity_categorical_classification/checkpoint-4728",
  "epoch": 15.0,
  "eval_steps": 500,
  "global_step": 23640,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "learning_rate": 1.957698815566836e-05,
      "loss": 1.5567,
      "step": 500
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.915397631133672e-05,
      "loss": 0.3944,
      "step": 1000
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.873096446700508e-05,
      "loss": 0.2773,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9374900840869427,
      "eval_loss": 0.2187376469373703,
      "eval_runtime": 2.2114,
      "eval_samples_per_second": 2850.256,
      "eval_steps_per_second": 178.169,
      "step": 1576
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.830795262267344e-05,
      "loss": 0.1997,
      "step": 2000
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.7884940778341796e-05,
      "loss": 0.202,
      "step": 2500
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.7461928934010152e-05,
      "loss": 0.1797,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9452641599238458,
      "eval_loss": 0.22844311594963074,
      "eval_runtime": 2.2403,
      "eval_samples_per_second": 2813.437,
      "eval_steps_per_second": 175.868,
      "step": 3152
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.7038917089678512e-05,
      "loss": 0.148,
      "step": 3500
    },
    {
      "epoch": 2.54,
      "learning_rate": 1.661590524534687e-05,
      "loss": 0.1357,
      "step": 4000
    },
    {
      "epoch": 2.86,
      "learning_rate": 1.619289340101523e-05,
      "loss": 0.1525,
      "step": 4500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9481199428843408,
      "eval_loss": 0.21237443387508392,
      "eval_runtime": 2.2281,
      "eval_samples_per_second": 2828.834,
      "eval_steps_per_second": 176.83,
      "step": 4728
    },
    {
      "epoch": 3.17,
      "learning_rate": 1.576988155668359e-05,
      "loss": 0.1218,
      "step": 5000
    },
    {
      "epoch": 3.49,
      "learning_rate": 1.5346869712351946e-05,
      "loss": 0.1147,
      "step": 5500
    },
    {
      "epoch": 3.81,
      "learning_rate": 1.4923857868020306e-05,
      "loss": 0.1195,
      "step": 6000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.94859590671109,
      "eval_loss": 0.2216227501630783,
      "eval_runtime": 2.2255,
      "eval_samples_per_second": 2832.161,
      "eval_steps_per_second": 177.038,
      "step": 6304
    },
    {
      "epoch": 4.12,
      "learning_rate": 1.4500846023688663e-05,
      "loss": 0.114,
      "step": 6500
    },
    {
      "epoch": 4.44,
      "learning_rate": 1.4077834179357023e-05,
      "loss": 0.104,
      "step": 7000
    },
    {
      "epoch": 4.76,
      "learning_rate": 1.3654822335025382e-05,
      "loss": 0.0936,
      "step": 7500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9462160875773441,
      "eval_loss": 0.24313929677009583,
      "eval_runtime": 2.1752,
      "eval_samples_per_second": 2897.6,
      "eval_steps_per_second": 181.129,
      "step": 7880
    },
    {
      "epoch": 5.08,
      "learning_rate": 1.323181049069374e-05,
      "loss": 0.099,
      "step": 8000
    },
    {
      "epoch": 5.39,
      "learning_rate": 1.28087986463621e-05,
      "loss": 0.0929,
      "step": 8500
    },
    {
      "epoch": 5.71,
      "learning_rate": 1.2385786802030457e-05,
      "loss": 0.0872,
      "step": 9000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9465333967951769,
      "eval_loss": 0.25265371799468994,
      "eval_runtime": 2.2222,
      "eval_samples_per_second": 2836.351,
      "eval_steps_per_second": 177.3,
      "step": 9456
    },
    {
      "epoch": 6.03,
      "learning_rate": 1.1962774957698817e-05,
      "loss": 0.0963,
      "step": 9500
    },
    {
      "epoch": 6.35,
      "learning_rate": 1.1539763113367176e-05,
      "loss": 0.0733,
      "step": 10000
    },
    {
      "epoch": 6.66,
      "learning_rate": 1.1116751269035532e-05,
      "loss": 0.0812,
      "step": 10500
    },
    {
      "epoch": 6.98,
      "learning_rate": 1.0693739424703892e-05,
      "loss": 0.0929,
      "step": 11000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9490718705378391,
      "eval_loss": 0.23417465388774872,
      "eval_runtime": 2.1699,
      "eval_samples_per_second": 2904.721,
      "eval_steps_per_second": 181.574,
      "step": 11032
    },
    {
      "epoch": 7.3,
      "learning_rate": 1.0270727580372251e-05,
      "loss": 0.0629,
      "step": 11500
    },
    {
      "epoch": 7.61,
      "learning_rate": 9.84771573604061e-06,
      "loss": 0.0802,
      "step": 12000
    },
    {
      "epoch": 7.93,
      "learning_rate": 9.424703891708968e-06,
      "loss": 0.0757,
      "step": 12500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9490718705378391,
      "eval_loss": 0.24814845621585846,
      "eval_runtime": 2.241,
      "eval_samples_per_second": 2812.616,
      "eval_steps_per_second": 175.816,
      "step": 12608
    },
    {
      "epoch": 8.25,
      "learning_rate": 9.001692047377328e-06,
      "loss": 0.063,
      "step": 13000
    },
    {
      "epoch": 8.57,
      "learning_rate": 8.578680203045686e-06,
      "loss": 0.0619,
      "step": 13500
    },
    {
      "epoch": 8.88,
      "learning_rate": 8.155668358714045e-06,
      "loss": 0.0673,
      "step": 14000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9474853244486753,
      "eval_loss": 0.2526280879974365,
      "eval_runtime": 2.1778,
      "eval_samples_per_second": 2894.229,
      "eval_steps_per_second": 180.918,
      "step": 14184
    },
    {
      "epoch": 9.2,
      "learning_rate": 7.732656514382403e-06,
      "loss": 0.0599,
      "step": 14500
    },
    {
      "epoch": 9.52,
      "learning_rate": 7.309644670050762e-06,
      "loss": 0.0605,
      "step": 15000
    },
    {
      "epoch": 9.84,
      "learning_rate": 6.886632825719121e-06,
      "loss": 0.0563,
      "step": 15500
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9482785974932572,
      "eval_loss": 0.2582007646560669,
      "eval_runtime": 2.1873,
      "eval_samples_per_second": 2881.619,
      "eval_steps_per_second": 180.13,
      "step": 15760
    },
    {
      "epoch": 10.15,
      "learning_rate": 6.4636209813874795e-06,
      "loss": 0.0581,
      "step": 16000
    },
    {
      "epoch": 10.47,
      "learning_rate": 6.040609137055839e-06,
      "loss": 0.0614,
      "step": 16500
    },
    {
      "epoch": 10.79,
      "learning_rate": 5.617597292724196e-06,
      "loss": 0.0488,
      "step": 17000
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9487545613200064,
      "eval_loss": 0.2686789333820343,
      "eval_runtime": 2.2111,
      "eval_samples_per_second": 2850.648,
      "eval_steps_per_second": 178.194,
      "step": 17336
    },
    {
      "epoch": 11.1,
      "learning_rate": 5.194585448392555e-06,
      "loss": 0.054,
      "step": 17500
    },
    {
      "epoch": 11.42,
      "learning_rate": 4.771573604060914e-06,
      "loss": 0.0552,
      "step": 18000
    },
    {
      "epoch": 11.74,
      "learning_rate": 4.3485617597292725e-06,
      "loss": 0.0516,
      "step": 18500
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9497064889735046,
      "eval_loss": 0.26535487174987793,
      "eval_runtime": 2.1929,
      "eval_samples_per_second": 2874.229,
      "eval_steps_per_second": 179.668,
      "step": 18912
    },
    {
      "epoch": 12.06,
      "learning_rate": 3.925549915397631e-06,
      "loss": 0.0543,
      "step": 19000
    },
    {
      "epoch": 12.37,
      "learning_rate": 3.5025380710659903e-06,
      "loss": 0.05,
      "step": 19500
    },
    {
      "epoch": 12.69,
      "learning_rate": 3.079526226734349e-06,
      "loss": 0.0494,
      "step": 20000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9487545613200064,
      "eval_loss": 0.2722657322883606,
      "eval_runtime": 2.2256,
      "eval_samples_per_second": 2832.044,
      "eval_steps_per_second": 177.031,
      "step": 20488
    },
    {
      "epoch": 13.01,
      "learning_rate": 2.656514382402707e-06,
      "loss": 0.0443,
      "step": 20500
    },
    {
      "epoch": 13.32,
      "learning_rate": 2.233502538071066e-06,
      "loss": 0.0355,
      "step": 21000
    },
    {
      "epoch": 13.64,
      "learning_rate": 1.810490693739425e-06,
      "loss": 0.0502,
      "step": 21500
    },
    {
      "epoch": 13.96,
      "learning_rate": 1.3874788494077834e-06,
      "loss": 0.0478,
      "step": 22000
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9500237981913374,
      "eval_loss": 0.2725750505924225,
      "eval_runtime": 2.2066,
      "eval_samples_per_second": 2856.45,
      "eval_steps_per_second": 178.556,
      "step": 22064
    },
    {
      "epoch": 14.28,
      "learning_rate": 9.644670050761422e-07,
      "loss": 0.0423,
      "step": 22500
    },
    {
      "epoch": 14.59,
      "learning_rate": 5.414551607445009e-07,
      "loss": 0.0415,
      "step": 23000
    },
    {
      "epoch": 14.91,
      "learning_rate": 1.1844331641285957e-07,
      "loss": 0.0367,
      "step": 23500
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9501824528002538,
      "eval_loss": 0.27884194254875183,
      "eval_runtime": 2.2284,
      "eval_samples_per_second": 2828.43,
      "eval_steps_per_second": 176.805,
      "step": 23640
    }
  ],
  "logging_steps": 500,
  "max_steps": 23640,
  "num_train_epochs": 15,
  "save_steps": 500,
  "total_flos": 1508157302578992.0,
  "trial_name": null,
  "trial_params": null
}