SLM_vs_LLM_experiments/max_seq_length_128_experiments/distilbert/distilbert_base_uncased_twitter/trainer_state.json
{
  "best_metric": 0.48642775416374207,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/distilbert/distilbert_base_uncased_twitter/checkpoint-100",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 408,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "grad_norm": 0.9765774607658386,
      "learning_rate": 1.950980392156863e-05,
      "loss": 0.6187,
      "step": 10
    },
    {
      "epoch": 0.15,
      "grad_norm": 1.5788111686706543,
      "learning_rate": 1.9019607843137255e-05,
      "loss": 0.5843,
      "step": 20
    },
    {
      "epoch": 0.22,
      "grad_norm": 3.5568511486053467,
      "learning_rate": 1.8529411764705884e-05,
      "loss": 0.5014,
      "step": 30
    },
    {
      "epoch": 0.29,
      "grad_norm": 1.6089941263198853,
      "learning_rate": 1.8039215686274513e-05,
      "loss": 0.4959,
      "step": 40
    },
    {
      "epoch": 0.37,
      "grad_norm": 2.2181787490844727,
      "learning_rate": 1.7549019607843138e-05,
      "loss": 0.4671,
      "step": 50
    },
    {
      "epoch": 0.37,
      "eval_accuracy": 0.7665441176470589,
      "eval_f1_macro": 0.7213912692811775,
      "eval_f1_micro": 0.7665441176470589,
      "eval_loss": 0.4990096390247345,
      "eval_runtime": 0.5393,
      "eval_samples_per_second": 2017.389,
      "eval_steps_per_second": 31.522,
      "step": 50
    },
    {
      "epoch": 0.44,
      "grad_norm": 1.884325623512268,
      "learning_rate": 1.7058823529411767e-05,
      "loss": 0.49,
      "step": 60
    },
    {
      "epoch": 0.51,
      "grad_norm": 2.515151262283325,
      "learning_rate": 1.6568627450980395e-05,
      "loss": 0.468,
      "step": 70
    },
    {
      "epoch": 0.59,
      "grad_norm": 1.46433424949646,
      "learning_rate": 1.607843137254902e-05,
      "loss": 0.4629,
      "step": 80
    },
    {
      "epoch": 0.66,
      "grad_norm": 3.789057731628418,
      "learning_rate": 1.558823529411765e-05,
      "loss": 0.4995,
      "step": 90
    },
    {
      "epoch": 0.74,
      "grad_norm": 1.6152119636535645,
      "learning_rate": 1.5098039215686276e-05,
      "loss": 0.4724,
      "step": 100
    },
    {
      "epoch": 0.74,
      "eval_accuracy": 0.7665441176470589,
      "eval_f1_macro": 0.7129836193609764,
      "eval_f1_micro": 0.7665441176470589,
      "eval_loss": 0.48642775416374207,
      "eval_runtime": 0.5851,
      "eval_samples_per_second": 1859.58,
      "eval_steps_per_second": 29.056,
      "step": 100
    },
    {
      "epoch": 0.81,
      "grad_norm": 2.266235589981079,
      "learning_rate": 1.4607843137254903e-05,
      "loss": 0.4756,
      "step": 110
    },
    {
      "epoch": 0.88,
      "grad_norm": 1.525882601737976,
      "learning_rate": 1.4117647058823532e-05,
      "loss": 0.4502,
      "step": 120
    },
    {
      "epoch": 0.96,
      "grad_norm": 2.974560022354126,
      "learning_rate": 1.3627450980392158e-05,
      "loss": 0.4522,
      "step": 130
    },
    {
      "epoch": 1.03,
      "grad_norm": 1.4635220766067505,
      "learning_rate": 1.3137254901960785e-05,
      "loss": 0.4668,
      "step": 140
    },
    {
      "epoch": 1.1,
      "grad_norm": 1.4645050764083862,
      "learning_rate": 1.2647058823529412e-05,
      "loss": 0.4569,
      "step": 150
    },
    {
      "epoch": 1.1,
      "eval_accuracy": 0.7619485294117647,
      "eval_f1_macro": 0.7171487219637542,
      "eval_f1_micro": 0.7619485294117647,
      "eval_loss": 0.49238911271095276,
      "eval_runtime": 0.5364,
      "eval_samples_per_second": 2028.197,
      "eval_steps_per_second": 31.691,
      "step": 150
    },
    {
      "epoch": 1.18,
      "grad_norm": 1.8205065727233887,
      "learning_rate": 1.215686274509804e-05,
      "loss": 0.4361,
      "step": 160
    },
    {
      "epoch": 1.25,
      "grad_norm": 1.9796948432922363,
      "learning_rate": 1.1666666666666668e-05,
      "loss": 0.4132,
      "step": 170
    },
    {
      "epoch": 1.32,
      "grad_norm": 2.2274928092956543,
      "learning_rate": 1.1176470588235295e-05,
      "loss": 0.4032,
      "step": 180
    },
    {
      "epoch": 1.4,
      "grad_norm": 2.062063217163086,
      "learning_rate": 1.0686274509803922e-05,
      "loss": 0.4225,
      "step": 190
    },
    {
      "epoch": 1.47,
      "grad_norm": 2.1751012802124023,
      "learning_rate": 1.0196078431372549e-05,
      "loss": 0.4577,
      "step": 200
    },
    {
      "epoch": 1.47,
      "eval_accuracy": 0.7674632352941176,
      "eval_f1_macro": 0.735733452379238,
      "eval_f1_micro": 0.7674632352941176,
      "eval_loss": 0.48808765411376953,
      "eval_runtime": 0.5854,
      "eval_samples_per_second": 1858.556,
      "eval_steps_per_second": 29.04,
      "step": 200
    },
    {
      "epoch": 1.54,
      "grad_norm": 2.3987302780151367,
      "learning_rate": 9.705882352941177e-06,
      "loss": 0.4241,
      "step": 210
    },
    {
      "epoch": 1.62,
      "grad_norm": 1.7723791599273682,
      "learning_rate": 9.215686274509804e-06,
      "loss": 0.4869,
      "step": 220
    },
    {
      "epoch": 1.69,
      "grad_norm": 1.5343266725540161,
      "learning_rate": 8.725490196078433e-06,
      "loss": 0.4019,
      "step": 230
    },
    {
      "epoch": 1.76,
      "grad_norm": 1.4921599626541138,
      "learning_rate": 8.23529411764706e-06,
      "loss": 0.431,
      "step": 240
    },
    {
      "epoch": 1.84,
      "grad_norm": 2.095546007156372,
      "learning_rate": 7.745098039215687e-06,
      "loss": 0.4438,
      "step": 250
    },
    {
      "epoch": 1.84,
      "eval_accuracy": 0.7637867647058824,
      "eval_f1_macro": 0.7221905945635532,
      "eval_f1_micro": 0.7637867647058824,
      "eval_loss": 0.4902326464653015,
      "eval_runtime": 0.5401,
      "eval_samples_per_second": 2014.546,
      "eval_steps_per_second": 31.477,
      "step": 250
    },
    {
      "epoch": 1.91,
      "grad_norm": 1.6771706342697144,
      "learning_rate": 7.2549019607843145e-06,
      "loss": 0.4097,
      "step": 260
    },
    {
      "epoch": 1.99,
      "grad_norm": 2.656497001647949,
      "learning_rate": 6.764705882352942e-06,
      "loss": 0.3988,
      "step": 270
    },
    {
      "epoch": 2.06,
      "grad_norm": 1.9385000467300415,
      "learning_rate": 6.274509803921569e-06,
      "loss": 0.417,
      "step": 280
    },
    {
      "epoch": 2.13,
      "grad_norm": 2.667146682739258,
      "learning_rate": 5.784313725490197e-06,
      "loss": 0.4311,
      "step": 290
    },
    {
      "epoch": 2.21,
      "grad_norm": 1.8320512771606445,
      "learning_rate": 5.294117647058824e-06,
      "loss": 0.405,
      "step": 300
    },
    {
      "epoch": 2.21,
      "eval_accuracy": 0.7647058823529411,
      "eval_f1_macro": 0.7211496489942373,
      "eval_f1_micro": 0.7647058823529411,
      "eval_loss": 0.49007949233055115,
      "eval_runtime": 0.5398,
      "eval_samples_per_second": 2015.417,
      "eval_steps_per_second": 31.491,
      "step": 300
    },
    {
      "epoch": 2.28,
      "grad_norm": 2.1121017932891846,
      "learning_rate": 4.803921568627452e-06,
      "loss": 0.4111,
      "step": 310
    },
    {
      "epoch": 2.35,
      "grad_norm": 1.9665944576263428,
      "learning_rate": 4.313725490196079e-06,
      "loss": 0.3779,
      "step": 320
    },
    {
      "epoch": 2.43,
      "grad_norm": 2.207554817199707,
      "learning_rate": 3.8235294117647055e-06,
      "loss": 0.4133,
      "step": 330
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.5441542863845825,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.3995,
      "step": 340
    },
    {
      "epoch": 2.57,
      "grad_norm": 2.1563522815704346,
      "learning_rate": 2.843137254901961e-06,
      "loss": 0.4308,
      "step": 350
    },
    {
      "epoch": 2.57,
      "eval_accuracy": 0.7693014705882353,
      "eval_f1_macro": 0.73261662432013,
      "eval_f1_micro": 0.7693014705882353,
      "eval_loss": 0.4900199770927429,
      "eval_runtime": 0.5915,
      "eval_samples_per_second": 1839.294,
      "eval_steps_per_second": 28.739,
      "step": 350
    },
    {
      "epoch": 2.65,
      "grad_norm": 1.54690682888031,
      "learning_rate": 2.3529411764705885e-06,
      "loss": 0.4072,
      "step": 360
    },
    {
      "epoch": 2.72,
      "grad_norm": 1.971561312675476,
      "learning_rate": 1.8627450980392158e-06,
      "loss": 0.3733,
      "step": 370
    },
    {
      "epoch": 2.79,
      "grad_norm": 1.7932841777801514,
      "learning_rate": 1.3725490196078434e-06,
      "loss": 0.3814,
      "step": 380
    },
    {
      "epoch": 2.87,
      "grad_norm": 2.5148913860321045,
      "learning_rate": 8.823529411764707e-07,
      "loss": 0.4002,
      "step": 390
    },
    {
      "epoch": 2.94,
      "grad_norm": 2.2834231853485107,
      "learning_rate": 3.921568627450981e-07,
      "loss": 0.3584,
      "step": 400
    },
    {
      "epoch": 2.94,
      "eval_accuracy": 0.7674632352941176,
      "eval_f1_macro": 0.7287566403205288,
      "eval_f1_micro": 0.7674632352941176,
      "eval_loss": 0.4930545687675476,
      "eval_runtime": 0.5929,
      "eval_samples_per_second": 1834.989,
      "eval_steps_per_second": 28.672,
      "step": 400
    },
    {
      "epoch": 3.0,
      "step": 408,
      "total_flos": 864747139366912.0,
      "train_loss": 0.44109968811857936,
      "train_runtime": 52.5835,
      "train_samples_per_second": 496.354,
      "train_steps_per_second": 7.759
    }
  ],
  "logging_steps": 10,
  "max_steps": 408,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 864747139366912.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}