SLM_vs_LLM_experiments/max_seq_length_128_experiments/distilbert/distilbert_base_uncased_scotus/trainer_state.json
{
  "best_metric": 1.5984479188919067,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/distilbert/distilbert_base_uncased_scotus/checkpoint-200",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 237,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "grad_norm": 2.7217512130737305,
      "learning_rate": 1.9156118143459917e-05,
      "loss": 2.4003,
      "step": 10
    },
    {
      "epoch": 0.25,
      "grad_norm": 2.063310384750366,
      "learning_rate": 1.8312236286919833e-05,
      "loss": 2.2082,
      "step": 20
    },
    {
      "epoch": 0.38,
      "grad_norm": 1.5437555313110352,
      "learning_rate": 1.746835443037975e-05,
      "loss": 2.1492,
      "step": 30
    },
    {
      "epoch": 0.51,
      "grad_norm": 1.9715864658355713,
      "learning_rate": 1.662447257383966e-05,
      "loss": 2.076,
      "step": 40
    },
    {
      "epoch": 0.63,
      "grad_norm": 2.068974018096924,
      "learning_rate": 1.578059071729958e-05,
      "loss": 1.9785,
      "step": 50
    },
    {
      "epoch": 0.63,
      "eval_accuracy": 0.39571428571428574,
      "eval_f1_macro": 0.1007228498239773,
      "eval_f1_micro": 0.39571428571428574,
      "eval_loss": 1.9737688302993774,
      "eval_runtime": 0.7432,
      "eval_samples_per_second": 1883.664,
      "eval_steps_per_second": 29.6,
      "step": 50
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.165163516998291,
      "learning_rate": 1.4936708860759495e-05,
      "loss": 1.9311,
      "step": 60
    },
    {
      "epoch": 0.89,
      "grad_norm": 2.0208511352539062,
      "learning_rate": 1.4092827004219412e-05,
      "loss": 1.7853,
      "step": 70
    },
    {
      "epoch": 1.01,
      "grad_norm": 2.3129401206970215,
      "learning_rate": 1.3248945147679326e-05,
      "loss": 1.8448,
      "step": 80
    },
    {
      "epoch": 1.14,
      "grad_norm": 3.225861072540283,
      "learning_rate": 1.240506329113924e-05,
      "loss": 1.6705,
      "step": 90
    },
    {
      "epoch": 1.27,
      "grad_norm": 2.632293939590454,
      "learning_rate": 1.1561181434599158e-05,
      "loss": 1.6121,
      "step": 100
    },
    {
      "epoch": 1.27,
      "eval_accuracy": 0.48714285714285716,
      "eval_f1_macro": 0.18153434278456468,
      "eval_f1_micro": 0.48714285714285716,
      "eval_loss": 1.7287405729293823,
      "eval_runtime": 0.7378,
      "eval_samples_per_second": 1897.481,
      "eval_steps_per_second": 29.818,
      "step": 100
    },
    {
      "epoch": 1.39,
      "grad_norm": 2.7373692989349365,
      "learning_rate": 1.0717299578059072e-05,
      "loss": 1.694,
      "step": 110
    },
    {
      "epoch": 1.52,
      "grad_norm": 4.147618293762207,
      "learning_rate": 9.87341772151899e-06,
      "loss": 1.6588,
      "step": 120
    },
    {
      "epoch": 1.65,
      "grad_norm": 2.533884286880493,
      "learning_rate": 9.029535864978903e-06,
      "loss": 1.6191,
      "step": 130
    },
    {
      "epoch": 1.77,
      "grad_norm": 2.6445531845092773,
      "learning_rate": 8.18565400843882e-06,
      "loss": 1.5258,
      "step": 140
    },
    {
      "epoch": 1.9,
      "grad_norm": 2.774038314819336,
      "learning_rate": 7.341772151898735e-06,
      "loss": 1.6415,
      "step": 150
    },
    {
      "epoch": 1.9,
      "eval_accuracy": 0.5057142857142857,
      "eval_f1_macro": 0.19889010501840287,
      "eval_f1_micro": 0.5057142857142857,
      "eval_loss": 1.6443278789520264,
      "eval_runtime": 0.7425,
      "eval_samples_per_second": 1885.397,
      "eval_steps_per_second": 29.628,
      "step": 150
    },
    {
      "epoch": 2.03,
      "grad_norm": 3.8844122886657715,
      "learning_rate": 6.49789029535865e-06,
      "loss": 1.5591,
      "step": 160
    },
    {
      "epoch": 2.15,
      "grad_norm": 3.7135651111602783,
      "learning_rate": 5.654008438818566e-06,
      "loss": 1.4955,
      "step": 170
    },
    {
      "epoch": 2.28,
      "grad_norm": 2.7151761054992676,
      "learning_rate": 4.8101265822784815e-06,
      "loss": 1.544,
      "step": 180
    },
    {
      "epoch": 2.41,
      "grad_norm": 3.708754301071167,
      "learning_rate": 3.9662447257383965e-06,
      "loss": 1.4917,
      "step": 190
    },
    {
      "epoch": 2.53,
      "grad_norm": 2.7288765907287598,
      "learning_rate": 3.1223628691983127e-06,
      "loss": 1.4786,
      "step": 200
    },
    {
      "epoch": 2.53,
      "eval_accuracy": 0.5242857142857142,
      "eval_f1_macro": 0.2442080274860101,
      "eval_f1_micro": 0.5242857142857142,
      "eval_loss": 1.5984479188919067,
      "eval_runtime": 0.7477,
      "eval_samples_per_second": 1872.513,
      "eval_steps_per_second": 29.425,
      "step": 200
    },
    {
      "epoch": 2.66,
      "grad_norm": 3.4662201404571533,
      "learning_rate": 2.278481012658228e-06,
      "loss": 1.4878,
      "step": 210
    },
    {
      "epoch": 2.78,
      "grad_norm": 2.5271451473236084,
      "learning_rate": 1.4345991561181436e-06,
      "loss": 1.4375,
      "step": 220
    },
    {
      "epoch": 2.91,
      "grad_norm": 3.117112159729004,
      "learning_rate": 5.907172995780591e-07,
      "loss": 1.4802,
      "step": 230
    },
    {
      "epoch": 3.0,
      "step": 237,
      "total_flos": 502414912258048.0,
      "train_loss": 1.721195534814762,
      "train_runtime": 30.5391,
      "train_samples_per_second": 491.173,
      "train_steps_per_second": 7.761
    }
  ],
  "logging_steps": 10,
  "max_steps": 237,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 502414912258048.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}