SLM_vs_LLM_experiments/max_seq_length_128_experiments/distilbert/distilroberta_base_twitter/trainer_state.json
{
  "best_metric": 0.48123905062675476,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/distilbert/distilroberta_base_twitter/checkpoint-200",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 408,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "grad_norm": 1.314621925354004,
      "learning_rate": 1.950980392156863e-05,
      "loss": 0.6097,
      "step": 10
    },
    {
      "epoch": 0.15,
      "grad_norm": 2.451284408569336,
      "learning_rate": 1.9019607843137255e-05,
      "loss": 0.6246,
      "step": 20
    },
    {
      "epoch": 0.22,
      "grad_norm": 8.775372505187988,
      "learning_rate": 1.8529411764705884e-05,
      "loss": 0.5457,
      "step": 30
    },
    {
      "epoch": 0.29,
      "grad_norm": 3.2106316089630127,
      "learning_rate": 1.8039215686274513e-05,
      "loss": 0.5083,
      "step": 40
    },
    {
      "epoch": 0.37,
      "grad_norm": 2.1298840045928955,
      "learning_rate": 1.7549019607843138e-05,
      "loss": 0.4863,
      "step": 50
    },
    {
      "epoch": 0.37,
      "eval_accuracy": 0.7582720588235294,
      "eval_f1_macro": 0.7215736986471983,
      "eval_f1_micro": 0.7582720588235294,
      "eval_loss": 0.4847523272037506,
      "eval_runtime": 0.5343,
      "eval_samples_per_second": 2036.144,
      "eval_steps_per_second": 31.815,
      "step": 50
    },
    {
      "epoch": 0.44,
      "grad_norm": 2.0992162227630615,
      "learning_rate": 1.7058823529411767e-05,
      "loss": 0.479,
      "step": 60
    },
    {
      "epoch": 0.51,
      "grad_norm": 4.017960548400879,
      "learning_rate": 1.6568627450980395e-05,
      "loss": 0.5089,
      "step": 70
    },
    {
      "epoch": 0.59,
      "grad_norm": 2.547389268875122,
      "learning_rate": 1.607843137254902e-05,
      "loss": 0.4534,
      "step": 80
    },
    {
      "epoch": 0.66,
      "grad_norm": 5.25526762008667,
      "learning_rate": 1.558823529411765e-05,
      "loss": 0.5069,
      "step": 90
    },
    {
      "epoch": 0.74,
      "grad_norm": 2.0313820838928223,
      "learning_rate": 1.5098039215686276e-05,
      "loss": 0.4736,
      "step": 100
    },
    {
      "epoch": 0.74,
      "eval_accuracy": 0.7628676470588235,
      "eval_f1_macro": 0.7078921078921079,
      "eval_f1_micro": 0.7628676470588235,
      "eval_loss": 0.486197292804718,
      "eval_runtime": 0.5322,
      "eval_samples_per_second": 2044.173,
      "eval_steps_per_second": 31.94,
      "step": 100
    },
    {
      "epoch": 0.81,
      "grad_norm": 3.0662012100219727,
      "learning_rate": 1.4607843137254903e-05,
      "loss": 0.4831,
      "step": 110
    },
    {
      "epoch": 0.88,
      "grad_norm": 2.1004221439361572,
      "learning_rate": 1.4117647058823532e-05,
      "loss": 0.4413,
      "step": 120
    },
    {
      "epoch": 0.96,
      "grad_norm": 5.478739261627197,
      "learning_rate": 1.3627450980392158e-05,
      "loss": 0.4581,
      "step": 130
    },
    {
      "epoch": 1.03,
      "grad_norm": 3.0216825008392334,
      "learning_rate": 1.3137254901960785e-05,
      "loss": 0.4658,
      "step": 140
    },
    {
      "epoch": 1.1,
      "grad_norm": 2.5667436122894287,
      "learning_rate": 1.2647058823529412e-05,
      "loss": 0.463,
      "step": 150
    },
    {
      "epoch": 1.1,
      "eval_accuracy": 0.7738970588235294,
      "eval_f1_macro": 0.7356166061472933,
      "eval_f1_micro": 0.7738970588235294,
      "eval_loss": 0.49347999691963196,
      "eval_runtime": 0.5316,
      "eval_samples_per_second": 2046.653,
      "eval_steps_per_second": 31.979,
      "step": 150
    },
    {
      "epoch": 1.18,
      "grad_norm": 3.2256863117218018,
      "learning_rate": 1.215686274509804e-05,
      "loss": 0.4481,
      "step": 160
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.5312764644622803,
      "learning_rate": 1.1666666666666668e-05,
      "loss": 0.4336,
      "step": 170
    },
    {
      "epoch": 1.32,
      "grad_norm": 3.472776174545288,
      "learning_rate": 1.1176470588235295e-05,
      "loss": 0.4159,
      "step": 180
    },
    {
      "epoch": 1.4,
      "grad_norm": 3.2552549839019775,
      "learning_rate": 1.0686274509803922e-05,
      "loss": 0.4319,
      "step": 190
    },
    {
      "epoch": 1.47,
      "grad_norm": 2.458969831466675,
      "learning_rate": 1.0196078431372549e-05,
      "loss": 0.4703,
      "step": 200
    },
    {
      "epoch": 1.47,
      "eval_accuracy": 0.7720588235294118,
      "eval_f1_macro": 0.7372512970724334,
      "eval_f1_micro": 0.7720588235294118,
      "eval_loss": 0.48123905062675476,
      "eval_runtime": 0.5286,
      "eval_samples_per_second": 2058.189,
      "eval_steps_per_second": 32.159,
      "step": 200
    },
    {
      "epoch": 1.54,
      "grad_norm": 3.01029372215271,
      "learning_rate": 9.705882352941177e-06,
      "loss": 0.4198,
      "step": 210
    },
    {
      "epoch": 1.62,
      "grad_norm": 3.3124213218688965,
      "learning_rate": 9.215686274509804e-06,
      "loss": 0.4827,
      "step": 220
    },
    {
      "epoch": 1.69,
      "grad_norm": 2.8357181549072266,
      "learning_rate": 8.725490196078433e-06,
      "loss": 0.4128,
      "step": 230
    },
    {
      "epoch": 1.76,
      "grad_norm": 2.963460683822632,
      "learning_rate": 8.23529411764706e-06,
      "loss": 0.4388,
      "step": 240
    },
    {
      "epoch": 1.84,
      "grad_norm": 4.509665489196777,
      "learning_rate": 7.745098039215687e-06,
      "loss": 0.4537,
      "step": 250
    },
    {
      "epoch": 1.84,
      "eval_accuracy": 0.7665441176470589,
      "eval_f1_macro": 0.7304776952035423,
      "eval_f1_micro": 0.7665441176470589,
      "eval_loss": 0.48549798130989075,
      "eval_runtime": 0.5326,
      "eval_samples_per_second": 2042.806,
      "eval_steps_per_second": 31.919,
      "step": 250
    },
    {
      "epoch": 1.91,
      "grad_norm": 2.8789613246917725,
      "learning_rate": 7.2549019607843145e-06,
      "loss": 0.419,
      "step": 260
    },
    {
      "epoch": 1.99,
      "grad_norm": 2.5148537158966064,
      "learning_rate": 6.764705882352942e-06,
      "loss": 0.4017,
      "step": 270
    },
    {
      "epoch": 2.06,
      "grad_norm": 4.027554988861084,
      "learning_rate": 6.274509803921569e-06,
      "loss": 0.4302,
      "step": 280
    },
    {
      "epoch": 2.13,
      "grad_norm": 4.653955936431885,
      "learning_rate": 5.784313725490197e-06,
      "loss": 0.4383,
      "step": 290
    },
    {
      "epoch": 2.21,
      "grad_norm": 2.9630045890808105,
      "learning_rate": 5.294117647058824e-06,
      "loss": 0.4206,
      "step": 300
    },
    {
      "epoch": 2.21,
      "eval_accuracy": 0.7674632352941176,
      "eval_f1_macro": 0.7283154440249711,
      "eval_f1_micro": 0.7674632352941176,
      "eval_loss": 0.48802924156188965,
      "eval_runtime": 0.5312,
      "eval_samples_per_second": 2048.138,
      "eval_steps_per_second": 32.002,
      "step": 300
    },
    {
      "epoch": 2.28,
      "grad_norm": 4.027430057525635,
      "learning_rate": 4.803921568627452e-06,
      "loss": 0.4307,
      "step": 310
    },
    {
      "epoch": 2.35,
      "grad_norm": 5.239951133728027,
      "learning_rate": 4.313725490196079e-06,
      "loss": 0.395,
      "step": 320
    },
    {
      "epoch": 2.43,
      "grad_norm": 4.224455833435059,
      "learning_rate": 3.8235294117647055e-06,
      "loss": 0.4372,
      "step": 330
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.989615797996521,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.4237,
      "step": 340
    },
    {
      "epoch": 2.57,
      "grad_norm": 3.768437385559082,
      "learning_rate": 2.843137254901961e-06,
      "loss": 0.4561,
      "step": 350
    },
    {
      "epoch": 2.57,
      "eval_accuracy": 0.7628676470588235,
      "eval_f1_macro": 0.7299219507689465,
      "eval_f1_micro": 0.7628676470588235,
      "eval_loss": 0.49129608273506165,
      "eval_runtime": 0.5297,
      "eval_samples_per_second": 2054.079,
      "eval_steps_per_second": 32.095,
      "step": 350
    },
    {
      "epoch": 2.65,
      "grad_norm": 3.4412877559661865,
      "learning_rate": 2.3529411764705885e-06,
      "loss": 0.4268,
      "step": 360
    },
    {
      "epoch": 2.72,
      "grad_norm": 3.195571184158325,
      "learning_rate": 1.8627450980392158e-06,
      "loss": 0.3908,
      "step": 370
    },
    {
      "epoch": 2.79,
      "grad_norm": 2.5130951404571533,
      "learning_rate": 1.3725490196078434e-06,
      "loss": 0.395,
      "step": 380
    },
    {
      "epoch": 2.87,
      "grad_norm": 3.635629892349243,
      "learning_rate": 8.823529411764707e-07,
      "loss": 0.4135,
      "step": 390
    },
    {
      "epoch": 2.94,
      "grad_norm": 3.1357531547546387,
      "learning_rate": 3.921568627450981e-07,
      "loss": 0.3738,
      "step": 400
    },
    {
      "epoch": 2.94,
      "eval_accuracy": 0.7702205882352942,
      "eval_f1_macro": 0.7359428393911152,
      "eval_f1_micro": 0.7702205882352942,
      "eval_loss": 0.4950219392776489,
      "eval_runtime": 0.5296,
      "eval_samples_per_second": 2054.226,
      "eval_steps_per_second": 32.097,
      "step": 400
    },
    {
      "epoch": 3.0,
      "step": 408,
      "total_flos": 864747139366912.0,
      "train_loss": 0.45243801556381524,
      "train_runtime": 56.8681,
      "train_samples_per_second": 458.957,
      "train_steps_per_second": 7.175
    }
  ],
  "logging_steps": 10,
  "max_steps": 408,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 864747139366912.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}