SLM_vs_LLM_experiments/max_seq_length_128_experiments/google_bert/bert_base_uncased_amazon/trainer_state.json
{
  "best_metric": 0.7915631532669067,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/google_bert/bert_base_uncased_amazon/checkpoint-550",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 570,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 3.0047507286071777,
      "learning_rate": 1.9649122807017544e-05,
      "loss": 3.1497,
      "step": 10
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.8671908378601074,
      "learning_rate": 1.929824561403509e-05,
      "loss": 3.0472,
      "step": 20
    },
    {
      "epoch": 0.16,
      "grad_norm": 4.191209316253662,
      "learning_rate": 1.894736842105263e-05,
      "loss": 2.9007,
      "step": 30
    },
    {
      "epoch": 0.21,
      "grad_norm": 3.6195719242095947,
      "learning_rate": 1.8596491228070176e-05,
      "loss": 2.7123,
      "step": 40
    },
    {
      "epoch": 0.26,
      "grad_norm": 3.8005239963531494,
      "learning_rate": 1.824561403508772e-05,
      "loss": 2.5476,
      "step": 50
    },
    {
      "epoch": 0.26,
      "eval_accuracy": 0.49670619235836627,
      "eval_f1_macro": 0.3546099871534922,
      "eval_f1_micro": 0.49670619235836627,
      "eval_loss": 2.4070885181427,
      "eval_runtime": 1.3956,
      "eval_samples_per_second": 1087.704,
      "eval_steps_per_second": 17.197,
      "step": 50
    },
    {
      "epoch": 0.32,
      "grad_norm": 4.975920677185059,
      "learning_rate": 1.7894736842105264e-05,
      "loss": 2.3704,
      "step": 60
    },
    {
      "epoch": 0.37,
      "grad_norm": 4.028805255889893,
      "learning_rate": 1.754385964912281e-05,
      "loss": 2.1795,
      "step": 70
    },
    {
      "epoch": 0.42,
      "grad_norm": 4.310976028442383,
      "learning_rate": 1.719298245614035e-05,
      "loss": 1.9854,
      "step": 80
    },
    {
      "epoch": 0.47,
      "grad_norm": 3.7890875339508057,
      "learning_rate": 1.6842105263157896e-05,
      "loss": 1.8812,
      "step": 90
    },
    {
      "epoch": 0.53,
      "grad_norm": 3.57995343208313,
      "learning_rate": 1.649122807017544e-05,
      "loss": 1.7401,
      "step": 100
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.6337285902503293,
      "eval_f1_macro": 0.4899315412700184,
      "eval_f1_micro": 0.6337285902503293,
      "eval_loss": 1.6469579935073853,
      "eval_runtime": 1.4546,
      "eval_samples_per_second": 1043.605,
      "eval_steps_per_second": 16.5,
      "step": 100
    },
    {
      "epoch": 0.58,
      "grad_norm": 3.649951696395874,
      "learning_rate": 1.6140350877192984e-05,
      "loss": 1.6134,
      "step": 110
    },
    {
      "epoch": 0.63,
      "grad_norm": 3.6378748416900635,
      "learning_rate": 1.578947368421053e-05,
      "loss": 1.5476,
      "step": 120
    },
    {
      "epoch": 0.68,
      "grad_norm": 4.442119598388672,
      "learning_rate": 1.543859649122807e-05,
      "loss": 1.4687,
      "step": 130
    },
    {
      "epoch": 0.74,
      "grad_norm": 3.7188234329223633,
      "learning_rate": 1.5087719298245615e-05,
      "loss": 1.396,
      "step": 140
    },
    {
      "epoch": 0.79,
      "grad_norm": 4.182339191436768,
      "learning_rate": 1.4736842105263159e-05,
      "loss": 1.3223,
      "step": 150
    },
    {
      "epoch": 0.79,
      "eval_accuracy": 0.6897233201581028,
      "eval_f1_macro": 0.5664899534257014,
      "eval_f1_micro": 0.6897233201581028,
      "eval_loss": 1.2888767719268799,
      "eval_runtime": 1.4176,
      "eval_samples_per_second": 1070.835,
      "eval_steps_per_second": 16.93,
      "step": 150
    },
    {
      "epoch": 0.84,
      "grad_norm": 4.865353584289551,
      "learning_rate": 1.4385964912280704e-05,
      "loss": 1.2931,
      "step": 160
    },
    {
      "epoch": 0.89,
      "grad_norm": 3.749250650405884,
      "learning_rate": 1.4035087719298246e-05,
      "loss": 1.2661,
      "step": 170
    },
    {
      "epoch": 0.95,
      "grad_norm": 4.157919406890869,
      "learning_rate": 1.3684210526315791e-05,
      "loss": 1.2114,
      "step": 180
    },
    {
      "epoch": 1.0,
      "grad_norm": 5.845521926879883,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.2428,
      "step": 190
    },
    {
      "epoch": 1.05,
      "grad_norm": 4.141289234161377,
      "learning_rate": 1.2982456140350879e-05,
      "loss": 1.1317,
      "step": 200
    },
    {
      "epoch": 1.05,
      "eval_accuracy": 0.735836627140975,
      "eval_f1_macro": 0.6577255881727345,
      "eval_f1_micro": 0.735836627140975,
      "eval_loss": 1.1047031879425049,
      "eval_runtime": 1.4246,
      "eval_samples_per_second": 1065.595,
      "eval_steps_per_second": 16.847,
      "step": 200
    },
    {
      "epoch": 1.11,
      "grad_norm": 4.0819010734558105,
      "learning_rate": 1.263157894736842e-05,
      "loss": 1.0801,
      "step": 210
    },
    {
      "epoch": 1.16,
      "grad_norm": 5.738018035888672,
      "learning_rate": 1.2280701754385966e-05,
      "loss": 1.0759,
      "step": 220
    },
    {
      "epoch": 1.21,
      "grad_norm": 4.718931674957275,
      "learning_rate": 1.192982456140351e-05,
      "loss": 1.0298,
      "step": 230
    },
    {
      "epoch": 1.26,
      "grad_norm": 3.587674379348755,
      "learning_rate": 1.1578947368421053e-05,
      "loss": 0.9587,
      "step": 240
    },
    {
      "epoch": 1.32,
      "grad_norm": 4.904854774475098,
      "learning_rate": 1.1228070175438597e-05,
      "loss": 0.9137,
      "step": 250
    },
    {
      "epoch": 1.32,
      "eval_accuracy": 0.7536231884057971,
      "eval_f1_macro": 0.6820274313440232,
      "eval_f1_micro": 0.7536231884057971,
      "eval_loss": 0.9906623959541321,
      "eval_runtime": 1.431,
      "eval_samples_per_second": 1060.794,
      "eval_steps_per_second": 16.771,
      "step": 250
    },
    {
      "epoch": 1.37,
      "grad_norm": 3.534885883331299,
      "learning_rate": 1.0877192982456142e-05,
      "loss": 0.9299,
      "step": 260
    },
    {
      "epoch": 1.42,
      "grad_norm": 4.119781017303467,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.9538,
      "step": 270
    },
    {
      "epoch": 1.47,
      "grad_norm": 3.929755687713623,
      "learning_rate": 1.017543859649123e-05,
      "loss": 0.9447,
      "step": 280
    },
    {
      "epoch": 1.53,
      "grad_norm": 4.733183860778809,
      "learning_rate": 9.824561403508772e-06,
      "loss": 0.854,
      "step": 290
    },
    {
      "epoch": 1.58,
      "grad_norm": 3.168888807296753,
      "learning_rate": 9.473684210526315e-06,
      "loss": 0.9434,
      "step": 300
    },
    {
      "epoch": 1.58,
      "eval_accuracy": 0.7602108036890646,
      "eval_f1_macro": 0.6895556379814868,
      "eval_f1_micro": 0.7602108036890646,
      "eval_loss": 0.9264395833015442,
      "eval_runtime": 1.434,
      "eval_samples_per_second": 1058.582,
      "eval_steps_per_second": 16.736,
      "step": 300
    },
    {
      "epoch": 1.63,
      "grad_norm": 4.717241287231445,
      "learning_rate": 9.12280701754386e-06,
      "loss": 0.9216,
      "step": 310
    },
    {
      "epoch": 1.68,
      "grad_norm": 4.8470988273620605,
      "learning_rate": 8.771929824561405e-06,
      "loss": 0.8579,
      "step": 320
    },
    {
      "epoch": 1.74,
      "grad_norm": 5.183309555053711,
      "learning_rate": 8.421052631578948e-06,
      "loss": 0.8586,
      "step": 330
    },
    {
      "epoch": 1.79,
      "grad_norm": 7.0329766273498535,
      "learning_rate": 8.070175438596492e-06,
      "loss": 0.9531,
      "step": 340
    },
    {
      "epoch": 1.84,
      "grad_norm": 5.28553581237793,
      "learning_rate": 7.719298245614036e-06,
      "loss": 0.86,
      "step": 350
    },
    {
      "epoch": 1.84,
      "eval_accuracy": 0.7779973649538867,
      "eval_f1_macro": 0.7135374057878592,
      "eval_f1_micro": 0.7779973649538867,
      "eval_loss": 0.8728891015052795,
      "eval_runtime": 1.4867,
      "eval_samples_per_second": 1021.034,
      "eval_steps_per_second": 16.143,
      "step": 350
    },
    {
      "epoch": 1.89,
      "grad_norm": 5.873104095458984,
      "learning_rate": 7.368421052631579e-06,
      "loss": 0.7864,
      "step": 360
    },
    {
      "epoch": 1.95,
      "grad_norm": 4.824563026428223,
      "learning_rate": 7.017543859649123e-06,
      "loss": 0.8333,
      "step": 370
    },
    {
      "epoch": 2.0,
      "grad_norm": 5.800777435302734,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.8403,
      "step": 380
    },
    {
      "epoch": 2.05,
      "grad_norm": 3.9126601219177246,
      "learning_rate": 6.31578947368421e-06,
      "loss": 0.799,
      "step": 390
    },
    {
      "epoch": 2.11,
      "grad_norm": 4.892070293426514,
      "learning_rate": 5.964912280701755e-06,
      "loss": 0.7567,
      "step": 400
    },
    {
      "epoch": 2.11,
      "eval_accuracy": 0.7859025032938076,
      "eval_f1_macro": 0.7244375969329084,
      "eval_f1_micro": 0.7859025032938076,
      "eval_loss": 0.8321741223335266,
      "eval_runtime": 1.488,
      "eval_samples_per_second": 1020.179,
      "eval_steps_per_second": 16.129,
      "step": 400
    },
    {
      "epoch": 2.16,
      "grad_norm": 4.8746724128723145,
      "learning_rate": 5.6140350877192985e-06,
      "loss": 0.6814,
      "step": 410
    },
    {
      "epoch": 2.21,
      "grad_norm": 5.263884544372559,
      "learning_rate": 5.263157894736842e-06,
      "loss": 0.7323,
      "step": 420
    },
    {
      "epoch": 2.26,
      "grad_norm": 4.6470947265625,
      "learning_rate": 4.912280701754386e-06,
      "loss": 0.6902,
      "step": 430
    },
    {
      "epoch": 2.32,
      "grad_norm": 5.3066816329956055,
      "learning_rate": 4.56140350877193e-06,
      "loss": 0.7446,
      "step": 440
    },
    {
      "epoch": 2.37,
      "grad_norm": 3.0504865646362305,
      "learning_rate": 4.210526315789474e-06,
      "loss": 0.7028,
      "step": 450
    },
    {
      "epoch": 2.37,
      "eval_accuracy": 0.7891963109354414,
      "eval_f1_macro": 0.7339111590418241,
      "eval_f1_micro": 0.7891963109354414,
      "eval_loss": 0.8130051493644714,
      "eval_runtime": 1.4895,
      "eval_samples_per_second": 1019.157,
      "eval_steps_per_second": 16.113,
      "step": 450
    },
    {
      "epoch": 2.42,
      "grad_norm": 5.582711696624756,
      "learning_rate": 3.859649122807018e-06,
      "loss": 0.6924,
      "step": 460
    },
    {
      "epoch": 2.47,
      "grad_norm": 4.516101837158203,
      "learning_rate": 3.5087719298245615e-06,
      "loss": 0.7345,
      "step": 470
    },
    {
      "epoch": 2.53,
      "grad_norm": 4.358815670013428,
      "learning_rate": 3.157894736842105e-06,
      "loss": 0.7219,
      "step": 480
    },
    {
      "epoch": 2.58,
      "grad_norm": 4.740716934204102,
      "learning_rate": 2.8070175438596493e-06,
      "loss": 0.6904,
      "step": 490
    },
    {
      "epoch": 2.63,
      "grad_norm": 4.01389217376709,
      "learning_rate": 2.456140350877193e-06,
      "loss": 0.6842,
      "step": 500
    },
    {
      "epoch": 2.63,
      "eval_accuracy": 0.7891963109354414,
      "eval_f1_macro": 0.7284370779832114,
      "eval_f1_micro": 0.7891963109354414,
      "eval_loss": 0.8005011081695557,
      "eval_runtime": 1.4404,
      "eval_samples_per_second": 1053.867,
      "eval_steps_per_second": 16.662,
      "step": 500
    },
    {
      "epoch": 2.68,
      "grad_norm": 5.79302978515625,
      "learning_rate": 2.105263157894737e-06,
      "loss": 0.661,
      "step": 510
    },
    {
      "epoch": 2.74,
      "grad_norm": 4.712301731109619,
      "learning_rate": 1.7543859649122807e-06,
      "loss": 0.7197,
      "step": 520
    },
    {
      "epoch": 2.79,
      "grad_norm": 4.111674785614014,
      "learning_rate": 1.4035087719298246e-06,
      "loss": 0.7293,
      "step": 530
    },
    {
      "epoch": 2.84,
      "grad_norm": 4.260751247406006,
      "learning_rate": 1.0526315789473685e-06,
      "loss": 0.6879,
      "step": 540
    },
    {
      "epoch": 2.89,
      "grad_norm": 4.812810897827148,
      "learning_rate": 7.017543859649123e-07,
      "loss": 0.6784,
      "step": 550
    },
    {
      "epoch": 2.89,
      "eval_accuracy": 0.7878787878787878,
      "eval_f1_macro": 0.7307727509227439,
      "eval_f1_micro": 0.7878787878787878,
      "eval_loss": 0.7915631532669067,
      "eval_runtime": 1.4939,
      "eval_samples_per_second": 1016.159,
      "eval_steps_per_second": 16.066,
      "step": 550
    },
    {
      "epoch": 2.95,
      "grad_norm": 5.386019229888916,
      "learning_rate": 3.5087719298245616e-07,
      "loss": 0.7219,
      "step": 560
    },
    {
      "epoch": 3.0,
      "grad_norm": 6.00460147857666,
      "learning_rate": 0.0,
      "loss": 0.7506,
      "step": 570
    },
    {
      "epoch": 3.0,
      "step": 570,
      "total_flos": 2400025338445824.0,
      "train_loss": 1.1961698414986595,
      "train_runtime": 135.7278,
      "train_samples_per_second": 268.42,
      "train_steps_per_second": 4.2
    }
  ],
  "logging_steps": 10,
  "max_steps": 570,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 2400025338445824.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}