{
  "best_metric": 0.5565423369407654,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/google_t5/t5_base_amazon/checkpoint-700",
  "epoch": 1.8421052631578947,
  "eval_steps": 50,
  "global_step": 700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "grad_norm": 2.8495495319366455,
      "learning_rate": 0.0004956140350877193,
      "loss": 3.117,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 2.5190072059631348,
      "learning_rate": 0.0004912280701754386,
      "loss": 2.7209,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 2.294928789138794,
      "learning_rate": 0.0004868421052631579,
      "loss": 2.0205,
      "step": 30
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.267091751098633,
      "learning_rate": 0.0004824561403508772,
      "loss": 1.2866,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 4.188276767730713,
      "learning_rate": 0.00047807017543859647,
      "loss": 1.2275,
      "step": 50
    },
    {
      "epoch": 0.13,
      "eval_accuracy": 0.6949934123847167,
      "eval_f1_macro": 0.6073038050199799,
      "eval_f1_micro": 0.6949934123847167,
      "eval_loss": 1.035279631614685,
      "eval_runtime": 3.7814,
      "eval_samples_per_second": 401.442,
      "eval_steps_per_second": 12.694,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 6.240413665771484,
      "learning_rate": 0.00047368421052631577,
      "loss": 1.0172,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 3.5446884632110596,
      "learning_rate": 0.0004692982456140351,
      "loss": 1.0344,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 4.263936519622803,
      "learning_rate": 0.00046491228070175437,
      "loss": 0.8594,
      "step": 80
    },
    {
      "epoch": 0.24,
      "grad_norm": 3.0901923179626465,
      "learning_rate": 0.0004605263157894737,
      "loss": 0.8781,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 2.737272262573242,
      "learning_rate": 0.000456140350877193,
      "loss": 0.8341,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_accuracy": 0.738471673254282,
      "eval_f1_macro": 0.6814380216401201,
      "eval_f1_micro": 0.738471673254282,
      "eval_loss": 0.8838083744049072,
      "eval_runtime": 3.8044,
      "eval_samples_per_second": 399.016,
      "eval_steps_per_second": 12.617,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 2.692996025085449,
      "learning_rate": 0.00045175438596491233,
      "loss": 0.9941,
      "step": 110
    },
    {
      "epoch": 0.32,
      "grad_norm": 2.3376717567443848,
      "learning_rate": 0.0004473684210526316,
      "loss": 0.8984,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 2.112525224685669,
      "learning_rate": 0.0004429824561403509,
      "loss": 0.909,
      "step": 130
    },
    {
      "epoch": 0.37,
      "grad_norm": 2.8785059452056885,
      "learning_rate": 0.0004385964912280702,
      "loss": 0.7654,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 2.3538177013397217,
      "learning_rate": 0.0004342105263157895,
      "loss": 0.7773,
      "step": 150
    },
    {
      "epoch": 0.39,
      "eval_accuracy": 0.7832674571805006,
      "eval_f1_macro": 0.7339618917892343,
      "eval_f1_micro": 0.7832674571805006,
      "eval_loss": 0.7473268508911133,
      "eval_runtime": 3.8195,
      "eval_samples_per_second": 397.435,
      "eval_steps_per_second": 12.567,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 2.850787401199341,
      "learning_rate": 0.0004298245614035088,
      "loss": 0.7734,
      "step": 160
    },
    {
      "epoch": 0.45,
      "grad_norm": 3.4294583797454834,
      "learning_rate": 0.0004254385964912281,
      "loss": 0.7876,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 2.0919501781463623,
      "learning_rate": 0.00042105263157894734,
      "loss": 0.7329,
      "step": 180
    },
    {
      "epoch": 0.5,
      "grad_norm": 5.026761531829834,
      "learning_rate": 0.0004166666666666667,
      "loss": 0.8396,
      "step": 190
    },
    {
      "epoch": 0.53,
      "grad_norm": 2.3940157890319824,
      "learning_rate": 0.000412280701754386,
      "loss": 0.7188,
      "step": 200
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.7924901185770751,
      "eval_f1_macro": 0.7432732481576452,
      "eval_f1_micro": 0.7924901185770751,
      "eval_loss": 0.7023962140083313,
      "eval_runtime": 3.8263,
      "eval_samples_per_second": 396.73,
      "eval_steps_per_second": 12.545,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 2.965258836746216,
      "learning_rate": 0.00040789473684210524,
      "loss": 0.66,
      "step": 210
    },
    {
      "epoch": 0.58,
      "grad_norm": 3.2059409618377686,
      "learning_rate": 0.00040350877192982455,
      "loss": 0.7795,
      "step": 220
    },
    {
      "epoch": 0.61,
      "grad_norm": 4.9381537437438965,
      "learning_rate": 0.0003991228070175439,
      "loss": 0.7163,
      "step": 230
    },
    {
      "epoch": 0.63,
      "grad_norm": 4.937159061431885,
      "learning_rate": 0.00039473684210526315,
      "loss": 0.7546,
      "step": 240
    },
    {
      "epoch": 0.66,
      "grad_norm": 2.786454439163208,
      "learning_rate": 0.00039035087719298245,
      "loss": 0.7483,
      "step": 250
    },
    {
      "epoch": 0.66,
      "eval_accuracy": 0.7872200263504612,
      "eval_f1_macro": 0.7396308270254102,
      "eval_f1_micro": 0.7872200263504612,
      "eval_loss": 0.7055577039718628,
      "eval_runtime": 3.8209,
      "eval_samples_per_second": 397.29,
      "eval_steps_per_second": 12.563,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 2.2485697269439697,
      "learning_rate": 0.00038596491228070175,
      "loss": 0.7216,
      "step": 260
    },
    {
      "epoch": 0.71,
      "grad_norm": 3.388631582260132,
      "learning_rate": 0.00038157894736842105,
      "loss": 0.7437,
      "step": 270
    },
    {
      "epoch": 0.74,
      "grad_norm": 2.945599317550659,
      "learning_rate": 0.00037719298245614036,
      "loss": 0.6433,
      "step": 280
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.894361972808838,
      "learning_rate": 0.00037280701754385966,
      "loss": 0.6333,
      "step": 290
    },
    {
      "epoch": 0.79,
      "grad_norm": 1.8753783702850342,
      "learning_rate": 0.00036842105263157896,
      "loss": 0.6228,
      "step": 300
    },
    {
      "epoch": 0.79,
      "eval_accuracy": 0.8129117259552042,
      "eval_f1_macro": 0.7636250391666534,
      "eval_f1_micro": 0.8129117259552042,
      "eval_loss": 0.6337724924087524,
      "eval_runtime": 3.8242,
      "eval_samples_per_second": 396.945,
      "eval_steps_per_second": 12.552,
      "step": 300
    },
    {
      "epoch": 0.82,
      "grad_norm": 3.340951442718506,
      "learning_rate": 0.00036403508771929826,
      "loss": 0.5978,
      "step": 310
    },
    {
      "epoch": 0.84,
      "grad_norm": 3.2675557136535645,
      "learning_rate": 0.00035964912280701756,
      "loss": 0.7124,
      "step": 320
    },
    {
      "epoch": 0.87,
      "grad_norm": 2.308924674987793,
      "learning_rate": 0.00035526315789473687,
      "loss": 0.7423,
      "step": 330
    },
    {
      "epoch": 0.89,
      "grad_norm": 3.407076835632324,
      "learning_rate": 0.0003508771929824561,
      "loss": 0.6787,
      "step": 340
    },
    {
      "epoch": 0.92,
      "grad_norm": 2.5976576805114746,
      "learning_rate": 0.00034649122807017547,
      "loss": 0.7089,
      "step": 350
    },
    {
      "epoch": 0.92,
      "eval_accuracy": 0.8208168642951251,
      "eval_f1_macro": 0.7962943499701793,
      "eval_f1_micro": 0.8208168642951251,
      "eval_loss": 0.6130083799362183,
      "eval_runtime": 3.8233,
      "eval_samples_per_second": 397.037,
      "eval_steps_per_second": 12.555,
      "step": 350
    },
    {
      "epoch": 0.95,
      "grad_norm": 2.053323745727539,
      "learning_rate": 0.00034210526315789477,
      "loss": 0.5299,
      "step": 360
    },
    {
      "epoch": 0.97,
      "grad_norm": 3.201794147491455,
      "learning_rate": 0.000337719298245614,
      "loss": 0.7405,
      "step": 370
    },
    {
      "epoch": 1.0,
      "grad_norm": 2.8295910358428955,
      "learning_rate": 0.0003333333333333333,
      "loss": 0.7584,
      "step": 380
    },
    {
      "epoch": 1.03,
      "grad_norm": 1.9317399263381958,
      "learning_rate": 0.0003289473684210527,
      "loss": 0.4822,
      "step": 390
    },
    {
      "epoch": 1.05,
      "grad_norm": 2.5840656757354736,
      "learning_rate": 0.0003245614035087719,
      "loss": 0.5055,
      "step": 400
    },
    {
      "epoch": 1.05,
      "eval_accuracy": 0.8300395256916996,
      "eval_f1_macro": 0.8075388232802375,
      "eval_f1_micro": 0.8300395256916996,
      "eval_loss": 0.5938563346862793,
      "eval_runtime": 3.8245,
      "eval_samples_per_second": 396.912,
      "eval_steps_per_second": 12.551,
      "step": 400
    },
    {
      "epoch": 1.08,
      "grad_norm": 2.4889354705810547,
      "learning_rate": 0.00032017543859649123,
      "loss": 0.425,
      "step": 410
    },
    {
      "epoch": 1.11,
      "grad_norm": 2.618088722229004,
      "learning_rate": 0.00031578947368421053,
      "loss": 0.3865,
      "step": 420
    },
    {
      "epoch": 1.13,
      "grad_norm": 2.8282623291015625,
      "learning_rate": 0.00031140350877192983,
      "loss": 0.4543,
      "step": 430
    },
    {
      "epoch": 1.16,
      "grad_norm": 3.4801692962646484,
      "learning_rate": 0.00030701754385964913,
      "loss": 0.4152,
      "step": 440
    },
    {
      "epoch": 1.18,
      "grad_norm": 1.7531658411026,
      "learning_rate": 0.00030263157894736844,
      "loss": 0.3942,
      "step": 450
    },
    {
      "epoch": 1.18,
      "eval_accuracy": 0.8241106719367589,
      "eval_f1_macro": 0.7915647113122625,
      "eval_f1_micro": 0.8241106719367589,
      "eval_loss": 0.6020949482917786,
      "eval_runtime": 3.8282,
      "eval_samples_per_second": 396.536,
      "eval_steps_per_second": 12.539,
      "step": 450
    },
    {
      "epoch": 1.21,
      "grad_norm": 2.6115176677703857,
      "learning_rate": 0.0002982456140350877,
      "loss": 0.4801,
      "step": 460
    },
    {
      "epoch": 1.24,
      "grad_norm": 2.1855995655059814,
      "learning_rate": 0.00029385964912280704,
      "loss": 0.3942,
      "step": 470
    },
    {
      "epoch": 1.26,
      "grad_norm": 2.3812382221221924,
      "learning_rate": 0.00028947368421052634,
      "loss": 0.485,
      "step": 480
    },
    {
      "epoch": 1.29,
      "grad_norm": 2.102308750152588,
      "learning_rate": 0.00028508771929824564,
      "loss": 0.417,
      "step": 490
    },
    {
      "epoch": 1.32,
      "grad_norm": 4.095526218414307,
      "learning_rate": 0.0002807017543859649,
      "loss": 0.4248,
      "step": 500
    },
    {
      "epoch": 1.32,
      "eval_accuracy": 0.8300395256916996,
      "eval_f1_macro": 0.8060251760691185,
      "eval_f1_micro": 0.8300395256916996,
      "eval_loss": 0.5956056714057922,
      "eval_runtime": 3.8218,
      "eval_samples_per_second": 397.192,
      "eval_steps_per_second": 12.559,
      "step": 500
    },
    {
      "epoch": 1.34,
      "grad_norm": 3.2039239406585693,
      "learning_rate": 0.00027631578947368425,
      "loss": 0.4209,
      "step": 510
    },
    {
      "epoch": 1.37,
      "grad_norm": 1.9944714307785034,
      "learning_rate": 0.00027192982456140355,
      "loss": 0.5373,
      "step": 520
    },
    {
      "epoch": 1.39,
      "grad_norm": 3.2802696228027344,
      "learning_rate": 0.0002675438596491228,
      "loss": 0.5216,
      "step": 530
    },
    {
      "epoch": 1.42,
      "grad_norm": 1.4747893810272217,
      "learning_rate": 0.0002631578947368421,
      "loss": 0.3785,
      "step": 540
    },
    {
      "epoch": 1.45,
      "grad_norm": 1.2378747463226318,
      "learning_rate": 0.00025877192982456146,
      "loss": 0.3595,
      "step": 550
    },
    {
      "epoch": 1.45,
      "eval_accuracy": 0.8175230566534915,
      "eval_f1_macro": 0.7897278945523275,
      "eval_f1_micro": 0.8175230566534915,
      "eval_loss": 0.6172593832015991,
      "eval_runtime": 3.8184,
      "eval_samples_per_second": 397.551,
      "eval_steps_per_second": 12.571,
      "step": 550
    },
    {
      "epoch": 1.47,
      "grad_norm": 2.6604156494140625,
      "learning_rate": 0.0002543859649122807,
      "loss": 0.4231,
      "step": 560
    },
    {
      "epoch": 1.5,
      "grad_norm": 1.879952311515808,
      "learning_rate": 0.00025,
      "loss": 0.4555,
      "step": 570
    },
    {
      "epoch": 1.53,
      "grad_norm": 3.9825170040130615,
      "learning_rate": 0.0002456140350877193,
      "loss": 0.3988,
      "step": 580
    },
    {
      "epoch": 1.55,
      "grad_norm": 2.999025583267212,
      "learning_rate": 0.0002412280701754386,
      "loss": 0.3854,
      "step": 590
    },
    {
      "epoch": 1.58,
      "grad_norm": 2.778930425643921,
      "learning_rate": 0.00023684210526315788,
      "loss": 0.5263,
      "step": 600
    },
    {
      "epoch": 1.58,
      "eval_accuracy": 0.8162055335968379,
      "eval_f1_macro": 0.7908253969964322,
      "eval_f1_micro": 0.8162055335968379,
      "eval_loss": 0.6170048117637634,
      "eval_runtime": 3.838,
      "eval_samples_per_second": 395.517,
      "eval_steps_per_second": 12.506,
      "step": 600
    },
    {
      "epoch": 1.61,
      "grad_norm": 2.2400004863739014,
      "learning_rate": 0.00023245614035087719,
      "loss": 0.443,
      "step": 610
    },
    {
      "epoch": 1.63,
      "grad_norm": 2.5458765029907227,
      "learning_rate": 0.0002280701754385965,
      "loss": 0.4106,
      "step": 620
    },
    {
      "epoch": 1.66,
      "grad_norm": 2.955345392227173,
      "learning_rate": 0.0002236842105263158,
      "loss": 0.4078,
      "step": 630
    },
    {
      "epoch": 1.68,
      "grad_norm": 3.5653369426727295,
      "learning_rate": 0.0002192982456140351,
      "loss": 0.4746,
      "step": 640
    },
    {
      "epoch": 1.71,
      "grad_norm": 1.5618356466293335,
      "learning_rate": 0.0002149122807017544,
      "loss": 0.5153,
      "step": 650
    },
    {
      "epoch": 1.71,
      "eval_accuracy": 0.8326745718050066,
      "eval_f1_macro": 0.8042732309505177,
      "eval_f1_micro": 0.8326745718050066,
      "eval_loss": 0.6007006168365479,
      "eval_runtime": 3.8189,
      "eval_samples_per_second": 397.495,
      "eval_steps_per_second": 12.569,
      "step": 650
    },
    {
      "epoch": 1.74,
      "grad_norm": 3.3417813777923584,
      "learning_rate": 0.00021052631578947367,
      "loss": 0.3366,
      "step": 660
    },
    {
      "epoch": 1.76,
      "grad_norm": 3.232940912246704,
      "learning_rate": 0.000206140350877193,
      "loss": 0.4284,
      "step": 670
    },
    {
      "epoch": 1.79,
      "grad_norm": 2.414170265197754,
      "learning_rate": 0.00020175438596491227,
      "loss": 0.4816,
      "step": 680
    },
    {
      "epoch": 1.82,
      "grad_norm": 2.161409378051758,
      "learning_rate": 0.00019736842105263157,
      "loss": 0.398,
      "step": 690
    },
    {
      "epoch": 1.84,
      "grad_norm": 2.6579811573028564,
      "learning_rate": 0.00019298245614035088,
      "loss": 0.4237,
      "step": 700
    },
    {
      "epoch": 1.84,
      "eval_accuracy": 0.8399209486166008,
      "eval_f1_macro": 0.8112898753723374,
      "eval_f1_micro": 0.8399209486166008,
      "eval_loss": 0.5565423369407654,
      "eval_runtime": 3.8246,
      "eval_samples_per_second": 396.904,
      "eval_steps_per_second": 12.55,
      "step": 700
    }
  ],
  "logging_steps": 10,
  "max_steps": 1140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 3420628430356480.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}