{
  "best_metric": 0.5565423369407654,
  "best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/google_t5/t5_base_amazon/checkpoint-700",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 1140,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "grad_norm": 2.8495495319366455,
      "learning_rate": 0.0004956140350877193,
      "loss": 3.117,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 2.5190072059631348,
      "learning_rate": 0.0004912280701754386,
      "loss": 2.7209,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 2.294928789138794,
      "learning_rate": 0.0004868421052631579,
      "loss": 2.0205,
      "step": 30
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.267091751098633,
      "learning_rate": 0.0004824561403508772,
      "loss": 1.2866,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 4.188276767730713,
      "learning_rate": 0.00047807017543859647,
      "loss": 1.2275,
      "step": 50
    },
    {
      "epoch": 0.13,
      "eval_accuracy": 0.6949934123847167,
      "eval_f1_macro": 0.6073038050199799,
      "eval_f1_micro": 0.6949934123847167,
      "eval_loss": 1.035279631614685,
      "eval_runtime": 3.7814,
      "eval_samples_per_second": 401.442,
      "eval_steps_per_second": 12.694,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 6.240413665771484,
      "learning_rate": 0.00047368421052631577,
      "loss": 1.0172,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 3.5446884632110596,
      "learning_rate": 0.0004692982456140351,
      "loss": 1.0344,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 4.263936519622803,
      "learning_rate": 0.00046491228070175437,
      "loss": 0.8594,
      "step": 80
    },
    {
      "epoch": 0.24,
      "grad_norm": 3.0901923179626465,
      "learning_rate": 0.0004605263157894737,
      "loss": 0.8781,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 2.737272262573242,
      "learning_rate": 0.000456140350877193,
      "loss": 0.8341,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_accuracy": 0.738471673254282,
      "eval_f1_macro": 0.6814380216401201,
      "eval_f1_micro": 0.738471673254282,
      "eval_loss": 0.8838083744049072,
      "eval_runtime": 3.8044,
      "eval_samples_per_second": 399.016,
      "eval_steps_per_second": 12.617,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 2.692996025085449,
      "learning_rate": 0.00045175438596491233,
      "loss": 0.9941,
      "step": 110
    },
    {
      "epoch": 0.32,
      "grad_norm": 2.3376717567443848,
      "learning_rate": 0.0004473684210526316,
      "loss": 0.8984,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 2.112525224685669,
      "learning_rate": 0.0004429824561403509,
      "loss": 0.909,
      "step": 130
    },
    {
      "epoch": 0.37,
      "grad_norm": 2.8785059452056885,
      "learning_rate": 0.0004385964912280702,
      "loss": 0.7654,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 2.3538177013397217,
      "learning_rate": 0.0004342105263157895,
      "loss": 0.7773,
      "step": 150
    },
    {
      "epoch": 0.39,
      "eval_accuracy": 0.7832674571805006,
      "eval_f1_macro": 0.7339618917892343,
      "eval_f1_micro": 0.7832674571805006,
      "eval_loss": 0.7473268508911133,
      "eval_runtime": 3.8195,
      "eval_samples_per_second": 397.435,
      "eval_steps_per_second": 12.567,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 2.850787401199341,
      "learning_rate": 0.0004298245614035088,
      "loss": 0.7734,
      "step": 160
    },
    {
      "epoch": 0.45,
      "grad_norm": 3.4294583797454834,
      "learning_rate": 0.0004254385964912281,
      "loss": 0.7876,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 2.0919501781463623,
      "learning_rate": 0.00042105263157894734,
      "loss": 0.7329,
      "step": 180
    },
    {
      "epoch": 0.5,
      "grad_norm": 5.026761531829834,
      "learning_rate": 0.0004166666666666667,
      "loss": 0.8396,
      "step": 190
    },
    {
      "epoch": 0.53,
      "grad_norm": 2.3940157890319824,
      "learning_rate": 0.000412280701754386,
      "loss": 0.7188,
      "step": 200
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.7924901185770751,
      "eval_f1_macro": 0.7432732481576452,
      "eval_f1_micro": 0.7924901185770751,
      "eval_loss": 0.7023962140083313,
      "eval_runtime": 3.8263,
      "eval_samples_per_second": 396.73,
      "eval_steps_per_second": 12.545,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 2.965258836746216,
      "learning_rate": 0.00040789473684210524,
      "loss": 0.66,
      "step": 210
    },
    {
      "epoch": 0.58,
      "grad_norm": 3.2059409618377686,
      "learning_rate": 0.00040350877192982455,
      "loss": 0.7795,
      "step": 220
    },
    {
      "epoch": 0.61,
      "grad_norm": 4.9381537437438965,
      "learning_rate": 0.0003991228070175439,
      "loss": 0.7163,
      "step": 230
    },
    {
      "epoch": 0.63,
      "grad_norm": 4.937159061431885,
      "learning_rate": 0.00039473684210526315,
      "loss": 0.7546,
      "step": 240
    },
    {
      "epoch": 0.66,
      "grad_norm": 2.786454439163208,
      "learning_rate": 0.00039035087719298245,
      "loss": 0.7483,
      "step": 250
    },
    {
      "epoch": 0.66,
      "eval_accuracy": 0.7872200263504612,
      "eval_f1_macro": 0.7396308270254102,
      "eval_f1_micro": 0.7872200263504612,
      "eval_loss": 0.7055577039718628,
      "eval_runtime": 3.8209,
      "eval_samples_per_second": 397.29,
      "eval_steps_per_second": 12.563,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 2.2485697269439697,
      "learning_rate": 0.00038596491228070175,
      "loss": 0.7216,
      "step": 260
    },
    {
      "epoch": 0.71,
      "grad_norm": 3.388631582260132,
      "learning_rate": 0.00038157894736842105,
      "loss": 0.7437,
      "step": 270
    },
    {
      "epoch": 0.74,
      "grad_norm": 2.945599317550659,
      "learning_rate": 0.00037719298245614036,
      "loss": 0.6433,
      "step": 280
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.894361972808838,
      "learning_rate": 0.00037280701754385966,
      "loss": 0.6333,
      "step": 290
    },
    {
      "epoch": 0.79,
      "grad_norm": 1.8753783702850342,
      "learning_rate": 0.00036842105263157896,
      "loss": 0.6228,
      "step": 300
    },
    {
      "epoch": 0.79,
      "eval_accuracy": 0.8129117259552042,
      "eval_f1_macro": 0.7636250391666534,
      "eval_f1_micro": 0.8129117259552042,
      "eval_loss": 0.6337724924087524,
      "eval_runtime": 3.8242,
      "eval_samples_per_second": 396.945,
      "eval_steps_per_second": 12.552,
      "step": 300
    },
    {
      "epoch": 0.82,
      "grad_norm": 3.340951442718506,
      "learning_rate": 0.00036403508771929826,
      "loss": 0.5978,
      "step": 310
    },
    {
      "epoch": 0.84,
      "grad_norm": 3.2675557136535645,
      "learning_rate": 0.00035964912280701756,
      "loss": 0.7124,
      "step": 320
    },
    {
      "epoch": 0.87,
      "grad_norm": 2.308924674987793,
      "learning_rate": 0.00035526315789473687,
      "loss": 0.7423,
      "step": 330
    },
    {
      "epoch": 0.89,
      "grad_norm": 3.407076835632324,
      "learning_rate": 0.0003508771929824561,
      "loss": 0.6787,
      "step": 340
    },
    {
      "epoch": 0.92,
      "grad_norm": 2.5976576805114746,
      "learning_rate": 0.00034649122807017547,
      "loss": 0.7089,
      "step": 350
    },
    {
      "epoch": 0.92,
      "eval_accuracy": 0.8208168642951251,
      "eval_f1_macro": 0.7962943499701793,
      "eval_f1_micro": 0.8208168642951251,
      "eval_loss": 0.6130083799362183,
      "eval_runtime": 3.8233,
      "eval_samples_per_second": 397.037,
      "eval_steps_per_second": 12.555,
      "step": 350
    },
    {
      "epoch": 0.95,
      "grad_norm": 2.053323745727539,
      "learning_rate": 0.00034210526315789477,
      "loss": 0.5299,
      "step": 360
    },
    {
      "epoch": 0.97,
      "grad_norm": 3.201794147491455,
      "learning_rate": 0.000337719298245614,
      "loss": 0.7405,
      "step": 370
    },
    {
      "epoch": 1.0,
      "grad_norm": 2.8295910358428955,
      "learning_rate": 0.0003333333333333333,
      "loss": 0.7584,
      "step": 380
    },
    {
      "epoch": 1.03,
      "grad_norm": 1.9317399263381958,
      "learning_rate": 0.0003289473684210527,
      "loss": 0.4822,
      "step": 390
    },
    {
      "epoch": 1.05,
      "grad_norm": 2.5840656757354736,
      "learning_rate": 0.0003245614035087719,
      "loss": 0.5055,
      "step": 400
    },
    {
      "epoch": 1.05,
      "eval_accuracy": 0.8300395256916996,
      "eval_f1_macro": 0.8075388232802375,
      "eval_f1_micro": 0.8300395256916996,
      "eval_loss": 0.5938563346862793,
      "eval_runtime": 3.8245,
      "eval_samples_per_second": 396.912,
      "eval_steps_per_second": 12.551,
      "step": 400
    },
    {
      "epoch": 1.08,
      "grad_norm": 2.4889354705810547,
      "learning_rate": 0.00032017543859649123,
      "loss": 0.425,
      "step": 410
    },
    {
      "epoch": 1.11,
      "grad_norm": 2.618088722229004,
      "learning_rate": 0.00031578947368421053,
      "loss": 0.3865,
      "step": 420
    },
    {
      "epoch": 1.13,
      "grad_norm": 2.8282623291015625,
      "learning_rate": 0.00031140350877192983,
      "loss": 0.4543,
      "step": 430
    },
    {
      "epoch": 1.16,
      "grad_norm": 3.4801692962646484,
      "learning_rate": 0.00030701754385964913,
      "loss": 0.4152,
      "step": 440
    },
    {
      "epoch": 1.18,
      "grad_norm": 1.7531658411026,
      "learning_rate": 0.00030263157894736844,
      "loss": 0.3942,
      "step": 450
    },
    {
      "epoch": 1.18,
      "eval_accuracy": 0.8241106719367589,
      "eval_f1_macro": 0.7915647113122625,
      "eval_f1_micro": 0.8241106719367589,
      "eval_loss": 0.6020949482917786,
      "eval_runtime": 3.8282,
      "eval_samples_per_second": 396.536,
      "eval_steps_per_second": 12.539,
      "step": 450
    },
    {
      "epoch": 1.21,
      "grad_norm": 2.6115176677703857,
      "learning_rate": 0.0002982456140350877,
      "loss": 0.4801,
      "step": 460
    },
    {
      "epoch": 1.24,
      "grad_norm": 2.1855995655059814,
      "learning_rate": 0.00029385964912280704,
      "loss": 0.3942,
      "step": 470
    },
    {
      "epoch": 1.26,
      "grad_norm": 2.3812382221221924,
      "learning_rate": 0.00028947368421052634,
      "loss": 0.485,
      "step": 480
    },
    {
      "epoch": 1.29,
      "grad_norm": 2.102308750152588,
      "learning_rate": 0.00028508771929824564,
      "loss": 0.417,
      "step": 490
    },
    {
      "epoch": 1.32,
      "grad_norm": 4.095526218414307,
      "learning_rate": 0.0002807017543859649,
      "loss": 0.4248,
      "step": 500
    },
    {
      "epoch": 1.32,
      "eval_accuracy": 0.8300395256916996,
      "eval_f1_macro": 0.8060251760691185,
      "eval_f1_micro": 0.8300395256916996,
      "eval_loss": 0.5956056714057922,
      "eval_runtime": 3.8218,
      "eval_samples_per_second": 397.192,
      "eval_steps_per_second": 12.559,
      "step": 500
    },
    {
      "epoch": 1.34,
      "grad_norm": 3.2039239406585693,
      "learning_rate": 0.00027631578947368425,
      "loss": 0.4209,
      "step": 510
    },
    {
      "epoch": 1.37,
      "grad_norm": 1.9944714307785034,
      "learning_rate": 0.00027192982456140355,
      "loss": 0.5373,
      "step": 520
    },
    {
      "epoch": 1.39,
      "grad_norm": 3.2802696228027344,
      "learning_rate": 0.0002675438596491228,
      "loss": 0.5216,
      "step": 530
    },
    {
      "epoch": 1.42,
      "grad_norm": 1.4747893810272217,
      "learning_rate": 0.0002631578947368421,
      "loss": 0.3785,
      "step": 540
    },
    {
      "epoch": 1.45,
      "grad_norm": 1.2378747463226318,
      "learning_rate": 0.00025877192982456146,
      "loss": 0.3595,
      "step": 550
    },
    {
      "epoch": 1.45,
      "eval_accuracy": 0.8175230566534915,
      "eval_f1_macro": 0.7897278945523275,
      "eval_f1_micro": 0.8175230566534915,
      "eval_loss": 0.6172593832015991,
      "eval_runtime": 3.8184,
      "eval_samples_per_second": 397.551,
      "eval_steps_per_second": 12.571,
      "step": 550
    },
    {
      "epoch": 1.47,
      "grad_norm": 2.6604156494140625,
      "learning_rate": 0.0002543859649122807,
      "loss": 0.4231,
      "step": 560
    },
    {
      "epoch": 1.5,
      "grad_norm": 1.879952311515808,
      "learning_rate": 0.00025,
      "loss": 0.4555,
      "step": 570
    },
    {
      "epoch": 1.53,
      "grad_norm": 3.9825170040130615,
      "learning_rate": 0.0002456140350877193,
      "loss": 0.3988,
      "step": 580
    },
    {
      "epoch": 1.55,
      "grad_norm": 2.999025583267212,
      "learning_rate": 0.0002412280701754386,
      "loss": 0.3854,
      "step": 590
    },
    {
      "epoch": 1.58,
      "grad_norm": 2.778930425643921,
      "learning_rate": 0.00023684210526315788,
      "loss": 0.5263,
      "step": 600
    },
    {
      "epoch": 1.58,
      "eval_accuracy": 0.8162055335968379,
      "eval_f1_macro": 0.7908253969964322,
      "eval_f1_micro": 0.8162055335968379,
      "eval_loss": 0.6170048117637634,
      "eval_runtime": 3.838,
      "eval_samples_per_second": 395.517,
      "eval_steps_per_second": 12.506,
      "step": 600
    },
    {
      "epoch": 1.61,
      "grad_norm": 2.2400004863739014,
      "learning_rate": 0.00023245614035087719,
      "loss": 0.443,
      "step": 610
    },
    {
      "epoch": 1.63,
      "grad_norm": 2.5458765029907227,
      "learning_rate": 0.0002280701754385965,
      "loss": 0.4106,
      "step": 620
    },
    {
      "epoch": 1.66,
      "grad_norm": 2.955345392227173,
      "learning_rate": 0.0002236842105263158,
      "loss": 0.4078,
      "step": 630
    },
    {
      "epoch": 1.68,
      "grad_norm": 3.5653369426727295,
      "learning_rate": 0.0002192982456140351,
      "loss": 0.4746,
      "step": 640
    },
    {
      "epoch": 1.71,
      "grad_norm": 1.5618356466293335,
      "learning_rate": 0.0002149122807017544,
      "loss": 0.5153,
      "step": 650
    },
    {
      "epoch": 1.71,
      "eval_accuracy": 0.8326745718050066,
      "eval_f1_macro": 0.8042732309505177,
      "eval_f1_micro": 0.8326745718050066,
      "eval_loss": 0.6007006168365479,
      "eval_runtime": 3.8189,
      "eval_samples_per_second": 397.495,
      "eval_steps_per_second": 12.569,
      "step": 650
    },
    {
      "epoch": 1.74,
      "grad_norm": 3.3417813777923584,
      "learning_rate": 0.00021052631578947367,
      "loss": 0.3366,
      "step": 660
    },
    {
      "epoch": 1.76,
      "grad_norm": 3.232940912246704,
      "learning_rate": 0.000206140350877193,
      "loss": 0.4284,
      "step": 670
    },
    {
      "epoch": 1.79,
      "grad_norm": 2.414170265197754,
      "learning_rate": 0.00020175438596491227,
      "loss": 0.4816,
      "step": 680
    },
    {
      "epoch": 1.82,
      "grad_norm": 2.161409378051758,
      "learning_rate": 0.00019736842105263157,
      "loss": 0.398,
      "step": 690
    },
    {
      "epoch": 1.84,
      "grad_norm": 2.6579811573028564,
      "learning_rate": 0.00019298245614035088,
      "loss": 0.4237,
      "step": 700
    },
    {
      "epoch": 1.84,
      "eval_accuracy": 0.8399209486166008,
      "eval_f1_macro": 0.8112898753723374,
      "eval_f1_micro": 0.8399209486166008,
      "eval_loss": 0.5565423369407654,
      "eval_runtime": 3.8246,
      "eval_samples_per_second": 396.904,
      "eval_steps_per_second": 12.55,
      "step": 700
    },
    {
      "epoch": 1.87,
      "grad_norm": 3.548602342605591,
      "learning_rate": 0.00018859649122807018,
      "loss": 0.4094,
      "step": 710
    },
    {
      "epoch": 1.89,
      "grad_norm": 2.4683637619018555,
      "learning_rate": 0.00018421052631578948,
      "loss": 0.4157,
      "step": 720
    },
    {
      "epoch": 1.92,
      "grad_norm": 3.2135560512542725,
      "learning_rate": 0.00017982456140350878,
      "loss": 0.4306,
      "step": 730
    },
    {
      "epoch": 1.95,
      "grad_norm": 2.9333302974700928,
      "learning_rate": 0.00017543859649122806,
      "loss": 0.4584,
      "step": 740
    },
    {
      "epoch": 1.97,
      "grad_norm": 3.854954957962036,
      "learning_rate": 0.00017105263157894739,
      "loss": 0.3852,
      "step": 750
    },
    {
      "epoch": 1.97,
      "eval_accuracy": 0.8438735177865613,
      "eval_f1_macro": 0.8146328190948308,
      "eval_f1_micro": 0.8438735177865613,
      "eval_loss": 0.563149094581604,
      "eval_runtime": 3.8078,
      "eval_samples_per_second": 398.657,
      "eval_steps_per_second": 12.606,
      "step": 750
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.8746769428253174,
      "learning_rate": 0.00016666666666666666,
      "loss": 0.3997,
      "step": 760
    },
    {
      "epoch": 2.03,
      "grad_norm": 1.5188621282577515,
      "learning_rate": 0.00016228070175438596,
      "loss": 0.2052,
      "step": 770
    },
    {
      "epoch": 2.05,
      "grad_norm": 2.1245510578155518,
      "learning_rate": 0.00015789473684210527,
      "loss": 0.2673,
      "step": 780
    },
    {
      "epoch": 2.08,
      "grad_norm": 1.595406174659729,
      "learning_rate": 0.00015350877192982457,
      "loss": 0.1844,
      "step": 790
    },
    {
      "epoch": 2.11,
      "grad_norm": 1.294638991355896,
      "learning_rate": 0.00014912280701754384,
      "loss": 0.1916,
      "step": 800
    },
    {
      "epoch": 2.11,
      "eval_accuracy": 0.8438735177865613,
      "eval_f1_macro": 0.8132477856483462,
      "eval_f1_micro": 0.8438735177865613,
      "eval_loss": 0.5847834944725037,
      "eval_runtime": 3.8164,
      "eval_samples_per_second": 397.762,
      "eval_steps_per_second": 12.577,
      "step": 800
    },
    {
      "epoch": 2.13,
      "grad_norm": 1.5228568315505981,
      "learning_rate": 0.00014473684210526317,
      "loss": 0.203,
      "step": 810
    },
    {
      "epoch": 2.16,
      "grad_norm": 2.10640025138855,
      "learning_rate": 0.00014035087719298245,
      "loss": 0.1871,
      "step": 820
    },
    {
      "epoch": 2.18,
      "grad_norm": 2.23407244682312,
      "learning_rate": 0.00013596491228070177,
      "loss": 0.2381,
      "step": 830
    },
    {
      "epoch": 2.21,
      "grad_norm": 4.305612564086914,
      "learning_rate": 0.00013157894736842105,
      "loss": 0.1792,
      "step": 840
    },
    {
      "epoch": 2.24,
      "grad_norm": 2.1483376026153564,
      "learning_rate": 0.00012719298245614035,
      "loss": 0.2108,
      "step": 850
    },
    {
      "epoch": 2.24,
      "eval_accuracy": 0.8432147562582345,
      "eval_f1_macro": 0.8094003161642315,
      "eval_f1_micro": 0.8432147562582345,
      "eval_loss": 0.6054214239120483,
      "eval_runtime": 3.8173,
      "eval_samples_per_second": 397.661,
      "eval_steps_per_second": 12.574,
      "step": 850
    },
    {
      "epoch": 2.26,
      "grad_norm": 2.0426666736602783,
      "learning_rate": 0.00012280701754385965,
      "loss": 0.1777,
      "step": 860
    },
    {
      "epoch": 2.29,
      "grad_norm": 1.9927353858947754,
      "learning_rate": 0.00011842105263157894,
      "loss": 0.1886,
      "step": 870
    },
    {
      "epoch": 2.32,
      "grad_norm": 2.371792793273926,
      "learning_rate": 0.00011403508771929824,
      "loss": 0.1927,
      "step": 880
    },
    {
      "epoch": 2.34,
      "grad_norm": 1.061587929725647,
      "learning_rate": 0.00010964912280701755,
      "loss": 0.1,
      "step": 890
    },
    {
      "epoch": 2.37,
      "grad_norm": 3.100485324859619,
      "learning_rate": 0.00010526315789473683,
      "loss": 0.1752,
      "step": 900
    },
    {
      "epoch": 2.37,
      "eval_accuracy": 0.8438735177865613,
      "eval_f1_macro": 0.8131496787168201,
      "eval_f1_micro": 0.8438735177865613,
      "eval_loss": 0.6142441630363464,
      "eval_runtime": 3.8158,
      "eval_samples_per_second": 397.817,
      "eval_steps_per_second": 12.579,
      "step": 900
    },
    {
      "epoch": 2.39,
      "grad_norm": 4.032674789428711,
      "learning_rate": 0.00010087719298245614,
      "loss": 0.2409,
      "step": 910
    },
    {
      "epoch": 2.42,
      "grad_norm": 1.537070393562317,
      "learning_rate": 9.649122807017544e-05,
      "loss": 0.2244,
      "step": 920
    },
    {
      "epoch": 2.45,
      "grad_norm": 2.81392765045166,
      "learning_rate": 9.210526315789474e-05,
      "loss": 0.2367,
      "step": 930
    },
    {
      "epoch": 2.47,
      "grad_norm": 4.410009860992432,
      "learning_rate": 8.771929824561403e-05,
      "loss": 0.2912,
      "step": 940
    },
    {
      "epoch": 2.5,
      "grad_norm": 2.0353574752807617,
      "learning_rate": 8.333333333333333e-05,
      "loss": 0.1502,
      "step": 950
    },
    {
      "epoch": 2.5,
      "eval_accuracy": 0.8451910408432147,
      "eval_f1_macro": 0.8119149292307762,
      "eval_f1_micro": 0.8451910408432147,
      "eval_loss": 0.6100274324417114,
      "eval_runtime": 3.8177,
      "eval_samples_per_second": 397.617,
      "eval_steps_per_second": 12.573,
      "step": 950
    },
    {
      "epoch": 2.53,
      "grad_norm": 0.4018252193927765,
      "learning_rate": 7.894736842105263e-05,
      "loss": 0.2425,
      "step": 960
    },
    {
      "epoch": 2.55,
      "grad_norm": 1.633636236190796,
      "learning_rate": 7.456140350877192e-05,
      "loss": 0.3121,
      "step": 970
    },
    {
      "epoch": 2.58,
      "grad_norm": 1.6794798374176025,
      "learning_rate": 7.017543859649122e-05,
      "loss": 0.1456,
      "step": 980
    },
    {
      "epoch": 2.61,
      "grad_norm": 1.684718132019043,
      "learning_rate": 6.578947368421052e-05,
      "loss": 0.1977,
      "step": 990
    },
    {
      "epoch": 2.63,
      "grad_norm": 1.3666688203811646,
      "learning_rate": 6.140350877192983e-05,
      "loss": 0.2253,
      "step": 1000
    },
    {
      "epoch": 2.63,
      "eval_accuracy": 0.8438735177865613,
      "eval_f1_macro": 0.8227552343954732,
      "eval_f1_micro": 0.8438735177865613,
      "eval_loss": 0.6083930730819702,
      "eval_runtime": 3.8094,
      "eval_samples_per_second": 398.489,
      "eval_steps_per_second": 12.6,
      "step": 1000
    },
    {
      "epoch": 2.66,
      "grad_norm": 1.7882211208343506,
      "learning_rate": 5.701754385964912e-05,
      "loss": 0.1514,
      "step": 1010
    },
    {
      "epoch": 2.68,
      "grad_norm": 1.7349433898925781,
      "learning_rate": 5.263157894736842e-05,
      "loss": 0.2528,
      "step": 1020
    },
    {
      "epoch": 2.71,
      "grad_norm": 1.9312665462493896,
      "learning_rate": 4.824561403508772e-05,
      "loss": 0.238,
      "step": 1030
    },
    {
      "epoch": 2.74,
      "grad_norm": 2.227630376815796,
      "learning_rate": 4.3859649122807014e-05,
      "loss": 0.2113,
      "step": 1040
    },
    {
      "epoch": 2.76,
      "grad_norm": 1.098223328590393,
      "learning_rate": 3.9473684210526316e-05,
      "loss": 0.2193,
      "step": 1050
    },
    {
      "epoch": 2.76,
      "eval_accuracy": 0.8484848484848485,
      "eval_f1_macro": 0.817141995514716,
      "eval_f1_micro": 0.8484848484848485,
      "eval_loss": 0.6062248945236206,
      "eval_runtime": 3.8206,
      "eval_samples_per_second": 397.322,
      "eval_steps_per_second": 12.564,
      "step": 1050
    },
    {
      "epoch": 2.79,
      "grad_norm": 0.6382438540458679,
      "learning_rate": 3.508771929824561e-05,
      "loss": 0.2207,
      "step": 1060
    },
    {
      "epoch": 2.82,
      "grad_norm": 2.2816574573516846,
      "learning_rate": 3.0701754385964913e-05,
      "loss": 0.2896,
      "step": 1070
    },
    {
      "epoch": 2.84,
      "grad_norm": 1.8550561666488647,
      "learning_rate": 2.631578947368421e-05,
      "loss": 0.2776,
      "step": 1080
    },
    {
      "epoch": 2.87,
      "grad_norm": 1.517103672027588,
      "learning_rate": 2.1929824561403507e-05,
      "loss": 0.1744,
      "step": 1090
    },
    {
      "epoch": 2.89,
      "grad_norm": 1.9958040714263916,
      "learning_rate": 1.7543859649122806e-05,
      "loss": 0.2182,
      "step": 1100
    },
    {
      "epoch": 2.89,
      "eval_accuracy": 0.849802371541502,
      "eval_f1_macro": 0.8181612702287439,
      "eval_f1_micro": 0.849802371541502,
      "eval_loss": 0.5966492891311646,
      "eval_runtime": 3.822,
      "eval_samples_per_second": 397.173,
      "eval_steps_per_second": 12.559,
      "step": 1100
    },
    {
      "epoch": 2.92,
      "grad_norm": 1.0382195711135864,
      "learning_rate": 1.3157894736842104e-05,
      "loss": 0.2314,
      "step": 1110
    },
    {
      "epoch": 2.95,
      "grad_norm": 0.6311368346214294,
      "learning_rate": 8.771929824561403e-06,
      "loss": 0.2827,
      "step": 1120
    },
    {
      "epoch": 2.97,
      "grad_norm": 2.1355645656585693,
      "learning_rate": 4.3859649122807014e-06,
      "loss": 0.1823,
      "step": 1130
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.8415520191192627,
      "learning_rate": 0.0,
      "loss": 0.168,
      "step": 1140
    },
    {
      "epoch": 3.0,
      "step": 1140,
      "total_flos": 5570737729437696.0,
      "train_loss": 0.5285837122222834,
      "train_runtime": 447.0645,
      "train_samples_per_second": 81.492,
      "train_steps_per_second": 2.55
    }
  ],
  "logging_steps": 10,
  "max_steps": 1140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 5570737729437696.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}