SLM_vs_LLM_experiments/max_seq_length_128_experiments/google_t5/t5_base_patent/trainer_state.json
{
"best_metric": 0.9275591969490051,
"best_model_checkpoint": "../../experiments_checkpoints/MAdAiLab/google_t5/t5_base_patent/checkpoint-1550",
"epoch": 3.0,
"eval_steps": 50,
"global_step": 2346,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 2.4974615573883057,
"learning_rate": 0.0004978687127024723,
"loss": 2.0423,
"step": 10
},
{ | |
"epoch": 0.03, | |
"grad_norm": 1.5172781944274902, | |
"learning_rate": 0.0004957374254049446, | |
"loss": 1.9857, | |
"step": 20 | |
}, | |
{ | |
"epoch": 0.04, | |
"grad_norm": 1.7396634817123413, | |
"learning_rate": 0.0004936061381074169, | |
"loss": 1.7715, | |
"step": 30 | |
}, | |
{ | |
"epoch": 0.05, | |
"grad_norm": 2.822936773300171, | |
"learning_rate": 0.0004914748508098892, | |
"loss": 1.4156, | |
"step": 40 | |
}, | |
{ | |
"epoch": 0.06, | |
"grad_norm": 2.627573013305664, | |
"learning_rate": 0.0004893435635123615, | |
"loss": 1.3522, | |
"step": 50 | |
}, | |
{ | |
"epoch": 0.06, | |
"eval_accuracy": 0.5254, | |
"eval_f1_macro": 0.36088387014645285, | |
"eval_f1_micro": 0.5254, | |
"eval_loss": 1.4201537370681763, | |
"eval_runtime": 12.9425, | |
"eval_samples_per_second": 386.324, | |
"eval_steps_per_second": 12.131, | |
"step": 50 | |
}, | |
{ | |
"epoch": 0.08, | |
"grad_norm": 2.4126358032226562, | |
"learning_rate": 0.00048721227621483377, | |
"loss": 1.2477, | |
"step": 60 | |
}, | |
{ | |
"epoch": 0.09, | |
"grad_norm": 1.7662773132324219, | |
"learning_rate": 0.00048508098891730605, | |
"loss": 1.2976, | |
"step": 70 | |
}, | |
{ | |
"epoch": 0.1, | |
"grad_norm": 2.7678210735321045, | |
"learning_rate": 0.0004829497016197784, | |
"loss": 1.2331, | |
"step": 80 | |
}, | |
{ | |
"epoch": 0.12, | |
"grad_norm": 2.4279048442840576, | |
"learning_rate": 0.00048081841432225065, | |
"loss": 1.2387, | |
"step": 90 | |
}, | |
{ | |
"epoch": 0.13, | |
"grad_norm": 1.8737666606903076, | |
"learning_rate": 0.0004786871270247229, | |
"loss": 1.1693, | |
"step": 100 | |
}, | |
{ | |
"epoch": 0.13, | |
"eval_accuracy": 0.597, | |
"eval_f1_macro": 0.4695490866463984, | |
"eval_f1_micro": 0.597, | |
"eval_loss": 1.1673717498779297, | |
"eval_runtime": 13.0292, | |
"eval_samples_per_second": 383.754, | |
"eval_steps_per_second": 12.05, | |
"step": 100 | |
}, | |
{ | |
"epoch": 0.14, | |
"grad_norm": 2.6654303073883057, | |
"learning_rate": 0.00047655583972719526, | |
"loss": 1.2759, | |
"step": 110 | |
}, | |
{ | |
"epoch": 0.15, | |
"grad_norm": 2.226668119430542, | |
"learning_rate": 0.00047442455242966753, | |
"loss": 1.2904, | |
"step": 120 | |
}, | |
{ | |
"epoch": 0.17, | |
"grad_norm": 2.8846795558929443, | |
"learning_rate": 0.0004722932651321398, | |
"loss": 1.1975, | |
"step": 130 | |
}, | |
{ | |
"epoch": 0.18, | |
"grad_norm": 2.5968539714813232, | |
"learning_rate": 0.00047016197783461214, | |
"loss": 1.2166, | |
"step": 140 | |
}, | |
{ | |
"epoch": 0.19, | |
"grad_norm": 1.5721819400787354, | |
"learning_rate": 0.0004680306905370844, | |
"loss": 1.171, | |
"step": 150 | |
}, | |
{ | |
"epoch": 0.19, | |
"eval_accuracy": 0.6052, | |
"eval_f1_macro": 0.4713385676807627, | |
"eval_f1_micro": 0.6052, | |
"eval_loss": 1.1373018026351929, | |
"eval_runtime": 13.0547, | |
"eval_samples_per_second": 383.005, | |
"eval_steps_per_second": 12.026, | |
"step": 150 | |
}, | |
{ | |
"epoch": 0.2, | |
"grad_norm": 2.7147936820983887, | |
"learning_rate": 0.0004658994032395567, | |
"loss": 1.3234, | |
"step": 160 | |
}, | |
{ | |
"epoch": 0.22, | |
"grad_norm": 1.9919277429580688, | |
"learning_rate": 0.000463768115942029, | |
"loss": 1.2193, | |
"step": 170 | |
}, | |
{ | |
"epoch": 0.23, | |
"grad_norm": 1.9794032573699951, | |
"learning_rate": 0.0004616368286445013, | |
"loss": 1.1321, | |
"step": 180 | |
}, | |
{ | |
"epoch": 0.24, | |
"grad_norm": 2.1101603507995605, | |
"learning_rate": 0.00045950554134697357, | |
"loss": 1.2112, | |
"step": 190 | |
}, | |
{ | |
"epoch": 0.26, | |
"grad_norm": 2.165870428085327, | |
"learning_rate": 0.0004573742540494459, | |
"loss": 1.048, | |
"step": 200 | |
}, | |
{ | |
"epoch": 0.26, | |
"eval_accuracy": 0.6286, | |
"eval_f1_macro": 0.5499463679348023, | |
"eval_f1_micro": 0.6286, | |
"eval_loss": 1.0826044082641602, | |
"eval_runtime": 13.0087, | |
"eval_samples_per_second": 384.358, | |
"eval_steps_per_second": 12.069, | |
"step": 200 | |
}, | |
{ | |
"epoch": 0.27, | |
"grad_norm": 2.1708426475524902, | |
"learning_rate": 0.0004552429667519182, | |
"loss": 1.089, | |
"step": 210 | |
}, | |
{ | |
"epoch": 0.28, | |
"grad_norm": 2.311868190765381, | |
"learning_rate": 0.00045311167945439045, | |
"loss": 1.1744, | |
"step": 220 | |
}, | |
{ | |
"epoch": 0.29, | |
"grad_norm": 2.2947585582733154, | |
"learning_rate": 0.0004509803921568628, | |
"loss": 1.0987, | |
"step": 230 | |
}, | |
{ | |
"epoch": 0.31, | |
"grad_norm": 2.115337371826172, | |
"learning_rate": 0.00044884910485933505, | |
"loss": 1.1407, | |
"step": 240 | |
}, | |
{ | |
"epoch": 0.32, | |
"grad_norm": 2.1188411712646484, | |
"learning_rate": 0.00044671781756180733, | |
"loss": 0.9991, | |
"step": 250 | |
}, | |
{ | |
"epoch": 0.32, | |
"eval_accuracy": 0.638, | |
"eval_f1_macro": 0.5422074962269037, | |
"eval_f1_micro": 0.638, | |
"eval_loss": 1.0598992109298706, | |
"eval_runtime": 13.0177, | |
"eval_samples_per_second": 384.093, | |
"eval_steps_per_second": 12.061, | |
"step": 250 | |
}, | |
{ | |
"epoch": 0.33, | |
"grad_norm": 2.213646411895752, | |
"learning_rate": 0.00044458653026427966, | |
"loss": 1.0853, | |
"step": 260 | |
}, | |
{ | |
"epoch": 0.35, | |
"grad_norm": 2.5469391345977783, | |
"learning_rate": 0.00044245524296675193, | |
"loss": 1.0904, | |
"step": 270 | |
}, | |
{ | |
"epoch": 0.36, | |
"grad_norm": 2.0771143436431885, | |
"learning_rate": 0.0004403239556692242, | |
"loss": 1.04, | |
"step": 280 | |
}, | |
{ | |
"epoch": 0.37, | |
"grad_norm": 5.050015926361084, | |
"learning_rate": 0.00043819266837169654, | |
"loss": 1.1204, | |
"step": 290 | |
}, | |
{ | |
"epoch": 0.38, | |
"grad_norm": 2.272465705871582, | |
"learning_rate": 0.0004360613810741688, | |
"loss": 1.1814, | |
"step": 300 | |
}, | |
{ | |
"epoch": 0.38, | |
"eval_accuracy": 0.6332, | |
"eval_f1_macro": 0.559347721629516, | |
"eval_f1_micro": 0.6332, | |
"eval_loss": 1.0633331537246704, | |
"eval_runtime": 13.0185, | |
"eval_samples_per_second": 384.069, | |
"eval_steps_per_second": 12.06, | |
"step": 300 | |
}, | |
{ | |
"epoch": 0.4, | |
"grad_norm": 2.2277400493621826, | |
"learning_rate": 0.0004339300937766411, | |
"loss": 1.1177, | |
"step": 310 | |
}, | |
{ | |
"epoch": 0.41, | |
"grad_norm": 1.6775270700454712, | |
"learning_rate": 0.0004317988064791134, | |
"loss": 0.9599, | |
"step": 320 | |
}, | |
{ | |
"epoch": 0.42, | |
"grad_norm": 2.208353042602539, | |
"learning_rate": 0.0004296675191815857, | |
"loss": 1.1534, | |
"step": 330 | |
}, | |
{ | |
"epoch": 0.43, | |
"grad_norm": 1.7430713176727295, | |
"learning_rate": 0.00042753623188405797, | |
"loss": 1.1505, | |
"step": 340 | |
}, | |
{ | |
"epoch": 0.45, | |
"grad_norm": 2.3776443004608154, | |
"learning_rate": 0.0004254049445865303, | |
"loss": 1.0864, | |
"step": 350 | |
}, | |
{ | |
"epoch": 0.45, | |
"eval_accuracy": 0.6392, | |
"eval_f1_macro": 0.5678118184018813, | |
"eval_f1_micro": 0.6392, | |
"eval_loss": 1.040041208267212, | |
"eval_runtime": 12.9954, | |
"eval_samples_per_second": 384.752, | |
"eval_steps_per_second": 12.081, | |
"step": 350 | |
}, | |
{ | |
"epoch": 0.46, | |
"grad_norm": 1.8053480386734009, | |
"learning_rate": 0.0004232736572890026, | |
"loss": 1.0725, | |
"step": 360 | |
}, | |
{ | |
"epoch": 0.47, | |
"grad_norm": 1.8648344278335571, | |
"learning_rate": 0.00042114236999147485, | |
"loss": 1.1635, | |
"step": 370 | |
}, | |
{ | |
"epoch": 0.49, | |
"grad_norm": 1.467507243156433, | |
"learning_rate": 0.0004190110826939472, | |
"loss": 1.1063, | |
"step": 380 | |
}, | |
{ | |
"epoch": 0.5, | |
"grad_norm": 2.082329750061035, | |
"learning_rate": 0.00041687979539641946, | |
"loss": 1.176, | |
"step": 390 | |
}, | |
{ | |
"epoch": 0.51, | |
"grad_norm": 1.4149022102355957, | |
"learning_rate": 0.00041474850809889173, | |
"loss": 0.9748, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.51, | |
"eval_accuracy": 0.6424, | |
"eval_f1_macro": 0.5612605717682496, | |
"eval_f1_micro": 0.6424, | |
"eval_loss": 1.0440382957458496, | |
"eval_runtime": 13.0068, | |
"eval_samples_per_second": 384.415, | |
"eval_steps_per_second": 12.071, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.52, | |
"grad_norm": 2.5410144329071045, | |
"learning_rate": 0.00041261722080136406, | |
"loss": 1.1015, | |
"step": 410 | |
}, | |
{ | |
"epoch": 0.54, | |
"grad_norm": 1.6288481950759888, | |
"learning_rate": 0.00041048593350383634, | |
"loss": 1.1052, | |
"step": 420 | |
}, | |
{ | |
"epoch": 0.55, | |
"grad_norm": 2.03472638130188, | |
"learning_rate": 0.0004083546462063086, | |
"loss": 1.073, | |
"step": 430 | |
}, | |
{ | |
"epoch": 0.56, | |
"grad_norm": 1.3407670259475708, | |
"learning_rate": 0.00040622335890878094, | |
"loss": 1.0857, | |
"step": 440 | |
}, | |
{ | |
"epoch": 0.58, | |
"grad_norm": 2.194918155670166, | |
"learning_rate": 0.0004040920716112532, | |
"loss": 1.0267, | |
"step": 450 | |
}, | |
{ | |
"epoch": 0.58, | |
"eval_accuracy": 0.6526, | |
"eval_f1_macro": 0.5818441324147113, | |
"eval_f1_micro": 0.6526, | |
"eval_loss": 1.01156485080719, | |
"eval_runtime": 13.0045, | |
"eval_samples_per_second": 384.482, | |
"eval_steps_per_second": 12.073, | |
"step": 450 | |
}, | |
{ | |
"epoch": 0.59, | |
"grad_norm": 2.4339776039123535, | |
"learning_rate": 0.0004019607843137255, | |
"loss": 1.0199, | |
"step": 460 | |
}, | |
{ | |
"epoch": 0.6, | |
"grad_norm": 1.824545979499817, | |
"learning_rate": 0.0003998294970161978, | |
"loss": 1.0241, | |
"step": 470 | |
}, | |
{ | |
"epoch": 0.61, | |
"grad_norm": 1.3973139524459839, | |
"learning_rate": 0.0003976982097186701, | |
"loss": 1.0537, | |
"step": 480 | |
}, | |
{ | |
"epoch": 0.63, | |
"grad_norm": 1.7667598724365234, | |
"learning_rate": 0.00039556692242114237, | |
"loss": 1.1928, | |
"step": 490 | |
}, | |
{ | |
"epoch": 0.64, | |
"grad_norm": 1.8740298748016357, | |
"learning_rate": 0.0003934356351236147, | |
"loss": 1.0052, | |
"step": 500 | |
}, | |
{ | |
"epoch": 0.64, | |
"eval_accuracy": 0.657, | |
"eval_f1_macro": 0.5787057373173895, | |
"eval_f1_micro": 0.657, | |
"eval_loss": 0.9947913289070129, | |
"eval_runtime": 13.0161, | |
"eval_samples_per_second": 384.139, | |
"eval_steps_per_second": 12.062, | |
"step": 500 | |
}, | |
{ | |
"epoch": 0.65, | |
"grad_norm": 2.1658332347869873, | |
"learning_rate": 0.000391304347826087, | |
"loss": 1.0857, | |
"step": 510 | |
}, | |
{ | |
"epoch": 0.66, | |
"grad_norm": 1.7591208219528198, | |
"learning_rate": 0.00038917306052855925, | |
"loss": 0.9992, | |
"step": 520 | |
}, | |
{ | |
"epoch": 0.68, | |
"grad_norm": 2.5212056636810303, | |
"learning_rate": 0.0003870417732310316, | |
"loss": 1.0708, | |
"step": 530 | |
}, | |
{ | |
"epoch": 0.69, | |
"grad_norm": 1.8266304731369019, | |
"learning_rate": 0.00038491048593350386, | |
"loss": 1.0144, | |
"step": 540 | |
}, | |
{ | |
"epoch": 0.7, | |
"grad_norm": 2.446922540664673, | |
"learning_rate": 0.00038277919863597613, | |
"loss": 0.9244, | |
"step": 550 | |
}, | |
{ | |
"epoch": 0.7, | |
"eval_accuracy": 0.657, | |
"eval_f1_macro": 0.5869643883356389, | |
"eval_f1_micro": 0.657, | |
"eval_loss": 1.000241994857788, | |
"eval_runtime": 12.9976, | |
"eval_samples_per_second": 384.686, | |
"eval_steps_per_second": 12.079, | |
"step": 550 | |
}, | |
{ | |
"epoch": 0.72, | |
"grad_norm": 2.180079936981201, | |
"learning_rate": 0.00038064791133844846, | |
"loss": 0.9825, | |
"step": 560 | |
}, | |
{ | |
"epoch": 0.73, | |
"grad_norm": 2.3864924907684326, | |
"learning_rate": 0.00037851662404092074, | |
"loss": 0.9943, | |
"step": 570 | |
}, | |
{ | |
"epoch": 0.74, | |
"grad_norm": 1.930967092514038, | |
"learning_rate": 0.000376385336743393, | |
"loss": 0.9622, | |
"step": 580 | |
}, | |
{ | |
"epoch": 0.75, | |
"grad_norm": 1.6336820125579834, | |
"learning_rate": 0.00037425404944586534, | |
"loss": 1.0352, | |
"step": 590 | |
}, | |
{ | |
"epoch": 0.77, | |
"grad_norm": 1.4508672952651978, | |
"learning_rate": 0.0003721227621483376, | |
"loss": 1.0172, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.77, | |
"eval_accuracy": 0.661, | |
"eval_f1_macro": 0.5889167117692362, | |
"eval_f1_micro": 0.661, | |
"eval_loss": 0.9869310259819031, | |
"eval_runtime": 13.005, | |
"eval_samples_per_second": 384.466, | |
"eval_steps_per_second": 12.072, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.78, | |
"grad_norm": 2.1621804237365723, | |
"learning_rate": 0.0003699914748508099, | |
"loss": 1.0703, | |
"step": 610 | |
}, | |
{ | |
"epoch": 0.79, | |
"grad_norm": 1.213054895401001, | |
"learning_rate": 0.0003678601875532822, | |
"loss": 0.9779, | |
"step": 620 | |
}, | |
{ | |
"epoch": 0.81, | |
"grad_norm": 1.6633962392807007, | |
"learning_rate": 0.0003657289002557545, | |
"loss": 1.0609, | |
"step": 630 | |
}, | |
{ | |
"epoch": 0.82, | |
"grad_norm": 1.6113487482070923, | |
"learning_rate": 0.0003635976129582268, | |
"loss": 0.9406, | |
"step": 640 | |
}, | |
{ | |
"epoch": 0.83, | |
"grad_norm": 1.8522602319717407, | |
"learning_rate": 0.0003614663256606991, | |
"loss": 1.032, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.83, | |
"eval_accuracy": 0.658, | |
"eval_f1_macro": 0.5967445966950204, | |
"eval_f1_micro": 0.658, | |
"eval_loss": 0.9922341108322144, | |
"eval_runtime": 13.0065, | |
"eval_samples_per_second": 384.423, | |
"eval_steps_per_second": 12.071, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.84, | |
"grad_norm": 2.109052896499634, | |
"learning_rate": 0.0003593350383631714, | |
"loss": 0.9791, | |
"step": 660 | |
}, | |
{ | |
"epoch": 0.86, | |
"grad_norm": 2.2362546920776367, | |
"learning_rate": 0.00035720375106564365, | |
"loss": 1.0476, | |
"step": 670 | |
}, | |
{ | |
"epoch": 0.87, | |
"grad_norm": 1.6640779972076416, | |
"learning_rate": 0.000355072463768116, | |
"loss": 1.0723, | |
"step": 680 | |
}, | |
{ | |
"epoch": 0.88, | |
"grad_norm": 2.161720037460327, | |
"learning_rate": 0.00035294117647058826, | |
"loss": 1.1059, | |
"step": 690 | |
}, | |
{ | |
"epoch": 0.9, | |
"grad_norm": 1.3153353929519653, | |
"learning_rate": 0.00035080988917306053, | |
"loss": 0.9623, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.9, | |
"eval_accuracy": 0.6488, | |
"eval_f1_macro": 0.5862935110976637, | |
"eval_f1_micro": 0.6488, | |
"eval_loss": 0.9955071210861206, | |
"eval_runtime": 13.0077, | |
"eval_samples_per_second": 384.387, | |
"eval_steps_per_second": 12.07, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.91, | |
"grad_norm": 1.304362416267395, | |
"learning_rate": 0.00034867860187553286, | |
"loss": 1.0294, | |
"step": 710 | |
}, | |
{ | |
"epoch": 0.92, | |
"grad_norm": 1.4024779796600342, | |
"learning_rate": 0.00034654731457800514, | |
"loss": 1.0708, | |
"step": 720 | |
}, | |
{ | |
"epoch": 0.93, | |
"grad_norm": 2.148181676864624, | |
"learning_rate": 0.0003444160272804774, | |
"loss": 0.9677, | |
"step": 730 | |
}, | |
{ | |
"epoch": 0.95, | |
"grad_norm": 2.531114101409912, | |
"learning_rate": 0.00034228473998294974, | |
"loss": 0.966, | |
"step": 740 | |
}, | |
{ | |
"epoch": 0.96, | |
"grad_norm": 1.9326214790344238, | |
"learning_rate": 0.000340153452685422, | |
"loss": 0.9257, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.96, | |
"eval_accuracy": 0.6556, | |
"eval_f1_macro": 0.5884168435088513, | |
"eval_f1_micro": 0.6556, | |
"eval_loss": 0.9993236064910889, | |
"eval_runtime": 13.0548, | |
"eval_samples_per_second": 383.002, | |
"eval_steps_per_second": 12.026, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.97, | |
"grad_norm": 2.2656257152557373, | |
"learning_rate": 0.0003380221653878943, | |
"loss": 0.991, | |
"step": 760 | |
}, | |
{ | |
"epoch": 0.98, | |
"grad_norm": 5.2825469970703125, | |
"learning_rate": 0.0003358908780903666, | |
"loss": 1.0599, | |
"step": 770 | |
}, | |
{ | |
"epoch": 1.0, | |
"grad_norm": 1.6359708309173584, | |
"learning_rate": 0.0003337595907928389, | |
"loss": 1.0755, | |
"step": 780 | |
}, | |
{ | |
"epoch": 1.01, | |
"grad_norm": 1.8754523992538452, | |
"learning_rate": 0.0003316283034953112, | |
"loss": 0.8551, | |
"step": 790 | |
}, | |
{ | |
"epoch": 1.02, | |
"grad_norm": 1.687317132949829, | |
"learning_rate": 0.0003294970161977835, | |
"loss": 0.7956, | |
"step": 800 | |
}, | |
{ | |
"epoch": 1.02, | |
"eval_accuracy": 0.6662, | |
"eval_f1_macro": 0.6147575900411174, | |
"eval_f1_micro": 0.6662, | |
"eval_loss": 0.9737330079078674, | |
"eval_runtime": 12.9893, | |
"eval_samples_per_second": 384.933, | |
"eval_steps_per_second": 12.087, | |
"step": 800 | |
}, | |
{ | |
"epoch": 1.04, | |
"grad_norm": 2.0937979221343994, | |
"learning_rate": 0.0003273657289002558, | |
"loss": 0.8608, | |
"step": 810 | |
}, | |
{ | |
"epoch": 1.05, | |
"grad_norm": 1.6474498510360718, | |
"learning_rate": 0.00032523444160272806, | |
"loss": 0.6793, | |
"step": 820 | |
}, | |
{ | |
"epoch": 1.06, | |
"grad_norm": 2.0066182613372803, | |
"learning_rate": 0.0003231031543052004, | |
"loss": 0.8549, | |
"step": 830 | |
}, | |
{ | |
"epoch": 1.07, | |
"grad_norm": 2.7142579555511475, | |
"learning_rate": 0.0003209718670076726, | |
"loss": 0.8081, | |
"step": 840 | |
}, | |
{ | |
"epoch": 1.09, | |
"grad_norm": 2.2779908180236816, | |
"learning_rate": 0.0003188405797101449, | |
"loss": 0.8475, | |
"step": 850 | |
}, | |
{ | |
"epoch": 1.09, | |
"eval_accuracy": 0.6544, | |
"eval_f1_macro": 0.5728923304855623, | |
"eval_f1_micro": 0.6544, | |
"eval_loss": 1.0124825239181519, | |
"eval_runtime": 13.0224, | |
"eval_samples_per_second": 383.955, | |
"eval_steps_per_second": 12.056, | |
"step": 850 | |
}, | |
{ | |
"epoch": 1.1, | |
"grad_norm": 2.1091928482055664, | |
"learning_rate": 0.0003167092924126172, | |
"loss": 0.8423, | |
"step": 860 | |
}, | |
{ | |
"epoch": 1.11, | |
"grad_norm": 1.7248471975326538, | |
"learning_rate": 0.0003145780051150895, | |
"loss": 0.8673, | |
"step": 870 | |
}, | |
{ | |
"epoch": 1.13, | |
"grad_norm": 1.7542129755020142, | |
"learning_rate": 0.00031244671781756176, | |
"loss": 0.7861, | |
"step": 880 | |
}, | |
{ | |
"epoch": 1.14, | |
"grad_norm": 2.300936222076416, | |
"learning_rate": 0.0003103154305200341, | |
"loss": 0.8717, | |
"step": 890 | |
}, | |
{ | |
"epoch": 1.15, | |
"grad_norm": 2.793142318725586, | |
"learning_rate": 0.00030818414322250637, | |
"loss": 0.8527, | |
"step": 900 | |
}, | |
{ | |
"epoch": 1.15, | |
"eval_accuracy": 0.6524, | |
"eval_f1_macro": 0.5897355833668082, | |
"eval_f1_micro": 0.6524, | |
"eval_loss": 0.9999291300773621, | |
"eval_runtime": 12.9931, | |
"eval_samples_per_second": 384.819, | |
"eval_steps_per_second": 12.083, | |
"step": 900 | |
}, | |
{ | |
"epoch": 1.16, | |
"grad_norm": 1.7186204195022583, | |
"learning_rate": 0.00030605285592497864, | |
"loss": 0.899, | |
"step": 910 | |
}, | |
{ | |
"epoch": 1.18, | |
"grad_norm": 1.7719347476959229, | |
"learning_rate": 0.00030392156862745097, | |
"loss": 0.8529, | |
"step": 920 | |
}, | |
{ | |
"epoch": 1.19, | |
"grad_norm": 1.558523416519165, | |
"learning_rate": 0.00030179028132992325, | |
"loss": 0.7822, | |
"step": 930 | |
}, | |
{ | |
"epoch": 1.2, | |
"grad_norm": 1.474290132522583, | |
"learning_rate": 0.0002996589940323955, | |
"loss": 0.7782, | |
"step": 940 | |
}, | |
{ | |
"epoch": 1.21, | |
"grad_norm": 2.070176839828491, | |
"learning_rate": 0.00029752770673486785, | |
"loss": 0.8587, | |
"step": 950 | |
}, | |
{ | |
"epoch": 1.21, | |
"eval_accuracy": 0.6576, | |
"eval_f1_macro": 0.5872663998332279, | |
"eval_f1_micro": 0.6576, | |
"eval_loss": 1.0072367191314697, | |
"eval_runtime": 13.0045, | |
"eval_samples_per_second": 384.481, | |
"eval_steps_per_second": 12.073, | |
"step": 950 | |
}, | |
{ | |
"epoch": 1.23, | |
"grad_norm": 1.9397375583648682, | |
"learning_rate": 0.00029539641943734013, | |
"loss": 0.7791, | |
"step": 960 | |
}, | |
{ | |
"epoch": 1.24, | |
"grad_norm": 2.1534318923950195, | |
"learning_rate": 0.0002932651321398124, | |
"loss": 0.9695, | |
"step": 970 | |
}, | |
{ | |
"epoch": 1.25, | |
"grad_norm": 1.9184337854385376, | |
"learning_rate": 0.00029113384484228473, | |
"loss": 0.8261, | |
"step": 980 | |
}, | |
{ | |
"epoch": 1.27, | |
"grad_norm": 1.854306697845459, | |
"learning_rate": 0.000289002557544757, | |
"loss": 0.7905, | |
"step": 990 | |
}, | |
{ | |
"epoch": 1.28, | |
"grad_norm": 2.377044200897217, | |
"learning_rate": 0.0002868712702472293, | |
"loss": 0.8855, | |
"step": 1000 | |
}, | |
{ | |
"epoch": 1.28, | |
"eval_accuracy": 0.6592, | |
"eval_f1_macro": 0.6034965481160524, | |
"eval_f1_micro": 0.6592, | |
"eval_loss": 0.9839973449707031, | |
"eval_runtime": 13.0007, | |
"eval_samples_per_second": 384.595, | |
"eval_steps_per_second": 12.076, | |
"step": 1000 | |
}, | |
{ | |
"epoch": 1.29, | |
"grad_norm": 2.329939603805542, | |
"learning_rate": 0.0002847399829497016, | |
"loss": 0.873, | |
"step": 1010 | |
}, | |
{ | |
"epoch": 1.3, | |
"grad_norm": 2.4969725608825684, | |
"learning_rate": 0.0002826086956521739, | |
"loss": 0.8279, | |
"step": 1020 | |
}, | |
{ | |
"epoch": 1.32, | |
"grad_norm": 1.957000970840454, | |
"learning_rate": 0.00028047740835464616, | |
"loss": 0.7704, | |
"step": 1030 | |
}, | |
{ | |
"epoch": 1.33, | |
"grad_norm": 2.533355951309204, | |
"learning_rate": 0.0002783461210571185, | |
"loss": 0.8184, | |
"step": 1040 | |
}, | |
{ | |
"epoch": 1.34, | |
"grad_norm": 2.0572900772094727, | |
"learning_rate": 0.00027621483375959077, | |
"loss": 0.7015, | |
"step": 1050 | |
}, | |
{ | |
"epoch": 1.34, | |
"eval_accuracy": 0.6682, | |
"eval_f1_macro": 0.5993496469081598, | |
"eval_f1_micro": 0.6682, | |
"eval_loss": 0.9846757650375366, | |
"eval_runtime": 13.0022, | |
"eval_samples_per_second": 384.551, | |
"eval_steps_per_second": 12.075, | |
"step": 1050 | |
}, | |
{ | |
"epoch": 1.36, | |
"grad_norm": 2.5160746574401855, | |
"learning_rate": 0.0002740835464620631, | |
"loss": 0.9059, | |
"step": 1060 | |
}, | |
{ | |
"epoch": 1.37, | |
"grad_norm": 1.8105578422546387, | |
"learning_rate": 0.0002719522591645354, | |
"loss": 0.9492, | |
"step": 1070 | |
}, | |
{ | |
"epoch": 1.38, | |
"grad_norm": 1.9851925373077393, | |
"learning_rate": 0.00026982097186700765, | |
"loss": 0.8391, | |
"step": 1080 | |
}, | |
{ | |
"epoch": 1.39, | |
"grad_norm": 1.477924108505249, | |
"learning_rate": 0.00026768968456948, | |
"loss": 0.8203, | |
"step": 1090 | |
}, | |
{ | |
"epoch": 1.41, | |
"grad_norm": 2.1417877674102783, | |
"learning_rate": 0.00026555839727195225, | |
"loss": 0.8116, | |
"step": 1100 | |
}, | |
{ | |
"epoch": 1.41, | |
"eval_accuracy": 0.6678, | |
"eval_f1_macro": 0.6079453841442157, | |
"eval_f1_micro": 0.6678, | |
"eval_loss": 0.9702128171920776, | |
"eval_runtime": 12.99, | |
"eval_samples_per_second": 384.912, | |
"eval_steps_per_second": 12.086, | |
"step": 1100 | |
}, | |
{ | |
"epoch": 1.42, | |
"grad_norm": 2.5949513912200928, | |
"learning_rate": 0.00026342710997442453, | |
"loss": 0.8707, | |
"step": 1110 | |
}, | |
{ | |
"epoch": 1.43, | |
"grad_norm": 2.1411094665527344, | |
"learning_rate": 0.00026129582267689686, | |
"loss": 0.7374, | |
"step": 1120 | |
}, | |
{ | |
"epoch": 1.45, | |
"grad_norm": 3.001037836074829, | |
"learning_rate": 0.00025916453537936913, | |
"loss": 0.8355, | |
"step": 1130 | |
}, | |
{ | |
"epoch": 1.46, | |
"grad_norm": 1.8605945110321045, | |
"learning_rate": 0.0002570332480818414, | |
"loss": 0.8346, | |
"step": 1140 | |
}, | |
{ | |
"epoch": 1.47, | |
"grad_norm": 2.481954574584961, | |
"learning_rate": 0.00025490196078431374, | |
"loss": 0.8409, | |
"step": 1150 | |
}, | |
{ | |
"epoch": 1.47, | |
"eval_accuracy": 0.6606, | |
"eval_f1_macro": 0.6017292151887733, | |
"eval_f1_micro": 0.6606, | |
"eval_loss": 0.978935182094574, | |
"eval_runtime": 12.9978, | |
"eval_samples_per_second": 384.68, | |
"eval_steps_per_second": 12.079, | |
"step": 1150 | |
}, | |
{ | |
"epoch": 1.48, | |
"grad_norm": 2.708108425140381, | |
"learning_rate": 0.000252770673486786, | |
"loss": 0.9453, | |
"step": 1160 | |
}, | |
{ | |
"epoch": 1.5, | |
"grad_norm": 2.0015769004821777, | |
"learning_rate": 0.0002506393861892583, | |
"loss": 0.9272, | |
"step": 1170 | |
}, | |
{ | |
"epoch": 1.51, | |
"grad_norm": 1.576611042022705, | |
"learning_rate": 0.0002485080988917306, | |
"loss": 0.7801, | |
"step": 1180 | |
}, | |
{ | |
"epoch": 1.52, | |
"grad_norm": 1.3962684869766235, | |
"learning_rate": 0.0002463768115942029, | |
"loss": 0.8375, | |
"step": 1190 | |
}, | |
{ | |
"epoch": 1.53, | |
"grad_norm": 2.231125593185425, | |
"learning_rate": 0.0002442455242966752, | |
"loss": 0.7889, | |
"step": 1200 | |
}, | |
{ | |
"epoch": 1.53, | |
"eval_accuracy": 0.6818, | |
"eval_f1_macro": 0.6125360407387105, | |
"eval_f1_micro": 0.6818, | |
"eval_loss": 0.9462108612060547, | |
"eval_runtime": 13.0042, | |
"eval_samples_per_second": 384.492, | |
"eval_steps_per_second": 12.073, | |
"step": 1200 | |
}, | |
{ | |
"epoch": 1.55, | |
"grad_norm": 3.0127203464508057, | |
"learning_rate": 0.0002421142369991475, | |
"loss": 0.7764, | |
"step": 1210 | |
}, | |
{ | |
"epoch": 1.56, | |
"grad_norm": 1.7407495975494385, | |
"learning_rate": 0.0002399829497016198, | |
"loss": 0.9385, | |
"step": 1220 | |
}, | |
{ | |
"epoch": 1.57, | |
"grad_norm": 1.6206921339035034, | |
"learning_rate": 0.00023785166240409208, | |
"loss": 0.8436, | |
"step": 1230 | |
}, | |
{ | |
"epoch": 1.59, | |
"grad_norm": 1.9031857252120972, | |
"learning_rate": 0.00023572037510656438, | |
"loss": 0.8037, | |
"step": 1240 | |
}, | |
{ | |
"epoch": 1.6, | |
"grad_norm": 1.3563182353973389, | |
"learning_rate": 0.00023358908780903668, | |
"loss": 0.8059, | |
"step": 1250 | |
}, | |
{ | |
"epoch": 1.6, | |
"eval_accuracy": 0.6694, | |
"eval_f1_macro": 0.6092531381348701, | |
"eval_f1_micro": 0.6694, | |
"eval_loss": 0.9374752044677734, | |
"eval_runtime": 13.0242, | |
"eval_samples_per_second": 383.9, | |
"eval_steps_per_second": 12.054, | |
"step": 1250 | |
}, | |
{ | |
"epoch": 1.61, | |
"grad_norm": 1.6967196464538574, | |
"learning_rate": 0.00023145780051150893, | |
"loss": 0.7743, | |
"step": 1260 | |
}, | |
{ | |
"epoch": 1.62, | |
"grad_norm": 2.4736151695251465, | |
"learning_rate": 0.00022932651321398123, | |
"loss": 0.7983, | |
"step": 1270 | |
}, | |
{ | |
"epoch": 1.64, | |
"grad_norm": 1.9957785606384277, | |
"learning_rate": 0.00022719522591645354, | |
"loss": 0.8545, | |
"step": 1280 | |
}, | |
{ | |
"epoch": 1.65, | |
"grad_norm": 1.7612760066986084, | |
"learning_rate": 0.0002250639386189258, | |
"loss": 0.7164, | |
"step": 1290 | |
}, | |
{ | |
"epoch": 1.66, | |
"grad_norm": 1.766614317893982, | |
"learning_rate": 0.00022293265132139811, | |
"loss": 0.7893, | |
"step": 1300 | |
}, | |
{ | |
"epoch": 1.66, | |
"eval_accuracy": 0.6762, | |
"eval_f1_macro": 0.6102136225362762, | |
"eval_f1_micro": 0.6762, | |
"eval_loss": 0.946709394454956, | |
"eval_runtime": 13.0006, | |
"eval_samples_per_second": 384.598, | |
"eval_steps_per_second": 12.076, | |
"step": 1300 | |
}, | |
{ | |
"epoch": 1.68, | |
"grad_norm": 1.87382972240448, | |
"learning_rate": 0.00022080136402387042, | |
"loss": 0.8124, | |
"step": 1310 | |
}, | |
{ | |
"epoch": 1.69, | |
"grad_norm": 2.0260634422302246, | |
"learning_rate": 0.0002186700767263427, | |
"loss": 0.8122, | |
"step": 1320 | |
}, | |
{ | |
"epoch": 1.7, | |
"grad_norm": 2.4003918170928955, | |
"learning_rate": 0.000216538789428815, | |
"loss": 0.7986, | |
"step": 1330 | |
}, | |
{ | |
"epoch": 1.71, | |
"grad_norm": 1.366548776626587, | |
"learning_rate": 0.0002144075021312873, | |
"loss": 0.8306, | |
"step": 1340 | |
}, | |
{ | |
"epoch": 1.73, | |
"grad_norm": 1.6609718799591064, | |
"learning_rate": 0.00021227621483375957, | |
"loss": 0.8152, | |
"step": 1350 | |
}, | |
{ | |
"epoch": 1.73, | |
"eval_accuracy": 0.6822, | |
"eval_f1_macro": 0.6157614528042974, | |
"eval_f1_micro": 0.6822, | |
"eval_loss": 0.9396414160728455, | |
"eval_runtime": 12.9993, | |
"eval_samples_per_second": 384.637, | |
"eval_steps_per_second": 12.078, | |
"step": 1350 | |
}, | |
{ | |
"epoch": 1.74, | |
"grad_norm": 1.8561837673187256, | |
"learning_rate": 0.00021014492753623187, | |
"loss": 0.8248, | |
"step": 1360 | |
}, | |
{ | |
"epoch": 1.75, | |
"grad_norm": 1.7248742580413818, | |
"learning_rate": 0.00020801364023870418, | |
"loss": 0.7829, | |
"step": 1370 | |
}, | |
{ | |
"epoch": 1.76, | |
"grad_norm": 1.480776071548462, | |
"learning_rate": 0.00020588235294117645, | |
"loss": 0.7657, | |
"step": 1380 | |
}, | |
{ | |
"epoch": 1.78, | |
"grad_norm": 1.869985580444336, | |
"learning_rate": 0.00020375106564364876, | |
"loss": 0.8593, | |
"step": 1390 | |
}, | |
{ | |
"epoch": 1.79, | |
"grad_norm": 2.7019195556640625, | |
"learning_rate": 0.00020161977834612106, | |
"loss": 0.7644, | |
"step": 1400 | |
}, | |
{ | |
"epoch": 1.79, | |
"eval_accuracy": 0.6798, | |
"eval_f1_macro": 0.619022399994399, | |
"eval_f1_micro": 0.6798, | |
"eval_loss": 0.9445300698280334, | |
"eval_runtime": 13.0377, | |
"eval_samples_per_second": 383.502, | |
"eval_steps_per_second": 12.042, | |
"step": 1400 | |
}, | |
{ | |
"epoch": 1.8, | |
"grad_norm": 2.2178022861480713, | |
"learning_rate": 0.00019948849104859333, | |
"loss": 0.8636, | |
"step": 1410 | |
}, | |
{ | |
"epoch": 1.82, | |
"grad_norm": 2.1172451972961426, | |
"learning_rate": 0.00019735720375106564, | |
"loss": 0.7812, | |
"step": 1420 | |
}, | |
{ | |
"epoch": 1.83, | |
"grad_norm": 2.43135142326355, | |
"learning_rate": 0.00019522591645353794, | |
"loss": 0.7948, | |
"step": 1430 | |
}, | |
{ | |
"epoch": 1.84, | |
"grad_norm": 3.7343592643737793, | |
"learning_rate": 0.0001930946291560102, | |
"loss": 0.8153, | |
"step": 1440 | |
}, | |
{ | |
"epoch": 1.85, | |
"grad_norm": 1.6375885009765625, | |
"learning_rate": 0.00019096334185848252, | |
"loss": 0.7252, | |
"step": 1450 | |
}, | |
{ | |
"epoch": 1.85, | |
"eval_accuracy": 0.688, | |
"eval_f1_macro": 0.6208786695728835, | |
"eval_f1_micro": 0.688, | |
"eval_loss": 0.9284820556640625, | |
"eval_runtime": 12.989, | |
"eval_samples_per_second": 384.94, | |
"eval_steps_per_second": 12.087, | |
"step": 1450 | |
}, | |
{ | |
"epoch": 1.87, | |
"grad_norm": 2.259469509124756, | |
"learning_rate": 0.00018883205456095482, | |
"loss": 0.7284, | |
"step": 1460 | |
}, | |
{ | |
"epoch": 1.88, | |
"grad_norm": 2.0817081928253174, | |
"learning_rate": 0.0001867007672634271, | |
"loss": 0.8395, | |
"step": 1470 | |
}, | |
{ | |
"epoch": 1.89, | |
"grad_norm": 1.963901400566101, | |
"learning_rate": 0.0001845694799658994, | |
"loss": 0.8532, | |
"step": 1480 | |
}, | |
{ | |
"epoch": 1.91, | |
"grad_norm": 2.1315038204193115, | |
"learning_rate": 0.0001824381926683717, | |
"loss": 0.8127, | |
"step": 1490 | |
}, | |
{ | |
"epoch": 1.92, | |
"grad_norm": 1.7359952926635742, | |
"learning_rate": 0.000180306905370844, | |
"loss": 1.0028, | |
"step": 1500 | |
}, | |
{ | |
"epoch": 1.92, | |
"eval_accuracy": 0.6702, | |
"eval_f1_macro": 0.6079183899918916, | |
"eval_f1_micro": 0.6702, | |
"eval_loss": 0.9379138946533203, | |
"eval_runtime": 13.0102, | |
"eval_samples_per_second": 384.315, | |
"eval_steps_per_second": 12.067, | |
"step": 1500 | |
}, | |
{ | |
"epoch": 1.93, | |
"grad_norm": 1.7253365516662598, | |
"learning_rate": 0.00017817561807331628, | |
"loss": 0.7684, | |
"step": 1510 | |
}, | |
{ | |
"epoch": 1.94, | |
"grad_norm": 1.6444343328475952, | |
"learning_rate": 0.00017604433077578858, | |
"loss": 0.7371, | |
"step": 1520 | |
}, | |
{ | |
"epoch": 1.96, | |
"grad_norm": 2.163722038269043, | |
"learning_rate": 0.00017391304347826088, | |
"loss": 0.8029, | |
"step": 1530 | |
}, | |
{ | |
"epoch": 1.97, | |
"grad_norm": 1.5761432647705078, | |
"learning_rate": 0.00017178175618073316, | |
"loss": 0.8265, | |
"step": 1540 | |
}, | |
{ | |
"epoch": 1.98, | |
"grad_norm": 2.286616563796997, | |
"learning_rate": 0.00016965046888320546, | |
"loss": 0.8056, | |
"step": 1550 | |
}, | |
{ | |
"epoch": 1.98, | |
"eval_accuracy": 0.6776, | |
"eval_f1_macro": 0.6236906518711645, | |
"eval_f1_micro": 0.6776, | |
"eval_loss": 0.9275591969490051, | |
"eval_runtime": 13.0046, | |
"eval_samples_per_second": 384.479, | |
"eval_steps_per_second": 12.073, | |
"step": 1550 | |
}, | |
{ | |
"epoch": 1.99, | |
"grad_norm": 2.3306214809417725, | |
"learning_rate": 0.00016751918158567776, | |
"loss": 0.847, | |
"step": 1560 | |
}, | |
{ | |
"epoch": 2.01, | |
"grad_norm": 1.5776536464691162, | |
"learning_rate": 0.00016538789428815004, | |
"loss": 0.5966, | |
"step": 1570 | |
}, | |
{ | |
"epoch": 2.02, | |
"grad_norm": 1.7790110111236572, | |
"learning_rate": 0.00016325660699062234, | |
"loss": 0.6188, | |
"step": 1580 | |
}, | |
{ | |
"epoch": 2.03, | |
"grad_norm": 1.3440601825714111, | |
"learning_rate": 0.00016112531969309464, | |
"loss": 0.5905, | |
"step": 1590 | |
}, | |
{ | |
"epoch": 2.05, | |
"grad_norm": 1.428155779838562, | |
"learning_rate": 0.00015899403239556692, | |
"loss": 0.5781, | |
"step": 1600 | |
}, | |
{ | |
"epoch": 2.05, | |
"eval_accuracy": 0.6864, | |
"eval_f1_macro": 0.6214549114468936, | |
"eval_f1_micro": 0.6864, | |
"eval_loss": 0.9509050846099854, | |
"eval_runtime": 13.0155, | |
"eval_samples_per_second": 384.159, | |
"eval_steps_per_second": 12.063, | |
"step": 1600 | |
}, | |
{ | |
"epoch": 2.06, | |
"grad_norm": 1.9990568161010742, | |
"learning_rate": 0.00015686274509803922, | |
"loss": 0.5745, | |
"step": 1610 | |
}, | |
{ | |
"epoch": 2.07, | |
"grad_norm": 2.303602695465088, | |
"learning_rate": 0.00015473145780051152, | |
"loss": 0.7054, | |
"step": 1620 | |
}, | |
{ | |
"epoch": 2.08, | |
"grad_norm": 3.2110447883605957, | |
"learning_rate": 0.0001526001705029838, | |
"loss": 0.6983, | |
"step": 1630 | |
}, | |
{ | |
"epoch": 2.1, | |
"grad_norm": 1.5513195991516113, | |
"learning_rate": 0.0001504688832054561, | |
"loss": 0.6798, | |
"step": 1640 | |
}, | |
{ | |
"epoch": 2.11, | |
"grad_norm": 2.165961742401123, | |
"learning_rate": 0.0001483375959079284, | |
"loss": 0.5592, | |
"step": 1650 | |
}, | |
{ | |
"epoch": 2.11, | |
"eval_accuracy": 0.6866, | |
"eval_f1_macro": 0.6354378889969068, | |
"eval_f1_micro": 0.6866, | |
"eval_loss": 0.9535236358642578, | |
"eval_runtime": 12.9954, | |
"eval_samples_per_second": 384.753, | |
"eval_steps_per_second": 12.081, | |
"step": 1650 | |
}, | |
{ | |
"epoch": 2.12, | |
"grad_norm": 1.664448857307434, | |
"learning_rate": 0.00014620630861040068, | |
"loss": 0.5266, | |
"step": 1660 | |
}, | |
{ | |
"epoch": 2.14, | |
"grad_norm": 1.8533668518066406, | |
"learning_rate": 0.00014407502131287298, | |
"loss": 0.6073, | |
"step": 1670 | |
}, | |
{ | |
"epoch": 2.15, | |
"grad_norm": 1.854029893875122, | |
"learning_rate": 0.00014194373401534528, | |
"loss": 0.5735, | |
"step": 1680 | |
}, | |
{ | |
"epoch": 2.16, | |
"grad_norm": 1.8497388362884521, | |
"learning_rate": 0.00013981244671781756, | |
"loss": 0.5193, | |
"step": 1690 | |
}, | |
{ | |
"epoch": 2.17, | |
"grad_norm": 2.2256019115448, | |
"learning_rate": 0.00013768115942028986, | |
"loss": 0.6818, | |
"step": 1700 | |
}, | |
{ | |
"epoch": 2.17, | |
"eval_accuracy": 0.682, | |
"eval_f1_macro": 0.6203018813077676, | |
"eval_f1_micro": 0.682, | |
"eval_loss": 0.9811761975288391, | |
"eval_runtime": 13.0213, | |
"eval_samples_per_second": 383.987, | |
"eval_steps_per_second": 12.057, | |
"step": 1700 | |
}, | |
{ | |
"epoch": 2.19, | |
"grad_norm": 2.653561592102051, | |
"learning_rate": 0.00013554987212276216, | |
"loss": 0.5838, | |
"step": 1710 | |
}, | |
{ | |
"epoch": 2.2, | |
"grad_norm": 2.145022392272949, | |
"learning_rate": 0.00013341858482523444, | |
"loss": 0.6166, | |
"step": 1720 | |
}, | |
{ | |
"epoch": 2.21, | |
"grad_norm": 2.2668209075927734, | |
"learning_rate": 0.00013128729752770674, | |
"loss": 0.5747, | |
"step": 1730 | |
}, | |
{ | |
"epoch": 2.23, | |
"grad_norm": 2.4513309001922607, | |
"learning_rate": 0.00012915601023017904, | |
"loss": 0.6496, | |
"step": 1740 | |
}, | |
{ | |
"epoch": 2.24, | |
"grad_norm": 1.9956375360488892, | |
"learning_rate": 0.00012702472293265132, | |
"loss": 0.6022, | |
"step": 1750 | |
}, | |
{ | |
"epoch": 2.24, | |
"eval_accuracy": 0.6822, | |
"eval_f1_macro": 0.6269853728622894, | |
"eval_f1_micro": 0.6822, | |
"eval_loss": 0.9842416048049927, | |
"eval_runtime": 13.0046, | |
"eval_samples_per_second": 384.48, | |
"eval_steps_per_second": 12.073, | |
"step": 1750 | |
}, | |
{ | |
"epoch": 2.25, | |
"grad_norm": 2.1136081218719482, | |
"learning_rate": 0.00012489343563512362, | |
"loss": 0.6225, | |
"step": 1760 | |
}, | |
{ | |
"epoch": 2.26, | |
"grad_norm": 3.2503561973571777, | |
"learning_rate": 0.00012276214833759592, | |
"loss": 0.7232, | |
"step": 1770 | |
}, | |
{ | |
"epoch": 2.28, | |
"grad_norm": 2.015230178833008, | |
"learning_rate": 0.00012063086104006821, | |
"loss": 0.4675, | |
"step": 1780 | |
}, | |
{ | |
"epoch": 2.29, | |
"grad_norm": 1.9434397220611572, | |
"learning_rate": 0.0001184995737425405, | |
"loss": 0.6161, | |
"step": 1790 | |
}, | |
{ | |
"epoch": 2.3, | |
"grad_norm": 1.8537811040878296, | |
"learning_rate": 0.00011636828644501279, | |
"loss": 0.5771, | |
"step": 1800 | |
}, | |
{ | |
"epoch": 2.3, | |
"eval_accuracy": 0.6832, | |
"eval_f1_macro": 0.629473764064584, | |
"eval_f1_micro": 0.6832, | |
"eval_loss": 1.0099546909332275, | |
"eval_runtime": 13.0398, | |
"eval_samples_per_second": 383.441, | |
"eval_steps_per_second": 12.04, | |
"step": 1800 | |
}, | |
{ | |
"epoch": 2.31, | |
"grad_norm": 2.997060537338257, | |
"learning_rate": 0.0001142369991474851, | |
"loss": 0.6235, | |
"step": 1810 | |
}, | |
{ | |
"epoch": 2.33, | |
"grad_norm": 2.2757112979888916, | |
"learning_rate": 0.00011210571184995738, | |
"loss": 0.5508, | |
"step": 1820 | |
}, | |
{ | |
"epoch": 2.34, | |
"grad_norm": 2.376284599304199, | |
"learning_rate": 0.00010997442455242967, | |
"loss": 0.5721, | |
"step": 1830 | |
}, | |
{ | |
"epoch": 2.35, | |
"grad_norm": 2.2959024906158447, | |
"learning_rate": 0.00010784313725490197, | |
"loss": 0.5075, | |
"step": 1840 | |
}, | |
{ | |
"epoch": 2.37, | |
"grad_norm": 3.0388712882995605, | |
"learning_rate": 0.00010571184995737426, | |
"loss": 0.596, | |
"step": 1850 | |
}, | |
{ | |
"epoch": 2.37, | |
"eval_accuracy": 0.6784, | |
"eval_f1_macro": 0.6279532390891456, | |
"eval_f1_micro": 0.6784, | |
"eval_loss": 1.0079002380371094, | |
"eval_runtime": 13.0035, | |
"eval_samples_per_second": 384.513, | |
"eval_steps_per_second": 12.074, | |
"step": 1850 | |
}, | |
{ | |
"epoch": 2.38, | |
"grad_norm": 3.541138172149658, | |
"learning_rate": 0.00010358056265984655, | |
"loss": 0.5778, | |
"step": 1860 | |
}, | |
{ | |
"epoch": 2.39, | |
"grad_norm": 2.2630205154418945, | |
"learning_rate": 0.00010144927536231885, | |
"loss": 0.621, | |
"step": 1870 | |
}, | |
{ | |
"epoch": 2.4, | |
"grad_norm": 2.3571438789367676, | |
"learning_rate": 9.931798806479114e-05, | |
"loss": 0.5038, | |
"step": 1880 | |
}, | |
{ | |
"epoch": 2.42, | |
"grad_norm": 2.7417750358581543, | |
"learning_rate": 9.718670076726342e-05, | |
"loss": 0.623, | |
"step": 1890 | |
}, | |
{ | |
"epoch": 2.43, | |
"grad_norm": 2.4251346588134766, | |
"learning_rate": 9.505541346973572e-05, | |
"loss": 0.5209, | |
"step": 1900 | |
}, | |
{ | |
"epoch": 2.43, | |
"eval_accuracy": 0.6828, | |
"eval_f1_macro": 0.6256502211255408, | |
"eval_f1_micro": 0.6828, | |
"eval_loss": 1.0117747783660889, | |
"eval_runtime": 13.0241, | |
"eval_samples_per_second": 383.903, | |
"eval_steps_per_second": 12.055, | |
"step": 1900 | |
}, | |
{ | |
"epoch": 2.44, | |
"grad_norm": 1.5803205966949463, | |
"learning_rate": 9.292412617220801e-05, | |
"loss": 0.5006, | |
"step": 1910 | |
}, | |
{ | |
"epoch": 2.46, | |
"grad_norm": 2.4539146423339844, | |
"learning_rate": 9.07928388746803e-05, | |
"loss": 0.5724, | |
"step": 1920 | |
}, | |
{ | |
"epoch": 2.47, | |
"grad_norm": 3.344937801361084, | |
"learning_rate": 8.86615515771526e-05, | |
"loss": 0.5583, | |
"step": 1930 | |
}, | |
{ | |
"epoch": 2.48, | |
"grad_norm": 4.246283054351807, | |
"learning_rate": 8.653026427962489e-05, | |
"loss": 0.5545, | |
"step": 1940 | |
}, | |
{ | |
"epoch": 2.49, | |
"grad_norm": 2.021843194961548, | |
"learning_rate": 8.439897698209718e-05, | |
"loss": 0.4842, | |
"step": 1950 | |
}, | |
{ | |
"epoch": 2.49, | |
"eval_accuracy": 0.68, | |
"eval_f1_macro": 0.6252536732819449, | |
"eval_f1_micro": 0.68, | |
"eval_loss": 1.0165306329727173, | |
"eval_runtime": 13.0212, | |
"eval_samples_per_second": 383.989, | |
"eval_steps_per_second": 12.057, | |
"step": 1950 | |
}, | |
{ | |
"epoch": 2.51, | |
"grad_norm": 2.490739345550537, | |
"learning_rate": 8.226768968456948e-05, | |
"loss": 0.5029, | |
"step": 1960 | |
}, | |
{ | |
"epoch": 2.52, | |
"grad_norm": 2.5319392681121826, | |
"learning_rate": 8.013640238704177e-05, | |
"loss": 0.6053, | |
"step": 1970 | |
}, | |
{ | |
"epoch": 2.53, | |
"grad_norm": 3.017944812774658, | |
"learning_rate": 7.800511508951406e-05, | |
"loss": 0.6431, | |
"step": 1980 | |
}, | |
{ | |
"epoch": 2.54, | |
"grad_norm": 2.701408863067627, | |
"learning_rate": 7.587382779198636e-05, | |
"loss": 0.5215, | |
"step": 1990 | |
}, | |
{ | |
"epoch": 2.56, | |
"grad_norm": 3.0047366619110107, | |
"learning_rate": 7.374254049445865e-05, | |
"loss": 0.6581, | |
"step": 2000 | |
}, | |
{ | |
"epoch": 2.56, | |
"eval_accuracy": 0.6774, | |
"eval_f1_macro": 0.6233546271904019, | |
"eval_f1_micro": 0.6774, | |
"eval_loss": 1.011858582496643, | |
"eval_runtime": 13.0072, | |
"eval_samples_per_second": 384.403, | |
"eval_steps_per_second": 12.07, | |
"step": 2000 | |
}, | |
{ | |
"epoch": 2.57, | |
"grad_norm": 3.27360200881958, | |
"learning_rate": 7.161125319693094e-05, | |
"loss": 0.6303, | |
"step": 2010 | |
}, | |
{ | |
"epoch": 2.58, | |
"grad_norm": 2.361065626144409, | |
"learning_rate": 6.947996589940324e-05, | |
"loss": 0.5241, | |
"step": 2020 | |
}, | |
{ | |
"epoch": 2.6, | |
"grad_norm": 2.344688653945923, | |
"learning_rate": 6.734867860187553e-05, | |
"loss": 0.5364, | |
"step": 2030 | |
}, | |
{ | |
"epoch": 2.61, | |
"grad_norm": 1.7860138416290283, | |
"learning_rate": 6.521739130434782e-05, | |
"loss": 0.5373, | |
"step": 2040 | |
}, | |
{ | |
"epoch": 2.62, | |
"grad_norm": 2.6773765087127686, | |
"learning_rate": 6.308610400682012e-05, | |
"loss": 0.6417, | |
"step": 2050 | |
}, | |
{ | |
"epoch": 2.62, | |
"eval_accuracy": 0.6834, | |
"eval_f1_macro": 0.6345451404555418, | |
"eval_f1_micro": 0.6834, | |
"eval_loss": 1.0035045146942139, | |
"eval_runtime": 13.0033, | |
"eval_samples_per_second": 384.517, | |
"eval_steps_per_second": 12.074, | |
"step": 2050 | |
}, | |
{ | |
"epoch": 2.63, | |
"grad_norm": 2.499213457107544, | |
"learning_rate": 6.095481670929241e-05, | |
"loss": 0.5684, | |
"step": 2060 | |
}, | |
{ | |
"epoch": 2.65, | |
"grad_norm": 2.7441232204437256, | |
"learning_rate": 5.882352941176471e-05, | |
"loss": 0.4905, | |
"step": 2070 | |
}, | |
{ | |
"epoch": 2.66, | |
"grad_norm": 1.9716840982437134, | |
"learning_rate": 5.6692242114237e-05, | |
"loss": 0.5095, | |
"step": 2080 | |
}, | |
{ | |
"epoch": 2.67, | |
"grad_norm": 3.7521777153015137, | |
"learning_rate": 5.456095481670929e-05, | |
"loss": 0.6517, | |
"step": 2090 | |
}, | |
{ | |
"epoch": 2.69, | |
"grad_norm": 2.506255626678467, | |
"learning_rate": 5.242966751918159e-05, | |
"loss": 0.5388, | |
"step": 2100 | |
}, | |
{ | |
"epoch": 2.69, | |
"eval_accuracy": 0.681, | |
"eval_f1_macro": 0.6320816934828284, | |
"eval_f1_micro": 0.681, | |
"eval_loss": 1.013260006904602, | |
"eval_runtime": 13.0167, | |
"eval_samples_per_second": 384.123, | |
"eval_steps_per_second": 12.061, | |
"step": 2100 | |
}, | |
{ | |
"epoch": 2.7, | |
"grad_norm": 3.5748813152313232, | |
"learning_rate": 5.0298380221653884e-05, | |
"loss": 0.5287, | |
"step": 2110 | |
}, | |
{ | |
"epoch": 2.71, | |
"grad_norm": 2.2621006965637207, | |
"learning_rate": 4.816709292412617e-05, | |
"loss": 0.5491, | |
"step": 2120 | |
}, | |
{ | |
"epoch": 2.72, | |
"grad_norm": 2.285644769668579, | |
"learning_rate": 4.603580562659847e-05, | |
"loss": 0.536, | |
"step": 2130 | |
}, | |
{ | |
"epoch": 2.74, | |
"grad_norm": 2.299203634262085, | |
"learning_rate": 4.3904518329070764e-05, | |
"loss": 0.5505, | |
"step": 2140 | |
}, | |
{ | |
"epoch": 2.75, | |
"grad_norm": 2.765207290649414, | |
"learning_rate": 4.177323103154305e-05, | |
"loss": 0.546, | |
"step": 2150 | |
}, | |
{ | |
"epoch": 2.75, | |
"eval_accuracy": 0.6808, | |
"eval_f1_macro": 0.6312971954178016, | |
"eval_f1_micro": 0.6808, | |
"eval_loss": 1.0133347511291504, | |
"eval_runtime": 13.0018, | |
"eval_samples_per_second": 384.561, | |
"eval_steps_per_second": 12.075, | |
"step": 2150 | |
}, | |
{ | |
"epoch": 2.76, | |
"grad_norm": 2.2666966915130615, | |
"learning_rate": 3.964194373401535e-05, | |
"loss": 0.5687, | |
"step": 2160 | |
}, | |
{ | |
"epoch": 2.77, | |
"grad_norm": 2.788783311843872, | |
"learning_rate": 3.7510656436487644e-05, | |
"loss": 0.5833, | |
"step": 2170 | |
}, | |
{ | |
"epoch": 2.79, | |
"grad_norm": 2.8643319606781006, | |
"learning_rate": 3.537936913895993e-05, | |
"loss": 0.5527, | |
"step": 2180 | |
}, | |
{ | |
"epoch": 2.8, | |
"grad_norm": 1.8892581462860107, | |
"learning_rate": 3.324808184143223e-05, | |
"loss": 0.5383, | |
"step": 2190 | |
}, | |
{ | |
"epoch": 2.81, | |
"grad_norm": 2.8217148780822754, | |
"learning_rate": 3.111679454390452e-05, | |
"loss": 0.5825, | |
"step": 2200 | |
}, | |
{ | |
"epoch": 2.81, | |
"eval_accuracy": 0.683, | |
"eval_f1_macro": 0.6315846004482086, | |
"eval_f1_micro": 0.683, | |
"eval_loss": 1.0057953596115112, | |
"eval_runtime": 13.0117, | |
"eval_samples_per_second": 384.268, | |
"eval_steps_per_second": 12.066, | |
"step": 2200 | |
}, | |
{ | |
"epoch": 2.83, | |
"grad_norm": 2.832188129425049, | |
"learning_rate": 2.8985507246376814e-05, | |
"loss": 0.5727, | |
"step": 2210 | |
}, | |
{ | |
"epoch": 2.84, | |
"grad_norm": 2.1628849506378174, | |
"learning_rate": 2.6854219948849106e-05, | |
"loss": 0.5121, | |
"step": 2220 | |
}, | |
{ | |
"epoch": 2.85, | |
"grad_norm": 2.1341159343719482, | |
"learning_rate": 2.4722932651321398e-05, | |
"loss": 0.5458, | |
"step": 2230 | |
}, | |
{ | |
"epoch": 2.86, | |
"grad_norm": 2.9404149055480957, | |
"learning_rate": 2.2591645353793694e-05, | |
"loss": 0.6009, | |
"step": 2240 | |
}, | |
{ | |
"epoch": 2.88, | |
"grad_norm": 4.241429328918457, | |
"learning_rate": 2.0460358056265986e-05, | |
"loss": 0.6251, | |
"step": 2250 | |
}, | |
{ | |
"epoch": 2.88, | |
"eval_accuracy": 0.6848, | |
"eval_f1_macro": 0.6356618196094358, | |
"eval_f1_micro": 0.6848, | |
"eval_loss": 1.0061750411987305, | |
"eval_runtime": 13.0056, | |
"eval_samples_per_second": 384.449, | |
"eval_steps_per_second": 12.072, | |
"step": 2250 | |
}, | |
{ | |
"epoch": 2.89, | |
"grad_norm": 3.0413520336151123, | |
"learning_rate": 1.8329070758738275e-05, | |
"loss": 0.5449, | |
"step": 2260 | |
}, | |
{ | |
"epoch": 2.9, | |
"grad_norm": 2.552320718765259, | |
"learning_rate": 1.619778346121057e-05, | |
"loss": 0.6832, | |
"step": 2270 | |
}, | |
{ | |
"epoch": 2.92, | |
"grad_norm": 2.7984275817871094, | |
"learning_rate": 1.4066496163682863e-05, | |
"loss": 0.499, | |
"step": 2280 | |
}, | |
{ | |
"epoch": 2.93, | |
"grad_norm": 2.6913793087005615, | |
"learning_rate": 1.1935208866155157e-05, | |
"loss": 0.5622, | |
"step": 2290 | |
}, | |
{ | |
"epoch": 2.94, | |
"grad_norm": 4.210132598876953, | |
"learning_rate": 9.803921568627451e-06, | |
"loss": 0.619, | |
"step": 2300 | |
}, | |
{ | |
"epoch": 2.94, | |
"eval_accuracy": 0.6826, | |
"eval_f1_macro": 0.6307238611192139, | |
"eval_f1_micro": 0.6826, | |
"eval_loss": 1.0014315843582153, | |
"eval_runtime": 13.0143, | |
"eval_samples_per_second": 384.193, | |
"eval_steps_per_second": 12.064, | |
"step": 2300 | |
}, | |
{ | |
"epoch": 2.95, | |
"grad_norm": 2.2768547534942627, | |
"learning_rate": 7.672634271099745e-06, | |
"loss": 0.5254, | |
"step": 2310 | |
}, | |
{ | |
"epoch": 2.97, | |
"grad_norm": 2.5382449626922607, | |
"learning_rate": 5.541346973572038e-06, | |
"loss": 0.4836, | |
"step": 2320 | |
}, | |
{ | |
"epoch": 2.98, | |
"grad_norm": 2.7772395610809326, | |
"learning_rate": 3.4100596760443308e-06, | |
"loss": 0.5497, | |
"step": 2330 | |
}, | |
{ | |
"epoch": 2.99, | |
"grad_norm": 3.6928863525390625, | |
"learning_rate": 1.2787723785166241e-06, | |
"loss": 0.5256, | |
"step": 2340 | |
}, | |
{ | |
"epoch": 3.0, | |
"step": 2346, | |
"total_flos": 1.1463371943575552e+16, | |
"train_loss": 0.8408535616273872, | |
"train_runtime": 1368.6484, | |
"train_samples_per_second": 54.799, | |
"train_steps_per_second": 1.714 | |
} | |
], | |
"logging_steps": 10, | |
"max_steps": 2346, | |
"num_input_tokens_seen": 0, | |
"num_train_epochs": 3, | |
"save_steps": 50, | |
"total_flos": 1.1463371943575552e+16, | |
"train_batch_size": 16, | |
"trial_name": null, | |
"trial_params": null | |
} | |