{
  "best_metric": 0.003461688058450818,
  "best_model_checkpoint": "autotrain-9t83i-0umcp/checkpoint-1372",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 1372,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04591836734693878,
      "grad_norm": 2.446383237838745,
      "learning_rate": 1.1479591836734695e-06,
      "loss": 0.1983,
      "step": 9
    },
    {
      "epoch": 0.09183673469387756,
      "grad_norm": 3.9726994037628174,
      "learning_rate": 2.295918367346939e-06,
      "loss": 0.2526,
      "step": 18
    },
    {
      "epoch": 0.1377551020408163,
      "grad_norm": 4.180245399475098,
      "learning_rate": 3.443877551020408e-06,
      "loss": 0.1536,
      "step": 27
    },
    {
      "epoch": 0.1836734693877551,
      "grad_norm": 3.1698622703552246,
      "learning_rate": 4.591836734693878e-06,
      "loss": 0.3117,
      "step": 36
    },
    {
      "epoch": 0.22959183673469388,
      "grad_norm": 4.330101490020752,
      "learning_rate": 5.7397959183673475e-06,
      "loss": 0.2641,
      "step": 45
    },
    {
      "epoch": 0.2755102040816326,
      "grad_norm": 2.323159694671631,
      "learning_rate": 6.887755102040816e-06,
      "loss": 0.4657,
      "step": 54
    },
    {
      "epoch": 0.32142857142857145,
      "grad_norm": 5.427762508392334,
      "learning_rate": 8.035714285714286e-06,
      "loss": 0.3486,
      "step": 63
    },
    {
      "epoch": 0.3673469387755102,
      "grad_norm": 4.673620223999023,
      "learning_rate": 9.183673469387756e-06,
      "loss": 0.3346,
      "step": 72
    },
    {
      "epoch": 0.413265306122449,
      "grad_norm": 4.061607837677002,
      "learning_rate": 1.0331632653061225e-05,
      "loss": 0.1703,
      "step": 81
    },
    {
      "epoch": 0.45918367346938777,
      "grad_norm": 2.6127052307128906,
      "learning_rate": 1.1479591836734695e-05,
      "loss": 0.4217,
      "step": 90
    },
    {
      "epoch": 0.5051020408163265,
      "grad_norm": 4.84128475189209,
      "learning_rate": 1.2627551020408163e-05,
      "loss": 0.1865,
      "step": 99
    },
    {
      "epoch": 0.5510204081632653,
      "grad_norm": 3.507438898086548,
      "learning_rate": 1.3775510204081633e-05,
      "loss": 0.3114,
      "step": 108
    },
    {
      "epoch": 0.5969387755102041,
      "grad_norm": 1.1375735998153687,
      "learning_rate": 1.4923469387755104e-05,
      "loss": 0.3519,
      "step": 117
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 3.5756773948669434,
      "learning_rate": 1.6071428571428572e-05,
      "loss": 0.2934,
      "step": 126
    },
    {
      "epoch": 0.6887755102040817,
      "grad_norm": 3.714728593826294,
      "learning_rate": 1.7219387755102043e-05,
      "loss": 0.3112,
      "step": 135
    },
    {
      "epoch": 0.7346938775510204,
      "grad_norm": 4.473227500915527,
      "learning_rate": 1.836734693877551e-05,
      "loss": 0.1897,
      "step": 144
    },
    {
      "epoch": 0.7806122448979592,
      "grad_norm": 2.203939914703369,
      "learning_rate": 1.9515306122448983e-05,
      "loss": 0.2168,
      "step": 153
    },
    {
      "epoch": 0.826530612244898,
      "grad_norm": 1.36044180393219,
      "learning_rate": 2.066326530612245e-05,
      "loss": 0.117,
      "step": 162
    },
    {
      "epoch": 0.8724489795918368,
      "grad_norm": 1.2238707542419434,
      "learning_rate": 2.181122448979592e-05,
      "loss": 0.2936,
      "step": 171
    },
    {
      "epoch": 0.9183673469387755,
      "grad_norm": 2.6794865131378174,
      "learning_rate": 2.295918367346939e-05,
      "loss": 0.1305,
      "step": 180
    },
    {
      "epoch": 0.9642857142857143,
      "grad_norm": 3.089512825012207,
      "learning_rate": 2.4107142857142858e-05,
      "loss": 0.397,
      "step": 189
    },
    {
      "epoch": 1.0,
      "eval_gen_len": 128.0,
      "eval_loss": 0.18853867053985596,
      "eval_rouge1": 89.2949,
      "eval_rouge2": 83.057,
      "eval_rougeL": 88.1848,
      "eval_rougeLsum": 88.7391,
      "eval_runtime": 38.0997,
      "eval_samples_per_second": 2.572,
      "eval_steps_per_second": 0.656,
      "step": 196
    },
    {
      "epoch": 1.010204081632653,
      "grad_norm": 2.5835583209991455,
      "learning_rate": 2.5255102040816326e-05,
      "loss": 0.3554,
      "step": 198
    },
    {
      "epoch": 1.0561224489795917,
      "grad_norm": 1.9029709100723267,
      "learning_rate": 2.6403061224489794e-05,
      "loss": 0.1324,
      "step": 207
    },
    {
      "epoch": 1.1020408163265305,
      "grad_norm": 2.5668606758117676,
      "learning_rate": 2.7551020408163265e-05,
      "loss": 0.1243,
      "step": 216
    },
    {
      "epoch": 1.1479591836734695,
      "grad_norm": 2.5577433109283447,
      "learning_rate": 2.869897959183674e-05,
      "loss": 0.0995,
      "step": 225
    },
    {
      "epoch": 1.193877551020408,
      "grad_norm": 4.285234451293945,
      "learning_rate": 2.9846938775510208e-05,
      "loss": 0.2354,
      "step": 234
    },
    {
      "epoch": 1.239795918367347,
      "grad_norm": 3.811396598815918,
      "learning_rate": 3.0994897959183676e-05,
      "loss": 0.181,
      "step": 243
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.5611918568611145,
      "learning_rate": 3.2142857142857144e-05,
      "loss": 0.0721,
      "step": 252
    },
    {
      "epoch": 1.3316326530612246,
      "grad_norm": 4.734651565551758,
      "learning_rate": 3.329081632653062e-05,
      "loss": 0.1276,
      "step": 261
    },
    {
      "epoch": 1.3775510204081631,
      "grad_norm": 2.2007200717926025,
      "learning_rate": 3.443877551020409e-05,
      "loss": 0.0872,
      "step": 270
    },
    {
      "epoch": 1.4234693877551021,
      "grad_norm": 1.3555959463119507,
      "learning_rate": 3.5586734693877555e-05,
      "loss": 0.2188,
      "step": 279
    },
    {
      "epoch": 1.469387755102041,
      "grad_norm": 2.6970372200012207,
      "learning_rate": 3.673469387755102e-05,
      "loss": 0.2266,
      "step": 288
    },
    {
      "epoch": 1.5153061224489797,
      "grad_norm": 10.701809883117676,
      "learning_rate": 3.788265306122449e-05,
      "loss": 0.157,
      "step": 297
    },
    {
      "epoch": 1.5612244897959182,
      "grad_norm": 1.6892890930175781,
      "learning_rate": 3.9030612244897965e-05,
      "loss": 0.0885,
      "step": 306
    },
    {
      "epoch": 1.6071428571428572,
      "grad_norm": 1.5699105262756348,
      "learning_rate": 4.017857142857143e-05,
      "loss": 0.1323,
      "step": 315
    },
    {
      "epoch": 1.6530612244897958,
      "grad_norm": 4.3087639808654785,
      "learning_rate": 4.13265306122449e-05,
      "loss": 0.1482,
      "step": 324
    },
    {
      "epoch": 1.6989795918367347,
      "grad_norm": 0.6790181994438171,
      "learning_rate": 4.247448979591837e-05,
      "loss": 0.1814,
      "step": 333
    },
    {
      "epoch": 1.7448979591836735,
      "grad_norm": 2.1411118507385254,
      "learning_rate": 4.362244897959184e-05,
      "loss": 0.0834,
      "step": 342
    },
    {
      "epoch": 1.7908163265306123,
      "grad_norm": 1.8243377208709717,
      "learning_rate": 4.477040816326531e-05,
      "loss": 0.1124,
      "step": 351
    },
    {
      "epoch": 1.836734693877551,
      "grad_norm": 1.9441792964935303,
      "learning_rate": 4.591836734693878e-05,
      "loss": 0.095,
      "step": 360
    },
    {
      "epoch": 1.8826530612244898,
      "grad_norm": 3.742511034011841,
      "learning_rate": 4.706632653061225e-05,
      "loss": 0.0955,
      "step": 369
    },
    {
      "epoch": 1.9285714285714286,
      "grad_norm": 1.4945087432861328,
      "learning_rate": 4.8214285714285716e-05,
      "loss": 0.0543,
      "step": 378
    },
    {
      "epoch": 1.9744897959183674,
      "grad_norm": 1.0501391887664795,
      "learning_rate": 4.9362244897959184e-05,
      "loss": 0.0861,
      "step": 387
    },
    {
      "epoch": 2.0,
      "eval_gen_len": 128.0,
      "eval_loss": 0.05252710357308388,
      "eval_rouge1": 96.0027,
      "eval_rouge2": 93.3236,
      "eval_rougeL": 95.501,
      "eval_rougeLsum": 95.7994,
      "eval_runtime": 37.9097,
      "eval_samples_per_second": 2.585,
      "eval_steps_per_second": 0.659,
      "step": 392
    },
    {
      "epoch": 2.020408163265306,
      "grad_norm": 2.9681637287139893,
      "learning_rate": 4.9943310657596374e-05,
      "loss": 0.0559,
      "step": 396
    },
    {
      "epoch": 2.066326530612245,
      "grad_norm": 0.807901918888092,
      "learning_rate": 4.981575963718821e-05,
      "loss": 0.0334,
      "step": 405
    },
    {
      "epoch": 2.1122448979591835,
      "grad_norm": 5.0659918785095215,
      "learning_rate": 4.968820861678005e-05,
      "loss": 0.1104,
      "step": 414
    },
    {
      "epoch": 2.1581632653061225,
      "grad_norm": 2.9605283737182617,
      "learning_rate": 4.956065759637189e-05,
      "loss": 0.0817,
      "step": 423
    },
    {
      "epoch": 2.204081632653061,
      "grad_norm": 2.554128408432007,
      "learning_rate": 4.9433106575963725e-05,
      "loss": 0.0524,
      "step": 432
    },
    {
      "epoch": 2.25,
      "grad_norm": 1.5891895294189453,
      "learning_rate": 4.930555555555556e-05,
      "loss": 0.0988,
      "step": 441
    },
    {
      "epoch": 2.295918367346939,
      "grad_norm": 3.2821803092956543,
      "learning_rate": 4.917800453514739e-05,
      "loss": 0.08,
      "step": 450
    },
    {
      "epoch": 2.3418367346938775,
      "grad_norm": 1.6306498050689697,
      "learning_rate": 4.905045351473923e-05,
      "loss": 0.0339,
      "step": 459
    },
    {
      "epoch": 2.387755102040816,
      "grad_norm": 3.2567660808563232,
      "learning_rate": 4.892290249433107e-05,
      "loss": 0.0755,
      "step": 468
    },
    {
      "epoch": 2.433673469387755,
      "grad_norm": 2.143312931060791,
      "learning_rate": 4.8795351473922906e-05,
      "loss": 0.0682,
      "step": 477
    },
    {
      "epoch": 2.479591836734694,
      "grad_norm": 1.9906742572784424,
      "learning_rate": 4.866780045351474e-05,
      "loss": 0.0747,
      "step": 486
    },
    {
      "epoch": 2.5255102040816326,
      "grad_norm": 6.981854438781738,
      "learning_rate": 4.8540249433106574e-05,
      "loss": 0.0533,
      "step": 495
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 1.767125129699707,
      "learning_rate": 4.841269841269841e-05,
      "loss": 0.0537,
      "step": 504
    },
    {
      "epoch": 2.61734693877551,
      "grad_norm": 1.545177698135376,
      "learning_rate": 4.828514739229025e-05,
      "loss": 0.0535,
      "step": 513
    },
    {
      "epoch": 2.663265306122449,
      "grad_norm": 1.537906527519226,
      "learning_rate": 4.8157596371882094e-05,
      "loss": 0.0721,
      "step": 522
    },
    {
      "epoch": 2.7091836734693877,
      "grad_norm": 1.4103052616119385,
      "learning_rate": 4.8030045351473925e-05,
      "loss": 0.0515,
      "step": 531
    },
    {
      "epoch": 2.7551020408163263,
      "grad_norm": 1.9743316173553467,
      "learning_rate": 4.790249433106576e-05,
      "loss": 0.0606,
      "step": 540
    },
    {
      "epoch": 2.8010204081632653,
      "grad_norm": 1.1865582466125488,
      "learning_rate": 4.77749433106576e-05,
      "loss": 0.0336,
      "step": 549
    },
    {
      "epoch": 2.8469387755102042,
      "grad_norm": 1.7514479160308838,
      "learning_rate": 4.764739229024944e-05,
      "loss": 0.0357,
      "step": 558
    },
    {
      "epoch": 2.892857142857143,
      "grad_norm": 3.3062944412231445,
      "learning_rate": 4.751984126984127e-05,
      "loss": 0.0613,
      "step": 567
    },
    {
      "epoch": 2.938775510204082,
      "grad_norm": 0.8094915151596069,
      "learning_rate": 4.7392290249433106e-05,
      "loss": 0.0449,
      "step": 576
    },
    {
      "epoch": 2.9846938775510203,
      "grad_norm": 2.2876973152160645,
      "learning_rate": 4.7264739229024944e-05,
      "loss": 0.06,
      "step": 585
    },
    {
      "epoch": 3.0,
      "eval_gen_len": 127.6939,
      "eval_loss": 0.025030160322785378,
      "eval_rouge1": 98.6655,
      "eval_rouge2": 97.5818,
      "eval_rougeL": 98.5529,
      "eval_rougeLsum": 98.5395,
      "eval_runtime": 37.9756,
      "eval_samples_per_second": 2.581,
      "eval_steps_per_second": 0.658,
      "step": 588
    },
    {
      "epoch": 3.0306122448979593,
      "grad_norm": 1.7782725095748901,
      "learning_rate": 4.713718820861678e-05,
      "loss": 0.0219,
      "step": 594
    },
    {
      "epoch": 3.076530612244898,
      "grad_norm": 0.317821741104126,
      "learning_rate": 4.700963718820862e-05,
      "loss": 0.037,
      "step": 603
    },
    {
      "epoch": 3.122448979591837,
      "grad_norm": 0.8859459757804871,
      "learning_rate": 4.688208616780046e-05,
      "loss": 0.0531,
      "step": 612
    },
    {
      "epoch": 3.1683673469387754,
      "grad_norm": 0.3588017225265503,
      "learning_rate": 4.67687074829932e-05,
      "loss": 0.0307,
      "step": 621
    },
    {
      "epoch": 3.2142857142857144,
      "grad_norm": 2.5311052799224854,
      "learning_rate": 4.664115646258503e-05,
      "loss": 0.0218,
      "step": 630
    },
    {
      "epoch": 3.260204081632653,
      "grad_norm": 0.19862987101078033,
      "learning_rate": 4.651360544217687e-05,
      "loss": 0.0367,
      "step": 639
    },
    {
      "epoch": 3.306122448979592,
      "grad_norm": 0.6744495034217834,
      "learning_rate": 4.638605442176871e-05,
      "loss": 0.0369,
      "step": 648
    },
    {
      "epoch": 3.3520408163265305,
      "grad_norm": 2.8351736068725586,
      "learning_rate": 4.625850340136055e-05,
      "loss": 0.0465,
      "step": 657
    },
    {
      "epoch": 3.3979591836734695,
      "grad_norm": 3.43568754196167,
      "learning_rate": 4.613095238095239e-05,
      "loss": 0.0335,
      "step": 666
    },
    {
      "epoch": 3.443877551020408,
      "grad_norm": 2.1830670833587646,
      "learning_rate": 4.600340136054422e-05,
      "loss": 0.0284,
      "step": 675
    },
    {
      "epoch": 3.489795918367347,
      "grad_norm": 0.9732754826545715,
      "learning_rate": 4.587585034013606e-05,
      "loss": 0.0269,
      "step": 684
    },
    {
      "epoch": 3.5357142857142856,
      "grad_norm": 2.1796019077301025,
      "learning_rate": 4.5748299319727895e-05,
      "loss": 0.0362,
      "step": 693
    },
    {
      "epoch": 3.5816326530612246,
      "grad_norm": 1.1703124046325684,
      "learning_rate": 4.562074829931973e-05,
      "loss": 0.0229,
      "step": 702
    },
    {
      "epoch": 3.627551020408163,
      "grad_norm": 2.6701223850250244,
      "learning_rate": 4.549319727891156e-05,
      "loss": 0.0471,
      "step": 711
    },
    {
      "epoch": 3.673469387755102,
      "grad_norm": 1.3318531513214111,
      "learning_rate": 4.53656462585034e-05,
      "loss": 0.0327,
      "step": 720
    },
    {
      "epoch": 3.7193877551020407,
      "grad_norm": 1.4701616764068604,
      "learning_rate": 4.523809523809524e-05,
      "loss": 0.0184,
      "step": 729
    },
    {
      "epoch": 3.7653061224489797,
      "grad_norm": 0.9515037536621094,
      "learning_rate": 4.5110544217687076e-05,
      "loss": 0.0308,
      "step": 738
    },
    {
      "epoch": 3.811224489795918,
      "grad_norm": 3.164966583251953,
      "learning_rate": 4.4982993197278914e-05,
      "loss": 0.0262,
      "step": 747
    },
    {
      "epoch": 3.857142857142857,
      "grad_norm": 0.8453909158706665,
      "learning_rate": 4.485544217687075e-05,
      "loss": 0.0327,
      "step": 756
    },
    {
      "epoch": 3.9030612244897958,
      "grad_norm": 2.3475284576416016,
      "learning_rate": 4.472789115646259e-05,
      "loss": 0.0222,
      "step": 765
    },
    {
      "epoch": 3.9489795918367347,
      "grad_norm": 0.677014946937561,
      "learning_rate": 4.4600340136054427e-05,
      "loss": 0.0244,
      "step": 774
    },
    {
      "epoch": 3.9948979591836737,
      "grad_norm": 3.246267318725586,
      "learning_rate": 4.4472789115646264e-05,
      "loss": 0.0297,
      "step": 783
    },
    {
      "epoch": 4.0,
      "eval_gen_len": 127.7143,
      "eval_loss": 0.010969710536301136,
      "eval_rouge1": 99.1634,
      "eval_rouge2": 98.7404,
      "eval_rougeL": 99.1194,
      "eval_rougeLsum": 99.1779,
      "eval_runtime": 37.9041,
      "eval_samples_per_second": 2.585,
      "eval_steps_per_second": 0.66,
      "step": 784
    },
    {
      "epoch": 4.040816326530612,
      "grad_norm": 0.39469975233078003,
      "learning_rate": 4.4345238095238095e-05,
      "loss": 0.0244,
      "step": 792
    },
    {
      "epoch": 4.086734693877551,
      "grad_norm": 2.384838581085205,
      "learning_rate": 4.421768707482993e-05,
      "loss": 0.0213,
      "step": 801
    },
    {
      "epoch": 4.13265306122449,
      "grad_norm": 3.5253398418426514,
      "learning_rate": 4.409013605442177e-05,
      "loss": 0.0089,
      "step": 810
    },
    {
      "epoch": 4.178571428571429,
      "grad_norm": 2.1386358737945557,
      "learning_rate": 4.396258503401361e-05,
      "loss": 0.036,
      "step": 819
    },
    {
      "epoch": 4.224489795918367,
      "grad_norm": 0.44415947794914246,
      "learning_rate": 4.383503401360544e-05,
      "loss": 0.0147,
      "step": 828
    },
    {
      "epoch": 4.270408163265306,
      "grad_norm": 2.4095001220703125,
      "learning_rate": 4.3707482993197277e-05,
      "loss": 0.0321,
      "step": 837
    },
    {
      "epoch": 4.316326530612245,
      "grad_norm": 2.395909070968628,
      "learning_rate": 4.357993197278912e-05,
      "loss": 0.0296,
      "step": 846
    },
    {
      "epoch": 4.362244897959184,
      "grad_norm": 4.824786186218262,
      "learning_rate": 4.345238095238096e-05,
      "loss": 0.0233,
      "step": 855
    },
    {
      "epoch": 4.408163265306122,
      "grad_norm": 2.400609254837036,
      "learning_rate": 4.3324829931972796e-05,
      "loss": 0.0389,
      "step": 864
    },
    {
      "epoch": 4.454081632653061,
      "grad_norm": 1.0374516248703003,
      "learning_rate": 4.319727891156463e-05,
      "loss": 0.0307,
      "step": 873
    },
    {
      "epoch": 4.5,
      "grad_norm": 2.5744516849517822,
      "learning_rate": 4.3069727891156465e-05,
      "loss": 0.0234,
      "step": 882
    },
    {
      "epoch": 4.545918367346939,
      "grad_norm": 0.4013276696205139,
      "learning_rate": 4.29421768707483e-05,
      "loss": 0.0171,
      "step": 891
    },
    {
      "epoch": 4.591836734693878,
      "grad_norm": 1.4164782762527466,
      "learning_rate": 4.281462585034014e-05,
      "loss": 0.0217,
      "step": 900
    },
    {
      "epoch": 4.637755102040816,
      "grad_norm": 0.2612836956977844,
      "learning_rate": 4.268707482993197e-05,
      "loss": 0.007,
      "step": 909
    },
    {
      "epoch": 4.683673469387755,
      "grad_norm": 0.537272036075592,
      "learning_rate": 4.255952380952381e-05,
      "loss": 0.034,
      "step": 918
    },
    {
      "epoch": 4.729591836734694,
      "grad_norm": 0.7186601758003235,
      "learning_rate": 4.2431972789115646e-05,
      "loss": 0.0173,
      "step": 927
    },
    {
      "epoch": 4.775510204081632,
      "grad_norm": 0.5405403971672058,
      "learning_rate": 4.2304421768707484e-05,
      "loss": 0.0175,
      "step": 936
    },
    {
      "epoch": 4.821428571428571,
      "grad_norm": 0.49115684628486633,
      "learning_rate": 4.217687074829932e-05,
      "loss": 0.0115,
      "step": 945
    },
    {
      "epoch": 4.86734693877551,
      "grad_norm": 0.08387177437543869,
      "learning_rate": 4.204931972789116e-05,
      "loss": 0.0159,
      "step": 954
    },
    {
      "epoch": 4.913265306122449,
      "grad_norm": 0.31179898977279663,
      "learning_rate": 4.1921768707483e-05,
      "loss": 0.0163,
      "step": 963
    },
    {
      "epoch": 4.959183673469388,
      "grad_norm": 1.2666131258010864,
      "learning_rate": 4.1794217687074834e-05,
      "loss": 0.0221,
      "step": 972
    },
    {
      "epoch": 5.0,
      "eval_gen_len": 127.7143,
      "eval_loss": 0.00930405966937542,
      "eval_rouge1": 99.2277,
      "eval_rouge2": 98.8302,
      "eval_rougeL": 99.2299,
      "eval_rougeLsum": 99.2242,
      "eval_runtime": 37.8008,
      "eval_samples_per_second": 2.593,
      "eval_steps_per_second": 0.661,
      "step": 980
    },
    {
      "epoch": 5.005102040816326,
      "grad_norm": 1.5087450742721558,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.0229,
      "step": 981
    },
    {
      "epoch": 5.051020408163265,
      "grad_norm": 0.1325344741344452,
      "learning_rate": 4.15391156462585e-05,
      "loss": 0.011,
      "step": 990
    },
    {
      "epoch": 5.096938775510204,
      "grad_norm": 0.32866305112838745,
      "learning_rate": 4.141156462585034e-05,
      "loss": 0.0112,
      "step": 999
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 3.6175878047943115,
      "learning_rate": 4.128401360544218e-05,
      "loss": 0.0151,
      "step": 1008
    },
    {
      "epoch": 5.188775510204081,
      "grad_norm": 1.1477692127227783,
      "learning_rate": 4.1156462585034016e-05,
      "loss": 0.0093,
      "step": 1017
    },
    {
      "epoch": 5.23469387755102,
      "grad_norm": 0.1428331732749939,
      "learning_rate": 4.1028911564625853e-05,
      "loss": 0.0115,
      "step": 1026
    },
    {
      "epoch": 5.280612244897959,
      "grad_norm": 0.1712471842765808,
      "learning_rate": 4.0901360544217684e-05,
      "loss": 0.0219,
      "step": 1035
    },
    {
      "epoch": 5.326530612244898,
      "grad_norm": 0.21103200316429138,
      "learning_rate": 4.077380952380952e-05,
      "loss": 0.0121,
      "step": 1044
    },
    {
      "epoch": 5.372448979591836,
      "grad_norm": 0.15935823321342468,
      "learning_rate": 4.0646258503401366e-05,
      "loss": 0.0074,
      "step": 1053
    },
    {
      "epoch": 5.418367346938775,
      "grad_norm": 0.7716410756111145,
      "learning_rate": 4.0518707482993204e-05,
      "loss": 0.0314,
      "step": 1062
    },
    {
      "epoch": 5.464285714285714,
      "grad_norm": 0.5172207951545715,
      "learning_rate": 4.0391156462585035e-05,
      "loss": 0.0101,
      "step": 1071
    },
    {
      "epoch": 5.510204081632653,
      "grad_norm": 1.6434773206710815,
      "learning_rate": 4.026360544217687e-05,
      "loss": 0.0176,
      "step": 1080
    },
    {
      "epoch": 5.5561224489795915,
      "grad_norm": 0.2688583731651306,
      "learning_rate": 4.013605442176871e-05,
      "loss": 0.0136,
      "step": 1089
    },
    {
      "epoch": 5.6020408163265305,
      "grad_norm": 0.7320523858070374,
      "learning_rate": 4.000850340136055e-05,
      "loss": 0.0132,
      "step": 1098
    },
    {
      "epoch": 5.6479591836734695,
      "grad_norm": 0.7341743111610413,
      "learning_rate": 3.9880952380952386e-05,
      "loss": 0.0064,
      "step": 1107
    },
    {
      "epoch": 5.6938775510204085,
      "grad_norm": 1.0884281396865845,
      "learning_rate": 3.9753401360544216e-05,
      "loss": 0.0192,
      "step": 1116
    },
    {
      "epoch": 5.739795918367347,
      "grad_norm": 0.36947500705718994,
      "learning_rate": 3.9625850340136054e-05,
      "loss": 0.0128,
      "step": 1125
    },
    {
      "epoch": 5.785714285714286,
      "grad_norm": 0.5507375597953796,
      "learning_rate": 3.949829931972789e-05,
      "loss": 0.005,
      "step": 1134
    },
    {
      "epoch": 5.831632653061225,
      "grad_norm": 2.703362226486206,
      "learning_rate": 3.937074829931973e-05,
      "loss": 0.0336,
      "step": 1143
    },
    {
      "epoch": 5.877551020408164,
      "grad_norm": 0.28341037034988403,
      "learning_rate": 3.924319727891157e-05,
      "loss": 0.0085,
      "step": 1152
    },
    {
      "epoch": 5.923469387755102,
      "grad_norm": 0.5502461194992065,
      "learning_rate": 3.9115646258503405e-05,
      "loss": 0.0197,
      "step": 1161
    },
    {
      "epoch": 5.969387755102041,
      "grad_norm": 0.14302176237106323,
      "learning_rate": 3.898809523809524e-05,
      "loss": 0.0085,
      "step": 1170
    },
    {
      "epoch": 6.0,
      "eval_gen_len": 127.0408,
      "eval_loss": 0.005346208810806274,
      "eval_rouge1": 99.1155,
      "eval_rouge2": 98.9388,
      "eval_rougeL": 99.0647,
      "eval_rougeLsum": 99.1173,
      "eval_runtime": 37.8929,
      "eval_samples_per_second": 2.586,
      "eval_steps_per_second": 0.66,
      "step": 1176
    },
    {
      "epoch": 6.01530612244898,
      "grad_norm": 3.993354558944702,
      "learning_rate": 3.886054421768708e-05,
      "loss": 0.0131,
      "step": 1179
    },
    {
      "epoch": 6.061224489795919,
      "grad_norm": 1.2994415760040283,
      "learning_rate": 3.873299319727892e-05,
      "loss": 0.0164,
      "step": 1188
    },
    {
      "epoch": 6.107142857142857,
      "grad_norm": 1.7750136852264404,
      "learning_rate": 3.860544217687075e-05,
      "loss": 0.0049,
      "step": 1197
    },
    {
      "epoch": 6.153061224489796,
      "grad_norm": 1.9320180416107178,
      "learning_rate": 3.8477891156462586e-05,
      "loss": 0.0039,
      "step": 1206
    },
    {
      "epoch": 6.198979591836735,
      "grad_norm": 0.19184532761573792,
      "learning_rate": 3.8350340136054424e-05,
      "loss": 0.0045,
      "step": 1215
    },
    {
      "epoch": 6.244897959183674,
      "grad_norm": 0.7971787452697754,
      "learning_rate": 3.822278911564626e-05,
      "loss": 0.0063,
      "step": 1224
    },
    {
      "epoch": 6.290816326530612,
      "grad_norm": 0.3261989951133728,
      "learning_rate": 3.809523809523809e-05,
      "loss": 0.0166,
      "step": 1233
    },
    {
      "epoch": 6.336734693877551,
      "grad_norm": 3.3873021602630615,
      "learning_rate": 3.796768707482993e-05,
      "loss": 0.0074,
      "step": 1242
    },
    {
      "epoch": 6.38265306122449,
      "grad_norm": 2.5156352519989014,
      "learning_rate": 3.784013605442177e-05,
      "loss": 0.0132,
      "step": 1251
    },
    {
      "epoch": 6.428571428571429,
      "grad_norm": 4.740504264831543,
      "learning_rate": 3.771258503401361e-05,
      "loss": 0.0197,
      "step": 1260
    },
    {
      "epoch": 6.474489795918368,
      "grad_norm": 0.14355747401714325,
      "learning_rate": 3.758503401360544e-05,
      "loss": 0.005,
      "step": 1269
    },
    {
      "epoch": 6.520408163265306,
      "grad_norm": 1.1807011365890503,
      "learning_rate": 3.745748299319728e-05,
      "loss": 0.0073,
      "step": 1278
    },
    {
      "epoch": 6.566326530612245,
      "grad_norm": 0.22915400564670563,
      "learning_rate": 3.732993197278912e-05,
      "loss": 0.006,
      "step": 1287
    },
    {
      "epoch": 6.612244897959184,
      "grad_norm": 0.3873462975025177,
      "learning_rate": 3.7202380952380956e-05,
      "loss": 0.0113,
      "step": 1296
    },
    {
      "epoch": 6.658163265306122,
      "grad_norm": 1.502334475517273,
      "learning_rate": 3.707482993197279e-05,
      "loss": 0.0145,
      "step": 1305
    },
    {
      "epoch": 6.704081632653061,
      "grad_norm": 1.4580602645874023,
      "learning_rate": 3.6947278911564624e-05,
      "loss": 0.0115,
      "step": 1314
    },
    {
      "epoch": 6.75,
      "grad_norm": 0.14768829941749573,
      "learning_rate": 3.681972789115646e-05,
      "loss": 0.0057,
      "step": 1323
    },
    {
      "epoch": 6.795918367346939,
      "grad_norm": 0.10615669190883636,
      "learning_rate": 3.66921768707483e-05,
      "loss": 0.0113,
      "step": 1332
    },
    {
      "epoch": 6.841836734693878,
      "grad_norm": 0.17342956364154816,
      "learning_rate": 3.656462585034014e-05,
      "loss": 0.0077,
      "step": 1341
    },
    {
      "epoch": 6.887755102040816,
      "grad_norm": 1.031613826751709,
      "learning_rate": 3.6437074829931975e-05,
      "loss": 0.0061,
      "step": 1350
    },
    {
      "epoch": 6.933673469387755,
      "grad_norm": 0.5434854030609131,
      "learning_rate": 3.630952380952381e-05,
      "loss": 0.0046,
      "step": 1359
    },
    {
      "epoch": 6.979591836734694,
      "grad_norm": 2.6706666946411133,
      "learning_rate": 3.618197278911565e-05,
      "loss": 0.008,
      "step": 1368
    },
    {
      "epoch": 7.0,
      "eval_gen_len": 127.7143,
      "eval_loss": 0.003461688058450818,
      "eval_rouge1": 99.738,
      "eval_rouge2": 99.6362,
      "eval_rougeL": 99.7372,
      "eval_rougeLsum": 99.739,
      "eval_runtime": 37.7713,
      "eval_samples_per_second": 2.595,
      "eval_steps_per_second": 0.662,
      "step": 1372
    }
  ],
  "logging_steps": 9,
  "max_steps": 3920,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 93017113362432.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}