{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.8586640851887706,
  "eval_steps": 600,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015488867376573089,
      "grad_norm": 421527552.0,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 61.1175,
      "step": 10
    },
    {
      "epoch": 0.030977734753146177,
      "grad_norm": 337641472.0,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 60.1666,
      "step": 20
    },
    {
      "epoch": 0.046466602129719266,
      "grad_norm": 413138944.0,
      "learning_rate": 3e-06,
      "loss": 60.9036,
      "step": 30
    },
    {
      "epoch": 0.061955469506292354,
      "grad_norm": 1367343104.0,
      "learning_rate": 4.000000000000001e-06,
      "loss": 61.7597,
      "step": 40
    },
    {
      "epoch": 0.07744433688286544,
      "grad_norm": 57933824.0,
      "learning_rate": 5e-06,
      "loss": 61.2844,
      "step": 50
    },
    {
      "epoch": 0.09293320425943853,
      "grad_norm": 94371840.0,
      "learning_rate": 6e-06,
      "loss": 60.3878,
      "step": 60
    },
    {
      "epoch": 0.10842207163601161,
      "grad_norm": 40632320.0,
      "learning_rate": 7.000000000000001e-06,
      "loss": 59.0391,
      "step": 70
    },
    {
      "epoch": 0.12391093901258471,
      "grad_norm": 20971520.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 61.2704,
      "step": 80
    },
    {
      "epoch": 0.1393998063891578,
      "grad_norm": 110624768.0,
      "learning_rate": 9e-06,
      "loss": 59.6674,
      "step": 90
    },
    {
      "epoch": 0.15488867376573087,
      "grad_norm": 66584576.0,
      "learning_rate": 1e-05,
      "loss": 59.4588,
      "step": 100
    },
    {
      "epoch": 0.17037754114230397,
      "grad_norm": 89653248.0,
      "learning_rate": 1.1000000000000001e-05,
      "loss": 58.9144,
      "step": 110
    },
    {
      "epoch": 0.18586640851887706,
      "grad_norm": 56360960.0,
      "learning_rate": 1.2e-05,
      "loss": 58.3093,
      "step": 120
    },
    {
      "epoch": 0.20135527589545016,
      "grad_norm": 19791872.0,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 58.0143,
      "step": 130
    },
    {
      "epoch": 0.21684414327202323,
      "grad_norm": 5242880.0,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 56.9984,
      "step": 140
    },
    {
      "epoch": 0.23233301064859632,
      "grad_norm": 55050240.0,
      "learning_rate": 1.5e-05,
      "loss": 57.6238,
      "step": 150
    },
    {
      "epoch": 0.24782187802516942,
      "grad_norm": 118489088.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 55.2309,
      "step": 160
    },
    {
      "epoch": 0.2633107454017425,
      "grad_norm": 61865984.0,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 54.6207,
      "step": 170
    },
    {
      "epoch": 0.2787996127783156,
      "grad_norm": 13041664.0,
      "learning_rate": 1.8e-05,
      "loss": 52.8616,
      "step": 180
    },
    {
      "epoch": 0.2942884801548887,
      "grad_norm": 87031808.0,
      "learning_rate": 1.9e-05,
      "loss": 52.4976,
      "step": 190
    },
    {
      "epoch": 0.30977734753146174,
      "grad_norm": 3129344.0,
      "learning_rate": 2e-05,
      "loss": 52.6041,
      "step": 200
    },
    {
      "epoch": 0.32526621490803487,
      "grad_norm": 99090432.0,
      "learning_rate": 2.1e-05,
      "loss": 54.6559,
      "step": 210
    },
    {
      "epoch": 0.34075508228460794,
      "grad_norm": 3238002688.0,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 52.2666,
      "step": 220
    },
    {
      "epoch": 0.356243949661181,
      "grad_norm": 8454144.0,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 51.7553,
      "step": 230
    },
    {
      "epoch": 0.3717328170377541,
      "grad_norm": 81264640.0,
      "learning_rate": 2.4e-05,
      "loss": 49.4186,
      "step": 240
    },
    {
      "epoch": 0.3872216844143272,
      "grad_norm": 1531904.0,
      "learning_rate": 2.5e-05,
      "loss": 48.724,
      "step": 250
    },
    {
      "epoch": 0.4027105517909003,
      "grad_norm": 6782976.0,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 47.4897,
      "step": 260
    },
    {
      "epoch": 0.4181994191674734,
      "grad_norm": 462848.0,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 46.1025,
      "step": 270
    },
    {
      "epoch": 0.43368828654404645,
      "grad_norm": 227328.0,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 43.953,
      "step": 280
    },
    {
      "epoch": 0.4491771539206196,
      "grad_norm": 212992.0,
      "learning_rate": 2.9e-05,
      "loss": 42.786,
      "step": 290
    },
    {
      "epoch": 0.46466602129719264,
      "grad_norm": 40704.0,
      "learning_rate": 3e-05,
      "loss": 41.3757,
      "step": 300
    },
    {
      "epoch": 0.4801548886737657,
      "grad_norm": 42991616.0,
      "learning_rate": 3.1e-05,
      "loss": 42.0031,
      "step": 310
    },
    {
      "epoch": 0.49564375605033884,
      "grad_norm": 21102592.0,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 41.9207,
      "step": 320
    },
    {
      "epoch": 0.5111326234269119,
      "grad_norm": 16187392.0,
      "learning_rate": 3.3e-05,
      "loss": 42.2336,
      "step": 330
    },
    {
      "epoch": 0.526621490803485,
      "grad_norm": 164864.0,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 40.6817,
      "step": 340
    },
    {
      "epoch": 0.542110358180058,
      "grad_norm": 517996544.0,
      "learning_rate": 3.5e-05,
      "loss": 44.9866,
      "step": 350
    },
    {
      "epoch": 0.5575992255566312,
      "grad_norm": 87556096.0,
      "learning_rate": 3.6e-05,
      "loss": 47.7577,
      "step": 360
    },
    {
      "epoch": 0.5730880929332043,
      "grad_norm": 29753344.0,
      "learning_rate": 3.7e-05,
      "loss": 50.8352,
      "step": 370
    },
    {
      "epoch": 0.5885769603097774,
      "grad_norm": 38273024.0,
      "learning_rate": 3.8e-05,
      "loss": 50.4585,
      "step": 380
    },
    {
      "epoch": 0.6040658276863504,
      "grad_norm": 24248320.0,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 53.4794,
      "step": 390
    },
    {
      "epoch": 0.6195546950629235,
      "grad_norm": 637534208.0,
      "learning_rate": 4e-05,
      "loss": 53.7074,
      "step": 400
    },
    {
      "epoch": 0.6350435624394967,
      "grad_norm": 679477248.0,
      "learning_rate": 4.1e-05,
      "loss": 57.4144,
      "step": 410
    },
    {
      "epoch": 0.6505324298160697,
      "grad_norm": 583008256.0,
      "learning_rate": 4.2e-05,
      "loss": 53.7095,
      "step": 420
    },
    {
      "epoch": 0.6660212971926428,
      "grad_norm": 57933824.0,
      "learning_rate": 4.3e-05,
      "loss": 54.9929,
      "step": 430
    },
    {
      "epoch": 0.6815101645692159,
      "grad_norm": 61079552.0,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 59.2993,
      "step": 440
    },
    {
      "epoch": 0.6969990319457889,
      "grad_norm": 227540992.0,
      "learning_rate": 4.5e-05,
      "loss": 60.4038,
      "step": 450
    },
    {
      "epoch": 0.712487899322362,
      "grad_norm": 190840832.0,
      "learning_rate": 4.600000000000001e-05,
      "loss": 63.1003,
      "step": 460
    },
    {
      "epoch": 0.7279767666989352,
      "grad_norm": 327155712.0,
      "learning_rate": 4.7e-05,
      "loss": 64.5368,
      "step": 470
    },
    {
      "epoch": 0.7434656340755083,
      "grad_norm": 679477248.0,
      "learning_rate": 4.8e-05,
      "loss": 61.6716,
      "step": 480
    },
    {
      "epoch": 0.7589545014520813,
      "grad_norm": 276824064.0,
      "learning_rate": 4.9e-05,
      "loss": 62.8625,
      "step": 490
    },
    {
      "epoch": 0.7744433688286544,
      "grad_norm": 78643200.0,
      "learning_rate": 5e-05,
      "loss": 63.9848,
      "step": 500
    },
    {
      "epoch": 0.7899322362052275,
      "grad_norm": 224395264.0,
      "learning_rate": 5.1000000000000006e-05,
      "loss": 64.5113,
      "step": 510
    },
    {
      "epoch": 0.8054211035818006,
      "grad_norm": 154140672.0,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 64.9807,
      "step": 520
    },
    {
      "epoch": 0.8209099709583737,
      "grad_norm": 805306368.0,
      "learning_rate": 5.300000000000001e-05,
      "loss": 65.7812,
      "step": 530
    },
    {
      "epoch": 0.8363988383349468,
      "grad_norm": 339738624.0,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 67.1272,
      "step": 540
    },
    {
      "epoch": 0.8518877057115198,
      "grad_norm": 1002438656.0,
      "learning_rate": 5.500000000000001e-05,
      "loss": 66.3905,
      "step": 550
    },
    {
      "epoch": 0.8673765730880929,
      "grad_norm": 2583691264.0,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 66.0419,
      "step": 560
    },
    {
      "epoch": 0.882865440464666,
      "grad_norm": 411041792.0,
      "learning_rate": 5.6999999999999996e-05,
      "loss": 62.0064,
      "step": 570
    },
    {
      "epoch": 0.8983543078412392,
      "grad_norm": 70254592.0,
      "learning_rate": 5.8e-05,
      "loss": 63.6127,
      "step": 580
    },
    {
      "epoch": 0.9138431752178122,
      "grad_norm": 191889408.0,
      "learning_rate": 5.9e-05,
      "loss": 63.481,
      "step": 590
    },
    {
      "epoch": 0.9293320425943853,
      "grad_norm": 436207616.0,
      "learning_rate": 6e-05,
      "loss": 66.0833,
      "step": 600
    },
    {
      "epoch": 0.9293320425943853,
      "eval_loss": 69.23383331298828,
      "eval_runtime": 141.0364,
      "eval_samples_per_second": 10.636,
      "eval_steps_per_second": 2.659,
      "step": 600
    },
    {
      "epoch": 0.9448209099709584,
      "grad_norm": 33161216.0,
      "learning_rate": 6.1e-05,
      "loss": 65.1055,
      "step": 610
    },
    {
      "epoch": 0.9603097773475314,
      "grad_norm": 79167488.0,
      "learning_rate": 6.2e-05,
      "loss": 65.4074,
      "step": 620
    },
    {
      "epoch": 0.9757986447241046,
      "grad_norm": 119013376.0,
      "learning_rate": 6.3e-05,
      "loss": 64.0661,
      "step": 630
    },
    {
      "epoch": 0.9912875121006777,
      "grad_norm": 116391936.0,
      "learning_rate": 6.400000000000001e-05,
      "loss": 63.0841,
      "step": 640
    },
    {
      "epoch": 1.0067763794772506,
      "grad_norm": 191889408.0,
      "learning_rate": 6.500000000000001e-05,
      "loss": 63.6633,
      "step": 650
    },
    {
      "epoch": 1.0222652468538238,
      "grad_norm": 35389440.0,
      "learning_rate": 6.6e-05,
      "loss": 63.879,
      "step": 660
    },
    {
      "epoch": 1.037754114230397,
      "grad_norm": 219152384.0,
      "learning_rate": 6.7e-05,
      "loss": 65.5105,
      "step": 670
    },
    {
      "epoch": 1.05324298160697,
      "grad_norm": 476053504.0,
      "learning_rate": 6.800000000000001e-05,
      "loss": 66.3527,
      "step": 680
    },
    {
      "epoch": 1.0687318489835431,
      "grad_norm": 2348810240.0,
      "learning_rate": 6.9e-05,
      "loss": 66.1827,
      "step": 690
    },
    {
      "epoch": 1.084220716360116,
      "grad_norm": 452984832.0,
      "learning_rate": 7e-05,
      "loss": 67.8703,
      "step": 700
    },
    {
      "epoch": 1.0997095837366893,
      "grad_norm": 46137344.0,
      "learning_rate": 7.1e-05,
      "loss": 67.2648,
      "step": 710
    },
    {
      "epoch": 1.1151984511132624,
      "grad_norm": 70778880.0,
      "learning_rate": 7.2e-05,
      "loss": 62.0604,
      "step": 720
    },
    {
      "epoch": 1.1306873184898354,
      "grad_norm": 283115520.0,
      "learning_rate": 7.3e-05,
      "loss": 60.8914,
      "step": 730
    },
    {
      "epoch": 1.1461761858664086,
      "grad_norm": 78118912.0,
      "learning_rate": 7.4e-05,
      "loss": 60.0754,
      "step": 740
    },
    {
      "epoch": 1.1616650532429815,
      "grad_norm": 120061952.0,
      "learning_rate": 7.500000000000001e-05,
      "loss": 63.6695,
      "step": 750
    },
    {
      "epoch": 1.1771539206195547,
      "grad_norm": 90701824.0,
      "learning_rate": 7.6e-05,
      "loss": 65.6076,
      "step": 760
    },
    {
      "epoch": 1.1926427879961277,
      "grad_norm": 23724032.0,
      "learning_rate": 7.7e-05,
      "loss": 64.6991,
      "step": 770
    },
    {
      "epoch": 1.2081316553727008,
      "grad_norm": 22806528.0,
      "learning_rate": 7.800000000000001e-05,
      "loss": 64.4208,
      "step": 780
    },
    {
      "epoch": 1.223620522749274,
      "grad_norm": 19529728.0,
      "learning_rate": 7.900000000000001e-05,
      "loss": 58.9719,
      "step": 790
    },
    {
      "epoch": 1.239109390125847,
      "grad_norm": 154140672.0,
      "learning_rate": 8e-05,
      "loss": 58.4048,
      "step": 800
    },
    {
      "epoch": 1.2545982575024202,
      "grad_norm": 4882432.0,
      "learning_rate": 8.1e-05,
      "loss": 63.3226,
      "step": 810
    },
    {
      "epoch": 1.2700871248789931,
      "grad_norm": 4718592.0,
      "learning_rate": 8.2e-05,
      "loss": 62.6276,
      "step": 820
    },
    {
      "epoch": 1.2855759922555663,
      "grad_norm": 56098816.0,
      "learning_rate": 8.3e-05,
      "loss": 57.6699,
      "step": 830
    },
    {
      "epoch": 1.3010648596321395,
      "grad_norm": 2281701376.0,
      "learning_rate": 8.4e-05,
      "loss": 55.8831,
      "step": 840
    },
    {
      "epoch": 1.3165537270087124,
      "grad_norm": 63963136.0,
      "learning_rate": 8.5e-05,
      "loss": 58.8188,
      "step": 850
    },
    {
      "epoch": 1.3320425943852856,
      "grad_norm": 66322432.0,
      "learning_rate": 8.6e-05,
      "loss": 57.2062,
      "step": 860
    },
    {
      "epoch": 1.3475314617618586,
      "grad_norm": 17956864.0,
      "learning_rate": 8.7e-05,
      "loss": 59.4092,
      "step": 870
    },
    {
      "epoch": 1.3630203291384317,
      "grad_norm": 246415360.0,
      "learning_rate": 8.800000000000001e-05,
      "loss": 56.8145,
      "step": 880
    },
    {
      "epoch": 1.378509196515005,
      "grad_norm": 205520896.0,
      "learning_rate": 8.900000000000001e-05,
      "loss": 56.4558,
      "step": 890
    },
    {
      "epoch": 1.3939980638915779,
      "grad_norm": 90177536.0,
      "learning_rate": 9e-05,
      "loss": 54.4208,
      "step": 900
    },
    {
      "epoch": 1.409486931268151,
      "grad_norm": 383778816.0,
      "learning_rate": 9.1e-05,
      "loss": 53.8711,
      "step": 910
    },
    {
      "epoch": 1.424975798644724,
      "grad_norm": 23461888.0,
      "learning_rate": 9.200000000000001e-05,
      "loss": 52.0157,
      "step": 920
    },
    {
      "epoch": 1.4404646660212972,
      "grad_norm": 27394048.0,
      "learning_rate": 9.300000000000001e-05,
      "loss": 48.0571,
      "step": 930
    },
    {
      "epoch": 1.4559535333978704,
      "grad_norm": 8716288.0,
      "learning_rate": 9.4e-05,
      "loss": 43.4202,
      "step": 940
    },
    {
      "epoch": 1.4714424007744433,
      "grad_norm": 5570560.0,
      "learning_rate": 9.5e-05,
      "loss": 40.7559,
      "step": 950
    },
    {
      "epoch": 1.4869312681510165,
      "grad_norm": 20316160.0,
      "learning_rate": 9.6e-05,
      "loss": 39.1436,
      "step": 960
    },
    {
      "epoch": 1.5024201355275895,
      "grad_norm": 15204352.0,
      "learning_rate": 9.7e-05,
      "loss": 36.618,
      "step": 970
    },
    {
      "epoch": 1.5179090029041626,
      "grad_norm": 5832704.0,
      "learning_rate": 9.8e-05,
      "loss": 34.3857,
      "step": 980
    },
    {
      "epoch": 1.5333978702807358,
      "grad_norm": 2293760.0,
      "learning_rate": 9.900000000000001e-05,
      "loss": 29.7184,
      "step": 990
    },
    {
      "epoch": 1.5488867376573088,
      "grad_norm": 22282240.0,
      "learning_rate": 0.0001,
      "loss": 24.8724,
      "step": 1000
    },
    {
      "epoch": 1.5643756050338817,
      "grad_norm": 10682368.0,
      "learning_rate": 9.98165137614679e-05,
      "loss": 21.2392,
      "step": 1010
    },
    {
      "epoch": 1.579864472410455,
      "grad_norm": 25690112.0,
      "learning_rate": 9.963302752293578e-05,
      "loss": 18.7594,
      "step": 1020
    },
    {
      "epoch": 1.595353339787028,
      "grad_norm": 15400960.0,
      "learning_rate": 9.944954128440368e-05,
      "loss": 17.9276,
      "step": 1030
    },
    {
      "epoch": 1.6108422071636013,
      "grad_norm": 2359296.0,
      "learning_rate": 9.926605504587157e-05,
      "loss": 16.505,
      "step": 1040
    },
    {
      "epoch": 1.6263310745401742,
      "grad_norm": 19005440.0,
      "learning_rate": 9.908256880733946e-05,
      "loss": 15.258,
      "step": 1050
    },
    {
      "epoch": 1.6418199419167472,
      "grad_norm": 9240576.0,
      "learning_rate": 9.889908256880734e-05,
      "loss": 13.9628,
      "step": 1060
    },
    {
      "epoch": 1.6573088092933204,
      "grad_norm": 7012352.0,
      "learning_rate": 9.871559633027525e-05,
      "loss": 14.8131,
      "step": 1070
    },
    {
      "epoch": 1.6727976766698935,
      "grad_norm": 8388608.0,
      "learning_rate": 9.853211009174312e-05,
      "loss": 13.4851,
      "step": 1080
    },
    {
      "epoch": 1.6882865440464667,
      "grad_norm": 1286144.0,
      "learning_rate": 9.834862385321102e-05,
      "loss": 12.8225,
      "step": 1090
    },
    {
      "epoch": 1.7037754114230397,
      "grad_norm": 2490368.0,
      "learning_rate": 9.816513761467891e-05,
      "loss": 11.7111,
      "step": 1100
    },
    {
      "epoch": 1.7192642787996126,
      "grad_norm": 1064960.0,
      "learning_rate": 9.79816513761468e-05,
      "loss": 11.347,
      "step": 1110
    },
    {
      "epoch": 1.7347531461761858,
      "grad_norm": 352256.0,
      "learning_rate": 9.779816513761468e-05,
      "loss": 10.6953,
      "step": 1120
    },
    {
      "epoch": 1.750242013552759,
      "grad_norm": 4046848.0,
      "learning_rate": 9.761467889908259e-05,
      "loss": 10.3879,
      "step": 1130
    },
    {
      "epoch": 1.7657308809293322,
      "grad_norm": 348160.0,
      "learning_rate": 9.743119266055046e-05,
      "loss": 10.5364,
      "step": 1140
    },
    {
      "epoch": 1.7812197483059051,
      "grad_norm": 6193152.0,
      "learning_rate": 9.724770642201836e-05,
      "loss": 10.7673,
      "step": 1150
    },
    {
      "epoch": 1.796708615682478,
      "grad_norm": 671744.0,
      "learning_rate": 9.706422018348625e-05,
      "loss": 10.0088,
      "step": 1160
    },
    {
      "epoch": 1.8121974830590513,
      "grad_norm": 557056.0,
      "learning_rate": 9.688073394495414e-05,
      "loss": 10.1469,
      "step": 1170
    },
    {
      "epoch": 1.8276863504356244,
      "grad_norm": 835584.0,
      "learning_rate": 9.669724770642202e-05,
      "loss": 10.2772,
      "step": 1180
    },
    {
      "epoch": 1.8431752178121976,
      "grad_norm": 2277376.0,
      "learning_rate": 9.651376146788991e-05,
      "loss": 9.8019,
      "step": 1190
    },
    {
      "epoch": 1.8586640851887706,
      "grad_norm": 307200.0,
      "learning_rate": 9.63302752293578e-05,
      "loss": 9.5204,
      "step": 1200
    },
    {
      "epoch": 1.8586640851887706,
      "eval_loss": 9.286345481872559,
      "eval_runtime": 140.9651,
      "eval_samples_per_second": 10.641,
      "eval_steps_per_second": 2.66,
      "step": 1200
    }
  ],
  "logging_steps": 10,
  "max_steps": 6450,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 600,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.568007548325069e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}