{
  "best_metric": 0.12753550708293915,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 1.7204301075268817,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017204301075268817,
      "grad_norm": 4.601269245147705,
      "learning_rate": 2e-05,
      "loss": 0.4272,
      "step": 1
    },
    {
      "epoch": 0.017204301075268817,
      "eval_loss": 1.4783204793930054,
      "eval_runtime": 5.1438,
      "eval_samples_per_second": 76.208,
      "eval_steps_per_second": 9.526,
      "step": 1
    },
    {
      "epoch": 0.034408602150537634,
      "grad_norm": 7.236650466918945,
      "learning_rate": 4e-05,
      "loss": 0.6626,
      "step": 2
    },
    {
      "epoch": 0.05161290322580645,
      "grad_norm": 7.9963555335998535,
      "learning_rate": 6e-05,
      "loss": 0.6886,
      "step": 3
    },
    {
      "epoch": 0.06881720430107527,
      "grad_norm": 4.405614376068115,
      "learning_rate": 8e-05,
      "loss": 0.4014,
      "step": 4
    },
    {
      "epoch": 0.08602150537634409,
      "grad_norm": 3.2208263874053955,
      "learning_rate": 0.0001,
      "loss": 0.3021,
      "step": 5
    },
    {
      "epoch": 0.1032258064516129,
      "grad_norm": 1.8561677932739258,
      "learning_rate": 9.997266286704631e-05,
      "loss": 0.2245,
      "step": 6
    },
    {
      "epoch": 0.12043010752688173,
      "grad_norm": 1.6137471199035645,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.2223,
      "step": 7
    },
    {
      "epoch": 0.13763440860215054,
      "grad_norm": 0.9222277998924255,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.1546,
      "step": 8
    },
    {
      "epoch": 0.15483870967741936,
      "grad_norm": 0.9431182146072388,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.1754,
      "step": 9
    },
    {
      "epoch": 0.17204301075268819,
      "grad_norm": 0.7953192591667175,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.1822,
      "step": 10
    },
    {
      "epoch": 0.18924731182795698,
      "grad_norm": 0.9455572366714478,
      "learning_rate": 9.901899829374047e-05,
      "loss": 0.2185,
      "step": 11
    },
    {
      "epoch": 0.2064516129032258,
      "grad_norm": 0.8408288359642029,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.2108,
      "step": 12
    },
    {
      "epoch": 0.22365591397849463,
      "grad_norm": 0.35438165068626404,
      "learning_rate": 9.826044551386744e-05,
      "loss": 0.0773,
      "step": 13
    },
    {
      "epoch": 0.24086021505376345,
      "grad_norm": 0.42859965562820435,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.0717,
      "step": 14
    },
    {
      "epoch": 0.25806451612903225,
      "grad_norm": 0.36290422081947327,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.0868,
      "step": 15
    },
    {
      "epoch": 0.2752688172043011,
      "grad_norm": 0.5616369247436523,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.1069,
      "step": 16
    },
    {
      "epoch": 0.2924731182795699,
      "grad_norm": 0.47484615445137024,
      "learning_rate": 9.611448774886924e-05,
      "loss": 0.0989,
      "step": 17
    },
    {
      "epoch": 0.3096774193548387,
      "grad_norm": 0.4143848121166229,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.11,
      "step": 18
    },
    {
      "epoch": 0.32688172043010755,
      "grad_norm": 0.49859318137168884,
      "learning_rate": 9.473646649103818e-05,
      "loss": 0.114,
      "step": 19
    },
    {
      "epoch": 0.34408602150537637,
      "grad_norm": 0.5608797073364258,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.1247,
      "step": 20
    },
    {
      "epoch": 0.36129032258064514,
      "grad_norm": 0.5681672096252441,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.1305,
      "step": 21
    },
    {
      "epoch": 0.37849462365591396,
      "grad_norm": 0.5968943238258362,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.1441,
      "step": 22
    },
    {
      "epoch": 0.3956989247311828,
      "grad_norm": 0.5786952376365662,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.135,
      "step": 23
    },
    {
      "epoch": 0.4129032258064516,
      "grad_norm": 0.7746197581291199,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.1746,
      "step": 24
    },
    {
      "epoch": 0.43010752688172044,
      "grad_norm": 0.9171465039253235,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.2272,
      "step": 25
    },
    {
      "epoch": 0.43010752688172044,
      "eval_loss": 0.1376180797815323,
      "eval_runtime": 5.1567,
      "eval_samples_per_second": 76.018,
      "eval_steps_per_second": 9.502,
      "step": 25
    },
    {
      "epoch": 0.44731182795698926,
      "grad_norm": 0.16825996339321136,
      "learning_rate": 8.842005554284296e-05,
      "loss": 0.0449,
      "step": 26
    },
    {
      "epoch": 0.4645161290322581,
      "grad_norm": 0.2606010437011719,
      "learning_rate": 8.73410738492077e-05,
      "loss": 0.0656,
      "step": 27
    },
    {
      "epoch": 0.4817204301075269,
      "grad_norm": 0.3626805245876312,
      "learning_rate": 8.622126023955446e-05,
      "loss": 0.0898,
      "step": 28
    },
    {
      "epoch": 0.4989247311827957,
      "grad_norm": 0.3628605008125305,
      "learning_rate": 8.506183921362443e-05,
      "loss": 0.0909,
      "step": 29
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 0.34184515476226807,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.0935,
      "step": 30
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.4369655251502991,
      "learning_rate": 8.262928807620843e-05,
      "loss": 0.1039,
      "step": 31
    },
    {
      "epoch": 0.5505376344086022,
      "grad_norm": 0.4320714473724365,
      "learning_rate": 8.135881792367686e-05,
      "loss": 0.1077,
      "step": 32
    },
    {
      "epoch": 0.567741935483871,
      "grad_norm": 0.4757688045501709,
      "learning_rate": 8.005405736415126e-05,
      "loss": 0.1194,
      "step": 33
    },
    {
      "epoch": 0.5849462365591398,
      "grad_norm": 0.48429444432258606,
      "learning_rate": 7.871643313414718e-05,
      "loss": 0.1216,
      "step": 34
    },
    {
      "epoch": 0.6021505376344086,
      "grad_norm": 0.5377363562583923,
      "learning_rate": 7.734740790612136e-05,
      "loss": 0.142,
      "step": 35
    },
    {
      "epoch": 0.6193548387096774,
      "grad_norm": 0.5409618616104126,
      "learning_rate": 7.594847868906076e-05,
      "loss": 0.1612,
      "step": 36
    },
    {
      "epoch": 0.6365591397849463,
      "grad_norm": 0.7560884356498718,
      "learning_rate": 7.452117519152542e-05,
      "loss": 0.198,
      "step": 37
    },
    {
      "epoch": 0.6537634408602151,
      "grad_norm": 0.25169119238853455,
      "learning_rate": 7.30670581489344e-05,
      "loss": 0.0631,
      "step": 38
    },
    {
      "epoch": 0.6709677419354839,
      "grad_norm": 0.2085525542497635,
      "learning_rate": 7.158771761692464e-05,
      "loss": 0.0531,
      "step": 39
    },
    {
      "epoch": 0.6881720430107527,
      "grad_norm": 0.2509376108646393,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.0688,
      "step": 40
    },
    {
      "epoch": 0.7053763440860215,
      "grad_norm": 0.27264726161956787,
      "learning_rate": 6.855986244591104e-05,
      "loss": 0.0792,
      "step": 41
    },
    {
      "epoch": 0.7225806451612903,
      "grad_norm": 0.2584315240383148,
      "learning_rate": 6.701465872208216e-05,
      "loss": 0.0824,
      "step": 42
    },
    {
      "epoch": 0.7397849462365591,
      "grad_norm": 0.34367600083351135,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.0932,
      "step": 43
    },
    {
      "epoch": 0.7569892473118279,
      "grad_norm": 0.3771838843822479,
      "learning_rate": 6.387014543809223e-05,
      "loss": 0.1051,
      "step": 44
    },
    {
      "epoch": 0.7741935483870968,
      "grad_norm": 0.3859615921974182,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.1106,
      "step": 45
    },
    {
      "epoch": 0.7913978494623656,
      "grad_norm": 0.4388357698917389,
      "learning_rate": 6.066498153718735e-05,
      "loss": 0.1173,
      "step": 46
    },
    {
      "epoch": 0.8086021505376344,
      "grad_norm": 0.5637166500091553,
      "learning_rate": 5.90440267166055e-05,
      "loss": 0.131,
      "step": 47
    },
    {
      "epoch": 0.8258064516129032,
      "grad_norm": 0.47906917333602905,
      "learning_rate": 5.74131823855921e-05,
      "loss": 0.1548,
      "step": 48
    },
    {
      "epoch": 0.843010752688172,
      "grad_norm": 0.5929440855979919,
      "learning_rate": 5.577423184847932e-05,
      "loss": 0.1613,
      "step": 49
    },
    {
      "epoch": 0.8602150537634409,
      "grad_norm": 0.918541431427002,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.2385,
      "step": 50
    },
    {
      "epoch": 0.8602150537634409,
      "eval_loss": 0.13366764783859253,
      "eval_runtime": 5.1579,
      "eval_samples_per_second": 76.0,
      "eval_steps_per_second": 9.5,
      "step": 50
    },
    {
      "epoch": 0.8774193548387097,
      "grad_norm": 0.1686541885137558,
      "learning_rate": 5.247918773366112e-05,
      "loss": 0.0467,
      "step": 51
    },
    {
      "epoch": 0.8946236559139785,
      "grad_norm": 0.23044119775295258,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 0.0716,
      "step": 52
    },
    {
      "epoch": 0.9118279569892473,
      "grad_norm": 0.28096097707748413,
      "learning_rate": 4.917330276168208e-05,
      "loss": 0.0885,
      "step": 53
    },
    {
      "epoch": 0.9290322580645162,
      "grad_norm": 0.36806416511535645,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 0.1005,
      "step": 54
    },
    {
      "epoch": 0.946236559139785,
      "grad_norm": 0.3925113081932068,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.1102,
      "step": 55
    },
    {
      "epoch": 0.9634408602150538,
      "grad_norm": 0.4250904321670532,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 0.1243,
      "step": 56
    },
    {
      "epoch": 0.9806451612903225,
      "grad_norm": 0.47191616892814636,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 0.1558,
      "step": 57
    },
    {
      "epoch": 0.9978494623655914,
      "grad_norm": 0.6524373292922974,
      "learning_rate": 4.095597328339452e-05,
      "loss": 0.1878,
      "step": 58
    },
    {
      "epoch": 1.0150537634408603,
      "grad_norm": 1.1412904262542725,
      "learning_rate": 3.933501846281267e-05,
      "loss": 0.1142,
      "step": 59
    },
    {
      "epoch": 1.032258064516129,
      "grad_norm": 0.17924538254737854,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.0528,
      "step": 60
    },
    {
      "epoch": 1.049462365591398,
      "grad_norm": 0.2397976964712143,
      "learning_rate": 3.612985456190778e-05,
      "loss": 0.0629,
      "step": 61
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.23171919584274292,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.0708,
      "step": 62
    },
    {
      "epoch": 1.0838709677419356,
      "grad_norm": 0.26210978627204895,
      "learning_rate": 3.298534127791785e-05,
      "loss": 0.0732,
      "step": 63
    },
    {
      "epoch": 1.1010752688172043,
      "grad_norm": 0.2662266790866852,
      "learning_rate": 3.144013755408895e-05,
      "loss": 0.0717,
      "step": 64
    },
    {
      "epoch": 1.118279569892473,
      "grad_norm": 0.33063340187072754,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.0796,
      "step": 65
    },
    {
      "epoch": 1.135483870967742,
      "grad_norm": 0.32689982652664185,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 0.0801,
      "step": 66
    },
    {
      "epoch": 1.1526881720430107,
      "grad_norm": 0.32011306285858154,
      "learning_rate": 2.693294185106562e-05,
      "loss": 0.0885,
      "step": 67
    },
    {
      "epoch": 1.1698924731182796,
      "grad_norm": 0.40406063199043274,
      "learning_rate": 2.547882480847461e-05,
      "loss": 0.1112,
      "step": 68
    },
    {
      "epoch": 1.1870967741935483,
      "grad_norm": 0.3917810618877411,
      "learning_rate": 2.405152131093926e-05,
      "loss": 0.116,
      "step": 69
    },
    {
      "epoch": 1.2043010752688172,
      "grad_norm": 0.4483303427696228,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.1178,
      "step": 70
    },
    {
      "epoch": 1.221505376344086,
      "grad_norm": 0.23751965165138245,
      "learning_rate": 2.128356686585282e-05,
      "loss": 0.0572,
      "step": 71
    },
    {
      "epoch": 1.238709677419355,
      "grad_norm": 0.1653883159160614,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 0.0467,
      "step": 72
    },
    {
      "epoch": 1.2559139784946236,
      "grad_norm": 0.2108721137046814,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 0.059,
      "step": 73
    },
    {
      "epoch": 1.2731182795698925,
      "grad_norm": 0.2989300787448883,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 0.0675,
      "step": 74
    },
    {
      "epoch": 1.2903225806451613,
      "grad_norm": 0.24671778082847595,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.0677,
      "step": 75
    },
    {
      "epoch": 1.2903225806451613,
      "eval_loss": 0.1286679357290268,
      "eval_runtime": 5.1485,
      "eval_samples_per_second": 76.139,
      "eval_steps_per_second": 9.517,
      "step": 75
    },
    {
      "epoch": 1.3075268817204302,
      "grad_norm": 0.287640243768692,
      "learning_rate": 1.4938160786375572e-05,
      "loss": 0.0692,
      "step": 76
    },
    {
      "epoch": 1.324731182795699,
      "grad_norm": 0.3015075922012329,
      "learning_rate": 1.3778739760445552e-05,
      "loss": 0.0807,
      "step": 77
    },
    {
      "epoch": 1.3419354838709676,
      "grad_norm": 0.2982653081417084,
      "learning_rate": 1.2658926150792322e-05,
      "loss": 0.0783,
      "step": 78
    },
    {
      "epoch": 1.3591397849462366,
      "grad_norm": 0.3233046233654022,
      "learning_rate": 1.157994445715706e-05,
      "loss": 0.0851,
      "step": 79
    },
    {
      "epoch": 1.3763440860215055,
      "grad_norm": 0.31660306453704834,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 0.0887,
      "step": 80
    },
    {
      "epoch": 1.3935483870967742,
      "grad_norm": 0.3477661609649658,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.0887,
      "step": 81
    },
    {
      "epoch": 1.410752688172043,
      "grad_norm": 0.43353694677352905,
      "learning_rate": 8.599558442598998e-06,
      "loss": 0.1107,
      "step": 82
    },
    {
      "epoch": 1.4279569892473118,
      "grad_norm": 0.5698493719100952,
      "learning_rate": 7.695237378953223e-06,
      "loss": 0.1305,
      "step": 83
    },
    {
      "epoch": 1.4451612903225808,
      "grad_norm": 0.26135772466659546,
      "learning_rate": 6.837175952121306e-06,
      "loss": 0.0479,
      "step": 84
    },
    {
      "epoch": 1.4623655913978495,
      "grad_norm": 0.205588236451149,
      "learning_rate": 6.026312439675552e-06,
      "loss": 0.0516,
      "step": 85
    },
    {
      "epoch": 1.4795698924731182,
      "grad_norm": 0.24263671040534973,
      "learning_rate": 5.263533508961827e-06,
      "loss": 0.0628,
      "step": 86
    },
    {
      "epoch": 1.4967741935483871,
      "grad_norm": 0.26423949003219604,
      "learning_rate": 4.549673247541875e-06,
      "loss": 0.0681,
      "step": 87
    },
    {
      "epoch": 1.513978494623656,
      "grad_norm": 0.2682032287120819,
      "learning_rate": 3.885512251130763e-06,
      "loss": 0.0733,
      "step": 88
    },
    {
      "epoch": 1.5311827956989248,
      "grad_norm": 0.30018141865730286,
      "learning_rate": 3.271776770026963e-06,
      "loss": 0.077,
      "step": 89
    },
    {
      "epoch": 1.5483870967741935,
      "grad_norm": 0.34617161750793457,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 0.0858,
      "step": 90
    },
    {
      "epoch": 1.5655913978494622,
      "grad_norm": 0.3334634304046631,
      "learning_rate": 2.1982109232821178e-06,
      "loss": 0.0799,
      "step": 91
    },
    {
      "epoch": 1.5827956989247312,
      "grad_norm": 0.35072311758995056,
      "learning_rate": 1.7395544861325718e-06,
      "loss": 0.0855,
      "step": 92
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.3579203188419342,
      "learning_rate": 1.333670137599713e-06,
      "loss": 0.0947,
      "step": 93
    },
    {
      "epoch": 1.6172043010752688,
      "grad_norm": 0.4464438855648041,
      "learning_rate": 9.810017062595322e-07,
      "loss": 0.1047,
      "step": 94
    },
    {
      "epoch": 1.6344086021505375,
      "grad_norm": 0.4803522527217865,
      "learning_rate": 6.819348298638839e-07,
      "loss": 0.1119,
      "step": 95
    },
    {
      "epoch": 1.6516129032258065,
      "grad_norm": 0.2232884019613266,
      "learning_rate": 4.367965336512403e-07,
      "loss": 0.0485,
      "step": 96
    },
    {
      "epoch": 1.6688172043010754,
      "grad_norm": 0.1642189770936966,
      "learning_rate": 2.458548727494292e-07,
      "loss": 0.0433,
      "step": 97
    },
    {
      "epoch": 1.686021505376344,
      "grad_norm": 0.2120582014322281,
      "learning_rate": 1.0931863906127327e-07,
      "loss": 0.0515,
      "step": 98
    },
    {
      "epoch": 1.7032258064516128,
      "grad_norm": 0.25639230012893677,
      "learning_rate": 2.7337132953697554e-08,
      "loss": 0.0691,
      "step": 99
    },
    {
      "epoch": 1.7204301075268817,
      "grad_norm": 0.31680721044540405,
      "learning_rate": 0.0,
      "loss": 0.0791,
      "step": 100
    },
    {
      "epoch": 1.7204301075268817,
      "eval_loss": 0.12753550708293915,
      "eval_runtime": 5.1539,
      "eval_samples_per_second": 76.058,
      "eval_steps_per_second": 9.507,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.178927560327168e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}