{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.02284174099749883,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002284174099749883,
      "grad_norm": 0.6011635661125183,
      "learning_rate": 0.00019995065603657316,
      "loss": 2.2538,
      "step": 10
    },
    {
      "epoch": 0.0004568348199499766,
      "grad_norm": 0.8175456523895264,
      "learning_rate": 0.00019980267284282717,
      "loss": 2.1126,
      "step": 20
    },
    {
      "epoch": 0.0006852522299249648,
      "grad_norm": 1.1491563320159912,
      "learning_rate": 0.00019955619646030802,
      "loss": 2.228,
      "step": 30
    },
    {
      "epoch": 0.0009136696398999532,
      "grad_norm": 1.0827499628067017,
      "learning_rate": 0.0001992114701314478,
      "loss": 2.1703,
      "step": 40
    },
    {
      "epoch": 0.0011420870498749414,
      "grad_norm": 1.157465934753418,
      "learning_rate": 0.00019876883405951377,
      "loss": 2.0955,
      "step": 50
    },
    {
      "epoch": 0.0013705044598499297,
      "grad_norm": 1.0857897996902466,
      "learning_rate": 0.0001982287250728689,
      "loss": 2.1769,
      "step": 60
    },
    {
      "epoch": 0.0015989218698249181,
      "grad_norm": 1.377300500869751,
      "learning_rate": 0.00019759167619387476,
      "loss": 2.1335,
      "step": 70
    },
    {
      "epoch": 0.0018273392797999064,
      "grad_norm": 0.8308928608894348,
      "learning_rate": 0.0001968583161128631,
      "loss": 2.0746,
      "step": 80
    },
    {
      "epoch": 0.0020557566897748945,
      "grad_norm": 1.1545639038085938,
      "learning_rate": 0.0001960293685676943,
      "loss": 2.3118,
      "step": 90
    },
    {
      "epoch": 0.0022841740997498828,
      "grad_norm": 0.9956186413764954,
      "learning_rate": 0.00019510565162951537,
      "loss": 2.1476,
      "step": 100
    },
    {
      "epoch": 0.002512591509724871,
      "grad_norm": 1.1778929233551025,
      "learning_rate": 0.00019408807689542257,
      "loss": 1.9218,
      "step": 110
    },
    {
      "epoch": 0.0027410089196998593,
      "grad_norm": 1.2166578769683838,
      "learning_rate": 0.00019297764858882514,
      "loss": 2.1764,
      "step": 120
    },
    {
      "epoch": 0.002969426329674848,
      "grad_norm": 0.9831048846244812,
      "learning_rate": 0.00019177546256839812,
      "loss": 2.1327,
      "step": 130
    },
    {
      "epoch": 0.0031978437396498363,
      "grad_norm": 0.9150891900062561,
      "learning_rate": 0.00019048270524660196,
      "loss": 2.0719,
      "step": 140
    },
    {
      "epoch": 0.0034262611496248246,
      "grad_norm": 1.078007459640503,
      "learning_rate": 0.0001891006524188368,
      "loss": 2.1048,
      "step": 150
    },
    {
      "epoch": 0.003654678559599813,
      "grad_norm": 0.9422916173934937,
      "learning_rate": 0.00018763066800438636,
      "loss": 2.0419,
      "step": 160
    },
    {
      "epoch": 0.003883095969574801,
      "grad_norm": 1.0269567966461182,
      "learning_rate": 0.0001860742027003944,
      "loss": 2.0837,
      "step": 170
    },
    {
      "epoch": 0.004111513379549789,
      "grad_norm": 0.9426142573356628,
      "learning_rate": 0.00018443279255020152,
      "loss": 2.0422,
      "step": 180
    },
    {
      "epoch": 0.004339930789524778,
      "grad_norm": 1.018721103668213,
      "learning_rate": 0.00018270805742745617,
      "loss": 2.1384,
      "step": 190
    },
    {
      "epoch": 0.0045683481994997655,
      "grad_norm": 1.1313502788543701,
      "learning_rate": 0.00018090169943749476,
      "loss": 2.1318,
      "step": 200
    },
    {
      "epoch": 0.004796765609474754,
      "grad_norm": 0.6934864521026611,
      "learning_rate": 0.00017901550123756906,
      "loss": 2.2722,
      "step": 210
    },
    {
      "epoch": 0.005025183019449742,
      "grad_norm": 0.8822490572929382,
      "learning_rate": 0.00017705132427757895,
      "loss": 2.0774,
      "step": 220
    },
    {
      "epoch": 0.005253600429424731,
      "grad_norm": 1.1275163888931274,
      "learning_rate": 0.00017501110696304596,
      "loss": 2.1253,
      "step": 230
    },
    {
      "epoch": 0.005482017839399719,
      "grad_norm": 1.0035189390182495,
      "learning_rate": 0.00017289686274214118,
      "loss": 2.1624,
      "step": 240
    },
    {
      "epoch": 0.005710435249374707,
      "grad_norm": 0.8009467124938965,
      "learning_rate": 0.00017071067811865476,
      "loss": 2.0543,
      "step": 250
    },
    {
      "epoch": 0.005938852659349696,
      "grad_norm": 0.9582347869873047,
      "learning_rate": 0.00016845471059286887,
      "loss": 2.1542,
      "step": 260
    },
    {
      "epoch": 0.006167270069324684,
      "grad_norm": 1.0823897123336792,
      "learning_rate": 0.00016613118653236518,
      "loss": 2.3268,
      "step": 270
    },
    {
      "epoch": 0.006395687479299673,
      "grad_norm": 0.9078360795974731,
      "learning_rate": 0.000163742398974869,
      "loss": 2.0749,
      "step": 280
    },
    {
      "epoch": 0.0066241048892746604,
      "grad_norm": 1.0934821367263794,
      "learning_rate": 0.00016129070536529766,
      "loss": 2.2403,
      "step": 290
    },
    {
      "epoch": 0.006852522299249649,
      "grad_norm": 1.464321494102478,
      "learning_rate": 0.00015877852522924732,
      "loss": 2.0575,
      "step": 300
    },
    {
      "epoch": 0.007080939709224637,
      "grad_norm": 0.9363439679145813,
      "learning_rate": 0.00015620833778521307,
      "loss": 2.3062,
      "step": 310
    },
    {
      "epoch": 0.007309357119199626,
      "grad_norm": 1.1936748027801514,
      "learning_rate": 0.00015358267949789966,
      "loss": 2.081,
      "step": 320
    },
    {
      "epoch": 0.0075377745291746135,
      "grad_norm": 0.8682211637496948,
      "learning_rate": 0.00015090414157503714,
      "loss": 2.0289,
      "step": 330
    },
    {
      "epoch": 0.007766191939149602,
      "grad_norm": 1.123822569847107,
      "learning_rate": 0.00014817536741017152,
      "loss": 2.2525,
      "step": 340
    },
    {
      "epoch": 0.00799460934912459,
      "grad_norm": 0.8017176389694214,
      "learning_rate": 0.00014539904997395468,
      "loss": 2.213,
      "step": 350
    },
    {
      "epoch": 0.008223026759099578,
      "grad_norm": 0.8773420453071594,
      "learning_rate": 0.00014257792915650728,
      "loss": 2.0703,
      "step": 360
    },
    {
      "epoch": 0.008451444169074568,
      "grad_norm": 1.1905558109283447,
      "learning_rate": 0.00013971478906347806,
      "loss": 2.1984,
      "step": 370
    },
    {
      "epoch": 0.008679861579049555,
      "grad_norm": 0.8345734477043152,
      "learning_rate": 0.00013681245526846783,
      "loss": 2.0921,
      "step": 380
    },
    {
      "epoch": 0.008908278989024543,
      "grad_norm": 1.126680612564087,
      "learning_rate": 0.00013387379202452917,
      "loss": 2.0888,
      "step": 390
    },
    {
      "epoch": 0.009136696398999531,
      "grad_norm": 1.0113277435302734,
      "learning_rate": 0.00013090169943749476,
      "loss": 2.0344,
      "step": 400
    },
    {
      "epoch": 0.00936511380897452,
      "grad_norm": 1.6838539838790894,
      "learning_rate": 0.00012789911060392294,
      "loss": 2.0585,
      "step": 410
    },
    {
      "epoch": 0.009593531218949508,
      "grad_norm": 0.9599931240081787,
      "learning_rate": 0.0001248689887164855,
      "loss": 2.1937,
      "step": 420
    },
    {
      "epoch": 0.009821948628924496,
      "grad_norm": 1.091261386871338,
      "learning_rate": 0.00012181432413965428,
      "loss": 2.2075,
      "step": 430
    },
    {
      "epoch": 0.010050366038899484,
      "grad_norm": 1.618888020515442,
      "learning_rate": 0.00011873813145857249,
      "loss": 2.0918,
      "step": 440
    },
    {
      "epoch": 0.010278783448874474,
      "grad_norm": 0.8518305420875549,
      "learning_rate": 0.0001156434465040231,
      "loss": 2.0674,
      "step": 450
    },
    {
      "epoch": 0.010507200858849462,
      "grad_norm": 0.823520839214325,
      "learning_rate": 0.00011253332335643043,
      "loss": 2.118,
      "step": 460
    },
    {
      "epoch": 0.01073561826882445,
      "grad_norm": 1.0874645709991455,
      "learning_rate": 0.00010941083133185146,
      "loss": 2.1401,
      "step": 470
    },
    {
      "epoch": 0.010964035678799437,
      "grad_norm": 1.0509600639343262,
      "learning_rate": 0.00010627905195293135,
      "loss": 2.2394,
      "step": 480
    },
    {
      "epoch": 0.011192453088774427,
      "grad_norm": 0.8310794234275818,
      "learning_rate": 0.00010314107590781284,
      "loss": 2.09,
      "step": 490
    },
    {
      "epoch": 0.011420870498749415,
      "grad_norm": 0.963942289352417,
      "learning_rate": 0.0001,
      "loss": 2.0202,
      "step": 500
    },
    {
      "epoch": 0.011649287908724403,
      "grad_norm": 0.9792200326919556,
      "learning_rate": 9.685892409218717e-05,
      "loss": 2.2468,
      "step": 510
    },
    {
      "epoch": 0.011877705318699392,
      "grad_norm": 0.8218494653701782,
      "learning_rate": 9.372094804706867e-05,
      "loss": 2.3187,
      "step": 520
    },
    {
      "epoch": 0.01210612272867438,
      "grad_norm": 1.120417833328247,
      "learning_rate": 9.058916866814858e-05,
      "loss": 2.1024,
      "step": 530
    },
    {
      "epoch": 0.012334540138649368,
      "grad_norm": 1.0600074529647827,
      "learning_rate": 8.746667664356956e-05,
      "loss": 2.1056,
      "step": 540
    },
    {
      "epoch": 0.012562957548624356,
      "grad_norm": 1.0849493741989136,
      "learning_rate": 8.435655349597689e-05,
      "loss": 2.0486,
      "step": 550
    },
    {
      "epoch": 0.012791374958599345,
      "grad_norm": 0.9597050547599792,
      "learning_rate": 8.126186854142752e-05,
      "loss": 2.1524,
      "step": 560
    },
    {
      "epoch": 0.013019792368574333,
      "grad_norm": 0.6964913010597229,
      "learning_rate": 7.818567586034577e-05,
      "loss": 2.5253,
      "step": 570
    },
    {
      "epoch": 0.013248209778549321,
      "grad_norm": 0.8805552124977112,
      "learning_rate": 7.513101128351454e-05,
      "loss": 1.9732,
      "step": 580
    },
    {
      "epoch": 0.013476627188524309,
      "grad_norm": 0.8434686660766602,
      "learning_rate": 7.210088939607708e-05,
      "loss": 2.1449,
      "step": 590
    },
    {
      "epoch": 0.013705044598499298,
      "grad_norm": 0.7308034896850586,
      "learning_rate": 6.909830056250527e-05,
      "loss": 1.9493,
      "step": 600
    },
    {
      "epoch": 0.013933462008474286,
      "grad_norm": 0.7747200131416321,
      "learning_rate": 6.612620797547087e-05,
      "loss": 2.4289,
      "step": 610
    },
    {
      "epoch": 0.014161879418449274,
      "grad_norm": 0.9479460716247559,
      "learning_rate": 6.318754473153221e-05,
      "loss": 2.0873,
      "step": 620
    },
    {
      "epoch": 0.014390296828424262,
      "grad_norm": 0.8679899573326111,
      "learning_rate": 6.0285210936521955e-05,
      "loss": 2.1007,
      "step": 630
    },
    {
      "epoch": 0.014618714238399251,
      "grad_norm": 1.1402413845062256,
      "learning_rate": 5.7422070843492734e-05,
      "loss": 1.9974,
      "step": 640
    },
    {
      "epoch": 0.01484713164837424,
      "grad_norm": 0.944284975528717,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 2.1228,
      "step": 650
    },
    {
      "epoch": 0.015075549058349227,
      "grad_norm": 1.2122584581375122,
      "learning_rate": 5.182463258982846e-05,
      "loss": 2.1118,
      "step": 660
    },
    {
      "epoch": 0.015303966468324215,
      "grad_norm": 0.8726314306259155,
      "learning_rate": 4.909585842496287e-05,
      "loss": 2.0166,
      "step": 670
    },
    {
      "epoch": 0.015532383878299204,
      "grad_norm": 0.8935607075691223,
      "learning_rate": 4.6417320502100316e-05,
      "loss": 2.0992,
      "step": 680
    },
    {
      "epoch": 0.01576080128827419,
      "grad_norm": 0.7619888186454773,
      "learning_rate": 4.379166221478697e-05,
      "loss": 2.1272,
      "step": 690
    },
    {
      "epoch": 0.01598921869824918,
      "grad_norm": 0.9617013931274414,
      "learning_rate": 4.12214747707527e-05,
      "loss": 2.0397,
      "step": 700
    },
    {
      "epoch": 0.01621763610822417,
      "grad_norm": 0.7471845746040344,
      "learning_rate": 3.8709294634702376e-05,
      "loss": 2.1752,
      "step": 710
    },
    {
      "epoch": 0.016446053518199156,
      "grad_norm": 0.8926049470901489,
      "learning_rate": 3.6257601025131026e-05,
      "loss": 2.0067,
      "step": 720
    },
    {
      "epoch": 0.016674470928174145,
      "grad_norm": 0.885032594203949,
      "learning_rate": 3.386881346763483e-05,
      "loss": 2.0448,
      "step": 730
    },
    {
      "epoch": 0.016902888338149135,
      "grad_norm": 1.1092376708984375,
      "learning_rate": 3.154528940713113e-05,
      "loss": 2.1073,
      "step": 740
    },
    {
      "epoch": 0.01713130574812412,
      "grad_norm": 1.155680537223816,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 1.978,
      "step": 750
    },
    {
      "epoch": 0.01735972315809911,
      "grad_norm": 0.829304575920105,
      "learning_rate": 2.7103137257858868e-05,
      "loss": 1.9944,
      "step": 760
    },
    {
      "epoch": 0.0175881405680741,
      "grad_norm": 0.8414214849472046,
      "learning_rate": 2.4988893036954043e-05,
      "loss": 2.5413,
      "step": 770
    },
    {
      "epoch": 0.017816557978049086,
      "grad_norm": 0.9645959138870239,
      "learning_rate": 2.2948675722421086e-05,
      "loss": 2.2689,
      "step": 780
    },
    {
      "epoch": 0.018044975388024076,
      "grad_norm": 0.8575518727302551,
      "learning_rate": 2.098449876243096e-05,
      "loss": 2.1654,
      "step": 790
    },
    {
      "epoch": 0.018273392797999062,
      "grad_norm": 0.8750948309898376,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 2.2528,
      "step": 800
    },
    {
      "epoch": 0.01850181020797405,
      "grad_norm": 0.7940350770950317,
      "learning_rate": 1.7291942572543807e-05,
      "loss": 1.9905,
      "step": 810
    },
    {
      "epoch": 0.01873022761794904,
      "grad_norm": 0.9825338125228882,
      "learning_rate": 1.5567207449798515e-05,
      "loss": 2.0347,
      "step": 820
    },
    {
      "epoch": 0.018958645027924027,
      "grad_norm": 0.7255304455757141,
      "learning_rate": 1.3925797299605647e-05,
      "loss": 2.0235,
      "step": 830
    },
    {
      "epoch": 0.019187062437899017,
      "grad_norm": 0.9329121708869934,
      "learning_rate": 1.2369331995613665e-05,
      "loss": 2.0792,
      "step": 840
    },
    {
      "epoch": 0.019415479847874006,
      "grad_norm": 0.8776724934577942,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 2.0174,
      "step": 850
    },
    {
      "epoch": 0.019643897257848993,
      "grad_norm": 0.9908276796340942,
      "learning_rate": 9.517294753398064e-06,
      "loss": 1.9447,
      "step": 860
    },
    {
      "epoch": 0.019872314667823982,
      "grad_norm": 0.9467048645019531,
      "learning_rate": 8.224537431601886e-06,
      "loss": 2.1375,
      "step": 870
    },
    {
      "epoch": 0.02010073207779897,
      "grad_norm": 1.004712700843811,
      "learning_rate": 7.022351411174866e-06,
      "loss": 2.3284,
      "step": 880
    },
    {
      "epoch": 0.020329149487773958,
      "grad_norm": 1.0520464181900024,
      "learning_rate": 5.911923104577455e-06,
      "loss": 2.0863,
      "step": 890
    },
    {
      "epoch": 0.020557566897748947,
      "grad_norm": 0.8571903109550476,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 2.0996,
      "step": 900
    },
    {
      "epoch": 0.020785984307723934,
      "grad_norm": 0.9216386079788208,
      "learning_rate": 3.970631432305694e-06,
      "loss": 2.0802,
      "step": 910
    },
    {
      "epoch": 0.021014401717698923,
      "grad_norm": 0.9897679686546326,
      "learning_rate": 3.1416838871368924e-06,
      "loss": 2.2645,
      "step": 920
    },
    {
      "epoch": 0.021242819127673913,
      "grad_norm": 0.94163578748703,
      "learning_rate": 2.4083238061252567e-06,
      "loss": 2.1278,
      "step": 930
    },
    {
      "epoch": 0.0214712365376489,
      "grad_norm": 0.8382057547569275,
      "learning_rate": 1.771274927131139e-06,
      "loss": 2.1074,
      "step": 940
    },
    {
      "epoch": 0.02169965394762389,
      "grad_norm": 0.9168360829353333,
      "learning_rate": 1.231165940486234e-06,
      "loss": 1.993,
      "step": 950
    },
    {
      "epoch": 0.021928071357598874,
      "grad_norm": 1.0946999788284302,
      "learning_rate": 7.885298685522235e-07,
      "loss": 2.1184,
      "step": 960
    },
    {
      "epoch": 0.022156488767573864,
      "grad_norm": 0.8011813163757324,
      "learning_rate": 4.438035396920004e-07,
      "loss": 2.1275,
      "step": 970
    },
    {
      "epoch": 0.022384906177548854,
      "grad_norm": 1.2118070125579834,
      "learning_rate": 1.973271571728441e-07,
      "loss": 2.0635,
      "step": 980
    },
    {
      "epoch": 0.02261332358752384,
      "grad_norm": 0.8423234820365906,
      "learning_rate": 4.934396342684e-08,
      "loss": 2.049,
      "step": 990
    },
    {
      "epoch": 0.02284174099749883,
      "grad_norm": 1.2177627086639404,
      "learning_rate": 0.0,
      "loss": 2.2816,
      "step": 1000
    }
  ],
  "logging_steps": 10,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.681733252309811e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}