{
  "best_metric": 0.7902164459228516,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.4727103094386565,
  "eval_steps": 25,
  "global_step": 95,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004975897994091121,
      "grad_norm": 0.23871222138404846,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.0835,
      "step": 1
    },
    {
      "epoch": 0.004975897994091121,
      "eval_loss": 1.137823224067688,
      "eval_runtime": 1.0224,
      "eval_samples_per_second": 48.904,
      "eval_steps_per_second": 12.715,
      "step": 1
    },
    {
      "epoch": 0.009951795988182242,
      "grad_norm": 0.41071704030036926,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.9659,
      "step": 2
    },
    {
      "epoch": 0.014927693982273364,
      "grad_norm": 0.39753258228302,
      "learning_rate": 0.0001,
      "loss": 1.0785,
      "step": 3
    },
    {
      "epoch": 0.019903591976364484,
      "grad_norm": 0.3230868875980377,
      "learning_rate": 9.997376600647783e-05,
      "loss": 1.0597,
      "step": 4
    },
    {
      "epoch": 0.024879489970455606,
      "grad_norm": 0.27026239037513733,
      "learning_rate": 9.989509461357426e-05,
      "loss": 1.0604,
      "step": 5
    },
    {
      "epoch": 0.029855387964546728,
      "grad_norm": 0.43072497844696045,
      "learning_rate": 9.976407754861426e-05,
      "loss": 1.088,
      "step": 6
    },
    {
      "epoch": 0.034831285958637846,
      "grad_norm": 0.2670774459838867,
      "learning_rate": 9.958086757163489e-05,
      "loss": 1.0971,
      "step": 7
    },
    {
      "epoch": 0.03980718395272897,
      "grad_norm": 0.2514101266860962,
      "learning_rate": 9.934567829727386e-05,
      "loss": 1.1188,
      "step": 8
    },
    {
      "epoch": 0.04478308194682009,
      "grad_norm": 0.23422400653362274,
      "learning_rate": 9.905878394570453e-05,
      "loss": 1.1227,
      "step": 9
    },
    {
      "epoch": 0.04975897994091121,
      "grad_norm": 0.28173166513442993,
      "learning_rate": 9.872051902290737e-05,
      "loss": 1.1324,
      "step": 10
    },
    {
      "epoch": 0.054734877935002334,
      "grad_norm": 0.2882803976535797,
      "learning_rate": 9.833127793065098e-05,
      "loss": 1.1169,
      "step": 11
    },
    {
      "epoch": 0.059710775929093456,
      "grad_norm": 0.38014933466911316,
      "learning_rate": 9.789151450663723e-05,
      "loss": 1.0644,
      "step": 12
    },
    {
      "epoch": 0.06468667392318457,
      "grad_norm": 0.19597893953323364,
      "learning_rate": 9.740174149534693e-05,
      "loss": 1.0606,
      "step": 13
    },
    {
      "epoch": 0.06966257191727569,
      "grad_norm": 0.20651479065418243,
      "learning_rate": 9.686252995020249e-05,
      "loss": 0.866,
      "step": 14
    },
    {
      "epoch": 0.07463846991136681,
      "grad_norm": 0.22201478481292725,
      "learning_rate": 9.627450856774539e-05,
      "loss": 0.943,
      "step": 15
    },
    {
      "epoch": 0.07961436790545794,
      "grad_norm": 0.19648012518882751,
      "learning_rate": 9.563836295460398e-05,
      "loss": 0.8928,
      "step": 16
    },
    {
      "epoch": 0.08459026589954906,
      "grad_norm": 0.1876300871372223,
      "learning_rate": 9.495483482810688e-05,
      "loss": 0.964,
      "step": 17
    },
    {
      "epoch": 0.08956616389364018,
      "grad_norm": 0.19657588005065918,
      "learning_rate": 9.422472115147382e-05,
      "loss": 0.9633,
      "step": 18
    },
    {
      "epoch": 0.0945420618877313,
      "grad_norm": 0.19236579537391663,
      "learning_rate": 9.3448873204592e-05,
      "loss": 0.9711,
      "step": 19
    },
    {
      "epoch": 0.09951795988182242,
      "grad_norm": 0.2960802912712097,
      "learning_rate": 9.2628195591462e-05,
      "loss": 1.042,
      "step": 20
    },
    {
      "epoch": 0.10449385787591355,
      "grad_norm": 0.22051385045051575,
      "learning_rate": 9.176364518546989e-05,
      "loss": 0.9778,
      "step": 21
    },
    {
      "epoch": 0.10946975587000467,
      "grad_norm": 0.22082725167274475,
      "learning_rate": 9.08562300137157e-05,
      "loss": 1.0256,
      "step": 22
    },
    {
      "epoch": 0.11444565386409579,
      "grad_norm": 0.23512785136699677,
      "learning_rate": 8.990700808169889e-05,
      "loss": 1.0135,
      "step": 23
    },
    {
      "epoch": 0.11942155185818691,
      "grad_norm": 0.25989964604377747,
      "learning_rate": 8.891708613973126e-05,
      "loss": 0.9302,
      "step": 24
    },
    {
      "epoch": 0.12439744985227803,
      "grad_norm": 0.3857329487800598,
      "learning_rate": 8.788761839251559e-05,
      "loss": 0.9751,
      "step": 25
    },
    {
      "epoch": 0.12439744985227803,
      "eval_loss": 0.871764600276947,
      "eval_runtime": 1.0251,
      "eval_samples_per_second": 48.774,
      "eval_steps_per_second": 12.681,
      "step": 25
    },
    {
      "epoch": 0.12937334784636914,
      "grad_norm": 0.1513061821460724,
      "learning_rate": 8.681980515339464e-05,
      "loss": 1.0278,
      "step": 26
    },
    {
      "epoch": 0.13434924584046026,
      "grad_norm": 0.1964515596628189,
      "learning_rate": 8.571489144483944e-05,
      "loss": 0.8645,
      "step": 27
    },
    {
      "epoch": 0.13932514383455138,
      "grad_norm": 0.24235105514526367,
      "learning_rate": 8.457416554680877e-05,
      "loss": 0.8915,
      "step": 28
    },
    {
      "epoch": 0.1443010418286425,
      "grad_norm": 0.20897287130355835,
      "learning_rate": 8.339895749467238e-05,
      "loss": 0.9104,
      "step": 29
    },
    {
      "epoch": 0.14927693982273363,
      "grad_norm": 0.20914201438426971,
      "learning_rate": 8.219063752844926e-05,
      "loss": 0.9081,
      "step": 30
    },
    {
      "epoch": 0.15425283781682475,
      "grad_norm": 0.20824605226516724,
      "learning_rate": 8.095061449516903e-05,
      "loss": 0.9098,
      "step": 31
    },
    {
      "epoch": 0.15922873581091587,
      "grad_norm": 0.19852378964424133,
      "learning_rate": 7.968033420621935e-05,
      "loss": 0.9486,
      "step": 32
    },
    {
      "epoch": 0.164204633805007,
      "grad_norm": 0.306395024061203,
      "learning_rate": 7.838127775159452e-05,
      "loss": 0.955,
      "step": 33
    },
    {
      "epoch": 0.16918053179909812,
      "grad_norm": 0.2190617471933365,
      "learning_rate": 7.705495977301078e-05,
      "loss": 0.9907,
      "step": 34
    },
    {
      "epoch": 0.17415642979318924,
      "grad_norm": 0.21145637333393097,
      "learning_rate": 7.570292669790186e-05,
      "loss": 0.9608,
      "step": 35
    },
    {
      "epoch": 0.17913232778728036,
      "grad_norm": 0.24957385659217834,
      "learning_rate": 7.43267549363537e-05,
      "loss": 0.9218,
      "step": 36
    },
    {
      "epoch": 0.18410822578137148,
      "grad_norm": 0.2624641954898834,
      "learning_rate": 7.292804904308087e-05,
      "loss": 0.8877,
      "step": 37
    },
    {
      "epoch": 0.1890841237754626,
      "grad_norm": 0.15153071284294128,
      "learning_rate": 7.150843984658754e-05,
      "loss": 1.0023,
      "step": 38
    },
    {
      "epoch": 0.19406002176955373,
      "grad_norm": 0.1561977118253708,
      "learning_rate": 7.006958254769438e-05,
      "loss": 0.8461,
      "step": 39
    },
    {
      "epoch": 0.19903591976364485,
      "grad_norm": 0.18604518473148346,
      "learning_rate": 6.861315478964841e-05,
      "loss": 0.8691,
      "step": 40
    },
    {
      "epoch": 0.20401181775773597,
      "grad_norm": 0.16946768760681152,
      "learning_rate": 6.714085470206609e-05,
      "loss": 0.8581,
      "step": 41
    },
    {
      "epoch": 0.2089877157518271,
      "grad_norm": 0.1662844568490982,
      "learning_rate": 6.56543989209901e-05,
      "loss": 0.8686,
      "step": 42
    },
    {
      "epoch": 0.2139636137459182,
      "grad_norm": 0.16444341838359833,
      "learning_rate": 6.415552058736854e-05,
      "loss": 0.8854,
      "step": 43
    },
    {
      "epoch": 0.21893951174000933,
      "grad_norm": 0.17267289757728577,
      "learning_rate": 6.264596732629e-05,
      "loss": 0.8595,
      "step": 44
    },
    {
      "epoch": 0.22391540973410046,
      "grad_norm": 0.20546430349349976,
      "learning_rate": 6.112749920933111e-05,
      "loss": 0.9038,
      "step": 45
    },
    {
      "epoch": 0.22889130772819158,
      "grad_norm": 0.1972435861825943,
      "learning_rate": 5.960188670239154e-05,
      "loss": 0.9723,
      "step": 46
    },
    {
      "epoch": 0.2338672057222827,
      "grad_norm": 0.2849563658237457,
      "learning_rate": 5.80709086014102e-05,
      "loss": 0.9412,
      "step": 47
    },
    {
      "epoch": 0.23884310371637382,
      "grad_norm": 0.2184210568666458,
      "learning_rate": 5.653634995836856e-05,
      "loss": 0.9191,
      "step": 48
    },
    {
      "epoch": 0.24381900171046494,
      "grad_norm": 0.23135943710803986,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.8842,
      "step": 49
    },
    {
      "epoch": 0.24879489970455607,
      "grad_norm": 0.3442687690258026,
      "learning_rate": 5.346365004163145e-05,
      "loss": 0.8583,
      "step": 50
    },
    {
      "epoch": 0.24879489970455607,
      "eval_loss": 0.8090626001358032,
      "eval_runtime": 1.0254,
      "eval_samples_per_second": 48.761,
      "eval_steps_per_second": 12.678,
      "step": 50
    },
    {
      "epoch": 0.2537707976986472,
      "grad_norm": 0.12577220797538757,
      "learning_rate": 5.192909139858981e-05,
      "loss": 1.0566,
      "step": 51
    },
    {
      "epoch": 0.2587466956927383,
      "grad_norm": 0.16750161349773407,
      "learning_rate": 5.0398113297608465e-05,
      "loss": 0.8238,
      "step": 52
    },
    {
      "epoch": 0.26372259368682943,
      "grad_norm": 0.17324009537696838,
      "learning_rate": 4.887250079066892e-05,
      "loss": 0.8423,
      "step": 53
    },
    {
      "epoch": 0.2686984916809205,
      "grad_norm": 0.17715862393379211,
      "learning_rate": 4.7354032673710005e-05,
      "loss": 0.8862,
      "step": 54
    },
    {
      "epoch": 0.2736743896750117,
      "grad_norm": 0.17103923857212067,
      "learning_rate": 4.584447941263149e-05,
      "loss": 0.8712,
      "step": 55
    },
    {
      "epoch": 0.27865028766910277,
      "grad_norm": 0.16644009947776794,
      "learning_rate": 4.43456010790099e-05,
      "loss": 0.8458,
      "step": 56
    },
    {
      "epoch": 0.2836261856631939,
      "grad_norm": 0.17309409379959106,
      "learning_rate": 4.285914529793391e-05,
      "loss": 0.8803,
      "step": 57
    },
    {
      "epoch": 0.288602083657285,
      "grad_norm": 0.1786348819732666,
      "learning_rate": 4.13868452103516e-05,
      "loss": 0.8838,
      "step": 58
    },
    {
      "epoch": 0.29357798165137616,
      "grad_norm": 0.19110921025276184,
      "learning_rate": 3.9930417452305626e-05,
      "loss": 0.9268,
      "step": 59
    },
    {
      "epoch": 0.29855387964546726,
      "grad_norm": 0.198200523853302,
      "learning_rate": 3.8491560153412466e-05,
      "loss": 0.9084,
      "step": 60
    },
    {
      "epoch": 0.3035297776395584,
      "grad_norm": 0.22449031472206116,
      "learning_rate": 3.707195095691913e-05,
      "loss": 0.8733,
      "step": 61
    },
    {
      "epoch": 0.3085056756336495,
      "grad_norm": 0.26478344202041626,
      "learning_rate": 3.567324506364632e-05,
      "loss": 0.8939,
      "step": 62
    },
    {
      "epoch": 0.31348157362774065,
      "grad_norm": 0.13545911014080048,
      "learning_rate": 3.4297073302098156e-05,
      "loss": 1.0545,
      "step": 63
    },
    {
      "epoch": 0.31845747162183174,
      "grad_norm": 0.1377517431974411,
      "learning_rate": 3.2945040226989244e-05,
      "loss": 0.8699,
      "step": 64
    },
    {
      "epoch": 0.3234333696159229,
      "grad_norm": 0.1665552407503128,
      "learning_rate": 3.16187222484055e-05,
      "loss": 0.8334,
      "step": 65
    },
    {
      "epoch": 0.328409267610014,
      "grad_norm": 0.15971739590168,
      "learning_rate": 3.0319665793780648e-05,
      "loss": 0.8595,
      "step": 66
    },
    {
      "epoch": 0.33338516560410514,
      "grad_norm": 0.1528182327747345,
      "learning_rate": 2.9049385504830985e-05,
      "loss": 0.8277,
      "step": 67
    },
    {
      "epoch": 0.33836106359819623,
      "grad_norm": 0.1469763070344925,
      "learning_rate": 2.7809362471550748e-05,
      "loss": 0.8526,
      "step": 68
    },
    {
      "epoch": 0.3433369615922874,
      "grad_norm": 0.14643485844135284,
      "learning_rate": 2.660104250532764e-05,
      "loss": 0.8201,
      "step": 69
    },
    {
      "epoch": 0.3483128595863785,
      "grad_norm": 0.1613270789384842,
      "learning_rate": 2.5425834453191232e-05,
      "loss": 0.8523,
      "step": 70
    },
    {
      "epoch": 0.3532887575804696,
      "grad_norm": 0.17931152880191803,
      "learning_rate": 2.4285108555160577e-05,
      "loss": 0.8531,
      "step": 71
    },
    {
      "epoch": 0.3582646555745607,
      "grad_norm": 0.1984926164150238,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.8898,
      "step": 72
    },
    {
      "epoch": 0.36324055356865187,
      "grad_norm": 0.20060153305530548,
      "learning_rate": 2.2112381607484417e-05,
      "loss": 0.8438,
      "step": 73
    },
    {
      "epoch": 0.36821645156274296,
      "grad_norm": 0.24903373420238495,
      "learning_rate": 2.1082913860268765e-05,
      "loss": 0.8631,
      "step": 74
    },
    {
      "epoch": 0.37319234955683406,
      "grad_norm": 0.31300994753837585,
      "learning_rate": 2.0092991918301108e-05,
      "loss": 0.8392,
      "step": 75
    },
    {
      "epoch": 0.37319234955683406,
      "eval_loss": 0.7902164459228516,
      "eval_runtime": 1.0566,
      "eval_samples_per_second": 47.321,
      "eval_steps_per_second": 12.304,
      "step": 75
    },
    {
      "epoch": 0.3781682475509252,
      "grad_norm": 0.1049114391207695,
      "learning_rate": 1.91437699862843e-05,
      "loss": 0.9218,
      "step": 76
    },
    {
      "epoch": 0.3831441455450163,
      "grad_norm": 0.12478011101484299,
      "learning_rate": 1.8236354814530112e-05,
      "loss": 0.7758,
      "step": 77
    },
    {
      "epoch": 0.38812004353910745,
      "grad_norm": 0.1491149365901947,
      "learning_rate": 1.7371804408538024e-05,
      "loss": 0.8426,
      "step": 78
    },
    {
      "epoch": 0.39309594153319855,
      "grad_norm": 0.1671927273273468,
      "learning_rate": 1.6551126795408016e-05,
      "loss": 0.8121,
      "step": 79
    },
    {
      "epoch": 0.3980718395272897,
      "grad_norm": 0.17548252642154694,
      "learning_rate": 1.577527884852619e-05,
      "loss": 0.8689,
      "step": 80
    },
    {
      "epoch": 0.4030477375213808,
      "grad_norm": 0.1623646467924118,
      "learning_rate": 1.5045165171893116e-05,
      "loss": 0.8746,
      "step": 81
    },
    {
      "epoch": 0.40802363551547194,
      "grad_norm": 0.17200708389282227,
      "learning_rate": 1.4361637045396029e-05,
      "loss": 0.907,
      "step": 82
    },
    {
      "epoch": 0.41299953350956303,
      "grad_norm": 0.1750720888376236,
      "learning_rate": 1.3725491432254624e-05,
      "loss": 0.8647,
      "step": 83
    },
    {
      "epoch": 0.4179754315036542,
      "grad_norm": 0.16917237639427185,
      "learning_rate": 1.313747004979751e-05,
      "loss": 0.8754,
      "step": 84
    },
    {
      "epoch": 0.4229513294977453,
      "grad_norm": 0.19028206169605255,
      "learning_rate": 1.2598258504653081e-05,
      "loss": 0.8705,
      "step": 85
    },
    {
      "epoch": 0.4279272274918364,
      "grad_norm": 0.20847304165363312,
      "learning_rate": 1.2108485493362765e-05,
      "loss": 0.8799,
      "step": 86
    },
    {
      "epoch": 0.4329031254859275,
      "grad_norm": 0.2649713456630707,
      "learning_rate": 1.1668722069349041e-05,
      "loss": 0.7941,
      "step": 87
    },
    {
      "epoch": 0.43787902348001867,
      "grad_norm": 0.12713563442230225,
      "learning_rate": 1.1279480977092635e-05,
      "loss": 0.8887,
      "step": 88
    },
    {
      "epoch": 0.44285492147410976,
      "grad_norm": 0.11893071234226227,
      "learning_rate": 1.094121605429547e-05,
      "loss": 0.9033,
      "step": 89
    },
    {
      "epoch": 0.4478308194682009,
      "grad_norm": 0.12651102244853973,
      "learning_rate": 1.0654321702726141e-05,
      "loss": 0.782,
      "step": 90
    },
    {
      "epoch": 0.452806717462292,
      "grad_norm": 0.12543635070323944,
      "learning_rate": 1.0419132428365116e-05,
      "loss": 0.781,
      "step": 91
    },
    {
      "epoch": 0.45778261545638316,
      "grad_norm": 0.12762346863746643,
      "learning_rate": 1.0235922451385733e-05,
      "loss": 0.8043,
      "step": 92
    },
    {
      "epoch": 0.46275851345047425,
      "grad_norm": 0.1441301554441452,
      "learning_rate": 1.0104905386425733e-05,
      "loss": 0.8657,
      "step": 93
    },
    {
      "epoch": 0.4677344114445654,
      "grad_norm": 0.2067331075668335,
      "learning_rate": 1.002623399352217e-05,
      "loss": 0.8734,
      "step": 94
    },
    {
      "epoch": 0.4727103094386565,
      "grad_norm": 0.1604580581188202,
      "learning_rate": 1e-05,
      "loss": 0.8626,
      "step": 95
    }
  ],
  "logging_steps": 1,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5834911449546752e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}