{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9981298423724285,
"eval_steps": 200,
"global_step": 467,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0021373230029388193,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.7276527881622314,
"logps": -123.19757843017578,
"loss": -0.7819,
"step": 1
},
{
"epoch": 0.010686615014694095,
"grad_norm": 0.0,
"learning_rate": 0.0,
"logits": -2.8715224266052246,
"logps": -234.59034729003906,
"loss": -0.7834,
"step": 5
},
{
"epoch": 0.02137323002938819,
"grad_norm": 62.58979166531292,
"learning_rate": 3.191489361702127e-08,
"logits": -2.846045732498169,
"logps": -248.165771484375,
"loss": -0.7829,
"step": 10
},
{
"epoch": 0.03205984504408229,
"grad_norm": 165.21106318043798,
"learning_rate": 7.446808510638298e-08,
"logits": -2.777956485748291,
"logps": -229.2180938720703,
"loss": -0.7878,
"step": 15
},
{
"epoch": 0.04274646005877638,
"grad_norm": 73.29436509866687,
"learning_rate": 1.2765957446808508e-07,
"logits": -2.769435405731201,
"logps": -204.04058837890625,
"loss": -0.86,
"step": 20
},
{
"epoch": 0.053433075073470476,
"grad_norm": 43.89267095591359,
"learning_rate": 1.8085106382978725e-07,
"logits": -2.9436452388763428,
"logps": -289.7768859863281,
"loss": -0.939,
"step": 25
},
{
"epoch": 0.06411969008816458,
"grad_norm": 29.65124187980817,
"learning_rate": 2.3404255319148937e-07,
"logits": -2.9300923347473145,
"logps": -277.5953674316406,
"loss": -0.9585,
"step": 30
},
{
"epoch": 0.07480630510285867,
"grad_norm": 27.17018222606885,
"learning_rate": 2.872340425531915e-07,
"logits": -2.9261233806610107,
"logps": -234.77285766601562,
"loss": -0.9666,
"step": 35
},
{
"epoch": 0.08549292011755276,
"grad_norm": 23.213967474767983,
"learning_rate": 3.404255319148936e-07,
"logits": -2.960068464279175,
"logps": -236.64358520507812,
"loss": -0.9697,
"step": 40
},
{
"epoch": 0.09617953513224686,
"grad_norm": 21.820617838068017,
"learning_rate": 3.9361702127659574e-07,
"logits": -2.910217046737671,
"logps": -254.3408203125,
"loss": -0.9706,
"step": 45
},
{
"epoch": 0.10686615014694095,
"grad_norm": 19.42383634052037,
"learning_rate": 4.4680851063829783e-07,
"logits": -2.80342960357666,
"logps": -266.91448974609375,
"loss": -0.9712,
"step": 50
},
{
"epoch": 0.11755276516163506,
"grad_norm": 19.073870329982725,
"learning_rate": 5e-07,
"logits": -3.0574581623077393,
"logps": -278.97833251953125,
"loss": -0.9752,
"step": 55
},
{
"epoch": 0.12823938017632916,
"grad_norm": 22.140537506897275,
"learning_rate": 4.998251761970996e-07,
"logits": -2.982924699783325,
"logps": -246.08999633789062,
"loss": -0.9711,
"step": 60
},
{
"epoch": 0.13892599519102325,
"grad_norm": 22.027187117031247,
"learning_rate": 4.993009492952949e-07,
"logits": -3.066633939743042,
"logps": -287.9792175292969,
"loss": -0.9739,
"step": 65
},
{
"epoch": 0.14961261020571734,
"grad_norm": 28.360950734285428,
"learning_rate": 4.984280524733107e-07,
"logits": -2.9325830936431885,
"logps": -254.9385528564453,
"loss": -0.969,
"step": 70
},
{
"epoch": 0.16029922522041143,
"grad_norm": 26.470079510381215,
"learning_rate": 4.972077065562821e-07,
"logits": -2.8176350593566895,
"logps": -247.518798828125,
"loss": -0.9746,
"step": 75
},
{
"epoch": 0.17098584023510552,
"grad_norm": 19.869439451969985,
"learning_rate": 4.956416183083221e-07,
"logits": -3.0621449947357178,
"logps": -289.036376953125,
"loss": -0.974,
"step": 80
},
{
"epoch": 0.18167245524979964,
"grad_norm": 18.092489458888704,
"learning_rate": 4.937319780454559e-07,
"logits": -2.8688838481903076,
"logps": -279.94183349609375,
"loss": -0.9776,
"step": 85
},
{
"epoch": 0.19235907026449373,
"grad_norm": 23.563928671841666,
"learning_rate": 4.91481456572267e-07,
"logits": -3.0502922534942627,
"logps": -219.67733764648438,
"loss": -0.9768,
"step": 90
},
{
"epoch": 0.20304568527918782,
"grad_norm": 18.6682864992676,
"learning_rate": 4.888932014465352e-07,
"logits": -2.9103236198425293,
"logps": -270.4436340332031,
"loss": -0.9753,
"step": 95
},
{
"epoch": 0.2137323002938819,
"grad_norm": 18.181568458747662,
"learning_rate": 4.859708325770919e-07,
"logits": -2.9558568000793457,
"logps": -222.9886932373047,
"loss": -0.9758,
"step": 100
},
{
"epoch": 0.224418915308576,
"grad_norm": 22.1137260269499,
"learning_rate": 4.82718437161051e-07,
"logits": -3.025911808013916,
"logps": -265.393310546875,
"loss": -0.9784,
"step": 105
},
{
"epoch": 0.2351055303232701,
"grad_norm": 18.34089290810698,
"learning_rate": 4.79140563967494e-07,
"logits": -3.0452187061309814,
"logps": -291.10003662109375,
"loss": -0.977,
"step": 110
},
{
"epoch": 0.2457921453379642,
"grad_norm": 24.000868485282172,
"learning_rate": 4.752422169756047e-07,
"logits": -2.9980249404907227,
"logps": -270.8134765625,
"loss": -0.9769,
"step": 115
},
{
"epoch": 0.2564787603526583,
"grad_norm": 22.432824006478324,
"learning_rate": 4.710288483761524e-07,
"logits": -3.0276284217834473,
"logps": -242.854248046875,
"loss": -0.9781,
"step": 120
},
{
"epoch": 0.2671653753673524,
"grad_norm": 19.623943572874033,
"learning_rate": 4.6650635094610966e-07,
"logits": -2.9869930744171143,
"logps": -264.6936340332031,
"loss": -0.9784,
"step": 125
},
{
"epoch": 0.2778519903820465,
"grad_norm": 16.514800564439714,
"learning_rate": 4.6168104980707103e-07,
"logits": -3.074154853820801,
"logps": -254.39559936523438,
"loss": -0.9789,
"step": 130
},
{
"epoch": 0.2885386053967406,
"grad_norm": 17.940806695880376,
"learning_rate": 4.565596935789987e-07,
"logits": -3.064635753631592,
"logps": -253.6359405517578,
"loss": -0.9772,
"step": 135
},
{
"epoch": 0.2992252204114347,
"grad_norm": 15.99587855831518,
"learning_rate": 4.511494449416671e-07,
"logits": -3.021132707595825,
"logps": -291.09625244140625,
"loss": -0.9776,
"step": 140
},
{
"epoch": 0.30991183542612877,
"grad_norm": 15.5792510590759,
"learning_rate": 4.4545787061700746e-07,
"logits": -2.853203296661377,
"logps": -281.72210693359375,
"loss": -0.9771,
"step": 145
},
{
"epoch": 0.32059845044082286,
"grad_norm": 17.480990084091566,
"learning_rate": 4.394929307863632e-07,
"logits": -2.993375539779663,
"logps": -265.2592468261719,
"loss": -0.9753,
"step": 150
},
{
"epoch": 0.33128506545551695,
"grad_norm": 17.70526730878467,
"learning_rate": 4.332629679574565e-07,
"logits": -3.0605502128601074,
"logps": -305.0889587402344,
"loss": -0.9766,
"step": 155
},
{
"epoch": 0.34197168047021104,
"grad_norm": 18.40262893823541,
"learning_rate": 4.2677669529663686e-07,
"logits": -2.9697351455688477,
"logps": -306.7701721191406,
"loss": -0.9792,
"step": 160
},
{
"epoch": 0.3526582954849052,
"grad_norm": 20.54973812877713,
"learning_rate": 4.200431844427298e-07,
"logits": -3.0495619773864746,
"logps": -295.7186279296875,
"loss": -0.9785,
"step": 165
},
{
"epoch": 0.36334491049959927,
"grad_norm": 23.008419291973887,
"learning_rate": 4.130718528195303e-07,
"logits": -2.997997283935547,
"logps": -275.9290466308594,
"loss": -0.9772,
"step": 170
},
{
"epoch": 0.37403152551429336,
"grad_norm": 22.007125412303857,
"learning_rate": 4.058724504646834e-07,
"logits": -2.998748779296875,
"logps": -276.2112731933594,
"loss": -0.9773,
"step": 175
},
{
"epoch": 0.38471814052898745,
"grad_norm": 14.558828760504346,
"learning_rate": 3.9845504639337535e-07,
"logits": -3.035895586013794,
"logps": -280.32708740234375,
"loss": -0.9778,
"step": 180
},
{
"epoch": 0.39540475554368154,
"grad_norm": 15.094528036756223,
"learning_rate": 3.908300145159055e-07,
"logits": -2.99044132232666,
"logps": -231.31869506835938,
"loss": -0.9782,
"step": 185
},
{
"epoch": 0.40609137055837563,
"grad_norm": 19.931751129133623,
"learning_rate": 3.8300801912883414e-07,
"logits": -3.1248080730438232,
"logps": -272.81658935546875,
"loss": -0.9794,
"step": 190
},
{
"epoch": 0.4167779855730697,
"grad_norm": 19.163508181353045,
"learning_rate": 3.75e-07,
"logits": -3.047405242919922,
"logps": -332.17578125,
"loss": -0.979,
"step": 195
},
{
"epoch": 0.4274646005877638,
"grad_norm": 16.407530767721205,
"learning_rate": 3.668171570682655e-07,
"logits": -3.0323641300201416,
"logps": -251.59506225585938,
"loss": -0.9788,
"step": 200
},
{
"epoch": 0.4274646005877638,
"eval_logits": -3.0493266582489014,
"eval_logps": -289.3082275390625,
"eval_loss": -0.9775694608688354,
"eval_runtime": 586.0302,
"eval_samples_per_second": 3.358,
"eval_steps_per_second": 0.21,
"step": 200
},
{
"epoch": 0.4381512156024579,
"grad_norm": 18.72596941199976,
"learning_rate": 3.584709347793895e-07,
"logits": -2.9899675846099854,
"logps": -264.5972595214844,
"loss": -0.9774,
"step": 205
},
{
"epoch": 0.448837830617152,
"grad_norm": 14.684631204607477,
"learning_rate": 3.499730060799352e-07,
"logits": -2.9389395713806152,
"logps": -270.917236328125,
"loss": -0.977,
"step": 210
},
{
"epoch": 0.45952444563184613,
"grad_norm": 19.60342980482045,
"learning_rate": 3.413352560915988e-07,
"logits": -2.9131505489349365,
"logps": -250.50747680664062,
"loss": -0.9779,
"step": 215
},
{
"epoch": 0.4702110606465402,
"grad_norm": 12.44502702657658,
"learning_rate": 3.325697654887918e-07,
"logits": -2.8331167697906494,
"logps": -209.47265625,
"loss": -0.9759,
"step": 220
},
{
"epoch": 0.4808976756612343,
"grad_norm": 19.223731552060446,
"learning_rate": 3.2368879360272606e-07,
"logits": -3.003699779510498,
"logps": -293.6314392089844,
"loss": -0.9776,
"step": 225
},
{
"epoch": 0.4915842906759284,
"grad_norm": 18.20205804125661,
"learning_rate": 3.147047612756302e-07,
"logits": -3.0232555866241455,
"logps": -305.18316650390625,
"loss": -0.9783,
"step": 230
},
{
"epoch": 0.5022709056906225,
"grad_norm": 16.46859278231364,
"learning_rate": 3.056302334890786e-07,
"logits": -2.9345574378967285,
"logps": -237.68637084960938,
"loss": -0.9797,
"step": 235
},
{
"epoch": 0.5129575207053166,
"grad_norm": 16.465984487681027,
"learning_rate": 2.964779017907287e-07,
"logits": -2.9307010173797607,
"logps": -286.076171875,
"loss": -0.9805,
"step": 240
},
{
"epoch": 0.5236441357200107,
"grad_norm": 17.54623610211809,
"learning_rate": 2.872605665440436e-07,
"logits": -2.974175453186035,
"logps": -262.3833312988281,
"loss": -0.9787,
"step": 245
},
{
"epoch": 0.5343307507347048,
"grad_norm": 18.88275216616494,
"learning_rate": 2.7799111902582693e-07,
"logits": -3.046766996383667,
"logps": -251.1654052734375,
"loss": -0.9818,
"step": 250
},
{
"epoch": 0.5450173657493989,
"grad_norm": 16.60487185722242,
"learning_rate": 2.6868252339660607e-07,
"logits": -2.889235496520996,
"logps": -254.0398712158203,
"loss": -0.9779,
"step": 255
},
{
"epoch": 0.555703980764093,
"grad_norm": 19.312080394342164,
"learning_rate": 2.593477985690815e-07,
"logits": -2.904877185821533,
"logps": -260.0421142578125,
"loss": -0.9792,
"step": 260
},
{
"epoch": 0.566390595778787,
"grad_norm": 17.837317168454984,
"learning_rate": 2.5e-07,
"logits": -3.0580921173095703,
"logps": -253.33425903320312,
"loss": -0.9783,
"step": 265
},
{
"epoch": 0.5770772107934812,
"grad_norm": 19.59229912598117,
"learning_rate": 2.406522014309186e-07,
"logits": -2.8213086128234863,
"logps": -248.05899047851562,
"loss": -0.9789,
"step": 270
},
{
"epoch": 0.5877638258081752,
"grad_norm": 19.31749257546076,
"learning_rate": 2.3131747660339394e-07,
"logits": -3.022545576095581,
"logps": -261.65911865234375,
"loss": -0.9795,
"step": 275
},
{
"epoch": 0.5984504408228694,
"grad_norm": 18.893405028741554,
"learning_rate": 2.2200888097417302e-07,
"logits": -2.8628716468811035,
"logps": -276.71771240234375,
"loss": -0.9787,
"step": 280
},
{
"epoch": 0.6091370558375635,
"grad_norm": 12.031725724871942,
"learning_rate": 2.1273943345595635e-07,
"logits": -2.8501031398773193,
"logps": -225.07083129882812,
"loss": -0.9802,
"step": 285
},
{
"epoch": 0.6198236708522575,
"grad_norm": 17.440618788817403,
"learning_rate": 2.0352209820927135e-07,
"logits": -2.9809391498565674,
"logps": -279.31085205078125,
"loss": -0.98,
"step": 290
},
{
"epoch": 0.6305102858669517,
"grad_norm": 17.533312297864594,
"learning_rate": 1.9436976651092142e-07,
"logits": -2.8259799480438232,
"logps": -233.43637084960938,
"loss": -0.9791,
"step": 295
},
{
"epoch": 0.6411969008816457,
"grad_norm": 19.09114013567347,
"learning_rate": 1.8529523872436977e-07,
"logits": -2.877838134765625,
"logps": -250.38919067382812,
"loss": -0.979,
"step": 300
},
{
"epoch": 0.6518835158963399,
"grad_norm": 14.469325828491328,
"learning_rate": 1.763112063972739e-07,
"logits": -2.9694724082946777,
"logps": -265.880615234375,
"loss": -0.9795,
"step": 305
},
{
"epoch": 0.6625701309110339,
"grad_norm": 13.892022212321942,
"learning_rate": 1.674302345112083e-07,
"logits": -2.8365535736083984,
"logps": -249.2300567626953,
"loss": -0.9815,
"step": 310
},
{
"epoch": 0.673256745925728,
"grad_norm": 16.882467097549785,
"learning_rate": 1.5866474390840124e-07,
"logits": -3.010043144226074,
"logps": -297.1403503417969,
"loss": -0.9803,
"step": 315
},
{
"epoch": 0.6839433609404221,
"grad_norm": 16.561775281344463,
"learning_rate": 1.500269939200648e-07,
"logits": -2.910022735595703,
"logps": -245.9589080810547,
"loss": -0.981,
"step": 320
},
{
"epoch": 0.6946299759551162,
"grad_norm": 11.956009192905249,
"learning_rate": 1.4152906522061047e-07,
"logits": -3.090505599975586,
"logps": -297.404052734375,
"loss": -0.9801,
"step": 325
},
{
"epoch": 0.7053165909698104,
"grad_norm": 20.558342213046632,
"learning_rate": 1.3318284293173449e-07,
"logits": -3.017181396484375,
"logps": -282.6482238769531,
"loss": -0.9807,
"step": 330
},
{
"epoch": 0.7160032059845044,
"grad_norm": 13.386951582952191,
"learning_rate": 1.2500000000000005e-07,
"logits": -2.884382963180542,
"logps": -307.8055419921875,
"loss": -0.982,
"step": 335
},
{
"epoch": 0.7266898209991985,
"grad_norm": 20.886776560670956,
"learning_rate": 1.1699198087116588e-07,
"logits": -2.919635057449341,
"logps": -249.56100463867188,
"loss": -0.9798,
"step": 340
},
{
"epoch": 0.7373764360138926,
"grad_norm": 18.641185325431465,
"learning_rate": 1.0916998548409447e-07,
"logits": -2.8742291927337646,
"logps": -281.08062744140625,
"loss": -0.9797,
"step": 345
},
{
"epoch": 0.7480630510285867,
"grad_norm": 12.516321612741994,
"learning_rate": 1.0154495360662463e-07,
"logits": -2.859714984893799,
"logps": -246.27450561523438,
"loss": -0.9812,
"step": 350
},
{
"epoch": 0.7587496660432808,
"grad_norm": 15.783450182742786,
"learning_rate": 9.412754953531663e-08,
"logits": -2.9711270332336426,
"logps": -252.8061981201172,
"loss": -0.9806,
"step": 355
},
{
"epoch": 0.7694362810579749,
"grad_norm": 16.203610890848484,
"learning_rate": 8.692814718046978e-08,
"logits": -2.942884683609009,
"logps": -284.9074401855469,
"loss": -0.9813,
"step": 360
},
{
"epoch": 0.7801228960726689,
"grad_norm": 15.344868690260943,
"learning_rate": 7.99568155572701e-08,
"logits": -2.9541680812835693,
"logps": -254.73123168945312,
"loss": -0.9793,
"step": 365
},
{
"epoch": 0.7908095110873631,
"grad_norm": 15.263546884716034,
"learning_rate": 7.322330470336313e-08,
"logits": -2.816589832305908,
"logps": -258.56964111328125,
"loss": -0.9813,
"step": 370
},
{
"epoch": 0.8014961261020572,
"grad_norm": 15.150826166868379,
"learning_rate": 6.673703204254347e-08,
"logits": -2.874589681625366,
"logps": -264.58929443359375,
"loss": -0.9818,
"step": 375
},
{
"epoch": 0.8121827411167513,
"grad_norm": 17.746820632954226,
"learning_rate": 6.050706921363672e-08,
"logits": -2.811462879180908,
"logps": -274.1175842285156,
"loss": -0.9821,
"step": 380
},
{
"epoch": 0.8228693561314454,
"grad_norm": 17.866909167465558,
"learning_rate": 5.454212938299255e-08,
"logits": -2.751802444458008,
"logps": -265.1951904296875,
"loss": -0.981,
"step": 385
},
{
"epoch": 0.8335559711461394,
"grad_norm": 19.146717420675806,
"learning_rate": 4.885055505833291e-08,
"logits": -2.872286319732666,
"logps": -271.44451904296875,
"loss": -0.9816,
"step": 390
},
{
"epoch": 0.8442425861608336,
"grad_norm": 16.977738077995557,
"learning_rate": 4.3440306421001324e-08,
"logits": -2.9000906944274902,
"logps": -246.639892578125,
"loss": -0.9818,
"step": 395
},
{
"epoch": 0.8549292011755276,
"grad_norm": 15.148125130733664,
"learning_rate": 3.831895019292897e-08,
"logits": -2.962857723236084,
"logps": -276.59356689453125,
"loss": -0.9812,
"step": 400
},
{
"epoch": 0.8549292011755276,
"eval_logits": -2.9494612216949463,
"eval_logps": -291.1517028808594,
"eval_loss": -0.9810261130332947,
"eval_runtime": 560.2713,
"eval_samples_per_second": 3.513,
"eval_steps_per_second": 0.22,
"step": 400
},
{
"epoch": 0.8656158161902218,
"grad_norm": 17.51200256618434,
"learning_rate": 3.349364905389032e-08,
"logits": -2.8892321586608887,
"logps": -291.7529296875,
"loss": -0.9807,
"step": 405
},
{
"epoch": 0.8763024312049158,
"grad_norm": 22.832692473414674,
"learning_rate": 2.8971151623847584e-08,
"logits": -2.803345203399658,
"logps": -268.4388122558594,
"loss": -0.9811,
"step": 410
},
{
"epoch": 0.88698904621961,
"grad_norm": 17.595837977158578,
"learning_rate": 2.475778302439524e-08,
"logits": -2.8722121715545654,
"logps": -251.7541961669922,
"loss": -0.9823,
"step": 415
},
{
"epoch": 0.897675661234304,
"grad_norm": 18.312171813693368,
"learning_rate": 2.085943603250595e-08,
"logits": -2.9985263347625732,
"logps": -270.0942077636719,
"loss": -0.9802,
"step": 420
},
{
"epoch": 0.9083622762489981,
"grad_norm": 16.517443174199403,
"learning_rate": 1.7281562838948966e-08,
"logits": -2.8859758377075195,
"logps": -265.142578125,
"loss": -0.9812,
"step": 425
},
{
"epoch": 0.9190488912636923,
"grad_norm": 19.65350536281161,
"learning_rate": 1.4029167422908105e-08,
"logits": -2.95444393157959,
"logps": -290.2696228027344,
"loss": -0.9818,
"step": 430
},
{
"epoch": 0.9297355062783863,
"grad_norm": 15.855089650199325,
"learning_rate": 1.1106798553464802e-08,
"logits": -2.9999821186065674,
"logps": -269.6681213378906,
"loss": -0.9815,
"step": 435
},
{
"epoch": 0.9404221212930804,
"grad_norm": 17.34381390762727,
"learning_rate": 8.518543427732949e-09,
"logits": -2.989065647125244,
"logps": -246.51412963867188,
"loss": -0.9808,
"step": 440
},
{
"epoch": 0.9511087363077745,
"grad_norm": 16.16955244150207,
"learning_rate": 6.268021954544095e-09,
"logits": -2.859179973602295,
"logps": -305.86053466796875,
"loss": -0.9795,
"step": 445
},
{
"epoch": 0.9617953513224686,
"grad_norm": 14.813287522652269,
"learning_rate": 4.358381691677931e-09,
"logits": -2.835251808166504,
"logps": -273.0709533691406,
"loss": -0.9813,
"step": 450
},
{
"epoch": 0.9724819663371627,
"grad_norm": 18.12603626613458,
"learning_rate": 2.7922934437178692e-09,
"logits": -2.8190817832946777,
"logps": -264.83544921875,
"loss": -0.981,
"step": 455
},
{
"epoch": 0.9831685813518568,
"grad_norm": 17.624270425975272,
"learning_rate": 1.5719475266893489e-09,
"logits": -2.793806552886963,
"logps": -224.26779174804688,
"loss": -0.9821,
"step": 460
},
{
"epoch": 0.9938551963665508,
"grad_norm": 17.165545325823846,
"learning_rate": 6.990507047049676e-10,
"logits": -2.9348580837249756,
"logps": -265.0174255371094,
"loss": -0.9799,
"step": 465
},
{
"epoch": 0.9981298423724285,
"step": 467,
"total_flos": 0.0,
"train_loss": -0.9702747563733789,
"train_runtime": 45631.769,
"train_samples_per_second": 1.312,
"train_steps_per_second": 0.01
}
],
"logging_steps": 5,
"max_steps": 467,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 250,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}