{
"best_metric": 0.8859984697781178,
"best_model_checkpoint": "distilbert-base-multilingual-cased-aoe-hyper/run-0/checkpoint-2616",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 2616,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01529051987767584,
"grad_norm": 1.6358972787857056,
"learning_rate": 9.349717786759033e-06,
"loss": 0.6796,
"step": 10
},
{
"epoch": 0.03058103975535168,
"grad_norm": 1.7600407600402832,
"learning_rate": 9.321037670848728e-06,
"loss": 0.5945,
"step": 20
},
{
"epoch": 0.045871559633027525,
"grad_norm": 2.3243937492370605,
"learning_rate": 9.292357554938425e-06,
"loss": 0.5386,
"step": 30
},
{
"epoch": 0.06116207951070336,
"grad_norm": 2.1618645191192627,
"learning_rate": 9.263677439028121e-06,
"loss": 0.4348,
"step": 40
},
{
"epoch": 0.0764525993883792,
"grad_norm": 4.011763572692871,
"learning_rate": 9.234997323117817e-06,
"loss": 0.4917,
"step": 50
},
{
"epoch": 0.09174311926605505,
"grad_norm": 3.0631511211395264,
"learning_rate": 9.206317207207513e-06,
"loss": 0.4419,
"step": 60
},
{
"epoch": 0.10703363914373089,
"grad_norm": 5.19169807434082,
"learning_rate": 9.177637091297209e-06,
"loss": 0.4885,
"step": 70
},
{
"epoch": 0.12232415902140673,
"grad_norm": 3.2671306133270264,
"learning_rate": 9.148956975386905e-06,
"loss": 0.3778,
"step": 80
},
{
"epoch": 0.13761467889908258,
"grad_norm": 2.0801641941070557,
"learning_rate": 9.120276859476602e-06,
"loss": 0.4524,
"step": 90
},
{
"epoch": 0.1529051987767584,
"grad_norm": 3.2077746391296387,
"learning_rate": 9.091596743566297e-06,
"loss": 0.3107,
"step": 100
},
{
"epoch": 0.16819571865443425,
"grad_norm": 3.8301804065704346,
"learning_rate": 9.062916627655994e-06,
"loss": 0.4904,
"step": 110
},
{
"epoch": 0.1834862385321101,
"grad_norm": 2.995657205581665,
"learning_rate": 9.03423651174569e-06,
"loss": 0.3564,
"step": 120
},
{
"epoch": 0.19877675840978593,
"grad_norm": 5.345709800720215,
"learning_rate": 9.005556395835386e-06,
"loss": 0.498,
"step": 130
},
{
"epoch": 0.21406727828746178,
"grad_norm": 1.6241793632507324,
"learning_rate": 8.976876279925083e-06,
"loss": 0.4324,
"step": 140
},
{
"epoch": 0.22935779816513763,
"grad_norm": 6.096868991851807,
"learning_rate": 8.94819616401478e-06,
"loss": 0.4474,
"step": 150
},
{
"epoch": 0.24464831804281345,
"grad_norm": 4.777408123016357,
"learning_rate": 8.919516048104475e-06,
"loss": 0.3605,
"step": 160
},
{
"epoch": 0.2599388379204893,
"grad_norm": 4.883125305175781,
"learning_rate": 8.890835932194172e-06,
"loss": 0.3694,
"step": 170
},
{
"epoch": 0.27522935779816515,
"grad_norm": 2.1104636192321777,
"learning_rate": 8.862155816283867e-06,
"loss": 0.3391,
"step": 180
},
{
"epoch": 0.290519877675841,
"grad_norm": 3.5193943977355957,
"learning_rate": 8.833475700373564e-06,
"loss": 0.4028,
"step": 190
},
{
"epoch": 0.3058103975535168,
"grad_norm": 12.695348739624023,
"learning_rate": 8.80479558446326e-06,
"loss": 0.5372,
"step": 200
},
{
"epoch": 0.3211009174311927,
"grad_norm": 8.019516944885254,
"learning_rate": 8.776115468552956e-06,
"loss": 0.3839,
"step": 210
},
{
"epoch": 0.3363914373088685,
"grad_norm": 2.723048448562622,
"learning_rate": 8.747435352642653e-06,
"loss": 0.3155,
"step": 220
},
{
"epoch": 0.3516819571865443,
"grad_norm": 5.75403356552124,
"learning_rate": 8.718755236732348e-06,
"loss": 0.4086,
"step": 230
},
{
"epoch": 0.3669724770642202,
"grad_norm": 3.325057029724121,
"learning_rate": 8.690075120822045e-06,
"loss": 0.293,
"step": 240
},
{
"epoch": 0.382262996941896,
"grad_norm": 18.065032958984375,
"learning_rate": 8.66139500491174e-06,
"loss": 0.3126,
"step": 250
},
{
"epoch": 0.39755351681957185,
"grad_norm": 5.72575569152832,
"learning_rate": 8.632714889001437e-06,
"loss": 0.3087,
"step": 260
},
{
"epoch": 0.41284403669724773,
"grad_norm": 5.2425055503845215,
"learning_rate": 8.604034773091134e-06,
"loss": 0.474,
"step": 270
},
{
"epoch": 0.42813455657492355,
"grad_norm": 8.025136947631836,
"learning_rate": 8.57535465718083e-06,
"loss": 0.2461,
"step": 280
},
{
"epoch": 0.4434250764525994,
"grad_norm": 11.76362133026123,
"learning_rate": 8.546674541270526e-06,
"loss": 0.3092,
"step": 290
},
{
"epoch": 0.45871559633027525,
"grad_norm": 14.632379531860352,
"learning_rate": 8.517994425360223e-06,
"loss": 0.4511,
"step": 300
},
{
"epoch": 0.4740061162079511,
"grad_norm": 3.7113358974456787,
"learning_rate": 8.489314309449918e-06,
"loss": 0.3188,
"step": 310
},
{
"epoch": 0.4892966360856269,
"grad_norm": 10.738109588623047,
"learning_rate": 8.460634193539615e-06,
"loss": 0.4563,
"step": 320
},
{
"epoch": 0.5045871559633027,
"grad_norm": 13.628973960876465,
"learning_rate": 8.431954077629312e-06,
"loss": 0.3233,
"step": 330
},
{
"epoch": 0.5198776758409785,
"grad_norm": 17.098241806030273,
"learning_rate": 8.403273961719007e-06,
"loss": 0.4134,
"step": 340
},
{
"epoch": 0.5351681957186545,
"grad_norm": 4.145214080810547,
"learning_rate": 8.374593845808704e-06,
"loss": 0.3638,
"step": 350
},
{
"epoch": 0.5504587155963303,
"grad_norm": 21.35369300842285,
"learning_rate": 8.345913729898399e-06,
"loss": 0.3341,
"step": 360
},
{
"epoch": 0.5657492354740061,
"grad_norm": 6.4279937744140625,
"learning_rate": 8.317233613988096e-06,
"loss": 0.3332,
"step": 370
},
{
"epoch": 0.581039755351682,
"grad_norm": 6.384445667266846,
"learning_rate": 8.288553498077791e-06,
"loss": 0.3299,
"step": 380
},
{
"epoch": 0.5963302752293578,
"grad_norm": 9.927398681640625,
"learning_rate": 8.25987338216749e-06,
"loss": 0.3687,
"step": 390
},
{
"epoch": 0.6116207951070336,
"grad_norm": 2.317152261734009,
"learning_rate": 8.231193266257185e-06,
"loss": 0.2947,
"step": 400
},
{
"epoch": 0.6269113149847095,
"grad_norm": 6.1938982009887695,
"learning_rate": 8.202513150346881e-06,
"loss": 0.2495,
"step": 410
},
{
"epoch": 0.6422018348623854,
"grad_norm": 9.2775297164917,
"learning_rate": 8.173833034436577e-06,
"loss": 0.3011,
"step": 420
},
{
"epoch": 0.6574923547400612,
"grad_norm": 12.099929809570312,
"learning_rate": 8.145152918526273e-06,
"loss": 0.2866,
"step": 430
},
{
"epoch": 0.672782874617737,
"grad_norm": 15.571517944335938,
"learning_rate": 8.116472802615969e-06,
"loss": 0.2352,
"step": 440
},
{
"epoch": 0.6880733944954128,
"grad_norm": 2.7042486667633057,
"learning_rate": 8.087792686705666e-06,
"loss": 0.4442,
"step": 450
},
{
"epoch": 0.7033639143730887,
"grad_norm": 24.800390243530273,
"learning_rate": 8.059112570795362e-06,
"loss": 0.2647,
"step": 460
},
{
"epoch": 0.7186544342507645,
"grad_norm": 1.1386603116989136,
"learning_rate": 8.030432454885058e-06,
"loss": 0.339,
"step": 470
},
{
"epoch": 0.7339449541284404,
"grad_norm": 2.4210026264190674,
"learning_rate": 8.001752338974754e-06,
"loss": 0.2963,
"step": 480
},
{
"epoch": 0.7492354740061162,
"grad_norm": 10.266980171203613,
"learning_rate": 7.97307222306445e-06,
"loss": 0.2177,
"step": 490
},
{
"epoch": 0.764525993883792,
"grad_norm": 8.048089027404785,
"learning_rate": 7.944392107154146e-06,
"loss": 0.3248,
"step": 500
},
{
"epoch": 0.7798165137614679,
"grad_norm": 25.07794189453125,
"learning_rate": 7.915711991243843e-06,
"loss": 0.3321,
"step": 510
},
{
"epoch": 0.7951070336391437,
"grad_norm": 25.960834503173828,
"learning_rate": 7.88703187533354e-06,
"loss": 0.4167,
"step": 520
},
{
"epoch": 0.8103975535168195,
"grad_norm": 7.0256028175354,
"learning_rate": 7.858351759423235e-06,
"loss": 0.2602,
"step": 530
},
{
"epoch": 0.8256880733944955,
"grad_norm": 3.3532984256744385,
"learning_rate": 7.829671643512932e-06,
"loss": 0.2569,
"step": 540
},
{
"epoch": 0.8409785932721713,
"grad_norm": 5.293972969055176,
"learning_rate": 7.800991527602627e-06,
"loss": 0.321,
"step": 550
},
{
"epoch": 0.8562691131498471,
"grad_norm": 5.642370223999023,
"learning_rate": 7.772311411692324e-06,
"loss": 0.2011,
"step": 560
},
{
"epoch": 0.8715596330275229,
"grad_norm": 19.925289154052734,
"learning_rate": 7.743631295782021e-06,
"loss": 0.3707,
"step": 570
},
{
"epoch": 0.8868501529051988,
"grad_norm": 7.974088668823242,
"learning_rate": 7.714951179871716e-06,
"loss": 0.3449,
"step": 580
},
{
"epoch": 0.9021406727828746,
"grad_norm": 28.756046295166016,
"learning_rate": 7.686271063961413e-06,
"loss": 0.5487,
"step": 590
},
{
"epoch": 0.9174311926605505,
"grad_norm": 2.515625238418579,
"learning_rate": 7.657590948051108e-06,
"loss": 0.3541,
"step": 600
},
{
"epoch": 0.9327217125382263,
"grad_norm": 7.654485702514648,
"learning_rate": 7.628910832140805e-06,
"loss": 0.3335,
"step": 610
},
{
"epoch": 0.9480122324159022,
"grad_norm": 17.905406951904297,
"learning_rate": 7.600230716230501e-06,
"loss": 0.3162,
"step": 620
},
{
"epoch": 0.963302752293578,
"grad_norm": 16.23491668701172,
"learning_rate": 7.571550600320198e-06,
"loss": 0.3626,
"step": 630
},
{
"epoch": 0.9785932721712538,
"grad_norm": 0.26539096236228943,
"learning_rate": 7.542870484409894e-06,
"loss": 0.2231,
"step": 640
},
{
"epoch": 0.9938837920489296,
"grad_norm": 1.7205305099487305,
"learning_rate": 7.51419036849959e-06,
"loss": 0.3941,
"step": 650
},
{
"epoch": 1.0,
"eval_accuracy": 0.8706962509563887,
"eval_f1": 0.7540029112081513,
"eval_loss": 0.29941147565841675,
"eval_precision": 0.6906666666666667,
"eval_recall": 0.8301282051282052,
"eval_runtime": 4.8981,
"eval_samples_per_second": 266.841,
"eval_steps_per_second": 16.741,
"step": 654
},
{
"epoch": 1.0091743119266054,
"grad_norm": 13.617008209228516,
"learning_rate": 7.485510252589286e-06,
"loss": 0.3849,
"step": 660
},
{
"epoch": 1.0244648318042813,
"grad_norm": 6.546511650085449,
"learning_rate": 7.456830136678982e-06,
"loss": 0.2871,
"step": 670
},
{
"epoch": 1.039755351681957,
"grad_norm": 5.344148635864258,
"learning_rate": 7.428150020768678e-06,
"loss": 0.2358,
"step": 680
},
{
"epoch": 1.0550458715596331,
"grad_norm": 7.6064348220825195,
"learning_rate": 7.399469904858376e-06,
"loss": 0.2451,
"step": 690
},
{
"epoch": 1.070336391437309,
"grad_norm": 21.806922912597656,
"learning_rate": 7.370789788948072e-06,
"loss": 0.2107,
"step": 700
},
{
"epoch": 1.0856269113149848,
"grad_norm": 10.673025131225586,
"learning_rate": 7.342109673037768e-06,
"loss": 0.2411,
"step": 710
},
{
"epoch": 1.1009174311926606,
"grad_norm": 14.94886589050293,
"learning_rate": 7.313429557127464e-06,
"loss": 0.1456,
"step": 720
},
{
"epoch": 1.1162079510703364,
"grad_norm": 12.618221282958984,
"learning_rate": 7.28474944121716e-06,
"loss": 0.2997,
"step": 730
},
{
"epoch": 1.1314984709480123,
"grad_norm": 0.2970161736011505,
"learning_rate": 7.256069325306856e-06,
"loss": 0.2922,
"step": 740
},
{
"epoch": 1.146788990825688,
"grad_norm": 3.0866048336029053,
"learning_rate": 7.227389209396553e-06,
"loss": 0.1767,
"step": 750
},
{
"epoch": 1.162079510703364,
"grad_norm": 12.53414535522461,
"learning_rate": 7.198709093486249e-06,
"loss": 0.2948,
"step": 760
},
{
"epoch": 1.1773700305810397,
"grad_norm": 24.84918975830078,
"learning_rate": 7.170028977575945e-06,
"loss": 0.1902,
"step": 770
},
{
"epoch": 1.1926605504587156,
"grad_norm": 1.028529167175293,
"learning_rate": 7.141348861665641e-06,
"loss": 0.2246,
"step": 780
},
{
"epoch": 1.2079510703363914,
"grad_norm": 20.015581130981445,
"learning_rate": 7.112668745755337e-06,
"loss": 0.2278,
"step": 790
},
{
"epoch": 1.2232415902140672,
"grad_norm": 12.658394813537598,
"learning_rate": 7.083988629845033e-06,
"loss": 0.3021,
"step": 800
},
{
"epoch": 1.238532110091743,
"grad_norm": 4.727680683135986,
"learning_rate": 7.05530851393473e-06,
"loss": 0.3192,
"step": 810
},
{
"epoch": 1.2538226299694188,
"grad_norm": 16.0307559967041,
"learning_rate": 7.026628398024426e-06,
"loss": 0.3534,
"step": 820
},
{
"epoch": 1.2691131498470947,
"grad_norm": 4.940106391906738,
"learning_rate": 6.997948282114122e-06,
"loss": 0.3564,
"step": 830
},
{
"epoch": 1.2844036697247707,
"grad_norm": 23.759117126464844,
"learning_rate": 6.969268166203818e-06,
"loss": 0.2746,
"step": 840
},
{
"epoch": 1.2996941896024465,
"grad_norm": 25.81449317932129,
"learning_rate": 6.9405880502935144e-06,
"loss": 0.4967,
"step": 850
},
{
"epoch": 1.3149847094801224,
"grad_norm": 4.882550239562988,
"learning_rate": 6.9119079343832104e-06,
"loss": 0.2423,
"step": 860
},
{
"epoch": 1.3302752293577982,
"grad_norm": 12.674756050109863,
"learning_rate": 6.883227818472907e-06,
"loss": 0.3532,
"step": 870
},
{
"epoch": 1.345565749235474,
"grad_norm": 13.883800506591797,
"learning_rate": 6.854547702562603e-06,
"loss": 0.2396,
"step": 880
},
{
"epoch": 1.3608562691131498,
"grad_norm": 7.71323299407959,
"learning_rate": 6.825867586652299e-06,
"loss": 0.2591,
"step": 890
},
{
"epoch": 1.3761467889908257,
"grad_norm": 2.174020767211914,
"learning_rate": 6.797187470741995e-06,
"loss": 0.2171,
"step": 900
},
{
"epoch": 1.3914373088685015,
"grad_norm": 2.351623773574829,
"learning_rate": 6.768507354831691e-06,
"loss": 0.3057,
"step": 910
},
{
"epoch": 1.4067278287461773,
"grad_norm": 26.668006896972656,
"learning_rate": 6.739827238921387e-06,
"loss": 0.2594,
"step": 920
},
{
"epoch": 1.4220183486238533,
"grad_norm": 13.497000694274902,
"learning_rate": 6.711147123011084e-06,
"loss": 0.2886,
"step": 930
},
{
"epoch": 1.4373088685015292,
"grad_norm": 9.161819458007812,
"learning_rate": 6.682467007100781e-06,
"loss": 0.2983,
"step": 940
},
{
"epoch": 1.452599388379205,
"grad_norm": 22.162334442138672,
"learning_rate": 6.653786891190477e-06,
"loss": 0.1644,
"step": 950
},
{
"epoch": 1.4678899082568808,
"grad_norm": 0.5806663632392883,
"learning_rate": 6.625106775280173e-06,
"loss": 0.2643,
"step": 960
},
{
"epoch": 1.4831804281345566,
"grad_norm": 0.31279271841049194,
"learning_rate": 6.596426659369869e-06,
"loss": 0.2888,
"step": 970
},
{
"epoch": 1.4984709480122325,
"grad_norm": 0.543879508972168,
"learning_rate": 6.567746543459565e-06,
"loss": 0.2786,
"step": 980
},
{
"epoch": 1.5137614678899083,
"grad_norm": 0.9113799333572388,
"learning_rate": 6.539066427549262e-06,
"loss": 0.4815,
"step": 990
},
{
"epoch": 1.529051987767584,
"grad_norm": 17.983909606933594,
"learning_rate": 6.510386311638958e-06,
"loss": 0.2022,
"step": 1000
},
{
"epoch": 1.54434250764526,
"grad_norm": 3.3928322792053223,
"learning_rate": 6.481706195728654e-06,
"loss": 0.2222,
"step": 1010
},
{
"epoch": 1.5596330275229358,
"grad_norm": 27.15311050415039,
"learning_rate": 6.45302607981835e-06,
"loss": 0.2879,
"step": 1020
},
{
"epoch": 1.5749235474006116,
"grad_norm": 1.4941473007202148,
"learning_rate": 6.424345963908046e-06,
"loss": 0.3431,
"step": 1030
},
{
"epoch": 1.5902140672782874,
"grad_norm": 26.38034439086914,
"learning_rate": 6.395665847997742e-06,
"loss": 0.2812,
"step": 1040
},
{
"epoch": 1.6055045871559632,
"grad_norm": 4.923500061035156,
"learning_rate": 6.366985732087439e-06,
"loss": 0.2865,
"step": 1050
},
{
"epoch": 1.620795107033639,
"grad_norm": 0.2041570544242859,
"learning_rate": 6.338305616177135e-06,
"loss": 0.4086,
"step": 1060
},
{
"epoch": 1.6360856269113149,
"grad_norm": 1.037766456604004,
"learning_rate": 6.309625500266832e-06,
"loss": 0.1451,
"step": 1070
},
{
"epoch": 1.6513761467889907,
"grad_norm": 9.758125305175781,
"learning_rate": 6.280945384356528e-06,
"loss": 0.3565,
"step": 1080
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.7395672798156738,
"learning_rate": 6.252265268446224e-06,
"loss": 0.3725,
"step": 1090
},
{
"epoch": 1.6819571865443423,
"grad_norm": 2.0258843898773193,
"learning_rate": 6.223585152535921e-06,
"loss": 0.1929,
"step": 1100
},
{
"epoch": 1.6972477064220184,
"grad_norm": 6.11789083480835,
"learning_rate": 6.194905036625617e-06,
"loss": 0.3753,
"step": 1110
},
{
"epoch": 1.7125382262996942,
"grad_norm": 11.606084823608398,
"learning_rate": 6.166224920715313e-06,
"loss": 0.227,
"step": 1120
},
{
"epoch": 1.72782874617737,
"grad_norm": 17.951644897460938,
"learning_rate": 6.137544804805009e-06,
"loss": 0.4915,
"step": 1130
},
{
"epoch": 1.7431192660550459,
"grad_norm": 0.44653165340423584,
"learning_rate": 6.108864688894705e-06,
"loss": 0.2273,
"step": 1140
},
{
"epoch": 1.7584097859327217,
"grad_norm": 17.01247215270996,
"learning_rate": 6.080184572984401e-06,
"loss": 0.3209,
"step": 1150
},
{
"epoch": 1.7737003058103975,
"grad_norm": 20.699323654174805,
"learning_rate": 6.0515044570740976e-06,
"loss": 0.435,
"step": 1160
},
{
"epoch": 1.7889908256880735,
"grad_norm": 30.34465789794922,
"learning_rate": 6.0228243411637936e-06,
"loss": 0.2472,
"step": 1170
},
{
"epoch": 1.8042813455657494,
"grad_norm": 4.261192798614502,
"learning_rate": 5.99414422525349e-06,
"loss": 0.285,
"step": 1180
},
{
"epoch": 1.8195718654434252,
"grad_norm": 20.993389129638672,
"learning_rate": 5.965464109343186e-06,
"loss": 0.3737,
"step": 1190
},
{
"epoch": 1.834862385321101,
"grad_norm": 0.8759846091270447,
"learning_rate": 5.936783993432882e-06,
"loss": 0.1866,
"step": 1200
},
{
"epoch": 1.8501529051987768,
"grad_norm": 29.079099655151367,
"learning_rate": 5.9081038775225785e-06,
"loss": 0.4163,
"step": 1210
},
{
"epoch": 1.8654434250764527,
"grad_norm": 3.231898546218872,
"learning_rate": 5.879423761612275e-06,
"loss": 0.4359,
"step": 1220
},
{
"epoch": 1.8807339449541285,
"grad_norm": 17.104846954345703,
"learning_rate": 5.850743645701971e-06,
"loss": 0.2606,
"step": 1230
},
{
"epoch": 1.8960244648318043,
"grad_norm": 0.409883588552475,
"learning_rate": 5.822063529791667e-06,
"loss": 0.1581,
"step": 1240
},
{
"epoch": 1.9113149847094801,
"grad_norm": 10.206957817077637,
"learning_rate": 5.793383413881363e-06,
"loss": 0.2835,
"step": 1250
},
{
"epoch": 1.926605504587156,
"grad_norm": 16.08922576904297,
"learning_rate": 5.764703297971059e-06,
"loss": 0.2338,
"step": 1260
},
{
"epoch": 1.9418960244648318,
"grad_norm": 3.5700275897979736,
"learning_rate": 5.736023182060755e-06,
"loss": 0.4643,
"step": 1270
},
{
"epoch": 1.9571865443425076,
"grad_norm": 13.477959632873535,
"learning_rate": 5.707343066150452e-06,
"loss": 0.2526,
"step": 1280
},
{
"epoch": 1.9724770642201834,
"grad_norm": 8.30550479888916,
"learning_rate": 5.678662950240148e-06,
"loss": 0.4456,
"step": 1290
},
{
"epoch": 1.9877675840978593,
"grad_norm": 7.599588871002197,
"learning_rate": 5.649982834329844e-06,
"loss": 0.259,
"step": 1300
},
{
"epoch": 2.0,
"eval_accuracy": 0.881407804131599,
"eval_f1": 0.7589424572317263,
"eval_loss": 0.3265691101551056,
"eval_precision": 0.7371601208459214,
"eval_recall": 0.782051282051282,
"eval_runtime": 4.9078,
"eval_samples_per_second": 266.31,
"eval_steps_per_second": 16.708,
"step": 1308
},
{
"epoch": 2.003058103975535,
"grad_norm": 5.8759026527404785,
"learning_rate": 5.62130271841954e-06,
"loss": 0.2964,
"step": 1310
},
{
"epoch": 2.018348623853211,
"grad_norm": 5.663475036621094,
"learning_rate": 5.592622602509236e-06,
"loss": 0.2355,
"step": 1320
},
{
"epoch": 2.0336391437308867,
"grad_norm": 12.831345558166504,
"learning_rate": 5.563942486598932e-06,
"loss": 0.2225,
"step": 1330
},
{
"epoch": 2.0489296636085625,
"grad_norm": 0.6970747709274292,
"learning_rate": 5.53526237068863e-06,
"loss": 0.279,
"step": 1340
},
{
"epoch": 2.0642201834862384,
"grad_norm": 20.61145782470703,
"learning_rate": 5.506582254778326e-06,
"loss": 0.2825,
"step": 1350
},
{
"epoch": 2.079510703363914,
"grad_norm": 4.7170891761779785,
"learning_rate": 5.477902138868022e-06,
"loss": 0.2996,
"step": 1360
},
{
"epoch": 2.09480122324159,
"grad_norm": 8.943619728088379,
"learning_rate": 5.449222022957718e-06,
"loss": 0.1539,
"step": 1370
},
{
"epoch": 2.1100917431192663,
"grad_norm": 0.7099681496620178,
"learning_rate": 5.420541907047414e-06,
"loss": 0.1958,
"step": 1380
},
{
"epoch": 2.1253822629969417,
"grad_norm": 3.903538227081299,
"learning_rate": 5.39186179113711e-06,
"loss": 0.3434,
"step": 1390
},
{
"epoch": 2.140672782874618,
"grad_norm": 4.8174920082092285,
"learning_rate": 5.363181675226807e-06,
"loss": 0.266,
"step": 1400
},
{
"epoch": 2.1559633027522938,
"grad_norm": 13.861398696899414,
"learning_rate": 5.334501559316503e-06,
"loss": 0.226,
"step": 1410
},
{
"epoch": 2.1712538226299696,
"grad_norm": 0.26603391766548157,
"learning_rate": 5.305821443406199e-06,
"loss": 0.1668,
"step": 1420
},
{
"epoch": 2.1865443425076454,
"grad_norm": 8.830183982849121,
"learning_rate": 5.277141327495895e-06,
"loss": 0.1988,
"step": 1430
},
{
"epoch": 2.2018348623853212,
"grad_norm": 0.7192776799201965,
"learning_rate": 5.248461211585591e-06,
"loss": 0.2137,
"step": 1440
},
{
"epoch": 2.217125382262997,
"grad_norm": 24.46476173400879,
"learning_rate": 5.219781095675287e-06,
"loss": 0.3146,
"step": 1450
},
{
"epoch": 2.232415902140673,
"grad_norm": 36.542945861816406,
"learning_rate": 5.191100979764985e-06,
"loss": 0.3024,
"step": 1460
},
{
"epoch": 2.2477064220183487,
"grad_norm": 18.665542602539062,
"learning_rate": 5.162420863854681e-06,
"loss": 0.45,
"step": 1470
},
{
"epoch": 2.2629969418960245,
"grad_norm": 0.5163823366165161,
"learning_rate": 5.133740747944377e-06,
"loss": 0.2063,
"step": 1480
},
{
"epoch": 2.2782874617737003,
"grad_norm": 12.623830795288086,
"learning_rate": 5.105060632034073e-06,
"loss": 0.3537,
"step": 1490
},
{
"epoch": 2.293577981651376,
"grad_norm": 6.660862922668457,
"learning_rate": 5.076380516123769e-06,
"loss": 0.3763,
"step": 1500
},
{
"epoch": 2.308868501529052,
"grad_norm": 18.302440643310547,
"learning_rate": 5.047700400213465e-06,
"loss": 0.2796,
"step": 1510
},
{
"epoch": 2.324159021406728,
"grad_norm": 5.233463764190674,
"learning_rate": 5.019020284303162e-06,
"loss": 0.2484,
"step": 1520
},
{
"epoch": 2.3394495412844036,
"grad_norm": 0.2765680253505707,
"learning_rate": 4.990340168392858e-06,
"loss": 0.197,
"step": 1530
},
{
"epoch": 2.3547400611620795,
"grad_norm": 7.559242248535156,
"learning_rate": 4.961660052482554e-06,
"loss": 0.2913,
"step": 1540
},
{
"epoch": 2.3700305810397553,
"grad_norm": 20.358930587768555,
"learning_rate": 4.93297993657225e-06,
"loss": 0.2762,
"step": 1550
},
{
"epoch": 2.385321100917431,
"grad_norm": 23.49895668029785,
"learning_rate": 4.904299820661946e-06,
"loss": 0.3327,
"step": 1560
},
{
"epoch": 2.400611620795107,
"grad_norm": 9.241127014160156,
"learning_rate": 4.875619704751642e-06,
"loss": 0.2981,
"step": 1570
},
{
"epoch": 2.4159021406727827,
"grad_norm": 9.627830505371094,
"learning_rate": 4.846939588841339e-06,
"loss": 0.3052,
"step": 1580
},
{
"epoch": 2.4311926605504586,
"grad_norm": 6.3946075439453125,
"learning_rate": 4.818259472931035e-06,
"loss": 0.1022,
"step": 1590
},
{
"epoch": 2.4464831804281344,
"grad_norm": 0.6927328109741211,
"learning_rate": 4.789579357020731e-06,
"loss": 0.0922,
"step": 1600
},
{
"epoch": 2.46177370030581,
"grad_norm": 1.498061180114746,
"learning_rate": 4.760899241110427e-06,
"loss": 0.1883,
"step": 1610
},
{
"epoch": 2.477064220183486,
"grad_norm": 3.1200311183929443,
"learning_rate": 4.732219125200123e-06,
"loss": 0.1963,
"step": 1620
},
{
"epoch": 2.4923547400611623,
"grad_norm": 5.31001615524292,
"learning_rate": 4.703539009289819e-06,
"loss": 0.3003,
"step": 1630
},
{
"epoch": 2.5076452599388377,
"grad_norm": 13.968355178833008,
"learning_rate": 4.674858893379516e-06,
"loss": 0.3723,
"step": 1640
},
{
"epoch": 2.522935779816514,
"grad_norm": 30.514612197875977,
"learning_rate": 4.646178777469212e-06,
"loss": 0.2453,
"step": 1650
},
{
"epoch": 2.5382262996941893,
"grad_norm": 18.336116790771484,
"learning_rate": 4.617498661558908e-06,
"loss": 0.2514,
"step": 1660
},
{
"epoch": 2.5535168195718656,
"grad_norm": 3.3345324993133545,
"learning_rate": 4.588818545648604e-06,
"loss": 0.1784,
"step": 1670
},
{
"epoch": 2.5688073394495414,
"grad_norm": 0.6828510165214539,
"learning_rate": 4.560138429738301e-06,
"loss": 0.1926,
"step": 1680
},
{
"epoch": 2.5840978593272173,
"grad_norm": 0.3491632044315338,
"learning_rate": 4.531458313827997e-06,
"loss": 0.3459,
"step": 1690
},
{
"epoch": 2.599388379204893,
"grad_norm": 14.196633338928223,
"learning_rate": 4.502778197917693e-06,
"loss": 0.2798,
"step": 1700
},
{
"epoch": 2.614678899082569,
"grad_norm": 3.067326307296753,
"learning_rate": 4.47409808200739e-06,
"loss": 0.2548,
"step": 1710
},
{
"epoch": 2.6299694189602447,
"grad_norm": 10.058695793151855,
"learning_rate": 4.445417966097086e-06,
"loss": 0.3088,
"step": 1720
},
{
"epoch": 2.6452599388379205,
"grad_norm": 24.748844146728516,
"learning_rate": 4.416737850186782e-06,
"loss": 0.2524,
"step": 1730
},
{
"epoch": 2.6605504587155964,
"grad_norm": 9.57737922668457,
"learning_rate": 4.388057734276478e-06,
"loss": 0.1089,
"step": 1740
},
{
"epoch": 2.675840978593272,
"grad_norm": 4.255125999450684,
"learning_rate": 4.359377618366174e-06,
"loss": 0.2619,
"step": 1750
},
{
"epoch": 2.691131498470948,
"grad_norm": 12.66395378112793,
"learning_rate": 4.33069750245587e-06,
"loss": 0.113,
"step": 1760
},
{
"epoch": 2.706422018348624,
"grad_norm": 27.81719970703125,
"learning_rate": 4.302017386545567e-06,
"loss": 0.1587,
"step": 1770
},
{
"epoch": 2.7217125382262997,
"grad_norm": 9.098254203796387,
"learning_rate": 4.273337270635263e-06,
"loss": 0.1325,
"step": 1780
},
{
"epoch": 2.7370030581039755,
"grad_norm": 7.441411018371582,
"learning_rate": 4.244657154724959e-06,
"loss": 0.4097,
"step": 1790
},
{
"epoch": 2.7522935779816513,
"grad_norm": 28.30503273010254,
"learning_rate": 4.215977038814656e-06,
"loss": 0.2826,
"step": 1800
},
{
"epoch": 2.767584097859327,
"grad_norm": 47.75388717651367,
"learning_rate": 4.187296922904352e-06,
"loss": 0.2992,
"step": 1810
},
{
"epoch": 2.782874617737003,
"grad_norm": 5.235270023345947,
"learning_rate": 4.158616806994048e-06,
"loss": 0.1981,
"step": 1820
},
{
"epoch": 2.7981651376146788,
"grad_norm": 0.08484458923339844,
"learning_rate": 4.129936691083745e-06,
"loss": 0.3315,
"step": 1830
},
{
"epoch": 2.8134556574923546,
"grad_norm": 17.93990707397461,
"learning_rate": 4.101256575173441e-06,
"loss": 0.3326,
"step": 1840
},
{
"epoch": 2.8287461773700304,
"grad_norm": 0.8848928213119507,
"learning_rate": 4.072576459263137e-06,
"loss": 0.1852,
"step": 1850
},
{
"epoch": 2.8440366972477067,
"grad_norm": 1.0666664838790894,
"learning_rate": 4.043896343352833e-06,
"loss": 0.1235,
"step": 1860
},
{
"epoch": 2.859327217125382,
"grad_norm": 29.053184509277344,
"learning_rate": 4.015216227442529e-06,
"loss": 0.309,
"step": 1870
},
{
"epoch": 2.8746177370030583,
"grad_norm": 30.479965209960938,
"learning_rate": 3.986536111532225e-06,
"loss": 0.2986,
"step": 1880
},
{
"epoch": 2.8899082568807337,
"grad_norm": 23.085195541381836,
"learning_rate": 3.957855995621922e-06,
"loss": 0.1596,
"step": 1890
},
{
"epoch": 2.90519877675841,
"grad_norm": 1.5042694807052612,
"learning_rate": 3.929175879711618e-06,
"loss": 0.2213,
"step": 1900
},
{
"epoch": 2.9204892966360854,
"grad_norm": 22.314062118530273,
"learning_rate": 3.900495763801314e-06,
"loss": 0.1854,
"step": 1910
},
{
"epoch": 2.9357798165137616,
"grad_norm": 3.0091068744659424,
"learning_rate": 3.8718156478910105e-06,
"loss": 0.1231,
"step": 1920
},
{
"epoch": 2.9510703363914375,
"grad_norm": 0.4003874361515045,
"learning_rate": 3.8431355319807065e-06,
"loss": 0.1013,
"step": 1930
},
{
"epoch": 2.9663608562691133,
"grad_norm": 36.91960906982422,
"learning_rate": 3.8144554160704025e-06,
"loss": 0.1974,
"step": 1940
},
{
"epoch": 2.981651376146789,
"grad_norm": 10.349553108215332,
"learning_rate": 3.785775300160099e-06,
"loss": 0.3974,
"step": 1950
},
{
"epoch": 2.996941896024465,
"grad_norm": 4.602114677429199,
"learning_rate": 3.757095184249795e-06,
"loss": 0.1764,
"step": 1960
},
{
"epoch": 3.0,
"eval_accuracy": 0.8837031369548585,
"eval_f1": 0.7661538461538462,
"eval_loss": 0.416372686624527,
"eval_precision": 0.7366863905325444,
"eval_recall": 0.7980769230769231,
"eval_runtime": 4.9368,
"eval_samples_per_second": 264.749,
"eval_steps_per_second": 16.61,
"step": 1962
},
{
"epoch": 3.0122324159021407,
"grad_norm": 0.9072074890136719,
"learning_rate": 3.728415068339491e-06,
"loss": 0.0342,
"step": 1970
},
{
"epoch": 3.0275229357798166,
"grad_norm": 11.377812385559082,
"learning_rate": 3.699734952429188e-06,
"loss": 0.1661,
"step": 1980
},
{
"epoch": 3.0428134556574924,
"grad_norm": 17.510143280029297,
"learning_rate": 3.671054836518884e-06,
"loss": 0.1267,
"step": 1990
},
{
"epoch": 3.058103975535168,
"grad_norm": 0.12311755865812302,
"learning_rate": 3.64237472060858e-06,
"loss": 0.272,
"step": 2000
},
{
"epoch": 3.073394495412844,
"grad_norm": 7.080625534057617,
"learning_rate": 3.6136946046982763e-06,
"loss": 0.3724,
"step": 2010
},
{
"epoch": 3.08868501529052,
"grad_norm": 15.893166542053223,
"learning_rate": 3.5850144887879723e-06,
"loss": 0.211,
"step": 2020
},
{
"epoch": 3.1039755351681957,
"grad_norm": 70.26548767089844,
"learning_rate": 3.5563343728776683e-06,
"loss": 0.1047,
"step": 2030
},
{
"epoch": 3.1192660550458715,
"grad_norm": 1.035912036895752,
"learning_rate": 3.527654256967365e-06,
"loss": 0.0586,
"step": 2040
},
{
"epoch": 3.1345565749235473,
"grad_norm": 21.898548126220703,
"learning_rate": 3.498974141057061e-06,
"loss": 0.2289,
"step": 2050
},
{
"epoch": 3.149847094801223,
"grad_norm": 9.897711753845215,
"learning_rate": 3.4702940251467572e-06,
"loss": 0.2056,
"step": 2060
},
{
"epoch": 3.165137614678899,
"grad_norm": 0.49520930647850037,
"learning_rate": 3.4416139092364537e-06,
"loss": 0.2293,
"step": 2070
},
{
"epoch": 3.180428134556575,
"grad_norm": 0.14918214082717896,
"learning_rate": 3.4129337933261497e-06,
"loss": 0.1414,
"step": 2080
},
{
"epoch": 3.1957186544342506,
"grad_norm": 0.4814736247062683,
"learning_rate": 3.3842536774158457e-06,
"loss": 0.2611,
"step": 2090
},
{
"epoch": 3.2110091743119265,
"grad_norm": 0.19969391822814941,
"learning_rate": 3.355573561505542e-06,
"loss": 0.1482,
"step": 2100
},
{
"epoch": 3.2262996941896023,
"grad_norm": 0.16247302293777466,
"learning_rate": 3.3268934455952385e-06,
"loss": 0.4237,
"step": 2110
},
{
"epoch": 3.241590214067278,
"grad_norm": 17.90416717529297,
"learning_rate": 3.2982133296849346e-06,
"loss": 0.127,
"step": 2120
},
{
"epoch": 3.2568807339449544,
"grad_norm": 34.14628219604492,
"learning_rate": 3.269533213774631e-06,
"loss": 0.1481,
"step": 2130
},
{
"epoch": 3.2721712538226297,
"grad_norm": 0.18314845860004425,
"learning_rate": 3.240853097864327e-06,
"loss": 0.1436,
"step": 2140
},
{
"epoch": 3.287461773700306,
"grad_norm": 0.13257867097854614,
"learning_rate": 3.212172981954023e-06,
"loss": 0.1203,
"step": 2150
},
{
"epoch": 3.302752293577982,
"grad_norm": 42.67006301879883,
"learning_rate": 3.1834928660437194e-06,
"loss": 0.2017,
"step": 2160
},
{
"epoch": 3.3180428134556577,
"grad_norm": 0.3060612082481384,
"learning_rate": 3.154812750133416e-06,
"loss": 0.1398,
"step": 2170
},
{
"epoch": 3.3333333333333335,
"grad_norm": 6.72953987121582,
"learning_rate": 3.126132634223112e-06,
"loss": 0.1634,
"step": 2180
},
{
"epoch": 3.3486238532110093,
"grad_norm": 0.4008305072784424,
"learning_rate": 3.0974525183128083e-06,
"loss": 0.0838,
"step": 2190
},
{
"epoch": 3.363914373088685,
"grad_norm": 0.2475535124540329,
"learning_rate": 3.0687724024025043e-06,
"loss": 0.2601,
"step": 2200
},
{
"epoch": 3.379204892966361,
"grad_norm": 0.6882356405258179,
"learning_rate": 3.0400922864922004e-06,
"loss": 0.2079,
"step": 2210
},
{
"epoch": 3.3944954128440368,
"grad_norm": 0.11873319745063782,
"learning_rate": 3.0114121705818968e-06,
"loss": 0.1479,
"step": 2220
},
{
"epoch": 3.4097859327217126,
"grad_norm": 2.4591195583343506,
"learning_rate": 2.982732054671593e-06,
"loss": 0.0835,
"step": 2230
},
{
"epoch": 3.4250764525993884,
"grad_norm": 33.77700424194336,
"learning_rate": 2.9540519387612892e-06,
"loss": 0.325,
"step": 2240
},
{
"epoch": 3.4403669724770642,
"grad_norm": 0.19437803328037262,
"learning_rate": 2.9253718228509857e-06,
"loss": 0.147,
"step": 2250
},
{
"epoch": 3.45565749235474,
"grad_norm": 5.968377113342285,
"learning_rate": 2.8966917069406817e-06,
"loss": 0.3554,
"step": 2260
},
{
"epoch": 3.470948012232416,
"grad_norm": 12.378327369689941,
"learning_rate": 2.8680115910303777e-06,
"loss": 0.2007,
"step": 2270
},
{
"epoch": 3.4862385321100917,
"grad_norm": 0.21296323835849762,
"learning_rate": 2.839331475120074e-06,
"loss": 0.2296,
"step": 2280
},
{
"epoch": 3.5015290519877675,
"grad_norm": 13.099735260009766,
"learning_rate": 2.81065135920977e-06,
"loss": 0.1264,
"step": 2290
},
{
"epoch": 3.5168195718654434,
"grad_norm": 0.31538063287734985,
"learning_rate": 2.781971243299466e-06,
"loss": 0.4666,
"step": 2300
},
{
"epoch": 3.532110091743119,
"grad_norm": 12.299838066101074,
"learning_rate": 2.753291127389163e-06,
"loss": 0.2505,
"step": 2310
},
{
"epoch": 3.547400611620795,
"grad_norm": 16.644168853759766,
"learning_rate": 2.724611011478859e-06,
"loss": 0.4427,
"step": 2320
},
{
"epoch": 3.562691131498471,
"grad_norm": 39.950279235839844,
"learning_rate": 2.695930895568555e-06,
"loss": 0.3459,
"step": 2330
},
{
"epoch": 3.5779816513761467,
"grad_norm": 5.1495256423950195,
"learning_rate": 2.6672507796582515e-06,
"loss": 0.3096,
"step": 2340
},
{
"epoch": 3.5932721712538225,
"grad_norm": 35.178226470947266,
"learning_rate": 2.6385706637479475e-06,
"loss": 0.2467,
"step": 2350
},
{
"epoch": 3.6085626911314987,
"grad_norm": 6.108614921569824,
"learning_rate": 2.6098905478376435e-06,
"loss": 0.0156,
"step": 2360
},
{
"epoch": 3.623853211009174,
"grad_norm": 0.34197351336479187,
"learning_rate": 2.5812104319273403e-06,
"loss": 0.1566,
"step": 2370
},
{
"epoch": 3.6391437308868504,
"grad_norm": 5.665993690490723,
"learning_rate": 2.5525303160170364e-06,
"loss": 0.268,
"step": 2380
},
{
"epoch": 3.6544342507645258,
"grad_norm": 3.0416011810302734,
"learning_rate": 2.5238502001067324e-06,
"loss": 0.1028,
"step": 2390
},
{
"epoch": 3.669724770642202,
"grad_norm": 5.079551696777344,
"learning_rate": 2.495170084196429e-06,
"loss": 0.2225,
"step": 2400
},
{
"epoch": 3.6850152905198774,
"grad_norm": 0.19564682245254517,
"learning_rate": 2.466489968286125e-06,
"loss": 0.1261,
"step": 2410
},
{
"epoch": 3.7003058103975537,
"grad_norm": 15.542204856872559,
"learning_rate": 2.437809852375821e-06,
"loss": 0.1688,
"step": 2420
},
{
"epoch": 3.7155963302752295,
"grad_norm": 0.636468231678009,
"learning_rate": 2.4091297364655177e-06,
"loss": 0.3542,
"step": 2430
},
{
"epoch": 3.7308868501529053,
"grad_norm": 0.26796048879623413,
"learning_rate": 2.3804496205552137e-06,
"loss": 0.0689,
"step": 2440
},
{
"epoch": 3.746177370030581,
"grad_norm": 9.50084400177002,
"learning_rate": 2.3517695046449097e-06,
"loss": 0.3902,
"step": 2450
},
{
"epoch": 3.761467889908257,
"grad_norm": 1.7848141193389893,
"learning_rate": 2.323089388734606e-06,
"loss": 0.0613,
"step": 2460
},
{
"epoch": 3.776758409785933,
"grad_norm": 20.41553497314453,
"learning_rate": 2.294409272824302e-06,
"loss": 0.2541,
"step": 2470
},
{
"epoch": 3.7920489296636086,
"grad_norm": 1.6424323320388794,
"learning_rate": 2.2657291569139986e-06,
"loss": 0.2198,
"step": 2480
},
{
"epoch": 3.8073394495412844,
"grad_norm": 0.24343428015708923,
"learning_rate": 2.237049041003695e-06,
"loss": 0.2029,
"step": 2490
},
{
"epoch": 3.8226299694189603,
"grad_norm": 44.71451950073242,
"learning_rate": 2.208368925093391e-06,
"loss": 0.3573,
"step": 2500
},
{
"epoch": 3.837920489296636,
"grad_norm": 30.519176483154297,
"learning_rate": 2.179688809183087e-06,
"loss": 0.3304,
"step": 2510
},
{
"epoch": 3.853211009174312,
"grad_norm": 25.292348861694336,
"learning_rate": 2.1510086932727835e-06,
"loss": 0.1079,
"step": 2520
},
{
"epoch": 3.8685015290519877,
"grad_norm": 35.308006286621094,
"learning_rate": 2.1223285773624795e-06,
"loss": 0.1774,
"step": 2530
},
{
"epoch": 3.8837920489296636,
"grad_norm": 0.16474059224128723,
"learning_rate": 2.093648461452176e-06,
"loss": 0.0328,
"step": 2540
},
{
"epoch": 3.8990825688073394,
"grad_norm": 0.09989167749881744,
"learning_rate": 2.0649683455418724e-06,
"loss": 0.1969,
"step": 2550
},
{
"epoch": 3.914373088685015,
"grad_norm": 18.91350746154785,
"learning_rate": 2.0362882296315684e-06,
"loss": 0.1445,
"step": 2560
},
{
"epoch": 3.929663608562691,
"grad_norm": 0.08901531994342804,
"learning_rate": 2.0076081137212644e-06,
"loss": 0.3773,
"step": 2570
},
{
"epoch": 3.944954128440367,
"grad_norm": 46.0722770690918,
"learning_rate": 1.978927997810961e-06,
"loss": 0.3817,
"step": 2580
},
{
"epoch": 3.9602446483180427,
"grad_norm": 7.898420810699463,
"learning_rate": 1.950247881900657e-06,
"loss": 0.1688,
"step": 2590
},
{
"epoch": 3.9755351681957185,
"grad_norm": 51.515220642089844,
"learning_rate": 1.9215677659903533e-06,
"loss": 0.1521,
"step": 2600
},
{
"epoch": 3.9908256880733948,
"grad_norm": 0.9271498918533325,
"learning_rate": 1.8928876500800495e-06,
"loss": 0.1549,
"step": 2610
},
{
"epoch": 4.0,
"eval_accuracy": 0.8859984697781178,
"eval_f1": 0.7585089141004863,
"eval_loss": 0.44139671325683594,
"eval_precision": 0.7672131147540984,
"eval_recall": 0.75,
"eval_runtime": 4.9043,
"eval_samples_per_second": 266.502,
"eval_steps_per_second": 16.72,
"step": 2616
}
],
"logging_steps": 10,
"max_steps": 3270,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2764329675153408.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": {
"learning_rate": 9.378397902669336e-06,
"num_train_epochs": 5,
"per_device_train_batch_size": 8,
"seed": 28
}
}
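
A minimal sketch of how a state file like the one above could be inspected to recover the trial's outcome. It only relies on keys that actually appear in this JSON ("best_metric", "best_model_checkpoint", "log_history", and the per-epoch "eval_*" entries); the filename "trainer_state.json" and the surrounding script are illustrative assumptions, not part of the checkpoint itself.

# sketch.py -- assumes the JSON above is saved as trainer_state.json
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Best result recorded for this trial (here, eval_accuracy at checkpoint-2616).
print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Per-epoch evaluation records are the log_history entries carrying eval_* keys.
for record in state["log_history"]:
    if "eval_accuracy" in record:
        print(f'epoch {record["epoch"]:.0f}: '
              f'acc={record["eval_accuracy"]:.4f}, f1={record["eval_f1"]:.4f}')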