{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9991537376586743,
"eval_steps": 500,
"global_step": 1329,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022566995768688293,
"grad_norm": 1.100711091672383,
"learning_rate": 4.997501873438867e-06,
"loss": 0.7439,
"step": 10
},
{
"epoch": 0.045133991537376586,
"grad_norm": 0.9978428347036197,
"learning_rate": 4.995007487521836e-06,
"loss": 0.6805,
"step": 20
},
{
"epoch": 0.06770098730606489,
"grad_norm": 0.6349072049081254,
"learning_rate": 4.992516832922945e-06,
"loss": 0.663,
"step": 30
},
{
"epoch": 0.09026798307475317,
"grad_norm": 0.6944987900561912,
"learning_rate": 4.990029900348746e-06,
"loss": 0.6405,
"step": 40
},
{
"epoch": 0.11283497884344147,
"grad_norm": 0.6342717028236399,
"learning_rate": 4.987546680538165e-06,
"loss": 0.634,
"step": 50
},
{
"epoch": 0.13540197461212977,
"grad_norm": 0.7029633847174581,
"learning_rate": 4.985067164262359e-06,
"loss": 0.6333,
"step": 60
},
{
"epoch": 0.15796897038081806,
"grad_norm": 0.6429990155588656,
"learning_rate": 4.98259134232457e-06,
"loss": 0.629,
"step": 70
},
{
"epoch": 0.18053596614950634,
"grad_norm": 0.7304393500080666,
"learning_rate": 4.980119205559974e-06,
"loss": 0.6305,
"step": 80
},
{
"epoch": 0.20310296191819463,
"grad_norm": 0.7772726554166205,
"learning_rate": 4.977650744835555e-06,
"loss": 0.6282,
"step": 90
},
{
"epoch": 0.22566995768688294,
"grad_norm": 0.6015227630260552,
"learning_rate": 4.975185951049947e-06,
"loss": 0.6274,
"step": 100
},
{
"epoch": 0.24823695345557123,
"grad_norm": 0.597054561284431,
"learning_rate": 4.972724815133302e-06,
"loss": 0.6183,
"step": 110
},
{
"epoch": 0.27080394922425954,
"grad_norm": 0.6470895698195512,
"learning_rate": 4.970267328047151e-06,
"loss": 0.6174,
"step": 120
},
{
"epoch": 0.2933709449929478,
"grad_norm": 0.8937084791804235,
"learning_rate": 4.9678134807842575e-06,
"loss": 0.6186,
"step": 130
},
{
"epoch": 0.3159379407616361,
"grad_norm": 0.6639999069888693,
"learning_rate": 4.965363264368484e-06,
"loss": 0.6135,
"step": 140
},
{
"epoch": 0.3385049365303244,
"grad_norm": 0.6656810650278343,
"learning_rate": 4.962916669854652e-06,
"loss": 0.614,
"step": 150
},
{
"epoch": 0.3610719322990127,
"grad_norm": 0.5853550395216752,
"learning_rate": 4.960473688328407e-06,
"loss": 0.616,
"step": 160
},
{
"epoch": 0.383638928067701,
"grad_norm": 0.6429269241253047,
"learning_rate": 4.95803431090608e-06,
"loss": 0.6121,
"step": 170
},
{
"epoch": 0.40620592383638926,
"grad_norm": 0.5934937346014221,
"learning_rate": 4.955598528734554e-06,
"loss": 0.6088,
"step": 180
},
{
"epoch": 0.4287729196050776,
"grad_norm": 0.6370662067963314,
"learning_rate": 4.953166332991125e-06,
"loss": 0.6033,
"step": 190
},
{
"epoch": 0.4513399153737659,
"grad_norm": 0.6456352721499912,
"learning_rate": 4.950737714883372e-06,
"loss": 0.6099,
"step": 200
},
{
"epoch": 0.47390691114245415,
"grad_norm": 0.5814512533107921,
"learning_rate": 4.948312665649022e-06,
"loss": 0.6063,
"step": 210
},
{
"epoch": 0.49647390691114246,
"grad_norm": 0.732199943548795,
"learning_rate": 4.945891176555817e-06,
"loss": 0.5982,
"step": 220
},
{
"epoch": 0.5190409026798307,
"grad_norm": 0.5712191962038494,
"learning_rate": 4.943473238901383e-06,
"loss": 0.6069,
"step": 230
},
{
"epoch": 0.5416078984485191,
"grad_norm": 0.6612925923310224,
"learning_rate": 4.941058844013094e-06,
"loss": 0.6058,
"step": 240
},
{
"epoch": 0.5641748942172073,
"grad_norm": 0.5880543259363735,
"learning_rate": 4.938647983247949e-06,
"loss": 0.6,
"step": 250
},
{
"epoch": 0.5867418899858956,
"grad_norm": 0.7220209248194411,
"learning_rate": 4.936240647992436e-06,
"loss": 0.6026,
"step": 260
},
{
"epoch": 0.609308885754584,
"grad_norm": 0.5655649436066078,
"learning_rate": 4.933836829662409e-06,
"loss": 0.598,
"step": 270
},
{
"epoch": 0.6318758815232722,
"grad_norm": 0.5523686182917099,
"learning_rate": 4.9314365197029475e-06,
"loss": 0.6047,
"step": 280
},
{
"epoch": 0.6544428772919605,
"grad_norm": 0.718258478379325,
"learning_rate": 4.9290397095882446e-06,
"loss": 0.5981,
"step": 290
},
{
"epoch": 0.6770098730606487,
"grad_norm": 0.6923839988383336,
"learning_rate": 4.9266463908214664e-06,
"loss": 0.6042,
"step": 300
},
{
"epoch": 0.6995768688293371,
"grad_norm": 0.537500360918552,
"learning_rate": 4.924256554934632e-06,
"loss": 0.5973,
"step": 310
},
{
"epoch": 0.7221438645980254,
"grad_norm": 0.5213007989989484,
"learning_rate": 4.9218701934884865e-06,
"loss": 0.6005,
"step": 320
},
{
"epoch": 0.7447108603667136,
"grad_norm": 0.570859069043083,
"learning_rate": 4.919487298072377e-06,
"loss": 0.596,
"step": 330
},
{
"epoch": 0.767277856135402,
"grad_norm": 0.6158715009590428,
"learning_rate": 4.917107860304125e-06,
"loss": 0.5955,
"step": 340
},
{
"epoch": 0.7898448519040903,
"grad_norm": 0.6426850162244057,
"learning_rate": 4.914731871829905e-06,
"loss": 0.597,
"step": 350
},
{
"epoch": 0.8124118476727785,
"grad_norm": 0.5762853195893453,
"learning_rate": 4.912359324324121e-06,
"loss": 0.596,
"step": 360
},
{
"epoch": 0.8349788434414669,
"grad_norm": 0.5838318091592569,
"learning_rate": 4.909990209489284e-06,
"loss": 0.6031,
"step": 370
},
{
"epoch": 0.8575458392101551,
"grad_norm": 0.5822722127512734,
"learning_rate": 4.907624519055888e-06,
"loss": 0.5998,
"step": 380
},
{
"epoch": 0.8801128349788434,
"grad_norm": 0.6211815812426689,
"learning_rate": 4.905262244782294e-06,
"loss": 0.5938,
"step": 390
},
{
"epoch": 0.9026798307475318,
"grad_norm": 0.5926993813948374,
"learning_rate": 4.902903378454601e-06,
"loss": 0.5961,
"step": 400
},
{
"epoch": 0.92524682651622,
"grad_norm": 0.6502046942563887,
"learning_rate": 4.900547911886537e-06,
"loss": 0.594,
"step": 410
},
{
"epoch": 0.9478138222849083,
"grad_norm": 0.6150531879942983,
"learning_rate": 4.898195836919327e-06,
"loss": 0.5947,
"step": 420
},
{
"epoch": 0.9703808180535967,
"grad_norm": 0.5693169684981371,
"learning_rate": 4.895847145421587e-06,
"loss": 0.5889,
"step": 430
},
{
"epoch": 0.9929478138222849,
"grad_norm": 0.6633453342653501,
"learning_rate": 4.893501829289195e-06,
"loss": 0.5937,
"step": 440
},
{
"epoch": 0.9997179125528914,
"eval_loss": 0.5915236473083496,
"eval_runtime": 687.4788,
"eval_samples_per_second": 17.369,
"eval_steps_per_second": 0.544,
"step": 443
},
{
"epoch": 1.0155148095909732,
"grad_norm": 0.7247787853500239,
"learning_rate": 4.891159880445185e-06,
"loss": 0.6015,
"step": 450
},
{
"epoch": 1.0380818053596614,
"grad_norm": 0.6068919916237175,
"learning_rate": 4.888821290839617e-06,
"loss": 0.5289,
"step": 460
},
{
"epoch": 1.0606488011283497,
"grad_norm": 0.60326825190498,
"learning_rate": 4.886486052449469e-06,
"loss": 0.5437,
"step": 470
},
{
"epoch": 1.0832157968970382,
"grad_norm": 0.5689189414243914,
"learning_rate": 4.8841541572785224e-06,
"loss": 0.5316,
"step": 480
},
{
"epoch": 1.1057827926657264,
"grad_norm": 0.6385824037992914,
"learning_rate": 4.881825597357242e-06,
"loss": 0.5417,
"step": 490
},
{
"epoch": 1.1283497884344147,
"grad_norm": 0.5854327072193312,
"learning_rate": 4.8795003647426654e-06,
"loss": 0.5404,
"step": 500
},
{
"epoch": 1.150916784203103,
"grad_norm": 0.5947076927155363,
"learning_rate": 4.877178451518289e-06,
"loss": 0.5415,
"step": 510
},
{
"epoch": 1.1734837799717912,
"grad_norm": 0.5719055022308829,
"learning_rate": 4.8748598497939494e-06,
"loss": 0.534,
"step": 520
},
{
"epoch": 1.1960507757404795,
"grad_norm": 0.6903559934708517,
"learning_rate": 4.872544551705718e-06,
"loss": 0.5404,
"step": 530
},
{
"epoch": 1.2186177715091677,
"grad_norm": 0.5635637424337575,
"learning_rate": 4.870232549415787e-06,
"loss": 0.5379,
"step": 540
},
{
"epoch": 1.2411847672778562,
"grad_norm": 0.596695858753005,
"learning_rate": 4.867923835112355e-06,
"loss": 0.5427,
"step": 550
},
{
"epoch": 1.2637517630465445,
"grad_norm": 0.6228671033057678,
"learning_rate": 4.865618401009519e-06,
"loss": 0.5443,
"step": 560
},
{
"epoch": 1.2863187588152327,
"grad_norm": 0.5663816262212089,
"learning_rate": 4.863316239347163e-06,
"loss": 0.5416,
"step": 570
},
{
"epoch": 1.308885754583921,
"grad_norm": 0.6145942738735419,
"learning_rate": 4.861017342390847e-06,
"loss": 0.5417,
"step": 580
},
{
"epoch": 1.3314527503526092,
"grad_norm": 0.5647488472588417,
"learning_rate": 4.858721702431704e-06,
"loss": 0.5417,
"step": 590
},
{
"epoch": 1.3540197461212977,
"grad_norm": 0.5686060037431164,
"learning_rate": 4.856429311786322e-06,
"loss": 0.5469,
"step": 600
},
{
"epoch": 1.376586741889986,
"grad_norm": 0.5476278402050497,
"learning_rate": 4.8541401627966426e-06,
"loss": 0.5395,
"step": 610
},
{
"epoch": 1.3991537376586742,
"grad_norm": 0.5651482323611746,
"learning_rate": 4.85185424782985e-06,
"loss": 0.5437,
"step": 620
},
{
"epoch": 1.4217207334273625,
"grad_norm": 0.5818373440698466,
"learning_rate": 4.8495715592782715e-06,
"loss": 0.5417,
"step": 630
},
{
"epoch": 1.4442877291960508,
"grad_norm": 0.6943754325038471,
"learning_rate": 4.847292089559258e-06,
"loss": 0.5415,
"step": 640
},
{
"epoch": 1.466854724964739,
"grad_norm": 0.5809337717523729,
"learning_rate": 4.845015831115093e-06,
"loss": 0.5422,
"step": 650
},
{
"epoch": 1.4894217207334273,
"grad_norm": 0.6366781600380568,
"learning_rate": 4.842742776412874e-06,
"loss": 0.546,
"step": 660
},
{
"epoch": 1.5119887165021155,
"grad_norm": 0.5848333620078674,
"learning_rate": 4.840472917944417e-06,
"loss": 0.5407,
"step": 670
},
{
"epoch": 1.5345557122708038,
"grad_norm": 0.5539620278393851,
"learning_rate": 4.838206248226147e-06,
"loss": 0.5348,
"step": 680
},
{
"epoch": 1.5571227080394923,
"grad_norm": 0.595515096482354,
"learning_rate": 4.835942759799002e-06,
"loss": 0.537,
"step": 690
},
{
"epoch": 1.5796897038081805,
"grad_norm": 0.5765265849661396,
"learning_rate": 4.833682445228318e-06,
"loss": 0.5411,
"step": 700
},
{
"epoch": 1.6022566995768688,
"grad_norm": 0.5571197050761847,
"learning_rate": 4.831425297103738e-06,
"loss": 0.5438,
"step": 710
},
{
"epoch": 1.6248236953455573,
"grad_norm": 0.5792740919003788,
"learning_rate": 4.829171308039099e-06,
"loss": 0.5518,
"step": 720
},
{
"epoch": 1.6473906911142455,
"grad_norm": 0.5636302795085814,
"learning_rate": 4.826920470672344e-06,
"loss": 0.5352,
"step": 730
},
{
"epoch": 1.6699576868829338,
"grad_norm": 0.7262762150102657,
"learning_rate": 4.824672777665406e-06,
"loss": 0.5511,
"step": 740
},
{
"epoch": 1.692524682651622,
"grad_norm": 0.6203455822074458,
"learning_rate": 4.822428221704122e-06,
"loss": 0.5423,
"step": 750
},
{
"epoch": 1.7150916784203103,
"grad_norm": 0.6314021891275363,
"learning_rate": 4.820186795498119e-06,
"loss": 0.5413,
"step": 760
},
{
"epoch": 1.7376586741889986,
"grad_norm": 0.6244754158170291,
"learning_rate": 4.817948491780728e-06,
"loss": 0.5419,
"step": 770
},
{
"epoch": 1.7602256699576868,
"grad_norm": 0.6285912361574367,
"learning_rate": 4.815713303308872e-06,
"loss": 0.5377,
"step": 780
},
{
"epoch": 1.782792665726375,
"grad_norm": 0.5663852190944293,
"learning_rate": 4.813481222862981e-06,
"loss": 0.5414,
"step": 790
},
{
"epoch": 1.8053596614950633,
"grad_norm": 0.6930428334287735,
"learning_rate": 4.811252243246881e-06,
"loss": 0.5365,
"step": 800
},
{
"epoch": 1.8279266572637518,
"grad_norm": 0.524109251310443,
"learning_rate": 4.809026357287709e-06,
"loss": 0.547,
"step": 810
},
{
"epoch": 1.85049365303244,
"grad_norm": 0.6577534946547546,
"learning_rate": 4.806803557835802e-06,
"loss": 0.5343,
"step": 820
},
{
"epoch": 1.8730606488011283,
"grad_norm": 0.561047334945624,
"learning_rate": 4.804583837764616e-06,
"loss": 0.5463,
"step": 830
},
{
"epoch": 1.8956276445698168,
"grad_norm": 0.5709254970367929,
"learning_rate": 4.802367189970616e-06,
"loss": 0.5463,
"step": 840
},
{
"epoch": 1.918194640338505,
"grad_norm": 0.5539876831180034,
"learning_rate": 4.8001536073731936e-06,
"loss": 0.5342,
"step": 850
},
{
"epoch": 1.9407616361071933,
"grad_norm": 0.6261176438608845,
"learning_rate": 4.797943082914558e-06,
"loss": 0.5393,
"step": 860
},
{
"epoch": 1.9633286318758816,
"grad_norm": 0.6784873891334788,
"learning_rate": 4.795735609559657e-06,
"loss": 0.5457,
"step": 870
},
{
"epoch": 1.9858956276445698,
"grad_norm": 0.6032255886331123,
"learning_rate": 4.793531180296065e-06,
"loss": 0.5439,
"step": 880
},
{
"epoch": 1.9994358251057829,
"eval_loss": 0.5869531631469727,
"eval_runtime": 690.9414,
"eval_samples_per_second": 17.282,
"eval_steps_per_second": 0.541,
"step": 886
},
{
"epoch": 2.008462623413258,
"grad_norm": 0.9949702529495333,
"learning_rate": 4.7913297881339085e-06,
"loss": 0.5679,
"step": 890
},
{
"epoch": 2.0310296191819464,
"grad_norm": 0.7201678126466289,
"learning_rate": 4.789131426105757e-06,
"loss": 0.4798,
"step": 900
},
{
"epoch": 2.0535966149506346,
"grad_norm": 0.674929621297509,
"learning_rate": 4.786936087266542e-06,
"loss": 0.4817,
"step": 910
},
{
"epoch": 2.076163610719323,
"grad_norm": 0.6070882301942089,
"learning_rate": 4.784743764693455e-06,
"loss": 0.4741,
"step": 920
},
{
"epoch": 2.098730606488011,
"grad_norm": 0.6063712760118354,
"learning_rate": 4.7825544514858655e-06,
"loss": 0.4814,
"step": 930
},
{
"epoch": 2.1212976022566994,
"grad_norm": 0.5947320209763772,
"learning_rate": 4.780368140765222e-06,
"loss": 0.4856,
"step": 940
},
{
"epoch": 2.143864598025388,
"grad_norm": 0.6280872907961158,
"learning_rate": 4.778184825674966e-06,
"loss": 0.4873,
"step": 950
},
{
"epoch": 2.1664315937940763,
"grad_norm": 0.6188168725190593,
"learning_rate": 4.776004499380439e-06,
"loss": 0.4837,
"step": 960
},
{
"epoch": 2.1889985895627646,
"grad_norm": 0.7603271106624941,
"learning_rate": 4.773827155068793e-06,
"loss": 0.4818,
"step": 970
},
{
"epoch": 2.211565585331453,
"grad_norm": 0.5975717089201273,
"learning_rate": 4.771652785948902e-06,
"loss": 0.4767,
"step": 980
},
{
"epoch": 2.234132581100141,
"grad_norm": 0.5809905836332211,
"learning_rate": 4.769481385251275e-06,
"loss": 0.4765,
"step": 990
},
{
"epoch": 2.2566995768688294,
"grad_norm": 0.5843693488329456,
"learning_rate": 4.767312946227961e-06,
"loss": 0.4855,
"step": 1000
},
{
"epoch": 2.2792665726375176,
"grad_norm": 0.6027690911140321,
"learning_rate": 4.765147462152471e-06,
"loss": 0.4919,
"step": 1010
},
{
"epoch": 2.301833568406206,
"grad_norm": 0.7192183771609572,
"learning_rate": 4.762984926319677e-06,
"loss": 0.4861,
"step": 1020
},
{
"epoch": 2.324400564174894,
"grad_norm": 0.645690672789835,
"learning_rate": 4.760825332045738e-06,
"loss": 0.4905,
"step": 1030
},
{
"epoch": 2.3469675599435824,
"grad_norm": 0.6311156323408128,
"learning_rate": 4.758668672668006e-06,
"loss": 0.4883,
"step": 1040
},
{
"epoch": 2.3695345557122707,
"grad_norm": 0.6045990122500915,
"learning_rate": 4.756514941544941e-06,
"loss": 0.4861,
"step": 1050
},
{
"epoch": 2.392101551480959,
"grad_norm": 0.5914240795148143,
"learning_rate": 4.754364132056025e-06,
"loss": 0.4855,
"step": 1060
},
{
"epoch": 2.414668547249647,
"grad_norm": 0.6967797148982833,
"learning_rate": 4.752216237601676e-06,
"loss": 0.4869,
"step": 1070
},
{
"epoch": 2.4372355430183354,
"grad_norm": 0.7069219663540367,
"learning_rate": 4.750071251603165e-06,
"loss": 0.49,
"step": 1080
},
{
"epoch": 2.459802538787024,
"grad_norm": 0.593026634099332,
"learning_rate": 4.7479291675025314e-06,
"loss": 0.488,
"step": 1090
},
{
"epoch": 2.4823695345557124,
"grad_norm": 0.6133731329239802,
"learning_rate": 4.745789978762496e-06,
"loss": 0.491,
"step": 1100
},
{
"epoch": 2.5049365303244007,
"grad_norm": 0.5823330923339051,
"learning_rate": 4.7436536788663765e-06,
"loss": 0.4943,
"step": 1110
},
{
"epoch": 2.527503526093089,
"grad_norm": 0.6537812247886863,
"learning_rate": 4.74152026131801e-06,
"loss": 0.4848,
"step": 1120
},
{
"epoch": 2.550070521861777,
"grad_norm": 0.6021010883789748,
"learning_rate": 4.739389719641665e-06,
"loss": 0.4899,
"step": 1130
},
{
"epoch": 2.5726375176304654,
"grad_norm": 0.5782734444486691,
"learning_rate": 4.7372620473819615e-06,
"loss": 0.4883,
"step": 1140
},
{
"epoch": 2.5952045133991537,
"grad_norm": 0.6403059046505779,
"learning_rate": 4.735137238103785e-06,
"loss": 0.489,
"step": 1150
},
{
"epoch": 2.617771509167842,
"grad_norm": 0.6255079366673328,
"learning_rate": 4.7330152853922064e-06,
"loss": 0.4927,
"step": 1160
},
{
"epoch": 2.64033850493653,
"grad_norm": 0.6422810171594653,
"learning_rate": 4.730896182852409e-06,
"loss": 0.4817,
"step": 1170
},
{
"epoch": 2.6629055007052185,
"grad_norm": 0.6348937375312369,
"learning_rate": 4.72877992410959e-06,
"loss": 0.4924,
"step": 1180
},
{
"epoch": 2.685472496473907,
"grad_norm": 0.6020419858221361,
"learning_rate": 4.7266665028088985e-06,
"loss": 0.4932,
"step": 1190
},
{
"epoch": 2.7080394922425954,
"grad_norm": 0.5849628799150851,
"learning_rate": 4.72455591261534e-06,
"loss": 0.4917,
"step": 1200
},
{
"epoch": 2.7306064880112837,
"grad_norm": 0.5754324505606723,
"learning_rate": 4.722448147213712e-06,
"loss": 0.493,
"step": 1210
},
{
"epoch": 2.753173483779972,
"grad_norm": 0.5734988589501125,
"learning_rate": 4.720343200308507e-06,
"loss": 0.4896,
"step": 1220
},
{
"epoch": 2.77574047954866,
"grad_norm": 0.5710201726782703,
"learning_rate": 4.7182410656238484e-06,
"loss": 0.4961,
"step": 1230
},
{
"epoch": 2.7983074753173485,
"grad_norm": 0.609980767169798,
"learning_rate": 4.716141736903407e-06,
"loss": 0.4944,
"step": 1240
},
{
"epoch": 2.8208744710860367,
"grad_norm": 0.5680473087970347,
"learning_rate": 4.714045207910318e-06,
"loss": 0.4945,
"step": 1250
},
{
"epoch": 2.843441466854725,
"grad_norm": 0.5545814696527704,
"learning_rate": 4.71195147242711e-06,
"loss": 0.4963,
"step": 1260
},
{
"epoch": 2.8660084626234132,
"grad_norm": 0.5701759707556856,
"learning_rate": 4.709860524255622e-06,
"loss": 0.4932,
"step": 1270
},
{
"epoch": 2.8885754583921015,
"grad_norm": 0.5598897964453792,
"learning_rate": 4.707772357216934e-06,
"loss": 0.4912,
"step": 1280
},
{
"epoch": 2.9111424541607898,
"grad_norm": 0.6106914180921081,
"learning_rate": 4.705686965151282e-06,
"loss": 0.4949,
"step": 1290
},
{
"epoch": 2.933709449929478,
"grad_norm": 0.6259987776719272,
"learning_rate": 4.703604341917987e-06,
"loss": 0.4908,
"step": 1300
},
{
"epoch": 2.9562764456981663,
"grad_norm": 0.6635742267932128,
"learning_rate": 4.701524481395374e-06,
"loss": 0.4946,
"step": 1310
},
{
"epoch": 2.9788434414668545,
"grad_norm": 0.5993621544317691,
"learning_rate": 4.699447377480703e-06,
"loss": 0.4909,
"step": 1320
},
{
"epoch": 2.9991537376586743,
"eval_loss": 0.5994867086410522,
"eval_runtime": 687.8101,
"eval_samples_per_second": 17.361,
"eval_steps_per_second": 0.544,
"step": 1329
},
{
"epoch": 2.9991537376586743,
"step": 1329,
"total_flos": 5064195066298368.0,
"train_loss": 0.5484652002562788,
"train_runtime": 120694.0629,
"train_samples_per_second": 5.639,
"train_steps_per_second": 0.011
}
],
"logging_steps": 10,
"max_steps": 1329,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5064195066298368.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}