{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1269,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02364066193853428,
"grad_norm": 2.72535029902805,
"learning_rate": 5e-06,
"loss": 0.8948,
"step": 10
},
{
"epoch": 0.04728132387706856,
"grad_norm": 1.349869757361133,
"learning_rate": 5e-06,
"loss": 0.7791,
"step": 20
},
{
"epoch": 0.07092198581560284,
"grad_norm": 1.1982665750771995,
"learning_rate": 5e-06,
"loss": 0.7498,
"step": 30
},
{
"epoch": 0.09456264775413711,
"grad_norm": 1.3901014180682558,
"learning_rate": 5e-06,
"loss": 0.7383,
"step": 40
},
{
"epoch": 0.1182033096926714,
"grad_norm": 1.6352055235262863,
"learning_rate": 5e-06,
"loss": 0.7156,
"step": 50
},
{
"epoch": 0.14184397163120568,
"grad_norm": 0.9228519705928501,
"learning_rate": 5e-06,
"loss": 0.7113,
"step": 60
},
{
"epoch": 0.16548463356973994,
"grad_norm": 0.6109421695741598,
"learning_rate": 5e-06,
"loss": 0.7015,
"step": 70
},
{
"epoch": 0.18912529550827423,
"grad_norm": 1.1570191508750998,
"learning_rate": 5e-06,
"loss": 0.6928,
"step": 80
},
{
"epoch": 0.2127659574468085,
"grad_norm": 0.5479664575799189,
"learning_rate": 5e-06,
"loss": 0.6881,
"step": 90
},
{
"epoch": 0.2364066193853428,
"grad_norm": 0.6511091776660864,
"learning_rate": 5e-06,
"loss": 0.6791,
"step": 100
},
{
"epoch": 0.26004728132387706,
"grad_norm": 0.48965953407572976,
"learning_rate": 5e-06,
"loss": 0.6875,
"step": 110
},
{
"epoch": 0.28368794326241137,
"grad_norm": 0.5366385574454531,
"learning_rate": 5e-06,
"loss": 0.6943,
"step": 120
},
{
"epoch": 0.3073286052009456,
"grad_norm": 0.5940009124876445,
"learning_rate": 5e-06,
"loss": 0.6732,
"step": 130
},
{
"epoch": 0.3309692671394799,
"grad_norm": 0.5363561097267587,
"learning_rate": 5e-06,
"loss": 0.6705,
"step": 140
},
{
"epoch": 0.3546099290780142,
"grad_norm": 0.47312672873118417,
"learning_rate": 5e-06,
"loss": 0.6736,
"step": 150
},
{
"epoch": 0.37825059101654845,
"grad_norm": 0.5936177797142055,
"learning_rate": 5e-06,
"loss": 0.6714,
"step": 160
},
{
"epoch": 0.40189125295508277,
"grad_norm": 0.559796603492257,
"learning_rate": 5e-06,
"loss": 0.677,
"step": 170
},
{
"epoch": 0.425531914893617,
"grad_norm": 0.6712065525809435,
"learning_rate": 5e-06,
"loss": 0.6739,
"step": 180
},
{
"epoch": 0.4491725768321513,
"grad_norm": 0.6276897126428631,
"learning_rate": 5e-06,
"loss": 0.6712,
"step": 190
},
{
"epoch": 0.4728132387706856,
"grad_norm": 0.5058454380091781,
"learning_rate": 5e-06,
"loss": 0.6704,
"step": 200
},
{
"epoch": 0.49645390070921985,
"grad_norm": 0.4856083261066063,
"learning_rate": 5e-06,
"loss": 0.6734,
"step": 210
},
{
"epoch": 0.5200945626477541,
"grad_norm": 0.47853309599062016,
"learning_rate": 5e-06,
"loss": 0.6679,
"step": 220
},
{
"epoch": 0.5437352245862884,
"grad_norm": 0.5028725498074064,
"learning_rate": 5e-06,
"loss": 0.6603,
"step": 230
},
{
"epoch": 0.5673758865248227,
"grad_norm": 0.573425790046604,
"learning_rate": 5e-06,
"loss": 0.6666,
"step": 240
},
{
"epoch": 0.5910165484633569,
"grad_norm": 0.5450174649743487,
"learning_rate": 5e-06,
"loss": 0.663,
"step": 250
},
{
"epoch": 0.6146572104018913,
"grad_norm": 0.6439202377303648,
"learning_rate": 5e-06,
"loss": 0.6619,
"step": 260
},
{
"epoch": 0.6382978723404256,
"grad_norm": 0.6134638277704878,
"learning_rate": 5e-06,
"loss": 0.6537,
"step": 270
},
{
"epoch": 0.6619385342789598,
"grad_norm": 0.5994186482549169,
"learning_rate": 5e-06,
"loss": 0.6618,
"step": 280
},
{
"epoch": 0.6855791962174941,
"grad_norm": 0.5012460436454785,
"learning_rate": 5e-06,
"loss": 0.6608,
"step": 290
},
{
"epoch": 0.7092198581560284,
"grad_norm": 0.5441826630708447,
"learning_rate": 5e-06,
"loss": 0.66,
"step": 300
},
{
"epoch": 0.7328605200945626,
"grad_norm": 0.522329425801996,
"learning_rate": 5e-06,
"loss": 0.6568,
"step": 310
},
{
"epoch": 0.7565011820330969,
"grad_norm": 0.4758083660420685,
"learning_rate": 5e-06,
"loss": 0.6554,
"step": 320
},
{
"epoch": 0.7801418439716312,
"grad_norm": 0.49940764507627,
"learning_rate": 5e-06,
"loss": 0.6566,
"step": 330
},
{
"epoch": 0.8037825059101655,
"grad_norm": 0.6679446252287796,
"learning_rate": 5e-06,
"loss": 0.6604,
"step": 340
},
{
"epoch": 0.8274231678486997,
"grad_norm": 0.4668378880080796,
"learning_rate": 5e-06,
"loss": 0.6542,
"step": 350
},
{
"epoch": 0.851063829787234,
"grad_norm": 0.6341578495542675,
"learning_rate": 5e-06,
"loss": 0.6517,
"step": 360
},
{
"epoch": 0.8747044917257684,
"grad_norm": 0.4656594360564299,
"learning_rate": 5e-06,
"loss": 0.6535,
"step": 370
},
{
"epoch": 0.8983451536643026,
"grad_norm": 0.44357749005248587,
"learning_rate": 5e-06,
"loss": 0.6606,
"step": 380
},
{
"epoch": 0.9219858156028369,
"grad_norm": 0.4530641632992077,
"learning_rate": 5e-06,
"loss": 0.6507,
"step": 390
},
{
"epoch": 0.9456264775413712,
"grad_norm": 0.5037838223896282,
"learning_rate": 5e-06,
"loss": 0.6555,
"step": 400
},
{
"epoch": 0.9692671394799054,
"grad_norm": 0.6623305884354875,
"learning_rate": 5e-06,
"loss": 0.6534,
"step": 410
},
{
"epoch": 0.9929078014184397,
"grad_norm": 0.43978357483041475,
"learning_rate": 5e-06,
"loss": 0.6503,
"step": 420
},
{
"epoch": 1.0,
"eval_loss": 0.6491107940673828,
"eval_runtime": 40.7764,
"eval_samples_per_second": 279.304,
"eval_steps_per_second": 1.104,
"step": 423
},
{
"epoch": 1.016548463356974,
"grad_norm": 0.6718862738139351,
"learning_rate": 5e-06,
"loss": 0.626,
"step": 430
},
{
"epoch": 1.0401891252955082,
"grad_norm": 0.5240286124375526,
"learning_rate": 5e-06,
"loss": 0.6033,
"step": 440
},
{
"epoch": 1.0638297872340425,
"grad_norm": 0.5544933380864768,
"learning_rate": 5e-06,
"loss": 0.6171,
"step": 450
},
{
"epoch": 1.0874704491725768,
"grad_norm": 0.5500112360385622,
"learning_rate": 5e-06,
"loss": 0.6138,
"step": 460
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.6555691233512699,
"learning_rate": 5e-06,
"loss": 0.6114,
"step": 470
},
{
"epoch": 1.1347517730496455,
"grad_norm": 0.460930139599731,
"learning_rate": 5e-06,
"loss": 0.6141,
"step": 480
},
{
"epoch": 1.1583924349881798,
"grad_norm": 0.5188774400467296,
"learning_rate": 5e-06,
"loss": 0.6158,
"step": 490
},
{
"epoch": 1.1820330969267139,
"grad_norm": 0.4640360714483605,
"learning_rate": 5e-06,
"loss": 0.6154,
"step": 500
},
{
"epoch": 1.2056737588652482,
"grad_norm": 0.6093764843000187,
"learning_rate": 5e-06,
"loss": 0.6093,
"step": 510
},
{
"epoch": 1.2293144208037825,
"grad_norm": 0.6555953339288757,
"learning_rate": 5e-06,
"loss": 0.6096,
"step": 520
},
{
"epoch": 1.2529550827423168,
"grad_norm": 0.5237254449671765,
"learning_rate": 5e-06,
"loss": 0.6133,
"step": 530
},
{
"epoch": 1.2765957446808511,
"grad_norm": 0.6077697942607891,
"learning_rate": 5e-06,
"loss": 0.6087,
"step": 540
},
{
"epoch": 1.3002364066193852,
"grad_norm": 0.4959290366758788,
"learning_rate": 5e-06,
"loss": 0.603,
"step": 550
},
{
"epoch": 1.3238770685579198,
"grad_norm": 0.5120427167213613,
"learning_rate": 5e-06,
"loss": 0.6104,
"step": 560
},
{
"epoch": 1.3475177304964538,
"grad_norm": 0.467013186042987,
"learning_rate": 5e-06,
"loss": 0.6095,
"step": 570
},
{
"epoch": 1.3711583924349882,
"grad_norm": 0.49118088063072135,
"learning_rate": 5e-06,
"loss": 0.6166,
"step": 580
},
{
"epoch": 1.3947990543735225,
"grad_norm": 0.48826248333502054,
"learning_rate": 5e-06,
"loss": 0.6055,
"step": 590
},
{
"epoch": 1.4184397163120568,
"grad_norm": 0.5012542755180496,
"learning_rate": 5e-06,
"loss": 0.6091,
"step": 600
},
{
"epoch": 1.442080378250591,
"grad_norm": 0.41199293127019976,
"learning_rate": 5e-06,
"loss": 0.6111,
"step": 610
},
{
"epoch": 1.4657210401891252,
"grad_norm": 0.5227856582567488,
"learning_rate": 5e-06,
"loss": 0.6094,
"step": 620
},
{
"epoch": 1.4893617021276595,
"grad_norm": 0.4396680922197246,
"learning_rate": 5e-06,
"loss": 0.6053,
"step": 630
},
{
"epoch": 1.5130023640661938,
"grad_norm": 0.4667905586119019,
"learning_rate": 5e-06,
"loss": 0.6083,
"step": 640
},
{
"epoch": 1.5366430260047281,
"grad_norm": 0.4909469537010317,
"learning_rate": 5e-06,
"loss": 0.6047,
"step": 650
},
{
"epoch": 1.5602836879432624,
"grad_norm": 0.44596683795863395,
"learning_rate": 5e-06,
"loss": 0.612,
"step": 660
},
{
"epoch": 1.5839243498817965,
"grad_norm": 0.5717586109502525,
"learning_rate": 5e-06,
"loss": 0.6095,
"step": 670
},
{
"epoch": 1.607565011820331,
"grad_norm": 0.4490571190501361,
"learning_rate": 5e-06,
"loss": 0.6085,
"step": 680
},
{
"epoch": 1.6312056737588652,
"grad_norm": 0.5286431637266866,
"learning_rate": 5e-06,
"loss": 0.6074,
"step": 690
},
{
"epoch": 1.6548463356973995,
"grad_norm": 0.5173624908943171,
"learning_rate": 5e-06,
"loss": 0.6038,
"step": 700
},
{
"epoch": 1.6784869976359338,
"grad_norm": 0.43453587339422367,
"learning_rate": 5e-06,
"loss": 0.6128,
"step": 710
},
{
"epoch": 1.702127659574468,
"grad_norm": 0.4902717681255394,
"learning_rate": 5e-06,
"loss": 0.6036,
"step": 720
},
{
"epoch": 1.7257683215130024,
"grad_norm": 0.4350343628276203,
"learning_rate": 5e-06,
"loss": 0.605,
"step": 730
},
{
"epoch": 1.7494089834515365,
"grad_norm": 0.528540441581878,
"learning_rate": 5e-06,
"loss": 0.6095,
"step": 740
},
{
"epoch": 1.773049645390071,
"grad_norm": 0.600684912479072,
"learning_rate": 5e-06,
"loss": 0.6106,
"step": 750
},
{
"epoch": 1.7966903073286051,
"grad_norm": 0.5051323358574029,
"learning_rate": 5e-06,
"loss": 0.6091,
"step": 760
},
{
"epoch": 1.8203309692671394,
"grad_norm": 0.47055245921215216,
"learning_rate": 5e-06,
"loss": 0.6052,
"step": 770
},
{
"epoch": 1.8439716312056738,
"grad_norm": 0.40634667845025374,
"learning_rate": 5e-06,
"loss": 0.6077,
"step": 780
},
{
"epoch": 1.867612293144208,
"grad_norm": 0.42084763446520207,
"learning_rate": 5e-06,
"loss": 0.6101,
"step": 790
},
{
"epoch": 1.8912529550827424,
"grad_norm": 0.4874640354448252,
"learning_rate": 5e-06,
"loss": 0.6064,
"step": 800
},
{
"epoch": 1.9148936170212765,
"grad_norm": 0.4503284940019477,
"learning_rate": 5e-06,
"loss": 0.6057,
"step": 810
},
{
"epoch": 1.938534278959811,
"grad_norm": 0.5426691149205667,
"learning_rate": 5e-06,
"loss": 0.6066,
"step": 820
},
{
"epoch": 1.962174940898345,
"grad_norm": 0.44537977097090253,
"learning_rate": 5e-06,
"loss": 0.6082,
"step": 830
},
{
"epoch": 1.9858156028368794,
"grad_norm": 0.47774839135537583,
"learning_rate": 5e-06,
"loss": 0.602,
"step": 840
},
{
"epoch": 2.0,
"eval_loss": 0.6393836736679077,
"eval_runtime": 41.0339,
"eval_samples_per_second": 277.551,
"eval_steps_per_second": 1.097,
"step": 846
},
{
"epoch": 2.0094562647754137,
"grad_norm": 0.6118720101139726,
"learning_rate": 5e-06,
"loss": 0.5868,
"step": 850
},
{
"epoch": 2.033096926713948,
"grad_norm": 0.5572904299838869,
"learning_rate": 5e-06,
"loss": 0.5582,
"step": 860
},
{
"epoch": 2.0567375886524824,
"grad_norm": 0.5303184511401988,
"learning_rate": 5e-06,
"loss": 0.5637,
"step": 870
},
{
"epoch": 2.0803782505910164,
"grad_norm": 0.46454295303538246,
"learning_rate": 5e-06,
"loss": 0.5592,
"step": 880
},
{
"epoch": 2.104018912529551,
"grad_norm": 0.6361956702505728,
"learning_rate": 5e-06,
"loss": 0.562,
"step": 890
},
{
"epoch": 2.127659574468085,
"grad_norm": 0.7820428278668116,
"learning_rate": 5e-06,
"loss": 0.5611,
"step": 900
},
{
"epoch": 2.1513002364066196,
"grad_norm": 0.45298061775995824,
"learning_rate": 5e-06,
"loss": 0.562,
"step": 910
},
{
"epoch": 2.1749408983451537,
"grad_norm": 0.5810391810087593,
"learning_rate": 5e-06,
"loss": 0.566,
"step": 920
},
{
"epoch": 2.198581560283688,
"grad_norm": 0.4908474714534316,
"learning_rate": 5e-06,
"loss": 0.5634,
"step": 930
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.46389513213427735,
"learning_rate": 5e-06,
"loss": 0.5634,
"step": 940
},
{
"epoch": 2.2458628841607564,
"grad_norm": 0.6367614359838906,
"learning_rate": 5e-06,
"loss": 0.566,
"step": 950
},
{
"epoch": 2.269503546099291,
"grad_norm": 0.6141281239807579,
"learning_rate": 5e-06,
"loss": 0.5674,
"step": 960
},
{
"epoch": 2.293144208037825,
"grad_norm": 0.542025677626535,
"learning_rate": 5e-06,
"loss": 0.5651,
"step": 970
},
{
"epoch": 2.3167848699763596,
"grad_norm": 0.551857948487274,
"learning_rate": 5e-06,
"loss": 0.5661,
"step": 980
},
{
"epoch": 2.3404255319148937,
"grad_norm": 0.5734053167546258,
"learning_rate": 5e-06,
"loss": 0.5656,
"step": 990
},
{
"epoch": 2.3640661938534278,
"grad_norm": 0.5234453524143412,
"learning_rate": 5e-06,
"loss": 0.5741,
"step": 1000
},
{
"epoch": 2.3877068557919623,
"grad_norm": 0.45374696937088277,
"learning_rate": 5e-06,
"loss": 0.5671,
"step": 1010
},
{
"epoch": 2.4113475177304964,
"grad_norm": 0.4880786886377219,
"learning_rate": 5e-06,
"loss": 0.5721,
"step": 1020
},
{
"epoch": 2.434988179669031,
"grad_norm": 0.5028949189688434,
"learning_rate": 5e-06,
"loss": 0.5667,
"step": 1030
},
{
"epoch": 2.458628841607565,
"grad_norm": 0.49048839220420004,
"learning_rate": 5e-06,
"loss": 0.5643,
"step": 1040
},
{
"epoch": 2.482269503546099,
"grad_norm": 0.5197651600869932,
"learning_rate": 5e-06,
"loss": 0.5692,
"step": 1050
},
{
"epoch": 2.5059101654846336,
"grad_norm": 0.4874044145512749,
"learning_rate": 5e-06,
"loss": 0.5667,
"step": 1060
},
{
"epoch": 2.5295508274231677,
"grad_norm": 0.4664634995358468,
"learning_rate": 5e-06,
"loss": 0.5671,
"step": 1070
},
{
"epoch": 2.5531914893617023,
"grad_norm": 0.48656363327654745,
"learning_rate": 5e-06,
"loss": 0.574,
"step": 1080
},
{
"epoch": 2.5768321513002364,
"grad_norm": 0.5305320232208198,
"learning_rate": 5e-06,
"loss": 0.572,
"step": 1090
},
{
"epoch": 2.6004728132387704,
"grad_norm": 0.4910484019313719,
"learning_rate": 5e-06,
"loss": 0.5732,
"step": 1100
},
{
"epoch": 2.624113475177305,
"grad_norm": 0.5239661409291978,
"learning_rate": 5e-06,
"loss": 0.571,
"step": 1110
},
{
"epoch": 2.6477541371158395,
"grad_norm": 0.46223769122758984,
"learning_rate": 5e-06,
"loss": 0.5734,
"step": 1120
},
{
"epoch": 2.6713947990543736,
"grad_norm": 0.5142173641545875,
"learning_rate": 5e-06,
"loss": 0.5694,
"step": 1130
},
{
"epoch": 2.6950354609929077,
"grad_norm": 0.5294027287882332,
"learning_rate": 5e-06,
"loss": 0.5646,
"step": 1140
},
{
"epoch": 2.7186761229314422,
"grad_norm": 0.5331405829578174,
"learning_rate": 5e-06,
"loss": 0.5723,
"step": 1150
},
{
"epoch": 2.7423167848699763,
"grad_norm": 0.44857073429789307,
"learning_rate": 5e-06,
"loss": 0.5683,
"step": 1160
},
{
"epoch": 2.7659574468085104,
"grad_norm": 0.474611592323436,
"learning_rate": 5e-06,
"loss": 0.5786,
"step": 1170
},
{
"epoch": 2.789598108747045,
"grad_norm": 0.4950458376766123,
"learning_rate": 5e-06,
"loss": 0.5741,
"step": 1180
},
{
"epoch": 2.813238770685579,
"grad_norm": 0.5359701758991028,
"learning_rate": 5e-06,
"loss": 0.5728,
"step": 1190
},
{
"epoch": 2.8368794326241136,
"grad_norm": 0.43477013271067744,
"learning_rate": 5e-06,
"loss": 0.569,
"step": 1200
},
{
"epoch": 2.8605200945626477,
"grad_norm": 0.46358204115129703,
"learning_rate": 5e-06,
"loss": 0.5694,
"step": 1210
},
{
"epoch": 2.884160756501182,
"grad_norm": 0.4374097557825778,
"learning_rate": 5e-06,
"loss": 0.5689,
"step": 1220
},
{
"epoch": 2.9078014184397163,
"grad_norm": 0.5145958315798522,
"learning_rate": 5e-06,
"loss": 0.5738,
"step": 1230
},
{
"epoch": 2.9314420803782504,
"grad_norm": 0.5316891926922875,
"learning_rate": 5e-06,
"loss": 0.5748,
"step": 1240
},
{
"epoch": 2.955082742316785,
"grad_norm": 0.4296286799595667,
"learning_rate": 5e-06,
"loss": 0.5663,
"step": 1250
},
{
"epoch": 2.978723404255319,
"grad_norm": 0.5500793615243753,
"learning_rate": 5e-06,
"loss": 0.5681,
"step": 1260
},
{
"epoch": 3.0,
"eval_loss": 0.6408979892730713,
"eval_runtime": 40.071,
"eval_samples_per_second": 284.22,
"eval_steps_per_second": 1.123,
"step": 1269
},
{
"epoch": 3.0,
"step": 1269,
"total_flos": 2125622264463360.0,
"train_loss": 0.6192652515955475,
"train_runtime": 7838.8261,
"train_samples_per_second": 82.815,
"train_steps_per_second": 0.162
}
],
"logging_steps": 10,
"max_steps": 1269,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2125622264463360.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}