{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.994705174488568,
"eval_steps": 100,
"global_step": 518,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019253910950661854,
"grad_norm": 2.4829742908477783,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.3128,
"num_input_tokens_seen": 67662,
"step": 5
},
{
"epoch": 0.03850782190132371,
"grad_norm": 1.7913451194763184,
"learning_rate": 3.0769230769230774e-05,
"loss": 1.2288,
"num_input_tokens_seen": 133986,
"step": 10
},
{
"epoch": 0.05776173285198556,
"grad_norm": 1.1510990858078003,
"learning_rate": 4.615384615384615e-05,
"loss": 1.0893,
"num_input_tokens_seen": 202626,
"step": 15
},
{
"epoch": 0.07701564380264742,
"grad_norm": 0.7305760383605957,
"learning_rate": 6.153846153846155e-05,
"loss": 1.0228,
"num_input_tokens_seen": 264348,
"step": 20
},
{
"epoch": 0.09626955475330927,
"grad_norm": 0.84750896692276,
"learning_rate": 7.692307692307693e-05,
"loss": 0.9562,
"num_input_tokens_seen": 333586,
"step": 25
},
{
"epoch": 0.11552346570397112,
"grad_norm": 0.7335062623023987,
"learning_rate": 7.998695344323427e-05,
"loss": 0.9524,
"num_input_tokens_seen": 395406,
"step": 30
},
{
"epoch": 0.13477737665463296,
"grad_norm": 0.948805034160614,
"learning_rate": 7.993396639212965e-05,
"loss": 0.8957,
"num_input_tokens_seen": 467462,
"step": 35
},
{
"epoch": 0.15403128760529483,
"grad_norm": 0.7413954734802246,
"learning_rate": 7.984027740137819e-05,
"loss": 0.8737,
"num_input_tokens_seen": 543588,
"step": 40
},
{
"epoch": 0.17328519855595667,
"grad_norm": 0.7637290954589844,
"learning_rate": 7.970598196171101e-05,
"loss": 0.8871,
"num_input_tokens_seen": 613352,
"step": 45
},
{
"epoch": 0.19253910950661854,
"grad_norm": 0.761428952217102,
"learning_rate": 7.953121695121395e-05,
"loss": 0.8344,
"num_input_tokens_seen": 681318,
"step": 50
},
{
"epoch": 0.21179302045728038,
"grad_norm": 0.7143028974533081,
"learning_rate": 7.93161604958172e-05,
"loss": 0.8389,
"num_input_tokens_seen": 753680,
"step": 55
},
{
"epoch": 0.23104693140794225,
"grad_norm": 0.6739410758018494,
"learning_rate": 7.906103178774378e-05,
"loss": 0.8587,
"num_input_tokens_seen": 820164,
"step": 60
},
{
"epoch": 0.2503008423586041,
"grad_norm": 0.723397433757782,
"learning_rate": 7.876609086210207e-05,
"loss": 0.842,
"num_input_tokens_seen": 888138,
"step": 65
},
{
"epoch": 0.2695547533092659,
"grad_norm": 0.66191166639328,
"learning_rate": 7.843163833184991e-05,
"loss": 0.816,
"num_input_tokens_seen": 958314,
"step": 70
},
{
"epoch": 0.2888086642599278,
"grad_norm": 0.6319432854652405,
"learning_rate": 7.80580150814005e-05,
"loss": 0.8416,
"num_input_tokens_seen": 1030194,
"step": 75
},
{
"epoch": 0.30806257521058966,
"grad_norm": 0.7031162977218628,
"learning_rate": 7.764560191918247e-05,
"loss": 0.8179,
"num_input_tokens_seen": 1095740,
"step": 80
},
{
"epoch": 0.32731648616125153,
"grad_norm": 0.6904546618461609,
"learning_rate": 7.719481918950805e-05,
"loss": 0.802,
"num_input_tokens_seen": 1163928,
"step": 85
},
{
"epoch": 0.34657039711191334,
"grad_norm": 0.6699234247207642,
"learning_rate": 7.670612634414513e-05,
"loss": 0.8093,
"num_input_tokens_seen": 1233316,
"step": 90
},
{
"epoch": 0.3658243080625752,
"grad_norm": 0.6710209250450134,
"learning_rate": 7.618002147402967e-05,
"loss": 0.8349,
"num_input_tokens_seen": 1298016,
"step": 95
},
{
"epoch": 0.3850782190132371,
"grad_norm": 0.6232579946517944,
"learning_rate": 7.561704080159603e-05,
"loss": 0.8172,
"num_input_tokens_seen": 1364870,
"step": 100
},
{
"epoch": 0.3850782190132371,
"eval_loss": 0.6644170880317688,
"eval_runtime": 5.7343,
"eval_samples_per_second": 26.158,
"eval_steps_per_second": 13.079,
"num_input_tokens_seen": 1364870,
"step": 100
},
{
"epoch": 0.4043321299638989,
"grad_norm": 0.6807074546813965,
"learning_rate": 7.50177581342423e-05,
"loss": 0.8169,
"num_input_tokens_seen": 1437970,
"step": 105
},
{
"epoch": 0.42358604091456076,
"grad_norm": 0.6546260714530945,
"learning_rate": 7.438278427948805e-05,
"loss": 0.817,
"num_input_tokens_seen": 1512200,
"step": 110
},
{
"epoch": 0.4428399518652226,
"grad_norm": 0.7773868441581726,
"learning_rate": 7.371276642242027e-05,
"loss": 0.8038,
"num_input_tokens_seen": 1592192,
"step": 115
},
{
"epoch": 0.4620938628158845,
"grad_norm": 0.6242259740829468,
"learning_rate": 7.300838746606224e-05,
"loss": 0.7998,
"num_input_tokens_seen": 1656614,
"step": 120
},
{
"epoch": 0.4813477737665463,
"grad_norm": 0.5981661677360535,
"learning_rate": 7.227036533533753e-05,
"loss": 0.8069,
"num_input_tokens_seen": 1731572,
"step": 125
},
{
"epoch": 0.5006016847172082,
"grad_norm": 0.6073790788650513,
"learning_rate": 7.149945224533863e-05,
"loss": 0.7847,
"num_input_tokens_seen": 1797496,
"step": 130
},
{
"epoch": 0.51985559566787,
"grad_norm": 0.6308819055557251,
"learning_rate": 7.069643393464592e-05,
"loss": 0.805,
"num_input_tokens_seen": 1860442,
"step": 135
},
{
"epoch": 0.5391095066185319,
"grad_norm": 0.7135605216026306,
"learning_rate": 6.986212886447851e-05,
"loss": 0.7924,
"num_input_tokens_seen": 1924462,
"step": 140
},
{
"epoch": 0.5583634175691937,
"grad_norm": 0.6704626083374023,
"learning_rate": 6.899738738449313e-05,
"loss": 0.7926,
"num_input_tokens_seen": 1994714,
"step": 145
},
{
"epoch": 0.5776173285198556,
"grad_norm": 0.6295877695083618,
"learning_rate": 6.81030908660813e-05,
"loss": 0.7977,
"num_input_tokens_seen": 2064324,
"step": 150
},
{
"epoch": 0.5968712394705175,
"grad_norm": 0.5767641067504883,
"learning_rate": 6.718015080404824e-05,
"loss": 0.7912,
"num_input_tokens_seen": 2141534,
"step": 155
},
{
"epoch": 0.6161251504211793,
"grad_norm": 0.6175399422645569,
"learning_rate": 6.622950788758907e-05,
"loss": 0.7781,
"num_input_tokens_seen": 2205388,
"step": 160
},
{
"epoch": 0.6353790613718412,
"grad_norm": 0.7159196734428406,
"learning_rate": 6.525213104150908e-05,
"loss": 0.7733,
"num_input_tokens_seen": 2268804,
"step": 165
},
{
"epoch": 0.6546329723225031,
"grad_norm": 0.6217786073684692,
"learning_rate": 6.424901643866553e-05,
"loss": 0.7842,
"num_input_tokens_seen": 2342992,
"step": 170
},
{
"epoch": 0.6738868832731648,
"grad_norm": 0.7792349457740784,
"learning_rate": 6.32211864846372e-05,
"loss": 0.7876,
"num_input_tokens_seen": 2412156,
"step": 175
},
{
"epoch": 0.6931407942238267,
"grad_norm": 0.6551147699356079,
"learning_rate": 6.2169688775657e-05,
"loss": 0.7783,
"num_input_tokens_seen": 2482798,
"step": 180
},
{
"epoch": 0.7123947051744886,
"grad_norm": 0.6172861456871033,
"learning_rate": 6.109559503086918e-05,
"loss": 0.7666,
"num_input_tokens_seen": 2546172,
"step": 185
},
{
"epoch": 0.7316486161251504,
"grad_norm": 0.6506001949310303,
"learning_rate": 6.000000000000001e-05,
"loss": 0.7682,
"num_input_tokens_seen": 2611840,
"step": 190
},
{
"epoch": 0.7509025270758123,
"grad_norm": 0.582889974117279,
"learning_rate": 5.888402034755471e-05,
"loss": 0.763,
"num_input_tokens_seen": 2680788,
"step": 195
},
{
"epoch": 0.7701564380264742,
"grad_norm": 0.6109009385108948,
"learning_rate": 5.7748793514678394e-05,
"loss": 0.7664,
"num_input_tokens_seen": 2744502,
"step": 200
},
{
"epoch": 0.7701564380264742,
"eval_loss": 0.6271176338195801,
"eval_runtime": 6.1565,
"eval_samples_per_second": 24.365,
"eval_steps_per_second": 12.182,
"num_input_tokens_seen": 2744502,
"step": 200
},
{
"epoch": 0.789410348977136,
"grad_norm": 0.5261389017105103,
"learning_rate": 5.6595476559840604e-05,
"loss": 0.7619,
"num_input_tokens_seen": 2806924,
"step": 205
},
{
"epoch": 0.8086642599277978,
"grad_norm": 0.6377142667770386,
"learning_rate": 5.542524497952544e-05,
"loss": 0.7611,
"num_input_tokens_seen": 2880096,
"step": 210
},
{
"epoch": 0.8279181708784596,
"grad_norm": 0.6118225455284119,
"learning_rate": 5.4239291510128936e-05,
"loss": 0.7704,
"num_input_tokens_seen": 2953254,
"step": 215
},
{
"epoch": 0.8471720818291215,
"grad_norm": 0.623435378074646,
"learning_rate": 5.3038824912285144e-05,
"loss": 0.759,
"num_input_tokens_seen": 3018146,
"step": 220
},
{
"epoch": 0.8664259927797834,
"grad_norm": 0.7073508501052856,
"learning_rate": 5.182506873885964e-05,
"loss": 0.7612,
"num_input_tokens_seen": 3085348,
"step": 225
},
{
"epoch": 0.8856799037304453,
"grad_norm": 0.6435876488685608,
"learning_rate": 5.059926008786648e-05,
"loss": 0.7582,
"num_input_tokens_seen": 3155840,
"step": 230
},
{
"epoch": 0.9049338146811071,
"grad_norm": 0.7241389751434326,
"learning_rate": 4.93626483415794e-05,
"loss": 0.7502,
"num_input_tokens_seen": 3228332,
"step": 235
},
{
"epoch": 0.924187725631769,
"grad_norm": 0.583131730556488,
"learning_rate": 4.811649389312267e-05,
"loss": 0.7573,
"num_input_tokens_seen": 3292876,
"step": 240
},
{
"epoch": 0.9434416365824309,
"grad_norm": 0.5829302072525024,
"learning_rate": 4.686206686183914e-05,
"loss": 0.7535,
"num_input_tokens_seen": 3356266,
"step": 245
},
{
"epoch": 0.9626955475330926,
"grad_norm": 0.6448350548744202,
"learning_rate": 4.560064579874517e-05,
"loss": 0.738,
"num_input_tokens_seen": 3436068,
"step": 250
},
{
"epoch": 0.9819494584837545,
"grad_norm": 0.5773698687553406,
"learning_rate": 4.433351638339173e-05,
"loss": 0.7597,
"num_input_tokens_seen": 3507054,
"step": 255
},
{
"epoch": 1.0012033694344165,
"grad_norm": 0.6720272302627563,
"learning_rate": 4.306197011345984e-05,
"loss": 0.766,
"num_input_tokens_seen": 3578891,
"step": 260
},
{
"epoch": 1.0204572803850782,
"grad_norm": 0.594204306602478,
"learning_rate": 4.178730298842592e-05,
"loss": 0.6578,
"num_input_tokens_seen": 3657955,
"step": 265
},
{
"epoch": 1.03971119133574,
"grad_norm": 0.5978640913963318,
"learning_rate": 4.051081418863896e-05,
"loss": 0.6649,
"num_input_tokens_seen": 3726843,
"step": 270
},
{
"epoch": 1.058965102286402,
"grad_norm": 0.6276839971542358,
"learning_rate": 3.923380475115544e-05,
"loss": 0.6792,
"num_input_tokens_seen": 3790497,
"step": 275
},
{
"epoch": 1.0782190132370637,
"grad_norm": 0.6850570440292358,
"learning_rate": 3.7957576243681985e-05,
"loss": 0.6622,
"num_input_tokens_seen": 3865393,
"step": 280
},
{
"epoch": 1.0974729241877257,
"grad_norm": 0.5756229162216187,
"learning_rate": 3.668342943797706e-05,
"loss": 0.6531,
"num_input_tokens_seen": 3932181,
"step": 285
},
{
"epoch": 1.1167268351383874,
"grad_norm": 0.6151105761528015,
"learning_rate": 3.541266298406399e-05,
"loss": 0.673,
"num_input_tokens_seen": 4005021,
"step": 290
},
{
"epoch": 1.1359807460890494,
"grad_norm": 0.6357588171958923,
"learning_rate": 3.4146572086606516e-05,
"loss": 0.6616,
"num_input_tokens_seen": 4068541,
"step": 295
},
{
"epoch": 1.1552346570397112,
"grad_norm": 0.6211041212081909,
"learning_rate": 3.288644718479594e-05,
"loss": 0.6584,
"num_input_tokens_seen": 4137699,
"step": 300
},
{
"epoch": 1.1552346570397112,
"eval_loss": 0.6145883202552795,
"eval_runtime": 6.1807,
"eval_samples_per_second": 24.269,
"eval_steps_per_second": 12.134,
"num_input_tokens_seen": 4137699,
"step": 300
},
{
"epoch": 1.1744885679903732,
"grad_norm": 0.6347965598106384,
"learning_rate": 3.163357263709534e-05,
"loss": 0.6388,
"num_input_tokens_seen": 4206939,
"step": 305
},
{
"epoch": 1.193742478941035,
"grad_norm": 0.6002417802810669,
"learning_rate": 3.0389225412181572e-05,
"loss": 0.6841,
"num_input_tokens_seen": 4271739,
"step": 310
},
{
"epoch": 1.2129963898916967,
"grad_norm": 0.5483217239379883,
"learning_rate": 2.9154673787419124e-05,
"loss": 0.6681,
"num_input_tokens_seen": 4339421,
"step": 315
},
{
"epoch": 1.2322503008423586,
"grad_norm": 0.6174812912940979,
"learning_rate": 2.793117605619231e-05,
"loss": 0.6713,
"num_input_tokens_seen": 4398549,
"step": 320
},
{
"epoch": 1.2515042117930204,
"grad_norm": 0.6561586856842041,
"learning_rate": 2.6719979245413676e-05,
"loss": 0.6682,
"num_input_tokens_seen": 4467901,
"step": 325
},
{
"epoch": 1.2707581227436824,
"grad_norm": 0.617612361907959,
"learning_rate": 2.552231784451528e-05,
"loss": 0.6643,
"num_input_tokens_seen": 4525255,
"step": 330
},
{
"epoch": 1.2900120336943441,
"grad_norm": 0.6367979645729065,
"learning_rate": 2.4339412547218845e-05,
"loss": 0.6444,
"num_input_tokens_seen": 4597459,
"step": 335
},
{
"epoch": 1.3092659446450061,
"grad_norm": 0.5901610851287842,
"learning_rate": 2.3172469007366636e-05,
"loss": 0.6432,
"num_input_tokens_seen": 4678093,
"step": 340
},
{
"epoch": 1.3285198555956679,
"grad_norm": 0.6240355968475342,
"learning_rate": 2.20226766100817e-05,
"loss": 0.6558,
"num_input_tokens_seen": 4750971,
"step": 345
},
{
"epoch": 1.3477737665463296,
"grad_norm": 0.612914502620697,
"learning_rate": 2.089120725950948e-05,
"loss": 0.6506,
"num_input_tokens_seen": 4826861,
"step": 350
},
{
"epoch": 1.3670276774969916,
"grad_norm": 0.6090959906578064,
"learning_rate": 1.9779214184376857e-05,
"loss": 0.6545,
"num_input_tokens_seen": 4895159,
"step": 355
},
{
"epoch": 1.3862815884476534,
"grad_norm": 0.5601783990859985,
"learning_rate": 1.8687830762585378e-05,
"loss": 0.6535,
"num_input_tokens_seen": 4966107,
"step": 360
},
{
"epoch": 1.4055354993983153,
"grad_norm": 0.5911448001861572,
"learning_rate": 1.761816936603744e-05,
"loss": 0.6497,
"num_input_tokens_seen": 5031601,
"step": 365
},
{
"epoch": 1.424789410348977,
"grad_norm": 0.6376362442970276,
"learning_rate": 1.6571320226872208e-05,
"loss": 0.6616,
"num_input_tokens_seen": 5100787,
"step": 370
},
{
"epoch": 1.444043321299639,
"grad_norm": 0.5783751010894775,
"learning_rate": 1.5548350326267135e-05,
"loss": 0.6604,
"num_input_tokens_seen": 5173727,
"step": 375
},
{
"epoch": 1.4632972322503008,
"grad_norm": 0.5625059604644775,
"learning_rate": 1.4550302306937619e-05,
"loss": 0.6389,
"num_input_tokens_seen": 5245681,
"step": 380
},
{
"epoch": 1.4825511432009626,
"grad_norm": 0.6026486158370972,
"learning_rate": 1.3578193410443068e-05,
"loss": 0.6506,
"num_input_tokens_seen": 5324203,
"step": 385
},
{
"epoch": 1.5018050541516246,
"grad_norm": 0.7199910879135132,
"learning_rate": 1.263301444038279e-05,
"loss": 0.656,
"num_input_tokens_seen": 5388385,
"step": 390
},
{
"epoch": 1.5210589651022866,
"grad_norm": 0.6065146327018738,
"learning_rate": 1.1715728752538103e-05,
"loss": 0.6634,
"num_input_tokens_seen": 5455431,
"step": 395
},
{
"epoch": 1.5403128760529483,
"grad_norm": 0.6021776795387268,
"learning_rate": 1.0827271272990206e-05,
"loss": 0.6348,
"num_input_tokens_seen": 5518719,
"step": 400
},
{
"epoch": 1.5403128760529483,
"eval_loss": 0.6049174070358276,
"eval_runtime": 6.1672,
"eval_samples_per_second": 24.322,
"eval_steps_per_second": 12.161,
"num_input_tokens_seen": 5518719,
"step": 400
},
{
"epoch": 1.55956678700361,
"grad_norm": 0.5880686044692993,
"learning_rate": 9.968547545214444e-06,
"loss": 0.6604,
"num_input_tokens_seen": 5592171,
"step": 405
},
{
"epoch": 1.578820697954272,
"grad_norm": 0.611405611038208,
"learning_rate": 9.140432807122282e-06,
"loss": 0.6806,
"num_input_tokens_seen": 5661159,
"step": 410
},
{
"epoch": 1.5980746089049338,
"grad_norm": 0.5994818806648254,
"learning_rate": 8.343771098991604e-06,
"loss": 0.6492,
"num_input_tokens_seen": 5721247,
"step": 415
},
{
"epoch": 1.6173285198555956,
"grad_norm": 0.6033297181129456,
"learning_rate": 7.57937440319462e-06,
"loss": 0.6808,
"num_input_tokens_seen": 5792521,
"step": 420
},
{
"epoch": 1.6365824308062575,
"grad_norm": 0.6016368269920349,
"learning_rate": 6.848021816600221e-06,
"loss": 0.6533,
"num_input_tokens_seen": 5864477,
"step": 425
},
{
"epoch": 1.6558363417569195,
"grad_norm": 0.6104902625083923,
"learning_rate": 6.15045875649424e-06,
"loss": 0.6342,
"num_input_tokens_seen": 5926641,
"step": 430
},
{
"epoch": 1.6750902527075813,
"grad_norm": 0.6614465713500977,
"learning_rate": 5.487396200826957e-06,
"loss": 0.6598,
"num_input_tokens_seen": 5996769,
"step": 435
},
{
"epoch": 1.694344163658243,
"grad_norm": 0.6114799380302429,
"learning_rate": 4.859509963562313e-06,
"loss": 0.6635,
"num_input_tokens_seen": 6066617,
"step": 440
},
{
"epoch": 1.713598074608905,
"grad_norm": 0.5675967335700989,
"learning_rate": 4.267440005867252e-06,
"loss": 0.6733,
"num_input_tokens_seen": 6132661,
"step": 445
},
{
"epoch": 1.7328519855595668,
"grad_norm": 0.5562564134597778,
"learning_rate": 3.7117897838435225e-06,
"loss": 0.6298,
"num_input_tokens_seen": 6201793,
"step": 450
},
{
"epoch": 1.7521058965102285,
"grad_norm": 0.607563316822052,
"learning_rate": 3.19312563346633e-06,
"loss": 0.6473,
"num_input_tokens_seen": 6270295,
"step": 455
},
{
"epoch": 1.7713598074608905,
"grad_norm": 0.5713885426521301,
"learning_rate": 2.7119761933572132e-06,
"loss": 0.6682,
"num_input_tokens_seen": 6345853,
"step": 460
},
{
"epoch": 1.7906137184115525,
"grad_norm": 0.6208235621452332,
"learning_rate": 2.268831865979073e-06,
"loss": 0.6354,
"num_input_tokens_seen": 6410491,
"step": 465
},
{
"epoch": 1.8098676293622142,
"grad_norm": 0.6321476101875305,
"learning_rate": 1.8641443178027784e-06,
"loss": 0.6501,
"num_input_tokens_seen": 6476011,
"step": 470
},
{
"epoch": 1.829121540312876,
"grad_norm": 0.671848475933075,
"learning_rate": 1.4983260189546322e-06,
"loss": 0.6699,
"num_input_tokens_seen": 6548557,
"step": 475
},
{
"epoch": 1.848375451263538,
"grad_norm": 0.6751930713653564,
"learning_rate": 1.1717498228140146e-06,
"loss": 0.6615,
"num_input_tokens_seen": 6623485,
"step": 480
},
{
"epoch": 1.8676293622141997,
"grad_norm": 0.6214536428451538,
"learning_rate": 8.847485859896365e-07,
"loss": 0.6627,
"num_input_tokens_seen": 6689943,
"step": 485
},
{
"epoch": 1.8868832731648615,
"grad_norm": 0.5981723070144653,
"learning_rate": 6.376148290617146e-07,
"loss": 0.6549,
"num_input_tokens_seen": 6757657,
"step": 490
},
{
"epoch": 1.9061371841155235,
"grad_norm": 0.5489323139190674,
"learning_rate": 4.306004384359419e-07,
"loss": 0.6755,
"num_input_tokens_seen": 6824219,
"step": 495
},
{
"epoch": 1.9253910950661854,
"grad_norm": 0.5502476096153259,
"learning_rate": 2.639164096129987e-07,
"loss": 0.6372,
"num_input_tokens_seen": 6895203,
"step": 500
},
{
"epoch": 1.9253910950661854,
"eval_loss": 0.6038384437561035,
"eval_runtime": 6.1961,
"eval_samples_per_second": 24.209,
"eval_steps_per_second": 12.104,
"num_input_tokens_seen": 6895203,
"step": 500
},
{
"epoch": 1.9446450060168472,
"grad_norm": 0.7226577997207642,
"learning_rate": 1.3773263213540332e-07,
"loss": 0.6492,
"num_input_tokens_seen": 6971035,
"step": 505
},
{
"epoch": 1.963898916967509,
"grad_norm": 0.5310623049736023,
"learning_rate": 5.2177716430801276e-08,
"loss": 0.6521,
"num_input_tokens_seen": 7041317,
"step": 510
},
{
"epoch": 1.983152827918171,
"grad_norm": 0.5827205777168274,
"learning_rate": 7.338862728225593e-09,
"loss": 0.6646,
"num_input_tokens_seen": 7098531,
"step": 515
},
{
"epoch": 1.994705174488568,
"num_input_tokens_seen": 7138765,
"step": 518,
"total_flos": 1.1924408841596928e+17,
"train_loss": 0.744573145759612,
"train_runtime": 5797.4674,
"train_samples_per_second": 5.733,
"train_steps_per_second": 0.089
}
],
"logging_steps": 5,
"max_steps": 518,
"num_input_tokens_seen": 7138765,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1924408841596928e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}