{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4459291865360094,
"eval_steps": 1000000,
"global_step": 51058,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0043668885046027,
"grad_norm": 1.4637895822525024,
"learning_rate": 9.99956331114954e-06,
"loss": 3.0457,
"step": 500
},
{
"epoch": 0.0087337770092054,
"grad_norm": 1.4888430833816528,
"learning_rate": 9.99912662229908e-06,
"loss": 3.0497,
"step": 1000
},
{
"epoch": 0.013100665513808101,
"grad_norm": 1.5357590913772583,
"learning_rate": 9.99868993344862e-06,
"loss": 3.0467,
"step": 1500
},
{
"epoch": 0.0174675540184108,
"grad_norm": 1.4784561395645142,
"learning_rate": 9.99825324459816e-06,
"loss": 3.054,
"step": 2000
},
{
"epoch": 0.021834442523013503,
"grad_norm": 1.4730147123336792,
"learning_rate": 9.9978165557477e-06,
"loss": 3.0499,
"step": 2500
},
{
"epoch": 0.026201331027616202,
"grad_norm": 1.4645798206329346,
"learning_rate": 9.997379866897238e-06,
"loss": 3.0471,
"step": 3000
},
{
"epoch": 0.030568219532218905,
"grad_norm": 1.4646248817443848,
"learning_rate": 9.996943178046778e-06,
"loss": 3.0524,
"step": 3500
},
{
"epoch": 0.0349351080368216,
"grad_norm": 1.4661109447479248,
"learning_rate": 9.996506489196318e-06,
"loss": 3.054,
"step": 4000
},
{
"epoch": 0.03930199654142431,
"grad_norm": 1.5115820169448853,
"learning_rate": 9.996069800345858e-06,
"loss": 3.0507,
"step": 4500
},
{
"epoch": 0.043668885046027006,
"grad_norm": 1.4999421834945679,
"learning_rate": 9.995633111495399e-06,
"loss": 3.0503,
"step": 5000
},
{
"epoch": 0.048035773550629705,
"grad_norm": 1.5151880979537964,
"learning_rate": 9.995196422644939e-06,
"loss": 3.0489,
"step": 5500
},
{
"epoch": 0.052402662055232405,
"grad_norm": 1.4950436353683472,
"learning_rate": 9.994759733794479e-06,
"loss": 3.0477,
"step": 6000
},
{
"epoch": 0.056769550559835104,
"grad_norm": 1.5059316158294678,
"learning_rate": 9.994323044944017e-06,
"loss": 3.04,
"step": 6500
},
{
"epoch": 0.06113643906443781,
"grad_norm": 1.6669011116027832,
"learning_rate": 9.993886356093557e-06,
"loss": 3.0511,
"step": 7000
},
{
"epoch": 0.06550332756904051,
"grad_norm": 1.4698095321655273,
"learning_rate": 9.993449667243097e-06,
"loss": 3.0464,
"step": 7500
},
{
"epoch": 0.0698702160736432,
"grad_norm": 1.5101925134658813,
"learning_rate": 9.993012978392636e-06,
"loss": 3.0477,
"step": 8000
},
{
"epoch": 0.07423710457824591,
"grad_norm": 1.5302919149398804,
"learning_rate": 9.992576289542176e-06,
"loss": 3.0499,
"step": 8500
},
{
"epoch": 0.07860399308284861,
"grad_norm": 1.5035344362258911,
"learning_rate": 9.992139600691716e-06,
"loss": 3.04,
"step": 9000
},
{
"epoch": 0.0829708815874513,
"grad_norm": 1.5338293313980103,
"learning_rate": 9.991702911841256e-06,
"loss": 3.0411,
"step": 9500
},
{
"epoch": 0.08733777009205401,
"grad_norm": 1.545718789100647,
"learning_rate": 9.991266222990796e-06,
"loss": 3.0451,
"step": 10000
},
{
"epoch": 0.0917046585966567,
"grad_norm": 1.490438461303711,
"learning_rate": 9.990829534140335e-06,
"loss": 3.054,
"step": 10500
},
{
"epoch": 0.09607154710125941,
"grad_norm": 1.5296202898025513,
"learning_rate": 9.990392845289875e-06,
"loss": 3.0511,
"step": 11000
},
{
"epoch": 0.10043843560586212,
"grad_norm": 1.5026472806930542,
"learning_rate": 9.989956156439415e-06,
"loss": 3.0451,
"step": 11500
},
{
"epoch": 0.10480532411046481,
"grad_norm": 1.4997934103012085,
"learning_rate": 9.989519467588953e-06,
"loss": 3.0319,
"step": 12000
},
{
"epoch": 0.10917221261506752,
"grad_norm": 1.4747111797332764,
"learning_rate": 9.989082778738493e-06,
"loss": 3.0448,
"step": 12500
},
{
"epoch": 0.11353910111967021,
"grad_norm": 1.5128976106643677,
"learning_rate": 9.988646089888033e-06,
"loss": 3.0445,
"step": 13000
},
{
"epoch": 0.11790598962427291,
"grad_norm": 1.4758250713348389,
"learning_rate": 9.988209401037573e-06,
"loss": 3.0459,
"step": 13500
},
{
"epoch": 0.12227287812887562,
"grad_norm": 1.4949926137924194,
"learning_rate": 9.987772712187114e-06,
"loss": 3.0386,
"step": 14000
},
{
"epoch": 0.1266397666334783,
"grad_norm": 1.5117239952087402,
"learning_rate": 9.987336023336654e-06,
"loss": 3.0468,
"step": 14500
},
{
"epoch": 0.13100665513808102,
"grad_norm": 1.516593337059021,
"learning_rate": 9.986899334486194e-06,
"loss": 3.0421,
"step": 15000
},
{
"epoch": 0.13537354364268372,
"grad_norm": 1.5281599760055542,
"learning_rate": 9.986462645635732e-06,
"loss": 3.0476,
"step": 15500
},
{
"epoch": 0.1397404321472864,
"grad_norm": 1.500059723854065,
"learning_rate": 9.986025956785272e-06,
"loss": 3.0371,
"step": 16000
},
{
"epoch": 0.1441073206518891,
"grad_norm": 1.512818694114685,
"learning_rate": 9.985589267934812e-06,
"loss": 3.0465,
"step": 16500
},
{
"epoch": 0.14847420915649182,
"grad_norm": 1.4822925329208374,
"learning_rate": 9.985152579084352e-06,
"loss": 3.037,
"step": 17000
},
{
"epoch": 0.15284109766109452,
"grad_norm": 1.4993562698364258,
"learning_rate": 9.984715890233891e-06,
"loss": 3.0384,
"step": 17500
},
{
"epoch": 0.15720798616569723,
"grad_norm": 1.5457581281661987,
"learning_rate": 9.984279201383431e-06,
"loss": 3.0377,
"step": 18000
},
{
"epoch": 0.1615748746702999,
"grad_norm": 1.4884344339370728,
"learning_rate": 9.983842512532971e-06,
"loss": 3.0472,
"step": 18500
},
{
"epoch": 0.1659417631749026,
"grad_norm": 1.5123255252838135,
"learning_rate": 9.98340582368251e-06,
"loss": 3.0414,
"step": 19000
},
{
"epoch": 0.17030865167950532,
"grad_norm": 1.4943363666534424,
"learning_rate": 9.98296913483205e-06,
"loss": 3.0416,
"step": 19500
},
{
"epoch": 0.17467554018410802,
"grad_norm": 1.525444746017456,
"learning_rate": 9.98253244598159e-06,
"loss": 3.048,
"step": 20000
},
{
"epoch": 0.17904242868871073,
"grad_norm": 1.479514479637146,
"learning_rate": 9.982095757131129e-06,
"loss": 3.0368,
"step": 20500
},
{
"epoch": 0.1834093171933134,
"grad_norm": 1.50456702709198,
"learning_rate": 9.981659068280668e-06,
"loss": 3.0463,
"step": 21000
},
{
"epoch": 0.18777620569791612,
"grad_norm": 1.478912591934204,
"learning_rate": 9.981222379430208e-06,
"loss": 3.0391,
"step": 21500
},
{
"epoch": 0.19214309420251882,
"grad_norm": 1.4481830596923828,
"learning_rate": 9.980785690579748e-06,
"loss": 3.0436,
"step": 22000
},
{
"epoch": 0.19650998270712153,
"grad_norm": 1.5016520023345947,
"learning_rate": 9.980349001729288e-06,
"loss": 3.0397,
"step": 22500
},
{
"epoch": 0.20087687121172423,
"grad_norm": 1.5213875770568848,
"learning_rate": 9.97991231287883e-06,
"loss": 3.0439,
"step": 23000
},
{
"epoch": 0.2052437597163269,
"grad_norm": 1.5557032823562622,
"learning_rate": 9.97947562402837e-06,
"loss": 3.0437,
"step": 23500
},
{
"epoch": 0.20961064822092962,
"grad_norm": 1.477344036102295,
"learning_rate": 9.979038935177907e-06,
"loss": 3.0396,
"step": 24000
},
{
"epoch": 0.21397753672553232,
"grad_norm": 1.55954909324646,
"learning_rate": 9.978602246327447e-06,
"loss": 3.0337,
"step": 24500
},
{
"epoch": 0.21834442523013503,
"grad_norm": 1.4787821769714355,
"learning_rate": 9.978165557476987e-06,
"loss": 3.0376,
"step": 25000
},
{
"epoch": 0.22271131373473774,
"grad_norm": 1.4599512815475464,
"learning_rate": 9.977728868626527e-06,
"loss": 3.0353,
"step": 25500
},
{
"epoch": 0.22707820223934042,
"grad_norm": 1.5182305574417114,
"learning_rate": 9.977292179776067e-06,
"loss": 3.0433,
"step": 26000
},
{
"epoch": 0.23144509074394312,
"grad_norm": 1.502984881401062,
"learning_rate": 9.976855490925607e-06,
"loss": 3.0373,
"step": 26500
},
{
"epoch": 0.23581197924854583,
"grad_norm": 1.5546374320983887,
"learning_rate": 9.976418802075146e-06,
"loss": 3.0338,
"step": 27000
},
{
"epoch": 0.24017886775314853,
"grad_norm": 1.4963784217834473,
"learning_rate": 9.975982113224686e-06,
"loss": 3.0442,
"step": 27500
},
{
"epoch": 0.24454575625775124,
"grad_norm": 1.4785401821136475,
"learning_rate": 9.975545424374226e-06,
"loss": 3.0389,
"step": 28000
},
{
"epoch": 0.24891264476235392,
"grad_norm": 1.4732693433761597,
"learning_rate": 9.975108735523766e-06,
"loss": 3.036,
"step": 28500
},
{
"epoch": 0.2532795332669566,
"grad_norm": 1.455270528793335,
"learning_rate": 9.974672046673306e-06,
"loss": 3.0376,
"step": 29000
},
{
"epoch": 0.25764642177155933,
"grad_norm": 1.525971531867981,
"learning_rate": 9.974235357822844e-06,
"loss": 3.0263,
"step": 29500
},
{
"epoch": 0.26201331027616204,
"grad_norm": 1.4855064153671265,
"learning_rate": 9.973798668972384e-06,
"loss": 3.0391,
"step": 30000
},
{
"epoch": 0.26638019878076474,
"grad_norm": 1.4785747528076172,
"learning_rate": 9.973361980121923e-06,
"loss": 3.0397,
"step": 30500
},
{
"epoch": 0.27074708728536745,
"grad_norm": 1.6142592430114746,
"learning_rate": 9.972925291271463e-06,
"loss": 3.0344,
"step": 31000
},
{
"epoch": 0.27511397578997016,
"grad_norm": 1.4955430030822754,
"learning_rate": 9.972488602421003e-06,
"loss": 3.0325,
"step": 31500
},
{
"epoch": 0.2794808642945728,
"grad_norm": 1.4893536567687988,
"learning_rate": 9.972051913570545e-06,
"loss": 3.0297,
"step": 32000
},
{
"epoch": 0.2838477527991755,
"grad_norm": 1.493492603302002,
"learning_rate": 9.971615224720084e-06,
"loss": 3.0343,
"step": 32500
},
{
"epoch": 0.2882146413037782,
"grad_norm": 1.4653059244155884,
"learning_rate": 9.971178535869623e-06,
"loss": 3.0256,
"step": 33000
},
{
"epoch": 0.2925815298083809,
"grad_norm": 1.4473530054092407,
"learning_rate": 9.970741847019162e-06,
"loss": 3.0282,
"step": 33500
},
{
"epoch": 0.29694841831298363,
"grad_norm": 1.5169305801391602,
"learning_rate": 9.970305158168702e-06,
"loss": 3.0289,
"step": 34000
},
{
"epoch": 0.30131530681758634,
"grad_norm": 1.507231593132019,
"learning_rate": 9.969868469318242e-06,
"loss": 3.0368,
"step": 34500
},
{
"epoch": 0.30568219532218904,
"grad_norm": 1.5321316719055176,
"learning_rate": 9.969431780467782e-06,
"loss": 3.0328,
"step": 35000
},
{
"epoch": 0.31004908382679175,
"grad_norm": 1.5015376806259155,
"learning_rate": 9.968995091617322e-06,
"loss": 3.0279,
"step": 35500
},
{
"epoch": 0.31441597233139446,
"grad_norm": 1.494449496269226,
"learning_rate": 9.968558402766862e-06,
"loss": 3.0278,
"step": 36000
},
{
"epoch": 0.31878286083599716,
"grad_norm": 1.528844952583313,
"learning_rate": 9.968121713916401e-06,
"loss": 3.0277,
"step": 36500
},
{
"epoch": 0.3231497493405998,
"grad_norm": 1.5150773525238037,
"learning_rate": 9.967685025065941e-06,
"loss": 3.0278,
"step": 37000
},
{
"epoch": 0.3275166378452025,
"grad_norm": 1.4646540880203247,
"learning_rate": 9.967248336215481e-06,
"loss": 3.0316,
"step": 37500
},
{
"epoch": 0.3318835263498052,
"grad_norm": 1.4521143436431885,
"learning_rate": 9.96681164736502e-06,
"loss": 3.0291,
"step": 38000
},
{
"epoch": 0.33625041485440793,
"grad_norm": 1.4562242031097412,
"learning_rate": 9.966374958514559e-06,
"loss": 3.0319,
"step": 38500
},
{
"epoch": 0.34061730335901064,
"grad_norm": 1.4896670579910278,
"learning_rate": 9.965938269664099e-06,
"loss": 3.0284,
"step": 39000
},
{
"epoch": 0.34498419186361334,
"grad_norm": 1.500690221786499,
"learning_rate": 9.965501580813639e-06,
"loss": 3.03,
"step": 39500
},
{
"epoch": 0.34935108036821605,
"grad_norm": 1.525810718536377,
"learning_rate": 9.965064891963178e-06,
"loss": 3.0258,
"step": 40000
},
{
"epoch": 0.35371796887281876,
"grad_norm": 1.4898581504821777,
"learning_rate": 9.964628203112718e-06,
"loss": 3.0262,
"step": 40500
},
{
"epoch": 0.35808485737742146,
"grad_norm": 1.4923847913742065,
"learning_rate": 9.96419151426226e-06,
"loss": 3.0301,
"step": 41000
},
{
"epoch": 0.36245174588202417,
"grad_norm": 1.4524798393249512,
"learning_rate": 9.9637548254118e-06,
"loss": 3.0269,
"step": 41500
},
{
"epoch": 0.3668186343866268,
"grad_norm": 1.4696733951568604,
"learning_rate": 9.963318136561338e-06,
"loss": 3.0271,
"step": 42000
},
{
"epoch": 0.3711855228912295,
"grad_norm": 1.4951398372650146,
"learning_rate": 9.962881447710878e-06,
"loss": 3.0236,
"step": 42500
},
{
"epoch": 0.37555241139583223,
"grad_norm": 1.4677625894546509,
"learning_rate": 9.962444758860417e-06,
"loss": 3.0265,
"step": 43000
},
{
"epoch": 0.37991929990043494,
"grad_norm": 1.5334116220474243,
"learning_rate": 9.962008070009957e-06,
"loss": 3.0301,
"step": 43500
},
{
"epoch": 0.38428618840503764,
"grad_norm": 1.4863616228103638,
"learning_rate": 9.961571381159497e-06,
"loss": 3.0235,
"step": 44000
},
{
"epoch": 0.38865307690964035,
"grad_norm": 1.474593997001648,
"learning_rate": 9.961134692309037e-06,
"loss": 3.018,
"step": 44500
},
{
"epoch": 0.39301996541424306,
"grad_norm": 1.4600543975830078,
"learning_rate": 9.960698003458577e-06,
"loss": 3.0289,
"step": 45000
},
{
"epoch": 0.39738685391884576,
"grad_norm": 1.5772684812545776,
"learning_rate": 9.960261314608117e-06,
"loss": 3.0438,
"step": 45500
},
{
"epoch": 0.40175374242344847,
"grad_norm": 1.4724726676940918,
"learning_rate": 9.959824625757656e-06,
"loss": 3.0493,
"step": 46000
},
{
"epoch": 0.4061206309280511,
"grad_norm": 1.4206587076187134,
"learning_rate": 9.959387936907196e-06,
"loss": 3.0465,
"step": 46500
},
{
"epoch": 0.4104875194326538,
"grad_norm": 1.4878075122833252,
"learning_rate": 9.958951248056736e-06,
"loss": 3.0429,
"step": 47000
},
{
"epoch": 0.41485440793725653,
"grad_norm": 1.5244592428207397,
"learning_rate": 9.958514559206274e-06,
"loss": 3.0538,
"step": 47500
},
{
"epoch": 0.41922129644185924,
"grad_norm": 1.5310680866241455,
"learning_rate": 9.958077870355814e-06,
"loss": 3.0442,
"step": 48000
},
{
"epoch": 0.42358818494646194,
"grad_norm": 1.431077241897583,
"learning_rate": 9.957641181505354e-06,
"loss": 3.0459,
"step": 48500
},
{
"epoch": 0.42795507345106465,
"grad_norm": 1.4707083702087402,
"learning_rate": 9.957204492654894e-06,
"loss": 3.0416,
"step": 49000
},
{
"epoch": 0.43232196195566736,
"grad_norm": 1.4576623439788818,
"learning_rate": 9.956767803804433e-06,
"loss": 3.0536,
"step": 49500
},
{
"epoch": 0.43668885046027006,
"grad_norm": 1.4907565116882324,
"learning_rate": 9.956331114953975e-06,
"loss": 3.0537,
"step": 50000
},
{
"epoch": 0.44105573896487277,
"grad_norm": 1.4767038822174072,
"learning_rate": 9.955894426103515e-06,
"loss": 3.055,
"step": 50500
},
{
"epoch": 0.4454226274694755,
"grad_norm": 1.463346242904663,
"learning_rate": 9.955457737253053e-06,
"loss": 3.0573,
"step": 51000
},
{
"epoch": 0.4459291865360094,
"step": 51058,
"total_flos": 1.2807406558275174e+18,
"train_loss": 3.039834905891339,
"train_runtime": 46796.8653,
"train_samples_per_second": 11744.09,
"train_steps_per_second": 244.67
}
],
"logging_steps": 500,
"max_steps": 11449800,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 1000000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.2807406558275174e+18,
"train_batch_size": 48,
"trial_name": null,
"trial_params": null
}