{
"best_metric": 0.8063623309135437,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.0890670229347584,
"eval_steps": 25,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008906702293475841,
"grad_norm": 44.20709228515625,
"learning_rate": 2e-05,
"loss": 5.189,
"step": 1
},
{
"epoch": 0.0008906702293475841,
"eval_loss": 6.094831943511963,
"eval_runtime": 696.421,
"eval_samples_per_second": 21.722,
"eval_steps_per_second": 1.358,
"step": 1
},
{
"epoch": 0.0017813404586951682,
"grad_norm": 42.66680908203125,
"learning_rate": 4e-05,
"loss": 5.1525,
"step": 2
},
{
"epoch": 0.0026720106880427524,
"grad_norm": 44.50114822387695,
"learning_rate": 6e-05,
"loss": 5.4087,
"step": 3
},
{
"epoch": 0.0035626809173903364,
"grad_norm": 52.41817855834961,
"learning_rate": 8e-05,
"loss": 5.7687,
"step": 4
},
{
"epoch": 0.00445335114673792,
"grad_norm": 57.67063522338867,
"learning_rate": 0.0001,
"loss": 5.3536,
"step": 5
},
{
"epoch": 0.005344021376085505,
"grad_norm": 75.5921630859375,
"learning_rate": 9.997266286704631e-05,
"loss": 5.0831,
"step": 6
},
{
"epoch": 0.006234691605433089,
"grad_norm": 85.153564453125,
"learning_rate": 9.989068136093873e-05,
"loss": 4.4786,
"step": 7
},
{
"epoch": 0.007125361834780673,
"grad_norm": 85.10086822509766,
"learning_rate": 9.975414512725057e-05,
"loss": 4.1905,
"step": 8
},
{
"epoch": 0.008016032064128256,
"grad_norm": 64.82942199707031,
"learning_rate": 9.956320346634876e-05,
"loss": 3.545,
"step": 9
},
{
"epoch": 0.00890670229347584,
"grad_norm": 52.32982635498047,
"learning_rate": 9.931806517013612e-05,
"loss": 3.1496,
"step": 10
},
{
"epoch": 0.009797372522823424,
"grad_norm": 43.146053314208984,
"learning_rate": 9.901899829374047e-05,
"loss": 2.8887,
"step": 11
},
{
"epoch": 0.01068804275217101,
"grad_norm": 43.445045471191406,
"learning_rate": 9.86663298624003e-05,
"loss": 2.698,
"step": 12
},
{
"epoch": 0.011578712981518594,
"grad_norm": 39.32986068725586,
"learning_rate": 9.826044551386744e-05,
"loss": 2.4633,
"step": 13
},
{
"epoch": 0.012469383210866177,
"grad_norm": 38.07303237915039,
"learning_rate": 9.780178907671789e-05,
"loss": 2.0654,
"step": 14
},
{
"epoch": 0.013360053440213761,
"grad_norm": 40.31012725830078,
"learning_rate": 9.729086208503174e-05,
"loss": 1.9334,
"step": 15
},
{
"epoch": 0.014250723669561345,
"grad_norm": 39.19505310058594,
"learning_rate": 9.672822322997305e-05,
"loss": 1.7227,
"step": 16
},
{
"epoch": 0.01514139389890893,
"grad_norm": 38.306434631347656,
"learning_rate": 9.611448774886924e-05,
"loss": 1.69,
"step": 17
},
{
"epoch": 0.01603206412825651,
"grad_norm": 36.1546630859375,
"learning_rate": 9.545032675245813e-05,
"loss": 1.6214,
"step": 18
},
{
"epoch": 0.016922734357604097,
"grad_norm": 30.53850746154785,
"learning_rate": 9.473646649103818e-05,
"loss": 1.3828,
"step": 19
},
{
"epoch": 0.01781340458695168,
"grad_norm": 37.423255920410156,
"learning_rate": 9.397368756032445e-05,
"loss": 1.4083,
"step": 20
},
{
"epoch": 0.018704074816299265,
"grad_norm": 26.23516273498535,
"learning_rate": 9.316282404787871e-05,
"loss": 1.4254,
"step": 21
},
{
"epoch": 0.019594745045646848,
"grad_norm": 26.786113739013672,
"learning_rate": 9.230476262104677e-05,
"loss": 1.3075,
"step": 22
},
{
"epoch": 0.020485415274994433,
"grad_norm": 30.260276794433594,
"learning_rate": 9.140044155740101e-05,
"loss": 1.2865,
"step": 23
},
{
"epoch": 0.02137608550434202,
"grad_norm": 25.184106826782227,
"learning_rate": 9.045084971874738e-05,
"loss": 1.1895,
"step": 24
},
{
"epoch": 0.0222667557336896,
"grad_norm": 23.548507690429688,
"learning_rate": 8.945702546981969e-05,
"loss": 1.2839,
"step": 25
},
{
"epoch": 0.0222667557336896,
"eval_loss": 1.263924479484558,
"eval_runtime": 701.2081,
"eval_samples_per_second": 21.574,
"eval_steps_per_second": 1.349,
"step": 25
},
{
"epoch": 0.023157425963037187,
"grad_norm": 26.05804443359375,
"learning_rate": 8.842005554284296e-05,
"loss": 1.4077,
"step": 26
},
{
"epoch": 0.02404809619238477,
"grad_norm": 25.391490936279297,
"learning_rate": 8.73410738492077e-05,
"loss": 1.4483,
"step": 27
},
{
"epoch": 0.024938766421732355,
"grad_norm": 26.64982795715332,
"learning_rate": 8.622126023955446e-05,
"loss": 1.2018,
"step": 28
},
{
"epoch": 0.025829436651079937,
"grad_norm": 19.658527374267578,
"learning_rate": 8.506183921362443e-05,
"loss": 1.1818,
"step": 29
},
{
"epoch": 0.026720106880427523,
"grad_norm": 23.18828582763672,
"learning_rate": 8.386407858128706e-05,
"loss": 1.1465,
"step": 30
},
{
"epoch": 0.027610777109775105,
"grad_norm": 21.72152328491211,
"learning_rate": 8.262928807620843e-05,
"loss": 1.1506,
"step": 31
},
{
"epoch": 0.02850144733912269,
"grad_norm": 29.772584915161133,
"learning_rate": 8.135881792367686e-05,
"loss": 1.0751,
"step": 32
},
{
"epoch": 0.029392117568470273,
"grad_norm": 21.76646614074707,
"learning_rate": 8.005405736415126e-05,
"loss": 0.99,
"step": 33
},
{
"epoch": 0.03028278779781786,
"grad_norm": 20.992374420166016,
"learning_rate": 7.871643313414718e-05,
"loss": 1.0642,
"step": 34
},
{
"epoch": 0.03117345802716544,
"grad_norm": 20.871078491210938,
"learning_rate": 7.734740790612136e-05,
"loss": 1.0183,
"step": 35
},
{
"epoch": 0.03206412825651302,
"grad_norm": 19.493539810180664,
"learning_rate": 7.594847868906076e-05,
"loss": 0.9964,
"step": 36
},
{
"epoch": 0.03295479848586061,
"grad_norm": 19.52667808532715,
"learning_rate": 7.452117519152542e-05,
"loss": 0.9652,
"step": 37
},
{
"epoch": 0.033845468715208195,
"grad_norm": 21.08563995361328,
"learning_rate": 7.30670581489344e-05,
"loss": 1.1014,
"step": 38
},
{
"epoch": 0.03473613894455578,
"grad_norm": 17.96194839477539,
"learning_rate": 7.158771761692464e-05,
"loss": 1.241,
"step": 39
},
{
"epoch": 0.03562680917390336,
"grad_norm": 24.256383895874023,
"learning_rate": 7.008477123264848e-05,
"loss": 1.0074,
"step": 40
},
{
"epoch": 0.036517479403250945,
"grad_norm": 16.780834197998047,
"learning_rate": 6.855986244591104e-05,
"loss": 0.9271,
"step": 41
},
{
"epoch": 0.03740814963259853,
"grad_norm": 18.25143051147461,
"learning_rate": 6.701465872208216e-05,
"loss": 0.9469,
"step": 42
},
{
"epoch": 0.038298819861946116,
"grad_norm": 16.487092971801758,
"learning_rate": 6.545084971874738e-05,
"loss": 0.8354,
"step": 43
},
{
"epoch": 0.039189490091293695,
"grad_norm": 17.375408172607422,
"learning_rate": 6.387014543809223e-05,
"loss": 0.8368,
"step": 44
},
{
"epoch": 0.04008016032064128,
"grad_norm": 19.860628128051758,
"learning_rate": 6.227427435703997e-05,
"loss": 0.8107,
"step": 45
},
{
"epoch": 0.04097083054998887,
"grad_norm": 17.61992835998535,
"learning_rate": 6.066498153718735e-05,
"loss": 0.9159,
"step": 46
},
{
"epoch": 0.04186150077933645,
"grad_norm": 21.857112884521484,
"learning_rate": 5.90440267166055e-05,
"loss": 0.8706,
"step": 47
},
{
"epoch": 0.04275217100868404,
"grad_norm": 22.603778839111328,
"learning_rate": 5.74131823855921e-05,
"loss": 0.968,
"step": 48
},
{
"epoch": 0.04364284123803162,
"grad_norm": 26.721364974975586,
"learning_rate": 5.577423184847932e-05,
"loss": 1.1293,
"step": 49
},
{
"epoch": 0.0445335114673792,
"grad_norm": 125.70867156982422,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.871,
"step": 50
},
{
"epoch": 0.0445335114673792,
"eval_loss": 0.9120805859565735,
"eval_runtime": 698.0211,
"eval_samples_per_second": 21.673,
"eval_steps_per_second": 1.355,
"step": 50
},
{
"epoch": 0.04542418169672679,
"grad_norm": 19.553665161132812,
"learning_rate": 5.247918773366112e-05,
"loss": 1.1356,
"step": 51
},
{
"epoch": 0.046314851926074374,
"grad_norm": 17.038597106933594,
"learning_rate": 5.0826697238317935e-05,
"loss": 0.9157,
"step": 52
},
{
"epoch": 0.04720552215542195,
"grad_norm": 16.456735610961914,
"learning_rate": 4.917330276168208e-05,
"loss": 0.8641,
"step": 53
},
{
"epoch": 0.04809619238476954,
"grad_norm": 21.303831100463867,
"learning_rate": 4.7520812266338885e-05,
"loss": 0.979,
"step": 54
},
{
"epoch": 0.048986862614117124,
"grad_norm": 18.442556381225586,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.8377,
"step": 55
},
{
"epoch": 0.04987753284346471,
"grad_norm": 17.49199104309082,
"learning_rate": 4.4225768151520694e-05,
"loss": 0.9227,
"step": 56
},
{
"epoch": 0.05076820307281229,
"grad_norm": 16.545700073242188,
"learning_rate": 4.2586817614407895e-05,
"loss": 0.7409,
"step": 57
},
{
"epoch": 0.051658873302159874,
"grad_norm": 18.476268768310547,
"learning_rate": 4.095597328339452e-05,
"loss": 0.7327,
"step": 58
},
{
"epoch": 0.05254954353150746,
"grad_norm": 17.85874366760254,
"learning_rate": 3.933501846281267e-05,
"loss": 0.8013,
"step": 59
},
{
"epoch": 0.053440213760855046,
"grad_norm": 17.47732925415039,
"learning_rate": 3.772572564296005e-05,
"loss": 0.8019,
"step": 60
},
{
"epoch": 0.054330883990202625,
"grad_norm": 21.973190307617188,
"learning_rate": 3.612985456190778e-05,
"loss": 0.8761,
"step": 61
},
{
"epoch": 0.05522155421955021,
"grad_norm": 22.474699020385742,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.045,
"step": 62
},
{
"epoch": 0.056112224448897796,
"grad_norm": 25.610048294067383,
"learning_rate": 3.298534127791785e-05,
"loss": 1.1543,
"step": 63
},
{
"epoch": 0.05700289467824538,
"grad_norm": 28.102262496948242,
"learning_rate": 3.144013755408895e-05,
"loss": 0.9572,
"step": 64
},
{
"epoch": 0.05789356490759296,
"grad_norm": 16.78169059753418,
"learning_rate": 2.991522876735154e-05,
"loss": 0.8053,
"step": 65
},
{
"epoch": 0.058784235136940546,
"grad_norm": 26.701210021972656,
"learning_rate": 2.8412282383075363e-05,
"loss": 0.8915,
"step": 66
},
{
"epoch": 0.05967490536628813,
"grad_norm": 20.651735305786133,
"learning_rate": 2.693294185106562e-05,
"loss": 0.7907,
"step": 67
},
{
"epoch": 0.06056557559563572,
"grad_norm": 19.679340362548828,
"learning_rate": 2.547882480847461e-05,
"loss": 0.7364,
"step": 68
},
{
"epoch": 0.0614562458249833,
"grad_norm": 17.274389266967773,
"learning_rate": 2.405152131093926e-05,
"loss": 0.7114,
"step": 69
},
{
"epoch": 0.06234691605433088,
"grad_norm": 17.92892074584961,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.8729,
"step": 70
},
{
"epoch": 0.06323758628367847,
"grad_norm": 18.015047073364258,
"learning_rate": 2.128356686585282e-05,
"loss": 0.7113,
"step": 71
},
{
"epoch": 0.06412825651302605,
"grad_norm": 24.12103843688965,
"learning_rate": 1.9945942635848748e-05,
"loss": 0.7442,
"step": 72
},
{
"epoch": 0.06501892674237364,
"grad_norm": 19.176523208618164,
"learning_rate": 1.8641182076323148e-05,
"loss": 0.7511,
"step": 73
},
{
"epoch": 0.06590959697172122,
"grad_norm": 22.23782730102539,
"learning_rate": 1.7370711923791567e-05,
"loss": 0.7453,
"step": 74
},
{
"epoch": 0.06680026720106881,
"grad_norm": 24.631982803344727,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.8716,
"step": 75
},
{
"epoch": 0.06680026720106881,
"eval_loss": 0.8243863582611084,
"eval_runtime": 699.4578,
"eval_samples_per_second": 21.628,
"eval_steps_per_second": 1.352,
"step": 75
},
{
"epoch": 0.06769093743041639,
"grad_norm": 17.87114143371582,
"learning_rate": 1.4938160786375572e-05,
"loss": 1.0139,
"step": 76
},
{
"epoch": 0.06858160765976397,
"grad_norm": 15.511335372924805,
"learning_rate": 1.3778739760445552e-05,
"loss": 0.8656,
"step": 77
},
{
"epoch": 0.06947227788911156,
"grad_norm": 20.296796798706055,
"learning_rate": 1.2658926150792322e-05,
"loss": 0.8372,
"step": 78
},
{
"epoch": 0.07036294811845914,
"grad_norm": 18.298046112060547,
"learning_rate": 1.157994445715706e-05,
"loss": 0.8844,
"step": 79
},
{
"epoch": 0.07125361834780672,
"grad_norm": 17.071407318115234,
"learning_rate": 1.0542974530180327e-05,
"loss": 0.8669,
"step": 80
},
{
"epoch": 0.07214428857715431,
"grad_norm": 16.053665161132812,
"learning_rate": 9.549150281252633e-06,
"loss": 0.7517,
"step": 81
},
{
"epoch": 0.07303495880650189,
"grad_norm": 19.35648536682129,
"learning_rate": 8.599558442598998e-06,
"loss": 0.7972,
"step": 82
},
{
"epoch": 0.07392562903584948,
"grad_norm": 17.407955169677734,
"learning_rate": 7.695237378953223e-06,
"loss": 0.7013,
"step": 83
},
{
"epoch": 0.07481629926519706,
"grad_norm": 18.117521286010742,
"learning_rate": 6.837175952121306e-06,
"loss": 0.641,
"step": 84
},
{
"epoch": 0.07570696949454464,
"grad_norm": 18.53291893005371,
"learning_rate": 6.026312439675552e-06,
"loss": 0.7497,
"step": 85
},
{
"epoch": 0.07659763972389223,
"grad_norm": 21.79059410095215,
"learning_rate": 5.263533508961827e-06,
"loss": 0.829,
"step": 86
},
{
"epoch": 0.07748830995323981,
"grad_norm": 19.699893951416016,
"learning_rate": 4.549673247541875e-06,
"loss": 0.8702,
"step": 87
},
{
"epoch": 0.07837898018258739,
"grad_norm": 18.76403045654297,
"learning_rate": 3.885512251130763e-06,
"loss": 1.07,
"step": 88
},
{
"epoch": 0.07926965041193498,
"grad_norm": 17.980998992919922,
"learning_rate": 3.271776770026963e-06,
"loss": 1.0136,
"step": 89
},
{
"epoch": 0.08016032064128256,
"grad_norm": 17.479143142700195,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.7321,
"step": 90
},
{
"epoch": 0.08105099087063015,
"grad_norm": 16.1824951171875,
"learning_rate": 2.1982109232821178e-06,
"loss": 0.7568,
"step": 91
},
{
"epoch": 0.08194166109997773,
"grad_norm": 17.499082565307617,
"learning_rate": 1.7395544861325718e-06,
"loss": 0.8011,
"step": 92
},
{
"epoch": 0.08283233132932531,
"grad_norm": 17.437572479248047,
"learning_rate": 1.333670137599713e-06,
"loss": 0.7144,
"step": 93
},
{
"epoch": 0.0837230015586729,
"grad_norm": 19.064252853393555,
"learning_rate": 9.810017062595322e-07,
"loss": 0.7502,
"step": 94
},
{
"epoch": 0.08461367178802048,
"grad_norm": 18.436866760253906,
"learning_rate": 6.819348298638839e-07,
"loss": 0.7259,
"step": 95
},
{
"epoch": 0.08550434201736808,
"grad_norm": 16.668874740600586,
"learning_rate": 4.367965336512403e-07,
"loss": 0.764,
"step": 96
},
{
"epoch": 0.08639501224671566,
"grad_norm": 18.206295013427734,
"learning_rate": 2.458548727494292e-07,
"loss": 0.7036,
"step": 97
},
{
"epoch": 0.08728568247606323,
"grad_norm": 20.8583927154541,
"learning_rate": 1.0931863906127327e-07,
"loss": 0.6996,
"step": 98
},
{
"epoch": 0.08817635270541083,
"grad_norm": 17.43104362487793,
"learning_rate": 2.7337132953697554e-08,
"loss": 0.8025,
"step": 99
},
{
"epoch": 0.0890670229347584,
"grad_norm": 22.67059898376465,
"learning_rate": 0.0,
"loss": 0.8697,
"step": 100
},
{
"epoch": 0.0890670229347584,
"eval_loss": 0.8063623309135437,
"eval_runtime": 698.2614,
"eval_samples_per_second": 21.665,
"eval_steps_per_second": 1.355,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.3811002881409024e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}