{
"best_metric": 1.49005126953125,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.03751641343087601,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00037516413430876007,
"grad_norm": 1.1359233856201172,
"learning_rate": 1.6666666666666668e-07,
"loss": 1.7936,
"step": 1
},
{
"epoch": 0.00037516413430876007,
"eval_loss": 2.12178373336792,
"eval_runtime": 365.7986,
"eval_samples_per_second": 12.275,
"eval_steps_per_second": 1.536,
"step": 1
},
{
"epoch": 0.0007503282686175201,
"grad_norm": 2.703493118286133,
"learning_rate": 3.3333333333333335e-07,
"loss": 1.5961,
"step": 2
},
{
"epoch": 0.0011254924029262803,
"grad_norm": 2.4857826232910156,
"learning_rate": 5.000000000000001e-07,
"loss": 1.3727,
"step": 3
},
{
"epoch": 0.0015006565372350403,
"grad_norm": 2.0098776817321777,
"learning_rate": 6.666666666666667e-07,
"loss": 1.1361,
"step": 4
},
{
"epoch": 0.0018758206715438004,
"grad_norm": 2.490281581878662,
"learning_rate": 8.333333333333333e-07,
"loss": 1.4598,
"step": 5
},
{
"epoch": 0.0022509848058525606,
"grad_norm": 2.235729694366455,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.2877,
"step": 6
},
{
"epoch": 0.0026261489401613208,
"grad_norm": 2.2408883571624756,
"learning_rate": 1.1666666666666668e-06,
"loss": 1.2887,
"step": 7
},
{
"epoch": 0.0030013130744700805,
"grad_norm": 2.0082004070281982,
"learning_rate": 1.3333333333333334e-06,
"loss": 1.2898,
"step": 8
},
{
"epoch": 0.0033764772087788407,
"grad_norm": 2.294994592666626,
"learning_rate": 1.5e-06,
"loss": 1.2422,
"step": 9
},
{
"epoch": 0.003751641343087601,
"grad_norm": 2.1499993801116943,
"learning_rate": 1.6666666666666667e-06,
"loss": 1.5298,
"step": 10
},
{
"epoch": 0.004126805477396361,
"grad_norm": 2.7161669731140137,
"learning_rate": 1.8333333333333333e-06,
"loss": 1.5488,
"step": 11
},
{
"epoch": 0.004501969611705121,
"grad_norm": 3.114898920059204,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.9435,
"step": 12
},
{
"epoch": 0.004877133746013881,
"grad_norm": 3.019930839538574,
"learning_rate": 2.166666666666667e-06,
"loss": 1.9317,
"step": 13
},
{
"epoch": 0.0052522978803226416,
"grad_norm": 2.8244283199310303,
"learning_rate": 2.3333333333333336e-06,
"loss": 1.966,
"step": 14
},
{
"epoch": 0.005627462014631401,
"grad_norm": 2.8965017795562744,
"learning_rate": 2.5e-06,
"loss": 1.9727,
"step": 15
},
{
"epoch": 0.006002626148940161,
"grad_norm": 2.852724552154541,
"learning_rate": 2.666666666666667e-06,
"loss": 1.8128,
"step": 16
},
{
"epoch": 0.006377790283248921,
"grad_norm": 2.867581605911255,
"learning_rate": 2.8333333333333335e-06,
"loss": 2.011,
"step": 17
},
{
"epoch": 0.006752954417557681,
"grad_norm": 2.7044379711151123,
"learning_rate": 3e-06,
"loss": 1.5948,
"step": 18
},
{
"epoch": 0.0071281185518664416,
"grad_norm": 2.3761465549468994,
"learning_rate": 3.1666666666666667e-06,
"loss": 1.7501,
"step": 19
},
{
"epoch": 0.007503282686175202,
"grad_norm": 2.6399307250976562,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.495,
"step": 20
},
{
"epoch": 0.007878446820483961,
"grad_norm": 3.001854419708252,
"learning_rate": 3.5e-06,
"loss": 1.8462,
"step": 21
},
{
"epoch": 0.008253610954792722,
"grad_norm": 3.6531243324279785,
"learning_rate": 3.6666666666666666e-06,
"loss": 2.0036,
"step": 22
},
{
"epoch": 0.008628775089101481,
"grad_norm": 3.283379554748535,
"learning_rate": 3.833333333333334e-06,
"loss": 2.4422,
"step": 23
},
{
"epoch": 0.009003939223410242,
"grad_norm": 1.9670974016189575,
"learning_rate": 4.000000000000001e-06,
"loss": 1.3793,
"step": 24
},
{
"epoch": 0.009379103357719002,
"grad_norm": 2.6124558448791504,
"learning_rate": 4.166666666666667e-06,
"loss": 1.7191,
"step": 25
},
{
"epoch": 0.009754267492027763,
"grad_norm": 3.230952024459839,
"learning_rate": 4.333333333333334e-06,
"loss": 2.2802,
"step": 26
},
{
"epoch": 0.010129431626336522,
"grad_norm": 2.728804588317871,
"learning_rate": 4.5e-06,
"loss": 1.479,
"step": 27
},
{
"epoch": 0.010504595760645283,
"grad_norm": 3.584338903427124,
"learning_rate": 4.666666666666667e-06,
"loss": 1.893,
"step": 28
},
{
"epoch": 0.010879759894954042,
"grad_norm": 2.7680435180664062,
"learning_rate": 4.833333333333333e-06,
"loss": 1.7687,
"step": 29
},
{
"epoch": 0.011254924029262802,
"grad_norm": 2.725539445877075,
"learning_rate": 5e-06,
"loss": 1.6371,
"step": 30
},
{
"epoch": 0.011630088163571563,
"grad_norm": 2.4085865020751953,
"learning_rate": 4.997482666353287e-06,
"loss": 1.6609,
"step": 31
},
{
"epoch": 0.012005252297880322,
"grad_norm": 2.4707136154174805,
"learning_rate": 4.989935734988098e-06,
"loss": 1.6657,
"step": 32
},
{
"epoch": 0.012380416432189083,
"grad_norm": 2.5936341285705566,
"learning_rate": 4.977374404419838e-06,
"loss": 2.0111,
"step": 33
},
{
"epoch": 0.012755580566497842,
"grad_norm": 2.875962018966675,
"learning_rate": 4.959823971496575e-06,
"loss": 1.6358,
"step": 34
},
{
"epoch": 0.013130744700806603,
"grad_norm": 3.3064959049224854,
"learning_rate": 4.937319780454559e-06,
"loss": 1.6326,
"step": 35
},
{
"epoch": 0.013505908835115363,
"grad_norm": 3.9652161598205566,
"learning_rate": 4.909907151739634e-06,
"loss": 2.1585,
"step": 36
},
{
"epoch": 0.013881072969424124,
"grad_norm": 2.84902286529541,
"learning_rate": 4.8776412907378845e-06,
"loss": 1.9896,
"step": 37
},
{
"epoch": 0.014256237103732883,
"grad_norm": 2.7080459594726562,
"learning_rate": 4.8405871765993435e-06,
"loss": 1.9475,
"step": 38
},
{
"epoch": 0.014631401238041642,
"grad_norm": 2.5884978771209717,
"learning_rate": 4.7988194313786275e-06,
"loss": 1.9678,
"step": 39
},
{
"epoch": 0.015006565372350403,
"grad_norm": 2.8800384998321533,
"learning_rate": 4.752422169756048e-06,
"loss": 1.9445,
"step": 40
},
{
"epoch": 0.015381729506659163,
"grad_norm": 2.643221378326416,
"learning_rate": 4.701488829641845e-06,
"loss": 2.6071,
"step": 41
},
{
"epoch": 0.015756893640967922,
"grad_norm": 2.8539741039276123,
"learning_rate": 4.646121984004666e-06,
"loss": 2.2393,
"step": 42
},
{
"epoch": 0.016132057775276685,
"grad_norm": 2.8243980407714844,
"learning_rate": 4.586433134303257e-06,
"loss": 2.3024,
"step": 43
},
{
"epoch": 0.016507221909585444,
"grad_norm": 2.7712883949279785,
"learning_rate": 4.522542485937369e-06,
"loss": 2.2517,
"step": 44
},
{
"epoch": 0.016882386043894203,
"grad_norm": 2.8007943630218506,
"learning_rate": 4.454578706170075e-06,
"loss": 2.3495,
"step": 45
},
{
"epoch": 0.017257550178202963,
"grad_norm": 3.1787521839141846,
"learning_rate": 4.382678665009028e-06,
"loss": 2.353,
"step": 46
},
{
"epoch": 0.017632714312511726,
"grad_norm": 2.6534879207611084,
"learning_rate": 4.3069871595684795e-06,
"loss": 2.4552,
"step": 47
},
{
"epoch": 0.018007878446820485,
"grad_norm": 3.0257785320281982,
"learning_rate": 4.227656622467162e-06,
"loss": 2.1897,
"step": 48
},
{
"epoch": 0.018383042581129244,
"grad_norm": 4.02520227432251,
"learning_rate": 4.144846814849282e-06,
"loss": 2.4987,
"step": 49
},
{
"epoch": 0.018758206715438003,
"grad_norm": 5.180730819702148,
"learning_rate": 4.058724504646834e-06,
"loss": 3.2648,
"step": 50
},
{
"epoch": 0.018758206715438003,
"eval_loss": 1.617767095565796,
"eval_runtime": 366.2259,
"eval_samples_per_second": 12.26,
"eval_steps_per_second": 1.535,
"step": 50
},
{
"epoch": 0.019133370849746763,
"grad_norm": 3.0668530464172363,
"learning_rate": 3.969463130731183e-06,
"loss": 1.6115,
"step": 51
},
{
"epoch": 0.019508534984055526,
"grad_norm": 2.0221545696258545,
"learning_rate": 3.8772424536302565e-06,
"loss": 1.4548,
"step": 52
},
{
"epoch": 0.019883699118364285,
"grad_norm": 1.181392788887024,
"learning_rate": 3.782248193514766e-06,
"loss": 0.7772,
"step": 53
},
{
"epoch": 0.020258863252673044,
"grad_norm": 1.638417363166809,
"learning_rate": 3.684671656182497e-06,
"loss": 0.9231,
"step": 54
},
{
"epoch": 0.020634027386981803,
"grad_norm": 1.695191740989685,
"learning_rate": 3.5847093477938955e-06,
"loss": 1.0114,
"step": 55
},
{
"epoch": 0.021009191521290566,
"grad_norm": 1.8865028619766235,
"learning_rate": 3.4825625791348093e-06,
"loss": 0.9822,
"step": 56
},
{
"epoch": 0.021384355655599326,
"grad_norm": 2.3858845233917236,
"learning_rate": 3.3784370602033572e-06,
"loss": 1.7195,
"step": 57
},
{
"epoch": 0.021759519789908085,
"grad_norm": 1.7190085649490356,
"learning_rate": 3.272542485937369e-06,
"loss": 1.3282,
"step": 58
},
{
"epoch": 0.022134683924216844,
"grad_norm": 2.187772750854492,
"learning_rate": 3.165092113916688e-06,
"loss": 1.09,
"step": 59
},
{
"epoch": 0.022509848058525603,
"grad_norm": 1.8837783336639404,
"learning_rate": 3.056302334890786e-06,
"loss": 1.0456,
"step": 60
},
{
"epoch": 0.022885012192834366,
"grad_norm": 1.810979962348938,
"learning_rate": 2.946392236996592e-06,
"loss": 1.2371,
"step": 61
},
{
"epoch": 0.023260176327143126,
"grad_norm": 1.6549510955810547,
"learning_rate": 2.835583164544139e-06,
"loss": 1.2869,
"step": 62
},
{
"epoch": 0.023635340461451885,
"grad_norm": 1.8585015535354614,
"learning_rate": 2.724098272258584e-06,
"loss": 1.2206,
"step": 63
},
{
"epoch": 0.024010504595760644,
"grad_norm": 2.164670467376709,
"learning_rate": 2.6121620758762877e-06,
"loss": 1.2996,
"step": 64
},
{
"epoch": 0.024385668730069407,
"grad_norm": 2.0705413818359375,
"learning_rate": 2.5e-06,
"loss": 1.2158,
"step": 65
},
{
"epoch": 0.024760832864378166,
"grad_norm": 1.8618977069854736,
"learning_rate": 2.3878379241237136e-06,
"loss": 1.3169,
"step": 66
},
{
"epoch": 0.025135996998686926,
"grad_norm": 2.355955123901367,
"learning_rate": 2.2759017277414165e-06,
"loss": 1.1751,
"step": 67
},
{
"epoch": 0.025511161132995685,
"grad_norm": 1.8614939451217651,
"learning_rate": 2.1644168354558623e-06,
"loss": 1.3641,
"step": 68
},
{
"epoch": 0.025886325267304444,
"grad_norm": 2.999551773071289,
"learning_rate": 2.053607763003409e-06,
"loss": 1.2753,
"step": 69
},
{
"epoch": 0.026261489401613207,
"grad_norm": 1.9945933818817139,
"learning_rate": 1.9436976651092143e-06,
"loss": 1.1216,
"step": 70
},
{
"epoch": 0.026636653535921966,
"grad_norm": 1.8532859086990356,
"learning_rate": 1.8349078860833125e-06,
"loss": 1.088,
"step": 71
},
{
"epoch": 0.027011817670230726,
"grad_norm": 2.1395885944366455,
"learning_rate": 1.7274575140626318e-06,
"loss": 1.0015,
"step": 72
},
{
"epoch": 0.027386981804539485,
"grad_norm": 2.2408030033111572,
"learning_rate": 1.6215629397966432e-06,
"loss": 1.108,
"step": 73
},
{
"epoch": 0.027762145938848248,
"grad_norm": 2.0609114170074463,
"learning_rate": 1.5174374208651913e-06,
"loss": 1.574,
"step": 74
},
{
"epoch": 0.028137310073157007,
"grad_norm": 2.0313756465911865,
"learning_rate": 1.415290652206105e-06,
"loss": 1.1664,
"step": 75
},
{
"epoch": 0.028512474207465766,
"grad_norm": 1.7218246459960938,
"learning_rate": 1.3153283438175036e-06,
"loss": 1.3208,
"step": 76
},
{
"epoch": 0.028887638341774526,
"grad_norm": 2.472414016723633,
"learning_rate": 1.217751806485235e-06,
"loss": 1.3203,
"step": 77
},
{
"epoch": 0.029262802476083285,
"grad_norm": 1.6580557823181152,
"learning_rate": 1.122757546369744e-06,
"loss": 1.1601,
"step": 78
},
{
"epoch": 0.029637966610392048,
"grad_norm": 1.7984857559204102,
"learning_rate": 1.0305368692688175e-06,
"loss": 1.1257,
"step": 79
},
{
"epoch": 0.030013130744700807,
"grad_norm": 2.6854803562164307,
"learning_rate": 9.412754953531664e-07,
"loss": 1.5246,
"step": 80
},
{
"epoch": 0.030388294879009566,
"grad_norm": 2.4177401065826416,
"learning_rate": 8.551531851507186e-07,
"loss": 1.5478,
"step": 81
},
{
"epoch": 0.030763459013318326,
"grad_norm": 2.418226957321167,
"learning_rate": 7.723433775328385e-07,
"loss": 1.5165,
"step": 82
},
{
"epoch": 0.03113862314762709,
"grad_norm": 2.8937528133392334,
"learning_rate": 6.930128404315214e-07,
"loss": 1.7612,
"step": 83
},
{
"epoch": 0.031513787281935844,
"grad_norm": 2.7434005737304688,
"learning_rate": 6.17321334990973e-07,
"loss": 1.7735,
"step": 84
},
{
"epoch": 0.03188895141624461,
"grad_norm": 2.3153278827667236,
"learning_rate": 5.454212938299256e-07,
"loss": 1.4117,
"step": 85
},
{
"epoch": 0.03226411555055337,
"grad_norm": 2.19724440574646,
"learning_rate": 4.774575140626317e-07,
"loss": 1.5057,
"step": 86
},
{
"epoch": 0.032639279684862126,
"grad_norm": 4.022721290588379,
"learning_rate": 4.1356686569674344e-07,
"loss": 1.4694,
"step": 87
},
{
"epoch": 0.03301444381917089,
"grad_norm": 2.6987545490264893,
"learning_rate": 3.538780159953348e-07,
"loss": 1.8611,
"step": 88
},
{
"epoch": 0.033389607953479644,
"grad_norm": 2.5568947792053223,
"learning_rate": 2.98511170358155e-07,
"loss": 2.0525,
"step": 89
},
{
"epoch": 0.03376477208778841,
"grad_norm": 2.9902150630950928,
"learning_rate": 2.4757783024395244e-07,
"loss": 1.9918,
"step": 90
},
{
"epoch": 0.03413993622209717,
"grad_norm": 2.9363043308258057,
"learning_rate": 2.0118056862137358e-07,
"loss": 1.8205,
"step": 91
},
{
"epoch": 0.034515100356405926,
"grad_norm": 2.565152168273926,
"learning_rate": 1.59412823400657e-07,
"loss": 2.1421,
"step": 92
},
{
"epoch": 0.03489026449071469,
"grad_norm": 2.2385518550872803,
"learning_rate": 1.223587092621162e-07,
"loss": 2.1784,
"step": 93
},
{
"epoch": 0.03526542862502345,
"grad_norm": 2.4541680812835693,
"learning_rate": 9.00928482603669e-08,
"loss": 2.1318,
"step": 94
},
{
"epoch": 0.03564059275933221,
"grad_norm": 3.151038408279419,
"learning_rate": 6.268021954544095e-08,
"loss": 2.1849,
"step": 95
},
{
"epoch": 0.03601575689364097,
"grad_norm": 2.4328556060791016,
"learning_rate": 4.017602850342584e-08,
"loss": 2.3812,
"step": 96
},
{
"epoch": 0.036390921027949726,
"grad_norm": 3.570035934448242,
"learning_rate": 2.262559558016325e-08,
"loss": 2.091,
"step": 97
},
{
"epoch": 0.03676608516225849,
"grad_norm": 3.09126877784729,
"learning_rate": 1.006426501190233e-08,
"loss": 2.4002,
"step": 98
},
{
"epoch": 0.03714124929656725,
"grad_norm": 3.067747116088867,
"learning_rate": 2.5173336467135266e-09,
"loss": 1.8106,
"step": 99
},
{
"epoch": 0.03751641343087601,
"grad_norm": 5.066938400268555,
"learning_rate": 0.0,
"loss": 2.3905,
"step": 100
},
{
"epoch": 0.03751641343087601,
"eval_loss": 1.49005126953125,
"eval_runtime": 366.7943,
"eval_samples_per_second": 12.241,
"eval_steps_per_second": 1.532,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.561311554001961e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}