Training in progress, step 110, checkpoint
07354ea verified
{
"best_metric": 1.2894105911254883,
"best_model_checkpoint": "miner_id_24/checkpoint-80",
"epoch": 0.03106247793858101,
"eval_steps": 10,
"global_step": 110,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002823861630780092,
"grad_norm": 0.6790897250175476,
"learning_rate": 2e-05,
"loss": 1.6543,
"step": 1
},
{
"epoch": 0.0002823861630780092,
"eval_loss": 2.1089015007019043,
"eval_runtime": 417.1011,
"eval_samples_per_second": 1.789,
"eval_steps_per_second": 1.789,
"step": 1
},
{
"epoch": 0.0005647723261560184,
"grad_norm": 1.1645948886871338,
"learning_rate": 4e-05,
"loss": 2.2171,
"step": 2
},
{
"epoch": 0.0008471584892340275,
"grad_norm": 0.4623192846775055,
"learning_rate": 6e-05,
"loss": 0.9606,
"step": 3
},
{
"epoch": 0.0011295446523120368,
"grad_norm": 0.9102379679679871,
"learning_rate": 8e-05,
"loss": 2.5406,
"step": 4
},
{
"epoch": 0.0014119308153900459,
"grad_norm": 1.1047648191452026,
"learning_rate": 0.0001,
"loss": 2.2713,
"step": 5
},
{
"epoch": 0.001694316978468055,
"grad_norm": 0.4400641918182373,
"learning_rate": 0.00012,
"loss": 2.4362,
"step": 6
},
{
"epoch": 0.001976703141546064,
"grad_norm": 0.7926576137542725,
"learning_rate": 0.00014,
"loss": 1.75,
"step": 7
},
{
"epoch": 0.0022590893046240735,
"grad_norm": 0.6660397052764893,
"learning_rate": 0.00016,
"loss": 2.1309,
"step": 8
},
{
"epoch": 0.0025414754677020824,
"grad_norm": 0.6158621311187744,
"learning_rate": 0.00018,
"loss": 2.4602,
"step": 9
},
{
"epoch": 0.0028238616307800918,
"grad_norm": 0.9061938524246216,
"learning_rate": 0.0002,
"loss": 1.9124,
"step": 10
},
{
"epoch": 0.0028238616307800918,
"eval_loss": 1.7352350950241089,
"eval_runtime": 416.8713,
"eval_samples_per_second": 1.79,
"eval_steps_per_second": 1.79,
"step": 10
},
{
"epoch": 0.003106247793858101,
"grad_norm": 0.9380114674568176,
"learning_rate": 0.0001999979446958366,
"loss": 1.5976,
"step": 11
},
{
"epoch": 0.00338863395693611,
"grad_norm": 1.0424156188964844,
"learning_rate": 0.00019999177886783194,
"loss": 1.9498,
"step": 12
},
{
"epoch": 0.0036710201200141194,
"grad_norm": 0.7817176580429077,
"learning_rate": 0.00019998150276943902,
"loss": 1.407,
"step": 13
},
{
"epoch": 0.003953406283092128,
"grad_norm": 1.3307476043701172,
"learning_rate": 0.000199967116823068,
"loss": 1.9723,
"step": 14
},
{
"epoch": 0.004235792446170138,
"grad_norm": 0.9394961595535278,
"learning_rate": 0.0001999486216200688,
"loss": 1.1677,
"step": 15
},
{
"epoch": 0.004518178609248147,
"grad_norm": 3.413137435913086,
"learning_rate": 0.00019992601792070679,
"loss": 1.0187,
"step": 16
},
{
"epoch": 0.004800564772326156,
"grad_norm": 3.870213747024536,
"learning_rate": 0.00019989930665413147,
"loss": 1.4127,
"step": 17
},
{
"epoch": 0.005082950935404165,
"grad_norm": 0.6383346915245056,
"learning_rate": 0.00019986848891833845,
"loss": 1.338,
"step": 18
},
{
"epoch": 0.005365337098482174,
"grad_norm": 1.3503261804580688,
"learning_rate": 0.0001998335659801241,
"loss": 0.4498,
"step": 19
},
{
"epoch": 0.0056477232615601836,
"grad_norm": 1.6426069736480713,
"learning_rate": 0.00019979453927503364,
"loss": 1.7376,
"step": 20
},
{
"epoch": 0.0056477232615601836,
"eval_loss": 1.337227702140808,
"eval_runtime": 418.4568,
"eval_samples_per_second": 1.783,
"eval_steps_per_second": 1.783,
"step": 20
},
{
"epoch": 0.005930109424638193,
"grad_norm": 1.5207312107086182,
"learning_rate": 0.00019975141040730207,
"loss": 1.0652,
"step": 21
},
{
"epoch": 0.006212495587716202,
"grad_norm": 0.9025612473487854,
"learning_rate": 0.0001997041811497882,
"loss": 1.9069,
"step": 22
},
{
"epoch": 0.006494881750794211,
"grad_norm": 0.662822961807251,
"learning_rate": 0.00019965285344390184,
"loss": 2.1184,
"step": 23
},
{
"epoch": 0.00677726791387222,
"grad_norm": 1.2998898029327393,
"learning_rate": 0.00019959742939952392,
"loss": 1.3041,
"step": 24
},
{
"epoch": 0.0070596540769502295,
"grad_norm": 0.87566077709198,
"learning_rate": 0.00019953791129491983,
"loss": 1.8995,
"step": 25
},
{
"epoch": 0.007342040240028239,
"grad_norm": 1.7948976755142212,
"learning_rate": 0.00019947430157664576,
"loss": 1.0098,
"step": 26
},
{
"epoch": 0.007624426403106248,
"grad_norm": 0.8442519307136536,
"learning_rate": 0.00019940660285944803,
"loss": 1.9566,
"step": 27
},
{
"epoch": 0.007906812566184257,
"grad_norm": 1.40757155418396,
"learning_rate": 0.00019933481792615583,
"loss": 1.6962,
"step": 28
},
{
"epoch": 0.008189198729262267,
"grad_norm": 1.3247369527816772,
"learning_rate": 0.0001992589497275665,
"loss": 1.6803,
"step": 29
},
{
"epoch": 0.008471584892340275,
"grad_norm": 2.1888298988342285,
"learning_rate": 0.0001991790013823246,
"loss": 1.1217,
"step": 30
},
{
"epoch": 0.008471584892340275,
"eval_loss": 1.3264524936676025,
"eval_runtime": 422.0198,
"eval_samples_per_second": 1.768,
"eval_steps_per_second": 1.768,
"step": 30
},
{
"epoch": 0.008753971055418284,
"grad_norm": 0.716585636138916,
"learning_rate": 0.00019909497617679348,
"loss": 2.0163,
"step": 31
},
{
"epoch": 0.009036357218496294,
"grad_norm": 1.1441899538040161,
"learning_rate": 0.0001990068775649202,
"loss": 1.9939,
"step": 32
},
{
"epoch": 0.009318743381574303,
"grad_norm": 0.9873970746994019,
"learning_rate": 0.00019891470916809362,
"loss": 2.0328,
"step": 33
},
{
"epoch": 0.009601129544652313,
"grad_norm": 1.0919432640075684,
"learning_rate": 0.00019881847477499557,
"loss": 1.4307,
"step": 34
},
{
"epoch": 0.009883515707730321,
"grad_norm": 1.9606109857559204,
"learning_rate": 0.00019871817834144504,
"loss": 0.8573,
"step": 35
},
{
"epoch": 0.01016590187080833,
"grad_norm": 0.7053022384643555,
"learning_rate": 0.0001986138239902355,
"loss": 1.7692,
"step": 36
},
{
"epoch": 0.01044828803388634,
"grad_norm": 1.2274115085601807,
"learning_rate": 0.0001985054160109657,
"loss": 1.3894,
"step": 37
},
{
"epoch": 0.010730674196964348,
"grad_norm": 0.6820394992828369,
"learning_rate": 0.00019839295885986296,
"loss": 1.4337,
"step": 38
},
{
"epoch": 0.011013060360042359,
"grad_norm": 1.0677088499069214,
"learning_rate": 0.0001982764571596004,
"loss": 1.6154,
"step": 39
},
{
"epoch": 0.011295446523120367,
"grad_norm": 1.6393396854400635,
"learning_rate": 0.00019815591569910654,
"loss": 2.0251,
"step": 40
},
{
"epoch": 0.011295446523120367,
"eval_loss": 1.3246878385543823,
"eval_runtime": 422.1745,
"eval_samples_per_second": 1.767,
"eval_steps_per_second": 1.767,
"step": 40
},
{
"epoch": 0.011577832686198376,
"grad_norm": 1.3773140907287598,
"learning_rate": 0.00019803133943336874,
"loss": 1.7091,
"step": 41
},
{
"epoch": 0.011860218849276386,
"grad_norm": 0.8763225078582764,
"learning_rate": 0.0001979027334832293,
"loss": 1.7084,
"step": 42
},
{
"epoch": 0.012142605012354394,
"grad_norm": 0.8546161651611328,
"learning_rate": 0.00019777010313517518,
"loss": 1.7023,
"step": 43
},
{
"epoch": 0.012424991175432405,
"grad_norm": 2.4073739051818848,
"learning_rate": 0.00019763345384112043,
"loss": 1.1566,
"step": 44
},
{
"epoch": 0.012707377338510413,
"grad_norm": 1.2686834335327148,
"learning_rate": 0.00019749279121818235,
"loss": 0.3033,
"step": 45
},
{
"epoch": 0.012989763501588421,
"grad_norm": 0.7953050136566162,
"learning_rate": 0.00019734812104845047,
"loss": 1.5463,
"step": 46
},
{
"epoch": 0.013272149664666432,
"grad_norm": 1.074453592300415,
"learning_rate": 0.00019719944927874881,
"loss": 1.5074,
"step": 47
},
{
"epoch": 0.01355453582774444,
"grad_norm": 1.1829779148101807,
"learning_rate": 0.0001970467820203915,
"loss": 1.2887,
"step": 48
},
{
"epoch": 0.01383692199082245,
"grad_norm": 0.631193995475769,
"learning_rate": 0.00019689012554893154,
"loss": 1.614,
"step": 49
},
{
"epoch": 0.014119308153900459,
"grad_norm": 0.6255627274513245,
"learning_rate": 0.00019672948630390294,
"loss": 0.7312,
"step": 50
},
{
"epoch": 0.014119308153900459,
"eval_loss": 1.303961157798767,
"eval_runtime": 420.1974,
"eval_samples_per_second": 1.775,
"eval_steps_per_second": 1.775,
"step": 50
},
{
"epoch": 0.014401694316978467,
"grad_norm": 1.264630913734436,
"learning_rate": 0.00019656487088855592,
"loss": 1.1731,
"step": 51
},
{
"epoch": 0.014684080480056478,
"grad_norm": 1.3137420415878296,
"learning_rate": 0.00019639628606958533,
"loss": 0.795,
"step": 52
},
{
"epoch": 0.014966466643134486,
"grad_norm": 1.4407997131347656,
"learning_rate": 0.0001962237387768529,
"loss": 1.3326,
"step": 53
},
{
"epoch": 0.015248852806212496,
"grad_norm": 1.2762479782104492,
"learning_rate": 0.00019604723610310194,
"loss": 0.7972,
"step": 54
},
{
"epoch": 0.015531238969290505,
"grad_norm": 1.0771076679229736,
"learning_rate": 0.00019586678530366606,
"loss": 1.3158,
"step": 55
},
{
"epoch": 0.015813625132368513,
"grad_norm": 0.8992990851402283,
"learning_rate": 0.00019568239379617088,
"loss": 1.4787,
"step": 56
},
{
"epoch": 0.016096011295446522,
"grad_norm": 1.2085480690002441,
"learning_rate": 0.00019549406916022905,
"loss": 1.3324,
"step": 57
},
{
"epoch": 0.016378397458524534,
"grad_norm": 0.7203757166862488,
"learning_rate": 0.00019530181913712872,
"loss": 1.548,
"step": 58
},
{
"epoch": 0.016660783621602542,
"grad_norm": 1.4223425388336182,
"learning_rate": 0.00019510565162951537,
"loss": 1.8365,
"step": 59
},
{
"epoch": 0.01694316978468055,
"grad_norm": 0.46909332275390625,
"learning_rate": 0.00019490557470106686,
"loss": 1.6222,
"step": 60
},
{
"epoch": 0.01694316978468055,
"eval_loss": 1.3196920156478882,
"eval_runtime": 419.9731,
"eval_samples_per_second": 1.776,
"eval_steps_per_second": 1.776,
"step": 60
},
{
"epoch": 0.01722555594775856,
"grad_norm": 0.7459226846694946,
"learning_rate": 0.00019470159657616215,
"loss": 1.4639,
"step": 61
},
{
"epoch": 0.017507942110836568,
"grad_norm": 1.4355456829071045,
"learning_rate": 0.00019449372563954293,
"loss": 1.4994,
"step": 62
},
{
"epoch": 0.01779032827391458,
"grad_norm": 0.7244583964347839,
"learning_rate": 0.0001942819704359693,
"loss": 0.3904,
"step": 63
},
{
"epoch": 0.018072714436992588,
"grad_norm": 0.9388204216957092,
"learning_rate": 0.00019406633966986828,
"loss": 1.8876,
"step": 64
},
{
"epoch": 0.018355100600070597,
"grad_norm": 2.272127866744995,
"learning_rate": 0.00019384684220497605,
"loss": 1.5339,
"step": 65
},
{
"epoch": 0.018637486763148605,
"grad_norm": 0.8985967636108398,
"learning_rate": 0.00019362348706397373,
"loss": 1.5562,
"step": 66
},
{
"epoch": 0.018919872926226614,
"grad_norm": 1.5650131702423096,
"learning_rate": 0.00019339628342811632,
"loss": 0.6815,
"step": 67
},
{
"epoch": 0.019202259089304625,
"grad_norm": 1.2985599040985107,
"learning_rate": 0.0001931652406368554,
"loss": 0.8239,
"step": 68
},
{
"epoch": 0.019484645252382634,
"grad_norm": 1.2683568000793457,
"learning_rate": 0.0001929303681874552,
"loss": 1.6103,
"step": 69
},
{
"epoch": 0.019767031415460642,
"grad_norm": 1.2072068452835083,
"learning_rate": 0.0001926916757346022,
"loss": 2.0554,
"step": 70
},
{
"epoch": 0.019767031415460642,
"eval_loss": 1.293800711631775,
"eval_runtime": 418.574,
"eval_samples_per_second": 1.782,
"eval_steps_per_second": 1.782,
"step": 70
},
{
"epoch": 0.02004941757853865,
"grad_norm": 1.6358360052108765,
"learning_rate": 0.00019244917309000817,
"loss": 1.3294,
"step": 71
},
{
"epoch": 0.02033180374161666,
"grad_norm": 1.2779337167739868,
"learning_rate": 0.00019220287022200707,
"loss": 1.4654,
"step": 72
},
{
"epoch": 0.02061418990469467,
"grad_norm": 1.0031236410140991,
"learning_rate": 0.0001919527772551451,
"loss": 1.4763,
"step": 73
},
{
"epoch": 0.02089657606777268,
"grad_norm": 1.0776865482330322,
"learning_rate": 0.00019169890446976454,
"loss": 1.9505,
"step": 74
},
{
"epoch": 0.02117896223085069,
"grad_norm": 1.0557630062103271,
"learning_rate": 0.00019144126230158127,
"loss": 1.425,
"step": 75
},
{
"epoch": 0.021461348393928697,
"grad_norm": 1.4582068920135498,
"learning_rate": 0.0001911798613412557,
"loss": 2.1393,
"step": 76
},
{
"epoch": 0.021743734557006705,
"grad_norm": 0.8082997798919678,
"learning_rate": 0.0001909147123339575,
"loss": 1.94,
"step": 77
},
{
"epoch": 0.022026120720084717,
"grad_norm": 0.6186602711677551,
"learning_rate": 0.0001906458261789238,
"loss": 2.4405,
"step": 78
},
{
"epoch": 0.022308506883162726,
"grad_norm": 0.9546780586242676,
"learning_rate": 0.00019037321392901136,
"loss": 1.4176,
"step": 79
},
{
"epoch": 0.022590893046240734,
"grad_norm": 0.5143202543258667,
"learning_rate": 0.0001900968867902419,
"loss": 2.0072,
"step": 80
},
{
"epoch": 0.022590893046240734,
"eval_loss": 1.2894105911254883,
"eval_runtime": 419.1859,
"eval_samples_per_second": 1.78,
"eval_steps_per_second": 1.78,
"step": 80
},
{
"epoch": 0.022873279209318743,
"grad_norm": 0.835146963596344,
"learning_rate": 0.0001898168561213419,
"loss": 1.6153,
"step": 81
},
{
"epoch": 0.02315566537239675,
"grad_norm": 0.6441218852996826,
"learning_rate": 0.0001895331334332753,
"loss": 1.3453,
"step": 82
},
{
"epoch": 0.023438051535474763,
"grad_norm": 1.0550341606140137,
"learning_rate": 0.0001892457303887706,
"loss": 0.7759,
"step": 83
},
{
"epoch": 0.02372043769855277,
"grad_norm": 1.1421196460723877,
"learning_rate": 0.0001889546588018412,
"loss": 1.4067,
"step": 84
},
{
"epoch": 0.02400282386163078,
"grad_norm": 0.683531641960144,
"learning_rate": 0.00018865993063730004,
"loss": 2.1907,
"step": 85
},
{
"epoch": 0.02428521002470879,
"grad_norm": 1.0629582405090332,
"learning_rate": 0.00018836155801026753,
"loss": 1.656,
"step": 86
},
{
"epoch": 0.024567596187786797,
"grad_norm": 1.9884428977966309,
"learning_rate": 0.0001880595531856738,
"loss": 1.3818,
"step": 87
},
{
"epoch": 0.02484998235086481,
"grad_norm": 0.8152889013290405,
"learning_rate": 0.00018775392857775432,
"loss": 2.4539,
"step": 88
},
{
"epoch": 0.025132368513942818,
"grad_norm": 1.8213200569152832,
"learning_rate": 0.00018744469674953956,
"loss": 0.8031,
"step": 89
},
{
"epoch": 0.025414754677020826,
"grad_norm": 1.3625519275665283,
"learning_rate": 0.00018713187041233896,
"loss": 1.5481,
"step": 90
},
{
"epoch": 0.025414754677020826,
"eval_loss": 1.2990045547485352,
"eval_runtime": 420.4752,
"eval_samples_per_second": 1.774,
"eval_steps_per_second": 1.774,
"step": 90
},
{
"epoch": 0.025697140840098835,
"grad_norm": 0.922839343547821,
"learning_rate": 0.00018681546242521786,
"loss": 2.2968,
"step": 91
},
{
"epoch": 0.025979527003176843,
"grad_norm": 0.9457669258117676,
"learning_rate": 0.00018649548579446936,
"loss": 1.5159,
"step": 92
},
{
"epoch": 0.026261913166254855,
"grad_norm": 1.3212480545043945,
"learning_rate": 0.0001861719536730795,
"loss": 1.8084,
"step": 93
},
{
"epoch": 0.026544299329332863,
"grad_norm": 1.1744962930679321,
"learning_rate": 0.00018584487936018661,
"loss": 2.3983,
"step": 94
},
{
"epoch": 0.026826685492410872,
"grad_norm": 0.9709725379943848,
"learning_rate": 0.00018551427630053463,
"loss": 1.8034,
"step": 95
},
{
"epoch": 0.02710907165548888,
"grad_norm": 0.878976047039032,
"learning_rate": 0.00018518015808392045,
"loss": 1.2276,
"step": 96
},
{
"epoch": 0.02739145781856689,
"grad_norm": 1.220984935760498,
"learning_rate": 0.00018484253844463526,
"loss": 1.0919,
"step": 97
},
{
"epoch": 0.0276738439816449,
"grad_norm": 0.41714727878570557,
"learning_rate": 0.00018450143126090015,
"loss": 1.0902,
"step": 98
},
{
"epoch": 0.02795623014472291,
"grad_norm": 0.9148246049880981,
"learning_rate": 0.00018415685055429533,
"loss": 2.1429,
"step": 99
},
{
"epoch": 0.028238616307800918,
"grad_norm": 0.8029168844223022,
"learning_rate": 0.00018380881048918405,
"loss": 1.5813,
"step": 100
},
{
"epoch": 0.028238616307800918,
"eval_loss": 1.3071253299713135,
"eval_runtime": 421.1658,
"eval_samples_per_second": 1.771,
"eval_steps_per_second": 1.771,
"step": 100
},
{
"epoch": 0.028521002470878926,
"grad_norm": 0.8574671745300293,
"learning_rate": 0.00018345732537213027,
"loss": 1.9177,
"step": 101
},
{
"epoch": 0.028803388633956935,
"grad_norm": 0.9129525423049927,
"learning_rate": 0.00018310240965131041,
"loss": 2.4622,
"step": 102
},
{
"epoch": 0.029085774797034947,
"grad_norm": 0.8835414052009583,
"learning_rate": 0.00018274407791591966,
"loss": 1.051,
"step": 103
},
{
"epoch": 0.029368160960112955,
"grad_norm": 0.508120059967041,
"learning_rate": 0.00018238234489557215,
"loss": 0.4317,
"step": 104
},
{
"epoch": 0.029650547123190964,
"grad_norm": 1.259400725364685,
"learning_rate": 0.0001820172254596956,
"loss": 1.9737,
"step": 105
},
{
"epoch": 0.029932933286268972,
"grad_norm": 1.45259428024292,
"learning_rate": 0.00018164873461691986,
"loss": 0.6959,
"step": 106
},
{
"epoch": 0.03021531944934698,
"grad_norm": 0.8846643567085266,
"learning_rate": 0.00018127688751446027,
"loss": 1.3487,
"step": 107
},
{
"epoch": 0.030497705612424993,
"grad_norm": 0.7302697896957397,
"learning_rate": 0.00018090169943749476,
"loss": 1.7445,
"step": 108
},
{
"epoch": 0.030780091775503,
"grad_norm": 1.3288211822509766,
"learning_rate": 0.0001805231858085356,
"loss": 1.2461,
"step": 109
},
{
"epoch": 0.03106247793858101,
"grad_norm": 0.4046940207481384,
"learning_rate": 0.00018014136218679567,
"loss": 1.9418,
"step": 110
},
{
"epoch": 0.03106247793858101,
"eval_loss": 1.3085497617721558,
"eval_runtime": 418.6203,
"eval_samples_per_second": 1.782,
"eval_steps_per_second": 1.782,
"step": 110
}
],
"logging_steps": 1,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.206114973810688e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
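
A minimal sketch (not part of the checkpoint itself) of how this trainer_state.json could be inspected: it splits the log history into training and evaluation entries and shows why training stopped. The file path is an assumption based on the checkpoint layout above; adjust it for your local copy.

# Illustrative sketch: summarize the loss curves and the early-stopping decision
# recorded in this trainer_state.json.
# Assumed path (hypothetical) -- adjust to wherever the checkpoint lives locally.
import json

with open("miner_id_24/checkpoint-110/trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss {e['eval_loss']:.4f}")

# With early_stopping_patience=3, the three consecutive evaluations after the
# step-80 best (steps 90, 100, 110) fail to improve, so the EarlyStoppingCallback
# sets should_training_stop=true well before max_steps=500.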