{
"best_metric": 0.6796185970306396,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 2.1361815754339117,
"eval_steps": 25,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014241210502892745,
"grad_norm": 17.032926559448242,
"learning_rate": 2.9999999999999997e-05,
"loss": 9.507,
"step": 1
},
{
"epoch": 0.014241210502892745,
"eval_loss": 9.880945205688477,
"eval_runtime": 5.1908,
"eval_samples_per_second": 9.632,
"eval_steps_per_second": 1.349,
"step": 1
},
{
"epoch": 0.02848242100578549,
"grad_norm": 17.92328453063965,
"learning_rate": 5.9999999999999995e-05,
"loss": 8.954,
"step": 2
},
{
"epoch": 0.042723631508678236,
"grad_norm": 19.016923904418945,
"learning_rate": 8.999999999999999e-05,
"loss": 9.2194,
"step": 3
},
{
"epoch": 0.05696484201157098,
"grad_norm": 19.83509635925293,
"learning_rate": 0.00011999999999999999,
"loss": 8.52,
"step": 4
},
{
"epoch": 0.07120605251446373,
"grad_norm": 21.18231201171875,
"learning_rate": 0.00015,
"loss": 5.5235,
"step": 5
},
{
"epoch": 0.08544726301735647,
"grad_norm": 16.114591598510742,
"learning_rate": 0.00017999999999999998,
"loss": 2.7593,
"step": 6
},
{
"epoch": 0.09968847352024922,
"grad_norm": 8.644830703735352,
"learning_rate": 0.00020999999999999998,
"loss": 1.3101,
"step": 7
},
{
"epoch": 0.11392968402314196,
"grad_norm": 9.379091262817383,
"learning_rate": 0.00023999999999999998,
"loss": 1.2027,
"step": 8
},
{
"epoch": 0.12817089452603472,
"grad_norm": 4.965191841125488,
"learning_rate": 0.00027,
"loss": 0.7208,
"step": 9
},
{
"epoch": 0.14241210502892745,
"grad_norm": 8.171319007873535,
"learning_rate": 0.0003,
"loss": 0.9491,
"step": 10
},
{
"epoch": 0.15665331553182021,
"grad_norm": 6.414262294769287,
"learning_rate": 0.0002999794957488703,
"loss": 0.8808,
"step": 11
},
{
"epoch": 0.17089452603471295,
"grad_norm": 3.4195830821990967,
"learning_rate": 0.0002999179886011389,
"loss": 0.7676,
"step": 12
},
{
"epoch": 0.1851357365376057,
"grad_norm": 2.1499311923980713,
"learning_rate": 0.0002998154953722457,
"loss": 0.6968,
"step": 13
},
{
"epoch": 0.19937694704049844,
"grad_norm": 4.422380447387695,
"learning_rate": 0.00029967204408281613,
"loss": 0.7326,
"step": 14
},
{
"epoch": 0.2136181575433912,
"grad_norm": 3.569704532623291,
"learning_rate": 0.00029948767395100045,
"loss": 0.8209,
"step": 15
},
{
"epoch": 0.22785936804628393,
"grad_norm": 3.780567169189453,
"learning_rate": 0.0002992624353817517,
"loss": 0.7951,
"step": 16
},
{
"epoch": 0.2421005785491767,
"grad_norm": 1.9853416681289673,
"learning_rate": 0.0002989963899530457,
"loss": 0.7119,
"step": 17
},
{
"epoch": 0.25634178905206945,
"grad_norm": 10.530756950378418,
"learning_rate": 0.00029868961039904624,
"loss": 1.0692,
"step": 18
},
{
"epoch": 0.2705829995549622,
"grad_norm": 2.4004061222076416,
"learning_rate": 0.00029834218059022024,
"loss": 0.7298,
"step": 19
},
{
"epoch": 0.2848242100578549,
"grad_norm": 102.44490814208984,
"learning_rate": 0.00029795419551040833,
"loss": 2.7342,
"step": 20
},
{
"epoch": 0.29906542056074764,
"grad_norm": 2.6258203983306885,
"learning_rate": 0.00029752576123085736,
"loss": 0.7426,
"step": 21
},
{
"epoch": 0.31330663106364043,
"grad_norm": 1.1354727745056152,
"learning_rate": 0.0002970569948812214,
"loss": 0.7128,
"step": 22
},
{
"epoch": 0.32754784156653316,
"grad_norm": 1.8081063032150269,
"learning_rate": 0.0002965480246175399,
"loss": 0.735,
"step": 23
},
{
"epoch": 0.3417890520694259,
"grad_norm": 1.5173677206039429,
"learning_rate": 0.0002959989895872009,
"loss": 0.7374,
"step": 24
},
{
"epoch": 0.3560302625723186,
"grad_norm": 1.4395380020141602,
"learning_rate": 0.0002954100398908995,
"loss": 0.7097,
"step": 25
},
{
"epoch": 0.3560302625723186,
"eval_loss": 0.7021229267120361,
"eval_runtime": 4.1775,
"eval_samples_per_second": 11.969,
"eval_steps_per_second": 1.676,
"step": 25
},
{
"epoch": 0.3702714730752114,
"grad_norm": 1.9905906915664673,
"learning_rate": 0.0002947813365416023,
"loss": 0.7426,
"step": 26
},
{
"epoch": 0.38451268357810414,
"grad_norm": 2.3615007400512695,
"learning_rate": 0.0002941130514205272,
"loss": 0.7702,
"step": 27
},
{
"epoch": 0.3987538940809969,
"grad_norm": 1.6972936391830444,
"learning_rate": 0.0002934053672301536,
"loss": 0.7125,
"step": 28
},
{
"epoch": 0.4129951045838896,
"grad_norm": 2.0987725257873535,
"learning_rate": 0.00029265847744427303,
"loss": 0.7404,
"step": 29
},
{
"epoch": 0.4272363150867824,
"grad_norm": 1.1297392845153809,
"learning_rate": 0.00029187258625509513,
"loss": 0.6718,
"step": 30
},
{
"epoch": 0.4414775255896751,
"grad_norm": 5.362275123596191,
"learning_rate": 0.00029104790851742417,
"loss": 0.8599,
"step": 31
},
{
"epoch": 0.45571873609256786,
"grad_norm": 2.3504865169525146,
"learning_rate": 0.0002901846696899191,
"loss": 0.8024,
"step": 32
},
{
"epoch": 0.4699599465954606,
"grad_norm": 2.02323317527771,
"learning_rate": 0.00028928310577345606,
"loss": 0.6932,
"step": 33
},
{
"epoch": 0.4842011570983534,
"grad_norm": 5.036567687988281,
"learning_rate": 0.0002883434632466077,
"loss": 0.7235,
"step": 34
},
{
"epoch": 0.4984423676012461,
"grad_norm": 3.0347747802734375,
"learning_rate": 0.00028736599899825856,
"loss": 0.7954,
"step": 35
},
{
"epoch": 0.5126835781041389,
"grad_norm": 2.3752622604370117,
"learning_rate": 0.00028635098025737434,
"loss": 0.7592,
"step": 36
},
{
"epoch": 0.5269247886070316,
"grad_norm": 1.4230512380599976,
"learning_rate": 0.00028529868451994384,
"loss": 0.7337,
"step": 37
},
{
"epoch": 0.5411659991099244,
"grad_norm": 0.973042368888855,
"learning_rate": 0.0002842093994731145,
"loss": 0.7265,
"step": 38
},
{
"epoch": 0.5554072096128171,
"grad_norm": 1.2456659078598022,
"learning_rate": 0.00028308342291654174,
"loss": 0.727,
"step": 39
},
{
"epoch": 0.5696484201157098,
"grad_norm": 0.5054659247398376,
"learning_rate": 0.00028192106268097334,
"loss": 0.6699,
"step": 40
},
{
"epoch": 0.5838896306186026,
"grad_norm": 0.71197909116745,
"learning_rate": 0.00028072263654409154,
"loss": 0.6921,
"step": 41
},
{
"epoch": 0.5981308411214953,
"grad_norm": 7.028622627258301,
"learning_rate": 0.0002794884721436361,
"loss": 0.8929,
"step": 42
},
{
"epoch": 0.6123720516243881,
"grad_norm": 25.489347457885742,
"learning_rate": 0.00027821890688783083,
"loss": 0.912,
"step": 43
},
{
"epoch": 0.6266132621272809,
"grad_norm": 1.3052946329116821,
"learning_rate": 0.0002769142878631403,
"loss": 0.7233,
"step": 44
},
{
"epoch": 0.6408544726301736,
"grad_norm": 1.5701165199279785,
"learning_rate": 0.00027557497173937923,
"loss": 0.7709,
"step": 45
},
{
"epoch": 0.6550956831330663,
"grad_norm": 0.5267326235771179,
"learning_rate": 0.000274201324672203,
"loss": 0.6879,
"step": 46
},
{
"epoch": 0.669336893635959,
"grad_norm": 1.0091323852539062,
"learning_rate": 0.00027279372220300385,
"loss": 0.7001,
"step": 47
},
{
"epoch": 0.6835781041388518,
"grad_norm": 1.2562334537506104,
"learning_rate": 0.0002713525491562421,
"loss": 0.6997,
"step": 48
},
{
"epoch": 0.6978193146417445,
"grad_norm": 0.9929755926132202,
"learning_rate": 0.00026987819953423867,
"loss": 0.7009,
"step": 49
},
{
"epoch": 0.7120605251446372,
"grad_norm": 1.2045605182647705,
"learning_rate": 0.00026837107640945905,
"loss": 0.6871,
"step": 50
},
{
"epoch": 0.7120605251446372,
"eval_loss": 0.760506808757782,
"eval_runtime": 4.1803,
"eval_samples_per_second": 11.961,
"eval_steps_per_second": 1.675,
"step": 50
},
{
"epoch": 0.7263017356475301,
"grad_norm": 3.0040671825408936,
"learning_rate": 0.0002668315918143169,
"loss": 0.8039,
"step": 51
},
{
"epoch": 0.7405429461504228,
"grad_norm": 3.1810615062713623,
"learning_rate": 0.00026526016662852886,
"loss": 0.7996,
"step": 52
},
{
"epoch": 0.7547841566533156,
"grad_norm": 2.66048526763916,
"learning_rate": 0.00026365723046405023,
"loss": 0.7514,
"step": 53
},
{
"epoch": 0.7690253671562083,
"grad_norm": 2.6719470024108887,
"learning_rate": 0.0002620232215476231,
"loss": 0.7929,
"step": 54
},
{
"epoch": 0.783266577659101,
"grad_norm": 2.316591739654541,
"learning_rate": 0.0002603585866009697,
"loss": 0.7603,
"step": 55
},
{
"epoch": 0.7975077881619937,
"grad_norm": 1.115259051322937,
"learning_rate": 0.00025866378071866334,
"loss": 0.6665,
"step": 56
},
{
"epoch": 0.8117489986648865,
"grad_norm": 0.5784854292869568,
"learning_rate": 0.00025693926724370956,
"loss": 0.6798,
"step": 57
},
{
"epoch": 0.8259902091677792,
"grad_norm": 0.49967318773269653,
"learning_rate": 0.00025518551764087326,
"loss": 0.6836,
"step": 58
},
{
"epoch": 0.840231419670672,
"grad_norm": 0.8447709083557129,
"learning_rate": 0.00025340301136778483,
"loss": 0.7267,
"step": 59
},
{
"epoch": 0.8544726301735648,
"grad_norm": 0.5715835094451904,
"learning_rate": 0.00025159223574386114,
"loss": 0.6858,
"step": 60
},
{
"epoch": 0.8687138406764575,
"grad_norm": 1.0063656568527222,
"learning_rate": 0.0002497536858170772,
"loss": 0.706,
"step": 61
},
{
"epoch": 0.8829550511793502,
"grad_norm": 0.9968670606613159,
"learning_rate": 0.00024788786422862526,
"loss": 0.6861,
"step": 62
},
{
"epoch": 0.897196261682243,
"grad_norm": 0.9589150547981262,
"learning_rate": 0.00024599528107549745,
"loss": 0.7194,
"step": 63
},
{
"epoch": 0.9114374721851357,
"grad_norm": 0.7447915077209473,
"learning_rate": 0.00024407645377103054,
"loss": 0.7086,
"step": 64
},
{
"epoch": 0.9256786826880284,
"grad_norm": 1.025489091873169,
"learning_rate": 0.00024213190690345018,
"loss": 0.6846,
"step": 65
},
{
"epoch": 0.9399198931909212,
"grad_norm": 1.9707061052322388,
"learning_rate": 0.00024016217209245374,
"loss": 0.7149,
"step": 66
},
{
"epoch": 0.954161103693814,
"grad_norm": 0.826673686504364,
"learning_rate": 0.00023816778784387094,
"loss": 0.6999,
"step": 67
},
{
"epoch": 0.9684023141967067,
"grad_norm": 1.027491807937622,
"learning_rate": 0.0002361492994024415,
"loss": 0.7006,
"step": 68
},
{
"epoch": 0.9826435246995995,
"grad_norm": 0.5436568260192871,
"learning_rate": 0.0002341072586027509,
"loss": 0.6732,
"step": 69
},
{
"epoch": 0.9968847352024922,
"grad_norm": 1.2011053562164307,
"learning_rate": 0.00023204222371836405,
"loss": 0.7055,
"step": 70
},
{
"epoch": 1.011125945705385,
"grad_norm": 0.7172683477401733,
"learning_rate": 0.00022995475930919905,
"loss": 1.1875,
"step": 71
},
{
"epoch": 1.0253671562082778,
"grad_norm": 0.8648049831390381,
"learning_rate": 0.00022784543606718227,
"loss": 0.6959,
"step": 72
},
{
"epoch": 1.0396083667111704,
"grad_norm": 1.807977557182312,
"learning_rate": 0.00022571483066022657,
"loss": 0.7421,
"step": 73
},
{
"epoch": 1.0538495772140632,
"grad_norm": 1.7105003595352173,
"learning_rate": 0.0002235635255745762,
"loss": 0.7546,
"step": 74
},
{
"epoch": 1.0680907877169559,
"grad_norm": 0.626197338104248,
"learning_rate": 0.00022139210895556104,
"loss": 0.6896,
"step": 75
},
{
"epoch": 1.0680907877169559,
"eval_loss": 0.719901442527771,
"eval_runtime": 4.188,
"eval_samples_per_second": 11.939,
"eval_steps_per_second": 1.671,
"step": 75
},
{
"epoch": 1.0823319982198487,
"grad_norm": 1.092750906944275,
"learning_rate": 0.00021920117444680317,
"loss": 0.7064,
"step": 76
},
{
"epoch": 1.0965732087227413,
"grad_norm": 0.5488213896751404,
"learning_rate": 0.00021699132102792097,
"loss": 0.6757,
"step": 77
},
{
"epoch": 1.1108144192256342,
"grad_norm": 1.0106656551361084,
"learning_rate": 0.0002147631528507739,
"loss": 0.6956,
"step": 78
},
{
"epoch": 1.125055629728527,
"grad_norm": 0.6059918999671936,
"learning_rate": 0.00021251727907429355,
"loss": 0.696,
"step": 79
},
{
"epoch": 1.1392968402314196,
"grad_norm": 0.7169640064239502,
"learning_rate": 0.0002102543136979454,
"loss": 0.7019,
"step": 80
},
{
"epoch": 1.1535380507343125,
"grad_norm": 0.8470028638839722,
"learning_rate": 0.0002079748753938678,
"loss": 0.6867,
"step": 81
},
{
"epoch": 1.167779261237205,
"grad_norm": 0.5408539175987244,
"learning_rate": 0.0002056795873377331,
"loss": 0.6932,
"step": 82
},
{
"epoch": 1.182020471740098,
"grad_norm": 0.7373553514480591,
"learning_rate": 0.00020336907703837748,
"loss": 0.7023,
"step": 83
},
{
"epoch": 1.1962616822429906,
"grad_norm": 0.552183985710144,
"learning_rate": 0.00020104397616624645,
"loss": 0.7053,
"step": 84
},
{
"epoch": 1.2105028927458834,
"grad_norm": 0.7189547419548035,
"learning_rate": 0.00019870492038070252,
"loss": 0.6598,
"step": 85
},
{
"epoch": 1.224744103248776,
"grad_norm": 0.7592505812644958,
"learning_rate": 0.0001963525491562421,
"loss": 0.6908,
"step": 86
},
{
"epoch": 1.2389853137516689,
"grad_norm": 0.42698734998703003,
"learning_rate": 0.0001939875056076697,
"loss": 0.6838,
"step": 87
},
{
"epoch": 1.2532265242545617,
"grad_norm": 0.5881170034408569,
"learning_rate": 0.00019161043631427666,
"loss": 0.6908,
"step": 88
},
{
"epoch": 1.2674677347574543,
"grad_norm": 1.1356054544448853,
"learning_rate": 0.00018922199114307294,
"loss": 0.6936,
"step": 89
},
{
"epoch": 1.2817089452603472,
"grad_norm": 0.5578824281692505,
"learning_rate": 0.00018682282307111987,
"loss": 0.6895,
"step": 90
},
{
"epoch": 1.2959501557632398,
"grad_norm": 1.2620768547058105,
"learning_rate": 0.00018441358800701273,
"loss": 0.7067,
"step": 91
},
{
"epoch": 1.3101913662661326,
"grad_norm": 0.9779208898544312,
"learning_rate": 0.00018199494461156203,
"loss": 0.7215,
"step": 92
},
{
"epoch": 1.3244325767690253,
"grad_norm": 0.9356729388237,
"learning_rate": 0.000179567554117722,
"loss": 0.6979,
"step": 93
},
{
"epoch": 1.338673787271918,
"grad_norm": 0.8828136324882507,
"learning_rate": 0.00017713208014981648,
"loss": 0.7469,
"step": 94
},
{
"epoch": 1.352914997774811,
"grad_norm": 0.33228611946105957,
"learning_rate": 0.00017468918854211007,
"loss": 0.6815,
"step": 95
},
{
"epoch": 1.3671562082777036,
"grad_norm": 0.744838535785675,
"learning_rate": 0.00017223954715677627,
"loss": 0.696,
"step": 96
},
{
"epoch": 1.3813974187805964,
"grad_norm": 0.5581339597702026,
"learning_rate": 0.00016978382570131034,
"loss": 0.6878,
"step": 97
},
{
"epoch": 1.395638629283489,
"grad_norm": 0.6438109278678894,
"learning_rate": 0.00016732269554543794,
"loss": 0.7204,
"step": 98
},
{
"epoch": 1.4098798397863819,
"grad_norm": 0.4048614501953125,
"learning_rate": 0.00016485682953756942,
"loss": 0.682,
"step": 99
},
{
"epoch": 1.4241210502892745,
"grad_norm": 0.6994701027870178,
"learning_rate": 0.00016238690182084986,
"loss": 0.6491,
"step": 100
},
{
"epoch": 1.4241210502892745,
"eval_loss": 0.6796185970306396,
"eval_runtime": 4.1861,
"eval_samples_per_second": 11.944,
"eval_steps_per_second": 1.672,
"step": 100
},
{
"epoch": 1.4383622607921673,
"grad_norm": 0.44421419501304626,
"learning_rate": 0.0001599135876488549,
"loss": 0.6904,
"step": 101
},
{
"epoch": 1.4526034712950602,
"grad_norm": 0.6852967143058777,
"learning_rate": 0.00015743756320098332,
"loss": 0.644,
"step": 102
},
{
"epoch": 1.4668446817979528,
"grad_norm": 0.9849354028701782,
"learning_rate": 0.0001549595053975962,
"loss": 0.6534,
"step": 103
},
{
"epoch": 1.4810858923008456,
"grad_norm": 0.8082683086395264,
"learning_rate": 0.00015248009171495378,
"loss": 0.6335,
"step": 104
},
{
"epoch": 1.4953271028037383,
"grad_norm": 0.9514603614807129,
"learning_rate": 0.00015,
"loss": 0.7136,
"step": 105
},
{
"epoch": 1.509568313306631,
"grad_norm": 0.6103482842445374,
"learning_rate": 0.00014751990828504622,
"loss": 0.6936,
"step": 106
},
{
"epoch": 1.5238095238095237,
"grad_norm": 0.7948316931724548,
"learning_rate": 0.00014504049460240375,
"loss": 0.6635,
"step": 107
},
{
"epoch": 1.5380507343124166,
"grad_norm": 0.6883447170257568,
"learning_rate": 0.00014256243679901663,
"loss": 0.6801,
"step": 108
},
{
"epoch": 1.5522919448153094,
"grad_norm": 11.66091251373291,
"learning_rate": 0.00014008641235114508,
"loss": 0.9128,
"step": 109
},
{
"epoch": 1.566533155318202,
"grad_norm": 1.0597611665725708,
"learning_rate": 0.00013761309817915014,
"loss": 0.6946,
"step": 110
},
{
"epoch": 1.5807743658210947,
"grad_norm": 1.3430280685424805,
"learning_rate": 0.00013514317046243058,
"loss": 0.7171,
"step": 111
},
{
"epoch": 1.5950155763239875,
"grad_norm": 1.0752052068710327,
"learning_rate": 0.00013267730445456208,
"loss": 0.705,
"step": 112
},
{
"epoch": 1.6092567868268803,
"grad_norm": 12.76914119720459,
"learning_rate": 0.00013021617429868963,
"loss": 0.7726,
"step": 113
},
{
"epoch": 1.623497997329773,
"grad_norm": 0.9116569757461548,
"learning_rate": 0.00012776045284322368,
"loss": 0.7378,
"step": 114
},
{
"epoch": 1.6377392078326658,
"grad_norm": 0.7087443470954895,
"learning_rate": 0.00012531081145788987,
"loss": 0.704,
"step": 115
},
{
"epoch": 1.6519804183355586,
"grad_norm": 0.5982756614685059,
"learning_rate": 0.00012286791985018355,
"loss": 0.7044,
"step": 116
},
{
"epoch": 1.6662216288384513,
"grad_norm": 0.7551002502441406,
"learning_rate": 0.00012043244588227796,
"loss": 0.7104,
"step": 117
},
{
"epoch": 1.6804628393413439,
"grad_norm": 0.6853469610214233,
"learning_rate": 0.00011800505538843798,
"loss": 0.7109,
"step": 118
},
{
"epoch": 1.6947040498442367,
"grad_norm": 0.339310884475708,
"learning_rate": 0.00011558641199298727,
"loss": 0.6843,
"step": 119
},
{
"epoch": 1.7089452603471296,
"grad_norm": 0.43288713693618774,
"learning_rate": 0.00011317717692888012,
"loss": 0.6887,
"step": 120
},
{
"epoch": 1.7231864708500222,
"grad_norm": 0.6944701671600342,
"learning_rate": 0.00011077800885692702,
"loss": 0.6887,
"step": 121
},
{
"epoch": 1.737427681352915,
"grad_norm": 0.5868093967437744,
"learning_rate": 0.00010838956368572334,
"loss": 0.6853,
"step": 122
},
{
"epoch": 1.7516688918558079,
"grad_norm": 0.5155894756317139,
"learning_rate": 0.0001060124943923303,
"loss": 0.6742,
"step": 123
},
{
"epoch": 1.7659101023587005,
"grad_norm": 0.7610857486724854,
"learning_rate": 0.0001036474508437579,
"loss": 0.6495,
"step": 124
},
{
"epoch": 1.7801513128615931,
"grad_norm": 1.0266481637954712,
"learning_rate": 0.00010129507961929748,
"loss": 0.7232,
"step": 125
},
{
"epoch": 1.7801513128615931,
"eval_loss": 0.6749953627586365,
"eval_runtime": 4.181,
"eval_samples_per_second": 11.959,
"eval_steps_per_second": 1.674,
"step": 125
},
{
"epoch": 1.794392523364486,
"grad_norm": 1.3338428735733032,
"learning_rate": 9.895602383375353e-05,
"loss": 0.7449,
"step": 126
},
{
"epoch": 1.8086337338673788,
"grad_norm": 0.7237399220466614,
"learning_rate": 9.663092296162251e-05,
"loss": 0.6565,
"step": 127
},
{
"epoch": 1.8228749443702714,
"grad_norm": 1.313313364982605,
"learning_rate": 9.432041266226686e-05,
"loss": 0.7559,
"step": 128
},
{
"epoch": 1.8371161548731643,
"grad_norm": 0.8445695638656616,
"learning_rate": 9.202512460613219e-05,
"loss": 0.6871,
"step": 129
},
{
"epoch": 1.851357365376057,
"grad_norm": 0.847963273525238,
"learning_rate": 8.97456863020546e-05,
"loss": 0.6877,
"step": 130
},
{
"epoch": 1.8655985758789497,
"grad_norm": 0.8895235657691956,
"learning_rate": 8.748272092570646e-05,
"loss": 0.7047,
"step": 131
},
{
"epoch": 1.8798397863818423,
"grad_norm": 0.5661662817001343,
"learning_rate": 8.523684714922608e-05,
"loss": 0.6888,
"step": 132
},
{
"epoch": 1.8940809968847352,
"grad_norm": 0.429289311170578,
"learning_rate": 8.300867897207903e-05,
"loss": 0.6702,
"step": 133
},
{
"epoch": 1.908322207387628,
"grad_norm": 0.6174786686897278,
"learning_rate": 8.079882555319684e-05,
"loss": 0.6665,
"step": 134
},
{
"epoch": 1.9225634178905207,
"grad_norm": 0.45624855160713196,
"learning_rate": 7.860789104443896e-05,
"loss": 0.6919,
"step": 135
},
{
"epoch": 1.9368046283934133,
"grad_norm": 0.6706792116165161,
"learning_rate": 7.643647442542382e-05,
"loss": 0.6933,
"step": 136
},
{
"epoch": 1.9510458388963063,
"grad_norm": 0.7465632557868958,
"learning_rate": 7.428516933977347e-05,
"loss": 0.6696,
"step": 137
},
{
"epoch": 1.965287049399199,
"grad_norm": 0.8878591060638428,
"learning_rate": 7.215456393281776e-05,
"loss": 0.7182,
"step": 138
},
{
"epoch": 1.9795282599020916,
"grad_norm": 0.6827524900436401,
"learning_rate": 7.004524069080096e-05,
"loss": 0.6645,
"step": 139
},
{
"epoch": 1.9937694704049844,
"grad_norm": 0.9182266592979431,
"learning_rate": 6.795777628163599e-05,
"loss": 0.6593,
"step": 140
},
{
"epoch": 2.0080106809078773,
"grad_norm": 2.135566473007202,
"learning_rate": 6.58927413972491e-05,
"loss": 1.062,
"step": 141
},
{
"epoch": 2.02225189141077,
"grad_norm": 1.0483397245407104,
"learning_rate": 6.385070059755846e-05,
"loss": 0.6592,
"step": 142
},
{
"epoch": 2.0364931019136625,
"grad_norm": 1.6429957151412964,
"learning_rate": 6.183221215612904e-05,
"loss": 0.6421,
"step": 143
},
{
"epoch": 2.0507343124165556,
"grad_norm": 1.6329983472824097,
"learning_rate": 5.983782790754623e-05,
"loss": 0.6402,
"step": 144
},
{
"epoch": 2.064975522919448,
"grad_norm": 2.5596134662628174,
"learning_rate": 5.786809309654982e-05,
"loss": 0.6616,
"step": 145
},
{
"epoch": 2.079216733422341,
"grad_norm": 1.579499363899231,
"learning_rate": 5.592354622896944e-05,
"loss": 0.6083,
"step": 146
},
{
"epoch": 2.0934579439252334,
"grad_norm": 2.024502754211426,
"learning_rate": 5.40047189245025e-05,
"loss": 0.6279,
"step": 147
},
{
"epoch": 2.1076991544281265,
"grad_norm": 2.148564100265503,
"learning_rate": 5.211213577137469e-05,
"loss": 0.5763,
"step": 148
},
{
"epoch": 2.121940364931019,
"grad_norm": 2.531046152114868,
"learning_rate": 5.024631418292274e-05,
"loss": 0.6952,
"step": 149
},
{
"epoch": 2.1361815754339117,
"grad_norm": 1.8060929775238037,
"learning_rate": 4.840776425613886e-05,
"loss": 0.5623,
"step": 150
},
{
"epoch": 2.1361815754339117,
"eval_loss": 0.6849590539932251,
"eval_runtime": 4.1751,
"eval_samples_per_second": 11.976,
"eval_steps_per_second": 1.677,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.364765716348928e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}