{
"best_metric": 2.4083755016326904,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 1.474427891260943,
"eval_steps": 25,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009829519275072954,
"grad_norm": 2.6821541786193848,
"learning_rate": 1.6666666666666667e-05,
"loss": 3.4332,
"step": 1
},
{
"epoch": 0.009829519275072954,
"eval_loss": 4.630801200866699,
"eval_runtime": 0.698,
"eval_samples_per_second": 71.632,
"eval_steps_per_second": 18.624,
"step": 1
},
{
"epoch": 0.019659038550145908,
"grad_norm": 3.109013319015503,
"learning_rate": 3.3333333333333335e-05,
"loss": 3.6809,
"step": 2
},
{
"epoch": 0.02948855782521886,
"grad_norm": 3.3269290924072266,
"learning_rate": 5e-05,
"loss": 3.8116,
"step": 3
},
{
"epoch": 0.039318077100291816,
"grad_norm": 3.2339508533477783,
"learning_rate": 6.666666666666667e-05,
"loss": 3.7141,
"step": 4
},
{
"epoch": 0.04914759637536477,
"grad_norm": 3.194000482559204,
"learning_rate": 8.333333333333334e-05,
"loss": 3.7467,
"step": 5
},
{
"epoch": 0.05897711565043772,
"grad_norm": 3.2596042156219482,
"learning_rate": 0.0001,
"loss": 3.6488,
"step": 6
},
{
"epoch": 0.06880663492551067,
"grad_norm": 2.7639877796173096,
"learning_rate": 0.00011666666666666668,
"loss": 3.4483,
"step": 7
},
{
"epoch": 0.07863615420058363,
"grad_norm": 2.3510923385620117,
"learning_rate": 0.00013333333333333334,
"loss": 3.2237,
"step": 8
},
{
"epoch": 0.08846567347565658,
"grad_norm": 2.3857412338256836,
"learning_rate": 0.00015000000000000001,
"loss": 3.289,
"step": 9
},
{
"epoch": 0.09829519275072954,
"grad_norm": 3.4331414699554443,
"learning_rate": 0.0001666666666666667,
"loss": 3.1625,
"step": 10
},
{
"epoch": 0.10812471202580248,
"grad_norm": 3.1604197025299072,
"learning_rate": 0.00018333333333333334,
"loss": 3.2429,
"step": 11
},
{
"epoch": 0.11795423130087544,
"grad_norm": 2.682563304901123,
"learning_rate": 0.0002,
"loss": 3.4042,
"step": 12
},
{
"epoch": 0.1277837505759484,
"grad_norm": 1.3882246017456055,
"learning_rate": 0.00019999486177088252,
"loss": 2.6979,
"step": 13
},
{
"epoch": 0.13761326985102135,
"grad_norm": 1.0993757247924805,
"learning_rate": 0.00019997944767022771,
"loss": 2.5693,
"step": 14
},
{
"epoch": 0.1474427891260943,
"grad_norm": 0.979709804058075,
"learning_rate": 0.00019995375945806192,
"loss": 2.5269,
"step": 15
},
{
"epoch": 0.15727230840116727,
"grad_norm": 0.9595231413841248,
"learning_rate": 0.00019991780006753883,
"loss": 2.6667,
"step": 16
},
{
"epoch": 0.1671018276762402,
"grad_norm": 0.832266092300415,
"learning_rate": 0.0001998715736046049,
"loss": 2.4709,
"step": 17
},
{
"epoch": 0.17693134695131316,
"grad_norm": 1.0170220136642456,
"learning_rate": 0.00019981508534753028,
"loss": 2.4992,
"step": 18
},
{
"epoch": 0.18676086622638613,
"grad_norm": 0.8332740068435669,
"learning_rate": 0.00019974834174630622,
"loss": 2.5776,
"step": 19
},
{
"epoch": 0.19659038550145908,
"grad_norm": 0.8824642300605774,
"learning_rate": 0.00019967135042190862,
"loss": 2.5329,
"step": 20
},
{
"epoch": 0.20641990477653202,
"grad_norm": 0.8353331089019775,
"learning_rate": 0.00019958412016542784,
"loss": 2.5575,
"step": 21
},
{
"epoch": 0.21624942405160497,
"grad_norm": 0.9471293091773987,
"learning_rate": 0.00019948666093706484,
"loss": 2.6083,
"step": 22
},
{
"epoch": 0.22607894332667794,
"grad_norm": 1.0592094659805298,
"learning_rate": 0.00019937898386499393,
"loss": 2.6781,
"step": 23
},
{
"epoch": 0.23590846260175088,
"grad_norm": 1.2304210662841797,
"learning_rate": 0.00019926110124409216,
"loss": 2.7943,
"step": 24
},
{
"epoch": 0.24573798187682383,
"grad_norm": 2.681989908218384,
"learning_rate": 0.00019913302653453544,
"loss": 3.2206,
"step": 25
},
{
"epoch": 0.24573798187682383,
"eval_loss": 2.6977789402008057,
"eval_runtime": 0.6963,
"eval_samples_per_second": 71.804,
"eval_steps_per_second": 18.669,
"step": 25
},
{
"epoch": 0.2555675011518968,
"grad_norm": 1.0680567026138306,
"learning_rate": 0.00019899477436026157,
"loss": 2.386,
"step": 26
},
{
"epoch": 0.2653970204269697,
"grad_norm": 1.146119236946106,
"learning_rate": 0.00019884636050730055,
"loss": 2.4147,
"step": 27
},
{
"epoch": 0.2752265397020427,
"grad_norm": 1.0828416347503662,
"learning_rate": 0.00019868780192197182,
"loss": 2.4808,
"step": 28
},
{
"epoch": 0.28505605897711567,
"grad_norm": 1.0389188528060913,
"learning_rate": 0.00019851911670894977,
"loss": 2.4491,
"step": 29
},
{
"epoch": 0.2948855782521886,
"grad_norm": 0.9195923805236816,
"learning_rate": 0.0001983403241291959,
"loss": 2.3826,
"step": 30
},
{
"epoch": 0.30471509752726156,
"grad_norm": 0.8912298083305359,
"learning_rate": 0.00019815144459776,
"loss": 2.4857,
"step": 31
},
{
"epoch": 0.31454461680233453,
"grad_norm": 0.8696074485778809,
"learning_rate": 0.00019795249968144896,
"loss": 2.4644,
"step": 32
},
{
"epoch": 0.32437413607740745,
"grad_norm": 0.8864859938621521,
"learning_rate": 0.00019774351209636413,
"loss": 2.5214,
"step": 33
},
{
"epoch": 0.3342036553524804,
"grad_norm": 0.9830371737480164,
"learning_rate": 0.0001975245057053076,
"loss": 2.5429,
"step": 34
},
{
"epoch": 0.3440331746275534,
"grad_norm": 1.0698072910308838,
"learning_rate": 0.00019729550551505752,
"loss": 2.655,
"step": 35
},
{
"epoch": 0.3538626939026263,
"grad_norm": 1.177764892578125,
"learning_rate": 0.00019705653767351265,
"loss": 2.6462,
"step": 36
},
{
"epoch": 0.3636922131776993,
"grad_norm": 1.5564563274383545,
"learning_rate": 0.00019680762946670683,
"loss": 2.8865,
"step": 37
},
{
"epoch": 0.37352173245277226,
"grad_norm": 1.0166500806808472,
"learning_rate": 0.0001965488093156933,
"loss": 2.3889,
"step": 38
},
{
"epoch": 0.3833512517278452,
"grad_norm": 0.7780814170837402,
"learning_rate": 0.00019628010677329947,
"loss": 2.22,
"step": 39
},
{
"epoch": 0.39318077100291815,
"grad_norm": 0.7663058638572693,
"learning_rate": 0.00019600155252075268,
"loss": 2.2794,
"step": 40
},
{
"epoch": 0.40301029027799107,
"grad_norm": 0.9589061737060547,
"learning_rate": 0.0001957131783641767,
"loss": 2.4648,
"step": 41
},
{
"epoch": 0.41283980955306404,
"grad_norm": 0.8198392391204834,
"learning_rate": 0.00019541501723096017,
"loss": 2.4522,
"step": 42
},
{
"epoch": 0.422669328828137,
"grad_norm": 0.7783530354499817,
"learning_rate": 0.0001951071031659968,
"loss": 2.4004,
"step": 43
},
{
"epoch": 0.43249884810320993,
"grad_norm": 0.9277461767196655,
"learning_rate": 0.000194789471327798,
"loss": 2.4421,
"step": 44
},
{
"epoch": 0.4423283673782829,
"grad_norm": 0.8785288333892822,
"learning_rate": 0.00019446215798447845,
"loss": 2.3626,
"step": 45
},
{
"epoch": 0.4521578866533559,
"grad_norm": 0.9237145781517029,
"learning_rate": 0.00019412520050961481,
"loss": 2.4119,
"step": 46
},
{
"epoch": 0.4619874059284288,
"grad_norm": 0.9750041961669922,
"learning_rate": 0.00019377863737797839,
"loss": 2.4959,
"step": 47
},
{
"epoch": 0.47181692520350177,
"grad_norm": 1.1339645385742188,
"learning_rate": 0.00019342250816114197,
"loss": 2.4285,
"step": 48
},
{
"epoch": 0.48164644447857474,
"grad_norm": 1.3169782161712646,
"learning_rate": 0.00019305685352296134,
"loss": 2.667,
"step": 49
},
{
"epoch": 0.49147596375364766,
"grad_norm": 2.7065019607543945,
"learning_rate": 0.00019268171521493225,
"loss": 3.0527,
"step": 50
},
{
"epoch": 0.49147596375364766,
"eval_loss": 2.54618239402771,
"eval_runtime": 0.6959,
"eval_samples_per_second": 71.852,
"eval_steps_per_second": 18.682,
"step": 50
},
{
"epoch": 0.5013054830287206,
"grad_norm": 1.095455288887024,
"learning_rate": 0.0001922971360714231,
"loss": 2.2371,
"step": 51
},
{
"epoch": 0.5111350023037936,
"grad_norm": 0.9260361790657043,
"learning_rate": 0.00019190316000478402,
"loss": 2.3674,
"step": 52
},
{
"epoch": 0.5209645215788665,
"grad_norm": 0.8077250123023987,
"learning_rate": 0.0001914998320003326,
"loss": 2.3025,
"step": 53
},
{
"epoch": 0.5307940408539394,
"grad_norm": 0.8684698939323425,
"learning_rate": 0.00019108719811121772,
"loss": 2.3124,
"step": 54
},
{
"epoch": 0.5406235601290125,
"grad_norm": 1.095186471939087,
"learning_rate": 0.0001906653054531608,
"loss": 2.3331,
"step": 55
},
{
"epoch": 0.5504530794040854,
"grad_norm": 1.0066113471984863,
"learning_rate": 0.00019023420219907607,
"loss": 2.3205,
"step": 56
},
{
"epoch": 0.5602825986791583,
"grad_norm": 0.84846431016922,
"learning_rate": 0.00018979393757357003,
"loss": 2.3889,
"step": 57
},
{
"epoch": 0.5701121179542313,
"grad_norm": 0.8877599835395813,
"learning_rate": 0.00018934456184732082,
"loss": 2.3474,
"step": 58
},
{
"epoch": 0.5799416372293043,
"grad_norm": 1.066555142402649,
"learning_rate": 0.00018888612633133827,
"loss": 2.4563,
"step": 59
},
{
"epoch": 0.5897711565043772,
"grad_norm": 1.1691265106201172,
"learning_rate": 0.00018841868337110508,
"loss": 2.4273,
"step": 60
},
{
"epoch": 0.5996006757794502,
"grad_norm": 1.2215334177017212,
"learning_rate": 0.0001879422863405995,
"loss": 2.4515,
"step": 61
},
{
"epoch": 0.6094301950545231,
"grad_norm": 1.5334198474884033,
"learning_rate": 0.00018745698963620145,
"loss": 2.7499,
"step": 62
},
{
"epoch": 0.619259714329596,
"grad_norm": 0.930508553981781,
"learning_rate": 0.00018696284867048118,
"loss": 2.3011,
"step": 63
},
{
"epoch": 0.6290892336046691,
"grad_norm": 0.732704222202301,
"learning_rate": 0.00018645991986587185,
"loss": 2.2053,
"step": 64
},
{
"epoch": 0.638918752879742,
"grad_norm": 0.7689364552497864,
"learning_rate": 0.0001859482606482275,
"loss": 2.2696,
"step": 65
},
{
"epoch": 0.6487482721548149,
"grad_norm": 0.8270861506462097,
"learning_rate": 0.00018542792944026566,
"loss": 2.3104,
"step": 66
},
{
"epoch": 0.6585777914298879,
"grad_norm": 0.8585655689239502,
"learning_rate": 0.00018489898565489664,
"loss": 2.2854,
"step": 67
},
{
"epoch": 0.6684073107049608,
"grad_norm": 0.958391010761261,
"learning_rate": 0.00018436148968843956,
"loss": 2.3815,
"step": 68
},
{
"epoch": 0.6782368299800338,
"grad_norm": 0.8035868406295776,
"learning_rate": 0.00018381550291372603,
"loss": 2.3304,
"step": 69
},
{
"epoch": 0.6880663492551068,
"grad_norm": 0.8527224063873291,
"learning_rate": 0.00018326108767309266,
"loss": 2.2976,
"step": 70
},
{
"epoch": 0.6978958685301797,
"grad_norm": 1.0419751405715942,
"learning_rate": 0.00018269830727126233,
"loss": 2.3254,
"step": 71
},
{
"epoch": 0.7077253878052526,
"grad_norm": 1.060821533203125,
"learning_rate": 0.0001821272259681161,
"loss": 2.3873,
"step": 72
},
{
"epoch": 0.7175549070803257,
"grad_norm": 1.11903977394104,
"learning_rate": 0.00018154790897135581,
"loss": 2.425,
"step": 73
},
{
"epoch": 0.7273844263553986,
"grad_norm": 1.2984219789505005,
"learning_rate": 0.00018096042242905832,
"loss": 2.5535,
"step": 74
},
{
"epoch": 0.7372139456304715,
"grad_norm": 2.152036428451538,
"learning_rate": 0.00018036483342212268,
"loss": 2.8375,
"step": 75
},
{
"epoch": 0.7372139456304715,
"eval_loss": 2.4736454486846924,
"eval_runtime": 0.6958,
"eval_samples_per_second": 71.864,
"eval_steps_per_second": 18.685,
"step": 75
},
{
"epoch": 0.7470434649055445,
"grad_norm": 0.7806919813156128,
"learning_rate": 0.00017976120995661068,
"loss": 2.2532,
"step": 76
},
{
"epoch": 0.7568729841806174,
"grad_norm": 0.7151602506637573,
"learning_rate": 0.00017914962095598151,
"loss": 2.2544,
"step": 77
},
{
"epoch": 0.7667025034556904,
"grad_norm": 0.7725361585617065,
"learning_rate": 0.0001785301362532221,
"loss": 2.2388,
"step": 78
},
{
"epoch": 0.7765320227307633,
"grad_norm": 0.8105177879333496,
"learning_rate": 0.00017790282658287332,
"loss": 2.3032,
"step": 79
},
{
"epoch": 0.7863615420058363,
"grad_norm": 0.8750107884407043,
"learning_rate": 0.00017726776357295318,
"loss": 2.252,
"step": 80
},
{
"epoch": 0.7961910612809092,
"grad_norm": 0.8127192854881287,
"learning_rate": 0.0001766250197367784,
"loss": 2.3539,
"step": 81
},
{
"epoch": 0.8060205805559821,
"grad_norm": 0.8602331280708313,
"learning_rate": 0.00017597466846468437,
"loss": 2.3433,
"step": 82
},
{
"epoch": 0.8158500998310552,
"grad_norm": 0.8889239430427551,
"learning_rate": 0.0001753167840156454,
"loss": 2.3318,
"step": 83
},
{
"epoch": 0.8256796191061281,
"grad_norm": 1.038681149482727,
"learning_rate": 0.00017465144150879548,
"loss": 2.4018,
"step": 84
},
{
"epoch": 0.835509138381201,
"grad_norm": 1.0794991254806519,
"learning_rate": 0.00017397871691485117,
"loss": 2.3722,
"step": 85
},
{
"epoch": 0.845338657656274,
"grad_norm": 1.2361195087432861,
"learning_rate": 0.00017329868704743677,
"loss": 2.4939,
"step": 86
},
{
"epoch": 0.855168176931347,
"grad_norm": 1.5107554197311401,
"learning_rate": 0.0001726114295543138,
"loss": 2.6408,
"step": 87
},
{
"epoch": 0.8649976962064199,
"grad_norm": 0.9424665570259094,
"learning_rate": 0.00017191702290851469,
"loss": 2.2892,
"step": 88
},
{
"epoch": 0.8748272154814929,
"grad_norm": 0.8208919763565063,
"learning_rate": 0.00017121554639938272,
"loss": 2.2319,
"step": 89
},
{
"epoch": 0.8846567347565658,
"grad_norm": 0.811772882938385,
"learning_rate": 0.00017050708012351852,
"loss": 2.1431,
"step": 90
},
{
"epoch": 0.8944862540316387,
"grad_norm": 0.7872000336647034,
"learning_rate": 0.00016979170497563416,
"loss": 2.3565,
"step": 91
},
{
"epoch": 0.9043157733067118,
"grad_norm": 0.8809075355529785,
"learning_rate": 0.00016906950263931663,
"loss": 2.3408,
"step": 92
},
{
"epoch": 0.9141452925817847,
"grad_norm": 0.8811956644058228,
"learning_rate": 0.00016834055557770096,
"loss": 2.2743,
"step": 93
},
{
"epoch": 0.9239748118568576,
"grad_norm": 0.8447458148002625,
"learning_rate": 0.00016760494702405416,
"loss": 2.2628,
"step": 94
},
{
"epoch": 0.9338043311319306,
"grad_norm": 0.8538081049919128,
"learning_rate": 0.00016686276097227154,
"loss": 2.2477,
"step": 95
},
{
"epoch": 0.9436338504070035,
"grad_norm": 0.895717442035675,
"learning_rate": 0.00016611408216728603,
"loss": 2.3793,
"step": 96
},
{
"epoch": 0.9534633696820765,
"grad_norm": 1.01509428024292,
"learning_rate": 0.00016535899609539177,
"loss": 2.3649,
"step": 97
},
{
"epoch": 0.9632928889571495,
"grad_norm": 1.1379575729370117,
"learning_rate": 0.00016459758897448298,
"loss": 2.4999,
"step": 98
},
{
"epoch": 0.9731224082322224,
"grad_norm": 1.2544729709625244,
"learning_rate": 0.00016382994774420947,
"loss": 2.4487,
"step": 99
},
{
"epoch": 0.9829519275072953,
"grad_norm": 2.1138370037078857,
"learning_rate": 0.0001630561600560494,
"loss": 2.8294,
"step": 100
},
{
"epoch": 0.9829519275072953,
"eval_loss": 2.4398374557495117,
"eval_runtime": 0.6951,
"eval_samples_per_second": 71.931,
"eval_steps_per_second": 18.702,
"step": 100
},
{
"epoch": 0.9927814467823683,
"grad_norm": 0.9641188383102417,
"learning_rate": 0.00016227631426330124,
"loss": 2.2486,
"step": 101
},
{
"epoch": 1.0026109660574412,
"grad_norm": 1.2361067533493042,
"learning_rate": 0.00016149049941099528,
"loss": 3.1568,
"step": 102
},
{
"epoch": 1.0124404853325142,
"grad_norm": 0.6794304251670837,
"learning_rate": 0.00016069880522572597,
"loss": 2.0051,
"step": 103
},
{
"epoch": 1.0222700046075872,
"grad_norm": 0.7373532056808472,
"learning_rate": 0.00015990132210540707,
"loss": 2.1212,
"step": 104
},
{
"epoch": 1.03209952388266,
"grad_norm": 0.7815002202987671,
"learning_rate": 0.00015909814110894938,
"loss": 2.1963,
"step": 105
},
{
"epoch": 1.041929043157733,
"grad_norm": 0.8411288261413574,
"learning_rate": 0.00015828935394586365,
"loss": 2.108,
"step": 106
},
{
"epoch": 1.051758562432806,
"grad_norm": 0.9140182137489319,
"learning_rate": 0.00015747505296578884,
"loss": 2.014,
"step": 107
},
{
"epoch": 1.0615880817078789,
"grad_norm": 0.8995840549468994,
"learning_rate": 0.0001566553311479473,
"loss": 2.1216,
"step": 108
},
{
"epoch": 1.071417600982952,
"grad_norm": 0.8572644591331482,
"learning_rate": 0.0001558302820905281,
"loss": 2.1224,
"step": 109
},
{
"epoch": 1.081247120258025,
"grad_norm": 0.8983514904975891,
"learning_rate": 0.000155,
"loss": 2.0613,
"step": 110
},
{
"epoch": 1.0910766395330977,
"grad_norm": 1.0163862705230713,
"learning_rate": 0.00015416457968035443,
"loss": 2.0615,
"step": 111
},
{
"epoch": 1.1009061588081708,
"grad_norm": 1.1525602340698242,
"learning_rate": 0.0001533241165222805,
"loss": 2.0906,
"step": 112
},
{
"epoch": 1.1107356780832438,
"grad_norm": 1.3764214515686035,
"learning_rate": 0.00015247870649227308,
"loss": 2.1388,
"step": 113
},
{
"epoch": 1.1205651973583166,
"grad_norm": 1.391831398010254,
"learning_rate": 0.0001516284461216752,
"loss": 1.7508,
"step": 114
},
{
"epoch": 1.1303947166333896,
"grad_norm": 1.578629970550537,
"learning_rate": 0.00015077343249565554,
"loss": 2.4968,
"step": 115
},
{
"epoch": 1.1402242359084627,
"grad_norm": 1.0895462036132812,
"learning_rate": 0.0001499137632421232,
"loss": 2.0435,
"step": 116
},
{
"epoch": 1.1500537551835355,
"grad_norm": 1.0212132930755615,
"learning_rate": 0.00014904953652058022,
"loss": 2.0891,
"step": 117
},
{
"epoch": 1.1598832744586085,
"grad_norm": 0.9461895227432251,
"learning_rate": 0.00014818085101091336,
"loss": 2.1364,
"step": 118
},
{
"epoch": 1.1697127937336815,
"grad_norm": 0.8605825901031494,
"learning_rate": 0.0001473078059021266,
"loss": 2.0956,
"step": 119
},
{
"epoch": 1.1795423130087543,
"grad_norm": 1.0641827583312988,
"learning_rate": 0.00014643050088101545,
"loss": 1.984,
"step": 120
},
{
"epoch": 1.1893718322838274,
"grad_norm": 0.9965870380401611,
"learning_rate": 0.00014554903612078448,
"loss": 2.0469,
"step": 121
},
{
"epoch": 1.1992013515589004,
"grad_norm": 1.0630478858947754,
"learning_rate": 0.00014466351226960917,
"loss": 2.0305,
"step": 122
},
{
"epoch": 1.2090308708339732,
"grad_norm": 1.062567949295044,
"learning_rate": 0.0001437740304391437,
"loss": 2.024,
"step": 123
},
{
"epoch": 1.2188603901090462,
"grad_norm": 1.1082484722137451,
"learning_rate": 0.0001428806921929756,
"loss": 2.0846,
"step": 124
},
{
"epoch": 1.2286899093841193,
"grad_norm": 1.236946702003479,
"learning_rate": 0.000141983599535029,
"loss": 1.9621,
"step": 125
},
{
"epoch": 1.2286899093841193,
"eval_loss": 2.4299097061157227,
"eval_runtime": 0.6964,
"eval_samples_per_second": 71.798,
"eval_steps_per_second": 18.667,
"step": 125
},
{
"epoch": 1.238519428659192,
"grad_norm": 1.6171234846115112,
"learning_rate": 0.00014108285489791768,
"loss": 2.2414,
"step": 126
},
{
"epoch": 1.248348947934265,
"grad_norm": 2.209141254425049,
"learning_rate": 0.0001401785611312488,
"loss": 2.4781,
"step": 127
},
{
"epoch": 1.2581784672093381,
"grad_norm": 1.0709174871444702,
"learning_rate": 0.00013927082148987925,
"loss": 1.9901,
"step": 128
},
{
"epoch": 1.268007986484411,
"grad_norm": 1.1437026262283325,
"learning_rate": 0.0001383597396221259,
"loss": 1.9849,
"step": 129
},
{
"epoch": 1.277837505759484,
"grad_norm": 1.181343674659729,
"learning_rate": 0.00013744541955793045,
"loss": 2.0997,
"step": 130
},
{
"epoch": 1.287667025034557,
"grad_norm": 1.113682508468628,
"learning_rate": 0.0001365279656969814,
"loss": 2.0418,
"step": 131
},
{
"epoch": 1.2974965443096298,
"grad_norm": 1.067321538925171,
"learning_rate": 0.0001356074827967929,
"loss": 2.0363,
"step": 132
},
{
"epoch": 1.3073260635847028,
"grad_norm": 0.9680085182189941,
"learning_rate": 0.00013468407596074376,
"loss": 2.0136,
"step": 133
},
{
"epoch": 1.3171555828597756,
"grad_norm": 1.076326608657837,
"learning_rate": 0.0001337578506260759,
"loss": 2.03,
"step": 134
},
{
"epoch": 1.3269851021348487,
"grad_norm": 1.0561034679412842,
"learning_rate": 0.00013282891255185565,
"loss": 1.9895,
"step": 135
},
{
"epoch": 1.3368146214099217,
"grad_norm": 1.1942148208618164,
"learning_rate": 0.0001318973678068978,
"loss": 2.0359,
"step": 136
},
{
"epoch": 1.3466441406849947,
"grad_norm": 1.1791845560073853,
"learning_rate": 0.00013096332275765407,
"loss": 2.0042,
"step": 137
},
{
"epoch": 1.3564736599600675,
"grad_norm": 1.4035441875457764,
"learning_rate": 0.00013002688405606828,
"loss": 2.1086,
"step": 138
},
{
"epoch": 1.3663031792351406,
"grad_norm": 1.4087142944335938,
"learning_rate": 0.00012908815862739835,
"loss": 1.7052,
"step": 139
},
{
"epoch": 1.3761326985102134,
"grad_norm": 1.3538085222244263,
"learning_rate": 0.00012814725365800698,
"loss": 2.5688,
"step": 140
},
{
"epoch": 1.3859622177852864,
"grad_norm": 0.9382008910179138,
"learning_rate": 0.00012720427658312352,
"loss": 2.0208,
"step": 141
},
{
"epoch": 1.3957917370603594,
"grad_norm": 0.8642036318778992,
"learning_rate": 0.0001262593350745759,
"loss": 2.0409,
"step": 142
},
{
"epoch": 1.4056212563354324,
"grad_norm": 0.9334009289741516,
"learning_rate": 0.00012531253702849696,
"loss": 2.028,
"step": 143
},
{
"epoch": 1.4154507756105053,
"grad_norm": 0.9900059700012207,
"learning_rate": 0.00012436399055300415,
"loss": 2.0542,
"step": 144
},
{
"epoch": 1.4252802948855783,
"grad_norm": 1.1152311563491821,
"learning_rate": 0.0001234138039558557,
"loss": 2.0503,
"step": 145
},
{
"epoch": 1.435109814160651,
"grad_norm": 1.132704496383667,
"learning_rate": 0.00012246208573208367,
"loss": 2.019,
"step": 146
},
{
"epoch": 1.4449393334357241,
"grad_norm": 1.0074613094329834,
"learning_rate": 0.00012150894455160555,
"loss": 1.9423,
"step": 147
},
{
"epoch": 1.4547688527107971,
"grad_norm": 1.2089931964874268,
"learning_rate": 0.00012055448924681618,
"loss": 2.0784,
"step": 148
},
{
"epoch": 1.4645983719858702,
"grad_norm": 1.1703325510025024,
"learning_rate": 0.00011959882880016083,
"loss": 1.9828,
"step": 149
},
{
"epoch": 1.474427891260943,
"grad_norm": 1.3632087707519531,
"learning_rate": 0.00011864207233169136,
"loss": 2.1095,
"step": 150
},
{
"epoch": 1.474427891260943,
"eval_loss": 2.4083755016326904,
"eval_runtime": 0.6963,
"eval_samples_per_second": 71.812,
"eval_steps_per_second": 18.671,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 306,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.444243315621888e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}