{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9994709927702345,
"eval_steps": 500,
"global_step": 1417,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007053429730206313,
"grad_norm": 1.3042659759521484,
"learning_rate": 7.042253521126762e-06,
"loss": 2.258,
"step": 10
},
{
"epoch": 0.014106859460412626,
"grad_norm": 0.738134503364563,
"learning_rate": 1.4084507042253523e-05,
"loss": 2.0805,
"step": 20
},
{
"epoch": 0.02116028919061894,
"grad_norm": 0.5946909785270691,
"learning_rate": 2.112676056338028e-05,
"loss": 1.9582,
"step": 30
},
{
"epoch": 0.02821371892082525,
"grad_norm": 0.5485147833824158,
"learning_rate": 2.8169014084507046e-05,
"loss": 1.9138,
"step": 40
},
{
"epoch": 0.03526714865103157,
"grad_norm": 0.5394290089607239,
"learning_rate": 3.5211267605633805e-05,
"loss": 1.8736,
"step": 50
},
{
"epoch": 0.04232057838123788,
"grad_norm": 0.49499747157096863,
"learning_rate": 4.225352112676056e-05,
"loss": 1.7826,
"step": 60
},
{
"epoch": 0.04937400811144419,
"grad_norm": 0.48521119356155396,
"learning_rate": 4.929577464788733e-05,
"loss": 1.7868,
"step": 70
},
{
"epoch": 0.0564274378416505,
"grad_norm": 0.5505830645561218,
"learning_rate": 5.633802816901409e-05,
"loss": 1.754,
"step": 80
},
{
"epoch": 0.06348086757185682,
"grad_norm": 0.512299656867981,
"learning_rate": 6.338028169014085e-05,
"loss": 1.7356,
"step": 90
},
{
"epoch": 0.07053429730206313,
"grad_norm": 0.47940096259117126,
"learning_rate": 7.042253521126761e-05,
"loss": 1.6952,
"step": 100
},
{
"epoch": 0.07758772703226945,
"grad_norm": 0.48518213629722595,
"learning_rate": 7.746478873239437e-05,
"loss": 1.6853,
"step": 110
},
{
"epoch": 0.08464115676247576,
"grad_norm": 0.5208117961883545,
"learning_rate": 8.450704225352113e-05,
"loss": 1.7022,
"step": 120
},
{
"epoch": 0.09169458649268207,
"grad_norm": 0.5312570333480835,
"learning_rate": 9.15492957746479e-05,
"loss": 1.6371,
"step": 130
},
{
"epoch": 0.09874801622288838,
"grad_norm": 0.5060498118400574,
"learning_rate": 9.859154929577466e-05,
"loss": 1.665,
"step": 140
},
{
"epoch": 0.1058014459530947,
"grad_norm": 0.482503205537796,
"learning_rate": 9.999028628636837e-05,
"loss": 1.6445,
"step": 150
},
{
"epoch": 0.112854875683301,
"grad_norm": 0.5026859045028687,
"learning_rate": 9.99508307932129e-05,
"loss": 1.6505,
"step": 160
},
{
"epoch": 0.11990830541350732,
"grad_norm": 0.508380651473999,
"learning_rate": 9.988105034827552e-05,
"loss": 1.6513,
"step": 170
},
{
"epoch": 0.12696173514371364,
"grad_norm": 0.4797461926937103,
"learning_rate": 9.978098731498833e-05,
"loss": 1.6382,
"step": 180
},
{
"epoch": 0.13401516487391996,
"grad_norm": 0.5087615847587585,
"learning_rate": 9.965070244122301e-05,
"loss": 1.6464,
"step": 190
},
{
"epoch": 0.14106859460412627,
"grad_norm": 0.4981568455696106,
"learning_rate": 9.949027482241107e-05,
"loss": 1.6105,
"step": 200
},
{
"epoch": 0.14812202433433258,
"grad_norm": 0.4438849091529846,
"learning_rate": 9.929980185352526e-05,
"loss": 1.6194,
"step": 210
},
{
"epoch": 0.1551754540645389,
"grad_norm": 0.5086779594421387,
"learning_rate": 9.907939916995152e-05,
"loss": 1.6073,
"step": 220
},
{
"epoch": 0.1622288837947452,
"grad_norm": 0.5140203833580017,
"learning_rate": 9.88292005772872e-05,
"loss": 1.6031,
"step": 230
},
{
"epoch": 0.16928231352495152,
"grad_norm": 0.5097965002059937,
"learning_rate": 9.854935797010821e-05,
"loss": 1.6216,
"step": 240
},
{
"epoch": 0.17633574325515783,
"grad_norm": 0.5518028140068054,
"learning_rate": 9.824004123975435e-05,
"loss": 1.5585,
"step": 250
},
{
"epoch": 0.18338917298536414,
"grad_norm": 0.48416197299957275,
"learning_rate": 9.790143817118904e-05,
"loss": 1.5739,
"step": 260
},
{
"epoch": 0.19044260271557045,
"grad_norm": 0.4688737690448761,
"learning_rate": 9.753375432899581e-05,
"loss": 1.571,
"step": 270
},
{
"epoch": 0.19749603244577676,
"grad_norm": 0.4657231569290161,
"learning_rate": 9.713721293258078e-05,
"loss": 1.5614,
"step": 280
},
{
"epoch": 0.20454946217598308,
"grad_norm": 0.47847288846969604,
"learning_rate": 9.671205472065707e-05,
"loss": 1.5474,
"step": 290
},
{
"epoch": 0.2116028919061894,
"grad_norm": 0.5418571829795837,
"learning_rate": 9.625853780509335e-05,
"loss": 1.5562,
"step": 300
},
{
"epoch": 0.2186563216363957,
"grad_norm": 0.5494666695594788,
"learning_rate": 9.577693751421506e-05,
"loss": 1.5585,
"step": 310
},
{
"epoch": 0.225709751366602,
"grad_norm": 0.5272155404090881,
"learning_rate": 9.52675462256537e-05,
"loss": 1.546,
"step": 320
},
{
"epoch": 0.23276318109680832,
"grad_norm": 0.5072800517082214,
"learning_rate": 9.473067318884552e-05,
"loss": 1.5395,
"step": 330
},
{
"epoch": 0.23981661082701464,
"grad_norm": 0.48417559266090393,
"learning_rate": 9.416664433728748e-05,
"loss": 1.5342,
"step": 340
},
{
"epoch": 0.24687004055722095,
"grad_norm": 0.49016687273979187,
"learning_rate": 9.357580209066416e-05,
"loss": 1.5155,
"step": 350
},
{
"epoch": 0.2539234702874273,
"grad_norm": 0.48039939999580383,
"learning_rate": 9.295850514696628e-05,
"loss": 1.5194,
"step": 360
},
{
"epoch": 0.2609769000176336,
"grad_norm": 0.4710429906845093,
"learning_rate": 9.23151282647265e-05,
"loss": 1.4984,
"step": 370
},
{
"epoch": 0.2680303297478399,
"grad_norm": 0.46802473068237305,
"learning_rate": 9.164606203550497e-05,
"loss": 1.5316,
"step": 380
},
{
"epoch": 0.2750837594780462,
"grad_norm": 0.46727123856544495,
"learning_rate": 9.095171264676283e-05,
"loss": 1.5478,
"step": 390
},
{
"epoch": 0.28213718920825254,
"grad_norm": 0.4999746084213257,
"learning_rate": 9.023250163526731e-05,
"loss": 1.5335,
"step": 400
},
{
"epoch": 0.2891906189384588,
"grad_norm": 0.4812432825565338,
"learning_rate": 8.948886563117844e-05,
"loss": 1.5063,
"step": 410
},
{
"epoch": 0.29624404866866516,
"grad_norm": 0.48060664534568787,
"learning_rate": 8.872125609297271e-05,
"loss": 1.5039,
"step": 420
},
{
"epoch": 0.30329747839887145,
"grad_norm": 0.4972708523273468,
"learning_rate": 8.793013903336428e-05,
"loss": 1.5414,
"step": 430
},
{
"epoch": 0.3103509081290778,
"grad_norm": 0.4610942006111145,
"learning_rate": 8.711599473639058e-05,
"loss": 1.4808,
"step": 440
},
{
"epoch": 0.31740433785928407,
"grad_norm": 0.45125675201416016,
"learning_rate": 8.627931746583378e-05,
"loss": 1.5097,
"step": 450
},
{
"epoch": 0.3244577675894904,
"grad_norm": 0.46617043018341064,
"learning_rate": 8.542061516515512e-05,
"loss": 1.4708,
"step": 460
},
{
"epoch": 0.3315111973196967,
"grad_norm": 0.5127195119857788,
"learning_rate": 8.454040914912456e-05,
"loss": 1.4894,
"step": 470
},
{
"epoch": 0.33856462704990303,
"grad_norm": 0.45094090700149536,
"learning_rate": 8.363923378733255e-05,
"loss": 1.4554,
"step": 480
},
{
"epoch": 0.3456180567801093,
"grad_norm": 0.48269587755203247,
"learning_rate": 8.271763617977641e-05,
"loss": 1.4732,
"step": 490
},
{
"epoch": 0.35267148651031566,
"grad_norm": 0.4571676552295685,
"learning_rate": 8.177617582471813e-05,
"loss": 1.4543,
"step": 500
},
{
"epoch": 0.35267148651031566,
"eval_loss": 1.4671958684921265,
"eval_runtime": 1577.1702,
"eval_samples_per_second": 38.35,
"eval_steps_per_second": 0.4,
"step": 500
},
{
"epoch": 0.35972491624052194,
"grad_norm": 0.4831838011741638,
"learning_rate": 8.081542427901503e-05,
"loss": 1.4805,
"step": 510
},
{
"epoch": 0.3667783459707283,
"grad_norm": 0.47973912954330444,
"learning_rate": 7.983596481113005e-05,
"loss": 1.4767,
"step": 520
},
{
"epoch": 0.37383177570093457,
"grad_norm": 0.4587724208831787,
"learning_rate": 7.883839204703165e-05,
"loss": 1.4783,
"step": 530
},
{
"epoch": 0.3808852054311409,
"grad_norm": 0.48470768332481384,
"learning_rate": 7.782331160919877e-05,
"loss": 1.4691,
"step": 540
},
{
"epoch": 0.3879386351613472,
"grad_norm": 0.4692317247390747,
"learning_rate": 7.679133974894983e-05,
"loss": 1.4651,
"step": 550
},
{
"epoch": 0.39499206489155353,
"grad_norm": 0.5106418132781982,
"learning_rate": 7.574310297231897e-05,
"loss": 1.4651,
"step": 560
},
{
"epoch": 0.4020454946217598,
"grad_norm": 0.4444584250450134,
"learning_rate": 7.46792376597067e-05,
"loss": 1.459,
"step": 570
},
{
"epoch": 0.40909892435196615,
"grad_norm": 0.44441869854927063,
"learning_rate": 7.360038967953598e-05,
"loss": 1.466,
"step": 580
},
{
"epoch": 0.41615235408217244,
"grad_norm": 0.4592408537864685,
"learning_rate": 7.250721399614786e-05,
"loss": 1.4527,
"step": 590
},
{
"epoch": 0.4232057838123788,
"grad_norm": 0.48659372329711914,
"learning_rate": 7.140037427217545e-05,
"loss": 1.4409,
"step": 600
},
{
"epoch": 0.43025921354258506,
"grad_norm": 0.49556177854537964,
"learning_rate": 7.02805424656368e-05,
"loss": 1.4378,
"step": 610
},
{
"epoch": 0.4373126432727914,
"grad_norm": 0.4970695674419403,
"learning_rate": 6.914839842199187e-05,
"loss": 1.4209,
"step": 620
},
{
"epoch": 0.4443660730029977,
"grad_norm": 0.4574239253997803,
"learning_rate": 6.800462946141109e-05,
"loss": 1.4215,
"step": 630
},
{
"epoch": 0.451419502733204,
"grad_norm": 0.44600942730903625,
"learning_rate": 6.684992996150599e-05,
"loss": 1.4481,
"step": 640
},
{
"epoch": 0.4584729324634103,
"grad_norm": 0.4874365031719208,
"learning_rate": 6.568500093577525e-05,
"loss": 1.4357,
"step": 650
},
{
"epoch": 0.46552636219361665,
"grad_norm": 0.44276514649391174,
"learning_rate": 6.451054960802224e-05,
"loss": 1.4229,
"step": 660
},
{
"epoch": 0.47257979192382293,
"grad_norm": 0.4567689895629883,
"learning_rate": 6.332728898300221e-05,
"loss": 1.3891,
"step": 670
},
{
"epoch": 0.4796332216540293,
"grad_norm": 0.4700818359851837,
"learning_rate": 6.213593741355993e-05,
"loss": 1.3942,
"step": 680
},
{
"epoch": 0.48668665138423556,
"grad_norm": 0.510308027267456,
"learning_rate": 6.093721816452052e-05,
"loss": 1.4321,
"step": 690
},
{
"epoch": 0.4937400811144419,
"grad_norm": 0.4537096321582794,
"learning_rate": 5.973185897359828e-05,
"loss": 1.4058,
"step": 700
},
{
"epoch": 0.5007935108446482,
"grad_norm": 0.46621763706207275,
"learning_rate": 5.852059160958988e-05,
"loss": 1.4264,
"step": 710
},
{
"epoch": 0.5078469405748546,
"grad_norm": 0.4476553201675415,
"learning_rate": 5.730415142812059e-05,
"loss": 1.3943,
"step": 720
},
{
"epoch": 0.5149003703050609,
"grad_norm": 0.4877742528915405,
"learning_rate": 5.6083276925212624e-05,
"loss": 1.4114,
"step": 730
},
{
"epoch": 0.5219538000352671,
"grad_norm": 0.4259643256664276,
"learning_rate": 5.485870928894725e-05,
"loss": 1.3905,
"step": 740
},
{
"epoch": 0.5290072297654734,
"grad_norm": 0.4499744772911072,
"learning_rate": 5.3631191949492354e-05,
"loss": 1.3973,
"step": 750
},
{
"epoch": 0.5360606594956798,
"grad_norm": 0.45207515358924866,
"learning_rate": 5.240147012776899e-05,
"loss": 1.3957,
"step": 760
},
{
"epoch": 0.5431140892258861,
"grad_norm": 0.4296036958694458,
"learning_rate": 5.117029038303065e-05,
"loss": 1.4023,
"step": 770
},
{
"epoch": 0.5501675189560924,
"grad_norm": 0.4590493142604828,
"learning_rate": 4.993840015963016e-05,
"loss": 1.414,
"step": 780
},
{
"epoch": 0.5572209486862987,
"grad_norm": 0.4364815652370453,
"learning_rate": 4.870654733324907e-05,
"loss": 1.3939,
"step": 790
},
{
"epoch": 0.5642743784165051,
"grad_norm": 0.4730934798717499,
"learning_rate": 4.747547975686538e-05,
"loss": 1.3951,
"step": 800
},
{
"epoch": 0.5713278081467114,
"grad_norm": 0.4653618037700653,
"learning_rate": 4.6245944806734966e-05,
"loss": 1.3919,
"step": 810
},
{
"epoch": 0.5783812378769176,
"grad_norm": 0.44776442646980286,
"learning_rate": 4.50186889286623e-05,
"loss": 1.401,
"step": 820
},
{
"epoch": 0.5854346676071239,
"grad_norm": 0.47814151644706726,
"learning_rate": 4.379445718483626e-05,
"loss": 1.4014,
"step": 830
},
{
"epoch": 0.5924880973373303,
"grad_norm": 0.4623441994190216,
"learning_rate": 4.257399280150576e-05,
"loss": 1.4101,
"step": 840
},
{
"epoch": 0.5995415270675366,
"grad_norm": 0.47123944759368896,
"learning_rate": 4.135803671777011e-05,
"loss": 1.3687,
"step": 850
},
{
"epoch": 0.6065949567977429,
"grad_norm": 0.4534803628921509,
"learning_rate": 4.014732713575769e-05,
"loss": 1.3675,
"step": 860
},
{
"epoch": 0.6136483865279492,
"grad_norm": 0.4451278746128082,
"learning_rate": 3.894259907246652e-05,
"loss": 1.3705,
"step": 870
},
{
"epoch": 0.6207018162581556,
"grad_norm": 0.40993040800094604,
"learning_rate": 3.774458391353821e-05,
"loss": 1.3944,
"step": 880
},
{
"epoch": 0.6277552459883619,
"grad_norm": 0.43390119075775146,
"learning_rate": 3.655400896923672e-05,
"loss": 1.3898,
"step": 890
},
{
"epoch": 0.6348086757185681,
"grad_norm": 0.469901442527771,
"learning_rate": 3.537159703290112e-05,
"loss": 1.3975,
"step": 900
},
{
"epoch": 0.6418621054487744,
"grad_norm": 0.4519672989845276,
"learning_rate": 3.419806594214063e-05,
"loss": 1.3705,
"step": 910
},
{
"epoch": 0.6489155351789808,
"grad_norm": 0.44010812044143677,
"learning_rate": 3.303412814303827e-05,
"loss": 1.3673,
"step": 920
},
{
"epoch": 0.6559689649091871,
"grad_norm": 0.45562589168548584,
"learning_rate": 3.1880490257627646e-05,
"loss": 1.3754,
"step": 930
},
{
"epoch": 0.6630223946393934,
"grad_norm": 0.4654683768749237,
"learning_rate": 3.0737852654905497e-05,
"loss": 1.3809,
"step": 940
},
{
"epoch": 0.6700758243695997,
"grad_norm": 0.40361422300338745,
"learning_rate": 2.960690902564044e-05,
"loss": 1.3432,
"step": 950
},
{
"epoch": 0.6771292540998061,
"grad_norm": 0.45382988452911377,
"learning_rate": 2.8488345961236075e-05,
"loss": 1.3686,
"step": 960
},
{
"epoch": 0.6841826838300124,
"grad_norm": 0.4258984327316284,
"learning_rate": 2.7382842536904075e-05,
"loss": 1.3755,
"step": 970
},
{
"epoch": 0.6912361135602186,
"grad_norm": 0.5345839858055115,
"learning_rate": 2.629106989940025e-05,
"loss": 1.3793,
"step": 980
},
{
"epoch": 0.6982895432904249,
"grad_norm": 0.42425984144210815,
"learning_rate": 2.5213690859574058e-05,
"loss": 1.3385,
"step": 990
},
{
"epoch": 0.7053429730206313,
"grad_norm": 0.43758624792099,
"learning_rate": 2.4151359489978733e-05,
"loss": 1.3683,
"step": 1000
},
{
"epoch": 0.7053429730206313,
"eval_loss": 1.3570424318313599,
"eval_runtime": 1575.8807,
"eval_samples_per_second": 38.382,
"eval_steps_per_second": 0.4,
"step": 1000
},
{
"epoch": 0.7123964027508376,
"grad_norm": 0.4325385093688965,
"learning_rate": 2.3104720727786467e-05,
"loss": 1.3411,
"step": 1010
},
{
"epoch": 0.7194498324810439,
"grad_norm": 0.41445136070251465,
"learning_rate": 2.2074409983249545e-05,
"loss": 1.3495,
"step": 1020
},
{
"epoch": 0.7265032622112502,
"grad_norm": 0.4271891415119171,
"learning_rate": 2.106105275394533e-05,
"loss": 1.3505,
"step": 1030
},
{
"epoch": 0.7335566919414566,
"grad_norm": 0.4772493243217468,
"learning_rate": 2.0065264245039128e-05,
"loss": 1.317,
"step": 1040
},
{
"epoch": 0.7406101216716628,
"grad_norm": 0.43762919306755066,
"learning_rate": 1.9087648995795658e-05,
"loss": 1.349,
"step": 1050
},
{
"epoch": 0.7476635514018691,
"grad_norm": 0.4569909870624542,
"learning_rate": 1.8128800512565513e-05,
"loss": 1.3473,
"step": 1060
},
{
"epoch": 0.7547169811320755,
"grad_norm": 0.4433094561100006,
"learning_rate": 1.7189300908469912e-05,
"loss": 1.3386,
"step": 1070
},
{
"epoch": 0.7617704108622818,
"grad_norm": 0.48191553354263306,
"learning_rate": 1.6269720550002077e-05,
"loss": 1.3765,
"step": 1080
},
{
"epoch": 0.7688238405924881,
"grad_norm": 0.40601858496665955,
"learning_rate": 1.5370617710760004e-05,
"loss": 1.3354,
"step": 1090
},
{
"epoch": 0.7758772703226944,
"grad_norm": 0.43439218401908875,
"learning_rate": 1.4492538232520791e-05,
"loss": 1.3404,
"step": 1100
},
{
"epoch": 0.7829307000529008,
"grad_norm": 0.47002339363098145,
"learning_rate": 1.3636015193862212e-05,
"loss": 1.3562,
"step": 1110
},
{
"epoch": 0.7899841297831071,
"grad_norm": 0.44879859685897827,
"learning_rate": 1.2801568586532837e-05,
"loss": 1.3652,
"step": 1120
},
{
"epoch": 0.7970375595133133,
"grad_norm": 0.44707298278808594,
"learning_rate": 1.1989704999767149e-05,
"loss": 1.3332,
"step": 1130
},
{
"epoch": 0.8040909892435196,
"grad_norm": 0.45202744007110596,
"learning_rate": 1.1200917312737135e-05,
"loss": 1.3488,
"step": 1140
},
{
"epoch": 0.811144418973726,
"grad_norm": 0.4314928650856018,
"learning_rate": 1.043568439532735e-05,
"loss": 1.3592,
"step": 1150
},
{
"epoch": 0.8181978487039323,
"grad_norm": 0.47974613308906555,
"learning_rate": 9.69447081741487e-06,
"loss": 1.3273,
"step": 1160
},
{
"epoch": 0.8252512784341386,
"grad_norm": 0.49341320991516113,
"learning_rate": 8.97772656683083e-06,
"loss": 1.3495,
"step": 1170
},
{
"epoch": 0.8323047081643449,
"grad_norm": 0.45349812507629395,
"learning_rate": 8.2858867761745e-06,
"loss": 1.3349,
"step": 1180
},
{
"epoch": 0.8393581378945513,
"grad_norm": 0.4800957143306732,
"learning_rate": 7.619371458646007e-06,
"loss": 1.3282,
"step": 1190
},
{
"epoch": 0.8464115676247576,
"grad_norm": 0.41283947229385376,
"learning_rate": 6.978585253058101e-06,
"loss": 1.3289,
"step": 1200
},
{
"epoch": 0.8534649973549638,
"grad_norm": 0.3885229825973511,
"learning_rate": 6.363917178181389e-06,
"loss": 1.3251,
"step": 1210
},
{
"epoch": 0.8605184270851701,
"grad_norm": 0.3919270634651184,
"learning_rate": 5.7757403965726655e-06,
"loss": 1.3552,
"step": 1220
},
{
"epoch": 0.8675718568153765,
"grad_norm": 0.4169398546218872,
"learning_rate": 5.214411988029355e-06,
"loss": 1.332,
"step": 1230
},
{
"epoch": 0.8746252865455828,
"grad_norm": 0.4624149799346924,
"learning_rate": 4.680272732807834e-06,
"loss": 1.3261,
"step": 1240
},
{
"epoch": 0.8816787162757891,
"grad_norm": 0.404799222946167,
"learning_rate": 4.173646904737027e-06,
"loss": 1.3352,
"step": 1250
},
{
"epoch": 0.8887321460059954,
"grad_norm": 0.45688724517822266,
"learning_rate": 3.694842074353061e-06,
"loss": 1.3586,
"step": 1260
},
{
"epoch": 0.8957855757362018,
"grad_norm": 0.42276087403297424,
"learning_rate": 3.244148922174445e-06,
"loss": 1.3653,
"step": 1270
},
{
"epoch": 0.902839005466408,
"grad_norm": 0.446267306804657,
"learning_rate": 2.8218410622310286e-06,
"loss": 1.3674,
"step": 1280
},
{
"epoch": 0.9098924351966143,
"grad_norm": 0.48505640029907227,
"learning_rate": 2.4281748759540534e-06,
"loss": 1.3289,
"step": 1290
},
{
"epoch": 0.9169458649268206,
"grad_norm": 0.43678343296051025,
"learning_rate": 2.0633893565279615e-06,
"loss": 1.3405,
"step": 1300
},
{
"epoch": 0.923999294657027,
"grad_norm": 0.4217219650745392,
"learning_rate": 1.7277059637985925e-06,
"loss": 1.3356,
"step": 1310
},
{
"epoch": 0.9310527243872333,
"grad_norm": 0.40834924578666687,
"learning_rate": 1.4213284898258073e-06,
"loss": 1.3514,
"step": 1320
},
{
"epoch": 0.9381061541174396,
"grad_norm": 0.43623679876327515,
"learning_rate": 1.1444429351620933e-06,
"loss": 1.3239,
"step": 1330
},
{
"epoch": 0.9451595838476459,
"grad_norm": 0.3971399664878845,
"learning_rate": 8.972173959324093e-07,
"loss": 1.3358,
"step": 1340
},
{
"epoch": 0.9522130135778523,
"grad_norm": 0.42954108119010925,
"learning_rate": 6.798019617836437e-07,
"loss": 1.336,
"step": 1350
},
{
"epoch": 0.9592664433080585,
"grad_norm": 0.43156835436820984,
"learning_rate": 4.923286247658476e-07,
"loss": 1.3414,
"step": 1360
},
{
"epoch": 0.9663198730382648,
"grad_norm": 0.43554648756980896,
"learning_rate": 3.349111992003218e-07,
"loss": 1.3395,
"step": 1370
},
{
"epoch": 0.9733733027684711,
"grad_norm": 0.44990482926368713,
"learning_rate": 2.0764525258341606e-07,
"loss": 1.3522,
"step": 1380
},
{
"epoch": 0.9804267324986775,
"grad_norm": 0.46251362562179565,
"learning_rate": 1.1060804756790055e-07,
"loss": 1.3422,
"step": 1390
},
{
"epoch": 0.9874801622288838,
"grad_norm": 0.4148382842540741,
"learning_rate": 4.385849505708084e-08,
"loss": 1.336,
"step": 1400
},
{
"epoch": 0.9945335919590901,
"grad_norm": 0.39327695965766907,
"learning_rate": 7.437118440245528e-09,
"loss": 1.367,
"step": 1410
},
{
"epoch": 0.9994709927702345,
"step": 1417,
"total_flos": 3.7897423231228314e+17,
"train_loss": 1.4671159325642071,
"train_runtime": 64163.8855,
"train_samples_per_second": 8.484,
"train_steps_per_second": 0.022
}
],
"logging_steps": 10,
"max_steps": 1417,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 4000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.7897423231228314e+17,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}