{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.984301412872842,
"eval_steps": 500,
"global_step": 1590,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06279434850863422,
"grad_norm": 0.7715030908584595,
"learning_rate": 0.00019998048082884911,
"loss": 1.3075,
"step": 10
},
{
"epoch": 0.12558869701726844,
"grad_norm": 0.7010838389396667,
"learning_rate": 0.0001999219309353572,
"loss": 1.0321,
"step": 20
},
{
"epoch": 0.18838304552590268,
"grad_norm": 0.5988379120826721,
"learning_rate": 0.00019982437317643217,
"loss": 0.9725,
"step": 30
},
{
"epoch": 0.25117739403453687,
"grad_norm": 0.5993463397026062,
"learning_rate": 0.00019968784563700586,
"loss": 0.9429,
"step": 40
},
{
"epoch": 0.3139717425431711,
"grad_norm": 0.5684201121330261,
"learning_rate": 0.0001995124016151664,
"loss": 0.9221,
"step": 50
},
{
"epoch": 0.37676609105180536,
"grad_norm": 0.5648465752601624,
"learning_rate": 0.00019929810960135172,
"loss": 0.957,
"step": 60
},
{
"epoch": 0.43956043956043955,
"grad_norm": 0.5728427767753601,
"learning_rate": 0.00019904505325161158,
"loss": 0.8807,
"step": 70
},
{
"epoch": 0.5023547880690737,
"grad_norm": 0.5611833333969116,
"learning_rate": 0.00019875333135495002,
"loss": 0.8894,
"step": 80
},
{
"epoch": 0.565149136577708,
"grad_norm": 0.5440887808799744,
"learning_rate": 0.00019842305779475968,
"loss": 0.9077,
"step": 90
},
{
"epoch": 0.6279434850863422,
"grad_norm": 0.5734382271766663,
"learning_rate": 0.00019805436150436352,
"loss": 0.872,
"step": 100
},
{
"epoch": 0.6907378335949764,
"grad_norm": 0.5940066576004028,
"learning_rate": 0.00019764738641668138,
"loss": 0.8535,
"step": 110
},
{
"epoch": 0.7535321821036107,
"grad_norm": 0.5004596710205078,
"learning_rate": 0.0001972022914080411,
"loss": 0.8571,
"step": 120
},
{
"epoch": 0.8163265306122449,
"grad_norm": 0.49201518297195435,
"learning_rate": 0.00019671925023615573,
"loss": 0.8738,
"step": 130
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.5316975116729736,
"learning_rate": 0.00019619845147229138,
"loss": 0.8571,
"step": 140
},
{
"epoch": 0.9419152276295133,
"grad_norm": 0.4743081033229828,
"learning_rate": 0.00019564009842765225,
"loss": 0.8552,
"step": 150
},
{
"epoch": 1.0047095761381475,
"grad_norm": 0.488436758518219,
"learning_rate": 0.0001950444090740111,
"loss": 0.8686,
"step": 160
},
{
"epoch": 1.0675039246467817,
"grad_norm": 0.552605390548706,
"learning_rate": 0.0001944116159586169,
"loss": 0.7721,
"step": 170
},
{
"epoch": 1.130298273155416,
"grad_norm": 0.5751229524612427,
"learning_rate": 0.0001937419661134121,
"loss": 0.7748,
"step": 180
},
{
"epoch": 1.1930926216640503,
"grad_norm": 0.4798067808151245,
"learning_rate": 0.00019303572095859546,
"loss": 0.78,
"step": 190
},
{
"epoch": 1.2558869701726845,
"grad_norm": 0.5503143072128296,
"learning_rate": 0.00019229315620056803,
"loss": 0.7998,
"step": 200
},
{
"epoch": 1.3186813186813187,
"grad_norm": 0.5721454620361328,
"learning_rate": 0.00019151456172430183,
"loss": 0.787,
"step": 210
},
{
"epoch": 1.3814756671899528,
"grad_norm": 0.5621303915977478,
"learning_rate": 0.00019070024148017375,
"loss": 0.7816,
"step": 220
},
{
"epoch": 1.4442700156985873,
"grad_norm": 0.5912421941757202,
"learning_rate": 0.00018985051336530798,
"loss": 0.7806,
"step": 230
},
{
"epoch": 1.5070643642072215,
"grad_norm": 0.5405421257019043,
"learning_rate": 0.00018896570909947475,
"loss": 0.7548,
"step": 240
},
{
"epoch": 1.5698587127158556,
"grad_norm": 0.47901952266693115,
"learning_rate": 0.00018804617409559198,
"loss": 0.7776,
"step": 250
},
{
"epoch": 1.6326530612244898,
"grad_norm": 0.5640778541564941,
"learning_rate": 0.00018709226732488215,
"loss": 0.7683,
"step": 260
},
{
"epoch": 1.695447409733124,
"grad_norm": 0.5678220987319946,
"learning_rate": 0.00018610436117673555,
"loss": 0.7773,
"step": 270
},
{
"epoch": 1.7582417582417582,
"grad_norm": 0.5400906801223755,
"learning_rate": 0.00018508284131333602,
"loss": 0.7797,
"step": 280
},
{
"epoch": 1.8210361067503924,
"grad_norm": 0.5115251541137695,
"learning_rate": 0.00018402810651910442,
"loss": 0.78,
"step": 290
},
{
"epoch": 1.8838304552590266,
"grad_norm": 0.5241789221763611,
"learning_rate": 0.0001829405685450202,
"loss": 0.7978,
"step": 300
},
{
"epoch": 1.9466248037676608,
"grad_norm": 0.540064811706543,
"learning_rate": 0.00018182065194788025,
"loss": 0.7754,
"step": 310
},
{
"epoch": 2.009419152276295,
"grad_norm": 0.5090541243553162,
"learning_rate": 0.0001806687939245593,
"loss": 0.7707,
"step": 320
},
{
"epoch": 2.072213500784929,
"grad_norm": 0.6285854578018188,
"learning_rate": 0.00017948544414133534,
"loss": 0.6989,
"step": 330
},
{
"epoch": 2.1350078492935634,
"grad_norm": 0.5556398630142212,
"learning_rate": 0.00017827106455834733,
"loss": 0.681,
"step": 340
},
{
"epoch": 2.197802197802198,
"grad_norm": 0.6137024760246277,
"learning_rate": 0.00017702612924925376,
"loss": 0.7002,
"step": 350
},
{
"epoch": 2.260596546310832,
"grad_norm": 0.5647903680801392,
"learning_rate": 0.00017575112421616202,
"loss": 0.6796,
"step": 360
},
{
"epoch": 2.3233908948194664,
"grad_norm": 0.5665746927261353,
"learning_rate": 0.00017444654719990128,
"loss": 0.7247,
"step": 370
},
{
"epoch": 2.3861852433281006,
"grad_norm": 0.5704404711723328,
"learning_rate": 0.00017311290748571275,
"loss": 0.6982,
"step": 380
},
{
"epoch": 2.4489795918367347,
"grad_norm": 0.6268453001976013,
"learning_rate": 0.00017175072570443312,
"loss": 0.6788,
"step": 390
},
{
"epoch": 2.511773940345369,
"grad_norm": 0.5893650054931641,
"learning_rate": 0.00017036053362924896,
"loss": 0.7071,
"step": 400
},
{
"epoch": 2.574568288854003,
"grad_norm": 0.5590313673019409,
"learning_rate": 0.0001689428739681012,
"loss": 0.6977,
"step": 410
},
{
"epoch": 2.6373626373626373,
"grad_norm": 0.5971296429634094,
"learning_rate": 0.00016749830015182107,
"loss": 0.7079,
"step": 420
},
{
"epoch": 2.7001569858712715,
"grad_norm": 0.60088050365448,
"learning_rate": 0.00016602737611807976,
"loss": 0.6985,
"step": 430
},
{
"epoch": 2.7629513343799057,
"grad_norm": 0.5298681855201721,
"learning_rate": 0.00016453067609123654,
"loss": 0.7271,
"step": 440
},
{
"epoch": 2.82574568288854,
"grad_norm": 0.6201738119125366,
"learning_rate": 0.00016300878435817113,
"loss": 0.6905,
"step": 450
},
{
"epoch": 2.8885400313971745,
"grad_norm": 0.605073869228363,
"learning_rate": 0.00016146229504018775,
"loss": 0.7071,
"step": 460
},
{
"epoch": 2.9513343799058083,
"grad_norm": 0.5479874610900879,
"learning_rate": 0.00015989181186108002,
"loss": 0.7233,
"step": 470
},
{
"epoch": 3.014128728414443,
"grad_norm": 0.5628566145896912,
"learning_rate": 0.0001582979479114472,
"loss": 0.6769,
"step": 480
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.6384280920028687,
"learning_rate": 0.0001566813254093538,
"loss": 0.6165,
"step": 490
},
{
"epoch": 3.1397174254317113,
"grad_norm": 0.6049516797065735,
"learning_rate": 0.00015504257545742584,
"loss": 0.6414,
"step": 500
},
{
"epoch": 3.2025117739403455,
"grad_norm": 0.6833707690238953,
"learning_rate": 0.0001533823377964791,
"loss": 0.6559,
"step": 510
},
{
"epoch": 3.2653061224489797,
"grad_norm": 0.6257244944572449,
"learning_rate": 0.00015170126055577462,
"loss": 0.6465,
"step": 520
},
{
"epoch": 3.328100470957614,
"grad_norm": 0.6548301577568054,
"learning_rate": 0.00015000000000000001,
"loss": 0.6332,
"step": 530
},
{
"epoch": 3.390894819466248,
"grad_norm": 0.6910219788551331,
"learning_rate": 0.00014827922027307451,
"loss": 0.6331,
"step": 540
},
{
"epoch": 3.4536891679748822,
"grad_norm": 0.5907390713691711,
"learning_rate": 0.00014653959313887813,
"loss": 0.6106,
"step": 550
},
{
"epoch": 3.5164835164835164,
"grad_norm": 0.6401733756065369,
"learning_rate": 0.00014478179771900632,
"loss": 0.642,
"step": 560
},
{
"epoch": 3.5792778649921506,
"grad_norm": 0.5502592325210571,
"learning_rate": 0.00014300652022765207,
"loss": 0.6392,
"step": 570
},
{
"epoch": 3.642072213500785,
"grad_norm": 0.6283037066459656,
"learning_rate": 0.00014121445370371923,
"loss": 0.612,
"step": 580
},
{
"epoch": 3.704866562009419,
"grad_norm": 0.6005645990371704,
"learning_rate": 0.0001394062977402717,
"loss": 0.6289,
"step": 590
},
{
"epoch": 3.767660910518053,
"grad_norm": 0.6629378199577332,
"learning_rate": 0.00013758275821142382,
"loss": 0.6281,
"step": 600
},
{
"epoch": 3.830455259026688,
"grad_norm": 0.6070135235786438,
"learning_rate": 0.00013574454699677893,
"loss": 0.6308,
"step": 610
},
{
"epoch": 3.8932496075353216,
"grad_norm": 0.6706437468528748,
"learning_rate": 0.00013389238170352318,
"loss": 0.6474,
"step": 620
},
{
"epoch": 3.956043956043956,
"grad_norm": 0.591901957988739,
"learning_rate": 0.00013202698538628376,
"loss": 0.6418,
"step": 630
},
{
"epoch": 4.01883830455259,
"grad_norm": 0.5693519711494446,
"learning_rate": 0.0001301490862648603,
"loss": 0.6092,
"step": 640
},
{
"epoch": 4.081632653061225,
"grad_norm": 0.6287245154380798,
"learning_rate": 0.0001282594174399399,
"loss": 0.5511,
"step": 650
},
{
"epoch": 4.144427001569858,
"grad_norm": 0.6552153825759888,
"learning_rate": 0.00012635871660690676,
"loss": 0.5828,
"step": 660
},
{
"epoch": 4.207221350078493,
"grad_norm": 0.664696216583252,
"learning_rate": 0.00012444772576785827,
"loss": 0.5643,
"step": 670
},
{
"epoch": 4.270015698587127,
"grad_norm": 0.6447893381118774,
"learning_rate": 0.0001225271909419395,
"loss": 0.5883,
"step": 680
},
{
"epoch": 4.332810047095761,
"grad_norm": 0.6885414123535156,
"learning_rate": 0.00012059786187410984,
"loss": 0.5826,
"step": 690
},
{
"epoch": 4.395604395604396,
"grad_norm": 0.608504593372345,
"learning_rate": 0.00011866049174245491,
"loss": 0.5499,
"step": 700
},
{
"epoch": 4.45839874411303,
"grad_norm": 0.6774938702583313,
"learning_rate": 0.00011671583686415832,
"loss": 0.5785,
"step": 710
},
{
"epoch": 4.521193092621664,
"grad_norm": 0.6806432604789734,
"learning_rate": 0.00011476465640024814,
"loss": 0.5678,
"step": 720
},
{
"epoch": 4.583987441130298,
"grad_norm": 0.7246624827384949,
"learning_rate": 0.00011280771205923268,
"loss": 0.5797,
"step": 730
},
{
"epoch": 4.646781789638933,
"grad_norm": 0.6730565428733826,
"learning_rate": 0.00011084576779974257,
"loss": 0.565,
"step": 740
},
{
"epoch": 4.7095761381475665,
"grad_norm": 0.6672644019126892,
"learning_rate": 0.00010887958953229349,
"loss": 0.5624,
"step": 750
},
{
"epoch": 4.772370486656201,
"grad_norm": 0.7056964039802551,
"learning_rate": 0.0001069099448202878,
"loss": 0.587,
"step": 760
},
{
"epoch": 4.835164835164835,
"grad_norm": 0.7322887182235718,
"learning_rate": 0.00010493760258037031,
"loss": 0.5712,
"step": 770
},
{
"epoch": 4.8979591836734695,
"grad_norm": 0.7399300336837769,
"learning_rate": 0.00010296333278225599,
"loss": 0.5842,
"step": 780
},
{
"epoch": 4.960753532182103,
"grad_norm": 0.6447321176528931,
"learning_rate": 0.00010098790614814658,
"loss": 0.559,
"step": 790
},
{
"epoch": 5.023547880690738,
"grad_norm": 0.7017489075660706,
"learning_rate": 9.901209385185345e-05,
"loss": 0.5439,
"step": 800
},
{
"epoch": 5.086342229199372,
"grad_norm": 0.7099691033363342,
"learning_rate": 9.703666721774402e-05,
"loss": 0.5144,
"step": 810
},
{
"epoch": 5.149136577708006,
"grad_norm": 0.6762498617172241,
"learning_rate": 9.506239741962971e-05,
"loss": 0.5159,
"step": 820
},
{
"epoch": 5.211930926216641,
"grad_norm": 0.722254753112793,
"learning_rate": 9.309005517971222e-05,
"loss": 0.5174,
"step": 830
},
{
"epoch": 5.274725274725275,
"grad_norm": 0.7049859166145325,
"learning_rate": 9.112041046770653e-05,
"loss": 0.5067,
"step": 840
},
{
"epoch": 5.337519623233909,
"grad_norm": 0.6614891886711121,
"learning_rate": 8.915423220025747e-05,
"loss": 0.5178,
"step": 850
},
{
"epoch": 5.400313971742543,
"grad_norm": 0.7367340326309204,
"learning_rate": 8.719228794076733e-05,
"loss": 0.5158,
"step": 860
},
{
"epoch": 5.463108320251178,
"grad_norm": 0.7544198632240295,
"learning_rate": 8.523534359975189e-05,
"loss": 0.5181,
"step": 870
},
{
"epoch": 5.525902668759811,
"grad_norm": 0.7748332023620605,
"learning_rate": 8.328416313584169e-05,
"loss": 0.5211,
"step": 880
},
{
"epoch": 5.588697017268446,
"grad_norm": 0.7265179753303528,
"learning_rate": 8.13395082575451e-05,
"loss": 0.5005,
"step": 890
},
{
"epoch": 5.65149136577708,
"grad_norm": 0.7204436659812927,
"learning_rate": 7.940213812589018e-05,
"loss": 0.5034,
"step": 900
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.7397501468658447,
"learning_rate": 7.747280905806052e-05,
"loss": 0.5204,
"step": 910
},
{
"epoch": 5.777080062794348,
"grad_norm": 0.7541511654853821,
"learning_rate": 7.555227423214174e-05,
"loss": 0.4947,
"step": 920
},
{
"epoch": 5.839874411302983,
"grad_norm": 0.7153120636940002,
"learning_rate": 7.364128339309326e-05,
"loss": 0.5189,
"step": 930
},
{
"epoch": 5.9026687598116165,
"grad_norm": 0.7152422070503235,
"learning_rate": 7.174058256006012e-05,
"loss": 0.5254,
"step": 940
},
{
"epoch": 5.965463108320251,
"grad_norm": 0.7143374681472778,
"learning_rate": 6.985091373513972e-05,
"loss": 0.5044,
"step": 950
},
{
"epoch": 6.028257456828886,
"grad_norm": 0.6690304279327393,
"learning_rate": 6.797301461371625e-05,
"loss": 0.482,
"step": 960
},
{
"epoch": 6.0910518053375196,
"grad_norm": 0.7355020642280579,
"learning_rate": 6.610761829647685e-05,
"loss": 0.4561,
"step": 970
},
{
"epoch": 6.153846153846154,
"grad_norm": 0.7926068902015686,
"learning_rate": 6.425545300322112e-05,
"loss": 0.4708,
"step": 980
},
{
"epoch": 6.216640502354788,
"grad_norm": 0.7191044688224792,
"learning_rate": 6.24172417885762e-05,
"loss": 0.4586,
"step": 990
},
{
"epoch": 6.279434850863423,
"grad_norm": 0.7463113069534302,
"learning_rate": 6.0593702259728336e-05,
"loss": 0.4674,
"step": 1000
},
{
"epoch": 6.342229199372056,
"grad_norm": 0.743307888507843,
"learning_rate": 5.8785546296280816e-05,
"loss": 0.471,
"step": 1010
},
{
"epoch": 6.405023547880691,
"grad_norm": 0.7751266956329346,
"learning_rate": 5.699347977234799e-05,
"loss": 0.4655,
"step": 1020
},
{
"epoch": 6.467817896389325,
"grad_norm": 0.8425696492195129,
"learning_rate": 5.5218202280993725e-05,
"loss": 0.476,
"step": 1030
},
{
"epoch": 6.530612244897959,
"grad_norm": 0.778333842754364,
"learning_rate": 5.3460406861121894e-05,
"loss": 0.4667,
"step": 1040
},
{
"epoch": 6.593406593406593,
"grad_norm": 0.7623078227043152,
"learning_rate": 5.172077972692553e-05,
"loss": 0.4799,
"step": 1050
},
{
"epoch": 6.656200941915228,
"grad_norm": 0.7711922526359558,
"learning_rate": 5.000000000000002e-05,
"loss": 0.4621,
"step": 1060
},
{
"epoch": 6.718995290423862,
"grad_norm": 0.812324583530426,
"learning_rate": 4.829873944422544e-05,
"loss": 0.4612,
"step": 1070
},
{
"epoch": 6.781789638932496,
"grad_norm": 0.7873876094818115,
"learning_rate": 4.661766220352097e-05,
"loss": 0.4794,
"step": 1080
},
{
"epoch": 6.84458398744113,
"grad_norm": 0.8011544942855835,
"learning_rate": 4.495742454257418e-05,
"loss": 0.4608,
"step": 1090
},
{
"epoch": 6.9073783359497645,
"grad_norm": 0.77611243724823,
"learning_rate": 4.3318674590646237e-05,
"loss": 0.471,
"step": 1100
},
{
"epoch": 6.970172684458399,
"grad_norm": 0.8160433769226074,
"learning_rate": 4.170205208855281e-05,
"loss": 0.4717,
"step": 1110
},
{
"epoch": 7.032967032967033,
"grad_norm": 0.7271689176559448,
"learning_rate": 4.010818813892e-05,
"loss": 0.4322,
"step": 1120
},
{
"epoch": 7.0957613814756675,
"grad_norm": 0.8266540765762329,
"learning_rate": 3.8537704959812294e-05,
"loss": 0.4184,
"step": 1130
},
{
"epoch": 7.158555729984301,
"grad_norm": 0.8125666975975037,
"learning_rate": 3.69912156418289e-05,
"loss": 0.4284,
"step": 1140
},
{
"epoch": 7.221350078492936,
"grad_norm": 0.839213490486145,
"learning_rate": 3.546932390876351e-05,
"loss": 0.4389,
"step": 1150
},
{
"epoch": 7.28414442700157,
"grad_norm": 0.89272141456604,
"learning_rate": 3.397262388192029e-05,
"loss": 0.4477,
"step": 1160
},
{
"epoch": 7.346938775510204,
"grad_norm": 0.7432073950767517,
"learning_rate": 3.250169984817897e-05,
"loss": 0.43,
"step": 1170
},
{
"epoch": 7.409733124018838,
"grad_norm": 0.7860047817230225,
"learning_rate": 3.105712603189884e-05,
"loss": 0.4269,
"step": 1180
},
{
"epoch": 7.472527472527473,
"grad_norm": 0.832013726234436,
"learning_rate": 2.9639466370751068e-05,
"loss": 0.4256,
"step": 1190
},
{
"epoch": 7.535321821036106,
"grad_norm": 0.7955825924873352,
"learning_rate": 2.8249274295566864e-05,
"loss": 0.4282,
"step": 1200
},
{
"epoch": 7.598116169544741,
"grad_norm": 0.7972998023033142,
"learning_rate": 2.688709251428725e-05,
"loss": 0.4388,
"step": 1210
},
{
"epoch": 7.660910518053376,
"grad_norm": 0.8105794191360474,
"learning_rate": 2.555345280009872e-05,
"loss": 0.4381,
"step": 1220
},
{
"epoch": 7.723704866562009,
"grad_norm": 0.7821062207221985,
"learning_rate": 2.4248875783837987e-05,
"loss": 0.4276,
"step": 1230
},
{
"epoch": 7.786499215070644,
"grad_norm": 0.805131733417511,
"learning_rate": 2.297387075074625e-05,
"loss": 0.4307,
"step": 1240
},
{
"epoch": 7.849293563579278,
"grad_norm": 0.797322690486908,
"learning_rate": 2.1728935441652686e-05,
"loss": 0.4295,
"step": 1250
},
{
"epoch": 7.912087912087912,
"grad_norm": 0.7943819761276245,
"learning_rate": 2.0514555858664663e-05,
"loss": 0.4346,
"step": 1260
},
{
"epoch": 7.974882260596546,
"grad_norm": 0.814616322517395,
"learning_rate": 1.93312060754407e-05,
"loss": 0.4331,
"step": 1270
},
{
"epoch": 8.03767660910518,
"grad_norm": 0.79474937915802,
"learning_rate": 1.817934805211976e-05,
"loss": 0.4153,
"step": 1280
},
{
"epoch": 8.100470957613815,
"grad_norm": 0.8072588443756104,
"learning_rate": 1.7059431454979824e-05,
"loss": 0.4072,
"step": 1290
},
{
"epoch": 8.16326530612245,
"grad_norm": 0.8833736777305603,
"learning_rate": 1.5971893480895583e-05,
"loss": 0.3996,
"step": 1300
},
{
"epoch": 8.226059654631083,
"grad_norm": 0.8510453104972839,
"learning_rate": 1.491715868666399e-05,
"loss": 0.4132,
"step": 1310
},
{
"epoch": 8.288854003139717,
"grad_norm": 0.8392331004142761,
"learning_rate": 1.3895638823264446e-05,
"loss": 0.4181,
"step": 1320
},
{
"epoch": 8.351648351648352,
"grad_norm": 0.7935534119606018,
"learning_rate": 1.290773267511788e-05,
"loss": 0.3973,
"step": 1330
},
{
"epoch": 8.414442700156986,
"grad_norm": 0.821882963180542,
"learning_rate": 1.1953825904408034e-05,
"loss": 0.4069,
"step": 1340
},
{
"epoch": 8.47723704866562,
"grad_norm": 0.8700546026229858,
"learning_rate": 1.103429090052528e-05,
"loss": 0.4144,
"step": 1350
},
{
"epoch": 8.540031397174253,
"grad_norm": 0.822404146194458,
"learning_rate": 1.0149486634692018e-05,
"loss": 0.4052,
"step": 1360
},
{
"epoch": 8.602825745682889,
"grad_norm": 0.8342509269714355,
"learning_rate": 9.299758519826273e-06,
"loss": 0.4076,
"step": 1370
},
{
"epoch": 8.665620094191523,
"grad_norm": 0.8416010141372681,
"learning_rate": 8.485438275698154e-06,
"loss": 0.4161,
"step": 1380
},
{
"epoch": 8.728414442700156,
"grad_norm": 0.7646155953407288,
"learning_rate": 7.706843799431984e-06,
"loss": 0.4153,
"step": 1390
},
{
"epoch": 8.791208791208792,
"grad_norm": 0.7787067294120789,
"learning_rate": 6.964279041404553e-06,
"loss": 0.3995,
"step": 1400
},
{
"epoch": 8.854003139717426,
"grad_norm": 0.9011712074279785,
"learning_rate": 6.258033886587911e-06,
"loss": 0.4144,
"step": 1410
},
{
"epoch": 8.91679748822606,
"grad_norm": 0.8679221272468567,
"learning_rate": 5.588384041383088e-06,
"loss": 0.4147,
"step": 1420
},
{
"epoch": 8.979591836734693,
"grad_norm": 0.8739622831344604,
"learning_rate": 4.955590925988895e-06,
"loss": 0.4011,
"step": 1430
},
{
"epoch": 9.042386185243329,
"grad_norm": 0.824476420879364,
"learning_rate": 4.359901572347758e-06,
"loss": 0.4138,
"step": 1440
},
{
"epoch": 9.105180533751962,
"grad_norm": 0.8952732682228088,
"learning_rate": 3.8015485277086205e-06,
"loss": 0.4088,
"step": 1450
},
{
"epoch": 9.167974882260596,
"grad_norm": 0.7407020330429077,
"learning_rate": 3.280749763844293e-06,
"loss": 0.4018,
"step": 1460
},
{
"epoch": 9.23076923076923,
"grad_norm": 0.8719793558120728,
"learning_rate": 2.7977085919589254e-06,
"loss": 0.3925,
"step": 1470
},
{
"epoch": 9.293563579277865,
"grad_norm": 0.8144113421440125,
"learning_rate": 2.3526135833186525e-06,
"loss": 0.4012,
"step": 1480
},
{
"epoch": 9.3563579277865,
"grad_norm": 0.8647878170013428,
"learning_rate": 1.945638495636515e-06,
"loss": 0.4006,
"step": 1490
},
{
"epoch": 9.419152276295133,
"grad_norm": 0.827995240688324,
"learning_rate": 1.576942205240317e-06,
"loss": 0.3984,
"step": 1500
},
{
"epoch": 9.481946624803768,
"grad_norm": 0.8608419895172119,
"learning_rate": 1.2466686450499865e-06,
"loss": 0.3956,
"step": 1510
},
{
"epoch": 9.544740973312402,
"grad_norm": 0.8144489526748657,
"learning_rate": 9.549467483884412e-07,
"loss": 0.3967,
"step": 1520
},
{
"epoch": 9.607535321821036,
"grad_norm": 0.906897783279419,
"learning_rate": 7.018903986483083e-07,
"loss": 0.4039,
"step": 1530
},
{
"epoch": 9.67032967032967,
"grad_norm": 0.8140434622764587,
"learning_rate": 4.875983848335874e-07,
"loss": 0.3924,
"step": 1540
},
{
"epoch": 9.733124018838305,
"grad_norm": 0.8026575446128845,
"learning_rate": 3.12154362994177e-07,
"loss": 0.4028,
"step": 1550
},
{
"epoch": 9.795918367346939,
"grad_norm": 0.8668120503425598,
"learning_rate": 1.7562682356786487e-07,
"loss": 0.404,
"step": 1560
},
{
"epoch": 9.858712715855573,
"grad_norm": 0.8131958842277527,
"learning_rate": 7.806906464281615e-08,
"loss": 0.4016,
"step": 1570
},
{
"epoch": 9.921507064364206,
"grad_norm": 0.820686399936676,
"learning_rate": 1.951917115091684e-08,
"loss": 0.3993,
"step": 1580
},
{
"epoch": 9.984301412872842,
"grad_norm": 0.8288590312004089,
"learning_rate": 0.0,
"loss": 0.389,
"step": 1590
},
{
"epoch": 9.984301412872842,
"step": 1590,
"total_flos": 1.04418647212032e+17,
"train_loss": 0.5835858842861728,
"train_runtime": 7264.6263,
"train_samples_per_second": 0.877,
"train_steps_per_second": 0.219
}
],
"logging_steps": 10,
"max_steps": 1590,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 1.04418647212032e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}