{
"best_global_step": 1000,
"best_metric": 0.24901165068149567,
"best_model_checkpoint": "/root/workspace/finetune/checkpoints/train_run_06_qwen3_4b_formal/checkpoint-1000",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 6572,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0121765601217656,
"grad_norm": 2.8322572708129883,
"learning_rate": 3.166666666666667e-07,
"loss": 1.119423007965088,
"step": 20
},
{
"epoch": 0.0243531202435312,
"grad_norm": 2.00534987449646,
"learning_rate": 6.5e-07,
"loss": 0.8098940849304199,
"step": 40
},
{
"epoch": 0.0365296803652968,
"grad_norm": 1.4319002628326416,
"learning_rate": 9.833333333333334e-07,
"loss": 0.6310151576995849,
"step": 60
},
{
"epoch": 0.0487062404870624,
"grad_norm": 1.3265702724456787,
"learning_rate": 1.3166666666666666e-06,
"loss": 0.5526751518249512,
"step": 80
},
{
"epoch": 0.060882800608828,
"grad_norm": 0.8091968297958374,
"learning_rate": 1.6500000000000003e-06,
"loss": 0.4834024906158447,
"step": 100
},
{
"epoch": 0.0730593607305936,
"grad_norm": 0.8584772348403931,
"learning_rate": 1.9833333333333335e-06,
"loss": 0.44873433113098143,
"step": 120
},
{
"epoch": 0.0852359208523592,
"grad_norm": 0.9426456093788147,
"learning_rate": 2.316666666666667e-06,
"loss": 0.38286705017089845,
"step": 140
},
{
"epoch": 0.0974124809741248,
"grad_norm": 0.939085066318512,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.38465352058410646,
"step": 160
},
{
"epoch": 0.1095890410958904,
"grad_norm": 0.8642272353172302,
"learning_rate": 2.9833333333333337e-06,
"loss": 0.3305965900421143,
"step": 180
},
{
"epoch": 0.121765601217656,
"grad_norm": 0.7353930473327637,
"learning_rate": 3.316666666666667e-06,
"loss": 0.2875316619873047,
"step": 200
},
{
"epoch": 0.1339421613394216,
"grad_norm": 0.7686158418655396,
"learning_rate": 3.65e-06,
"loss": 0.3140716075897217,
"step": 220
},
{
"epoch": 0.1461187214611872,
"grad_norm": 1.153424859046936,
"learning_rate": 3.983333333333334e-06,
"loss": 0.2985520839691162,
"step": 240
},
{
"epoch": 0.1582952815829528,
"grad_norm": 0.7382195591926575,
"learning_rate": 4.316666666666667e-06,
"loss": 0.2449601411819458,
"step": 260
},
{
"epoch": 0.1704718417047184,
"grad_norm": 0.7017386555671692,
"learning_rate": 4.65e-06,
"loss": 0.24566495418548584,
"step": 280
},
{
"epoch": 0.182648401826484,
"grad_norm": 1.0982152223587036,
"learning_rate": 4.983333333333334e-06,
"loss": 0.20863604545593262,
"step": 300
},
{
"epoch": 0.1948249619482496,
"grad_norm": 0.6582516431808472,
"learning_rate": 4.9998867856224845e-06,
"loss": 0.19693111181259154,
"step": 320
},
{
"epoch": 0.2070015220700152,
"grad_norm": 0.6201784610748291,
"learning_rate": 4.999523005839606e-06,
"loss": 0.21833207607269287,
"step": 340
},
{
"epoch": 0.2191780821917808,
"grad_norm": 0.7551426887512207,
"learning_rate": 4.998908383543311e-06,
"loss": 0.2382877826690674,
"step": 360
},
{
"epoch": 0.2313546423135464,
"grad_norm": 0.6009605526924133,
"learning_rate": 4.9980429804147276e-06,
"loss": 0.21118538379669188,
"step": 380
},
{
"epoch": 0.243531202435312,
"grad_norm": 0.7086856365203857,
"learning_rate": 4.996926883302385e-06,
"loss": 0.18367968797683715,
"step": 400
},
{
"epoch": 0.2557077625570776,
"grad_norm": 0.964198648929596,
"learning_rate": 4.995560204213496e-06,
"loss": 0.22483460903167723,
"step": 420
},
{
"epoch": 0.2678843226788432,
"grad_norm": 0.6596850156784058,
"learning_rate": 4.993943080302715e-06,
"loss": 0.22673561573028564,
"step": 440
},
{
"epoch": 0.2800608828006088,
"grad_norm": 0.5922938585281372,
"learning_rate": 4.992075673858379e-06,
"loss": 0.185296630859375,
"step": 460
},
{
"epoch": 0.2922374429223744,
"grad_norm": 0.6671269536018372,
"learning_rate": 4.989958172286214e-06,
"loss": 0.18437937498092652,
"step": 480
},
{
"epoch": 0.30441400304414,
"grad_norm": 0.7064187526702881,
"learning_rate": 4.987590788090533e-06,
"loss": 0.1850834846496582,
"step": 500
},
{
"epoch": 0.30441400304414,
"eval_loss": 0.2562323212623596,
"eval_runtime": 47.3626,
"eval_samples_per_second": 18.58,
"eval_steps_per_second": 18.58,
"step": 500
},
{
"epoch": 0.3165905631659056,
"grad_norm": 0.7698928117752075,
"learning_rate": 4.984973758852904e-06,
"loss": 0.16346561908721924,
"step": 520
},
{
"epoch": 0.3287671232876712,
"grad_norm": 0.4859939515590668,
"learning_rate": 4.982107347208317e-06,
"loss": 0.18838067054748536,
"step": 540
},
{
"epoch": 0.3409436834094368,
"grad_norm": 0.5321416854858398,
"learning_rate": 4.978991840818816e-06,
"loss": 0.177593994140625,
"step": 560
},
{
"epoch": 0.3531202435312024,
"grad_norm": 0.593889594078064,
"learning_rate": 4.975627552344638e-06,
"loss": 0.20775914192199707,
"step": 580
},
{
"epoch": 0.365296803652968,
"grad_norm": 0.7926055192947388,
"learning_rate": 4.97201481941283e-06,
"loss": 0.16498700380325318,
"step": 600
},
{
"epoch": 0.3774733637747336,
"grad_norm": 0.2909524440765381,
"learning_rate": 4.968154004583374e-06,
"loss": 0.17565951347351075,
"step": 620
},
{
"epoch": 0.3896499238964992,
"grad_norm": 0.4575704038143158,
"learning_rate": 4.964045495312794e-06,
"loss": 0.16204673051834106,
"step": 640
},
{
"epoch": 0.4018264840182648,
"grad_norm": 0.5366008281707764,
"learning_rate": 4.959689703915272e-06,
"loss": 0.17068564891815186,
"step": 660
},
{
"epoch": 0.4140030441400304,
"grad_norm": 0.5129569172859192,
"learning_rate": 4.95508706752128e-06,
"loss": 0.1589680790901184,
"step": 680
},
{
"epoch": 0.426179604261796,
"grad_norm": 0.4709528684616089,
"learning_rate": 4.9502380480337e-06,
"loss": 0.17568455934524535,
"step": 700
},
{
"epoch": 0.4383561643835616,
"grad_norm": 0.6092886328697205,
"learning_rate": 4.9451431320814715e-06,
"loss": 0.16204804182052612,
"step": 720
},
{
"epoch": 0.4505327245053272,
"grad_norm": 0.5957323908805847,
"learning_rate": 4.939802830970762e-06,
"loss": 0.16562143564224244,
"step": 740
},
{
"epoch": 0.4627092846270928,
"grad_norm": 0.4758240580558777,
"learning_rate": 4.934217680633646e-06,
"loss": 0.17697544097900392,
"step": 760
},
{
"epoch": 0.4748858447488584,
"grad_norm": 0.865627646446228,
"learning_rate": 4.928388241574327e-06,
"loss": 0.1649466037750244,
"step": 780
},
{
"epoch": 0.487062404870624,
"grad_norm": 0.466294527053833,
"learning_rate": 4.922315098812883e-06,
"loss": 0.1602837324142456,
"step": 800
},
{
"epoch": 0.4992389649923896,
"grad_norm": 0.6357060670852661,
"learning_rate": 4.9159988618265585e-06,
"loss": 0.142719042301178,
"step": 820
},
{
"epoch": 0.5114155251141552,
"grad_norm": 0.6055647730827332,
"learning_rate": 4.9094401644886e-06,
"loss": 0.14508233070373536,
"step": 840
},
{
"epoch": 0.5235920852359208,
"grad_norm": 0.45214834809303284,
"learning_rate": 4.902639665004641e-06,
"loss": 0.1821539044380188,
"step": 860
},
{
"epoch": 0.5357686453576864,
"grad_norm": 0.5735688805580139,
"learning_rate": 4.89559804584665e-06,
"loss": 0.16131887435913086,
"step": 880
},
{
"epoch": 0.547945205479452,
"grad_norm": 0.6279348731040955,
"learning_rate": 4.888316013684435e-06,
"loss": 0.17404688596725465,
"step": 900
},
{
"epoch": 0.5601217656012176,
"grad_norm": 0.6474089026451111,
"learning_rate": 4.880794299314732e-06,
"loss": 0.14134640693664552,
"step": 920
},
{
"epoch": 0.5722983257229832,
"grad_norm": 0.5808464884757996,
"learning_rate": 4.87303365758786e-06,
"loss": 0.14891813993453978,
"step": 940
},
{
"epoch": 0.5844748858447488,
"grad_norm": 0.5440990328788757,
"learning_rate": 4.865034867331967e-06,
"loss": 0.1696299910545349,
"step": 960
},
{
"epoch": 0.5966514459665144,
"grad_norm": 0.6859214901924133,
"learning_rate": 4.856798731274874e-06,
"loss": 0.14085158109664916,
"step": 980
},
{
"epoch": 0.60882800608828,
"grad_norm": 0.3178713619709015,
"learning_rate": 4.84832607596351e-06,
"loss": 0.133053982257843,
"step": 1000
},
{
"epoch": 0.60882800608828,
"eval_loss": 0.24901165068149567,
"eval_runtime": 46.6976,
"eval_samples_per_second": 18.845,
"eval_steps_per_second": 18.845,
"step": 1000
},
{
"epoch": 0.6210045662100456,
"grad_norm": 0.3812738358974457,
"learning_rate": 4.8396177516809695e-06,
"loss": 0.12680984735488893,
"step": 1020
},
{
"epoch": 0.6331811263318112,
"grad_norm": 0.5174199342727661,
"learning_rate": 4.830674632361178e-06,
"loss": 0.14880582094192504,
"step": 1040
},
{
"epoch": 0.6453576864535768,
"grad_norm": 0.4705193042755127,
"learning_rate": 4.821497615501186e-06,
"loss": 0.1447562575340271,
"step": 1060
},
{
"epoch": 0.6575342465753424,
"grad_norm": 0.42298850417137146,
"learning_rate": 4.812087622071104e-06,
"loss": 0.15530819892883302,
"step": 1080
},
{
"epoch": 0.669710806697108,
"grad_norm": 0.30658382177352905,
"learning_rate": 4.80244559642167e-06,
"loss": 0.14426586627960206,
"step": 1100
},
{
"epoch": 0.6818873668188736,
"grad_norm": 0.4838867783546448,
"learning_rate": 4.792572506189489e-06,
"loss": 0.15436025857925414,
"step": 1120
},
{
"epoch": 0.6940639269406392,
"grad_norm": 0.716833770275116,
"learning_rate": 4.782469342199915e-06,
"loss": 0.14860854148864747,
"step": 1140
},
{
"epoch": 0.7062404870624048,
"grad_norm": 0.36538004875183105,
"learning_rate": 4.7721371183676205e-06,
"loss": 0.1313084125518799,
"step": 1160
},
{
"epoch": 0.7184170471841704,
"grad_norm": 0.5409316420555115,
"learning_rate": 4.761576871594841e-06,
"loss": 0.150812029838562,
"step": 1180
},
{
"epoch": 0.730593607305936,
"grad_norm": 0.5275493264198303,
"learning_rate": 4.750789661667318e-06,
"loss": 0.13278884887695314,
"step": 1200
},
{
"epoch": 0.7427701674277016,
"grad_norm": 0.5485584735870361,
"learning_rate": 4.739776571147943e-06,
"loss": 0.1612934350967407,
"step": 1220
},
{
"epoch": 0.7549467275494672,
"grad_norm": 0.5949460864067078,
"learning_rate": 4.728538705268116e-06,
"loss": 0.16211290359497071,
"step": 1240
},
{
"epoch": 0.7671232876712328,
"grad_norm": 0.43323376774787903,
"learning_rate": 4.717077191816824e-06,
"loss": 0.14386119842529296,
"step": 1260
},
{
"epoch": 0.7792998477929984,
"grad_norm": 0.6409174799919128,
"learning_rate": 4.705393181027463e-06,
"loss": 0.12942540645599365,
"step": 1280
},
{
"epoch": 0.791476407914764,
"grad_norm": 0.4871342182159424,
"learning_rate": 4.693487845462413e-06,
"loss": 0.14771063327789308,
"step": 1300
},
{
"epoch": 0.8036529680365296,
"grad_norm": 0.5108008980751038,
"learning_rate": 4.681362379895349e-06,
"loss": 0.1276724100112915,
"step": 1320
},
{
"epoch": 0.8158295281582952,
"grad_norm": 0.915285587310791,
"learning_rate": 4.6690180011913524e-06,
"loss": 0.1319241166114807,
"step": 1340
},
{
"epoch": 0.8280060882800608,
"grad_norm": 0.5282526612281799,
"learning_rate": 4.6564559481847795e-06,
"loss": 0.1557891011238098,
"step": 1360
},
{
"epoch": 0.8401826484018264,
"grad_norm": 0.46745216846466064,
"learning_rate": 4.643677481554947e-06,
"loss": 0.11075855493545532,
"step": 1380
},
{
"epoch": 0.852359208523592,
"grad_norm": 0.40246087312698364,
"learning_rate": 4.630683883699607e-06,
"loss": 0.1580789566040039,
"step": 1400
},
{
"epoch": 0.8645357686453576,
"grad_norm": 0.3718211352825165,
"learning_rate": 4.6174764586062556e-06,
"loss": 0.16006500720977784,
"step": 1420
},
{
"epoch": 0.8767123287671232,
"grad_norm": 0.4359384775161743,
"learning_rate": 4.6040565317212685e-06,
"loss": 0.1462727189064026,
"step": 1440
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.3503302037715912,
"learning_rate": 4.59042544981688e-06,
"loss": 0.14725338220596312,
"step": 1460
},
{
"epoch": 0.9010654490106544,
"grad_norm": 0.3662220537662506,
"learning_rate": 4.5765845808560334e-06,
"loss": 0.1304166793823242,
"step": 1480
},
{
"epoch": 0.91324200913242,
"grad_norm": 0.45357516407966614,
"learning_rate": 4.562535313855094e-06,
"loss": 0.1293134570121765,
"step": 1500
},
{
"epoch": 0.91324200913242,
"eval_loss": 0.25538697838783264,
"eval_runtime": 47.0026,
"eval_samples_per_second": 18.722,
"eval_steps_per_second": 18.722,
"step": 1500
},
{
"epoch": 0.9254185692541856,
"grad_norm": 0.31984779238700867,
"learning_rate": 4.548279058744451e-06,
"loss": 0.11359381675720215,
"step": 1520
},
{
"epoch": 0.9375951293759512,
"grad_norm": 0.6947388052940369,
"learning_rate": 4.533817246227024e-06,
"loss": 0.15145074129104613,
"step": 1540
},
{
"epoch": 0.9497716894977168,
"grad_norm": 0.5685822367668152,
"learning_rate": 4.519151327634685e-06,
"loss": 0.11953675746917725,
"step": 1560
},
{
"epoch": 0.9619482496194824,
"grad_norm": 0.2805669903755188,
"learning_rate": 4.504282774782605e-06,
"loss": 0.13977375030517578,
"step": 1580
},
{
"epoch": 0.974124809741248,
"grad_norm": 0.333103746175766,
"learning_rate": 4.489213079821551e-06,
"loss": 0.1338045358657837,
"step": 1600
},
{
"epoch": 0.9863013698630136,
"grad_norm": 0.5493115782737732,
"learning_rate": 4.4739437550881355e-06,
"loss": 0.11776142120361328,
"step": 1620
},
{
"epoch": 0.9984779299847792,
"grad_norm": 0.4903205931186676,
"learning_rate": 4.458476332953051e-06,
"loss": 0.12504475116729735,
"step": 1640
},
{
"epoch": 1.0103500761035007,
"grad_norm": 0.2682284712791443,
"learning_rate": 4.442812365667281e-06,
"loss": 0.08379222154617309,
"step": 1660
},
{
"epoch": 1.0225266362252663,
"grad_norm": 0.34869125485420227,
"learning_rate": 4.426953425206322e-06,
"loss": 0.08407147526741028,
"step": 1680
},
{
"epoch": 1.034703196347032,
"grad_norm": 0.38522422313690186,
"learning_rate": 4.410901103112434e-06,
"loss": 0.08041079640388489,
"step": 1700
},
{
"epoch": 1.0468797564687975,
"grad_norm": 0.4700411856174469,
"learning_rate": 4.394657010334908e-06,
"loss": 0.07876392006874085,
"step": 1720
},
{
"epoch": 1.059056316590563,
"grad_norm": 0.3719494938850403,
"learning_rate": 4.378222777068406e-06,
"loss": 0.10302903652191162,
"step": 1740
},
{
"epoch": 1.0712328767123287,
"grad_norm": 0.406753808259964,
"learning_rate": 4.361600052589358e-06,
"loss": 0.08733606934547425,
"step": 1760
},
{
"epoch": 1.0834094368340943,
"grad_norm": 0.19822958111763,
"learning_rate": 4.344790505090447e-06,
"loss": 0.08532609939575195,
"step": 1780
},
{
"epoch": 1.09558599695586,
"grad_norm": 0.3569728434085846,
"learning_rate": 4.327795821513195e-06,
"loss": 0.08734336495399475,
"step": 1800
},
{
"epoch": 1.1077625570776255,
"grad_norm": 0.4549162685871124,
"learning_rate": 4.3106177073786684e-06,
"loss": 0.0913870632648468,
"step": 1820
},
{
"epoch": 1.119939117199391,
"grad_norm": 0.5802178978919983,
"learning_rate": 4.293257886616318e-06,
"loss": 0.08115516304969787,
"step": 1840
},
{
"epoch": 1.1321156773211567,
"grad_norm": 0.30524641275405884,
"learning_rate": 4.275718101390975e-06,
"loss": 0.08891176581382751,
"step": 1860
},
{
"epoch": 1.1442922374429223,
"grad_norm": 0.33804091811180115,
"learning_rate": 4.25800011192801e-06,
"loss": 0.07950961589813232,
"step": 1880
},
{
"epoch": 1.156468797564688,
"grad_norm": 0.33472684025764465,
"learning_rate": 4.240105696336687e-06,
"loss": 0.08310645222663879,
"step": 1900
},
{
"epoch": 1.1686453576864535,
"grad_norm": 0.3032575845718384,
"learning_rate": 4.222036650431715e-06,
"loss": 0.07682961225509644,
"step": 1920
},
{
"epoch": 1.180821917808219,
"grad_norm": 0.47542238235473633,
"learning_rate": 4.203794787553032e-06,
"loss": 0.07520227432250977,
"step": 1940
},
{
"epoch": 1.1929984779299847,
"grad_norm": 0.39914897084236145,
"learning_rate": 4.185381938383821e-06,
"loss": 0.0754019558429718,
"step": 1960
},
{
"epoch": 1.2051750380517503,
"grad_norm": 0.4697635769844055,
"learning_rate": 4.166799950766793e-06,
"loss": 0.08085885643959045,
"step": 1980
},
{
"epoch": 1.217351598173516,
"grad_norm": 0.28078529238700867,
"learning_rate": 4.14805068951874e-06,
"loss": 0.0884653627872467,
"step": 2000
},
{
"epoch": 1.217351598173516,
"eval_loss": 0.2661186456680298,
"eval_runtime": 46.548,
"eval_samples_per_second": 18.905,
"eval_steps_per_second": 18.905,
"step": 2000
},
{
"epoch": 1.2295281582952815,
"grad_norm": 0.2885560989379883,
"learning_rate": 4.1291360362433965e-06,
"loss": 0.06684748530387878,
"step": 2020
},
{
"epoch": 1.241704718417047,
"grad_norm": 0.24163688719272614,
"learning_rate": 4.110057889142601e-06,
"loss": 0.0720324158668518,
"step": 2040
},
{
"epoch": 1.2538812785388127,
"grad_norm": 0.52589350938797,
"learning_rate": 4.090818162825804e-06,
"loss": 0.08799988031387329,
"step": 2060
},
{
"epoch": 1.2660578386605783,
"grad_norm": 0.35164448618888855,
"learning_rate": 4.071418788117926e-06,
"loss": 0.09275985956192016,
"step": 2080
},
{
"epoch": 1.278234398782344,
"grad_norm": 0.4981421232223511,
"learning_rate": 4.0518617118655845e-06,
"loss": 0.08431113958358764,
"step": 2100
},
{
"epoch": 1.2904109589041095,
"grad_norm": 0.2867731750011444,
"learning_rate": 4.032148896741717e-06,
"loss": 0.09995608925819396,
"step": 2120
},
{
"epoch": 1.302587519025875,
"grad_norm": 0.5612165331840515,
"learning_rate": 4.012282321048618e-06,
"loss": 0.07387629747390748,
"step": 2140
},
{
"epoch": 1.3147640791476407,
"grad_norm": 0.4880141317844391,
"learning_rate": 3.992263978519398e-06,
"loss": 0.07667248249053955,
"step": 2160
},
{
"epoch": 1.3269406392694063,
"grad_norm": 0.3143049478530884,
"learning_rate": 3.972095878117904e-06,
"loss": 0.09203824400901794,
"step": 2180
},
{
"epoch": 1.339117199391172,
"grad_norm": 0.47859013080596924,
"learning_rate": 3.951780043837107e-06,
"loss": 0.07835246920585633,
"step": 2200
},
{
"epoch": 1.3512937595129375,
"grad_norm": 0.28448912501335144,
"learning_rate": 3.9313185144959835e-06,
"loss": 0.08577624559402466,
"step": 2220
},
{
"epoch": 1.363470319634703,
"grad_norm": 0.32139304280281067,
"learning_rate": 3.9107133435349025e-06,
"loss": 0.0789969801902771,
"step": 2240
},
{
"epoch": 1.3756468797564687,
"grad_norm": 0.4797567129135132,
"learning_rate": 3.889966598809557e-06,
"loss": 0.07151145935058593,
"step": 2260
},
{
"epoch": 1.3878234398782343,
"grad_norm": 0.2404891699552536,
"learning_rate": 3.869080362383437e-06,
"loss": 0.09204544425010681,
"step": 2280
},
{
"epoch": 1.4,
"grad_norm": 0.328392893075943,
"learning_rate": 3.848056730318881e-06,
"loss": 0.11079612970352173,
"step": 2300
},
{
"epoch": 1.4121765601217655,
"grad_norm": 0.2993980646133423,
"learning_rate": 3.826897812466728e-06,
"loss": 0.06770140528678895,
"step": 2320
},
{
"epoch": 1.4243531202435311,
"grad_norm": 0.47816380858421326,
"learning_rate": 3.8056057322545763e-06,
"loss": 0.08210510611534119,
"step": 2340
},
{
"epoch": 1.4365296803652967,
"grad_norm": 0.38082119822502136,
"learning_rate": 3.7841826264736888e-06,
"loss": 0.09583572745323181,
"step": 2360
},
{
"epoch": 1.4487062404870623,
"grad_norm": 0.3811774253845215,
"learning_rate": 3.762630645064547e-06,
"loss": 0.09235450625419617,
"step": 2380
},
{
"epoch": 1.460882800608828,
"grad_norm": 0.3869916498661041,
"learning_rate": 3.7409519509010985e-06,
"loss": 0.08658097982406616,
"step": 2400
},
{
"epoch": 1.4730593607305935,
"grad_norm": 0.5042543411254883,
"learning_rate": 3.7191487195736915e-06,
"loss": 0.08892765045166015,
"step": 2420
},
{
"epoch": 1.4852359208523591,
"grad_norm": 0.4064173996448517,
"learning_rate": 3.697223139170748e-06,
"loss": 0.07849371433258057,
"step": 2440
},
{
"epoch": 1.4974124809741247,
"grad_norm": 0.3958194851875305,
"learning_rate": 3.6751774100591716e-06,
"loss": 0.07469035387039184,
"step": 2460
},
{
"epoch": 1.5095890410958903,
"grad_norm": 0.39424121379852295,
"learning_rate": 3.6530137446635265e-06,
"loss": 0.0782626211643219,
"step": 2480
},
{
"epoch": 1.521765601217656,
"grad_norm": 0.46179333329200745,
"learning_rate": 3.630734367244012e-06,
"loss": 0.08304058909416198,
"step": 2500
},
{
"epoch": 1.521765601217656,
"eval_loss": 0.2771773338317871,
"eval_runtime": 46.9989,
"eval_samples_per_second": 18.724,
"eval_steps_per_second": 18.724,
"step": 2500
},
{
"epoch": 1.5339421613394215,
"grad_norm": 0.3447970151901245,
"learning_rate": 3.6083415136732374e-06,
"loss": 0.08037537932395936,
"step": 2520
},
{
"epoch": 1.5461187214611871,
"grad_norm": 0.544224202632904,
"learning_rate": 3.585837431211845e-06,
"loss": 0.08990358114242554,
"step": 2540
},
{
"epoch": 1.5582952815829527,
"grad_norm": 0.2808915376663208,
"learning_rate": 3.563224378282978e-06,
"loss": 0.0773526132106781,
"step": 2560
},
{
"epoch": 1.5704718417047183,
"grad_norm": 0.27800413966178894,
"learning_rate": 3.5405046242456396e-06,
"loss": 0.0777865469455719,
"step": 2580
},
{
"epoch": 1.582648401826484,
"grad_norm": 0.4247933328151703,
"learning_rate": 3.517680449166943e-06,
"loss": 0.08037815093994141,
"step": 2600
},
{
"epoch": 1.5948249619482495,
"grad_norm": 0.748912513256073,
"learning_rate": 3.4947541435932976e-06,
"loss": 0.08837634325027466,
"step": 2620
},
{
"epoch": 1.6070015220700151,
"grad_norm": 0.29709526896476746,
"learning_rate": 3.471728008320532e-06,
"loss": 0.08201563358306885,
"step": 2640
},
{
"epoch": 1.6191780821917807,
"grad_norm": 0.3287765681743622,
"learning_rate": 3.4486043541630066e-06,
"loss": 0.07379403114318847,
"step": 2660
},
{
"epoch": 1.6313546423135463,
"grad_norm": 0.4224306643009186,
"learning_rate": 3.425385501721696e-06,
"loss": 0.08871785402297974,
"step": 2680
},
{
"epoch": 1.643531202435312,
"grad_norm": 0.5213542580604553,
"learning_rate": 3.4020737811513107e-06,
"loss": 0.07757498621940613,
"step": 2700
},
{
"epoch": 1.6557077625570775,
"grad_norm": 0.21830426156520844,
"learning_rate": 3.3786715319264483e-06,
"loss": 0.08565697669982911,
"step": 2720
},
{
"epoch": 1.6678843226788431,
"grad_norm": 0.30557703971862793,
"learning_rate": 3.355181102606816e-06,
"loss": 0.08129348754882812,
"step": 2740
},
{
"epoch": 1.6800608828006087,
"grad_norm": 0.453703910112381,
"learning_rate": 3.331604850601533e-06,
"loss": 0.07875375747680664,
"step": 2760
},
{
"epoch": 1.6922374429223743,
"grad_norm": 0.33889397978782654,
"learning_rate": 3.307945141932556e-06,
"loss": 0.08989614248275757,
"step": 2780
},
{
"epoch": 1.70441400304414,
"grad_norm": 0.21036894619464874,
"learning_rate": 3.2842043509972294e-06,
"loss": 0.08251298069953919,
"step": 2800
},
{
"epoch": 1.7165905631659055,
"grad_norm": 0.2764931321144104,
"learning_rate": 3.2603848603300026e-06,
"loss": 0.07760271430015564,
"step": 2820
},
{
"epoch": 1.7287671232876711,
"grad_norm": 0.3566988706588745,
"learning_rate": 3.236489060363329e-06,
"loss": 0.07395396232604981,
"step": 2840
},
{
"epoch": 1.7409436834094367,
"grad_norm": 0.3616081476211548,
"learning_rate": 3.212519349187766e-06,
"loss": 0.07028600573539734,
"step": 2860
},
{
"epoch": 1.7531202435312023,
"grad_norm": 0.41925984621047974,
"learning_rate": 3.188478132311319e-06,
"loss": 0.08469281196594239,
"step": 2880
},
{
"epoch": 1.765296803652968,
"grad_norm": 0.4866960346698761,
"learning_rate": 3.164367822418029e-06,
"loss": 0.09567424058914184,
"step": 2900
},
{
"epoch": 1.7774733637747335,
"grad_norm": 0.2977049648761749,
"learning_rate": 3.1401908391258474e-06,
"loss": 0.07239987254142762,
"step": 2920
},
{
"epoch": 1.7896499238964991,
"grad_norm": 0.3723915219306946,
"learning_rate": 3.1159496087438098e-06,
"loss": 0.0891954243183136,
"step": 2940
},
{
"epoch": 1.8018264840182647,
"grad_norm": 0.22685140371322632,
"learning_rate": 3.0916465640285426e-06,
"loss": 0.07796849608421326,
"step": 2960
},
{
"epoch": 1.8140030441400303,
"grad_norm": 0.23534800112247467,
"learning_rate": 3.0672841439401223e-06,
"loss": 0.08645985722541809,
"step": 2980
},
{
"epoch": 1.826179604261796,
"grad_norm": 0.40188542008399963,
"learning_rate": 3.0428647933973103e-06,
"loss": 0.08427774310111999,
"step": 3000
},
{
"epoch": 1.826179604261796,
"eval_loss": 0.2774485647678375,
"eval_runtime": 46.6449,
"eval_samples_per_second": 18.866,
"eval_steps_per_second": 18.866,
"step": 3000
},
{
"epoch": 1.8383561643835615,
"grad_norm": 0.35732510685920715,
"learning_rate": 3.0183909630321865e-06,
"loss": 0.07381275296211243,
"step": 3020
},
{
"epoch": 1.8505327245053271,
"grad_norm": 0.3167949914932251,
"learning_rate": 2.9938651089442184e-06,
"loss": 0.07289664745330811,
"step": 3040
},
{
"epoch": 1.8627092846270927,
"grad_norm": 0.4893674850463867,
"learning_rate": 2.969289692453773e-06,
"loss": 0.07124295830726624,
"step": 3060
},
{
"epoch": 1.8748858447488583,
"grad_norm": 0.3017306327819824,
"learning_rate": 2.944667179855109e-06,
"loss": 0.08125877976417542,
"step": 3080
},
{
"epoch": 1.887062404870624,
"grad_norm": 0.3442900776863098,
"learning_rate": 2.920000042168871e-06,
"loss": 0.0724608838558197,
"step": 3100
},
{
"epoch": 1.8992389649923895,
"grad_norm": 0.27901750802993774,
"learning_rate": 2.8952907548941057e-06,
"loss": 0.07104775309562683,
"step": 3120
},
{
"epoch": 1.9114155251141551,
"grad_norm": 0.35838621854782104,
"learning_rate": 2.8705417977598277e-06,
"loss": 0.0677955150604248,
"step": 3140
},
{
"epoch": 1.9235920852359207,
"grad_norm": 0.3615752160549164,
"learning_rate": 2.8457556544761687e-06,
"loss": 0.07164496779441834,
"step": 3160
},
{
"epoch": 1.9357686453576863,
"grad_norm": 0.5117827653884888,
"learning_rate": 2.8209348124851187e-06,
"loss": 0.071807599067688,
"step": 3180
},
{
"epoch": 1.947945205479452,
"grad_norm": 0.36082401871681213,
"learning_rate": 2.7960817627108965e-06,
"loss": 0.095755535364151,
"step": 3200
},
{
"epoch": 1.9601217656012175,
"grad_norm": 0.27905145287513733,
"learning_rate": 2.77119899930997e-06,
"loss": 0.07055851817131042,
"step": 3220
},
{
"epoch": 1.9722983257229831,
"grad_norm": 0.5642575621604919,
"learning_rate": 2.7462890194207513e-06,
"loss": 0.07278798818588257,
"step": 3240
},
{
"epoch": 1.9844748858447487,
"grad_norm": 0.2286670207977295,
"learning_rate": 2.7213543229129956e-06,
"loss": 0.07153088450431824,
"step": 3260
},
{
"epoch": 1.9966514459665143,
"grad_norm": 0.3228706121444702,
"learning_rate": 2.6963974121369242e-06,
"loss": 0.07440360188484192,
"step": 3280
},
{
"epoch": 2.008523592085236,
"grad_norm": 0.21613839268684387,
"learning_rate": 2.671420791672093e-06,
"loss": 0.0517767608165741,
"step": 3300
},
{
"epoch": 2.0207001522070014,
"grad_norm": 0.19271881878376007,
"learning_rate": 2.646426968076052e-06,
"loss": 0.03812239170074463,
"step": 3320
},
{
"epoch": 2.032876712328767,
"grad_norm": 0.12863056361675262,
"learning_rate": 2.6214184496327865e-06,
"loss": 0.04107579588890076,
"step": 3340
},
{
"epoch": 2.0450532724505326,
"grad_norm": 0.26679477095603943,
"learning_rate": 2.5963977461010022e-06,
"loss": 0.04673115909099579,
"step": 3360
},
{
"epoch": 2.057229832572298,
"grad_norm": 0.2743483781814575,
"learning_rate": 2.5713673684622524e-06,
"loss": 0.03674449622631073,
"step": 3380
},
{
"epoch": 2.069406392694064,
"grad_norm": 0.16999909281730652,
"learning_rate": 2.546329828668949e-06,
"loss": 0.03422380387783051,
"step": 3400
},
{
"epoch": 2.0815829528158294,
"grad_norm": 0.2931291460990906,
"learning_rate": 2.5212876393922657e-06,
"loss": 0.035878732800483704,
"step": 3420
},
{
"epoch": 2.093759512937595,
"grad_norm": 0.34781521558761597,
"learning_rate": 2.496243313769986e-06,
"loss": 0.03577531576156616,
"step": 3440
},
{
"epoch": 2.1059360730593606,
"grad_norm": 0.472351998090744,
"learning_rate": 2.471199365154283e-06,
"loss": 0.04281675517559051,
"step": 3460
},
{
"epoch": 2.118112633181126,
"grad_norm": 0.38320988416671753,
"learning_rate": 2.4461583068595014e-06,
"loss": 0.042955422401428224,
"step": 3480
},
{
"epoch": 2.130289193302892,
"grad_norm": 0.12197626382112503,
"learning_rate": 2.421122651909918e-06,
"loss": 0.04432957172393799,
"step": 3500
},
{
"epoch": 2.130289193302892,
"eval_loss": 0.3123805522918701,
"eval_runtime": 46.5258,
"eval_samples_per_second": 18.914,
"eval_steps_per_second": 18.914,
"step": 3500
},
{
"epoch": 2.1424657534246574,
"grad_norm": 0.21291697025299072,
"learning_rate": 2.3960949127875556e-06,
"loss": 0.03356837034225464,
"step": 3520
},
{
"epoch": 2.154642313546423,
"grad_norm": 0.44130077958106995,
"learning_rate": 2.371077601180031e-06,
"loss": 0.036935809254646304,
"step": 3540
},
{
"epoch": 2.1668188736681886,
"grad_norm": 0.49069875478744507,
"learning_rate": 2.3460732277284994e-06,
"loss": 0.0395690768957138,
"step": 3560
},
{
"epoch": 2.178995433789954,
"grad_norm": 0.2824937701225281,
"learning_rate": 2.321084301775689e-06,
"loss": 0.044693085551261905,
"step": 3580
},
{
"epoch": 2.19117199391172,
"grad_norm": 0.35114097595214844,
"learning_rate": 2.29611333111408e-06,
"loss": 0.03243565857410431,
"step": 3600
},
{
"epoch": 2.2033485540334854,
"grad_norm": 0.47931790351867676,
"learning_rate": 2.271162821734225e-06,
"loss": 0.04325798749923706,
"step": 3620
},
{
"epoch": 2.215525114155251,
"grad_norm": 0.12716218829154968,
"learning_rate": 2.2462352775732653e-06,
"loss": 0.03856868743896484,
"step": 3640
},
{
"epoch": 2.2277016742770166,
"grad_norm": 0.3522437512874603,
"learning_rate": 2.221333200263637e-06,
"loss": 0.041602414846420285,
"step": 3660
},
{
"epoch": 2.239878234398782,
"grad_norm": 0.234590083360672,
"learning_rate": 2.1964590888820233e-06,
"loss": 0.04286134541034699,
"step": 3680
},
{
"epoch": 2.252054794520548,
"grad_norm": 0.2972453534603119,
"learning_rate": 2.1716154396985526e-06,
"loss": 0.041756758093833925,
"step": 3700
},
{
"epoch": 2.2642313546423134,
"grad_norm": 0.3236760199069977,
"learning_rate": 2.1468047459262882e-06,
"loss": 0.0359495222568512,
"step": 3720
},
{
"epoch": 2.276407914764079,
"grad_norm": 0.32255831360816956,
"learning_rate": 2.12202949747101e-06,
"loss": 0.04322676360607147,
"step": 3740
},
{
"epoch": 2.2885844748858446,
"grad_norm": 0.2753404378890991,
"learning_rate": 2.0972921806813468e-06,
"loss": 0.04191597998142242,
"step": 3760
},
{
"epoch": 2.30076103500761,
"grad_norm": 0.23182159662246704,
"learning_rate": 2.072595278099247e-06,
"loss": 0.041278204321861266,
"step": 3780
},
{
"epoch": 2.312937595129376,
"grad_norm": 0.25987961888313293,
"learning_rate": 2.047941268210849e-06,
"loss": 0.04312986135482788,
"step": 3800
},
{
"epoch": 2.3251141552511414,
"grad_norm": 0.3683331310749054,
"learning_rate": 2.0233326251977426e-06,
"loss": 0.04236046075820923,
"step": 3820
},
{
"epoch": 2.337290715372907,
"grad_norm": 0.2520082890987396,
"learning_rate": 1.9987718186886724e-06,
"loss": 0.04433901011943817,
"step": 3840
},
{
"epoch": 2.3494672754946726,
"grad_norm": 0.16200299561023712,
"learning_rate": 1.9742613135116986e-06,
"loss": 0.04127628207206726,
"step": 3860
},
{
"epoch": 2.361643835616438,
"grad_norm": 0.37064701318740845,
"learning_rate": 1.949803569446828e-06,
"loss": 0.04586326479911804,
"step": 3880
},
{
"epoch": 2.373820395738204,
"grad_norm": 0.416020005941391,
"learning_rate": 1.925401040979171e-06,
"loss": 0.03624185025691986,
"step": 3900
},
{
"epoch": 2.3859969558599694,
"grad_norm": 0.17131179571151733,
"learning_rate": 1.9010561770526076e-06,
"loss": 0.035064518451690674,
"step": 3920
},
{
"epoch": 2.398173515981735,
"grad_norm": 0.5188226103782654,
"learning_rate": 1.8767714208240312e-06,
"loss": 0.042050021886825564,
"step": 3940
},
{
"epoch": 2.4103500761035006,
"grad_norm": 0.3398009240627289,
"learning_rate": 1.852549209418154e-06,
"loss": 0.038166466355323794,
"step": 3960
},
{
"epoch": 2.422526636225266,
"grad_norm": 0.2541758418083191,
"learning_rate": 1.8283919736829332e-06,
"loss": 0.040885674953460696,
"step": 3980
},
{
"epoch": 2.434703196347032,
"grad_norm": 0.4074256122112274,
"learning_rate": 1.804302137945614e-06,
"loss": 0.040162667632102966,
"step": 4000
},
{
"epoch": 2.434703196347032,
"eval_loss": 0.31583163142204285,
"eval_runtime": 47.319,
"eval_samples_per_second": 18.597,
"eval_steps_per_second": 18.597,
"step": 4000
},
{
"epoch": 2.4468797564687974,
"grad_norm": 0.2724802494049072,
"learning_rate": 1.7802821197694426e-06,
"loss": 0.04170995056629181,
"step": 4020
},
{
"epoch": 2.459056316590563,
"grad_norm": 0.21376508474349976,
"learning_rate": 1.7563343297110375e-06,
"loss": 0.03834344446659088,
"step": 4040
},
{
"epoch": 2.4712328767123286,
"grad_norm": 0.2933412492275238,
"learning_rate": 1.732461171078486e-06,
"loss": 0.03928310573101044,
"step": 4060
},
{
"epoch": 2.483409436834094,
"grad_norm": 0.46805083751678467,
"learning_rate": 1.7086650396901489e-06,
"loss": 0.03358933925628662,
"step": 4080
},
{
"epoch": 2.49558599695586,
"grad_norm": 0.45667552947998047,
"learning_rate": 1.6849483236342322e-06,
"loss": 0.03547535240650177,
"step": 4100
},
{
"epoch": 2.5077625570776254,
"grad_norm": 0.24512450397014618,
"learning_rate": 1.6613134030291217e-06,
"loss": 0.03600102663040161,
"step": 4120
},
{
"epoch": 2.519939117199391,
"grad_norm": 0.20636337995529175,
"learning_rate": 1.6377626497845278e-06,
"loss": 0.04347077012062073,
"step": 4140
},
{
"epoch": 2.5321156773211566,
"grad_norm": 0.4584953486919403,
"learning_rate": 1.6142984273634505e-06,
"loss": 0.02908192276954651,
"step": 4160
},
{
"epoch": 2.544292237442922,
"grad_norm": 0.26193487644195557,
"learning_rate": 1.5909230905449846e-06,
"loss": 0.03611198365688324,
"step": 4180
},
{
"epoch": 2.556468797564688,
"grad_norm": 0.20813828706741333,
"learning_rate": 1.567638985188012e-06,
"loss": 0.03758668601512909,
"step": 4200
},
{
"epoch": 2.5686453576864534,
"grad_norm": 0.3395022749900818,
"learning_rate": 1.544448447995773e-06,
"loss": 0.033633843064308167,
"step": 4220
},
{
"epoch": 2.580821917808219,
"grad_norm": 0.1472434103488922,
"learning_rate": 1.52135380628137e-06,
"loss": 0.036797890067100526,
"step": 4240
},
{
"epoch": 2.5929984779299846,
"grad_norm": 0.5788060426712036,
"learning_rate": 1.498357377734201e-06,
"loss": 0.039166563749313356,
"step": 4260
},
{
"epoch": 2.60517503805175,
"grad_norm": 0.7623679637908936,
"learning_rate": 1.4754614701873703e-06,
"loss": 0.03717599511146545,
"step": 4280
},
{
"epoch": 2.6173515981735163,
"grad_norm": 0.16205403208732605,
"learning_rate": 1.4526683813860792e-06,
"loss": 0.03962793946266174,
"step": 4300
},
{
"epoch": 2.6295281582952814,
"grad_norm": 0.11986076086759567,
"learning_rate": 1.4299803987570396e-06,
"loss": 0.035475924611091614,
"step": 4320
},
{
"epoch": 2.6417047184170475,
"grad_norm": 0.15573006868362427,
"learning_rate": 1.4073997991789078e-06,
"loss": 0.03256964683532715,
"step": 4340
},
{
"epoch": 2.6538812785388126,
"grad_norm": 0.25151512026786804,
"learning_rate": 1.384928848753792e-06,
"loss": 0.03712306022644043,
"step": 4360
},
{
"epoch": 2.6660578386605787,
"grad_norm": 0.20408153533935547,
"learning_rate": 1.3625698025798322e-06,
"loss": 0.041410398483276364,
"step": 4380
},
{
"epoch": 2.678234398782344,
"grad_norm": 0.3156696856021881,
"learning_rate": 1.3403249045248907e-06,
"loss": 0.03158504366874695,
"step": 4400
},
{
"epoch": 2.69041095890411,
"grad_norm": 0.3835665285587311,
"learning_rate": 1.3181963870013604e-06,
"loss": 0.03525224924087524,
"step": 4420
},
{
"epoch": 2.702587519025875,
"grad_norm": 0.45423486828804016,
"learning_rate": 1.2961864707421345e-06,
"loss": 0.03239959478378296,
"step": 4440
},
{
"epoch": 2.714764079147641,
"grad_norm": 0.15982766449451447,
"learning_rate": 1.2742973645777394e-06,
"loss": 0.031032082438468934,
"step": 4460
},
{
"epoch": 2.726940639269406,
"grad_norm": 0.2770426869392395,
"learning_rate": 1.252531265214662e-06,
"loss": 0.030566230416297913,
"step": 4480
},
{
"epoch": 2.7391171993911723,
"grad_norm": 0.3693839907646179,
"learning_rate": 1.2308903570149048e-06,
"loss": 0.041362547874450685,
"step": 4500
},
{
"epoch": 2.7391171993911723,
"eval_loss": 0.3391737937927246,
"eval_runtime": 46.9306,
"eval_samples_per_second": 18.751,
"eval_steps_per_second": 18.751,
"step": 4500
},
{
"epoch": 2.7512937595129374,
"grad_norm": 0.3229275643825531,
"learning_rate": 1.2093768117767613e-06,
"loss": 0.0388390064239502,
"step": 4520
},
{
"epoch": 2.7634703196347035,
"grad_norm": 0.6786078214645386,
"learning_rate": 1.1879927885168733e-06,
"loss": 0.032555675506591795,
"step": 4540
},
{
"epoch": 2.7756468797564686,
"grad_norm": 0.32371029257774353,
"learning_rate": 1.1667404332535504e-06,
"loss": 0.03606459796428681,
"step": 4560
},
{
"epoch": 2.7878234398782347,
"grad_norm": 0.44066882133483887,
"learning_rate": 1.1456218787914128e-06,
"loss": 0.032086309790611264,
"step": 4580
},
{
"epoch": 2.8,
"grad_norm": 0.5005165338516235,
"learning_rate": 1.1246392445073438e-06,
"loss": 0.033362787961959836,
"step": 4600
},
{
"epoch": 2.812176560121766,
"grad_norm": 0.22586822509765625,
"learning_rate": 1.1037946361378027e-06,
"loss": 0.03638745844364166,
"step": 4620
},
{
"epoch": 2.824353120243531,
"grad_norm": 0.2905796766281128,
"learning_rate": 1.0830901455674977e-06,
"loss": 0.030933958292007447,
"step": 4640
},
{
"epoch": 2.836529680365297,
"grad_norm": 0.10796497762203217,
"learning_rate": 1.0625278506194538e-06,
"loss": 0.02879139482975006,
"step": 4660
},
{
"epoch": 2.8487062404870622,
"grad_norm": 0.2545916438102722,
"learning_rate": 1.04210981484649e-06,
"loss": 0.03345020413398743,
"step": 4680
},
{
"epoch": 2.8608828006088283,
"grad_norm": 0.2986568808555603,
"learning_rate": 1.0218380873241314e-06,
"loss": 0.02593054175376892,
"step": 4700
},
{
"epoch": 2.8730593607305934,
"grad_norm": 0.1755756139755249,
"learning_rate": 1.0017147024449674e-06,
"loss": 0.03906567096710205,
"step": 4720
},
{
"epoch": 2.8852359208523595,
"grad_norm": 0.18288888037204742,
"learning_rate": 9.81741679714493e-07,
"loss": 0.03371626436710358,
"step": 4740
},
{
"epoch": 2.8974124809741246,
"grad_norm": 0.368429958820343,
"learning_rate": 9.619210235484333e-07,
"loss": 0.03090968132019043,
"step": 4760
},
{
"epoch": 2.9095890410958907,
"grad_norm": 0.17118144035339355,
"learning_rate": 9.422547230715931e-07,
"loss": 0.0322105199098587,
"step": 4780
},
{
"epoch": 2.921765601217656,
"grad_norm": 0.41911277174949646,
"learning_rate": 9.227447519182353e-07,
"loss": 0.035210177302360535,
"step": 4800
},
{
"epoch": 2.933942161339422,
"grad_norm": 0.3521968722343445,
"learning_rate": 9.033930680340097e-07,
"loss": 0.026842504739761353,
"step": 4820
},
{
"epoch": 2.946118721461187,
"grad_norm": 0.20812013745307922,
"learning_rate": 8.842016134794682e-07,
"loss": 0.03439584076404571,
"step": 4840
},
{
"epoch": 2.958295281582953,
"grad_norm": 0.2796875834465027,
"learning_rate": 8.651723142351603e-07,
"loss": 0.04011322855949402,
"step": 4860
},
{
"epoch": 2.9704718417047182,
"grad_norm": 0.20960687100887299,
"learning_rate": 8.463070800083562e-07,
"loss": 0.03800423145294189,
"step": 4880
},
{
"epoch": 2.9826484018264843,
"grad_norm": 0.2586495876312256,
"learning_rate": 8.276078040413879e-07,
"loss": 0.03839131891727447,
"step": 4900
},
{
"epoch": 2.9948249619482494,
"grad_norm": 0.37137141823768616,
"learning_rate": 8.090763629216589e-07,
"loss": 0.02721840739250183,
"step": 4920
},
{
"epoch": 3.006697108066971,
"grad_norm": 0.3677407503128052,
"learning_rate": 7.907146163933102e-07,
"loss": 0.023991990089416503,
"step": 4940
},
{
"epoch": 3.0188736681887365,
"grad_norm": 0.11811063438653946,
"learning_rate": 7.725244071705871e-07,
"loss": 0.01451514959335327,
"step": 4960
},
{
"epoch": 3.031050228310502,
"grad_norm": 0.3449067771434784,
"learning_rate": 7.545075607529104e-07,
"loss": 0.014327619969844819,
"step": 4980
},
{
"epoch": 3.0432267884322677,
"grad_norm": 0.3237353265285492,
"learning_rate": 7.366658852416788e-07,
"loss": 0.017832010984420776,
"step": 5000
},
{
"epoch": 3.0432267884322677,
"eval_loss": 0.39115142822265625,
"eval_runtime": 47.6431,
"eval_samples_per_second": 18.471,
"eval_steps_per_second": 18.471,
"step": 5000
},
{
"epoch": 3.0554033485540333,
"grad_norm": 0.2226281613111496,
"learning_rate": 7.190011711588101e-07,
"loss": 0.011674411594867706,
"step": 5020
},
{
"epoch": 3.067579908675799,
"grad_norm": 0.08729376643896103,
"learning_rate": 7.015151912670562e-07,
"loss": 0.013690856099128724,
"step": 5040
},
{
"epoch": 3.0797564687975645,
"grad_norm": 0.2745465636253357,
"learning_rate": 6.842097003920903e-07,
"loss": 0.011978642642498016,
"step": 5060
},
{
"epoch": 3.09193302891933,
"grad_norm": 0.06905842572450638,
"learning_rate": 6.67086435246406e-07,
"loss": 0.013893941044807434,
"step": 5080
},
{
"epoch": 3.1041095890410957,
"grad_norm": 0.07840315997600555,
"learning_rate": 6.501471142550194e-07,
"loss": 0.009910025447607041,
"step": 5100
},
{
"epoch": 3.1162861491628613,
"grad_norm": 0.19672124087810516,
"learning_rate": 6.333934373830222e-07,
"loss": 0.008863755315542222,
"step": 5120
},
{
"epoch": 3.128462709284627,
"grad_norm": 0.37645605206489563,
"learning_rate": 6.168270859649761e-07,
"loss": 0.010502541810274124,
"step": 5140
},
{
"epoch": 3.1406392694063925,
"grad_norm": 0.2069159746170044,
"learning_rate": 6.004497225361786e-07,
"loss": 0.012096930295228958,
"step": 5160
},
{
"epoch": 3.1528158295281585,
"grad_norm": 0.2584017217159271,
"learning_rate": 5.842629906658226e-07,
"loss": 0.013278065621852875,
"step": 5180
},
{
"epoch": 3.1649923896499237,
"grad_norm": 0.2050527036190033,
"learning_rate": 5.682685147920481e-07,
"loss": 0.013548998534679413,
"step": 5200
},
{
"epoch": 3.1771689497716897,
"grad_norm": 0.13838107883930206,
"learning_rate": 5.524679000589256e-07,
"loss": 0.013736458122730255,
"step": 5220
},
{
"epoch": 3.189345509893455,
"grad_norm": 0.06378225982189178,
"learning_rate": 5.36862732155366e-07,
"loss": 0.013177134096622467,
"step": 5240
},
{
"epoch": 3.201522070015221,
"grad_norm": 0.27431613206863403,
"learning_rate": 5.214545771559879e-07,
"loss": 0.011971819400787353,
"step": 5260
},
{
"epoch": 3.213698630136986,
"grad_norm": 0.529901921749115,
"learning_rate": 5.062449813639528e-07,
"loss": 0.014422819018363953,
"step": 5280
},
{
"epoch": 3.225875190258752,
"grad_norm": 0.19417761266231537,
"learning_rate": 4.912354711557856e-07,
"loss": 0.010663678497076034,
"step": 5300
},
{
"epoch": 3.2380517503805173,
"grad_norm": 0.044735077768564224,
"learning_rate": 4.764275528281892e-07,
"loss": 0.011400717496871948,
"step": 5320
},
{
"epoch": 3.2502283105022833,
"grad_norm": 0.057179443538188934,
"learning_rate": 4.6182271244688355e-07,
"loss": 0.008456526696681977,
"step": 5340
},
{
"epoch": 3.2624048706240485,
"grad_norm": 0.10396906733512878,
"learning_rate": 4.4742241569746407e-07,
"loss": 0.014539115130901337,
"step": 5360
},
{
"epoch": 3.2745814307458145,
"grad_norm": 0.32904428243637085,
"learning_rate": 4.332281077383177e-07,
"loss": 0.017625690996646882,
"step": 5380
},
{
"epoch": 3.2867579908675797,
"grad_norm": 0.20823979377746582,
"learning_rate": 4.1924121305558563e-07,
"loss": 0.007641100138425827,
"step": 5400
},
{
"epoch": 3.2989345509893457,
"grad_norm": 0.25470009446144104,
"learning_rate": 4.054631353202121e-07,
"loss": 0.011799700558185577,
"step": 5420
},
{
"epoch": 3.311111111111111,
"grad_norm": 0.3968588709831238,
"learning_rate": 3.9189525724707634e-07,
"loss": 0.011455408483743667,
"step": 5440
},
{
"epoch": 3.323287671232877,
"grad_norm": 0.10818332433700562,
"learning_rate": 3.785389404562259e-07,
"loss": 0.012499115616083144,
"step": 5460
},
{
"epoch": 3.335464231354642,
"grad_norm": 0.1818460375070572,
"learning_rate": 3.653955253362351e-07,
"loss": 0.01148865669965744,
"step": 5480
},
{
"epoch": 3.347640791476408,
"grad_norm": 0.3504088521003723,
"learning_rate": 3.5246633090968205e-07,
"loss": 0.012819178402423859,
"step": 5500
},
{
"epoch": 3.347640791476408,
"eval_loss": 0.43404534459114075,
"eval_runtime": 46.4882,
"eval_samples_per_second": 18.93,
"eval_steps_per_second": 18.93,
"step": 5500
},
{
"epoch": 3.3598173515981733,
"grad_norm": 0.4551874101161957,
"learning_rate": 3.397526547007832e-07,
"loss": 0.013325585424900055,
"step": 5520
},
{
"epoch": 3.3719939117199393,
"grad_norm": 0.35187825560569763,
"learning_rate": 3.2725577260517396e-07,
"loss": 0.011712662875652313,
"step": 5540
},
{
"epoch": 3.3841704718417045,
"grad_norm": 0.6071529984474182,
"learning_rate": 3.14976938761867e-07,
"loss": 0.01580573171377182,
"step": 5560
},
{
"epoch": 3.3963470319634705,
"grad_norm": 0.18844422698020935,
"learning_rate": 3.029173854273909e-07,
"loss": 0.012312603741884231,
"step": 5580
},
{
"epoch": 3.4085235920852357,
"grad_norm": 0.13131535053253174,
"learning_rate": 2.910783228521269e-07,
"loss": 0.011797953397035599,
"step": 5600
},
{
"epoch": 3.4207001522070017,
"grad_norm": 0.4402364492416382,
"learning_rate": 2.794609391588504e-07,
"loss": 0.012182456254959107,
"step": 5620
},
{
"epoch": 3.432876712328767,
"grad_norm": 0.3497592508792877,
"learning_rate": 2.6806640022349897e-07,
"loss": 0.013599888980388641,
"step": 5640
},
{
"epoch": 3.445053272450533,
"grad_norm": 0.2316354215145111,
"learning_rate": 2.5689584955816497e-07,
"loss": 0.009272868931293487,
"step": 5660
},
{
"epoch": 3.457229832572298,
"grad_norm": 0.3858301341533661,
"learning_rate": 2.459504081963421e-07,
"loss": 0.008165979385375976,
"step": 5680
},
{
"epoch": 3.469406392694064,
"grad_norm": 0.14734333753585815,
"learning_rate": 2.3523117458041865e-07,
"loss": 0.009182130545377731,
"step": 5700
},
{
"epoch": 3.4815829528158293,
"grad_norm": 0.03280401974916458,
"learning_rate": 2.2473922445144485e-07,
"loss": 0.0107998326420784,
"step": 5720
},
{
"epoch": 3.4937595129375953,
"grad_norm": 0.1505511999130249,
"learning_rate": 2.144756107411733e-07,
"loss": 0.014469687640666962,
"step": 5740
},
{
"epoch": 3.5059360730593605,
"grad_norm": 0.2366904318332672,
"learning_rate": 2.0444136346639333e-07,
"loss": 0.0121701680123806,
"step": 5760
},
{
"epoch": 3.5181126331811265,
"grad_norm": 0.1468425989151001,
"learning_rate": 1.9463748962556096e-07,
"loss": 0.014668506383895875,
"step": 5780
},
{
"epoch": 3.5302891933028917,
"grad_norm": 0.14534050226211548,
"learning_rate": 1.8506497309773885e-07,
"loss": 0.010488402843475342,
"step": 5800
},
{
"epoch": 3.5424657534246577,
"grad_norm": 0.15501493215560913,
"learning_rate": 1.7572477454386257e-07,
"loss": 0.010667071491479874,
"step": 5820
},
{
"epoch": 3.554642313546423,
"grad_norm": 0.26535800099372864,
"learning_rate": 1.6661783131032726e-07,
"loss": 0.011079683899879456,
"step": 5840
},
{
"epoch": 3.566818873668189,
"grad_norm": 0.24390950798988342,
"learning_rate": 1.5774505733492263e-07,
"loss": 0.009308797866106033,
"step": 5860
},
{
"epoch": 3.578995433789954,
"grad_norm": 0.3409421443939209,
"learning_rate": 1.49107343055111e-07,
"loss": 0.012319787591695785,
"step": 5880
},
{
"epoch": 3.59117199391172,
"grad_norm": 0.4800300896167755,
"learning_rate": 1.407055553186701e-07,
"loss": 0.00843576118350029,
"step": 5900
},
{
"epoch": 3.6033485540334853,
"grad_norm": 0.11663182079792023,
"learning_rate": 1.3254053729669564e-07,
"loss": 0.00938587412238121,
"step": 5920
},
{
"epoch": 3.6155251141552514,
"grad_norm": 0.29512378573417664,
"learning_rate": 1.2461310839898656e-07,
"loss": 0.011934128403663636,
"step": 5940
},
{
"epoch": 3.6277016742770165,
"grad_norm": 0.2641650140285492,
"learning_rate": 1.169240641918104e-07,
"loss": 0.013170333206653595,
"step": 5960
},
{
"epoch": 3.6398782343987826,
"grad_norm": 0.47704726457595825,
"learning_rate": 1.0947417631806539e-07,
"loss": 0.014534834027290344,
"step": 5980
},
{
"epoch": 3.6520547945205477,
"grad_norm": 0.10114685446023941,
"learning_rate": 1.0226419241983865e-07,
"loss": 0.011021688580513,
"step": 6000
},
{
"epoch": 3.6520547945205477,
"eval_loss": 0.44063475728034973,
"eval_runtime": 46.1734,
"eval_samples_per_second": 19.059,
"eval_steps_per_second": 19.059,
"step": 6000
},
{
"epoch": 3.6642313546423138,
"grad_norm": 0.2619183659553528,
"learning_rate": 9.529483606337902e-08,
"loss": 0.010764393210411071,
"step": 6020
},
{
"epoch": 3.676407914764079,
"grad_norm": 0.05733129009604454,
"learning_rate": 8.856680666647882e-08,
"loss": 0.012128306180238723,
"step": 6040
},
{
"epoch": 3.688584474885845,
"grad_norm": 0.19483673572540283,
"learning_rate": 8.208077942828713e-08,
"loss": 0.011729901283979416,
"step": 6060
},
{
"epoch": 3.70076103500761,
"grad_norm": 0.2111903578042984,
"learning_rate": 7.58374052615457e-08,
"loss": 0.009119105339050294,
"step": 6080
},
{
"epoch": 3.712937595129376,
"grad_norm": 0.04995311424136162,
"learning_rate": 6.983731072726818e-08,
"loss": 0.017101363837718965,
"step": 6100
},
{
"epoch": 3.7251141552511413,
"grad_norm": 0.5839787125587463,
"learning_rate": 6.408109797186118e-08,
"loss": 0.012368235737085342,
"step": 6120
},
{
"epoch": 3.7372907153729074,
"grad_norm": 0.4685717523097992,
"learning_rate": 5.856934466669212e-08,
"loss": 0.008782628178596496,
"step": 6140
},
{
"epoch": 3.7494672754946725,
"grad_norm": 0.17204681038856506,
"learning_rate": 5.3302603950119994e-08,
"loss": 0.008994438499212266,
"step": 6160
},
{
"epoch": 3.7616438356164386,
"grad_norm": 0.07392167299985886,
"learning_rate": 4.8281404371981755e-08,
"loss": 0.011286454647779465,
"step": 6180
},
{
"epoch": 3.7738203957382037,
"grad_norm": 0.3728208541870117,
"learning_rate": 4.350624984055196e-08,
"loss": 0.011785905063152313,
"step": 6200
},
{
"epoch": 3.7859969558599698,
"grad_norm": 0.25468680262565613,
"learning_rate": 3.897761957196877e-08,
"loss": 0.013624191284179688,
"step": 6220
},
{
"epoch": 3.798173515981735,
"grad_norm": 0.09725204110145569,
"learning_rate": 3.469596804214548e-08,
"loss": 0.011700452119112015,
"step": 6240
},
{
"epoch": 3.810350076103501,
"grad_norm": 0.07126162946224213,
"learning_rate": 3.06617249411581e-08,
"loss": 0.011029987037181855,
"step": 6260
},
{
"epoch": 3.822526636225266,
"grad_norm": 0.08542267978191376,
"learning_rate": 2.687529513012488e-08,
"loss": 0.010965974628925323,
"step": 6280
},
{
"epoch": 3.834703196347032,
"grad_norm": 0.2627331018447876,
"learning_rate": 2.3337058600575722e-08,
"loss": 0.012378603965044022,
"step": 6300
},
{
"epoch": 3.8468797564687973,
"grad_norm": 0.19707690179347992,
"learning_rate": 2.0047370436317437e-08,
"loss": 0.011792077124118805,
"step": 6320
},
{
"epoch": 3.8590563165905634,
"grad_norm": 0.47547003626823425,
"learning_rate": 1.7006560777798608e-08,
"loss": 0.01145942509174347,
"step": 6340
},
{
"epoch": 3.8712328767123285,
"grad_norm": 0.3591565489768982,
"learning_rate": 1.421493478897945e-08,
"loss": 0.011088228970766067,
"step": 6360
},
{
"epoch": 3.8834094368340946,
"grad_norm": 0.20619548857212067,
"learning_rate": 1.1672772626704909e-08,
"loss": 0.010828402638435364,
"step": 6380
},
{
"epoch": 3.8955859969558597,
"grad_norm": 0.2822403311729431,
"learning_rate": 9.38032941258965e-09,
"loss": 0.01165580153465271,
"step": 6400
},
{
"epoch": 3.9077625570776258,
"grad_norm": 0.15682674944400787,
"learning_rate": 7.3378352074163215e-09,
"loss": 0.010783226788043975,
"step": 6420
},
{
"epoch": 3.919939117199391,
"grad_norm": 0.34253379702568054,
"learning_rate": 5.545494988045963e-09,
"loss": 0.011295531690120698,
"step": 6440
},
{
"epoch": 3.932115677321157,
"grad_norm": 0.27684664726257324,
"learning_rate": 4.003488626848073e-09,
"loss": 0.013613662123680115,
"step": 6460
},
{
"epoch": 3.944292237442922,
"grad_norm": 0.69688880443573,
"learning_rate": 2.7119708736486615e-09,
"loss": 0.011696261167526246,
"step": 6480
},
{
"epoch": 3.956468797564688,
"grad_norm": 0.2617769241333008,
"learning_rate": 1.6710713402015577e-09,
"loss": 0.010873865336179733,
"step": 6500
},
{
"epoch": 3.956468797564688,
"eval_loss": 0.441041499376297,
"eval_runtime": 46.1946,
"eval_samples_per_second": 19.05,
"eval_steps_per_second": 19.05,
"step": 6500
},
{
"epoch": 3.9686453576864533,
"grad_norm": 0.23670868575572968,
"learning_rate": 8.80894487179651e-10,
"loss": 0.012961818277835846,
"step": 6520
},
{
"epoch": 3.9808219178082194,
"grad_norm": 1.036125659942627,
"learning_rate": 3.4151961369188745e-10,
"loss": 0.01224210560321808,
"step": 6540
},
{
"epoch": 3.9929984779299845,
"grad_norm": 0.1431870311498642,
"learning_rate": 5.300084932574612e-11,
"loss": 0.010245455056428909,
"step": 6560
},
{
"epoch": 4.0,
"step": 6572,
"total_flos": 3.545203061907456e+17,
"train_loss": 0.08566030774240586,
"train_runtime": 13933.4985,
"train_samples_per_second": 3.772,
"train_steps_per_second": 0.472
}
],
"logging_steps": 20,
"max_steps": 6572,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.545203061907456e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}