{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.995409685563462,
"eval_steps": 500,
"global_step": 21780,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09180628873077806,
"grad_norm": 0.229265034198761,
"learning_rate": 0.0003,
"loss": 1.6148,
"step": 100
},
{
"epoch": 0.18361257746155613,
"grad_norm": 0.2753294110298157,
"learning_rate": 0.0003,
"loss": 1.5846,
"step": 200
},
{
"epoch": 0.2754188661923342,
"grad_norm": 0.2562897801399231,
"learning_rate": 0.0003,
"loss": 1.5631,
"step": 300
},
{
"epoch": 0.36722515492311225,
"grad_norm": 0.24621829390525818,
"learning_rate": 0.0003,
"loss": 1.5662,
"step": 400
},
{
"epoch": 0.4590314436538903,
"grad_norm": 0.2583877742290497,
"learning_rate": 0.0003,
"loss": 1.561,
"step": 500
},
{
"epoch": 0.5508377323846684,
"grad_norm": 0.275136262178421,
"learning_rate": 0.0003,
"loss": 1.5561,
"step": 600
},
{
"epoch": 0.6426440211154464,
"grad_norm": 0.2927466928958893,
"learning_rate": 0.0003,
"loss": 1.5552,
"step": 700
},
{
"epoch": 0.7344503098462245,
"grad_norm": 0.3323851525783539,
"learning_rate": 0.0003,
"loss": 1.5608,
"step": 800
},
{
"epoch": 0.8262565985770025,
"grad_norm": 0.33202576637268066,
"learning_rate": 0.0003,
"loss": 1.5254,
"step": 900
},
{
"epoch": 0.9180628873077806,
"grad_norm": 0.3486601412296295,
"learning_rate": 0.0003,
"loss": 1.5356,
"step": 1000
},
{
"epoch": 0.999770484278173,
"eval_accuracy": 0.6863755458515284,
"eval_loss": 1.3710707426071167,
"eval_runtime": 8.9317,
"eval_samples_per_second": 55.98,
"eval_steps_per_second": 7.054,
"step": 1089
},
{
"epoch": 1.0098691760385587,
"grad_norm": 0.34695035219192505,
"learning_rate": 0.0003,
"loss": 1.5044,
"step": 1100
},
{
"epoch": 1.1016754647693368,
"grad_norm": 0.3605581820011139,
"learning_rate": 0.0003,
"loss": 1.3328,
"step": 1200
},
{
"epoch": 1.1934817535001148,
"grad_norm": 0.37978020310401917,
"learning_rate": 0.0003,
"loss": 1.3246,
"step": 1300
},
{
"epoch": 1.2852880422308928,
"grad_norm": 0.40200668573379517,
"learning_rate": 0.0003,
"loss": 1.3183,
"step": 1400
},
{
"epoch": 1.377094330961671,
"grad_norm": 0.49451494216918945,
"learning_rate": 0.0003,
"loss": 1.3153,
"step": 1500
},
{
"epoch": 1.468900619692449,
"grad_norm": 0.4396137297153473,
"learning_rate": 0.0003,
"loss": 1.3301,
"step": 1600
},
{
"epoch": 1.560706908423227,
"grad_norm": 0.4403189718723297,
"learning_rate": 0.0003,
"loss": 1.2971,
"step": 1700
},
{
"epoch": 1.652513197154005,
"grad_norm": 0.4628995656967163,
"learning_rate": 0.0003,
"loss": 1.3159,
"step": 1800
},
{
"epoch": 1.744319485884783,
"grad_norm": 0.4826644957065582,
"learning_rate": 0.0003,
"loss": 1.3027,
"step": 1900
},
{
"epoch": 1.836125774615561,
"grad_norm": 0.48213547468185425,
"learning_rate": 0.0003,
"loss": 1.2977,
"step": 2000
},
{
"epoch": 1.9279320633463393,
"grad_norm": 0.5228666067123413,
"learning_rate": 0.0003,
"loss": 1.3102,
"step": 2100
},
{
"epoch": 1.999540968556346,
"eval_accuracy": 0.7020116448326056,
"eval_loss": 1.1752803325653076,
"eval_runtime": 8.9457,
"eval_samples_per_second": 55.892,
"eval_steps_per_second": 7.042,
"step": 2178
},
{
"epoch": 2.0197383520771175,
"grad_norm": 0.4973185360431671,
"learning_rate": 0.0003,
"loss": 1.2434,
"step": 2200
},
{
"epoch": 2.1115446408078955,
"grad_norm": 0.5200226902961731,
"learning_rate": 0.0003,
"loss": 1.0191,
"step": 2300
},
{
"epoch": 2.2033509295386735,
"grad_norm": 0.6249756217002869,
"learning_rate": 0.0003,
"loss": 1.0435,
"step": 2400
},
{
"epoch": 2.2951572182694515,
"grad_norm": 0.5348188877105713,
"learning_rate": 0.0003,
"loss": 1.0434,
"step": 2500
},
{
"epoch": 2.3869635070002295,
"grad_norm": 0.6106544733047485,
"learning_rate": 0.0003,
"loss": 1.0546,
"step": 2600
},
{
"epoch": 2.4787697957310075,
"grad_norm": 0.5895304679870605,
"learning_rate": 0.0003,
"loss": 1.0437,
"step": 2700
},
{
"epoch": 2.5705760844617855,
"grad_norm": 0.5809823274612427,
"learning_rate": 0.0003,
"loss": 1.0628,
"step": 2800
},
{
"epoch": 2.6623823731925635,
"grad_norm": 0.6009641289710999,
"learning_rate": 0.0003,
"loss": 1.0478,
"step": 2900
},
{
"epoch": 2.754188661923342,
"grad_norm": 0.679328978061676,
"learning_rate": 0.0003,
"loss": 1.0674,
"step": 3000
},
{
"epoch": 2.84599495065412,
"grad_norm": 0.5471652746200562,
"learning_rate": 0.0003,
"loss": 1.0462,
"step": 3100
},
{
"epoch": 2.937801239384898,
"grad_norm": 0.6065019965171814,
"learning_rate": 0.0003,
"loss": 1.0549,
"step": 3200
},
{
"epoch": 2.9993114528345193,
"eval_accuracy": 0.716448326055313,
"eval_loss": 1.0094784498214722,
"eval_runtime": 8.9265,
"eval_samples_per_second": 56.013,
"eval_steps_per_second": 7.058,
"step": 3267
},
{
"epoch": 3.029607528115676,
"grad_norm": 0.6480036973953247,
"learning_rate": 0.0003,
"loss": 0.9648,
"step": 3300
},
{
"epoch": 3.121413816846454,
"grad_norm": 0.7049514651298523,
"learning_rate": 0.0003,
"loss": 0.7919,
"step": 3400
},
{
"epoch": 3.213220105577232,
"grad_norm": 0.7723010182380676,
"learning_rate": 0.0003,
"loss": 0.804,
"step": 3500
},
{
"epoch": 3.30502639430801,
"grad_norm": 0.7994470596313477,
"learning_rate": 0.0003,
"loss": 0.8323,
"step": 3600
},
{
"epoch": 3.396832683038788,
"grad_norm": 0.7567194700241089,
"learning_rate": 0.0003,
"loss": 0.8261,
"step": 3700
},
{
"epoch": 3.488638971769566,
"grad_norm": 0.7832695245742798,
"learning_rate": 0.0003,
"loss": 0.8337,
"step": 3800
},
{
"epoch": 3.580445260500344,
"grad_norm": 0.7570985555648804,
"learning_rate": 0.0003,
"loss": 0.8334,
"step": 3900
},
{
"epoch": 3.672251549231122,
"grad_norm": 0.7717698216438293,
"learning_rate": 0.0003,
"loss": 0.8455,
"step": 4000
},
{
"epoch": 3.7640578379619005,
"grad_norm": 0.8522344827651978,
"learning_rate": 0.0003,
"loss": 0.8448,
"step": 4100
},
{
"epoch": 3.8558641266926785,
"grad_norm": 0.8296842575073242,
"learning_rate": 0.0003,
"loss": 0.8534,
"step": 4200
},
{
"epoch": 3.9476704154234565,
"grad_norm": 0.7863412499427795,
"learning_rate": 0.0003,
"loss": 0.8461,
"step": 4300
},
{
"epoch": 4.0,
"eval_accuracy": 0.7297409024745269,
"eval_loss": 0.8722099661827087,
"eval_runtime": 8.8823,
"eval_samples_per_second": 56.292,
"eval_steps_per_second": 7.093,
"step": 4357
},
{
"epoch": 4.039476704154235,
"grad_norm": 0.7983632683753967,
"learning_rate": 0.0003,
"loss": 0.7547,
"step": 4400
},
{
"epoch": 4.131282992885013,
"grad_norm": 0.699720561504364,
"learning_rate": 0.0003,
"loss": 0.618,
"step": 4500
},
{
"epoch": 4.223089281615791,
"grad_norm": 0.8726249933242798,
"learning_rate": 0.0003,
"loss": 0.6373,
"step": 4600
},
{
"epoch": 4.314895570346569,
"grad_norm": 0.8412553071975708,
"learning_rate": 0.0003,
"loss": 0.6582,
"step": 4700
},
{
"epoch": 4.406701859077347,
"grad_norm": 0.86082923412323,
"learning_rate": 0.0003,
"loss": 0.6612,
"step": 4800
},
{
"epoch": 4.498508147808125,
"grad_norm": 0.8449448943138123,
"learning_rate": 0.0003,
"loss": 0.6704,
"step": 4900
},
{
"epoch": 4.590314436538903,
"grad_norm": 0.9901677370071411,
"learning_rate": 0.0003,
"loss": 0.683,
"step": 5000
},
{
"epoch": 4.682120725269681,
"grad_norm": 0.884429395198822,
"learning_rate": 0.0003,
"loss": 0.6903,
"step": 5100
},
{
"epoch": 4.773927014000459,
"grad_norm": 0.8438558578491211,
"learning_rate": 0.0003,
"loss": 0.693,
"step": 5200
},
{
"epoch": 4.865733302731237,
"grad_norm": 0.8200265169143677,
"learning_rate": 0.0003,
"loss": 0.6986,
"step": 5300
},
{
"epoch": 4.957539591462015,
"grad_norm": 0.9410732984542847,
"learning_rate": 0.0003,
"loss": 0.701,
"step": 5400
},
{
"epoch": 4.999770484278173,
"eval_accuracy": 0.7405647743813683,
"eval_loss": 0.7641452550888062,
"eval_runtime": 8.8775,
"eval_samples_per_second": 56.322,
"eval_steps_per_second": 7.097,
"step": 5446
},
{
"epoch": 5.049345880192793,
"grad_norm": 0.8364540934562683,
"learning_rate": 0.0003,
"loss": 0.5945,
"step": 5500
},
{
"epoch": 5.141152168923571,
"grad_norm": 0.8985306620597839,
"learning_rate": 0.0003,
"loss": 0.5072,
"step": 5600
},
{
"epoch": 5.232958457654349,
"grad_norm": 0.9036381244659424,
"learning_rate": 0.0003,
"loss": 0.5187,
"step": 5700
},
{
"epoch": 5.324764746385127,
"grad_norm": 0.9836907386779785,
"learning_rate": 0.0003,
"loss": 0.5468,
"step": 5800
},
{
"epoch": 5.416571035115905,
"grad_norm": 0.8932607769966125,
"learning_rate": 0.0003,
"loss": 0.546,
"step": 5900
},
{
"epoch": 5.508377323846684,
"grad_norm": 0.9328716397285461,
"learning_rate": 0.0003,
"loss": 0.5636,
"step": 6000
},
{
"epoch": 5.600183612577462,
"grad_norm": 1.0993613004684448,
"learning_rate": 0.0003,
"loss": 0.567,
"step": 6100
},
{
"epoch": 5.69198990130824,
"grad_norm": 0.9644467830657959,
"learning_rate": 0.0003,
"loss": 0.5853,
"step": 6200
},
{
"epoch": 5.783796190039018,
"grad_norm": 1.0126985311508179,
"learning_rate": 0.0003,
"loss": 0.5883,
"step": 6300
},
{
"epoch": 5.875602478769796,
"grad_norm": 0.9119430184364319,
"learning_rate": 0.0003,
"loss": 0.5853,
"step": 6400
},
{
"epoch": 5.967408767500574,
"grad_norm": 0.88072669506073,
"learning_rate": 0.0003,
"loss": 0.5977,
"step": 6500
},
{
"epoch": 5.999540968556346,
"eval_accuracy": 0.7490451237263465,
"eval_loss": 0.6796724796295166,
"eval_runtime": 8.9191,
"eval_samples_per_second": 56.059,
"eval_steps_per_second": 7.063,
"step": 6535
},
{
"epoch": 6.059215056231352,
"grad_norm": 0.9065115451812744,
"learning_rate": 0.0003,
"loss": 0.4864,
"step": 6600
},
{
"epoch": 6.15102134496213,
"grad_norm": 0.8660620450973511,
"learning_rate": 0.0003,
"loss": 0.4337,
"step": 6700
},
{
"epoch": 6.242827633692908,
"grad_norm": 0.9163237810134888,
"learning_rate": 0.0003,
"loss": 0.4496,
"step": 6800
},
{
"epoch": 6.334633922423686,
"grad_norm": 0.8863268494606018,
"learning_rate": 0.0003,
"loss": 0.4626,
"step": 6900
},
{
"epoch": 6.426440211154464,
"grad_norm": 0.9574721455574036,
"learning_rate": 0.0003,
"loss": 0.4722,
"step": 7000
},
{
"epoch": 6.518246499885242,
"grad_norm": 1.096871256828308,
"learning_rate": 0.0003,
"loss": 0.4887,
"step": 7100
},
{
"epoch": 6.61005278861602,
"grad_norm": 1.0900388956069946,
"learning_rate": 0.0003,
"loss": 0.4958,
"step": 7200
},
{
"epoch": 6.701859077346798,
"grad_norm": 0.9678546190261841,
"learning_rate": 0.0003,
"loss": 0.4991,
"step": 7300
},
{
"epoch": 6.793665366077576,
"grad_norm": 1.0782684087753296,
"learning_rate": 0.0003,
"loss": 0.5068,
"step": 7400
},
{
"epoch": 6.885471654808354,
"grad_norm": 1.0213241577148438,
"learning_rate": 0.0003,
"loss": 0.512,
"step": 7500
},
{
"epoch": 6.977277943539132,
"grad_norm": 1.049910545349121,
"learning_rate": 0.0003,
"loss": 0.5238,
"step": 7600
},
{
"epoch": 6.999311452834519,
"eval_accuracy": 0.7558719068413392,
"eval_loss": 0.620939314365387,
"eval_runtime": 9.0834,
"eval_samples_per_second": 55.045,
"eval_steps_per_second": 6.936,
"step": 7624
},
{
"epoch": 7.06908423226991,
"grad_norm": 0.9470316767692566,
"learning_rate": 0.0003,
"loss": 0.4131,
"step": 7700
},
{
"epoch": 7.160890521000688,
"grad_norm": 0.8299586772918701,
"learning_rate": 0.0003,
"loss": 0.39,
"step": 7800
},
{
"epoch": 7.252696809731467,
"grad_norm": 0.9369521141052246,
"learning_rate": 0.0003,
"loss": 0.3953,
"step": 7900
},
{
"epoch": 7.344503098462245,
"grad_norm": 1.0843796730041504,
"learning_rate": 0.0003,
"loss": 0.4144,
"step": 8000
},
{
"epoch": 7.436309387193023,
"grad_norm": 0.9615817666053772,
"learning_rate": 0.0003,
"loss": 0.4266,
"step": 8100
},
{
"epoch": 7.528115675923801,
"grad_norm": 0.9017691612243652,
"learning_rate": 0.0003,
"loss": 0.4411,
"step": 8200
},
{
"epoch": 7.619921964654579,
"grad_norm": 1.0230755805969238,
"learning_rate": 0.0003,
"loss": 0.4392,
"step": 8300
},
{
"epoch": 7.711728253385357,
"grad_norm": 1.0185121297836304,
"learning_rate": 0.0003,
"loss": 0.4489,
"step": 8400
},
{
"epoch": 7.803534542116135,
"grad_norm": 1.003201961517334,
"learning_rate": 0.0003,
"loss": 0.4475,
"step": 8500
},
{
"epoch": 7.895340830846913,
"grad_norm": 0.8930547833442688,
"learning_rate": 0.0003,
"loss": 0.4569,
"step": 8600
},
{
"epoch": 7.987147119577691,
"grad_norm": 1.1096137762069702,
"learning_rate": 0.0003,
"loss": 0.4742,
"step": 8700
},
{
"epoch": 8.0,
"eval_accuracy": 0.7599941775836973,
"eval_loss": 0.5836606025695801,
"eval_runtime": 8.8891,
"eval_samples_per_second": 56.249,
"eval_steps_per_second": 7.087,
"step": 8714
},
{
"epoch": 8.07895340830847,
"grad_norm": 0.9343357086181641,
"learning_rate": 0.0003,
"loss": 0.3623,
"step": 8800
},
{
"epoch": 8.170759697039248,
"grad_norm": 1.0631523132324219,
"learning_rate": 0.0003,
"loss": 0.3456,
"step": 8900
},
{
"epoch": 8.262565985770026,
"grad_norm": 0.9419318437576294,
"learning_rate": 0.0003,
"loss": 0.3648,
"step": 9000
},
{
"epoch": 8.354372274500804,
"grad_norm": 0.8649080395698547,
"learning_rate": 0.0003,
"loss": 0.3758,
"step": 9100
},
{
"epoch": 8.446178563231582,
"grad_norm": 0.9168979525566101,
"learning_rate": 0.0003,
"loss": 0.3843,
"step": 9200
},
{
"epoch": 8.53798485196236,
"grad_norm": 0.9176032543182373,
"learning_rate": 0.0003,
"loss": 0.3942,
"step": 9300
},
{
"epoch": 8.629791140693138,
"grad_norm": 0.9991121292114258,
"learning_rate": 0.0003,
"loss": 0.4084,
"step": 9400
},
{
"epoch": 8.721597429423916,
"grad_norm": 1.0064568519592285,
"learning_rate": 0.0003,
"loss": 0.4115,
"step": 9500
},
{
"epoch": 8.813403718154694,
"grad_norm": 1.0827409029006958,
"learning_rate": 0.0003,
"loss": 0.4254,
"step": 9600
},
{
"epoch": 8.905210006885472,
"grad_norm": 1.1357035636901855,
"learning_rate": 0.0003,
"loss": 0.425,
"step": 9700
},
{
"epoch": 8.99701629561625,
"grad_norm": 1.0116100311279297,
"learning_rate": 0.0003,
"loss": 0.438,
"step": 9800
},
{
"epoch": 8.999770484278173,
"eval_accuracy": 0.7638136826783115,
"eval_loss": 0.5532092452049255,
"eval_runtime": 8.896,
"eval_samples_per_second": 56.205,
"eval_steps_per_second": 7.082,
"step": 9803
},
{
"epoch": 9.088822584347028,
"grad_norm": 0.943336546421051,
"learning_rate": 0.0003,
"loss": 0.3196,
"step": 9900
},
{
"epoch": 9.180628873077806,
"grad_norm": 0.966768205165863,
"learning_rate": 0.0003,
"loss": 0.3288,
"step": 10000
},
{
"epoch": 9.272435161808584,
"grad_norm": 1.0522419214248657,
"learning_rate": 0.0003,
"loss": 0.3416,
"step": 10100
},
{
"epoch": 9.364241450539362,
"grad_norm": 0.9877403378486633,
"learning_rate": 0.0003,
"loss": 0.3522,
"step": 10200
},
{
"epoch": 9.45604773927014,
"grad_norm": 1.1037845611572266,
"learning_rate": 0.0003,
"loss": 0.3595,
"step": 10300
},
{
"epoch": 9.547854028000918,
"grad_norm": 1.1503223180770874,
"learning_rate": 0.0003,
"loss": 0.3734,
"step": 10400
},
{
"epoch": 9.639660316731696,
"grad_norm": 1.0321091413497925,
"learning_rate": 0.0003,
"loss": 0.3808,
"step": 10500
},
{
"epoch": 9.731466605462474,
"grad_norm": 0.9578890204429626,
"learning_rate": 0.0003,
"loss": 0.3878,
"step": 10600
},
{
"epoch": 9.823272894193252,
"grad_norm": 1.0930492877960205,
"learning_rate": 0.0003,
"loss": 0.3943,
"step": 10700
},
{
"epoch": 9.91507918292403,
"grad_norm": 1.2598655223846436,
"learning_rate": 0.0003,
"loss": 0.402,
"step": 10800
},
{
"epoch": 9.999540968556346,
"eval_accuracy": 0.7663522561863173,
"eval_loss": 0.5331353545188904,
"eval_runtime": 8.8958,
"eval_samples_per_second": 56.206,
"eval_steps_per_second": 7.082,
"step": 10892
},
{
"epoch": 10.006885471654808,
"grad_norm": 0.9872549176216125,
"learning_rate": 0.0003,
"loss": 0.4016,
"step": 10900
},
{
"epoch": 10.098691760385586,
"grad_norm": 0.9196072220802307,
"learning_rate": 0.0003,
"loss": 0.2985,
"step": 11000
},
{
"epoch": 10.190498049116364,
"grad_norm": 0.9028146266937256,
"learning_rate": 0.0003,
"loss": 0.311,
"step": 11100
},
{
"epoch": 10.282304337847142,
"grad_norm": 0.9019381999969482,
"learning_rate": 0.0003,
"loss": 0.3199,
"step": 11200
},
{
"epoch": 10.37411062657792,
"grad_norm": 1.0821175575256348,
"learning_rate": 0.0003,
"loss": 0.3366,
"step": 11300
},
{
"epoch": 10.465916915308698,
"grad_norm": 0.9856448769569397,
"learning_rate": 0.0003,
"loss": 0.3391,
"step": 11400
},
{
"epoch": 10.557723204039476,
"grad_norm": 1.0605835914611816,
"learning_rate": 0.0003,
"loss": 0.3512,
"step": 11500
},
{
"epoch": 10.649529492770254,
"grad_norm": 0.9575846791267395,
"learning_rate": 0.0003,
"loss": 0.3546,
"step": 11600
},
{
"epoch": 10.741335781501032,
"grad_norm": 1.0335845947265625,
"learning_rate": 0.0003,
"loss": 0.3692,
"step": 11700
},
{
"epoch": 10.83314207023181,
"grad_norm": 1.131901502609253,
"learning_rate": 0.0003,
"loss": 0.3747,
"step": 11800
},
{
"epoch": 10.924948358962588,
"grad_norm": 0.9360109567642212,
"learning_rate": 0.0003,
"loss": 0.383,
"step": 11900
},
{
"epoch": 10.999311452834519,
"eval_accuracy": 0.7685181950509461,
"eval_loss": 0.5156339406967163,
"eval_runtime": 8.925,
"eval_samples_per_second": 56.022,
"eval_steps_per_second": 7.059,
"step": 11981
},
{
"epoch": 11.016754647693366,
"grad_norm": 0.8987607359886169,
"learning_rate": 0.0003,
"loss": 0.3677,
"step": 12000
},
{
"epoch": 11.108560936424144,
"grad_norm": 0.882723867893219,
"learning_rate": 0.0003,
"loss": 0.2865,
"step": 12100
},
{
"epoch": 11.200367225154924,
"grad_norm": 0.9493284821510315,
"learning_rate": 0.0003,
"loss": 0.2998,
"step": 12200
},
{
"epoch": 11.292173513885702,
"grad_norm": 0.9848419427871704,
"learning_rate": 0.0003,
"loss": 0.3062,
"step": 12300
},
{
"epoch": 11.38397980261648,
"grad_norm": 0.9860394597053528,
"learning_rate": 0.0003,
"loss": 0.3197,
"step": 12400
},
{
"epoch": 11.475786091347258,
"grad_norm": 1.246336579322815,
"learning_rate": 0.0003,
"loss": 0.3318,
"step": 12500
},
{
"epoch": 11.567592380078036,
"grad_norm": 1.0225517749786377,
"learning_rate": 0.0003,
"loss": 0.3333,
"step": 12600
},
{
"epoch": 11.659398668808814,
"grad_norm": 1.043228268623352,
"learning_rate": 0.0003,
"loss": 0.3496,
"step": 12700
},
{
"epoch": 11.751204957539592,
"grad_norm": 1.0331432819366455,
"learning_rate": 0.0003,
"loss": 0.3561,
"step": 12800
},
{
"epoch": 11.84301124627037,
"grad_norm": 1.009142279624939,
"learning_rate": 0.0003,
"loss": 0.3591,
"step": 12900
},
{
"epoch": 11.934817535001148,
"grad_norm": 0.9662990570068359,
"learning_rate": 0.0003,
"loss": 0.3627,
"step": 13000
},
{
"epoch": 12.0,
"eval_accuracy": 0.7702008733624454,
"eval_loss": 0.5070006251335144,
"eval_runtime": 8.8961,
"eval_samples_per_second": 56.204,
"eval_steps_per_second": 7.082,
"step": 13071
},
{
"epoch": 12.026623823731926,
"grad_norm": 0.8747033476829529,
"learning_rate": 0.0003,
"loss": 0.3406,
"step": 13100
},
{
"epoch": 12.118430112462704,
"grad_norm": 1.044632911682129,
"learning_rate": 0.0003,
"loss": 0.2725,
"step": 13200
},
{
"epoch": 12.210236401193482,
"grad_norm": 1.118489384651184,
"learning_rate": 0.0003,
"loss": 0.2867,
"step": 13300
},
{
"epoch": 12.30204268992426,
"grad_norm": 1.1297811269760132,
"learning_rate": 0.0003,
"loss": 0.3031,
"step": 13400
},
{
"epoch": 12.393848978655038,
"grad_norm": 1.005750298500061,
"learning_rate": 0.0003,
"loss": 0.308,
"step": 13500
},
{
"epoch": 12.485655267385816,
"grad_norm": 1.1464134454727173,
"learning_rate": 0.0003,
"loss": 0.3173,
"step": 13600
},
{
"epoch": 12.577461556116594,
"grad_norm": 1.0724166631698608,
"learning_rate": 0.0003,
"loss": 0.3263,
"step": 13700
},
{
"epoch": 12.669267844847372,
"grad_norm": 0.9200640320777893,
"learning_rate": 0.0003,
"loss": 0.3348,
"step": 13800
},
{
"epoch": 12.76107413357815,
"grad_norm": 1.012441635131836,
"learning_rate": 0.0003,
"loss": 0.34,
"step": 13900
},
{
"epoch": 12.852880422308928,
"grad_norm": 1.1097270250320435,
"learning_rate": 0.0003,
"loss": 0.3515,
"step": 14000
},
{
"epoch": 12.944686711039706,
"grad_norm": 1.0985119342803955,
"learning_rate": 0.0003,
"loss": 0.3521,
"step": 14100
},
{
"epoch": 12.999770484278173,
"eval_accuracy": 0.7713799126637555,
"eval_loss": 0.4984254539012909,
"eval_runtime": 7.8448,
"eval_samples_per_second": 63.737,
"eval_steps_per_second": 8.031,
"step": 14160
},
{
"epoch": 13.036492999770484,
"grad_norm": 0.8010920882225037,
"learning_rate": 0.0003,
"loss": 0.3178,
"step": 14200
},
{
"epoch": 13.128299288501262,
"grad_norm": 0.7816724181175232,
"learning_rate": 0.0003,
"loss": 0.2667,
"step": 14300
},
{
"epoch": 13.22010557723204,
"grad_norm": 0.9443672895431519,
"learning_rate": 0.0003,
"loss": 0.2779,
"step": 14400
},
{
"epoch": 13.311911865962818,
"grad_norm": 0.9471251964569092,
"learning_rate": 0.0003,
"loss": 0.2889,
"step": 14500
},
{
"epoch": 13.403718154693596,
"grad_norm": 1.0766810178756714,
"learning_rate": 0.0003,
"loss": 0.301,
"step": 14600
},
{
"epoch": 13.495524443424374,
"grad_norm": 1.0341782569885254,
"learning_rate": 0.0003,
"loss": 0.3079,
"step": 14700
},
{
"epoch": 13.587330732155152,
"grad_norm": 0.9588432312011719,
"learning_rate": 0.0003,
"loss": 0.3184,
"step": 14800
},
{
"epoch": 13.67913702088593,
"grad_norm": 1.0522781610488892,
"learning_rate": 0.0003,
"loss": 0.3228,
"step": 14900
},
{
"epoch": 13.770943309616708,
"grad_norm": 1.0346490144729614,
"learning_rate": 0.0003,
"loss": 0.33,
"step": 15000
},
{
"epoch": 13.862749598347486,
"grad_norm": 1.129906177520752,
"learning_rate": 0.0003,
"loss": 0.3378,
"step": 15100
},
{
"epoch": 13.954555887078264,
"grad_norm": 0.9843702912330627,
"learning_rate": 0.0003,
"loss": 0.344,
"step": 15200
},
{
"epoch": 13.999540968556346,
"eval_accuracy": 0.7721863173216885,
"eval_loss": 0.4925004243850708,
"eval_runtime": 8.9168,
"eval_samples_per_second": 56.074,
"eval_steps_per_second": 7.065,
"step": 15249
},
{
"epoch": 14.046362175809042,
"grad_norm": 0.9825355410575867,
"learning_rate": 0.0003,
"loss": 0.2998,
"step": 15300
},
{
"epoch": 14.13816846453982,
"grad_norm": 1.0309946537017822,
"learning_rate": 0.0003,
"loss": 0.2621,
"step": 15400
},
{
"epoch": 14.229974753270598,
"grad_norm": 0.8775806427001953,
"learning_rate": 0.0003,
"loss": 0.2744,
"step": 15500
},
{
"epoch": 14.321781042001376,
"grad_norm": 0.9635316133499146,
"learning_rate": 0.0003,
"loss": 0.2828,
"step": 15600
},
{
"epoch": 14.413587330732156,
"grad_norm": 1.0143553018569946,
"learning_rate": 0.0003,
"loss": 0.29,
"step": 15700
},
{
"epoch": 14.505393619462934,
"grad_norm": 1.0207178592681885,
"learning_rate": 0.0003,
"loss": 0.3022,
"step": 15800
},
{
"epoch": 14.597199908193712,
"grad_norm": 1.07411527633667,
"learning_rate": 0.0003,
"loss": 0.3113,
"step": 15900
},
{
"epoch": 14.68900619692449,
"grad_norm": 1.0961089134216309,
"learning_rate": 0.0003,
"loss": 0.3153,
"step": 16000
},
{
"epoch": 14.780812485655268,
"grad_norm": 0.9983770847320557,
"learning_rate": 0.0003,
"loss": 0.323,
"step": 16100
},
{
"epoch": 14.872618774386046,
"grad_norm": 1.0680583715438843,
"learning_rate": 0.0003,
"loss": 0.3275,
"step": 16200
},
{
"epoch": 14.964425063116824,
"grad_norm": 1.0475704669952393,
"learning_rate": 0.0003,
"loss": 0.3341,
"step": 16300
},
{
"epoch": 14.999311452834519,
"eval_accuracy": 0.7735604075691412,
"eval_loss": 0.4847230315208435,
"eval_runtime": 9.1634,
"eval_samples_per_second": 54.565,
"eval_steps_per_second": 6.875,
"step": 16338
},
{
"epoch": 15.056231351847602,
"grad_norm": 0.7305497527122498,
"learning_rate": 0.0003,
"loss": 0.2829,
"step": 16400
},
{
"epoch": 15.14803764057838,
"grad_norm": 0.936439037322998,
"learning_rate": 0.0003,
"loss": 0.2585,
"step": 16500
},
{
"epoch": 15.239843929309158,
"grad_norm": 0.9285767078399658,
"learning_rate": 0.0003,
"loss": 0.262,
"step": 16600
},
{
"epoch": 15.331650218039936,
"grad_norm": 1.0587061643600464,
"learning_rate": 0.0003,
"loss": 0.2795,
"step": 16700
},
{
"epoch": 15.423456506770714,
"grad_norm": 1.0315121412277222,
"learning_rate": 0.0003,
"loss": 0.2858,
"step": 16800
},
{
"epoch": 15.515262795501492,
"grad_norm": 1.007581114768982,
"learning_rate": 0.0003,
"loss": 0.2941,
"step": 16900
},
{
"epoch": 15.60706908423227,
"grad_norm": 1.123679280281067,
"learning_rate": 0.0003,
"loss": 0.2998,
"step": 17000
},
{
"epoch": 15.698875372963048,
"grad_norm": 1.245398759841919,
"learning_rate": 0.0003,
"loss": 0.3086,
"step": 17100
},
{
"epoch": 15.790681661693826,
"grad_norm": 0.9799935817718506,
"learning_rate": 0.0003,
"loss": 0.3133,
"step": 17200
},
{
"epoch": 15.882487950424604,
"grad_norm": 1.1230483055114746,
"learning_rate": 0.0003,
"loss": 0.3251,
"step": 17300
},
{
"epoch": 15.974294239155382,
"grad_norm": 1.2086437940597534,
"learning_rate": 0.0003,
"loss": 0.3275,
"step": 17400
},
{
"epoch": 16.0,
"eval_accuracy": 0.7748180494905386,
"eval_loss": 0.4808007478713989,
"eval_runtime": 8.8958,
"eval_samples_per_second": 56.206,
"eval_steps_per_second": 7.082,
"step": 17428
},
{
"epoch": 16.06610052788616,
"grad_norm": 0.8386597633361816,
"learning_rate": 0.0003,
"loss": 0.2679,
"step": 17500
},
{
"epoch": 16.15790681661694,
"grad_norm": 0.865949273109436,
"learning_rate": 0.0003,
"loss": 0.2542,
"step": 17600
},
{
"epoch": 16.249713105347716,
"grad_norm": 1.052322506904602,
"learning_rate": 0.0003,
"loss": 0.2646,
"step": 17700
},
{
"epoch": 16.341519394078496,
"grad_norm": 0.8787432312965393,
"learning_rate": 0.0003,
"loss": 0.2671,
"step": 17800
},
{
"epoch": 16.433325682809272,
"grad_norm": 0.9127426147460938,
"learning_rate": 0.0003,
"loss": 0.2772,
"step": 17900
},
{
"epoch": 16.525131971540052,
"grad_norm": 0.9554562568664551,
"learning_rate": 0.0003,
"loss": 0.286,
"step": 18000
},
{
"epoch": 16.616938260270828,
"grad_norm": 0.9382279515266418,
"learning_rate": 0.0003,
"loss": 0.2972,
"step": 18100
},
{
"epoch": 16.708744549001608,
"grad_norm": 0.928916335105896,
"learning_rate": 0.0003,
"loss": 0.3031,
"step": 18200
},
{
"epoch": 16.800550837732384,
"grad_norm": 0.9512932300567627,
"learning_rate": 0.0003,
"loss": 0.3058,
"step": 18300
},
{
"epoch": 16.892357126463164,
"grad_norm": 1.2069571018218994,
"learning_rate": 0.0003,
"loss": 0.3168,
"step": 18400
},
{
"epoch": 16.98416341519394,
"grad_norm": 1.1233407258987427,
"learning_rate": 0.0003,
"loss": 0.3223,
"step": 18500
},
{
"epoch": 16.999770484278173,
"eval_accuracy": 0.7750887918486172,
"eval_loss": 0.4775759279727936,
"eval_runtime": 8.8897,
"eval_samples_per_second": 56.245,
"eval_steps_per_second": 7.087,
"step": 18517
},
{
"epoch": 17.07596970392472,
"grad_norm": 0.9757447838783264,
"learning_rate": 0.0003,
"loss": 0.2534,
"step": 18600
},
{
"epoch": 17.167775992655496,
"grad_norm": 0.7774830460548401,
"learning_rate": 0.0003,
"loss": 0.2487,
"step": 18700
},
{
"epoch": 17.259582281386276,
"grad_norm": 0.9797073602676392,
"learning_rate": 0.0003,
"loss": 0.2564,
"step": 18800
},
{
"epoch": 17.351388570117052,
"grad_norm": 0.9221961498260498,
"learning_rate": 0.0003,
"loss": 0.2675,
"step": 18900
},
{
"epoch": 17.443194858847832,
"grad_norm": 0.9980019927024841,
"learning_rate": 0.0003,
"loss": 0.2759,
"step": 19000
},
{
"epoch": 17.53500114757861,
"grad_norm": 1.1230412721633911,
"learning_rate": 0.0003,
"loss": 0.2884,
"step": 19100
},
{
"epoch": 17.626807436309388,
"grad_norm": 1.0681424140930176,
"learning_rate": 0.0003,
"loss": 0.2894,
"step": 19200
},
{
"epoch": 17.718613725040164,
"grad_norm": 1.0071791410446167,
"learning_rate": 0.0003,
"loss": 0.294,
"step": 19300
},
{
"epoch": 17.810420013770944,
"grad_norm": 1.0229710340499878,
"learning_rate": 0.0003,
"loss": 0.305,
"step": 19400
},
{
"epoch": 17.90222630250172,
"grad_norm": 1.1899082660675049,
"learning_rate": 0.0003,
"loss": 0.3114,
"step": 19500
},
{
"epoch": 17.9940325912325,
"grad_norm": 1.1344189643859863,
"learning_rate": 0.0003,
"loss": 0.3155,
"step": 19600
},
{
"epoch": 17.999540968556346,
"eval_accuracy": 0.7757671033478893,
"eval_loss": 0.4804111421108246,
"eval_runtime": 8.8913,
"eval_samples_per_second": 56.234,
"eval_steps_per_second": 7.086,
"step": 19606
},
{
"epoch": 18.085838879963276,
"grad_norm": 0.9845724105834961,
"learning_rate": 0.0003,
"loss": 0.2418,
"step": 19700
},
{
"epoch": 18.177645168694056,
"grad_norm": 0.879173994064331,
"learning_rate": 0.0003,
"loss": 0.2453,
"step": 19800
},
{
"epoch": 18.269451457424832,
"grad_norm": 0.8902878761291504,
"learning_rate": 0.0003,
"loss": 0.2545,
"step": 19900
},
{
"epoch": 18.361257746155612,
"grad_norm": 1.160914421081543,
"learning_rate": 0.0003,
"loss": 0.2648,
"step": 20000
},
{
"epoch": 18.45306403488639,
"grad_norm": 0.9650329947471619,
"learning_rate": 0.0003,
"loss": 0.271,
"step": 20100
},
{
"epoch": 18.544870323617168,
"grad_norm": 1.0495669841766357,
"learning_rate": 0.0003,
"loss": 0.2782,
"step": 20200
},
{
"epoch": 18.636676612347944,
"grad_norm": 0.9071341156959534,
"learning_rate": 0.0003,
"loss": 0.2886,
"step": 20300
},
{
"epoch": 18.728482901078724,
"grad_norm": 1.0429463386535645,
"learning_rate": 0.0003,
"loss": 0.2934,
"step": 20400
},
{
"epoch": 18.8202891898095,
"grad_norm": 1.096994400024414,
"learning_rate": 0.0003,
"loss": 0.2992,
"step": 20500
},
{
"epoch": 18.91209547854028,
"grad_norm": 1.0189508199691772,
"learning_rate": 0.0003,
"loss": 0.3033,
"step": 20600
},
{
"epoch": 18.99931145283452,
"eval_accuracy": 0.7760727802037846,
"eval_loss": 0.4787416458129883,
"eval_runtime": 9.278,
"eval_samples_per_second": 53.891,
"eval_steps_per_second": 6.79,
"step": 20695
},
{
"epoch": 19.003901767271056,
"grad_norm": 0.8995434045791626,
"learning_rate": 0.0003,
"loss": 0.3075,
"step": 20700
},
{
"epoch": 19.095708056001836,
"grad_norm": 0.8744321465492249,
"learning_rate": 0.0003,
"loss": 0.2342,
"step": 20800
},
{
"epoch": 19.187514344732612,
"grad_norm": 0.9471040964126587,
"learning_rate": 0.0003,
"loss": 0.2422,
"step": 20900
},
{
"epoch": 19.279320633463392,
"grad_norm": 1.0133309364318848,
"learning_rate": 0.0003,
"loss": 0.2533,
"step": 21000
},
{
"epoch": 19.371126922194172,
"grad_norm": 1.0121233463287354,
"learning_rate": 0.0003,
"loss": 0.2607,
"step": 21100
},
{
"epoch": 19.462933210924948,
"grad_norm": 1.1256521940231323,
"learning_rate": 0.0003,
"loss": 0.2706,
"step": 21200
},
{
"epoch": 19.554739499655728,
"grad_norm": 1.0843769311904907,
"learning_rate": 0.0003,
"loss": 0.2735,
"step": 21300
},
{
"epoch": 19.646545788386504,
"grad_norm": 1.1019864082336426,
"learning_rate": 0.0003,
"loss": 0.286,
"step": 21400
},
{
"epoch": 19.738352077117284,
"grad_norm": 1.135688066482544,
"learning_rate": 0.0003,
"loss": 0.2886,
"step": 21500
},
{
"epoch": 19.83015836584806,
"grad_norm": 1.0445754528045654,
"learning_rate": 0.0003,
"loss": 0.2953,
"step": 21600
},
{
"epoch": 19.92196465457884,
"grad_norm": 1.035718560218811,
"learning_rate": 0.0003,
"loss": 0.2989,
"step": 21700
},
{
"epoch": 19.995409685563462,
"eval_accuracy": 0.7763580786026201,
"eval_loss": 0.475557804107666,
"eval_runtime": 9.1559,
"eval_samples_per_second": 54.61,
"eval_steps_per_second": 6.881,
"step": 21780
},
{
"epoch": 19.995409685563462,
"step": 21780,
"total_flos": 2.2953223726028554e+18,
"train_loss": 0.5338389528903313,
"train_runtime": 46593.736,
"train_samples_per_second": 14.961,
"train_steps_per_second": 0.467
}
],
"logging_steps": 100,
"max_steps": 21780,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 2.2953223726028554e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}