{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.995409685563462,
"eval_steps": 500,
"global_step": 21780,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09180628873077806,
"grad_norm": 0.25305476784706116,
"learning_rate": 0.0005,
"loss": 1.6083,
"step": 100
},
{
"epoch": 0.18361257746155613,
"grad_norm": 0.3124815821647644,
"learning_rate": 0.0005,
"loss": 1.5865,
"step": 200
},
{
"epoch": 0.2754188661923342,
"grad_norm": 0.33532094955444336,
"learning_rate": 0.0005,
"loss": 1.5663,
"step": 300
},
{
"epoch": 0.36722515492311225,
"grad_norm": 0.3140541911125183,
"learning_rate": 0.0005,
"loss": 1.5709,
"step": 400
},
{
"epoch": 0.4590314436538903,
"grad_norm": 0.3288426399230957,
"learning_rate": 0.0005,
"loss": 1.567,
"step": 500
},
{
"epoch": 0.5508377323846684,
"grad_norm": 0.362998902797699,
"learning_rate": 0.0005,
"loss": 1.5635,
"step": 600
},
{
"epoch": 0.6426440211154464,
"grad_norm": 0.37498417496681213,
"learning_rate": 0.0005,
"loss": 1.5651,
"step": 700
},
{
"epoch": 0.7344503098462245,
"grad_norm": 0.45461997389793396,
"learning_rate": 0.0005,
"loss": 1.5687,
"step": 800
},
{
"epoch": 0.8262565985770025,
"grad_norm": 0.42744582891464233,
"learning_rate": 0.0005,
"loss": 1.5349,
"step": 900
},
{
"epoch": 0.9180628873077806,
"grad_norm": 0.43451881408691406,
"learning_rate": 0.0005,
"loss": 1.5463,
"step": 1000
},
{
"epoch": 0.999770484278173,
"eval_accuracy": 0.6871615720524017,
"eval_loss": 1.3538845777511597,
"eval_runtime": 9.1369,
"eval_samples_per_second": 54.723,
"eval_steps_per_second": 6.895,
"step": 1089
},
{
"epoch": 1.0098691760385587,
"grad_norm": 0.45076310634613037,
"learning_rate": 0.0005,
"loss": 1.51,
"step": 1100
},
{
"epoch": 1.1016754647693368,
"grad_norm": 0.47397756576538086,
"learning_rate": 0.0005,
"loss": 1.2876,
"step": 1200
},
{
"epoch": 1.1934817535001148,
"grad_norm": 0.4858494997024536,
"learning_rate": 0.0005,
"loss": 1.2932,
"step": 1300
},
{
"epoch": 1.2852880422308928,
"grad_norm": 0.52638179063797,
"learning_rate": 0.0005,
"loss": 1.2933,
"step": 1400
},
{
"epoch": 1.377094330961671,
"grad_norm": 0.5119564533233643,
"learning_rate": 0.0005,
"loss": 1.2981,
"step": 1500
},
{
"epoch": 1.468900619692449,
"grad_norm": 0.5230050086975098,
"learning_rate": 0.0005,
"loss": 1.32,
"step": 1600
},
{
"epoch": 1.560706908423227,
"grad_norm": 0.5544503331184387,
"learning_rate": 0.0005,
"loss": 1.2968,
"step": 1700
},
{
"epoch": 1.652513197154005,
"grad_norm": 0.6211464405059814,
"learning_rate": 0.0005,
"loss": 1.3169,
"step": 1800
},
{
"epoch": 1.744319485884783,
"grad_norm": 0.5929824709892273,
"learning_rate": 0.0005,
"loss": 1.3062,
"step": 1900
},
{
"epoch": 1.836125774615561,
"grad_norm": 0.559938371181488,
"learning_rate": 0.0005,
"loss": 1.3056,
"step": 2000
},
{
"epoch": 1.9279320633463393,
"grad_norm": 0.6206551194190979,
"learning_rate": 0.0005,
"loss": 1.3199,
"step": 2100
},
{
"epoch": 1.999540968556346,
"eval_accuracy": 0.7021979621542941,
"eval_loss": 1.1631555557250977,
"eval_runtime": 9.1947,
"eval_samples_per_second": 54.379,
"eval_steps_per_second": 6.852,
"step": 2178
},
{
"epoch": 2.0197383520771175,
"grad_norm": 0.5680427551269531,
"learning_rate": 0.0005,
"loss": 1.2504,
"step": 2200
},
{
"epoch": 2.1115446408078955,
"grad_norm": 0.6365711688995361,
"learning_rate": 0.0005,
"loss": 0.9978,
"step": 2300
},
{
"epoch": 2.2033509295386735,
"grad_norm": 0.7458738088607788,
"learning_rate": 0.0005,
"loss": 1.0328,
"step": 2400
},
{
"epoch": 2.2951572182694515,
"grad_norm": 0.5955780148506165,
"learning_rate": 0.0005,
"loss": 1.0487,
"step": 2500
},
{
"epoch": 2.3869635070002295,
"grad_norm": 0.6577792763710022,
"learning_rate": 0.0005,
"loss": 1.0702,
"step": 2600
},
{
"epoch": 2.4787697957310075,
"grad_norm": 0.6620481014251709,
"learning_rate": 0.0005,
"loss": 1.0653,
"step": 2700
},
{
"epoch": 2.5705760844617855,
"grad_norm": 0.7260810136795044,
"learning_rate": 0.0005,
"loss": 1.0881,
"step": 2800
},
{
"epoch": 2.6623823731925635,
"grad_norm": 0.7388891577720642,
"learning_rate": 0.0005,
"loss": 1.0823,
"step": 2900
},
{
"epoch": 2.754188661923342,
"grad_norm": 0.7343183159828186,
"learning_rate": 0.0005,
"loss": 1.1044,
"step": 3000
},
{
"epoch": 2.84599495065412,
"grad_norm": 0.606133759021759,
"learning_rate": 0.0005,
"loss": 1.0884,
"step": 3100
},
{
"epoch": 2.937801239384898,
"grad_norm": 0.6684651374816895,
"learning_rate": 0.0005,
"loss": 1.1039,
"step": 3200
},
{
"epoch": 2.9993114528345193,
"eval_accuracy": 0.713353711790393,
"eval_loss": 1.0347490310668945,
"eval_runtime": 9.0955,
"eval_samples_per_second": 54.972,
"eval_steps_per_second": 6.927,
"step": 3267
},
{
"epoch": 3.029607528115676,
"grad_norm": 0.7068032026290894,
"learning_rate": 0.0005,
"loss": 1.0062,
"step": 3300
},
{
"epoch": 3.121413816846454,
"grad_norm": 0.7648475766181946,
"learning_rate": 0.0005,
"loss": 0.8266,
"step": 3400
},
{
"epoch": 3.213220105577232,
"grad_norm": 0.8599951267242432,
"learning_rate": 0.0005,
"loss": 0.8518,
"step": 3500
},
{
"epoch": 3.30502639430801,
"grad_norm": 0.8535987138748169,
"learning_rate": 0.0005,
"loss": 0.8849,
"step": 3600
},
{
"epoch": 3.396832683038788,
"grad_norm": 0.8338284492492676,
"learning_rate": 0.0005,
"loss": 0.8896,
"step": 3700
},
{
"epoch": 3.488638971769566,
"grad_norm": 0.8858312964439392,
"learning_rate": 0.0005,
"loss": 0.9036,
"step": 3800
},
{
"epoch": 3.580445260500344,
"grad_norm": 0.8100723028182983,
"learning_rate": 0.0005,
"loss": 0.9088,
"step": 3900
},
{
"epoch": 3.672251549231122,
"grad_norm": 0.827248215675354,
"learning_rate": 0.0005,
"loss": 0.9252,
"step": 4000
},
{
"epoch": 3.7640578379619005,
"grad_norm": 0.7961249351501465,
"learning_rate": 0.0005,
"loss": 0.9277,
"step": 4100
},
{
"epoch": 3.8558641266926785,
"grad_norm": 0.8417057394981384,
"learning_rate": 0.0005,
"loss": 0.9406,
"step": 4200
},
{
"epoch": 3.9476704154234565,
"grad_norm": 0.8393632173538208,
"learning_rate": 0.0005,
"loss": 0.9356,
"step": 4300
},
{
"epoch": 4.0,
"eval_accuracy": 0.723665211062591,
"eval_loss": 0.9234349131584167,
"eval_runtime": 9.127,
"eval_samples_per_second": 54.783,
"eval_steps_per_second": 6.903,
"step": 4357
},
{
"epoch": 4.039476704154235,
"grad_norm": 0.7149174809455872,
"learning_rate": 0.0005,
"loss": 0.8365,
"step": 4400
},
{
"epoch": 4.131282992885013,
"grad_norm": 0.7990247011184692,
"learning_rate": 0.0005,
"loss": 0.6924,
"step": 4500
},
{
"epoch": 4.223089281615791,
"grad_norm": 0.7871200442314148,
"learning_rate": 0.0005,
"loss": 0.7195,
"step": 4600
},
{
"epoch": 4.314895570346569,
"grad_norm": 0.8242298364639282,
"learning_rate": 0.0005,
"loss": 0.749,
"step": 4700
},
{
"epoch": 4.406701859077347,
"grad_norm": 0.864937424659729,
"learning_rate": 0.0005,
"loss": 0.7616,
"step": 4800
},
{
"epoch": 4.498508147808125,
"grad_norm": 0.8644908666610718,
"learning_rate": 0.0005,
"loss": 0.7763,
"step": 4900
},
{
"epoch": 4.590314436538903,
"grad_norm": 0.9300737977027893,
"learning_rate": 0.0005,
"loss": 0.7999,
"step": 5000
},
{
"epoch": 4.682120725269681,
"grad_norm": 0.8608301877975464,
"learning_rate": 0.0005,
"loss": 0.8112,
"step": 5100
},
{
"epoch": 4.773927014000459,
"grad_norm": 0.8489959239959717,
"learning_rate": 0.0005,
"loss": 0.8168,
"step": 5200
},
{
"epoch": 4.865733302731237,
"grad_norm": 0.9379121661186218,
"learning_rate": 0.0005,
"loss": 0.8256,
"step": 5300
},
{
"epoch": 4.957539591462015,
"grad_norm": 0.9303742051124573,
"learning_rate": 0.0005,
"loss": 0.8312,
"step": 5400
},
{
"epoch": 4.999770484278173,
"eval_accuracy": 0.7306957787481805,
"eval_loss": 0.8529494404792786,
"eval_runtime": 9.1302,
"eval_samples_per_second": 54.764,
"eval_steps_per_second": 6.9,
"step": 5446
},
{
"epoch": 5.049345880192793,
"grad_norm": 0.8555835485458374,
"learning_rate": 0.0005,
"loss": 0.7084,
"step": 5500
},
{
"epoch": 5.141152168923571,
"grad_norm": 0.99271559715271,
"learning_rate": 0.0005,
"loss": 0.6162,
"step": 5600
},
{
"epoch": 5.232958457654349,
"grad_norm": 0.9135051965713501,
"learning_rate": 0.0005,
"loss": 0.6335,
"step": 5700
},
{
"epoch": 5.324764746385127,
"grad_norm": 0.9890821576118469,
"learning_rate": 0.0005,
"loss": 0.6725,
"step": 5800
},
{
"epoch": 5.416571035115905,
"grad_norm": 0.941581130027771,
"learning_rate": 0.0005,
"loss": 0.677,
"step": 5900
},
{
"epoch": 5.508377323846684,
"grad_norm": 0.8984777927398682,
"learning_rate": 0.0005,
"loss": 0.704,
"step": 6000
},
{
"epoch": 5.600183612577462,
"grad_norm": 1.007198452949524,
"learning_rate": 0.0005,
"loss": 0.71,
"step": 6100
},
{
"epoch": 5.69198990130824,
"grad_norm": 1.0259089469909668,
"learning_rate": 0.0005,
"loss": 0.731,
"step": 6200
},
{
"epoch": 5.783796190039018,
"grad_norm": 1.0021580457687378,
"learning_rate": 0.0005,
"loss": 0.7425,
"step": 6300
},
{
"epoch": 5.875602478769796,
"grad_norm": 0.9952398538589478,
"learning_rate": 0.0005,
"loss": 0.7395,
"step": 6400
},
{
"epoch": 5.967408767500574,
"grad_norm": 1.257237195968628,
"learning_rate": 0.0005,
"loss": 0.7565,
"step": 6500
},
{
"epoch": 5.999540968556346,
"eval_accuracy": 0.7372139737991267,
"eval_loss": 0.7859806418418884,
"eval_runtime": 9.1037,
"eval_samples_per_second": 54.923,
"eval_steps_per_second": 6.92,
"step": 6535
},
{
"epoch": 6.059215056231352,
"grad_norm": 0.9014455676078796,
"learning_rate": 0.0005,
"loss": 0.6172,
"step": 6600
},
{
"epoch": 6.15102134496213,
"grad_norm": 0.8993759751319885,
"learning_rate": 0.0005,
"loss": 0.5578,
"step": 6700
},
{
"epoch": 6.242827633692908,
"grad_norm": 0.9781313538551331,
"learning_rate": 0.0005,
"loss": 0.5846,
"step": 6800
},
{
"epoch": 6.334633922423686,
"grad_norm": 0.9638663530349731,
"learning_rate": 0.0005,
"loss": 0.6045,
"step": 6900
},
{
"epoch": 6.426440211154464,
"grad_norm": 0.9154132604598999,
"learning_rate": 0.0005,
"loss": 0.6203,
"step": 7000
},
{
"epoch": 6.518246499885242,
"grad_norm": 1.0229175090789795,
"learning_rate": 0.0005,
"loss": 0.6421,
"step": 7100
},
{
"epoch": 6.61005278861602,
"grad_norm": 1.0271860361099243,
"learning_rate": 0.0005,
"loss": 0.6525,
"step": 7200
},
{
"epoch": 6.701859077346798,
"grad_norm": 0.981940746307373,
"learning_rate": 0.0005,
"loss": 0.6625,
"step": 7300
},
{
"epoch": 6.793665366077576,
"grad_norm": 1.0368313789367676,
"learning_rate": 0.0005,
"loss": 0.6754,
"step": 7400
},
{
"epoch": 6.885471654808354,
"grad_norm": 1.058945655822754,
"learning_rate": 0.0005,
"loss": 0.685,
"step": 7500
},
{
"epoch": 6.977277943539132,
"grad_norm": 0.9555875062942505,
"learning_rate": 0.0005,
"loss": 0.6985,
"step": 7600
},
{
"epoch": 6.999311452834519,
"eval_accuracy": 0.741475982532751,
"eval_loss": 0.7415383458137512,
"eval_runtime": 9.1507,
"eval_samples_per_second": 54.641,
"eval_steps_per_second": 6.885,
"step": 7624
},
{
"epoch": 7.06908423226991,
"grad_norm": 0.9311098456382751,
"learning_rate": 0.0005,
"loss": 0.5515,
"step": 7700
},
{
"epoch": 7.160890521000688,
"grad_norm": 0.9383052587509155,
"learning_rate": 0.0005,
"loss": 0.5276,
"step": 7800
},
{
"epoch": 7.252696809731467,
"grad_norm": 0.9980023503303528,
"learning_rate": 0.0005,
"loss": 0.5369,
"step": 7900
},
{
"epoch": 7.344503098462245,
"grad_norm": 0.9550295472145081,
"learning_rate": 0.0005,
"loss": 0.5676,
"step": 8000
},
{
"epoch": 7.436309387193023,
"grad_norm": 0.9989355206489563,
"learning_rate": 0.0005,
"loss": 0.5805,
"step": 8100
},
{
"epoch": 7.528115675923801,
"grad_norm": 1.0541164875030518,
"learning_rate": 0.0005,
"loss": 0.6059,
"step": 8200
},
{
"epoch": 7.619921964654579,
"grad_norm": 1.1819795370101929,
"learning_rate": 0.0005,
"loss": 0.6102,
"step": 8300
},
{
"epoch": 7.711728253385357,
"grad_norm": 1.0276963710784912,
"learning_rate": 0.0005,
"loss": 0.6196,
"step": 8400
},
{
"epoch": 7.803534542116135,
"grad_norm": 0.9429015517234802,
"learning_rate": 0.0005,
"loss": 0.6251,
"step": 8500
},
{
"epoch": 7.895340830846913,
"grad_norm": 1.1022661924362183,
"learning_rate": 0.0005,
"loss": 0.6355,
"step": 8600
},
{
"epoch": 7.987147119577691,
"grad_norm": 0.9936983585357666,
"learning_rate": 0.0005,
"loss": 0.6623,
"step": 8700
},
{
"epoch": 8.0,
"eval_accuracy": 0.7457467248908297,
"eval_loss": 0.7111318111419678,
"eval_runtime": 9.2592,
"eval_samples_per_second": 54.0,
"eval_steps_per_second": 6.804,
"step": 8714
},
{
"epoch": 8.07895340830847,
"grad_norm": 0.9469543099403381,
"learning_rate": 0.0005,
"loss": 0.5012,
"step": 8800
},
{
"epoch": 8.170759697039248,
"grad_norm": 1.0641309022903442,
"learning_rate": 0.0005,
"loss": 0.4854,
"step": 8900
},
{
"epoch": 8.262565985770026,
"grad_norm": 1.0741512775421143,
"learning_rate": 0.0005,
"loss": 0.5116,
"step": 9000
},
{
"epoch": 8.354372274500804,
"grad_norm": 1.038459300994873,
"learning_rate": 0.0005,
"loss": 0.532,
"step": 9100
},
{
"epoch": 8.446178563231582,
"grad_norm": 0.976216197013855,
"learning_rate": 0.0005,
"loss": 0.5478,
"step": 9200
},
{
"epoch": 8.53798485196236,
"grad_norm": 1.0488590002059937,
"learning_rate": 0.0005,
"loss": 0.5649,
"step": 9300
},
{
"epoch": 8.629791140693138,
"grad_norm": 1.0634474754333496,
"learning_rate": 0.0005,
"loss": 0.5873,
"step": 9400
},
{
"epoch": 8.721597429423916,
"grad_norm": 1.0678510665893555,
"learning_rate": 0.0005,
"loss": 0.5926,
"step": 9500
},
{
"epoch": 8.813403718154694,
"grad_norm": 1.0706602334976196,
"learning_rate": 0.0005,
"loss": 0.6089,
"step": 9600
},
{
"epoch": 8.905210006885472,
"grad_norm": 1.0411357879638672,
"learning_rate": 0.0005,
"loss": 0.6096,
"step": 9700
},
{
"epoch": 8.99701629561625,
"grad_norm": 1.0408014059066772,
"learning_rate": 0.0005,
"loss": 0.6281,
"step": 9800
},
{
"epoch": 8.999770484278173,
"eval_accuracy": 0.7481106259097525,
"eval_loss": 0.6775466203689575,
"eval_runtime": 9.131,
"eval_samples_per_second": 54.759,
"eval_steps_per_second": 6.9,
"step": 9803
},
{
"epoch": 9.088822584347028,
"grad_norm": 0.9965059757232666,
"learning_rate": 0.0005,
"loss": 0.4564,
"step": 9900
},
{
"epoch": 9.180628873077806,
"grad_norm": 0.9503998160362244,
"learning_rate": 0.0005,
"loss": 0.4683,
"step": 10000
},
{
"epoch": 9.272435161808584,
"grad_norm": 0.9788022041320801,
"learning_rate": 0.0005,
"loss": 0.4948,
"step": 10100
},
{
"epoch": 9.364241450539362,
"grad_norm": 0.97702556848526,
"learning_rate": 0.0005,
"loss": 0.5083,
"step": 10200
},
{
"epoch": 9.45604773927014,
"grad_norm": 1.0470614433288574,
"learning_rate": 0.0005,
"loss": 0.5269,
"step": 10300
},
{
"epoch": 9.547854028000918,
"grad_norm": 1.1560702323913574,
"learning_rate": 0.0005,
"loss": 0.5454,
"step": 10400
},
{
"epoch": 9.639660316731696,
"grad_norm": 1.065807819366455,
"learning_rate": 0.0005,
"loss": 0.5582,
"step": 10500
},
{
"epoch": 9.731466605462474,
"grad_norm": 1.0357719659805298,
"learning_rate": 0.0005,
"loss": 0.5673,
"step": 10600
},
{
"epoch": 9.823272894193252,
"grad_norm": 1.051635503768921,
"learning_rate": 0.0005,
"loss": 0.5808,
"step": 10700
},
{
"epoch": 9.91507918292403,
"grad_norm": 1.1748321056365967,
"learning_rate": 0.0005,
"loss": 0.5885,
"step": 10800
},
{
"epoch": 9.999540968556346,
"eval_accuracy": 0.7495545851528385,
"eval_loss": 0.668917715549469,
"eval_runtime": 9.0726,
"eval_samples_per_second": 55.111,
"eval_steps_per_second": 6.944,
"step": 10892
},
{
"epoch": 10.006885471654808,
"grad_norm": 1.015367031097412,
"learning_rate": 0.0005,
"loss": 0.5866,
"step": 10900
},
{
"epoch": 10.098691760385586,
"grad_norm": 1.0127650499343872,
"learning_rate": 0.0005,
"loss": 0.4348,
"step": 11000
},
{
"epoch": 10.190498049116364,
"grad_norm": 0.9805082082748413,
"learning_rate": 0.0005,
"loss": 0.4539,
"step": 11100
},
{
"epoch": 10.282304337847142,
"grad_norm": 1.007502555847168,
"learning_rate": 0.0005,
"loss": 0.4772,
"step": 11200
},
{
"epoch": 10.37411062657792,
"grad_norm": 1.1225751638412476,
"learning_rate": 0.0005,
"loss": 0.4984,
"step": 11300
},
{
"epoch": 10.465916915308698,
"grad_norm": 1.164787769317627,
"learning_rate": 0.0005,
"loss": 0.5082,
"step": 11400
},
{
"epoch": 10.557723204039476,
"grad_norm": 1.1108081340789795,
"learning_rate": 0.0005,
"loss": 0.5247,
"step": 11500
},
{
"epoch": 10.649529492770254,
"grad_norm": 1.0103906393051147,
"learning_rate": 0.0005,
"loss": 0.5299,
"step": 11600
},
{
"epoch": 10.741335781501032,
"grad_norm": 1.0926460027694702,
"learning_rate": 0.0005,
"loss": 0.553,
"step": 11700
},
{
"epoch": 10.83314207023181,
"grad_norm": 1.1170562505722046,
"learning_rate": 0.0005,
"loss": 0.5623,
"step": 11800
},
{
"epoch": 10.924948358962588,
"grad_norm": 0.9009856581687927,
"learning_rate": 0.0005,
"loss": 0.5721,
"step": 11900
},
{
"epoch": 10.999311452834519,
"eval_accuracy": 0.7530101892285298,
"eval_loss": 0.6363697648048401,
"eval_runtime": 9.1706,
"eval_samples_per_second": 54.522,
"eval_steps_per_second": 6.87,
"step": 11981
},
{
"epoch": 11.016754647693366,
"grad_norm": 0.8974876403808594,
"learning_rate": 0.0005,
"loss": 0.5493,
"step": 12000
},
{
"epoch": 11.108560936424144,
"grad_norm": 0.974591851234436,
"learning_rate": 0.0005,
"loss": 0.4203,
"step": 12100
},
{
"epoch": 11.200367225154924,
"grad_norm": 1.0000176429748535,
"learning_rate": 0.0005,
"loss": 0.4454,
"step": 12200
},
{
"epoch": 11.292173513885702,
"grad_norm": 1.0163604021072388,
"learning_rate": 0.0005,
"loss": 0.4579,
"step": 12300
},
{
"epoch": 11.38397980261648,
"grad_norm": 1.0325731039047241,
"learning_rate": 0.0005,
"loss": 0.4852,
"step": 12400
},
{
"epoch": 11.475786091347258,
"grad_norm": 1.093885064125061,
"learning_rate": 0.0005,
"loss": 0.4979,
"step": 12500
},
{
"epoch": 11.567592380078036,
"grad_norm": 1.0193454027175903,
"learning_rate": 0.0005,
"loss": 0.5076,
"step": 12600
},
{
"epoch": 11.659398668808814,
"grad_norm": 0.9466171264648438,
"learning_rate": 0.0005,
"loss": 0.529,
"step": 12700
},
{
"epoch": 11.751204957539592,
"grad_norm": 1.1748363971710205,
"learning_rate": 0.0005,
"loss": 0.5398,
"step": 12800
},
{
"epoch": 11.84301124627037,
"grad_norm": 1.0464725494384766,
"learning_rate": 0.0005,
"loss": 0.544,
"step": 12900
},
{
"epoch": 11.934817535001148,
"grad_norm": 1.1592164039611816,
"learning_rate": 0.0005,
"loss": 0.5504,
"step": 13000
},
{
"epoch": 12.0,
"eval_accuracy": 0.7540669577874818,
"eval_loss": 0.6318646669387817,
"eval_runtime": 9.1786,
"eval_samples_per_second": 54.474,
"eval_steps_per_second": 6.864,
"step": 13071
},
{
"epoch": 12.026623823731926,
"grad_norm": 0.935130774974823,
"learning_rate": 0.0005,
"loss": 0.5135,
"step": 13100
},
{
"epoch": 12.118430112462704,
"grad_norm": 1.0784605741500854,
"learning_rate": 0.0005,
"loss": 0.4078,
"step": 13200
},
{
"epoch": 12.210236401193482,
"grad_norm": 1.0809942483901978,
"learning_rate": 0.0005,
"loss": 0.4313,
"step": 13300
},
{
"epoch": 12.30204268992426,
"grad_norm": 1.1434837579727173,
"learning_rate": 0.0005,
"loss": 0.4576,
"step": 13400
},
{
"epoch": 12.393848978655038,
"grad_norm": 1.0381824970245361,
"learning_rate": 0.0005,
"loss": 0.4688,
"step": 13500
},
{
"epoch": 12.485655267385816,
"grad_norm": 1.0997393131256104,
"learning_rate": 0.0005,
"loss": 0.4838,
"step": 13600
},
{
"epoch": 12.577461556116594,
"grad_norm": 1.1423929929733276,
"learning_rate": 0.0005,
"loss": 0.5015,
"step": 13700
},
{
"epoch": 12.669267844847372,
"grad_norm": 1.0119812488555908,
"learning_rate": 0.0005,
"loss": 0.5099,
"step": 13800
},
{
"epoch": 12.76107413357815,
"grad_norm": 1.0487439632415771,
"learning_rate": 0.0005,
"loss": 0.5224,
"step": 13900
},
{
"epoch": 12.852880422308928,
"grad_norm": 1.1909295320510864,
"learning_rate": 0.0005,
"loss": 0.5362,
"step": 14000
},
{
"epoch": 12.944686711039706,
"grad_norm": 1.0172119140625,
"learning_rate": 0.0005,
"loss": 0.5406,
"step": 14100
},
{
"epoch": 12.999770484278173,
"eval_accuracy": 0.7549374090247453,
"eval_loss": 0.6185360550880432,
"eval_runtime": 9.1246,
"eval_samples_per_second": 54.797,
"eval_steps_per_second": 6.904,
"step": 14160
},
{
"epoch": 13.036492999770484,
"grad_norm": 0.8250077366828918,
"learning_rate": 0.0005,
"loss": 0.4829,
"step": 14200
},
{
"epoch": 13.128299288501262,
"grad_norm": 0.9478662014007568,
"learning_rate": 0.0005,
"loss": 0.4008,
"step": 14300
},
{
"epoch": 13.22010557723204,
"grad_norm": 0.9901746511459351,
"learning_rate": 0.0005,
"loss": 0.4221,
"step": 14400
},
{
"epoch": 13.311911865962818,
"grad_norm": 1.0091050863265991,
"learning_rate": 0.0005,
"loss": 0.4457,
"step": 14500
},
{
"epoch": 13.403718154693596,
"grad_norm": 1.1044436693191528,
"learning_rate": 0.0005,
"loss": 0.4612,
"step": 14600
},
{
"epoch": 13.495524443424374,
"grad_norm": 1.095940113067627,
"learning_rate": 0.0005,
"loss": 0.4808,
"step": 14700
},
{
"epoch": 13.587330732155152,
"grad_norm": 0.9664422869682312,
"learning_rate": 0.0005,
"loss": 0.4936,
"step": 14800
},
{
"epoch": 13.67913702088593,
"grad_norm": 0.9911946654319763,
"learning_rate": 0.0005,
"loss": 0.5029,
"step": 14900
},
{
"epoch": 13.770943309616708,
"grad_norm": 1.0246057510375977,
"learning_rate": 0.0005,
"loss": 0.5111,
"step": 15000
},
{
"epoch": 13.862749598347486,
"grad_norm": 1.094871163368225,
"learning_rate": 0.0005,
"loss": 0.5253,
"step": 15100
},
{
"epoch": 13.954555887078264,
"grad_norm": 1.1010475158691406,
"learning_rate": 0.0005,
"loss": 0.536,
"step": 15200
},
{
"epoch": 13.999540968556346,
"eval_accuracy": 0.7565123726346433,
"eval_loss": 0.61576908826828,
"eval_runtime": 9.1949,
"eval_samples_per_second": 54.378,
"eval_steps_per_second": 6.852,
"step": 15249
},
{
"epoch": 14.046362175809042,
"grad_norm": 0.9305674433708191,
"learning_rate": 0.0005,
"loss": 0.4565,
"step": 15300
},
{
"epoch": 14.13816846453982,
"grad_norm": 0.9582420587539673,
"learning_rate": 0.0005,
"loss": 0.3955,
"step": 15400
},
{
"epoch": 14.229974753270598,
"grad_norm": 1.0967265367507935,
"learning_rate": 0.0005,
"loss": 0.4182,
"step": 15500
},
{
"epoch": 14.321781042001376,
"grad_norm": 0.9896946549415588,
"learning_rate": 0.0005,
"loss": 0.4357,
"step": 15600
},
{
"epoch": 14.413587330732156,
"grad_norm": 1.0576809644699097,
"learning_rate": 0.0005,
"loss": 0.4549,
"step": 15700
},
{
"epoch": 14.505393619462934,
"grad_norm": 1.0679750442504883,
"learning_rate": 0.0005,
"loss": 0.4694,
"step": 15800
},
{
"epoch": 14.597199908193712,
"grad_norm": 1.0860179662704468,
"learning_rate": 0.0005,
"loss": 0.4857,
"step": 15900
},
{
"epoch": 14.68900619692449,
"grad_norm": 1.0913867950439453,
"learning_rate": 0.0005,
"loss": 0.4935,
"step": 16000
},
{
"epoch": 14.780812485655268,
"grad_norm": 1.0317550897598267,
"learning_rate": 0.0005,
"loss": 0.5047,
"step": 16100
},
{
"epoch": 14.872618774386046,
"grad_norm": 1.0847545862197876,
"learning_rate": 0.0005,
"loss": 0.5103,
"step": 16200
},
{
"epoch": 14.964425063116824,
"grad_norm": 1.0414918661117554,
"learning_rate": 0.0005,
"loss": 0.5205,
"step": 16300
},
{
"epoch": 14.999311452834519,
"eval_accuracy": 0.757839883551674,
"eval_loss": 0.5975773930549622,
"eval_runtime": 9.2897,
"eval_samples_per_second": 53.823,
"eval_steps_per_second": 6.782,
"step": 16338
},
{
"epoch": 15.056231351847602,
"grad_norm": 0.8716037273406982,
"learning_rate": 0.0005,
"loss": 0.4352,
"step": 16400
},
{
"epoch": 15.14803764057838,
"grad_norm": 0.9753520488739014,
"learning_rate": 0.0005,
"loss": 0.3923,
"step": 16500
},
{
"epoch": 15.239843929309158,
"grad_norm": 1.0317981243133545,
"learning_rate": 0.0005,
"loss": 0.4083,
"step": 16600
},
{
"epoch": 15.331650218039936,
"grad_norm": 1.0025997161865234,
"learning_rate": 0.0005,
"loss": 0.435,
"step": 16700
},
{
"epoch": 15.423456506770714,
"grad_norm": 1.0460282564163208,
"learning_rate": 0.0005,
"loss": 0.4479,
"step": 16800
},
{
"epoch": 15.515262795501492,
"grad_norm": 0.957950234413147,
"learning_rate": 0.0005,
"loss": 0.4613,
"step": 16900
},
{
"epoch": 15.60706908423227,
"grad_norm": 1.0739928483963013,
"learning_rate": 0.0005,
"loss": 0.4724,
"step": 17000
},
{
"epoch": 15.698875372963048,
"grad_norm": 1.161801815032959,
"learning_rate": 0.0005,
"loss": 0.4889,
"step": 17100
},
{
"epoch": 15.790681661693826,
"grad_norm": 1.1013407707214355,
"learning_rate": 0.0005,
"loss": 0.4911,
"step": 17200
},
{
"epoch": 15.882487950424604,
"grad_norm": 1.1333537101745605,
"learning_rate": 0.0005,
"loss": 0.5082,
"step": 17300
},
{
"epoch": 15.974294239155382,
"grad_norm": 1.1869903802871704,
"learning_rate": 0.0005,
"loss": 0.5175,
"step": 17400
},
{
"epoch": 16.0,
"eval_accuracy": 0.7590393013100437,
"eval_loss": 0.5922391414642334,
"eval_runtime": 9.1385,
"eval_samples_per_second": 54.714,
"eval_steps_per_second": 6.894,
"step": 17428
},
{
"epoch": 16.06610052788616,
"grad_norm": 1.069451093673706,
"learning_rate": 0.0005,
"loss": 0.4122,
"step": 17500
},
{
"epoch": 16.15790681661694,
"grad_norm": 0.9926499724388123,
"learning_rate": 0.0005,
"loss": 0.3909,
"step": 17600
},
{
"epoch": 16.249713105347716,
"grad_norm": 1.067694902420044,
"learning_rate": 0.0005,
"loss": 0.4075,
"step": 17700
},
{
"epoch": 16.341519394078496,
"grad_norm": 0.9541024565696716,
"learning_rate": 0.0005,
"loss": 0.4215,
"step": 17800
},
{
"epoch": 16.433325682809272,
"grad_norm": 1.0835317373275757,
"learning_rate": 0.0005,
"loss": 0.4405,
"step": 17900
},
{
"epoch": 16.525131971540052,
"grad_norm": 1.1008630990982056,
"learning_rate": 0.0005,
"loss": 0.4562,
"step": 18000
},
{
"epoch": 16.616938260270828,
"grad_norm": 1.1548954248428345,
"learning_rate": 0.0005,
"loss": 0.4694,
"step": 18100
},
{
"epoch": 16.708744549001608,
"grad_norm": 0.9584476947784424,
"learning_rate": 0.0005,
"loss": 0.481,
"step": 18200
},
{
"epoch": 16.800550837732384,
"grad_norm": 1.0248852968215942,
"learning_rate": 0.0005,
"loss": 0.4854,
"step": 18300
},
{
"epoch": 16.892357126463164,
"grad_norm": 1.1495635509490967,
"learning_rate": 0.0005,
"loss": 0.5012,
"step": 18400
},
{
"epoch": 16.98416341519394,
"grad_norm": 1.0732232332229614,
"learning_rate": 0.0005,
"loss": 0.5068,
"step": 18500
},
{
"epoch": 16.999770484278173,
"eval_accuracy": 0.759278020378457,
"eval_loss": 0.582272469997406,
"eval_runtime": 9.1627,
"eval_samples_per_second": 54.569,
"eval_steps_per_second": 6.876,
"step": 18517
},
{
"epoch": 17.07596970392472,
"grad_norm": 1.0219801664352417,
"learning_rate": 0.0005,
"loss": 0.3925,
"step": 18600
},
{
"epoch": 17.167775992655496,
"grad_norm": 0.8995177149772644,
"learning_rate": 0.0005,
"loss": 0.384,
"step": 18700
},
{
"epoch": 17.259582281386276,
"grad_norm": 1.0561939477920532,
"learning_rate": 0.0005,
"loss": 0.4041,
"step": 18800
},
{
"epoch": 17.351388570117052,
"grad_norm": 0.9551050662994385,
"learning_rate": 0.0005,
"loss": 0.4246,
"step": 18900
},
{
"epoch": 17.443194858847832,
"grad_norm": 1.0561895370483398,
"learning_rate": 0.0005,
"loss": 0.4382,
"step": 19000
},
{
"epoch": 17.53500114757861,
"grad_norm": 1.1141507625579834,
"learning_rate": 0.0005,
"loss": 0.4583,
"step": 19100
},
{
"epoch": 17.626807436309388,
"grad_norm": 1.141851544380188,
"learning_rate": 0.0005,
"loss": 0.4609,
"step": 19200
},
{
"epoch": 17.718613725040164,
"grad_norm": 1.1055680513381958,
"learning_rate": 0.0005,
"loss": 0.4668,
"step": 19300
},
{
"epoch": 17.810420013770944,
"grad_norm": 1.0966593027114868,
"learning_rate": 0.0005,
"loss": 0.4843,
"step": 19400
},
{
"epoch": 17.90222630250172,
"grad_norm": 1.1193351745605469,
"learning_rate": 0.0005,
"loss": 0.4933,
"step": 19500
},
{
"epoch": 17.9940325912325,
"grad_norm": 1.258386254310608,
"learning_rate": 0.0005,
"loss": 0.5023,
"step": 19600
},
{
"epoch": 17.999540968556346,
"eval_accuracy": 0.760660844250364,
"eval_loss": 0.5753906965255737,
"eval_runtime": 9.1571,
"eval_samples_per_second": 54.603,
"eval_steps_per_second": 6.88,
"step": 19606
},
{
"epoch": 18.085838879963276,
"grad_norm": 0.9986138939857483,
"learning_rate": 0.0005,
"loss": 0.3719,
"step": 19700
},
{
"epoch": 18.177645168694056,
"grad_norm": 0.9715769290924072,
"learning_rate": 0.0005,
"loss": 0.3825,
"step": 19800
},
{
"epoch": 18.269451457424832,
"grad_norm": 1.0640110969543457,
"learning_rate": 0.0005,
"loss": 0.4056,
"step": 19900
},
{
"epoch": 18.361257746155612,
"grad_norm": 1.0777605772018433,
"learning_rate": 0.0005,
"loss": 0.4172,
"step": 20000
},
{
"epoch": 18.45306403488639,
"grad_norm": 1.0681456327438354,
"learning_rate": 0.0005,
"loss": 0.4318,
"step": 20100
},
{
"epoch": 18.544870323617168,
"grad_norm": 1.1611887216567993,
"learning_rate": 0.0005,
"loss": 0.4431,
"step": 20200
},
{
"epoch": 18.636676612347944,
"grad_norm": 0.9606027603149414,
"learning_rate": 0.0005,
"loss": 0.4594,
"step": 20300
},
{
"epoch": 18.728482901078724,
"grad_norm": 1.0940121412277222,
"learning_rate": 0.0005,
"loss": 0.4692,
"step": 20400
},
{
"epoch": 18.8202891898095,
"grad_norm": 1.219136357307434,
"learning_rate": 0.0005,
"loss": 0.4734,
"step": 20500
},
{
"epoch": 18.91209547854028,
"grad_norm": 1.058712363243103,
"learning_rate": 0.0005,
"loss": 0.4848,
"step": 20600
},
{
"epoch": 18.99931145283452,
"eval_accuracy": 0.7608238719068413,
"eval_loss": 0.5781408548355103,
"eval_runtime": 9.1618,
"eval_samples_per_second": 54.574,
"eval_steps_per_second": 6.876,
"step": 20695
},
{
"epoch": 19.003901767271056,
"grad_norm": 0.853932797908783,
"learning_rate": 0.0005,
"loss": 0.4916,
"step": 20700
},
{
"epoch": 19.095708056001836,
"grad_norm": 1.0715103149414062,
"learning_rate": 0.0005,
"loss": 0.362,
"step": 20800
},
{
"epoch": 19.187514344732612,
"grad_norm": 0.9922650456428528,
"learning_rate": 0.0005,
"loss": 0.3766,
"step": 20900
},
{
"epoch": 19.279320633463392,
"grad_norm": 1.0666494369506836,
"learning_rate": 0.0005,
"loss": 0.3999,
"step": 21000
},
{
"epoch": 19.371126922194172,
"grad_norm": 1.026061773300171,
"learning_rate": 0.0005,
"loss": 0.4188,
"step": 21100
},
{
"epoch": 19.462933210924948,
"grad_norm": 1.0832582712173462,
"learning_rate": 0.0005,
"loss": 0.432,
"step": 21200
},
{
"epoch": 19.554739499655728,
"grad_norm": 1.109492540359497,
"learning_rate": 0.0005,
"loss": 0.436,
"step": 21300
},
{
"epoch": 19.646545788386504,
"grad_norm": 1.0509155988693237,
"learning_rate": 0.0005,
"loss": 0.4564,
"step": 21400
},
{
"epoch": 19.738352077117284,
"grad_norm": 1.1579090356826782,
"learning_rate": 0.0005,
"loss": 0.4622,
"step": 21500
},
{
"epoch": 19.83015836584806,
"grad_norm": 1.0362952947616577,
"learning_rate": 0.0005,
"loss": 0.4728,
"step": 21600
},
{
"epoch": 19.92196465457884,
"grad_norm": 1.06185781955719,
"learning_rate": 0.0005,
"loss": 0.4767,
"step": 21700
},
{
"epoch": 19.995409685563462,
"eval_accuracy": 0.7611848617176128,
"eval_loss": 0.5757241249084473,
"eval_runtime": 9.1042,
"eval_samples_per_second": 54.92,
"eval_steps_per_second": 6.92,
"step": 21780
},
{
"epoch": 19.995409685563462,
"step": 21780,
"total_flos": 2.2953223726028554e+18,
"train_loss": 0.6653460843285034,
"train_runtime": 50472.5887,
"train_samples_per_second": 13.811,
"train_steps_per_second": 0.432
}
],
"logging_steps": 100,
"max_steps": 21780,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 2.2953223726028554e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
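
The entries under "log_history" alternate between training records (keys "epoch", "loss", "grad_norm", "learning_rate", "step") and per-epoch evaluation records (keys "eval_loss", "eval_accuracy", "eval_runtime", and related throughput fields). A minimal sketch of reading this structure, assuming the file above is saved locally under the conventional name trainer_state.json:

# Minimal sketch: parse the trainer state and print the evaluation metrics
# recorded at the end of each epoch. Only keys present in the log above are used.
import json

with open("trainer_state.json") as f:   # assumed local filename
    state = json.load(f)

for entry in state["log_history"]:
    # Evaluation records carry eval_* keys; training records carry "loss" instead.
    if "eval_loss" in entry:
        print(f'epoch {entry["epoch"]:.2f}: '
              f'eval_loss {entry["eval_loss"]:.4f}, '
              f'eval_accuracy {entry["eval_accuracy"]:.4f}')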