{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.4644549763033177,
"eval_steps": 6,
"global_step": 130,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018957345971563982,
"grad_norm": 1.21875,
"learning_rate": 2e-05,
"loss": 2.0442,
"step": 1
},
{
"epoch": 0.018957345971563982,
"eval_loss": 2.073391914367676,
"eval_runtime": 6.8335,
"eval_samples_per_second": 52.096,
"eval_steps_per_second": 13.024,
"step": 1
},
{
"epoch": 0.037914691943127965,
"grad_norm": 1.40625,
"learning_rate": 4e-05,
"loss": 2.1817,
"step": 2
},
{
"epoch": 0.05687203791469194,
"grad_norm": 1.3125,
"learning_rate": 6e-05,
"loss": 2.0671,
"step": 3
},
{
"epoch": 0.07582938388625593,
"grad_norm": 1.5,
"learning_rate": 8e-05,
"loss": 1.9289,
"step": 4
},
{
"epoch": 0.0947867298578199,
"grad_norm": 1.3984375,
"learning_rate": 0.0001,
"loss": 1.7705,
"step": 5
},
{
"epoch": 0.11374407582938388,
"grad_norm": 1.1171875,
"learning_rate": 0.00012,
"loss": 1.449,
"step": 6
},
{
"epoch": 0.11374407582938388,
"eval_loss": 1.2773768901824951,
"eval_runtime": 7.1055,
"eval_samples_per_second": 50.102,
"eval_steps_per_second": 12.526,
"step": 6
},
{
"epoch": 0.13270142180094788,
"grad_norm": 1.125,
"learning_rate": 0.00014,
"loss": 1.3112,
"step": 7
},
{
"epoch": 0.15165876777251186,
"grad_norm": 1.4140625,
"learning_rate": 0.00016,
"loss": 1.2196,
"step": 8
},
{
"epoch": 0.17061611374407584,
"grad_norm": 0.87890625,
"learning_rate": 0.00018,
"loss": 1.1166,
"step": 9
},
{
"epoch": 0.1895734597156398,
"grad_norm": 0.8125,
"learning_rate": 0.0002,
"loss": 1.0836,
"step": 10
},
{
"epoch": 0.20853080568720378,
"grad_norm": 0.4609375,
"learning_rate": 0.00019997685019798912,
"loss": 0.898,
"step": 11
},
{
"epoch": 0.22748815165876776,
"grad_norm": 0.376953125,
"learning_rate": 0.00019990741151022301,
"loss": 0.8548,
"step": 12
},
{
"epoch": 0.22748815165876776,
"eval_loss": 0.9005841612815857,
"eval_runtime": 6.5385,
"eval_samples_per_second": 54.447,
"eval_steps_per_second": 13.612,
"step": 12
},
{
"epoch": 0.24644549763033174,
"grad_norm": 0.384765625,
"learning_rate": 0.00019979171608653924,
"loss": 0.8835,
"step": 13
},
{
"epoch": 0.26540284360189575,
"grad_norm": 0.326171875,
"learning_rate": 0.00019962981749346078,
"loss": 0.8374,
"step": 14
},
{
"epoch": 0.2843601895734597,
"grad_norm": 0.32421875,
"learning_rate": 0.0001994217906893952,
"loss": 0.8658,
"step": 15
},
{
"epoch": 0.3033175355450237,
"grad_norm": 0.326171875,
"learning_rate": 0.000199167731989929,
"loss": 0.8232,
"step": 16
},
{
"epoch": 0.3222748815165877,
"grad_norm": 0.234375,
"learning_rate": 0.00019886775902323405,
"loss": 0.7541,
"step": 17
},
{
"epoch": 0.3412322274881517,
"grad_norm": 0.2470703125,
"learning_rate": 0.00019852201067560606,
"loss": 0.8561,
"step": 18
},
{
"epoch": 0.3412322274881517,
"eval_loss": 0.7924264669418335,
"eval_runtime": 7.7645,
"eval_samples_per_second": 45.85,
"eval_steps_per_second": 11.462,
"step": 18
},
{
"epoch": 0.36018957345971564,
"grad_norm": 0.267578125,
"learning_rate": 0.00019813064702716094,
"loss": 0.7909,
"step": 19
},
{
"epoch": 0.3791469194312796,
"grad_norm": 0.3046875,
"learning_rate": 0.0001976938492777182,
"loss": 0.7657,
"step": 20
},
{
"epoch": 0.3981042654028436,
"grad_norm": 1.3984375,
"learning_rate": 0.00019721181966290613,
"loss": 0.7214,
"step": 21
},
{
"epoch": 0.41706161137440756,
"grad_norm": 0.33203125,
"learning_rate": 0.00019668478136052774,
"loss": 0.7511,
"step": 22
},
{
"epoch": 0.43601895734597157,
"grad_norm": 0.35546875,
"learning_rate": 0.0001961129783872301,
"loss": 0.7159,
"step": 23
},
{
"epoch": 0.4549763033175355,
"grad_norm": 0.279296875,
"learning_rate": 0.00019549667548552556,
"loss": 0.744,
"step": 24
},
{
"epoch": 0.4549763033175355,
"eval_loss": 0.717645525932312,
"eval_runtime": 6.6339,
"eval_samples_per_second": 53.664,
"eval_steps_per_second": 13.416,
"step": 24
},
{
"epoch": 0.47393364928909953,
"grad_norm": 0.259765625,
"learning_rate": 0.00019483615800121716,
"loss": 0.6753,
"step": 25
},
{
"epoch": 0.4928909952606635,
"grad_norm": 0.26953125,
"learning_rate": 0.00019413173175128473,
"loss": 0.7068,
"step": 26
},
{
"epoch": 0.5118483412322274,
"grad_norm": 0.263671875,
"learning_rate": 0.0001933837228822925,
"loss": 0.6545,
"step": 27
},
{
"epoch": 0.5308056872037915,
"grad_norm": 0.29296875,
"learning_rate": 0.000192592477719385,
"loss": 0.7141,
"step": 28
},
{
"epoch": 0.5497630331753555,
"grad_norm": 0.259765625,
"learning_rate": 0.00019175836260593938,
"loss": 0.6713,
"step": 29
},
{
"epoch": 0.5687203791469194,
"grad_norm": 0.310546875,
"learning_rate": 0.0001908817637339503,
"loss": 0.6752,
"step": 30
},
{
"epoch": 0.5687203791469194,
"eval_loss": 0.6603197455406189,
"eval_runtime": 7.3719,
"eval_samples_per_second": 48.291,
"eval_steps_per_second": 12.073,
"step": 30
},
{
"epoch": 0.5876777251184834,
"grad_norm": 0.326171875,
"learning_rate": 0.00018996308696522433,
"loss": 0.6389,
"step": 31
},
{
"epoch": 0.6066350710900474,
"grad_norm": 0.3046875,
"learning_rate": 0.00018900275764346768,
"loss": 0.6711,
"step": 32
},
{
"epoch": 0.6255924170616114,
"grad_norm": 0.322265625,
"learning_rate": 0.00018800122039735358,
"loss": 0.6097,
"step": 33
},
{
"epoch": 0.6445497630331753,
"grad_norm": 0.322265625,
"learning_rate": 0.0001869589389346611,
"loss": 0.6204,
"step": 34
},
{
"epoch": 0.6635071090047393,
"grad_norm": 0.34765625,
"learning_rate": 0.00018587639582758031,
"loss": 0.6288,
"step": 35
},
{
"epoch": 0.6824644549763034,
"grad_norm": 0.34375,
"learning_rate": 0.00018475409228928312,
"loss": 0.5908,
"step": 36
},
{
"epoch": 0.6824644549763034,
"eval_loss": 0.6117041707038879,
"eval_runtime": 7.5585,
"eval_samples_per_second": 47.099,
"eval_steps_per_second": 11.775,
"step": 36
},
{
"epoch": 0.7014218009478673,
"grad_norm": 0.369140625,
"learning_rate": 0.0001835925479418637,
"loss": 0.5851,
"step": 37
},
{
"epoch": 0.7203791469194313,
"grad_norm": 0.40625,
"learning_rate": 0.00018239230057575542,
"loss": 0.5967,
"step": 38
},
{
"epoch": 0.7393364928909952,
"grad_norm": 0.396484375,
"learning_rate": 0.0001811539059007361,
"loss": 0.5626,
"step": 39
},
{
"epoch": 0.7582938388625592,
"grad_norm": 0.37109375,
"learning_rate": 0.00017987793728863651,
"loss": 0.5691,
"step": 40
},
{
"epoch": 0.7772511848341233,
"grad_norm": 0.421875,
"learning_rate": 0.00017856498550787144,
"loss": 0.5913,
"step": 41
},
{
"epoch": 0.7962085308056872,
"grad_norm": 0.40625,
"learning_rate": 0.00017721565844991643,
"loss": 0.5229,
"step": 42
},
{
"epoch": 0.7962085308056872,
"eval_loss": 0.5701669454574585,
"eval_runtime": 7.5799,
"eval_samples_per_second": 46.966,
"eval_steps_per_second": 11.742,
"step": 42
},
{
"epoch": 0.8151658767772512,
"grad_norm": 0.423828125,
"learning_rate": 0.00017583058084785625,
"loss": 0.5542,
"step": 43
},
{
"epoch": 0.8341232227488151,
"grad_norm": 0.466796875,
"learning_rate": 0.00017441039398713608,
"loss": 0.5131,
"step": 44
},
{
"epoch": 0.8530805687203792,
"grad_norm": 0.41015625,
"learning_rate": 0.00017295575540864877,
"loss": 0.5479,
"step": 45
},
{
"epoch": 0.8720379146919431,
"grad_norm": 0.416015625,
"learning_rate": 0.00017146733860429612,
"loss": 0.5456,
"step": 46
},
{
"epoch": 0.8909952606635071,
"grad_norm": 0.400390625,
"learning_rate": 0.0001699458327051647,
"loss": 0.5475,
"step": 47
},
{
"epoch": 0.909952606635071,
"grad_norm": 0.419921875,
"learning_rate": 0.00016839194216246108,
"loss": 0.558,
"step": 48
},
{
"epoch": 0.909952606635071,
"eval_loss": 0.528121829032898,
"eval_runtime": 7.5358,
"eval_samples_per_second": 47.241,
"eval_steps_per_second": 11.81,
"step": 48
},
{
"epoch": 0.9289099526066351,
"grad_norm": 0.443359375,
"learning_rate": 0.00016680638642135336,
"loss": 0.4805,
"step": 49
},
{
"epoch": 0.9478672985781991,
"grad_norm": 0.435546875,
"learning_rate": 0.00016518989958787126,
"loss": 0.4929,
"step": 50
},
{
"epoch": 0.966824644549763,
"grad_norm": 0.423828125,
"learning_rate": 0.00016354323008901776,
"loss": 0.5415,
"step": 51
},
{
"epoch": 0.985781990521327,
"grad_norm": 0.453125,
"learning_rate": 0.00016186714032625035,
"loss": 0.5038,
"step": 52
},
{
"epoch": 1.004739336492891,
"grad_norm": 1.2734375,
"learning_rate": 0.00016016240632249224,
"loss": 0.4915,
"step": 53
},
{
"epoch": 1.0236966824644549,
"grad_norm": 0.5234375,
"learning_rate": 0.00015842981736283686,
"loss": 0.4343,
"step": 54
},
{
"epoch": 1.0236966824644549,
"eval_loss": 0.4752185344696045,
"eval_runtime": 7.8454,
"eval_samples_per_second": 45.377,
"eval_steps_per_second": 11.344,
"step": 54
},
{
"epoch": 1.042654028436019,
"grad_norm": 0.57421875,
"learning_rate": 0.00015667017562911176,
"loss": 0.4282,
"step": 55
},
{
"epoch": 1.061611374407583,
"grad_norm": 0.51953125,
"learning_rate": 0.00015488429582847192,
"loss": 0.396,
"step": 56
},
{
"epoch": 1.080568720379147,
"grad_norm": 0.53515625,
"learning_rate": 0.00015307300481619333,
"loss": 0.4685,
"step": 57
},
{
"epoch": 1.099526066350711,
"grad_norm": 0.59375,
"learning_rate": 0.0001512371412128424,
"loss": 0.428,
"step": 58
},
{
"epoch": 1.1184834123222749,
"grad_norm": 0.5234375,
"learning_rate": 0.00014937755501599772,
"loss": 0.4002,
"step": 59
},
{
"epoch": 1.1374407582938388,
"grad_norm": 0.498046875,
"learning_rate": 0.00014749510720670506,
"loss": 0.4039,
"step": 60
},
{
"epoch": 1.1374407582938388,
"eval_loss": 0.4151807725429535,
"eval_runtime": 7.7375,
"eval_samples_per_second": 46.01,
"eval_steps_per_second": 11.502,
"step": 60
},
{
"epoch": 1.1563981042654028,
"grad_norm": 0.4140625,
"learning_rate": 0.00014559066935084588,
"loss": 0.3903,
"step": 61
},
{
"epoch": 1.1753554502369667,
"grad_norm": 0.2890625,
"learning_rate": 0.0001436651231956064,
"loss": 0.3936,
"step": 62
},
{
"epoch": 1.1943127962085307,
"grad_norm": 0.314453125,
"learning_rate": 0.00014171936026123168,
"loss": 0.3759,
"step": 63
},
{
"epoch": 1.2132701421800949,
"grad_norm": 0.8046875,
"learning_rate": 0.0001397542814282556,
"loss": 0.3949,
"step": 64
},
{
"epoch": 1.2322274881516588,
"grad_norm": 2.8125,
"learning_rate": 0.0001377707965203965,
"loss": 0.3979,
"step": 65
},
{
"epoch": 1.2511848341232228,
"grad_norm": 0.6015625,
"learning_rate": 0.0001357698238833126,
"loss": 0.3744,
"step": 66
},
{
"epoch": 1.2511848341232228,
"eval_loss": 0.422529935836792,
"eval_runtime": 7.4489,
"eval_samples_per_second": 47.792,
"eval_steps_per_second": 11.948,
"step": 66
},
{
"epoch": 1.2701421800947867,
"grad_norm": 2.421875,
"learning_rate": 0.00013375228995941133,
"loss": 0.3851,
"step": 67
},
{
"epoch": 1.2890995260663507,
"grad_norm": 0.515625,
"learning_rate": 0.00013171912885891063,
"loss": 0.3524,
"step": 68
},
{
"epoch": 1.3080568720379147,
"grad_norm": 0.421875,
"learning_rate": 0.00012967128192734902,
"loss": 0.3688,
"step": 69
},
{
"epoch": 1.3270142180094786,
"grad_norm": 0.2421875,
"learning_rate": 0.00012760969730974694,
"loss": 0.3338,
"step": 70
},
{
"epoch": 1.3459715639810428,
"grad_norm": 0.212890625,
"learning_rate": 0.0001255353295116187,
"loss": 0.3206,
"step": 71
},
{
"epoch": 1.3649289099526065,
"grad_norm": 0.2109375,
"learning_rate": 0.00012344913895704097,
"loss": 0.3313,
"step": 72
},
{
"epoch": 1.3649289099526065,
"eval_loss": 0.3852485120296478,
"eval_runtime": 7.69,
"eval_samples_per_second": 46.294,
"eval_steps_per_second": 11.573,
"step": 72
},
{
"epoch": 1.3838862559241707,
"grad_norm": 0.23046875,
"learning_rate": 0.00012135209154397962,
"loss": 0.3514,
"step": 73
},
{
"epoch": 1.4028436018957346,
"grad_norm": 0.22265625,
"learning_rate": 0.000119245158197083,
"loss": 0.3608,
"step": 74
},
{
"epoch": 1.4218009478672986,
"grad_norm": 0.2060546875,
"learning_rate": 0.00011712931441814776,
"loss": 0.3361,
"step": 75
},
{
"epoch": 1.4407582938388626,
"grad_norm": 0.1884765625,
"learning_rate": 0.00011500553983446527,
"loss": 0.3095,
"step": 76
},
{
"epoch": 1.4597156398104265,
"grad_norm": 0.2060546875,
"learning_rate": 0.0001128748177452581,
"loss": 0.318,
"step": 77
},
{
"epoch": 1.4786729857819905,
"grad_norm": 0.2265625,
"learning_rate": 0.00011073813466641632,
"loss": 0.374,
"step": 78
},
{
"epoch": 1.4786729857819905,
"eval_loss": 0.3739808201789856,
"eval_runtime": 7.5558,
"eval_samples_per_second": 47.116,
"eval_steps_per_second": 11.779,
"step": 78
},
{
"epoch": 1.4976303317535544,
"grad_norm": 0.216796875,
"learning_rate": 0.00010859647987374467,
"loss": 0.3174,
"step": 79
},
{
"epoch": 1.5165876777251186,
"grad_norm": 0.251953125,
"learning_rate": 0.00010645084494493165,
"loss": 0.3281,
"step": 80
},
{
"epoch": 1.5355450236966823,
"grad_norm": 0.2236328125,
"learning_rate": 0.00010430222330045304,
"loss": 0.3235,
"step": 81
},
{
"epoch": 1.5545023696682465,
"grad_norm": 0.2236328125,
"learning_rate": 0.00010215160974362223,
"loss": 0.3064,
"step": 82
},
{
"epoch": 1.5734597156398105,
"grad_norm": 0.25390625,
"learning_rate": 0.0001,
"loss": 0.3278,
"step": 83
},
{
"epoch": 1.5924170616113744,
"grad_norm": 0.2373046875,
"learning_rate": 9.784839025637778e-05,
"loss": 0.3246,
"step": 84
},
{
"epoch": 1.5924170616113744,
"eval_loss": 0.3657410442829132,
"eval_runtime": 7.41,
"eval_samples_per_second": 48.043,
"eval_steps_per_second": 12.011,
"step": 84
},
{
"epoch": 1.6113744075829384,
"grad_norm": 0.23046875,
"learning_rate": 9.569777669954694e-05,
"loss": 0.3257,
"step": 85
},
{
"epoch": 1.6303317535545023,
"grad_norm": 0.19921875,
"learning_rate": 9.354915505506839e-05,
"loss": 0.3247,
"step": 86
},
{
"epoch": 1.6492890995260665,
"grad_norm": 0.234375,
"learning_rate": 9.140352012625537e-05,
"loss": 0.3372,
"step": 87
},
{
"epoch": 1.6682464454976302,
"grad_norm": 0.2216796875,
"learning_rate": 8.92618653335837e-05,
"loss": 0.2964,
"step": 88
},
{
"epoch": 1.6872037914691944,
"grad_norm": 0.2255859375,
"learning_rate": 8.712518225474191e-05,
"loss": 0.3604,
"step": 89
},
{
"epoch": 1.7061611374407581,
"grad_norm": 0.2197265625,
"learning_rate": 8.499446016553474e-05,
"loss": 0.3392,
"step": 90
},
{
"epoch": 1.7061611374407581,
"eval_loss": 0.3591402769088745,
"eval_runtime": 7.6868,
"eval_samples_per_second": 46.313,
"eval_steps_per_second": 11.578,
"step": 90
},
{
"epoch": 1.7251184834123223,
"grad_norm": 0.2197265625,
"learning_rate": 8.287068558185225e-05,
"loss": 0.3367,
"step": 91
},
{
"epoch": 1.7440758293838863,
"grad_norm": 0.2138671875,
"learning_rate": 8.075484180291701e-05,
"loss": 0.2959,
"step": 92
},
{
"epoch": 1.7630331753554502,
"grad_norm": 0.2119140625,
"learning_rate": 7.864790845602039e-05,
"loss": 0.3239,
"step": 93
},
{
"epoch": 1.7819905213270142,
"grad_norm": 0.2060546875,
"learning_rate": 7.655086104295904e-05,
"loss": 0.2984,
"step": 94
},
{
"epoch": 1.8009478672985781,
"grad_norm": 0.236328125,
"learning_rate": 7.446467048838131e-05,
"loss": 0.3346,
"step": 95
},
{
"epoch": 1.8199052132701423,
"grad_norm": 0.255859375,
"learning_rate": 7.239030269025311e-05,
"loss": 0.3309,
"step": 96
},
{
"epoch": 1.8199052132701423,
"eval_loss": 0.3504635691642761,
"eval_runtime": 8.1061,
"eval_samples_per_second": 43.918,
"eval_steps_per_second": 10.979,
"step": 96
},
{
"epoch": 1.838862559241706,
"grad_norm": 0.2333984375,
"learning_rate": 7.032871807265096e-05,
"loss": 0.3313,
"step": 97
},
{
"epoch": 1.8578199052132702,
"grad_norm": 0.267578125,
"learning_rate": 6.82808711410894e-05,
"loss": 0.32,
"step": 98
},
{
"epoch": 1.876777251184834,
"grad_norm": 0.267578125,
"learning_rate": 6.624771004058868e-05,
"loss": 0.4074,
"step": 99
},
{
"epoch": 1.8957345971563981,
"grad_norm": 0.2041015625,
"learning_rate": 6.423017611668745e-05,
"loss": 0.2994,
"step": 100
},
{
"epoch": 1.914691943127962,
"grad_norm": 0.2216796875,
"learning_rate": 6.22292034796035e-05,
"loss": 0.3063,
"step": 101
},
{
"epoch": 1.933649289099526,
"grad_norm": 0.2138671875,
"learning_rate": 6.024571857174443e-05,
"loss": 0.3621,
"step": 102
},
{
"epoch": 1.933649289099526,
"eval_loss": 0.343742311000824,
"eval_runtime": 7.3443,
"eval_samples_per_second": 48.473,
"eval_steps_per_second": 12.118,
"step": 102
},
{
"epoch": 1.95260663507109,
"grad_norm": 0.265625,
"learning_rate": 5.828063973876834e-05,
"loss": 0.302,
"step": 103
},
{
"epoch": 1.971563981042654,
"grad_norm": 0.232421875,
"learning_rate": 5.633487680439361e-05,
"loss": 0.3248,
"step": 104
},
{
"epoch": 1.9905213270142181,
"grad_norm": 0.201171875,
"learning_rate": 5.440933064915414e-05,
"loss": 0.2853,
"step": 105
},
{
"epoch": 2.009478672985782,
"grad_norm": 0.2109375,
"learning_rate": 5.2504892793295e-05,
"loss": 0.2788,
"step": 106
},
{
"epoch": 2.028436018957346,
"grad_norm": 0.2177734375,
"learning_rate": 5.062244498400228e-05,
"loss": 0.2791,
"step": 107
},
{
"epoch": 2.0473933649289098,
"grad_norm": 0.208984375,
"learning_rate": 4.876285878715764e-05,
"loss": 0.2819,
"step": 108
},
{
"epoch": 2.0473933649289098,
"eval_loss": 0.3415816128253937,
"eval_runtime": 7.5308,
"eval_samples_per_second": 47.273,
"eval_steps_per_second": 11.818,
"step": 108
},
{
"epoch": 2.066350710900474,
"grad_norm": 0.224609375,
"learning_rate": 4.6926995183806644e-05,
"loss": 0.2547,
"step": 109
},
{
"epoch": 2.085308056872038,
"grad_norm": 0.22265625,
"learning_rate": 4.5115704171528105e-05,
"loss": 0.2911,
"step": 110
},
{
"epoch": 2.104265402843602,
"grad_norm": 0.220703125,
"learning_rate": 4.332982437088825e-05,
"loss": 0.2756,
"step": 111
},
{
"epoch": 2.123222748815166,
"grad_norm": 0.267578125,
"learning_rate": 4.1570182637163155e-05,
"loss": 0.2825,
"step": 112
},
{
"epoch": 2.1421800947867298,
"grad_norm": 0.26171875,
"learning_rate": 3.9837593677507726e-05,
"loss": 0.2358,
"step": 113
},
{
"epoch": 2.161137440758294,
"grad_norm": 0.25390625,
"learning_rate": 3.813285967374969e-05,
"loss": 0.2672,
"step": 114
},
{
"epoch": 2.161137440758294,
"eval_loss": 0.3413563072681427,
"eval_runtime": 7.5937,
"eval_samples_per_second": 46.881,
"eval_steps_per_second": 11.72,
"step": 114
},
{
"epoch": 2.1800947867298577,
"grad_norm": 0.2255859375,
"learning_rate": 3.645676991098227e-05,
"loss": 0.2753,
"step": 115
},
{
"epoch": 2.199052132701422,
"grad_norm": 0.23046875,
"learning_rate": 3.4810100412128747e-05,
"loss": 0.2588,
"step": 116
},
{
"epoch": 2.2180094786729856,
"grad_norm": 0.2578125,
"learning_rate": 3.319361357864663e-05,
"loss": 0.2964,
"step": 117
},
{
"epoch": 2.2369668246445498,
"grad_norm": 0.2734375,
"learning_rate": 3.160805783753897e-05,
"loss": 0.2438,
"step": 118
},
{
"epoch": 2.2559241706161135,
"grad_norm": 0.2451171875,
"learning_rate": 3.005416729483531e-05,
"loss": 0.262,
"step": 119
},
{
"epoch": 2.2748815165876777,
"grad_norm": 0.2109375,
"learning_rate": 2.853266139570391e-05,
"loss": 0.2284,
"step": 120
},
{
"epoch": 2.2748815165876777,
"eval_loss": 0.33752232789993286,
"eval_runtime": 8.0472,
"eval_samples_per_second": 44.239,
"eval_steps_per_second": 11.06,
"step": 120
},
{
"epoch": 2.293838862559242,
"grad_norm": 0.197265625,
"learning_rate": 2.7044244591351232e-05,
"loss": 0.2287,
"step": 121
},
{
"epoch": 2.3127962085308056,
"grad_norm": 0.2333984375,
"learning_rate": 2.5589606012863963e-05,
"loss": 0.247,
"step": 122
},
{
"epoch": 2.3317535545023698,
"grad_norm": 0.240234375,
"learning_rate": 2.4169419152143768e-05,
"loss": 0.2797,
"step": 123
},
{
"epoch": 2.3507109004739335,
"grad_norm": 0.2333984375,
"learning_rate": 2.2784341550083576e-05,
"loss": 0.2577,
"step": 124
},
{
"epoch": 2.3696682464454977,
"grad_norm": 0.23828125,
"learning_rate": 2.1435014492128547e-05,
"loss": 0.2792,
"step": 125
},
{
"epoch": 2.3886255924170614,
"grad_norm": 0.2412109375,
"learning_rate": 2.0122062711363532e-05,
"loss": 0.2836,
"step": 126
},
{
"epoch": 2.3886255924170614,
"eval_loss": 0.3353247344493866,
"eval_runtime": 6.6458,
"eval_samples_per_second": 53.567,
"eval_steps_per_second": 13.392,
"step": 126
},
{
"epoch": 2.4075829383886256,
"grad_norm": 0.2333984375,
"learning_rate": 1.8846094099263912e-05,
"loss": 0.2694,
"step": 127
},
{
"epoch": 2.4265402843601898,
"grad_norm": 0.216796875,
"learning_rate": 1.7607699424244585e-05,
"loss": 0.2666,
"step": 128
},
{
"epoch": 2.4454976303317535,
"grad_norm": 0.232421875,
"learning_rate": 1.6407452058136296e-05,
"loss": 0.2747,
"step": 129
},
{
"epoch": 2.4644549763033177,
"grad_norm": 0.2470703125,
"learning_rate": 1.5245907710716911e-05,
"loss": 0.3057,
"step": 130
}
],
"logging_steps": 1,
"max_steps": 156,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 26,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.922942328719278e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}