{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 325,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.26868827253641747,
"learning_rate": 6.060606060606061e-06,
"loss": 0.7782,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 0.21382632169384555,
"learning_rate": 3.0303030303030306e-05,
"loss": 0.7798,
"step": 5
},
{
"epoch": 0.03,
"grad_norm": 0.22445606867737244,
"learning_rate": 6.060606060606061e-05,
"loss": 0.7851,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 0.2072105558411773,
"learning_rate": 9.090909090909092e-05,
"loss": 0.7855,
"step": 15
},
{
"epoch": 0.06,
"grad_norm": 0.17995218448427217,
"learning_rate": 0.00012121212121212122,
"loss": 0.792,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 0.226508952573423,
"learning_rate": 0.00015151515151515152,
"loss": 0.7382,
"step": 25
},
{
"epoch": 0.09,
"grad_norm": 0.18958014075029098,
"learning_rate": 0.00018181818181818183,
"loss": 0.7419,
"step": 30
},
{
"epoch": 0.11,
"grad_norm": 0.19949889749656097,
"learning_rate": 0.00019997685019798912,
"loss": 0.7642,
"step": 35
},
{
"epoch": 0.12,
"grad_norm": 0.19567934772580736,
"learning_rate": 0.0001997165380022878,
"loss": 0.7607,
"step": 40
},
{
"epoch": 0.14,
"grad_norm": 0.18969776374927835,
"learning_rate": 0.000199167731989929,
"loss": 0.795,
"step": 45
},
{
"epoch": 0.15,
"grad_norm": 0.1942706799722158,
"learning_rate": 0.0001983320199330545,
"loss": 0.764,
"step": 50
},
{
"epoch": 0.17,
"grad_norm": 0.19464988909031225,
"learning_rate": 0.00019721181966290613,
"loss": 0.7836,
"step": 55
},
{
"epoch": 0.18,
"grad_norm": 0.1849352970071158,
"learning_rate": 0.00019581037207470382,
"loss": 0.757,
"step": 60
},
{
"epoch": 0.2,
"grad_norm": 0.17976128059849958,
"learning_rate": 0.00019413173175128473,
"loss": 0.7616,
"step": 65
},
{
"epoch": 0.22,
"grad_norm": 0.1934718609207969,
"learning_rate": 0.00019218075523263104,
"loss": 0.7538,
"step": 70
},
{
"epoch": 0.23,
"grad_norm": 0.16955946612078765,
"learning_rate": 0.00018996308696522433,
"loss": 0.7617,
"step": 75
},
{
"epoch": 0.25,
"grad_norm": 0.19860710500579018,
"learning_rate": 0.00018748514297187648,
"loss": 0.7995,
"step": 80
},
{
"epoch": 0.26,
"grad_norm": 0.17834039868116905,
"learning_rate": 0.00018475409228928312,
"loss": 0.7539,
"step": 85
},
{
"epoch": 0.28,
"grad_norm": 0.1889302979453787,
"learning_rate": 0.00018177783622700327,
"loss": 0.7754,
"step": 90
},
{
"epoch": 0.29,
"grad_norm": 0.17928302611917235,
"learning_rate": 0.00017856498550787144,
"loss": 0.7924,
"step": 95
},
{
"epoch": 0.31,
"grad_norm": 0.18965987325021239,
"learning_rate": 0.00017512483535597867,
"loss": 0.7547,
"step": 100
},
{
"epoch": 0.32,
"grad_norm": 0.16948470382160882,
"learning_rate": 0.00017146733860429612,
"loss": 0.7755,
"step": 105
},
{
"epoch": 0.34,
"grad_norm": 0.20373009441973453,
"learning_rate": 0.0001676030768997445,
"loss": 0.7452,
"step": 110
},
{
"epoch": 0.35,
"grad_norm": 0.18030063668475196,
"learning_rate": 0.00016354323008901776,
"loss": 0.7499,
"step": 115
},
{
"epoch": 0.37,
"grad_norm": 0.16132188005262432,
"learning_rate": 0.00015929954387373103,
"loss": 0.7344,
"step": 120
},
{
"epoch": 0.38,
"grad_norm": 0.2014655387761585,
"learning_rate": 0.00015488429582847192,
"loss": 0.7909,
"step": 125
},
{
"epoch": 0.4,
"grad_norm": 0.19701253442033323,
"learning_rate": 0.00015031025988006936,
"loss": 0.7846,
"step": 130
},
{
"epoch": 0.42,
"grad_norm": 0.18599715688382074,
"learning_rate": 0.00014559066935084588,
"loss": 0.7884,
"step": 135
},
{
"epoch": 0.43,
"grad_norm": 0.15916898081839567,
"learning_rate": 0.00014073917867277557,
"loss": 0.7478,
"step": 140
},
{
"epoch": 0.45,
"grad_norm": 0.1693289847957747,
"learning_rate": 0.0001357698238833126,
"loss": 0.758,
"step": 145
},
{
"epoch": 0.46,
"grad_norm": 0.1724296952005482,
"learning_rate": 0.000130696982017182,
"loss": 0.7349,
"step": 150
},
{
"epoch": 0.48,
"grad_norm": 0.20365543265357713,
"learning_rate": 0.0001255353295116187,
"loss": 0.7581,
"step": 155
},
{
"epoch": 0.49,
"grad_norm": 0.17925645984310104,
"learning_rate": 0.00012029979974539234,
"loss": 0.7428,
"step": 160
},
{
"epoch": 0.51,
"grad_norm": 0.1849261223570645,
"learning_rate": 0.00011500553983446527,
"loss": 0.7915,
"step": 165
},
{
"epoch": 0.52,
"grad_norm": 0.17147514694965366,
"learning_rate": 0.00010966786680927874,
"loss": 0.7282,
"step": 170
},
{
"epoch": 0.54,
"grad_norm": 0.19718678657625408,
"learning_rate": 0.00010430222330045304,
"loss": 0.7807,
"step": 175
},
{
"epoch": 0.55,
"grad_norm": 0.1697756132509757,
"learning_rate": 9.892413286110886e-05,
"loss": 0.7522,
"step": 180
},
{
"epoch": 0.57,
"grad_norm": 0.1745926097752596,
"learning_rate": 9.354915505506839e-05,
"loss": 0.7656,
"step": 185
},
{
"epoch": 0.58,
"grad_norm": 0.17991554784663008,
"learning_rate": 8.81928404408726e-05,
"loss": 0.7425,
"step": 190
},
{
"epoch": 0.6,
"grad_norm": 0.1638975774298517,
"learning_rate": 8.287068558185225e-05,
"loss": 0.785,
"step": 195
},
{
"epoch": 0.62,
"grad_norm": 0.1721376274882642,
"learning_rate": 7.759808821241406e-05,
"loss": 0.747,
"step": 200
},
{
"epoch": 0.63,
"grad_norm": 0.16645909181505114,
"learning_rate": 7.239030269025311e-05,
"loss": 0.7436,
"step": 205
},
{
"epoch": 0.65,
"grad_norm": 0.17668278165163676,
"learning_rate": 6.726239586337408e-05,
"loss": 0.76,
"step": 210
},
{
"epoch": 0.66,
"grad_norm": 0.1592821063866204,
"learning_rate": 6.22292034796035e-05,
"loss": 0.7532,
"step": 215
},
{
"epoch": 0.68,
"grad_norm": 0.18211269157973645,
"learning_rate": 5.730528726470792e-05,
"loss": 0.7684,
"step": 220
},
{
"epoch": 0.69,
"grad_norm": 0.1611844005218168,
"learning_rate": 5.2504892793295e-05,
"loss": 0.7432,
"step": 225
},
{
"epoch": 0.71,
"grad_norm": 0.1770721524356504,
"learning_rate": 4.7841908274384616e-05,
"loss": 0.7719,
"step": 230
},
{
"epoch": 0.72,
"grad_norm": 0.17458138059176753,
"learning_rate": 4.332982437088825e-05,
"loss": 0.7153,
"step": 235
},
{
"epoch": 0.74,
"grad_norm": 0.1764916869298734,
"learning_rate": 3.898169516924398e-05,
"loss": 0.7509,
"step": 240
},
{
"epoch": 0.75,
"grad_norm": 0.17474182112370204,
"learning_rate": 3.4810100412128747e-05,
"loss": 0.7562,
"step": 245
},
{
"epoch": 0.77,
"grad_norm": 0.1741562771880736,
"learning_rate": 3.0827109103512643e-05,
"loss": 0.7669,
"step": 250
},
{
"epoch": 0.78,
"grad_norm": 0.17549490206623847,
"learning_rate": 2.7044244591351232e-05,
"loss": 0.7592,
"step": 255
},
{
"epoch": 0.8,
"grad_norm": 0.16399003172635285,
"learning_rate": 2.3472451228937253e-05,
"loss": 0.7396,
"step": 260
},
{
"epoch": 0.82,
"grad_norm": 0.1713214529866966,
"learning_rate": 2.0122062711363532e-05,
"loss": 0.7766,
"step": 265
},
{
"epoch": 0.83,
"grad_norm": 0.17574824064745212,
"learning_rate": 1.7002772178705716e-05,
"loss": 0.7329,
"step": 270
},
{
"epoch": 0.85,
"grad_norm": 0.16897840325162736,
"learning_rate": 1.4123604172419713e-05,
"loss": 0.7723,
"step": 275
},
{
"epoch": 0.86,
"grad_norm": 0.18239238197655358,
"learning_rate": 1.149288852608743e-05,
"loss": 0.7558,
"step": 280
},
{
"epoch": 0.88,
"grad_norm": 0.16446806582557563,
"learning_rate": 9.118236266049707e-06,
"loss": 0.731,
"step": 285
},
{
"epoch": 0.89,
"grad_norm": 0.17024389183471494,
"learning_rate": 7.0065175916482095e-06,
"loss": 0.7599,
"step": 290
},
{
"epoch": 0.91,
"grad_norm": 0.15264356146830035,
"learning_rate": 5.163841998782837e-06,
"loss": 0.7644,
"step": 295
},
{
"epoch": 0.92,
"grad_norm": 0.16324609453084554,
"learning_rate": 3.595540604290437e-06,
"loss": 0.7581,
"step": 300
},
{
"epoch": 0.94,
"grad_norm": 0.1710138375738759,
"learning_rate": 2.30615072228183e-06,
"loss": 0.7332,
"step": 305
},
{
"epoch": 0.95,
"grad_norm": 0.1611044475082952,
"learning_rate": 1.2994027370611173e-06,
"loss": 0.7627,
"step": 310
},
{
"epoch": 0.97,
"grad_norm": 0.1514646487097857,
"learning_rate": 5.782093106048159e-07,
"loss": 0.7366,
"step": 315
},
{
"epoch": 0.98,
"grad_norm": 0.17261842858442877,
"learning_rate": 1.446569558255395e-07,
"loss": 0.7436,
"step": 320
},
{
"epoch": 1.0,
"grad_norm": 0.16882560969081586,
"learning_rate": 0.0,
"loss": 0.7491,
"step": 325
},
{
"epoch": 1.0,
"eval_loss": 0.7773829698562622,
"eval_runtime": 46.5077,
"eval_samples_per_second": 4.967,
"eval_steps_per_second": 0.172,
"step": 325
},
{
"epoch": 1.0,
"step": 325,
"total_flos": 4962652747988992.0,
"train_loss": 0.7601131853690514,
"train_runtime": 11993.6222,
"train_samples_per_second": 1.733,
"train_steps_per_second": 0.027
}
],
"logging_steps": 5,
"max_steps": 325,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 4962652747988992.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}