{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9980582524271845,
"eval_steps": 500,
"global_step": 257,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 5.7377284333132515e-05,
"grad_norm": 0.427734375,
"learning_rate": 1.1474469305794606e-07,
"loss": 1.3587,
"step": 1
},
{
"epoch": 0.00028688642166566254,
"grad_norm": 0.58203125,
"learning_rate": 5.737234652897304e-07,
"loss": 1.099,
"step": 5
},
{
"epoch": 0.0005737728433313251,
"grad_norm": 0.349609375,
"learning_rate": 1.1474469305794607e-06,
"loss": 1.1269,
"step": 10
},
{
"epoch": 0.0008606592649969877,
"grad_norm": 0.416015625,
"learning_rate": 1.721170395869191e-06,
"loss": 1.1759,
"step": 15
},
{
"epoch": 0.0011475456866626502,
"grad_norm": 0.337890625,
"learning_rate": 2.2948938611589215e-06,
"loss": 1.1985,
"step": 20
},
{
"epoch": 0.001434432108328313,
"grad_norm": 0.359375,
"learning_rate": 2.868617326448652e-06,
"loss": 1.1267,
"step": 25
},
{
"epoch": 0.0017213185299939755,
"grad_norm": 0.30859375,
"learning_rate": 3.442340791738382e-06,
"loss": 1.062,
"step": 30
},
{
"epoch": 0.002008204951659638,
"grad_norm": 0.3515625,
"learning_rate": 4.016064257028113e-06,
"loss": 1.1175,
"step": 35
},
{
"epoch": 0.0022950913733253003,
"grad_norm": 0.330078125,
"learning_rate": 4.589787722317843e-06,
"loss": 1.1421,
"step": 40
},
{
"epoch": 0.002581977794990963,
"grad_norm": 0.32421875,
"learning_rate": 5.163511187607573e-06,
"loss": 1.1013,
"step": 45
},
{
"epoch": 0.002868864216656626,
"grad_norm": 0.33984375,
"learning_rate": 5.737234652897304e-06,
"loss": 1.138,
"step": 50
},
{
"epoch": 0.0031557506383222884,
"grad_norm": 0.330078125,
"learning_rate": 6.310958118187034e-06,
"loss": 1.128,
"step": 55
},
{
"epoch": 0.003442637059987951,
"grad_norm": 0.318359375,
"learning_rate": 6.884681583476764e-06,
"loss": 1.1071,
"step": 60
},
{
"epoch": 0.0037295234816536135,
"grad_norm": 0.322265625,
"learning_rate": 7.4584050487664955e-06,
"loss": 1.1442,
"step": 65
},
{
"epoch": 0.004016409903319276,
"grad_norm": 0.326171875,
"learning_rate": 8.032128514056226e-06,
"loss": 1.1746,
"step": 70
},
{
"epoch": 0.004303296324984938,
"grad_norm": 0.2890625,
"learning_rate": 8.605851979345956e-06,
"loss": 1.0971,
"step": 75
},
{
"epoch": 0.004590182746650601,
"grad_norm": 0.306640625,
"learning_rate": 9.179575444635686e-06,
"loss": 1.1355,
"step": 80
},
{
"epoch": 0.004877069168316263,
"grad_norm": 0.28125,
"learning_rate": 9.753298909925416e-06,
"loss": 1.0908,
"step": 85
},
{
"epoch": 0.005163955589981926,
"grad_norm": 0.30078125,
"learning_rate": 1.0327022375215146e-05,
"loss": 1.0386,
"step": 90
},
{
"epoch": 0.005450842011647588,
"grad_norm": 0.294921875,
"learning_rate": 1.0900745840504876e-05,
"loss": 1.1107,
"step": 95
},
{
"epoch": 0.005737728433313252,
"grad_norm": 0.30078125,
"learning_rate": 1.1474469305794608e-05,
"loss": 1.071,
"step": 100
},
{
"epoch": 0.006024614854978914,
"grad_norm": 0.380859375,
"learning_rate": 1.2048192771084338e-05,
"loss": 1.1582,
"step": 105
},
{
"epoch": 0.006311501276644577,
"grad_norm": 0.40625,
"learning_rate": 1.2621916236374069e-05,
"loss": 1.0057,
"step": 110
},
{
"epoch": 0.006598387698310239,
"grad_norm": 0.25390625,
"learning_rate": 1.3195639701663797e-05,
"loss": 0.973,
"step": 115
},
{
"epoch": 0.006885274119975902,
"grad_norm": 0.291015625,
"learning_rate": 1.3769363166953527e-05,
"loss": 0.9738,
"step": 120
},
{
"epoch": 0.007172160541641564,
"grad_norm": 0.2734375,
"learning_rate": 1.434308663224326e-05,
"loss": 1.0439,
"step": 125
},
{
"epoch": 0.007459046963307227,
"grad_norm": 0.30078125,
"learning_rate": 1.4916810097532991e-05,
"loss": 1.0216,
"step": 130
},
{
"epoch": 0.0077459333849728895,
"grad_norm": 0.236328125,
"learning_rate": 1.549053356282272e-05,
"loss": 0.9903,
"step": 135
},
{
"epoch": 0.008032819806638551,
"grad_norm": 0.24609375,
"learning_rate": 1.606425702811245e-05,
"loss": 0.9875,
"step": 140
},
{
"epoch": 0.008319706228304214,
"grad_norm": 0.25,
"learning_rate": 1.663798049340218e-05,
"loss": 0.9877,
"step": 145
},
{
"epoch": 0.008606592649969876,
"grad_norm": 0.2890625,
"learning_rate": 1.721170395869191e-05,
"loss": 0.9866,
"step": 150
},
{
"epoch": 0.008893479071635539,
"grad_norm": 0.2578125,
"learning_rate": 1.7785427423981642e-05,
"loss": 0.9855,
"step": 155
},
{
"epoch": 0.009180365493301201,
"grad_norm": 0.2890625,
"learning_rate": 1.8359150889271372e-05,
"loss": 1.0016,
"step": 160
},
{
"epoch": 0.009467251914966864,
"grad_norm": 0.28125,
"learning_rate": 1.8932874354561102e-05,
"loss": 1.025,
"step": 165
},
{
"epoch": 0.009754138336632526,
"grad_norm": 0.28125,
"learning_rate": 1.9506597819850832e-05,
"loss": 1.015,
"step": 170
},
{
"epoch": 0.010041024758298189,
"grad_norm": 0.27734375,
"learning_rate": 2.0080321285140562e-05,
"loss": 0.967,
"step": 175
},
{
"epoch": 0.010327911179963851,
"grad_norm": 0.24609375,
"learning_rate": 2.0654044750430293e-05,
"loss": 1.0354,
"step": 180
},
{
"epoch": 0.010614797601629514,
"grad_norm": 0.259765625,
"learning_rate": 2.1227768215720023e-05,
"loss": 1.04,
"step": 185
},
{
"epoch": 0.010901684023295177,
"grad_norm": 0.275390625,
"learning_rate": 2.1801491681009753e-05,
"loss": 1.05,
"step": 190
},
{
"epoch": 0.01118857044496084,
"grad_norm": 0.263671875,
"learning_rate": 2.2375215146299486e-05,
"loss": 0.9229,
"step": 195
},
{
"epoch": 0.011475456866626503,
"grad_norm": 0.267578125,
"learning_rate": 2.2948938611589217e-05,
"loss": 1.0781,
"step": 200
},
{
"epoch": 0.7961165048543689,
"grad_norm": 0.90234375,
"learning_rate": 2.3981489016705205e-05,
"loss": 0.9635,
"step": 205
},
{
"epoch": 0.8155339805825242,
"grad_norm": 0.5234375,
"learning_rate": 1.9742585080206755e-05,
"loss": 0.8979,
"step": 210
},
{
"epoch": 0.8349514563106796,
"grad_norm": 0.45703125,
"learning_rate": 1.587464671688187e-05,
"loss": 0.828,
"step": 215
},
{
"epoch": 0.8543689320388349,
"grad_norm": 0.47265625,
"learning_rate": 1.2395552299381741e-05,
"loss": 0.7969,
"step": 220
},
{
"epoch": 0.8737864077669902,
"grad_norm": 0.470703125,
"learning_rate": 9.321382887082563e-06,
"loss": 0.7724,
"step": 225
},
{
"epoch": 0.8932038834951457,
"grad_norm": 0.447265625,
"learning_rate": 6.666347896263325e-06,
"loss": 0.7529,
"step": 230
},
{
"epoch": 0.912621359223301,
"grad_norm": 0.443359375,
"learning_rate": 4.442719421385922e-06,
"loss": 0.751,
"step": 235
},
{
"epoch": 0.9320388349514563,
"grad_norm": 0.408203125,
"learning_rate": 2.6607755110584887e-06,
"loss": 0.7345,
"step": 240
},
{
"epoch": 0.9514563106796117,
"grad_norm": 0.400390625,
"learning_rate": 1.3287526608711131e-06,
"loss": 0.7324,
"step": 245
},
{
"epoch": 0.970873786407767,
"grad_norm": 0.369140625,
"learning_rate": 4.5280774269154115e-07,
"loss": 0.7255,
"step": 250
},
{
"epoch": 0.9902912621359223,
"grad_norm": 0.38671875,
"learning_rate": 3.6989546391297256e-08,
"loss": 0.7229,
"step": 255
},
{
"epoch": 0.9980582524271845,
"eval_loss": 0.7271392941474915,
"eval_runtime": 30.5484,
"eval_samples_per_second": 3.601,
"eval_steps_per_second": 0.458,
"step": 257
},
{
"epoch": 0.9980582524271845,
"step": 257,
"total_flos": 1.807042878719918e+17,
"train_loss": 0.17443326521476418,
"train_runtime": 473.1182,
"train_samples_per_second": 4.354,
"train_steps_per_second": 0.543
}
],
"logging_steps": 5,
"max_steps": 257,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.807042878719918e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}