Qwen1.5-4B-Chat-qlora-gozaru / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9984,
"eval_steps": 500,
"global_step": 312,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 0.8396336436271667,
"learning_rate": 0.00019987329060020616,
"loss": 2.8923,
"step": 5
},
{
"epoch": 0.03,
"grad_norm": 0.7428969740867615,
"learning_rate": 0.00019949348350626456,
"loss": 2.0944,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 0.5937550067901611,
"learning_rate": 0.00019886154122075343,
"loss": 2.0323,
"step": 15
},
{
"epoch": 0.06,
"grad_norm": 0.4460415542125702,
"learning_rate": 0.00019797906520422677,
"loss": 1.9436,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 0.4390580952167511,
"learning_rate": 0.00019684829181681234,
"loss": 2.0292,
"step": 25
},
{
"epoch": 0.1,
"grad_norm": 0.43469828367233276,
"learning_rate": 0.00019547208665085457,
"loss": 1.9654,
"step": 30
},
{
"epoch": 0.11,
"grad_norm": 0.48699721693992615,
"learning_rate": 0.0001938539372689649,
"loss": 1.9041,
"step": 35
},
{
"epoch": 0.13,
"grad_norm": 0.5205313563346863,
"learning_rate": 0.00019199794436588243,
"loss": 1.8992,
"step": 40
},
{
"epoch": 0.14,
"grad_norm": 0.5224802494049072,
"learning_rate": 0.00018990881137654258,
"loss": 1.8393,
"step": 45
},
{
"epoch": 0.16,
"grad_norm": 0.5004997849464417,
"learning_rate": 0.0001875918325566888,
"loss": 1.615,
"step": 50
},
{
"epoch": 0.18,
"grad_norm": 0.5422343611717224,
"learning_rate": 0.00018505287956623297,
"loss": 1.9873,
"step": 55
},
{
"epoch": 0.19,
"grad_norm": 0.47483357787132263,
"learning_rate": 0.00018229838658936564,
"loss": 1.9264,
"step": 60
},
{
"epoch": 0.21,
"grad_norm": 0.4191422164440155,
"learning_rate": 0.00017933533402912354,
"loss": 1.828,
"step": 65
},
{
"epoch": 0.22,
"grad_norm": 0.41457584500312805,
"learning_rate": 0.00017617123081773591,
"loss": 1.8083,
"step": 70
},
{
"epoch": 0.24,
"grad_norm": 0.524440348148346,
"learning_rate": 0.00017281409538757883,
"loss": 1.8268,
"step": 75
},
{
"epoch": 0.26,
"grad_norm": 0.5396173000335693,
"learning_rate": 0.00016927243535095997,
"loss": 1.7587,
"step": 80
},
{
"epoch": 0.27,
"grad_norm": 0.5633752942085266,
"learning_rate": 0.0001655552259402295,
"loss": 1.7939,
"step": 85
},
{
"epoch": 0.29,
"grad_norm": 0.5931205153465271,
"learning_rate": 0.00016167188726285434,
"loss": 1.848,
"step": 90
},
{
"epoch": 0.3,
"grad_norm": 0.677586019039154,
"learning_rate": 0.00015763226042909455,
"loss": 1.7407,
"step": 95
},
{
"epoch": 0.32,
"grad_norm": 0.5012004971504211,
"learning_rate": 0.0001534465826127801,
"loss": 1.8559,
"step": 100
},
{
"epoch": 0.34,
"grad_norm": 0.5996416807174683,
"learning_rate": 0.00014912546110838775,
"loss": 1.8363,
"step": 105
},
{
"epoch": 0.35,
"grad_norm": 0.5592181086540222,
"learning_rate": 0.00014467984645016258,
"loss": 1.6798,
"step": 110
},
{
"epoch": 0.37,
"grad_norm": 0.6498942971229553,
"learning_rate": 0.00014012100466140578,
"loss": 1.6868,
"step": 115
},
{
"epoch": 0.38,
"grad_norm": 0.5339984893798828,
"learning_rate": 0.00013546048870425356,
"loss": 1.8213,
"step": 120
},
{
"epoch": 0.4,
"grad_norm": 0.5681377649307251,
"learning_rate": 0.00013071010920229909,
"loss": 1.7561,
"step": 125
},
{
"epoch": 0.42,
"grad_norm": 0.5253719687461853,
"learning_rate": 0.00012588190451025207,
"loss": 1.7834,
"step": 130
},
{
"epoch": 0.43,
"grad_norm": 0.5420334935188293,
"learning_rate": 0.00012098811020648475,
"loss": 1.9342,
"step": 135
},
{
"epoch": 0.45,
"grad_norm": 0.6371601819992065,
"learning_rate": 0.00011604112808577603,
"loss": 1.8081,
"step": 140
},
{
"epoch": 0.46,
"grad_norm": 0.5578604340553284,
"learning_rate": 0.000111053494730832,
"loss": 1.7449,
"step": 145
},
{
"epoch": 0.48,
"grad_norm": 0.768509030342102,
"learning_rate": 0.00010603784974222861,
"loss": 1.8202,
"step": 150
},
{
"epoch": 0.5,
"grad_norm": 0.627295970916748,
"learning_rate": 0.00010100690370728755,
"loss": 1.7587,
"step": 155
},
{
"epoch": 0.51,
"grad_norm": 0.6519939303398132,
"learning_rate": 9.597340598905852e-05,
"loss": 1.8827,
"step": 160
},
{
"epoch": 0.53,
"grad_norm": 0.6148818135261536,
"learning_rate": 9.095011241703623e-05,
"loss": 1.8022,
"step": 165
},
{
"epoch": 0.54,
"grad_norm": 0.5009266138076782,
"learning_rate": 8.594975296149076e-05,
"loss": 1.7515,
"step": 170
},
{
"epoch": 0.56,
"grad_norm": 0.5753130316734314,
"learning_rate": 8.098499947332934e-05,
"loss": 1.7027,
"step": 175
},
{
"epoch": 0.58,
"grad_norm": 0.5501881241798401,
"learning_rate": 7.606843357124426e-05,
"loss": 1.7445,
"step": 180
},
{
"epoch": 0.59,
"grad_norm": 0.6279743909835815,
"learning_rate": 7.121251475752539e-05,
"loss": 1.7094,
"step": 185
},
{
"epoch": 0.61,
"grad_norm": 0.5455606579780579,
"learning_rate": 6.642954884333955e-05,
"loss": 1.7047,
"step": 190
},
{
"epoch": 0.62,
"grad_norm": 0.5445593595504761,
"learning_rate": 6.173165676349103e-05,
"loss": 1.6086,
"step": 195
},
{
"epoch": 0.64,
"grad_norm": 0.6656392812728882,
"learning_rate": 5.713074385969457e-05,
"loss": 1.7098,
"step": 200
},
{
"epoch": 0.66,
"grad_norm": 0.5549783706665039,
"learning_rate": 5.263846971020108e-05,
"loss": 1.7766,
"step": 205
},
{
"epoch": 0.67,
"grad_norm": 0.594687819480896,
"learning_rate": 4.826621858223431e-05,
"loss": 1.9863,
"step": 210
},
{
"epoch": 0.69,
"grad_norm": 0.5940021276473999,
"learning_rate": 4.40250705821178e-05,
"loss": 1.7957,
"step": 215
},
{
"epoch": 0.7,
"grad_norm": 0.6612168550491333,
"learning_rate": 3.99257735762021e-05,
"loss": 1.7848,
"step": 220
},
{
"epoch": 0.72,
"grad_norm": 0.5535335540771484,
"learning_rate": 3.597871595375121e-05,
"loss": 1.8688,
"step": 225
},
{
"epoch": 0.74,
"grad_norm": 0.607822597026825,
"learning_rate": 3.219390030081091e-05,
"loss": 1.6451,
"step": 230
},
{
"epoch": 0.75,
"grad_norm": 0.6583452820777893,
"learning_rate": 2.858091805177554e-05,
"loss": 1.8795,
"step": 235
},
{
"epoch": 0.77,
"grad_norm": 0.5893087983131409,
"learning_rate": 2.514892518288988e-05,
"loss": 1.7631,
"step": 240
},
{
"epoch": 0.78,
"grad_norm": 0.5828584432601929,
"learning_rate": 2.1906619009284257e-05,
"loss": 1.7843,
"step": 245
},
{
"epoch": 0.8,
"grad_norm": 0.5105574727058411,
"learning_rate": 1.8862216144342692e-05,
"loss": 1.7854,
"step": 250
},
{
"epoch": 0.82,
"grad_norm": 0.6286787390708923,
"learning_rate": 1.6023431677260214e-05,
"loss": 1.5676,
"step": 255
},
{
"epoch": 0.83,
"grad_norm": 0.5752671957015991,
"learning_rate": 1.339745962155613e-05,
"loss": 1.6083,
"step": 260
},
{
"epoch": 0.85,
"grad_norm": 0.5867213606834412,
"learning_rate": 1.0990954684091558e-05,
"loss": 1.8291,
"step": 265
},
{
"epoch": 0.86,
"grad_norm": 0.6025935411453247,
"learning_rate": 8.810015400790994e-06,
"loss": 1.7606,
"step": 270
},
{
"epoch": 0.88,
"grad_norm": 0.5295992493629456,
"learning_rate": 6.860168681805945e-06,
"loss": 1.7076,
"step": 275
},
{
"epoch": 0.9,
"grad_norm": 0.6831830143928528,
"learning_rate": 5.146355805285452e-06,
"loss": 1.706,
"step": 280
},
{
"epoch": 0.91,
"grad_norm": 0.6414029002189636,
"learning_rate": 3.6729198952483724e-06,
"loss": 1.7685,
"step": 285
},
{
"epoch": 0.93,
"grad_norm": 0.5466156601905823,
"learning_rate": 2.4435949152906145e-06,
"loss": 1.7422,
"step": 290
},
{
"epoch": 0.94,
"grad_norm": 0.7220910787582397,
"learning_rate": 1.4614962060194304e-06,
"loss": 1.8109,
"step": 295
},
{
"epoch": 0.96,
"grad_norm": 0.6633021235466003,
"learning_rate": 7.291125901946027e-07,
"loss": 1.7055,
"step": 300
},
{
"epoch": 0.98,
"grad_norm": 0.5576484799385071,
"learning_rate": 2.4830006558373973e-07,
"loss": 1.73,
"step": 305
},
{
"epoch": 0.99,
"grad_norm": 0.6037314534187317,
"learning_rate": 2.0277101514987184e-08,
"loss": 1.9092,
"step": 310
},
{
"epoch": 1.0,
"step": 312,
"total_flos": 1.4358009139789824e+17,
"train_loss": 1.8196016863370552,
"train_runtime": 2685.2386,
"train_samples_per_second": 3.724,
"train_steps_per_second": 0.116
}
],
"logging_steps": 5,
"max_steps": 312,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 1.4358009139789824e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
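
For context, `trainer_state.json` is the training log written by the Hugging Face `transformers` Trainer. `log_history` holds one entry per logging interval (`logging_steps: 5` here) plus a final summary entry with the aggregate metrics (`train_loss`, `train_runtime`, `total_flos`). A minimal sketch for inspecting the file offline, assuming it has been downloaded locally and that `matplotlib` is installed (the path below is illustrative):

```python
import json
import matplotlib.pyplot as plt

# Load the trainer state; adjust the path to wherever the file was downloaded.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic training logs; the final summary entry has no
# per-step "loss"/"learning_rate" keys, so it is filtered out here.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]
lrs = [entry["learning_rate"] for entry in logs]

# Plot the loss curve and the learning-rate schedule side by side.
fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()
```

In this run the logged `learning_rate` decays smoothly from about 2e-4 at step 5 to roughly 2e-8 at step 310, consistent with a cosine decay from an initial learning rate of 2e-4 over the 312 total steps, while the reported loss drops from 2.89 at step 5 to around 1.7-1.8 by the end of the epoch.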