llama3-8b-lora-coding-11-v1 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 137,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0072992700729927005,
      "grad_norm": 0.8994209170341492,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 1.7629,
      "step": 1
    },
    {
      "epoch": 0.0364963503649635,
      "grad_norm": 0.7910200953483582,
      "learning_rate": 7.142857142857143e-05,
      "loss": 1.7329,
      "step": 5
    },
    {
      "epoch": 0.072992700729927,
      "grad_norm": 0.47016409039497375,
      "learning_rate": 0.00014285714285714287,
      "loss": 1.6931,
      "step": 10
    },
    {
      "epoch": 0.10948905109489052,
      "grad_norm": 0.6418158411979675,
      "learning_rate": 0.00019996738360808565,
      "loss": 1.5991,
      "step": 15
    },
    {
      "epoch": 0.145985401459854,
      "grad_norm": 0.5754856467247009,
      "learning_rate": 0.00019882804237803488,
      "loss": 1.4449,
      "step": 20
    },
    {
      "epoch": 0.18248175182481752,
      "grad_norm": 0.5607455968856812,
      "learning_rate": 0.00019607909582962477,
      "loss": 1.3519,
      "step": 25
    },
    {
      "epoch": 0.21897810218978103,
      "grad_norm": 0.31966158747673035,
      "learning_rate": 0.0001917653158603628,
      "loss": 1.2901,
      "step": 30
    },
    {
      "epoch": 0.25547445255474455,
      "grad_norm": 0.21750766038894653,
      "learning_rate": 0.00018595696069872013,
      "loss": 1.2469,
      "step": 35
    },
    {
      "epoch": 0.291970802919708,
      "grad_norm": 0.21939656138420105,
      "learning_rate": 0.00017874863061334657,
      "loss": 1.223,
      "step": 40
    },
    {
      "epoch": 0.3284671532846715,
      "grad_norm": 0.18782173097133636,
      "learning_rate": 0.00017025772716520323,
      "loss": 1.2186,
      "step": 45
    },
    {
      "epoch": 0.36496350364963503,
      "grad_norm": 0.19173678755760193,
      "learning_rate": 0.0001606225410966638,
      "loss": 1.2035,
      "step": 50
    },
    {
      "epoch": 0.40145985401459855,
      "grad_norm": 0.2181948572397232,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.191,
      "step": 55
    },
    {
      "epoch": 0.43795620437956206,
      "grad_norm": 0.20430037379264832,
      "learning_rate": 0.0001385631124488136,
      "loss": 1.1811,
      "step": 60
    },
    {
      "epoch": 0.4744525547445255,
      "grad_norm": 0.19574333727359772,
      "learning_rate": 0.0001264981502196662,
      "loss": 1.1705,
      "step": 65
    },
    {
      "epoch": 0.5109489051094891,
      "grad_norm": 0.1992381513118744,
      "learning_rate": 0.00011400161449686293,
      "loss": 1.1695,
      "step": 70
    },
    {
      "epoch": 0.5474452554744526,
      "grad_norm": 0.18511532247066498,
      "learning_rate": 0.00010127703547159739,
      "loss": 1.1559,
      "step": 75
    },
    {
      "epoch": 0.583941605839416,
      "grad_norm": 0.19379611313343048,
      "learning_rate": 8.853165746015997e-05,
      "loss": 1.1526,
      "step": 80
    },
    {
      "epoch": 0.6204379562043796,
      "grad_norm": 0.19651520252227783,
      "learning_rate": 7.597306353045393e-05,
      "loss": 1.1439,
      "step": 85
    },
    {
      "epoch": 0.656934306569343,
      "grad_norm": 0.18215572834014893,
      "learning_rate": 6.380579461128819e-05,
      "loss": 1.1592,
      "step": 90
    },
    {
      "epoch": 0.6934306569343066,
      "grad_norm": 0.19182553887367249,
      "learning_rate": 5.222801814877369e-05,
      "loss": 1.1425,
      "step": 95
    },
    {
      "epoch": 0.7299270072992701,
      "grad_norm": 0.19180874526500702,
      "learning_rate": 4.142830056718052e-05,
      "loss": 1.1488,
      "step": 100
    },
    {
      "epoch": 0.7664233576642335,
      "grad_norm": 0.19942091405391693,
      "learning_rate": 3.158253610095697e-05,
      "loss": 1.1437,
      "step": 105
    },
    {
      "epoch": 0.8029197080291971,
      "grad_norm": 0.19039174914360046,
      "learning_rate": 2.2851082017805703e-05,
      "loss": 1.1397,
      "step": 110
    },
    {
      "epoch": 0.8394160583941606,
      "grad_norm": 0.18815937638282776,
      "learning_rate": 1.5376146891235598e-05,
      "loss": 1.1451,
      "step": 115
    },
    {
      "epoch": 0.8759124087591241,
      "grad_norm": 0.19718731939792633,
      "learning_rate": 9.279474459608805e-06,
      "loss": 1.1502,
      "step": 120
    },
    {
      "epoch": 0.9124087591240876,
      "grad_norm": 0.19630739092826843,
      "learning_rate": 4.660360794506946e-06,
      "loss": 1.1419,
      "step": 125
    },
    {
      "epoch": 0.948905109489051,
      "grad_norm": 0.1804933249950409,
      "learning_rate": 1.5940370726542863e-06,
      "loss": 1.1483,
      "step": 130
    },
    {
      "epoch": 0.9854014598540146,
      "grad_norm": 0.1939323991537094,
      "learning_rate": 1.3044429107700318e-07,
      "loss": 1.1473,
      "step": 135
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.645493507385254,
      "eval_runtime": 0.792,
      "eval_samples_per_second": 11.363,
      "eval_steps_per_second": 1.263,
      "step": 137
    },
    {
      "epoch": 1.0,
      "step": 137,
      "total_flos": 8.08957492854784e+17,
      "train_loss": 1.2450406925521629,
      "train_runtime": 683.5001,
      "train_samples_per_second": 51.148,
      "train_steps_per_second": 0.2
    }
  ],
  "logging_steps": 5,
  "max_steps": 137,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.08957492854784e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
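
For reference, the log above can be read back programmatically. Below is a minimal Python sketch, assuming this file has been saved locally as trainer_state.json (that path is an assumption, not part of the repository): it parses log_history, prints the per-step training loss and learning rate, and reports the final eval_loss.

# Minimal sketch for inspecting the trainer state dumped above.
# Assumes the JSON has been saved locally as "trainer_state.json" (hypothetical path).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training records carry both "loss" and "learning_rate"; the last two
# entries hold the epoch-end eval metrics and the aggregate training stats.
train_logs = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for entry in train_logs:
    print(f'step {entry["step"]:>3}: loss={entry["loss"]:.4f}  lr={entry["learning_rate"]:.2e}')

if eval_logs:
    last_eval = eval_logs[-1]
    print(f'eval_loss at step {last_eval["step"]}: {last_eval["eval_loss"]:.4f}')

The same file can also be reloaded through the library that produced it; recent versions of transformers expose TrainerState.load_from_json for that purpose.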