{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8859357696566998,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004429678848283499,
"eval_loss": 10.838801383972168,
"eval_runtime": 0.3434,
"eval_samples_per_second": 276.62,
"eval_steps_per_second": 139.766,
"step": 1
},
{
"epoch": 0.0221483942414175,
"grad_norm": 0.12922966480255127,
"learning_rate": 5e-05,
"loss": 10.8345,
"step": 5
},
{
"epoch": 0.044296788482835,
"grad_norm": 0.15409621596336365,
"learning_rate": 0.0001,
"loss": 10.8351,
"step": 10
},
{
"epoch": 0.0664451827242525,
"grad_norm": 0.16463850438594818,
"learning_rate": 9.98292246503335e-05,
"loss": 10.8363,
"step": 15
},
{
"epoch": 0.08859357696567,
"grad_norm": 0.15971654653549194,
"learning_rate": 9.931806517013612e-05,
"loss": 10.8329,
"step": 20
},
{
"epoch": 0.11074197120708748,
"grad_norm": 0.20640374720096588,
"learning_rate": 9.847001329696653e-05,
"loss": 10.8257,
"step": 25
},
{
"epoch": 0.132890365448505,
"grad_norm": 0.1784103512763977,
"learning_rate": 9.729086208503174e-05,
"loss": 10.8245,
"step": 30
},
{
"epoch": 0.15503875968992248,
"grad_norm": 0.1995667666196823,
"learning_rate": 9.578866633275288e-05,
"loss": 10.8213,
"step": 35
},
{
"epoch": 0.17718715393134,
"grad_norm": 0.20601214468479156,
"learning_rate": 9.397368756032445e-05,
"loss": 10.8223,
"step": 40
},
{
"epoch": 0.19933554817275748,
"grad_norm": 0.22217710316181183,
"learning_rate": 9.185832391312644e-05,
"loss": 10.8179,
"step": 45
},
{
"epoch": 0.22148394241417496,
"grad_norm": 0.32868528366088867,
"learning_rate": 8.945702546981969e-05,
"loss": 10.8227,
"step": 50
},
{
"epoch": 0.22148394241417496,
"eval_loss": 10.81373119354248,
"eval_runtime": 0.337,
"eval_samples_per_second": 281.9,
"eval_steps_per_second": 142.434,
"step": 50
},
{
"epoch": 0.24363233665559247,
"grad_norm": 0.25537627935409546,
"learning_rate": 8.678619553365659e-05,
"loss": 10.8066,
"step": 55
},
{
"epoch": 0.26578073089701,
"grad_norm": 0.2640365660190582,
"learning_rate": 8.386407858128706e-05,
"loss": 10.8066,
"step": 60
},
{
"epoch": 0.28792912513842744,
"grad_norm": 0.2941606640815735,
"learning_rate": 8.07106356344834e-05,
"loss": 10.7951,
"step": 65
},
{
"epoch": 0.31007751937984496,
"grad_norm": 0.3043847978115082,
"learning_rate": 7.734740790612136e-05,
"loss": 10.7888,
"step": 70
},
{
"epoch": 0.33222591362126247,
"grad_norm": 0.2664663791656494,
"learning_rate": 7.379736965185368e-05,
"loss": 10.7881,
"step": 75
},
{
"epoch": 0.35437430786268,
"grad_norm": 0.3024435341358185,
"learning_rate": 7.008477123264848e-05,
"loss": 10.7854,
"step": 80
},
{
"epoch": 0.37652270210409744,
"grad_norm": 0.3120448887348175,
"learning_rate": 6.623497346023418e-05,
"loss": 10.781,
"step": 85
},
{
"epoch": 0.39867109634551495,
"grad_norm": 0.2808491587638855,
"learning_rate": 6.227427435703997e-05,
"loss": 10.7733,
"step": 90
},
{
"epoch": 0.42081949058693247,
"grad_norm": 0.3052971065044403,
"learning_rate": 5.8229729514036705e-05,
"loss": 10.7721,
"step": 95
},
{
"epoch": 0.4429678848283499,
"grad_norm": 0.44799819588661194,
"learning_rate": 5.4128967273616625e-05,
"loss": 10.7888,
"step": 100
},
{
"epoch": 0.4429678848283499,
"eval_loss": 10.766999244689941,
"eval_runtime": 0.3358,
"eval_samples_per_second": 282.876,
"eval_steps_per_second": 142.927,
"step": 100
},
{
"epoch": 0.46511627906976744,
"grad_norm": 0.22693148255348206,
"learning_rate": 5e-05,
"loss": 10.7656,
"step": 105
},
{
"epoch": 0.48726467331118495,
"grad_norm": 0.25195586681365967,
"learning_rate": 4.5871032726383386e-05,
"loss": 10.7628,
"step": 110
},
{
"epoch": 0.5094130675526024,
"grad_norm": 0.24384839832782745,
"learning_rate": 4.17702704859633e-05,
"loss": 10.7586,
"step": 115
},
{
"epoch": 0.53156146179402,
"grad_norm": 0.21910274028778076,
"learning_rate": 3.772572564296005e-05,
"loss": 10.7505,
"step": 120
},
{
"epoch": 0.5537098560354374,
"grad_norm": 0.2763572037220001,
"learning_rate": 3.3765026539765834e-05,
"loss": 10.7494,
"step": 125
},
{
"epoch": 0.5758582502768549,
"grad_norm": 0.23573029041290283,
"learning_rate": 2.991522876735154e-05,
"loss": 10.758,
"step": 130
},
{
"epoch": 0.5980066445182725,
"grad_norm": 0.22336480021476746,
"learning_rate": 2.6202630348146324e-05,
"loss": 10.7592,
"step": 135
},
{
"epoch": 0.6201550387596899,
"grad_norm": 0.2121262103319168,
"learning_rate": 2.2652592093878666e-05,
"loss": 10.7607,
"step": 140
},
{
"epoch": 0.6423034330011074,
"grad_norm": 0.2790408134460449,
"learning_rate": 1.928936436551661e-05,
"loss": 10.75,
"step": 145
},
{
"epoch": 0.6644518272425249,
"grad_norm": 0.36312904953956604,
"learning_rate": 1.6135921418712956e-05,
"loss": 10.7721,
"step": 150
},
{
"epoch": 0.6644518272425249,
"eval_loss": 10.751730918884277,
"eval_runtime": 0.3354,
"eval_samples_per_second": 283.275,
"eval_steps_per_second": 143.128,
"step": 150
},
{
"epoch": 0.6866002214839424,
"grad_norm": 0.23820596933364868,
"learning_rate": 1.3213804466343421e-05,
"loss": 10.7486,
"step": 155
},
{
"epoch": 0.70874861572536,
"grad_norm": 0.1970384567975998,
"learning_rate": 1.0542974530180327e-05,
"loss": 10.7485,
"step": 160
},
{
"epoch": 0.7308970099667774,
"grad_norm": 0.1786661446094513,
"learning_rate": 8.141676086873572e-06,
"loss": 10.7446,
"step": 165
},
{
"epoch": 0.7530454042081949,
"grad_norm": 0.18778419494628906,
"learning_rate": 6.026312439675552e-06,
"loss": 10.7532,
"step": 170
},
{
"epoch": 0.7751937984496124,
"grad_norm": 0.19150562584400177,
"learning_rate": 4.2113336672471245e-06,
"loss": 10.7434,
"step": 175
},
{
"epoch": 0.7973421926910299,
"grad_norm": 0.1973363310098648,
"learning_rate": 2.7091379149682685e-06,
"loss": 10.7504,
"step": 180
},
{
"epoch": 0.8194905869324474,
"grad_norm": 0.221753790974617,
"learning_rate": 1.5299867030334814e-06,
"loss": 10.753,
"step": 185
},
{
"epoch": 0.8416389811738649,
"grad_norm": 0.225653275847435,
"learning_rate": 6.819348298638839e-07,
"loss": 10.7533,
"step": 190
},
{
"epoch": 0.8637873754152824,
"grad_norm": 0.26362743973731995,
"learning_rate": 1.7077534966650766e-07,
"loss": 10.7592,
"step": 195
},
{
"epoch": 0.8859357696566998,
"grad_norm": 0.35018375515937805,
"learning_rate": 0.0,
"loss": 10.7569,
"step": 200
},
{
"epoch": 0.8859357696566998,
"eval_loss": 10.749935150146484,
"eval_runtime": 0.3361,
"eval_samples_per_second": 282.658,
"eval_steps_per_second": 142.817,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 32811830476800.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}