{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 30,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022222222222222223,
"grad_norm": 0.3012203276157379,
"learning_rate": 9.987820251299122e-05,
"loss": 0.3486,
"step": 10
},
{
"epoch": 0.044444444444444446,
"grad_norm": 0.19629184901714325,
"learning_rate": 9.951340343707852e-05,
"loss": 0.0694,
"step": 20
},
{
"epoch": 0.06666666666666667,
"grad_norm": 0.1222372055053711,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0654,
"step": 30
},
{
"epoch": 0.06666666666666667,
"eval_loss": 0.06320454925298691,
"eval_runtime": 136.4924,
"eval_samples_per_second": 7.326,
"eval_steps_per_second": 0.366,
"step": 30
},
{
"epoch": 0.08888888888888889,
"grad_norm": 0.12505225837230682,
"learning_rate": 9.806308479691595e-05,
"loss": 0.0637,
"step": 40
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.14867539703845978,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0638,
"step": 50
},
{
"epoch": 0.13333333333333333,
"grad_norm": 0.10100489109754562,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0595,
"step": 60
},
{
"epoch": 0.13333333333333333,
"eval_loss": 0.05767079070210457,
"eval_runtime": 136.642,
"eval_samples_per_second": 7.318,
"eval_steps_per_second": 0.366,
"step": 60
},
{
"epoch": 0.15555555555555556,
"grad_norm": 0.17016883194446564,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0569,
"step": 70
},
{
"epoch": 0.17777777777777778,
"grad_norm": 0.08618276566267014,
"learning_rate": 9.24024048078213e-05,
"loss": 0.0567,
"step": 80
},
{
"epoch": 0.2,
"grad_norm": 0.08416091650724411,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0546,
"step": 90
},
{
"epoch": 0.2,
"eval_loss": 0.053434912115335464,
"eval_runtime": 136.6891,
"eval_samples_per_second": 7.316,
"eval_steps_per_second": 0.366,
"step": 90
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.09946251660585403,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0558,
"step": 100
},
{
"epoch": 0.24444444444444444,
"grad_norm": 0.10791021585464478,
"learning_rate": 8.596699001693255e-05,
"loss": 0.0552,
"step": 110
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.10341854393482208,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0555,
"step": 120
},
{
"epoch": 0.26666666666666666,
"eval_loss": 0.052995871752500534,
"eval_runtime": 136.9161,
"eval_samples_per_second": 7.304,
"eval_steps_per_second": 0.365,
"step": 120
},
{
"epoch": 0.28888888888888886,
"grad_norm": 0.092866912484169,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0504,
"step": 130
},
{
"epoch": 0.3111111111111111,
"grad_norm": 0.10099858045578003,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0526,
"step": 140
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.17789088189601898,
"learning_rate": 7.500000000000001e-05,
"loss": 0.052,
"step": 150
},
{
"epoch": 0.3333333333333333,
"eval_loss": 0.052405789494514465,
"eval_runtime": 136.4971,
"eval_samples_per_second": 7.326,
"eval_steps_per_second": 0.366,
"step": 150
},
{
"epoch": 0.35555555555555557,
"grad_norm": 0.13198032975196838,
"learning_rate": 7.191855733945387e-05,
"loss": 0.0525,
"step": 160
},
{
"epoch": 0.37777777777777777,
"grad_norm": 0.10188263654708862,
"learning_rate": 6.873032967079561e-05,
"loss": 0.0524,
"step": 170
},
{
"epoch": 0.4,
"grad_norm": 0.1550171822309494,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0529,
"step": 180
},
{
"epoch": 0.4,
"eval_loss": 0.05086047202348709,
"eval_runtime": 136.9202,
"eval_samples_per_second": 7.304,
"eval_steps_per_second": 0.365,
"step": 180
},
{
"epoch": 0.4222222222222222,
"grad_norm": 0.08379051834344864,
"learning_rate": 6.209609477998338e-05,
"loss": 0.053,
"step": 190
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.1344887912273407,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0511,
"step": 200
},
{
"epoch": 0.4666666666666667,
"grad_norm": 0.09979716688394547,
"learning_rate": 5.522642316338268e-05,
"loss": 0.0502,
"step": 210
},
{
"epoch": 0.4666666666666667,
"eval_loss": 0.0506291501224041,
"eval_runtime": 136.7115,
"eval_samples_per_second": 7.315,
"eval_steps_per_second": 0.366,
"step": 210
},
{
"epoch": 0.4888888888888889,
"grad_norm": 0.07537568360567093,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0497,
"step": 220
},
{
"epoch": 0.5111111111111111,
"grad_norm": 0.10576926916837692,
"learning_rate": 4.825502516487497e-05,
"loss": 0.0504,
"step": 230
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.10534786432981491,
"learning_rate": 4.477357683661734e-05,
"loss": 0.0479,
"step": 240
},
{
"epoch": 0.5333333333333333,
"eval_loss": 0.048654135316610336,
"eval_runtime": 136.5401,
"eval_samples_per_second": 7.324,
"eval_steps_per_second": 0.366,
"step": 240
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.10896391421556473,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0506,
"step": 250
},
{
"epoch": 0.5777777777777777,
"grad_norm": 0.10746324062347412,
"learning_rate": 3.790390522001662e-05,
"loss": 0.0477,
"step": 260
},
{
"epoch": 0.6,
"grad_norm": 0.16898374259471893,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0479,
"step": 270
},
{
"epoch": 0.6,
"eval_loss": 0.04828697443008423,
"eval_runtime": 136.3988,
"eval_samples_per_second": 7.331,
"eval_steps_per_second": 0.367,
"step": 270
},
{
"epoch": 0.6222222222222222,
"grad_norm": 0.23446787893772125,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0487,
"step": 280
},
{
"epoch": 0.6444444444444445,
"grad_norm": 0.18817700445652008,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.0483,
"step": 290
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.1385546326637268,
"learning_rate": 2.500000000000001e-05,
"loss": 0.047,
"step": 300
},
{
"epoch": 0.6666666666666666,
"eval_loss": 0.04631952568888664,
"eval_runtime": 136.5399,
"eval_samples_per_second": 7.324,
"eval_steps_per_second": 0.366,
"step": 300
},
{
"epoch": 0.6888888888888889,
"grad_norm": 0.1836208701133728,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0477,
"step": 310
},
{
"epoch": 0.7111111111111111,
"grad_norm": 0.17699767649173737,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0455,
"step": 320
},
{
"epoch": 0.7333333333333333,
"grad_norm": 0.17500163614749908,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0452,
"step": 330
},
{
"epoch": 0.7333333333333333,
"eval_loss": 0.0455147922039032,
"eval_runtime": 136.6527,
"eval_samples_per_second": 7.318,
"eval_steps_per_second": 0.366,
"step": 330
},
{
"epoch": 0.7555555555555555,
"grad_norm": 0.4250011146068573,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.045,
"step": 340
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.22023910284042358,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0436,
"step": 350
},
{
"epoch": 0.8,
"grad_norm": 0.1621396541595459,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0437,
"step": 360
},
{
"epoch": 0.8,
"eval_loss": 0.04442232847213745,
"eval_runtime": 136.5105,
"eval_samples_per_second": 7.325,
"eval_steps_per_second": 0.366,
"step": 360
},
{
"epoch": 0.8222222222222222,
"grad_norm": 0.1345316618680954,
"learning_rate": 7.597595192178702e-06,
"loss": 0.043,
"step": 370
},
{
"epoch": 0.8444444444444444,
"grad_norm": 0.1296633929014206,
"learning_rate": 5.852620357053651e-06,
"loss": 0.0451,
"step": 380
},
{
"epoch": 0.8666666666666667,
"grad_norm": 0.1679375320672989,
"learning_rate": 4.322727117869951e-06,
"loss": 0.0442,
"step": 390
},
{
"epoch": 0.8666666666666667,
"eval_loss": 0.04389314725995064,
"eval_runtime": 136.6458,
"eval_samples_per_second": 7.318,
"eval_steps_per_second": 0.366,
"step": 390
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.1358930468559265,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.042,
"step": 400
},
{
"epoch": 0.9111111111111111,
"grad_norm": 0.12815573811531067,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.0435,
"step": 410
},
{
"epoch": 0.9333333333333333,
"grad_norm": 0.16702179610729218,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.0439,
"step": 420
},
{
"epoch": 0.9333333333333333,
"eval_loss": 0.04338783398270607,
"eval_runtime": 136.4534,
"eval_samples_per_second": 7.329,
"eval_steps_per_second": 0.366,
"step": 420
},
{
"epoch": 0.9555555555555556,
"grad_norm": 0.2307462841272354,
"learning_rate": 4.865965629214819e-07,
"loss": 0.0406,
"step": 430
},
{
"epoch": 0.9777777777777777,
"grad_norm": 0.1788487583398819,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0434,
"step": 440
},
{
"epoch": 1.0,
"grad_norm": 0.22977402806282043,
"learning_rate": 0.0,
"loss": 0.0444,
"step": 450
},
{
"epoch": 1.0,
"eval_loss": 0.04335245117545128,
"eval_runtime": 136.5818,
"eval_samples_per_second": 7.322,
"eval_steps_per_second": 0.366,
"step": 450
},
{
"epoch": 1.0,
"step": 450,
"total_flos": 8.965157752615731e+17,
"train_loss": 0.0571367746591568,
"train_runtime": 6071.8085,
"train_samples_per_second": 1.482,
"train_steps_per_second": 0.074
}
],
"logging_steps": 10,
"max_steps": 450,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.965157752615731e+17,
"train_batch_size": 10,
"trial_name": null,
"trial_params": null
}