{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.2,
  "eval_steps": 500,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4,
      "grad_norm": 1.758865475654602,
      "learning_rate": 4.994647308096509e-05,
      "loss": 4.3569,
      "num_input_tokens_seen": 5888,
      "step": 5
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.4234931468963623,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 4.043,
      "num_input_tokens_seen": 11872,
      "step": 10
    },
    {
      "epoch": 1.2,
      "grad_norm": 1.5642849206924438,
      "learning_rate": 4.951963201008076e-05,
      "loss": 3.7996,
      "num_input_tokens_seen": 17616,
      "step": 15
    },
    {
      "epoch": 1.6,
      "grad_norm": 1.0812954902648926,
      "learning_rate": 4.914814565722671e-05,
      "loss": 3.7366,
      "num_input_tokens_seen": 23584,
      "step": 20
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.3543965816497803,
      "learning_rate": 4.867325323737765e-05,
      "loss": 3.6712,
      "num_input_tokens_seen": 29600,
      "step": 25
    },
    {
      "epoch": 2.4,
      "grad_norm": 1.436577558517456,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 3.6046,
      "num_input_tokens_seen": 35184,
      "step": 30
    },
    {
      "epoch": 2.8,
      "grad_norm": 1.079750657081604,
      "learning_rate": 4.742181853831721e-05,
      "loss": 3.4873,
      "num_input_tokens_seen": 41056,
      "step": 35
    },
    {
      "epoch": 3.2,
      "grad_norm": 1.5652551651000977,
      "learning_rate": 4.665063509461097e-05,
      "loss": 3.3525,
      "num_input_tokens_seen": 47040,
      "step": 40
    },
    {
      "epoch": 3.6,
      "grad_norm": 1.6999948024749756,
      "learning_rate": 4.5786740307563636e-05,
      "loss": 3.2724,
      "num_input_tokens_seen": 53072,
      "step": 45
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.4872270822525024,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 3.3493,
      "num_input_tokens_seen": 59360,
      "step": 50
    },
    {
      "epoch": 4.4,
      "grad_norm": 1.450323462486267,
      "learning_rate": 4.379599518697444e-05,
      "loss": 3.2383,
      "num_input_tokens_seen": 64928,
      "step": 55
    },
    {
      "epoch": 4.8,
      "grad_norm": 1.70930016040802,
      "learning_rate": 4.267766952966369e-05,
      "loss": 3.1941,
      "num_input_tokens_seen": 70752,
      "step": 60
    },
    {
      "epoch": 5.2,
      "grad_norm": 1.9510873556137085,
      "learning_rate": 4.148364537750172e-05,
      "loss": 3.1134,
      "num_input_tokens_seen": 76544,
      "step": 65
    },
    {
      "epoch": 5.6,
      "grad_norm": 1.781402826309204,
      "learning_rate": 4.021903572521802e-05,
      "loss": 3.0551,
      "num_input_tokens_seen": 82736,
      "step": 70
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.4807748794555664,
      "learning_rate": 3.888925582549006e-05,
      "loss": 3.0148,
      "num_input_tokens_seen": 88464,
      "step": 75
    },
    {
      "epoch": 6.4,
      "grad_norm": 1.936543345451355,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 2.9215,
      "num_input_tokens_seen": 94496,
      "step": 80
    },
    {
      "epoch": 6.8,
      "grad_norm": 2.542340040206909,
      "learning_rate": 3.6057217255475034e-05,
      "loss": 2.8317,
      "num_input_tokens_seen": 100192,
      "step": 85
    },
    {
      "epoch": 7.2,
      "grad_norm": 2.17262864112854,
      "learning_rate": 3.456708580912725e-05,
      "loss": 2.8665,
      "num_input_tokens_seen": 105904,
      "step": 90
    },
    {
      "epoch": 7.6,
      "grad_norm": 1.919408917427063,
      "learning_rate": 3.303598663257904e-05,
      "loss": 2.8028,
      "num_input_tokens_seen": 111856,
      "step": 95
    },
    {
      "epoch": 8.0,
      "grad_norm": 2.5374443531036377,
      "learning_rate": 3.147047612756302e-05,
      "loss": 2.9059,
      "num_input_tokens_seen": 118064,
      "step": 100
    },
    {
      "epoch": 8.4,
      "grad_norm": 1.868935465812683,
      "learning_rate": 2.9877258050403212e-05,
      "loss": 2.6716,
      "num_input_tokens_seen": 124192,
      "step": 105
    },
    {
      "epoch": 8.8,
      "grad_norm": 1.9682857990264893,
      "learning_rate": 2.8263154805501297e-05,
      "loss": 2.7336,
      "num_input_tokens_seen": 129856,
      "step": 110
    },
    {
      "epoch": 9.2,
      "grad_norm": 2.1156065464019775,
      "learning_rate": 2.663507823075358e-05,
      "loss": 2.6835,
      "num_input_tokens_seen": 135760,
      "step": 115
    },
    {
      "epoch": 9.6,
      "grad_norm": 2.3779890537261963,
      "learning_rate": 2.5e-05,
      "loss": 2.5898,
      "num_input_tokens_seen": 141760,
      "step": 120
    },
    {
      "epoch": 10.0,
      "grad_norm": 2.671842336654663,
      "learning_rate": 2.3364921769246423e-05,
      "loss": 2.5017,
      "num_input_tokens_seen": 147664,
      "step": 125
    },
    {
      "epoch": 10.4,
      "grad_norm": 2.809380292892456,
      "learning_rate": 2.173684519449872e-05,
      "loss": 2.4384,
      "num_input_tokens_seen": 153312,
      "step": 130
    },
    {
      "epoch": 10.8,
      "grad_norm": 3.9723222255706787,
      "learning_rate": 2.0122741949596797e-05,
      "loss": 2.5169,
      "num_input_tokens_seen": 159584,
      "step": 135
    },
    {
      "epoch": 11.2,
      "grad_norm": 2.3110992908477783,
      "learning_rate": 1.852952387243698e-05,
      "loss": 2.3124,
      "num_input_tokens_seen": 165040,
      "step": 140
    },
    {
      "epoch": 11.6,
      "grad_norm": 2.2998690605163574,
      "learning_rate": 1.6964013367420966e-05,
      "loss": 2.4734,
      "num_input_tokens_seen": 170944,
      "step": 145
    },
    {
      "epoch": 12.0,
      "grad_norm": 3.0829477310180664,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 2.3957,
      "num_input_tokens_seen": 177168,
      "step": 150
    },
    {
      "epoch": 12.4,
      "grad_norm": 2.7017433643341064,
      "learning_rate": 1.3942782744524973e-05,
      "loss": 2.5141,
      "num_input_tokens_seen": 183088,
      "step": 155
    },
    {
      "epoch": 12.8,
      "grad_norm": 3.001948118209839,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 2.2276,
      "num_input_tokens_seen": 188736,
      "step": 160
    },
    {
      "epoch": 13.2,
      "grad_norm": 3.0778212547302246,
      "learning_rate": 1.1110744174509952e-05,
      "loss": 2.1839,
      "num_input_tokens_seen": 194592,
      "step": 165
    },
    {
      "epoch": 13.6,
      "grad_norm": 3.4998250007629395,
      "learning_rate": 9.780964274781984e-06,
      "loss": 2.3212,
      "num_input_tokens_seen": 200528,
      "step": 170
    },
    {
      "epoch": 14.0,
      "grad_norm": 2.9788782596588135,
      "learning_rate": 8.51635462249828e-06,
      "loss": 2.2398,
      "num_input_tokens_seen": 206256,
      "step": 175
    },
    {
      "epoch": 14.4,
      "grad_norm": 3.000025510787964,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 2.2431,
      "num_input_tokens_seen": 212480,
      "step": 180
    },
    {
      "epoch": 14.8,
      "grad_norm": 3.488719940185547,
      "learning_rate": 6.204004813025568e-06,
      "loss": 2.0743,
      "num_input_tokens_seen": 218208,
      "step": 185
    },
    {
      "epoch": 15.2,
      "grad_norm": 3.7175214290618896,
      "learning_rate": 5.166166492719124e-06,
      "loss": 2.1343,
      "num_input_tokens_seen": 223632,
      "step": 190
    },
    {
      "epoch": 15.6,
      "grad_norm": 3.5645270347595215,
      "learning_rate": 4.213259692436367e-06,
      "loss": 2.3751,
      "num_input_tokens_seen": 229776,
      "step": 195
    },
    {
      "epoch": 16.0,
      "grad_norm": 3.673673629760742,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 2.0981,
      "num_input_tokens_seen": 235392,
      "step": 200
    },
    {
      "epoch": 16.4,
      "grad_norm": 3.5783541202545166,
      "learning_rate": 2.578181461682794e-06,
      "loss": 2.151,
      "num_input_tokens_seen": 241168,
      "step": 205
    },
    {
      "epoch": 16.8,
      "grad_norm": 3.5505430698394775,
      "learning_rate": 1.9030116872178316e-06,
      "loss": 2.1801,
      "num_input_tokens_seen": 247200,
      "step": 210
    },
    {
      "epoch": 17.2,
      "grad_norm": 3.7030887603759766,
      "learning_rate": 1.3267467626223606e-06,
      "loss": 2.3428,
      "num_input_tokens_seen": 253536,
      "step": 215
    },
    {
      "epoch": 17.6,
      "grad_norm": 3.326767921447754,
      "learning_rate": 8.51854342773295e-07,
      "loss": 2.1907,
      "num_input_tokens_seen": 259568,
      "step": 220
    },
    {
      "epoch": 18.0,
      "grad_norm": 3.3685076236724854,
      "learning_rate": 4.803679899192392e-07,
      "loss": 2.0246,
      "num_input_tokens_seen": 265056,
      "step": 225
    },
    {
      "epoch": 18.4,
      "grad_norm": 3.4106569290161133,
      "learning_rate": 2.1387846565474045e-07,
      "loss": 2.1411,
      "num_input_tokens_seen": 271312,
      "step": 230
    },
    {
      "epoch": 18.8,
      "grad_norm": 3.188873529434204,
      "learning_rate": 5.352691903491303e-08,
      "loss": 2.0321,
      "num_input_tokens_seen": 277008,
      "step": 235
    },
    {
      "epoch": 19.2,
      "grad_norm": 3.023102045059204,
      "learning_rate": 0.0,
      "loss": 2.2785,
      "num_input_tokens_seen": 282848,
      "step": 240
    }
  ],
  "logging_steps": 5,
  "max_steps": 240,
  "num_input_tokens_seen": 282848,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2240526315257856.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}