{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 20.773824980524537,
  "eval_steps": 500,
  "global_step": 400000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.596728122565567,
      "grad_norm": 0.9470382332801819,
      "learning_rate": 0.0002,
      "loss": 1.7054,
      "step": 50000
    },
    {
      "epoch": 5.193456245131134,
      "grad_norm": 0.6922363638877869,
      "learning_rate": 0.0002,
      "loss": 1.6798,
      "step": 100000
    },
    {
      "epoch": 7.790184367696702,
      "grad_norm": 0.6903401017189026,
      "learning_rate": 0.0002,
      "loss": 1.6738,
      "step": 150000
    },
    {
      "epoch": 10.386912490262269,
      "grad_norm": 1.2185564041137695,
      "learning_rate": 0.0002,
      "loss": 1.6705,
      "step": 200000
    },
    {
      "epoch": 12.983640612827838,
      "grad_norm": 0.7960174083709717,
      "learning_rate": 0.0002,
      "loss": 1.6691,
      "step": 250000
    },
    {
      "epoch": 15.580368735393405,
      "grad_norm": 0.7023811936378479,
      "learning_rate": 0.0002,
      "loss": 1.6667,
      "step": 300000
    },
    {
      "epoch": 18.177096857958972,
      "grad_norm": 0.6432592868804932,
      "learning_rate": 0.0002,
      "loss": 1.6664,
      "step": 350000
    },
    {
      "epoch": 20.773824980524537,
      "grad_norm": 1.1296038627624512,
      "learning_rate": 0.0002,
      "loss": 1.6651,
      "step": 400000
    }
  ],
  "logging_steps": 50000,
  "max_steps": 577650,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 50000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.227652833030557e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}