{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.888268156424581,
"eval_steps": 500,
"global_step": 3500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.7,
"grad_norm": 6.08462381362915,
"learning_rate": 1.7206703910614527e-05,
"loss": 0.3865,
"step": 500
},
{
"epoch": 1.4,
"grad_norm": 5.282430648803711,
"learning_rate": 1.4413407821229052e-05,
"loss": 0.2899,
"step": 1000
},
{
"epoch": 2.09,
"grad_norm": 4.065394878387451,
"learning_rate": 1.1620111731843577e-05,
"loss": 0.2214,
"step": 1500
},
{
"epoch": 2.79,
"grad_norm": 8.418371200561523,
"learning_rate": 8.826815642458101e-06,
"loss": 0.1093,
"step": 2000
},
{
"epoch": 3.49,
"grad_norm": 20.9011173248291,
"learning_rate": 6.033519553072626e-06,
"loss": 0.0653,
"step": 2500
},
{
"epoch": 4.19,
"grad_norm": 4.803398132324219,
"learning_rate": 3.240223463687151e-06,
"loss": 0.0406,
"step": 3000
},
{
"epoch": 4.89,
"grad_norm": 0.0067610470578074455,
"learning_rate": 4.46927374301676e-07,
"loss": 0.0164,
"step": 3500
}
],
"logging_steps": 500,
"max_steps": 3580,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 3179834312329860.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}