{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.559508574353724,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 4.893353809401928e-05,
      "loss": 5.7923,
      "step": 500
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.7867076188038565e-05,
      "loss": 5.0624,
      "step": 1000
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.680061428205785e-05,
      "loss": 4.1352,
      "step": 1500
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.573415237607713e-05,
      "loss": 3.453,
      "step": 2000
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.466769047009641e-05,
      "loss": 3.0505,
      "step": 2500
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.360122856411569e-05,
      "loss": 2.7572,
      "step": 3000
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.253476665813497e-05,
      "loss": 2.5325,
      "step": 3500
    },
    {
      "epoch": 1.02,
      "learning_rate": 4.146830475215426e-05,
      "loss": 2.3672,
      "step": 4000
    },
    {
      "epoch": 1.15,
      "learning_rate": 4.040184284617354e-05,
      "loss": 2.2281,
      "step": 4500
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.933538094019282e-05,
      "loss": 2.1289,
      "step": 5000
    },
    {
      "epoch": 1.41,
      "learning_rate": 3.8268919034212095e-05,
      "loss": 2.0405,
      "step": 5500
    },
    {
      "epoch": 1.54,
      "learning_rate": 3.720245712823138e-05,
      "loss": 1.9581,
      "step": 6000
    },
    {
      "epoch": 1.66,
      "learning_rate": 3.6135995222250665e-05,
      "loss": 1.8997,
      "step": 6500
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.506953331626994e-05,
      "loss": 1.8383,
      "step": 7000
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.400307141028923e-05,
      "loss": 1.7812,
      "step": 7500
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.2936609504308506e-05,
      "loss": 1.7441,
      "step": 8000
    },
    {
      "epoch": 2.18,
      "learning_rate": 3.187014759832779e-05,
      "loss": 1.6926,
      "step": 8500
    },
    {
      "epoch": 2.3,
      "learning_rate": 3.0803685692347076e-05,
      "loss": 1.657,
      "step": 9000
    },
    {
      "epoch": 2.43,
      "learning_rate": 2.9737223786366354e-05,
      "loss": 1.6246,
      "step": 9500
    },
    {
      "epoch": 2.56,
      "learning_rate": 2.8670761880385632e-05,
      "loss": 1.5971,
      "step": 10000
    }
  ],
  "max_steps": 23442,
  "num_train_epochs": 6,
  "total_flos": 4.243235179619942e+16,
  "trial_name": null,
  "trial_params": null
}