{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.1190171487074485,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 4.893353809401928e-05,
      "loss": 5.7923,
      "step": 500
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.7867076188038565e-05,
      "loss": 5.0624,
      "step": 1000
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.680061428205785e-05,
      "loss": 4.1352,
      "step": 1500
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.573415237607713e-05,
      "loss": 3.453,
      "step": 2000
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.466769047009641e-05,
      "loss": 3.0505,
      "step": 2500
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.360122856411569e-05,
      "loss": 2.7572,
      "step": 3000
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.253476665813497e-05,
      "loss": 2.5325,
      "step": 3500
    },
    {
      "epoch": 1.02,
      "learning_rate": 4.146830475215426e-05,
      "loss": 2.3672,
      "step": 4000
    },
    {
      "epoch": 1.15,
      "learning_rate": 4.040184284617354e-05,
      "loss": 2.2281,
      "step": 4500
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.933538094019282e-05,
      "loss": 2.1289,
      "step": 5000
    },
    {
      "epoch": 1.41,
      "learning_rate": 3.8268919034212095e-05,
      "loss": 2.0405,
      "step": 5500
    },
    {
      "epoch": 1.54,
      "learning_rate": 3.720245712823138e-05,
      "loss": 1.9581,
      "step": 6000
    },
    {
      "epoch": 1.66,
      "learning_rate": 3.6135995222250665e-05,
      "loss": 1.8997,
      "step": 6500
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.506953331626994e-05,
      "loss": 1.8383,
      "step": 7000
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.400307141028923e-05,
      "loss": 1.7812,
      "step": 7500
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.2936609504308506e-05,
      "loss": 1.7441,
      "step": 8000
    },
    {
      "epoch": 2.18,
      "learning_rate": 3.187014759832779e-05,
      "loss": 1.6926,
      "step": 8500
    },
    {
      "epoch": 2.3,
      "learning_rate": 3.0803685692347076e-05,
      "loss": 1.657,
      "step": 9000
    },
    {
      "epoch": 2.43,
      "learning_rate": 2.9737223786366354e-05,
      "loss": 1.6246,
      "step": 9500
    },
    {
      "epoch": 2.56,
      "learning_rate": 2.8670761880385632e-05,
      "loss": 1.5971,
      "step": 10000
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.7604299974404917e-05,
      "loss": 1.5715,
      "step": 10500
    },
    {
      "epoch": 2.82,
      "learning_rate": 2.6537838068424198e-05,
      "loss": 1.5444,
      "step": 11000
    },
    {
      "epoch": 2.94,
      "learning_rate": 2.5471376162443476e-05,
      "loss": 1.5255,
      "step": 11500
    },
    {
      "epoch": 3.07,
      "learning_rate": 2.440491425646276e-05,
      "loss": 1.5065,
      "step": 12000
    },
    {
      "epoch": 3.2,
      "learning_rate": 2.3338452350482043e-05,
      "loss": 1.4829,
      "step": 12500
    },
    {
      "epoch": 3.33,
      "learning_rate": 2.2271990444501324e-05,
      "loss": 1.46,
      "step": 13000
    },
    {
      "epoch": 3.46,
      "learning_rate": 2.1205528538520606e-05,
      "loss": 1.4434,
      "step": 13500
    },
    {
      "epoch": 3.58,
      "learning_rate": 2.0139066632539887e-05,
      "loss": 1.4356,
      "step": 14000
    },
    {
      "epoch": 3.71,
      "learning_rate": 1.907260472655917e-05,
      "loss": 1.4199,
      "step": 14500
    },
    {
      "epoch": 3.84,
      "learning_rate": 1.800614282057845e-05,
      "loss": 1.3948,
      "step": 15000
    },
    {
      "epoch": 3.97,
      "learning_rate": 1.693968091459773e-05,
      "loss": 1.3924,
      "step": 15500
    },
    {
      "epoch": 4.1,
      "learning_rate": 1.5873219008617013e-05,
      "loss": 1.3707,
      "step": 16000
    },
    {
      "epoch": 4.22,
      "learning_rate": 1.4806757102636296e-05,
      "loss": 1.3681,
      "step": 16500
    },
    {
      "epoch": 4.35,
      "learning_rate": 1.3740295196655576e-05,
      "loss": 1.3581,
      "step": 17000
    },
    {
      "epoch": 4.48,
      "learning_rate": 1.2673833290674857e-05,
      "loss": 1.3525,
      "step": 17500
    },
    {
      "epoch": 4.61,
      "learning_rate": 1.1607371384694139e-05,
      "loss": 1.3468,
      "step": 18000
    },
    {
      "epoch": 4.74,
      "learning_rate": 1.054090947871342e-05,
      "loss": 1.3392,
      "step": 18500
    },
    {
      "epoch": 4.86,
      "learning_rate": 9.474447572732703e-06,
      "loss": 1.3218,
      "step": 19000
    },
    {
      "epoch": 4.99,
      "learning_rate": 8.407985666751985e-06,
      "loss": 1.3237,
      "step": 19500
    },
    {
      "epoch": 5.12,
      "learning_rate": 7.3415237607712656e-06,
      "loss": 1.3108,
      "step": 20000
    }
  ],
  "max_steps": 23442,
  "num_train_epochs": 6,
  "total_flos": 8.486152068857856e+16,
  "trial_name": null,
  "trial_params": null
}