{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.289308176100629,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6122580645161291,
      "eval_loss": 0.20235490798950195,
      "eval_runtime": 5.9992,
      "eval_samples_per_second": 516.736,
      "eval_steps_per_second": 10.835,
      "step": 318
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.550763701707098e-05,
      "loss": 0.3237,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8187096774193549,
      "eval_loss": 0.10080192238092422,
      "eval_runtime": 5.8671,
      "eval_samples_per_second": 528.366,
      "eval_steps_per_second": 11.079,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8764516129032258,
      "eval_loss": 0.06733943521976471,
      "eval_runtime": 5.745,
      "eval_samples_per_second": 539.6,
      "eval_steps_per_second": 11.314,
      "step": 954
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.101527403414196e-05,
      "loss": 0.1156,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8977419354838709,
      "eval_loss": 0.052980903536081314,
      "eval_runtime": 5.7366,
      "eval_samples_per_second": 540.391,
      "eval_steps_per_second": 11.331,
      "step": 1272
    },
    {
      "epoch": 4.72,
      "learning_rate": 6.522911051212939e-06,
      "loss": 0.0761,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9029032258064517,
      "eval_loss": 0.04506753385066986,
      "eval_runtime": 5.8089,
      "eval_samples_per_second": 533.661,
      "eval_steps_per_second": 11.19,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9067741935483871,
      "eval_loss": 0.04121686518192291,
      "eval_runtime": 5.7614,
      "eval_samples_per_second": 538.061,
      "eval_steps_per_second": 11.282,
      "step": 1908
    },
    {
      "epoch": 6.29,
      "learning_rate": 2.0305480682839176e-06,
      "loss": 0.0627,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 2226,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 519271419317532.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.7143131281575181,
    "num_train_epochs": 7,
    "temperature": 11
  }
}