|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.289308176100629,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6954838709677419,
      "eval_loss": 0.4076870381832123,
      "eval_runtime": 5.7004,
      "eval_samples_per_second": 543.825,
      "eval_steps_per_second": 11.403,
      "step": 318
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.650593990216632e-05,
      "loss": 0.6428,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8416129032258064,
      "eval_loss": 0.13920199871063232,
      "eval_runtime": 5.9203,
      "eval_samples_per_second": 523.622,
      "eval_steps_per_second": 10.979,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8987096774193548,
      "eval_loss": 0.07036732137203217,
      "eval_runtime": 5.7408,
      "eval_samples_per_second": 539.992,
      "eval_steps_per_second": 11.322,
      "step": 954
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.3011879804332637e-05,
      "loss": 0.163,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9196774193548387,
      "eval_loss": 0.05059635266661644,
      "eval_runtime": 5.7559,
      "eval_samples_per_second": 538.578,
      "eval_steps_per_second": 11.293,
      "step": 1272
    },
    {
      "epoch": 4.72,
      "learning_rate": 9.517819706498952e-06,
      "loss": 0.0813,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9267741935483871,
      "eval_loss": 0.04242779687047005,
      "eval_runtime": 5.8398,
      "eval_samples_per_second": 530.844,
      "eval_steps_per_second": 11.131,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9280645161290323,
      "eval_loss": 0.03737134486436844,
      "eval_runtime": 5.7358,
      "eval_samples_per_second": 540.467,
      "eval_steps_per_second": 11.332,
      "step": 1908
    },
    {
      "epoch": 6.29,
      "learning_rate": 6.02375960866527e-06,
      "loss": 0.0615,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 2862,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "total_flos": 519271419317532.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.9688927570011859,
    "num_train_epochs": 9,
    "temperature": 2
  }
}
|
|