{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.289308176100629,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6506451612903226,
      "eval_loss": 0.2873533070087433,
      "eval_runtime": 5.9654,
      "eval_samples_per_second": 519.661,
      "eval_steps_per_second": 10.896,
      "step": 318
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.650593990216632e-05,
      "loss": 0.4591,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8432258064516129,
      "eval_loss": 0.1160515546798706,
      "eval_runtime": 6.0854,
      "eval_samples_per_second": 509.418,
      "eval_steps_per_second": 10.681,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8958064516129032,
      "eval_loss": 0.06775005906820297,
      "eval_runtime": 6.0485,
      "eval_samples_per_second": 512.528,
      "eval_steps_per_second": 10.747,
      "step": 954
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.3011879804332637e-05,
      "loss": 0.1363,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9106451612903226,
      "eval_loss": 0.04885930195450783,
      "eval_runtime": 5.9946,
      "eval_samples_per_second": 517.134,
      "eval_steps_per_second": 10.843,
      "step": 1272
    },
    {
      "epoch": 4.72,
      "learning_rate": 9.517819706498952e-06,
      "loss": 0.0769,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9196774193548387,
      "eval_loss": 0.039541397243738174,
      "eval_runtime": 6.0954,
      "eval_samples_per_second": 508.58,
      "eval_steps_per_second": 10.664,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9267741935483871,
      "eval_loss": 0.034905772656202316,
      "eval_runtime": 5.9756,
      "eval_samples_per_second": 518.776,
      "eval_steps_per_second": 10.878,
      "step": 1908
    },
    {
      "epoch": 6.29,
      "learning_rate": 6.02375960866527e-06,
      "loss": 0.059,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 2862,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "total_flos": 697479243717816.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.706898380229718,
    "num_train_epochs": 9,
    "temperature": 3
  }
}