{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.289308176100629,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5832258064516129,
      "eval_loss": 0.19133226573467255,
      "eval_runtime": 1.4535,
      "eval_samples_per_second": 2132.847,
      "eval_steps_per_second": 44.721,
      "step": 318
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.685534591194969e-05,
      "loss": 0.309,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.827741935483871,
      "eval_loss": 0.09374182671308517,
      "eval_runtime": 1.4509,
      "eval_samples_per_second": 2136.594,
      "eval_steps_per_second": 44.8,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8790322580645161,
      "eval_loss": 0.06333240121603012,
      "eval_runtime": 1.4496,
      "eval_samples_per_second": 2138.579,
      "eval_steps_per_second": 44.841,
      "step": 954
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.371069182389937e-05,
      "loss": 0.1078,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.896774193548387,
      "eval_loss": 0.04806679114699364,
      "eval_runtime": 1.4589,
      "eval_samples_per_second": 2124.857,
      "eval_steps_per_second": 44.553,
      "step": 1272
    },
    {
      "epoch": 4.72,
      "learning_rate": 1.0566037735849058e-05,
      "loss": 0.0686,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9112903225806451,
      "eval_loss": 0.0393882654607296,
      "eval_runtime": 1.4641,
      "eval_samples_per_second": 2117.338,
      "eval_steps_per_second": 44.396,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9167741935483871,
      "eval_loss": 0.03456535562872887,
      "eval_runtime": 1.4626,
      "eval_samples_per_second": 2119.465,
      "eval_steps_per_second": 44.44,
      "step": 1908
    },
    {
      "epoch": 6.29,
      "learning_rate": 7.421383647798742e-06,
      "loss": 0.0538,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 3180,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 520587680240208.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.3001695987245524,
    "num_train_epochs": 10,
    "temperature": 17
  }
}