{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.289308176100629,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6525806451612903,
      "eval_loss": 0.28762087225914,
      "eval_runtime": 2.0811,
      "eval_samples_per_second": 1489.606,
      "eval_steps_per_second": 31.234,
      "step": 318
    },
    {
      "epoch": 1.57,
      "grad_norm": 0.6741577386856079,
      "learning_rate": 1.650593990216632e-05,
      "loss": 0.4587,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8432258064516129,
      "eval_loss": 0.11765597760677338,
      "eval_runtime": 2.0413,
      "eval_samples_per_second": 1518.615,
      "eval_steps_per_second": 31.842,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8896774193548387,
      "eval_loss": 0.06820457428693771,
      "eval_runtime": 2.0992,
      "eval_samples_per_second": 1476.728,
      "eval_steps_per_second": 30.964,
      "step": 954
    },
    {
      "epoch": 3.14,
      "grad_norm": 0.48641300201416016,
      "learning_rate": 1.3011879804332637e-05,
      "loss": 0.1374,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9090322580645162,
      "eval_loss": 0.04870055243372917,
      "eval_runtime": 2.2217,
      "eval_samples_per_second": 1395.357,
      "eval_steps_per_second": 29.257,
      "step": 1272
    },
    {
      "epoch": 4.72,
      "grad_norm": 0.4376387596130371,
      "learning_rate": 9.517819706498952e-06,
      "loss": 0.0771,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9209677419354839,
      "eval_loss": 0.039510224014520645,
      "eval_runtime": 2.287,
      "eval_samples_per_second": 1355.473,
      "eval_steps_per_second": 28.421,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9267741935483871,
      "eval_loss": 0.034614451229572296,
      "eval_runtime": 2.0978,
      "eval_samples_per_second": 1477.764,
      "eval_steps_per_second": 30.985,
      "step": 1908
    },
    {
      "epoch": 6.29,
      "grad_norm": 0.290304571390152,
      "learning_rate": 6.02375960866527e-06,
      "loss": 0.0588,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 2862,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "total_flos": 687000001259712.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.9964545021779843,
    "num_train_epochs": 9,
    "temperature": 3
  }
}