{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.861635220125786,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6780645161290323,
      "eval_loss": 0.40519121289253235,
      "eval_runtime": 2.041,
      "eval_samples_per_second": 1518.881,
      "eval_steps_per_second": 31.848,
      "step": 318
    },
    {
      "epoch": 1.57,
      "grad_norm": 0.8578532934188843,
      "learning_rate": 1.650593990216632e-05,
      "loss": 0.6413,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8396774193548387,
      "eval_loss": 0.14012566208839417,
      "eval_runtime": 2.0415,
      "eval_samples_per_second": 1518.497,
      "eval_steps_per_second": 31.839,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8964516129032258,
      "eval_loss": 0.07267240434885025,
      "eval_runtime": 2.0914,
      "eval_samples_per_second": 1482.244,
      "eval_steps_per_second": 31.079,
      "step": 954
    },
    {
      "epoch": 3.14,
      "grad_norm": 0.5715540051460266,
      "learning_rate": 1.3011879804332637e-05,
      "loss": 0.1635,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9145161290322581,
      "eval_loss": 0.05143408849835396,
      "eval_runtime": 2.5486,
      "eval_samples_per_second": 1216.367,
      "eval_steps_per_second": 25.504,
      "step": 1272
    },
    {
      "epoch": 4.72,
      "grad_norm": 0.5522882342338562,
      "learning_rate": 9.517819706498952e-06,
      "loss": 0.0817,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9235483870967742,
      "eval_loss": 0.04265019670128822,
      "eval_runtime": 2.1204,
      "eval_samples_per_second": 1461.984,
      "eval_steps_per_second": 30.655,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9267741935483871,
      "eval_loss": 0.03797636553645134,
      "eval_runtime": 2.0762,
      "eval_samples_per_second": 1493.112,
      "eval_steps_per_second": 31.307,
      "step": 1908
    },
    {
      "epoch": 6.29,
      "grad_norm": 0.3155914843082428,
      "learning_rate": 6.02375960866527e-06,
      "loss": 0.0617,
      "step": 2000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9345161290322581,
      "eval_loss": 0.03546394035220146,
      "eval_runtime": 2.0483,
      "eval_samples_per_second": 1513.483,
      "eval_steps_per_second": 31.734,
      "step": 2226
    },
    {
      "epoch": 7.86,
      "grad_norm": 0.31727784872055054,
      "learning_rate": 2.5296995108315863e-06,
      "loss": 0.054,
      "step": 2500
    }
  ],
  "logging_steps": 500,
  "max_steps": 2862,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "total_flos": 651155886807636.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.980055715011544,
    "num_train_epochs": 9,
    "temperature": 2
  }
}