{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 21056,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.66,
      "grad_norm": 7.977410793304443,
      "learning_rate": 1.9867686170212766e-05,
      "loss": 8.4229,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.635096788406372,
      "eval_runtime": 12.1685,
      "eval_samples_per_second": 416.402,
      "eval_steps_per_second": 13.066,
      "step": 3008
    },
    {
      "epoch": 1.33,
      "grad_norm": 12.781421661376953,
      "learning_rate": 1.9734707446808513e-05,
      "loss": 4.4356,
      "step": 4000
    },
    {
      "epoch": 1.99,
      "grad_norm": 13.996766090393066,
      "learning_rate": 1.9601728723404256e-05,
      "loss": 2.1407,
      "step": 6000
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.1635645627975464,
      "eval_runtime": 12.1736,
      "eval_samples_per_second": 416.227,
      "eval_steps_per_second": 13.061,
      "step": 6016
    },
    {
      "epoch": 2.66,
      "grad_norm": 9.880745887756348,
      "learning_rate": 1.9468750000000002e-05,
      "loss": 1.2343,
      "step": 8000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.6885451674461365,
      "eval_runtime": 12.1613,
      "eval_samples_per_second": 416.65,
      "eval_steps_per_second": 13.074,
      "step": 9024
    },
    {
      "epoch": 3.32,
      "grad_norm": 9.76446533203125,
      "learning_rate": 1.9335837765957447e-05,
      "loss": 0.9126,
      "step": 10000
    },
    {
      "epoch": 3.99,
      "grad_norm": 10.145390510559082,
      "learning_rate": 1.9202859042553194e-05,
      "loss": 0.7534,
      "step": 12000
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.5545051097869873,
      "eval_runtime": 12.1431,
      "eval_samples_per_second": 417.274,
      "eval_steps_per_second": 13.094,
      "step": 12032
    },
    {
      "epoch": 4.65,
      "grad_norm": 9.280902862548828,
      "learning_rate": 1.9069880319148937e-05,
      "loss": 0.6302,
      "step": 14000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.48846569657325745,
      "eval_runtime": 12.1559,
      "eval_samples_per_second": 416.836,
      "eval_steps_per_second": 13.08,
      "step": 15040
    },
    {
      "epoch": 5.32,
      "grad_norm": 8.525071144104004,
      "learning_rate": 1.8936968085106385e-05,
      "loss": 0.5579,
      "step": 16000
    },
    {
      "epoch": 5.98,
      "grad_norm": 8.482599258422852,
      "learning_rate": 1.880405585106383e-05,
      "loss": 0.4992,
      "step": 18000
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.4417724311351776,
      "eval_runtime": 12.1719,
      "eval_samples_per_second": 416.286,
      "eval_steps_per_second": 13.063,
      "step": 18048
    },
    {
      "epoch": 6.65,
      "grad_norm": 7.430637359619141,
      "learning_rate": 1.8671077127659576e-05,
      "loss": 0.4421,
      "step": 20000
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.40214258432388306,
      "eval_runtime": 12.1628,
      "eval_samples_per_second": 416.599,
      "eval_steps_per_second": 13.073,
      "step": 21056
    }
  ],
  "logging_steps": 2000,
  "max_steps": 300800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "total_flos": 5.002004203009344e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}