|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10105263157894737,
  "eval_steps": 500,
  "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008421052631578947,
      "grad_norm": 1.9215331077575684,
      "learning_rate": 4.914814565722671e-05,
      "loss": 1.8162,
      "step": 5
    },
    {
      "epoch": 0.016842105263157894,
      "grad_norm": 1.746415376663208,
      "learning_rate": 4.665063509461097e-05,
      "loss": 1.7065,
      "step": 10
    },
    {
      "epoch": 0.02526315789473684,
      "grad_norm": 1.2568161487579346,
      "learning_rate": 4.267766952966369e-05,
      "loss": 1.6809,
      "step": 15
    },
    {
      "epoch": 0.03368421052631579,
      "grad_norm": 1.087218165397644,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.7278,
      "step": 20
    },
    {
      "epoch": 0.042105263157894736,
      "grad_norm": 0.935480535030365,
      "learning_rate": 3.147047612756302e-05,
      "loss": 1.5573,
      "step": 25
    },
    {
      "epoch": 0.05052631578947368,
      "grad_norm": 1.06438148021698,
      "learning_rate": 2.5e-05,
      "loss": 1.5858,
      "step": 30
    },
    {
      "epoch": 0.05894736842105263,
      "grad_norm": 1.0331830978393555,
      "learning_rate": 1.852952387243698e-05,
      "loss": 1.4219,
      "step": 35
    },
    {
      "epoch": 0.06736842105263158,
      "grad_norm": 0.7302573919296265,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 1.6545,
      "step": 40
    },
    {
      "epoch": 0.07578947368421053,
      "grad_norm": 0.8324851989746094,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 1.5896,
      "step": 45
    },
    {
      "epoch": 0.08421052631578947,
      "grad_norm": 1.0173665285110474,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 1.4727,
      "step": 50
    },
    {
      "epoch": 0.09263157894736843,
      "grad_norm": 0.763109564781189,
      "learning_rate": 8.51854342773295e-07,
      "loss": 1.6785,
      "step": 55
    },
    {
      "epoch": 0.10105263157894737,
      "grad_norm": 0.8254455327987671,
      "learning_rate": 0.0,
      "loss": 1.6034,
      "step": 60
    },
    {
      "epoch": 0.10105263157894737,
      "step": 60,
      "total_flos": 1.783322464223232e+16,
      "train_loss": 1.624589498837789,
      "train_runtime": 968.353,
      "train_samples_per_second": 0.981,
      "train_steps_per_second": 0.062
    }
  ],
  "logging_steps": 5,
  "max_steps": 60,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 1.783322464223232e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|