{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9985401459854014,
  "eval_steps": 500,
  "global_step": 513,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 0.19070926308631897,
      "learning_rate": 9.615384615384617e-05,
      "loss": 1.8609,
      "step": 25
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.21529725193977356,
      "learning_rate": 0.00019230769230769233,
      "loss": 1.8112,
      "step": 50
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.2493579387664795,
      "learning_rate": 0.00019002169197396965,
      "loss": 1.7445,
      "step": 75
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.23993328213691711,
      "learning_rate": 0.000179175704989154,
      "loss": 1.7633,
      "step": 100
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.23707528412342072,
      "learning_rate": 0.0001683297180043384,
      "loss": 1.7176,
      "step": 125
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.205499067902565,
      "learning_rate": 0.00015748373101952278,
      "loss": 1.7322,
      "step": 150
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.20716556906700134,
      "learning_rate": 0.00014663774403470716,
      "loss": 1.7102,
      "step": 175
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.21949006617069244,
      "learning_rate": 0.00013579175704989157,
      "loss": 1.7285,
      "step": 200
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.21479777991771698,
      "learning_rate": 0.00012494577006507592,
      "loss": 1.6978,
      "step": 225
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.23418399691581726,
      "learning_rate": 0.0001140997830802603,
      "loss": 1.7135,
      "step": 250
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.25077584385871887,
      "learning_rate": 0.0001032537960954447,
      "loss": 1.7141,
      "step": 275
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.2330324500799179,
      "learning_rate": 9.240780911062907e-05,
      "loss": 1.6829,
      "step": 300
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.22768735885620117,
      "learning_rate": 8.156182212581345e-05,
      "loss": 1.6824,
      "step": 325
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.24993765354156494,
      "learning_rate": 7.071583514099784e-05,
      "loss": 1.719,
      "step": 350
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.22910568118095398,
      "learning_rate": 5.9869848156182215e-05,
      "loss": 1.7019,
      "step": 375
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.23837457597255707,
      "learning_rate": 4.90238611713666e-05,
      "loss": 1.7092,
      "step": 400
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.22363372147083282,
      "learning_rate": 3.817787418655098e-05,
      "loss": 1.6829,
      "step": 425
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.22817212343215942,
      "learning_rate": 2.7331887201735356e-05,
      "loss": 1.6589,
      "step": 450
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.23979724943637848,
      "learning_rate": 1.648590021691974e-05,
      "loss": 1.6638,
      "step": 475
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.23026736080646515,
      "learning_rate": 5.639913232104121e-06,
      "loss": 1.6881,
      "step": 500
    }
  ],
  "logging_steps": 25,
  "max_steps": 513,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 1.668156353497006e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}