{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2705993776214315,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 0.8518972396850586,
      "learning_rate": 4.933026654038696e-05,
      "loss": 0.5532,
      "step": 100
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.7409780621528625,
      "learning_rate": 4.865376809633338e-05,
      "loss": 0.5033,
      "step": 200
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.7067281007766724,
      "learning_rate": 4.79772696522798e-05,
      "loss": 0.4602,
      "step": 300
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.7321436405181885,
      "learning_rate": 4.7300771208226225e-05,
      "loss": 0.4523,
      "step": 400
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.6380289793014526,
      "learning_rate": 4.6624272764172644e-05,
      "loss": 0.4606,
      "step": 500
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.5421116352081299,
      "learning_rate": 4.594777432011906e-05,
      "loss": 0.4262,
      "step": 600
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.5193295478820801,
      "learning_rate": 4.527127587606549e-05,
      "loss": 0.4105,
      "step": 700
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.5962494611740112,
      "learning_rate": 4.459477743201191e-05,
      "loss": 0.4174,
      "step": 800
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.6797559261322021,
      "learning_rate": 4.391827898795833e-05,
      "loss": 0.4346,
      "step": 900
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.5834950804710388,
      "learning_rate": 4.324178054390475e-05,
      "loss": 0.4233,
      "step": 1000
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.5665029287338257,
      "learning_rate": 4.256528209985117e-05,
      "loss": 0.4119,
      "step": 1100
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.5659196972846985,
      "learning_rate": 4.188878365579759e-05,
      "loss": 0.4242,
      "step": 1200
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.5055731534957886,
      "learning_rate": 4.121228521174402e-05,
      "loss": 0.4075,
      "step": 1300
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.5024439692497253,
      "learning_rate": 4.053578676769044e-05,
      "loss": 0.3856,
      "step": 1400
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.5468323826789856,
      "learning_rate": 3.9859288323636856e-05,
      "loss": 0.3942,
      "step": 1500
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.579856812953949,
      "learning_rate": 3.918278987958328e-05,
      "loss": 0.3914,
      "step": 1600
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.6119765043258667,
      "learning_rate": 3.85062914355297e-05,
      "loss": 0.4195,
      "step": 1700
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.4361695647239685,
      "learning_rate": 3.782979299147612e-05,
      "loss": 0.3926,
      "step": 1800
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.4878581464290619,
      "learning_rate": 3.715329454742254e-05,
      "loss": 0.3738,
      "step": 1900
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.6861026883125305,
      "learning_rate": 3.6476796103368965e-05,
      "loss": 0.4021,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 7391,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "total_flos": 5.62657090338816e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}