{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 32.0,
  "eval_steps": 500,
  "global_step": 9184,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 0.000484375,
      "loss": 2.8375,
      "step": 287
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00046875,
      "loss": 2.4263,
      "step": 574
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.000453125,
      "loss": 2.2043,
      "step": 861
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.0004375,
      "loss": 2.0835,
      "step": 1148
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.000421875,
      "loss": 2.0225,
      "step": 1435
    },
    {
      "epoch": 6.0,
      "learning_rate": 0.00040625000000000004,
      "loss": 1.9901,
      "step": 1722
    },
    {
      "epoch": 7.0,
      "learning_rate": 0.000390625,
      "loss": 1.9992,
      "step": 2009
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.000375,
      "loss": 1.9665,
      "step": 2296
    },
    {
      "epoch": 9.0,
      "learning_rate": 0.000359375,
      "loss": 1.943,
      "step": 2583
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.00034375,
      "loss": 1.9327,
      "step": 2870
    },
    {
      "epoch": 11.0,
      "learning_rate": 0.000328125,
      "loss": 1.9184,
      "step": 3157
    },
    {
      "epoch": 12.0,
      "learning_rate": 0.0003125,
      "loss": 1.9191,
      "step": 3444
    },
    {
      "epoch": 13.0,
      "learning_rate": 0.000296875,
      "loss": 1.9074,
      "step": 3731
    },
    {
      "epoch": 14.0,
      "learning_rate": 0.00028125000000000003,
      "loss": 1.9066,
      "step": 4018
    },
    {
      "epoch": 15.0,
      "learning_rate": 0.000265625,
      "loss": 1.9053,
      "step": 4305
    },
    {
      "epoch": 16.0,
      "learning_rate": 0.00025,
      "loss": 1.8906,
      "step": 4592
    },
    {
      "epoch": 17.0,
      "learning_rate": 0.000234375,
      "loss": 1.8876,
      "step": 4879
    },
    {
      "epoch": 18.0,
      "learning_rate": 0.00021875,
      "loss": 1.8837,
      "step": 5166
    },
    {
      "epoch": 19.0,
      "learning_rate": 0.00020312500000000002,
      "loss": 1.8766,
      "step": 5453
    },
    {
      "epoch": 20.0,
      "learning_rate": 0.0001875,
      "loss": 1.8701,
      "step": 5740
    },
    {
      "epoch": 21.0,
      "learning_rate": 0.000171875,
      "loss": 1.8698,
      "step": 6027
    },
    {
      "epoch": 22.0,
      "learning_rate": 0.00015625,
      "loss": 1.8713,
      "step": 6314
    },
    {
      "epoch": 23.0,
      "learning_rate": 0.00014062500000000002,
      "loss": 1.8756,
      "step": 6601
    },
    {
      "epoch": 24.0,
      "learning_rate": 0.000125,
      "loss": 1.8628,
      "step": 6888
    },
    {
      "epoch": 25.0,
      "learning_rate": 0.000109375,
      "loss": 1.8646,
      "step": 7175
    },
    {
      "epoch": 26.0,
      "learning_rate": 9.375e-05,
      "loss": 1.8658,
      "step": 7462
    },
    {
      "epoch": 27.0,
      "learning_rate": 7.8125e-05,
      "loss": 1.8627,
      "step": 7749
    },
    {
      "epoch": 28.0,
      "learning_rate": 6.25e-05,
      "loss": 1.8646,
      "step": 8036
    },
    {
      "epoch": 29.0,
      "learning_rate": 4.6875e-05,
      "loss": 1.8585,
      "step": 8323
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.125e-05,
      "loss": 1.8598,
      "step": 8610
    },
    {
      "epoch": 31.0,
      "learning_rate": 1.5625e-05,
      "loss": 1.8601,
      "step": 8897
    },
    {
      "epoch": 32.0,
      "learning_rate": 0.0,
      "loss": 1.8605,
      "step": 9184
    }
  ],
  "logging_steps": 500,
  "max_steps": 9184,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 32,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}