{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 7482,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 4.665864742047581e-05,
      "loss": 1.7562,
      "step": 500
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.331729484095162e-05,
      "loss": 1.526,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.997594226142743e-05,
      "loss": 1.4549,
      "step": 1500
    },
    {
      "epoch": 0.27,
      "learning_rate": 3.663458968190324e-05,
      "loss": 1.434,
      "step": 2000
    },
    {
      "epoch": 0.33,
      "learning_rate": 3.3293237102379046e-05,
      "loss": 1.3931,
      "step": 2500
    },
    {
      "epoch": 0.4,
      "learning_rate": 2.995188452285485e-05,
      "loss": 1.3762,
      "step": 3000
    },
    {
      "epoch": 0.47,
      "learning_rate": 2.6610531943330663e-05,
      "loss": 1.3459,
      "step": 3500
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.326917936380647e-05,
      "loss": 1.314,
      "step": 4000
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.9927826784282277e-05,
      "loss": 1.3203,
      "step": 4500
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.658647420475809e-05,
      "loss": 1.3109,
      "step": 5000
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.3245121625233894e-05,
      "loss": 1.3052,
      "step": 5500
    },
    {
      "epoch": 0.8,
      "learning_rate": 9.903769045709705e-06,
      "loss": 1.2921,
      "step": 6000
    },
    {
      "epoch": 0.87,
      "learning_rate": 6.562416466185513e-06,
      "loss": 1.2959,
      "step": 6500
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.2210638866613206e-06,
      "loss": 1.2986,
      "step": 7000
    },
    {
      "epoch": 1.0,
      "step": 7482,
      "total_flos": 2.662634158119936e+16,
      "train_loss": 1.3815043066249624,
      "train_runtime": 3193.6495,
      "train_samples_per_second": 4.686,
      "train_steps_per_second": 2.343
    }
  ],
  "logging_steps": 500,
  "max_steps": 7482,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 2.662634158119936e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}