{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "global_step": 93,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 0.0001298236559139785,
      "loss": 5.2886,
      "step": 5
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00012244731182795699,
      "loss": 4.8484,
      "step": 10
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00011507096774193549,
      "loss": 4.592,
      "step": 15
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00010769462365591397,
      "loss": 4.3362,
      "step": 20
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00010031827956989248,
      "loss": 4.3381,
      "step": 25
    },
    {
      "epoch": 0.32,
      "learning_rate": 9.294193548387097e-05,
      "loss": 4.229,
      "step": 30
    },
    {
      "epoch": 0.38,
      "learning_rate": 8.556559139784946e-05,
      "loss": 4.2006,
      "step": 35
    },
    {
      "epoch": 0.43,
      "learning_rate": 7.818924731182796e-05,
      "loss": 3.8651,
      "step": 40
    },
    {
      "epoch": 0.48,
      "learning_rate": 7.081290322580646e-05,
      "loss": 4.0792,
      "step": 45
    },
    {
      "epoch": 0.54,
      "learning_rate": 6.343655913978495e-05,
      "loss": 3.8664,
      "step": 50
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.606021505376345e-05,
      "loss": 3.7922,
      "step": 55
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.868387096774194e-05,
      "loss": 3.727,
      "step": 60
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.130752688172043e-05,
      "loss": 3.565,
      "step": 65
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.3931182795698924e-05,
      "loss": 3.7849,
      "step": 70
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.655483870967742e-05,
      "loss": 3.7644,
      "step": 75
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.9178494623655912e-05,
      "loss": 3.6057,
      "step": 80
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.1802150537634409e-05,
      "loss": 3.648,
      "step": 85
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.4258064516129036e-06,
      "loss": 3.6404,
      "step": 90
    },
    {
      "epoch": 1.0,
      "step": 93,
      "total_flos": 48600317952000.0,
      "train_loss": 4.058929084449686,
      "train_runtime": 28.0555,
      "train_samples_per_second": 3.315,
      "train_steps_per_second": 3.315
    }
  ],
  "max_steps": 93,
  "num_train_epochs": 1,
  "total_flos": 48600317952000.0,
  "trial_name": null,
  "trial_params": null
}