{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 25.66914681799565,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.0048,
      "step": 1
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 12.156020748334926,
      "learning_rate": 9.986966157589751e-06,
      "loss": 0.9831,
      "step": 5
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.652240514755249,
      "eval_runtime": 24.1162,
      "eval_samples_per_second": 10.74,
      "eval_steps_per_second": 0.373,
      "step": 9
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 3.8285863774272446,
      "learning_rate": 9.841114703012817e-06,
      "loss": 0.7569,
      "step": 10
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 2.607435400324957,
      "learning_rate": 9.537877098354787e-06,
      "loss": 0.601,
      "step": 15
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.4056125581264496,
      "eval_runtime": 24.0279,
      "eval_samples_per_second": 10.779,
      "eval_steps_per_second": 0.375,
      "step": 18
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 3.788253489544647,
      "learning_rate": 9.08711169279446e-06,
      "loss": 0.4813,
      "step": 20
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 2.392342802419934,
      "learning_rate": 8.503473010366713e-06,
      "loss": 0.3464,
      "step": 25
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.18872837722301483,
      "eval_runtime": 24.0055,
      "eval_samples_per_second": 10.789,
      "eval_steps_per_second": 0.375,
      "step": 27
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 5.610647726544286,
      "learning_rate": 7.805935326811913e-06,
      "loss": 0.2162,
      "step": 30
    },
    {
      "epoch": 3.888888888888889,
      "grad_norm": 2.516026593621024,
      "learning_rate": 7.017175809949044e-06,
      "loss": 0.1559,
      "step": 35
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.07645933330059052,
      "eval_runtime": 24.0333,
      "eval_samples_per_second": 10.777,
      "eval_steps_per_second": 0.374,
      "step": 36
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 3.496241200147731,
      "learning_rate": 6.162837277871553e-06,
      "loss": 0.0797,
      "step": 40
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.562094511651351,
      "learning_rate": 5.270694542927089e-06,
      "loss": 0.0714,
      "step": 45
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.047790270298719406,
      "eval_runtime": 24.1049,
      "eval_samples_per_second": 10.745,
      "eval_steps_per_second": 0.373,
      "step": 45
    },
    {
      "epoch": 5.555555555555555,
      "grad_norm": 2.038884810074137,
      "learning_rate": 4.369751443898554e-06,
      "loss": 0.0477,
      "step": 50
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.035123735666275024,
      "eval_runtime": 24.0485,
      "eval_samples_per_second": 10.77,
      "eval_steps_per_second": 0.374,
      "step": 54
    },
    {
      "epoch": 6.111111111111111,
      "grad_norm": 1.2084030161409793,
      "learning_rate": 3.489297922152136e-06,
      "loss": 0.0452,
      "step": 55
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 1.5950605501326323,
      "learning_rate": 2.65795779650105e-06,
      "loss": 0.0345,
      "step": 60
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.02559623494744301,
      "eval_runtime": 24.0003,
      "eval_samples_per_second": 10.792,
      "eval_steps_per_second": 0.375,
      "step": 63
    },
    {
      "epoch": 7.222222222222222,
      "grad_norm": 0.7529091490914854,
      "learning_rate": 1.9027581939213852e-06,
      "loss": 0.0295,
      "step": 65
    },
    {
      "epoch": 7.777777777777778,
      "grad_norm": 1.569990631190196,
      "learning_rate": 1.2482508892179884e-06,
      "loss": 0.0252,
      "step": 70
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.019161375239491463,
      "eval_runtime": 24.061,
      "eval_samples_per_second": 10.764,
      "eval_steps_per_second": 0.374,
      "step": 72
    },
    {
      "epoch": 8.333333333333334,
      "grad_norm": 0.674086772310731,
      "learning_rate": 7.157141191620548e-07,
      "loss": 0.0198,
      "step": 75
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 0.5564084869849035,
      "learning_rate": 3.224608203719953e-07,
      "loss": 0.0181,
      "step": 80
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.015782972797751427,
      "eval_runtime": 23.9574,
      "eval_samples_per_second": 10.811,
      "eval_steps_per_second": 0.376,
      "step": 81
    },
    {
      "epoch": 9.444444444444445,
      "grad_norm": 0.402959494753,
      "learning_rate": 8.127578033998663e-08,
      "loss": 0.0156,
      "step": 85
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.3564826816641896,
      "learning_rate": 0.0,
      "loss": 0.0153,
      "step": 90
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.015113583765923977,
      "eval_runtime": 23.9569,
      "eval_samples_per_second": 10.811,
      "eval_steps_per_second": 0.376,
      "step": 90
    },
    {
      "epoch": 10.0,
      "step": 90,
      "total_flos": 18844169011200.0,
      "train_loss": 0.21927920257051786,
      "train_runtime": 1273.3046,
      "train_samples_per_second": 2.034,
      "train_steps_per_second": 0.071
    }
  ],
  "logging_steps": 5,
  "max_steps": 90,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 18844169011200.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}