{
  "best_metric": 1.5293647050857544,
  "best_model_checkpoint": "./output/checkpoints/2024-06-10_15-16-32/checkpoint-10",
  "epoch": 1.0,
  "eval_steps": 1,
  "global_step": 19,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 22.303813934326172,
      "learning_rate": 0.0002,
      "loss": 5.2005,
      "step": 1
    },
    {
      "epoch": 0.05263157894736842,
      "eval_loss": 5.126399993896484,
      "eval_runtime": 8.3738,
      "eval_samples_per_second": 7.404,
      "eval_steps_per_second": 0.478,
      "step": 1
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 21.968650817871094,
      "learning_rate": 0.0004,
      "loss": 5.1548,
      "step": 2
    },
    {
      "epoch": 0.10526315789473684,
      "eval_loss": 2.035064935684204,
      "eval_runtime": 8.1279,
      "eval_samples_per_second": 7.628,
      "eval_steps_per_second": 0.492,
      "step": 2
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 4.796197891235352,
      "learning_rate": 0.00037647058823529414,
      "loss": 1.9739,
      "step": 3
    },
    {
      "epoch": 0.15789473684210525,
      "eval_loss": 1.565435767173767,
      "eval_runtime": 8.1953,
      "eval_samples_per_second": 7.565,
      "eval_steps_per_second": 0.488,
      "step": 3
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 2.5580379962921143,
      "learning_rate": 0.00035294117647058826,
      "loss": 1.5127,
      "step": 4
    },
    {
      "epoch": 0.21052631578947367,
      "eval_loss": 1.5319403409957886,
      "eval_runtime": 8.2189,
      "eval_samples_per_second": 7.544,
      "eval_steps_per_second": 0.487,
      "step": 4
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 1.5636128187179565,
      "learning_rate": 0.0003294117647058824,
      "loss": 1.5023,
      "step": 5
    },
    {
      "epoch": 0.2631578947368421,
      "eval_loss": 1.4795572757720947,
      "eval_runtime": 8.1979,
      "eval_samples_per_second": 7.563,
      "eval_steps_per_second": 0.488,
      "step": 5
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 0.6475034952163696,
      "learning_rate": 0.00030588235294117644,
      "loss": 1.4094,
      "step": 6
    },
    {
      "epoch": 0.3157894736842105,
      "eval_loss": 1.4736040830612183,
      "eval_runtime": 8.1682,
      "eval_samples_per_second": 7.59,
      "eval_steps_per_second": 0.49,
      "step": 6
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 1.273154616355896,
      "learning_rate": 0.0002823529411764706,
      "loss": 1.2526,
      "step": 7
    },
    {
      "epoch": 0.3684210526315789,
      "eval_loss": 1.801269292831421,
      "eval_runtime": 8.1796,
      "eval_samples_per_second": 7.58,
      "eval_steps_per_second": 0.489,
      "step": 7
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 1.148282527923584,
      "learning_rate": 0.00025882352941176474,
      "loss": 1.2184,
      "step": 8
    },
    {
      "epoch": 0.42105263157894735,
      "eval_loss": 1.6574877500534058,
      "eval_runtime": 8.2558,
      "eval_samples_per_second": 7.51,
      "eval_steps_per_second": 0.485,
      "step": 8
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 0.6692990064620972,
      "learning_rate": 0.00023529411764705883,
      "loss": 1.1489,
      "step": 9
    },
    {
      "epoch": 0.47368421052631576,
      "eval_loss": 1.5531737804412842,
      "eval_runtime": 8.2574,
      "eval_samples_per_second": 7.508,
      "eval_steps_per_second": 0.484,
      "step": 9
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.5520226955413818,
      "learning_rate": 0.00021176470588235295,
      "loss": 1.1021,
      "step": 10
    },
    {
      "epoch": 0.5263157894736842,
      "eval_loss": 1.5293647050857544,
      "eval_runtime": 8.3253,
      "eval_samples_per_second": 7.447,
      "eval_steps_per_second": 0.48,
      "step": 10
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 0.3798828423023224,
      "learning_rate": 0.00018823529411764707,
      "loss": 1.0751,
      "step": 11
    },
    {
      "epoch": 0.5789473684210527,
      "eval_loss": 1.565451979637146,
      "eval_runtime": 8.1863,
      "eval_samples_per_second": 7.574,
      "eval_steps_per_second": 0.489,
      "step": 11
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 0.2520289719104767,
      "learning_rate": 0.0001647058823529412,
      "loss": 1.0487,
      "step": 12
    },
    {
      "epoch": 0.631578947368421,
      "eval_loss": 1.6195383071899414,
      "eval_runtime": 8.2116,
      "eval_samples_per_second": 7.55,
      "eval_steps_per_second": 0.487,
      "step": 12
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 0.26942452788352966,
      "learning_rate": 0.0001411764705882353,
      "loss": 1.0435,
      "step": 13
    },
    {
      "epoch": 0.6842105263157895,
      "eval_loss": 1.635750412940979,
      "eval_runtime": 8.2868,
      "eval_samples_per_second": 7.482,
      "eval_steps_per_second": 0.483,
      "step": 13
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 0.23508282005786896,
      "learning_rate": 0.00011764705882352942,
      "loss": 1.0481,
      "step": 14
    },
    {
      "epoch": 0.7368421052631579,
      "eval_loss": 1.625639796257019,
      "eval_runtime": 8.2755,
      "eval_samples_per_second": 7.492,
      "eval_steps_per_second": 0.483,
      "step": 14
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.21778114140033722,
      "learning_rate": 9.411764705882353e-05,
      "loss": 1.0049,
      "step": 15
    },
    {
      "epoch": 0.7894736842105263,
      "eval_loss": 1.6122336387634277,
      "eval_runtime": 8.2527,
      "eval_samples_per_second": 7.513,
      "eval_steps_per_second": 0.485,
      "step": 15
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.27773982286453247,
      "learning_rate": 7.058823529411765e-05,
      "loss": 1.0006,
      "step": 16
    },
    {
      "epoch": 0.8421052631578947,
      "eval_loss": 1.5968810319900513,
      "eval_runtime": 8.3129,
      "eval_samples_per_second": 7.458,
      "eval_steps_per_second": 0.481,
      "step": 16
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 0.2526203393936157,
      "learning_rate": 4.705882352941177e-05,
      "loss": 0.9965,
      "step": 17
    },
    {
      "epoch": 0.8947368421052632,
      "eval_loss": 1.5880597829818726,
      "eval_runtime": 8.2366,
      "eval_samples_per_second": 7.527,
      "eval_steps_per_second": 0.486,
      "step": 17
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 0.20632648468017578,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 0.9902,
      "step": 18
    },
    {
      "epoch": 0.9473684210526315,
      "eval_loss": 1.5857887268066406,
      "eval_runtime": 8.2438,
      "eval_samples_per_second": 7.521,
      "eval_steps_per_second": 0.485,
      "step": 18
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.2954784333705902,
      "learning_rate": 0.0,
      "loss": 0.9972,
      "step": 19
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.5857279300689697,
      "eval_runtime": 8.3407,
      "eval_samples_per_second": 7.433,
      "eval_steps_per_second": 0.48,
      "step": 19
    },
    {
      "epoch": 1.0,
      "step": 19,
      "total_flos": 1.3664381904617472e+16,
      "train_loss": 1.614759667923576,
      "train_runtime": 422.1655,
      "train_samples_per_second": 1.393,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 1,
  "max_steps": 19,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3664381904617472e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}