|
{
  "best_metric": 0.002532488200813532,
  "best_model_checkpoint": "wsd_embedding/checkpoint-280896",
  "epoch": 11.0,
  "eval_steps": 500,
  "global_step": 280896,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.013213178142905235,
      "learning_rate": 9.800046992481203e-05,
      "loss": 0.0078,
      "step": 25536
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.004399613011628389,
      "eval_runtime": 113.9442,
      "eval_samples_per_second": 94.362,
      "eval_steps_per_second": 11.795,
      "step": 25536
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.008942891843616962,
      "learning_rate": 9.600117481203008e-05,
      "loss": 0.0056,
      "step": 51072
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.0036812573671340942,
      "eval_runtime": 113.8113,
      "eval_samples_per_second": 94.472,
      "eval_steps_per_second": 11.809,
      "step": 51072
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.019620591774582863,
      "learning_rate": 9.400180137844612e-05,
      "loss": 0.0049,
      "step": 76608
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.003332871478050947,
      "eval_runtime": 113.7743,
      "eval_samples_per_second": 94.503,
      "eval_steps_per_second": 11.813,
      "step": 76608
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.009495776146650314,
      "learning_rate": 9.200250626566417e-05,
      "loss": 0.0045,
      "step": 102144
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.0030836393125355244,
      "eval_runtime": 113.745,
      "eval_samples_per_second": 94.527,
      "eval_steps_per_second": 11.816,
      "step": 102144
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.007638801820576191,
      "learning_rate": 9.00031328320802e-05,
      "loss": 0.0043,
      "step": 127680
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.002948065521195531,
      "eval_runtime": 113.6579,
      "eval_samples_per_second": 94.6,
      "eval_steps_per_second": 11.825,
      "step": 127680
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.010788742452859879,
      "learning_rate": 8.800407268170426e-05,
      "loss": 0.0041,
      "step": 153216
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.0028368972707539797,
      "eval_runtime": 114.0546,
      "eval_samples_per_second": 94.271,
      "eval_steps_per_second": 11.784,
      "step": 153216
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.017284151166677475,
      "learning_rate": 8.600477756892231e-05,
      "loss": 0.0039,
      "step": 178752
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.0027765275444835424,
      "eval_runtime": 113.8381,
      "eval_samples_per_second": 94.45,
      "eval_steps_per_second": 11.806,
      "step": 178752
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.008209532126784325,
      "learning_rate": 8.400540413533835e-05,
      "loss": 0.0038,
      "step": 204288
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.002683098893612623,
      "eval_runtime": 113.811,
      "eval_samples_per_second": 94.472,
      "eval_steps_per_second": 11.809,
      "step": 204288
    },
    {
      "epoch": 9.0,
      "grad_norm": 0.01161679532378912,
      "learning_rate": 8.20062656641604e-05,
      "loss": 0.0037,
      "step": 229824
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.002616806188598275,
      "eval_runtime": 113.6336,
      "eval_samples_per_second": 94.62,
      "eval_steps_per_second": 11.827,
      "step": 229824
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.005629376508295536,
      "learning_rate": 8.000689223057644e-05,
      "loss": 0.0036,
      "step": 255360
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.0025433956179767847,
      "eval_runtime": 113.7133,
      "eval_samples_per_second": 94.554,
      "eval_steps_per_second": 11.819,
      "step": 255360
    },
    {
      "epoch": 11.0,
      "grad_norm": 0.009585434570908546,
      "learning_rate": 7.800767543859649e-05,
      "loss": 0.0035,
      "step": 280896
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.002532488200813532,
      "eval_runtime": 113.8464,
      "eval_samples_per_second": 94.443,
      "eval_steps_per_second": 11.805,
      "step": 280896
    }
  ],
  "logging_steps": 500,
  "max_steps": 1276800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.7262662954123264e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|