{
  "best_metric": 0.8916819012797075,
  "best_model_checkpoint": "distilbert-hate_speech18\\run-1\\checkpoint-958",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 3353,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 6.1414387289205385e-06,
      "loss": 0.2985,
      "step": 479
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8784277879341865,
      "eval_loss": 0.29401153326034546,
      "eval_runtime": 9.4448,
      "eval_samples_per_second": 231.661,
      "eval_steps_per_second": 7.306,
      "step": 479
    },
    {
      "epoch": 2.0,
      "learning_rate": 5.264090339074747e-06,
      "loss": 0.2918,
      "step": 958
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8916819012797075,
      "eval_loss": 0.29342585802078247,
      "eval_runtime": 9.4536,
      "eval_samples_per_second": 231.446,
      "eval_steps_per_second": 7.299,
      "step": 958
    },
    {
      "epoch": 3.0,
      "learning_rate": 4.386741949228956e-06,
      "loss": 0.2879,
      "step": 1437
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8875685557586838,
      "eval_loss": 0.2938942015171051,
      "eval_runtime": 9.4764,
      "eval_samples_per_second": 230.889,
      "eval_steps_per_second": 7.281,
      "step": 1437
    },
    {
      "epoch": 4.0,
      "learning_rate": 3.509393559383165e-06,
      "loss": 0.2853,
      "step": 1916
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8848263254113345,
      "eval_loss": 0.293885737657547,
      "eval_runtime": 9.4687,
      "eval_samples_per_second": 231.076,
      "eval_steps_per_second": 7.287,
      "step": 1916
    },
    {
      "epoch": 5.0,
      "learning_rate": 2.6320451695373736e-06,
      "loss": 0.2834,
      "step": 2395
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8843692870201096,
      "eval_loss": 0.2947681248188019,
      "eval_runtime": 9.4769,
      "eval_samples_per_second": 230.877,
      "eval_steps_per_second": 7.281,
      "step": 2395
    },
    {
      "epoch": 6.0,
      "learning_rate": 1.7546967796915825e-06,
      "loss": 0.2825,
      "step": 2874
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8916819012797075,
      "eval_loss": 0.2950766682624817,
      "eval_runtime": 9.4662,
      "eval_samples_per_second": 231.137,
      "eval_steps_per_second": 7.289,
      "step": 2874
    },
    {
      "epoch": 7.0,
      "learning_rate": 8.773483898457912e-07,
      "loss": 0.2816,
      "step": 3353
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8875685557586838,
      "eval_loss": 0.29531416296958923,
      "eval_runtime": 9.4698,
      "eval_samples_per_second": 231.049,
      "eval_steps_per_second": 7.286,
      "step": 3353
    }
  ],
  "logging_steps": 500,
  "max_steps": 3832,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "total_flos": 901588712790528.0,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.24491283832577226,
    "learning_rate": 7.01878711876633e-06,
    "num_train_epochs": 8,
    "per_device_eval_batch_size": 32,
    "per_device_train_batch_size": 16,
    "temperature": 13
  }
}