{
  "best_metric": 0.9094036697247706,
  "best_model_checkpoint": "bert-base-uncased-finetuned-sst2/run-0/checkpoint-422",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 633,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9071100917431193,
      "eval_loss": 0.2743072807788849,
      "eval_runtime": 2.2589,
      "eval_samples_per_second": 386.028,
      "eval_steps_per_second": 24.348,
      "step": 211
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9094036697247706,
      "eval_loss": 0.2763120234012604,
      "eval_runtime": 2.3197,
      "eval_samples_per_second": 375.903,
      "eval_steps_per_second": 23.709,
      "step": 422
    },
    {
      "epoch": 2.37,
      "grad_norm": 7.35267448425293,
      "learning_rate": 1.4004341381907418e-05,
      "loss": 0.2085,
      "step": 500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9071100917431193,
      "eval_loss": 0.39031100273132324,
      "eval_runtime": 2.3726,
      "eval_samples_per_second": 367.532,
      "eval_steps_per_second": 23.181,
      "step": 633
    }
  ],
  "logging_steps": 500,
  "max_steps": 633,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 519273306636840.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 6.665224131389019e-05,
    "num_train_epochs": 3,
    "per_device_train_batch_size": 32,
    "seed": 10
  }
}