{
  "best_metric": 0.9243119266055045,
  "best_model_checkpoint": "bert-base-uncased-finetuned-sst2/run-13/checkpoint-844",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 844,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.911697247706422,
      "eval_loss": 0.22412490844726562,
      "eval_runtime": 2.4274,
      "eval_samples_per_second": 359.236,
      "eval_steps_per_second": 22.658,
      "step": 211
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9105504587155964,
      "eval_loss": 0.25786927342414856,
      "eval_runtime": 2.4527,
      "eval_samples_per_second": 355.532,
      "eval_steps_per_second": 22.425,
      "step": 422
    },
    {
      "epoch": 2.37,
      "grad_norm": 30.697444915771484,
      "learning_rate": 2.3450215142725074e-05,
      "loss": 0.2117,
      "step": 500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9197247706422018,
      "eval_loss": 0.3487926125526428,
      "eval_runtime": 2.4283,
      "eval_samples_per_second": 359.093,
      "eval_steps_per_second": 22.649,
      "step": 633
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9243119266055045,
      "eval_loss": 0.3896058201789856,
      "eval_runtime": 2.4333,
      "eval_samples_per_second": 358.356,
      "eval_steps_per_second": 22.603,
      "step": 844
    }
  ],
  "logging_steps": 500,
  "max_steps": 844,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 422743410424080.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 5.753483017575571e-05,
    "num_train_epochs": 4,
    "per_device_train_batch_size": 32,
    "seed": 7
  }
}