{
  "best_metric": 0.8779419256240448,
  "best_model_checkpoint": "./roberta_base/fine_tuned_models/checkpoint-36816",
  "epoch": 3.0,
  "global_step": 36816,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 1.4814993662099355e-05,
      "loss": 0.4891,
      "step": 12272
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8625573102394295,
      "eval_loss": 0.3779561519622803,
      "eval_runtime": 45.6198,
      "eval_samples_per_second": 215.148,
      "eval_steps_per_second": 26.896,
      "step": 12272
    },
    {
      "epoch": 2.0,
      "learning_rate": 7.4074968310496775e-06,
      "loss": 0.3181,
      "step": 24544
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8757004584819155,
      "eval_loss": 0.3620317280292511,
      "eval_runtime": 45.5874,
      "eval_samples_per_second": 215.301,
      "eval_steps_per_second": 26.915,
      "step": 24544
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0,
      "loss": 0.2271,
      "step": 36816
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8779419256240448,
      "eval_loss": 0.38248324394226074,
      "eval_runtime": 45.8275,
      "eval_samples_per_second": 214.172,
      "eval_steps_per_second": 26.774,
      "step": 36816
    },
    {
      "epoch": 3.0,
      "step": 36816,
      "total_flos": 7.74938740264658e+16,
      "train_loss": 0.3447862050679392,
      "train_runtime": 14914.656,
      "train_samples_per_second": 78.99,
      "train_steps_per_second": 2.468
    }
  ],
  "max_steps": 36816,
  "num_train_epochs": 3,
  "total_flos": 7.74938740264658e+16,
  "trial_name": null,
  "trial_params": null
}