BERT-WMM/run-18/checkpoint-801/trainer_state.json
{
  "best_metric": 0.5817901492118835,
  "best_model_checkpoint": "BERT-WMM/run-18/checkpoint-534",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 801,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7563380281690141,
      "eval_loss": 0.6019130349159241,
      "eval_runtime": 2.4196,
      "eval_samples_per_second": 880.318,
      "eval_steps_per_second": 55.381,
      "step": 267
    },
    {
      "epoch": 1.87,
      "grad_norm": 9.674468994140625,
      "learning_rate": 5.7987153604787005e-06,
      "loss": 0.6061,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7619718309859155,
      "eval_loss": 0.5817901492118835,
      "eval_runtime": 2.455,
      "eval_samples_per_second": 867.605,
      "eval_steps_per_second": 54.582,
      "step": 534
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7582159624413145,
      "eval_loss": 0.6465383768081665,
      "eval_runtime": 2.4569,
      "eval_samples_per_second": 866.932,
      "eval_steps_per_second": 54.539,
      "step": 801
    }
  ],
  "logging_steps": 500,
  "max_steps": 801,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 349221394826640.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "per_device_train_batch_size": 32
  }
}