{
"best_metric": 0.9816007359705612,
"best_model_checkpoint": "data/train-test/xlnet-base-cased/model/checkpoint-874",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 874,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"eval_accuracy": null,
"eval_f1": 0.9355716878402904,
"eval_loss": 0.025213107466697693,
"eval_precision": 0.9230080572963295,
"eval_recall": 0.9484820607175714,
"eval_runtime": 1.8189,
"eval_samples_per_second": 533.284,
"eval_steps_per_second": 17.043,
"step": 437
},
{
"epoch": 1.14,
"grad_norm": 1.2998504638671875,
"learning_rate": 8.558352402745997e-06,
"loss": 0.0831,
"step": 500
},
{
"epoch": 2.0,
"eval_accuracy": null,
"eval_f1": 0.9775538250114522,
"eval_loss": 0.015987424179911613,
"eval_precision": 0.9735401459854015,
"eval_recall": 0.9816007359705612,
"eval_runtime": 1.8215,
"eval_samples_per_second": 532.541,
"eval_steps_per_second": 17.019,
"step": 874
}
],
"logging_steps": 500,
"max_steps": 874,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 841186234475568.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}