{
"best_metric": 1.4213874340057373,
"best_model_checkpoint": "tam_test_out_drug_data/checkpoint-1011",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1011,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"eval_accuracy": 0.46219659629870735,
"eval_loss": 1.4630001783370972,
"eval_runtime": 15.2997,
"eval_samples_per_second": 702.823,
"eval_steps_per_second": 7.386,
"step": 337
},
{
"epoch": 1.48,
"grad_norm": 283691.34375,
"learning_rate": 5.554896142433234e-05,
"loss": 1.599,
"step": 500
},
{
"epoch": 2.0,
"eval_accuracy": 0.4721473077280759,
"eval_loss": 1.443649411201477,
"eval_runtime": 15.1007,
"eval_samples_per_second": 712.086,
"eval_steps_per_second": 7.483,
"step": 674
},
{
"epoch": 2.97,
"grad_norm": 246200.171875,
"learning_rate": 5.109792284866469e-05,
"loss": 1.3511,
"step": 1000
},
{
"epoch": 3.0,
"eval_accuracy": 0.471310332000372,
"eval_loss": 1.4213874340057373,
"eval_runtime": 14.9491,
"eval_samples_per_second": 719.309,
"eval_steps_per_second": 7.559,
"step": 1011
}
],
"logging_steps": 500,
"max_steps": 6740,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 1.32737670494208e+16,
"train_batch_size": 96,
"trial_name": null,
"trial_params": null
}