storm-cloud-detector / trainer_state.json
{
  "best_metric": 0.2846003472805023,
  "best_model_checkpoint": "./vit-base-beans/checkpoint-40",
  "epoch": 2.857142857142857,
  "eval_steps": 40,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.71,
      "grad_norm": 0.9989465475082397,
      "learning_rate": 0.00016428571428571428,
      "loss": 0.5177,
      "step": 10
    },
    {
      "epoch": 1.43,
      "grad_norm": 1.447810173034668,
      "learning_rate": 0.00012857142857142858,
      "loss": 0.2339,
      "step": 20
    },
    {
      "epoch": 2.14,
      "grad_norm": 2.477407693862915,
      "learning_rate": 9.285714285714286e-05,
      "loss": 0.2139,
      "step": 30
    },
    {
      "epoch": 2.86,
      "grad_norm": 0.2586057484149933,
      "learning_rate": 5.714285714285714e-05,
      "loss": 0.1312,
      "step": 40
    },
    {
      "epoch": 2.86,
      "eval_accuracy": 0.8852459016393442,
      "eval_loss": 0.2846003472805023,
      "eval_runtime": 38.8967,
      "eval_samples_per_second": 1.568,
      "eval_steps_per_second": 0.206,
      "step": 40
    }
  ],
  "logging_steps": 10,
  "max_steps": 56,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 40,
  "total_flos": 4.758008162335949e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
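
The state above can be inspected programmatically. The following is a minimal sketch using only the Python standard library; it assumes the file has been downloaded locally as trainer_state.json (the filename and path are an assumption, adjust as needed). It prints the best checkpoint and walks log_history, separating the per-step training entries from the evaluation entry.

# Minimal sketch: load this trainer_state.json and summarize the run.
# Assumes the file was saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss={state['best_metric']:.4f})")
print(f"progress: step {state['global_step']}/{state['max_steps']}, "
      f"epoch {state['epoch']:.2f} of {state['num_train_epochs']}")

# Training-loss entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for entry in train_logs:
    print(f"step {entry['step']:>3}: loss={entry['loss']:.4f}, "
          f"lr={entry['learning_rate']:.2e}")
for entry in eval_logs:
    print(f"step {entry['step']:>3}: eval_loss={entry['eval_loss']:.4f}, "
          f"eval_accuracy={entry['eval_accuracy']:.4f}")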