{
  "best_metric": 0.05447809770703316,
  "best_model_checkpoint": "ai_vs_real_image_detection/checkpoint-8440",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 8440,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3,
      "learning_rate": 9.463647199046485e-06,
      "loss": 0.2818,
      "step": 500
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.867699642431467e-06,
      "loss": 0.1106,
      "step": 1000
    },
    {
      "epoch": 0.89,
      "learning_rate": 8.27175208581645e-06,
      "loss": 0.0871,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9599166666666666,
      "eval_loss": 0.13126564025878906,
      "eval_runtime": 108.7151,
      "eval_samples_per_second": 110.38,
      "eval_steps_per_second": 3.449,
      "step": 1688
    },
    {
      "epoch": 1.18,
      "learning_rate": 7.675804529201431e-06,
      "loss": 0.0641,
      "step": 2000
    },
    {
      "epoch": 1.48,
      "learning_rate": 7.079856972586413e-06,
      "loss": 0.0554,
      "step": 2500
    },
    {
      "epoch": 1.78,
      "learning_rate": 6.483909415971396e-06,
      "loss": 0.0542,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.97675,
      "eval_loss": 0.06993511319160461,
      "eval_runtime": 110.3278,
      "eval_samples_per_second": 108.767,
      "eval_steps_per_second": 3.399,
      "step": 3376
    },
    {
      "epoch": 2.07,
      "learning_rate": 5.887961859356377e-06,
      "loss": 0.0448,
      "step": 3500
    },
    {
      "epoch": 2.37,
      "learning_rate": 5.2920143027413596e-06,
      "loss": 0.0372,
      "step": 4000
    },
    {
      "epoch": 2.67,
      "learning_rate": 4.696066746126342e-06,
      "loss": 0.0344,
      "step": 4500
    },
    {
      "epoch": 2.96,
      "learning_rate": 4.100119189511323e-06,
      "loss": 0.031,
      "step": 5000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9773333333333334,
      "eval_loss": 0.06398669630289078,
      "eval_runtime": 110.6996,
      "eval_samples_per_second": 108.401,
      "eval_steps_per_second": 3.388,
      "step": 5064
    },
    {
      "epoch": 3.26,
      "learning_rate": 3.5041716328963056e-06,
      "loss": 0.0252,
      "step": 5500
    },
    {
      "epoch": 3.55,
      "learning_rate": 2.9082240762812874e-06,
      "loss": 0.0226,
      "step": 6000
    },
    {
      "epoch": 3.85,
      "learning_rate": 2.3122765196662693e-06,
      "loss": 0.024,
      "step": 6500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9770833333333333,
      "eval_loss": 0.06279084086418152,
      "eval_runtime": 110.8304,
      "eval_samples_per_second": 108.274,
      "eval_steps_per_second": 3.384,
      "step": 6752
    },
    {
      "epoch": 4.15,
      "learning_rate": 1.7163289630512518e-06,
      "loss": 0.019,
      "step": 7000
    },
    {
      "epoch": 4.44,
      "learning_rate": 1.1203814064362336e-06,
      "loss": 0.0171,
      "step": 7500
    },
    {
      "epoch": 4.74,
      "learning_rate": 5.244338498212158e-07,
      "loss": 0.0158,
      "step": 8000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9804166666666667,
      "eval_loss": 0.05447809770703316,
      "eval_runtime": 108.5958,
      "eval_samples_per_second": 110.502,
      "eval_steps_per_second": 3.453,
      "step": 8440
    }
  ],
  "logging_steps": 500,
  "max_steps": 8440,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 4.184567439187968e+19,
  "trial_name": null,
  "trial_params": null
}