{
"best_metric": 1.8800272941589355,
"best_model_checkpoint": "flower_groups_image_detection/checkpoint-7755",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 7755,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.32,
"grad_norm": 5.792048454284668,
"learning_rate": 9.415963659961064e-07,
"loss": 2.0867,
"step": 500
},
{
"epoch": 0.64,
"grad_norm": 3.73576283454895,
"learning_rate": 8.767034393251135e-07,
"loss": 2.0233,
"step": 1000
},
{
"epoch": 0.97,
"grad_norm": 3.3685128688812256,
"learning_rate": 8.118105126541207e-07,
"loss": 1.9642,
"step": 1500
},
{
"epoch": 1.0,
"eval_accuracy": 0.7352745524915336,
"eval_loss": 2.0397422313690186,
"eval_runtime": 331.1652,
"eval_samples_per_second": 99.866,
"eval_steps_per_second": 12.483,
"step": 1551
},
{
"epoch": 1.29,
"grad_norm": 3.8721961975097656,
"learning_rate": 7.469175859831278e-07,
"loss": 1.9523,
"step": 2000
},
{
"epoch": 1.61,
"grad_norm": 5.311251163482666,
"learning_rate": 6.820246593121349e-07,
"loss": 1.939,
"step": 2500
},
{
"epoch": 1.93,
"grad_norm": 3.2061917781829834,
"learning_rate": 6.17131732641142e-07,
"loss": 1.8962,
"step": 3000
},
{
"epoch": 2.0,
"eval_accuracy": 0.7481555394291244,
"eval_loss": 1.9691122770309448,
"eval_runtime": 321.3062,
"eval_samples_per_second": 102.93,
"eval_steps_per_second": 12.866,
"step": 3102
},
{
"epoch": 2.26,
"grad_norm": 6.1755170822143555,
"learning_rate": 5.522388059701492e-07,
"loss": 1.8795,
"step": 3500
},
{
"epoch": 2.58,
"grad_norm": 7.479930877685547,
"learning_rate": 4.873458792991564e-07,
"loss": 1.8772,
"step": 4000
},
{
"epoch": 2.9,
"grad_norm": 3.788714647293091,
"learning_rate": 4.224529526281635e-07,
"loss": 1.8427,
"step": 4500
},
{
"epoch": 3.0,
"eval_accuracy": 0.7578313981615868,
"eval_loss": 1.9191893339157104,
"eval_runtime": 323.2055,
"eval_samples_per_second": 102.325,
"eval_steps_per_second": 12.791,
"step": 4653
},
{
"epoch": 3.22,
"grad_norm": 5.707278251647949,
"learning_rate": 3.5756002595717065e-07,
"loss": 1.8256,
"step": 5000
},
{
"epoch": 3.55,
"grad_norm": 3.444243907928467,
"learning_rate": 2.9266709928617776e-07,
"loss": 1.8318,
"step": 5500
},
{
"epoch": 3.87,
"grad_norm": 3.4218833446502686,
"learning_rate": 2.2777417261518494e-07,
"loss": 1.8064,
"step": 6000
},
{
"epoch": 4.0,
"eval_accuracy": 0.7642416545718432,
"eval_loss": 1.8897231817245483,
"eval_runtime": 325.0959,
"eval_samples_per_second": 101.73,
"eval_steps_per_second": 12.716,
"step": 6204
},
{
"epoch": 4.19,
"grad_norm": 3.0800297260284424,
"learning_rate": 1.6288124594419208e-07,
"loss": 1.8,
"step": 6500
},
{
"epoch": 4.51,
"grad_norm": 5.477210521697998,
"learning_rate": 9.798831927319921e-08,
"loss": 1.8135,
"step": 7000
},
{
"epoch": 4.84,
"grad_norm": 5.891938209533691,
"learning_rate": 3.309539260220636e-08,
"loss": 1.7991,
"step": 7500
},
{
"epoch": 5.0,
"eval_accuracy": 0.7664791969037252,
"eval_loss": 1.8800272941589355,
"eval_runtime": 319.8009,
"eval_samples_per_second": 103.414,
"eval_steps_per_second": 12.927,
"step": 7755
}
],
"logging_steps": 500,
"max_steps": 7755,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 1.9243077271251886e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}