{
"best_metric": 0.6021606922149658,
"best_model_checkpoint": "mushrooms_image_detection/checkpoint-10945",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 10945,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"grad_norm": 5.143975734710693,
"learning_rate": 1.9173932996787516e-07,
"loss": 0.6876,
"step": 500
},
{
"epoch": 0.09,
"grad_norm": 5.595354080200195,
"learning_rate": 1.825608077099587e-07,
"loss": 0.705,
"step": 1000
},
{
"epoch": 0.14,
"grad_norm": 8.542109489440918,
"learning_rate": 1.733822854520422e-07,
"loss": 0.6908,
"step": 1500
},
{
"epoch": 0.18,
"grad_norm": 5.49858283996582,
"learning_rate": 1.6420376319412576e-07,
"loss": 0.6763,
"step": 2000
},
{
"epoch": 0.23,
"grad_norm": 5.070239067077637,
"learning_rate": 1.5502524093620926e-07,
"loss": 0.6807,
"step": 2500
},
{
"epoch": 0.27,
"grad_norm": 3.0705084800720215,
"learning_rate": 1.458467186782928e-07,
"loss": 0.6879,
"step": 3000
},
{
"epoch": 0.32,
"grad_norm": 3.836571455001831,
"learning_rate": 1.366681964203763e-07,
"loss": 0.69,
"step": 3500
},
{
"epoch": 0.37,
"grad_norm": 7.066412448883057,
"learning_rate": 1.2748967416245983e-07,
"loss": 0.6808,
"step": 4000
},
{
"epoch": 0.41,
"grad_norm": 5.1906328201293945,
"learning_rate": 1.1831115190454337e-07,
"loss": 0.6842,
"step": 4500
},
{
"epoch": 0.46,
"grad_norm": 6.5265703201293945,
"learning_rate": 1.0913262964662688e-07,
"loss": 0.6911,
"step": 5000
},
{
"epoch": 0.5,
"grad_norm": 8.26229190826416,
"learning_rate": 9.995410738871042e-08,
"loss": 0.6743,
"step": 5500
},
{
"epoch": 0.55,
"grad_norm": 5.895538330078125,
"learning_rate": 9.077558513079394e-08,
"loss": 0.6734,
"step": 6000
},
{
"epoch": 0.59,
"grad_norm": 9.030574798583984,
"learning_rate": 8.159706287287747e-08,
"loss": 0.6718,
"step": 6500
},
{
"epoch": 0.64,
"grad_norm": 7.243845462799072,
"learning_rate": 7.241854061496099e-08,
"loss": 0.6698,
"step": 7000
},
{
"epoch": 0.69,
"grad_norm": 5.422345161437988,
"learning_rate": 6.32400183570445e-08,
"loss": 0.6627,
"step": 7500
},
{
"epoch": 0.73,
"grad_norm": 3.86919903755188,
"learning_rate": 5.406149609912804e-08,
"loss": 0.667,
"step": 8000
},
{
"epoch": 0.78,
"grad_norm": 8.272964477539062,
"learning_rate": 4.488297384121156e-08,
"loss": 0.6542,
"step": 8500
},
{
"epoch": 0.82,
"grad_norm": 7.44323205947876,
"learning_rate": 3.5704451583295086e-08,
"loss": 0.6729,
"step": 9000
},
{
"epoch": 0.87,
"grad_norm": 3.3304898738861084,
"learning_rate": 2.6525929325378617e-08,
"loss": 0.6822,
"step": 9500
},
{
"epoch": 0.91,
"grad_norm": 7.638361930847168,
"learning_rate": 1.7347407067462138e-08,
"loss": 0.6749,
"step": 10000
},
{
"epoch": 0.96,
"grad_norm": 4.384842395782471,
"learning_rate": 8.168884809545663e-09,
"loss": 0.6733,
"step": 10500
},
{
"epoch": 1.0,
"eval_accuracy": 0.8896693506938496,
"eval_loss": 0.6021606922149658,
"eval_runtime": 3122.8795,
"eval_samples_per_second": 74.764,
"eval_steps_per_second": 9.346,
"step": 10945
}
],
"logging_steps": 500,
"max_steps": 10945,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 2.716308216840831e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}