food-image-classification/trainer_state.json
{
"best_metric": 0.7946534653465347,
"best_model_checkpoint": "food-image-classification/checkpoint-12000",
"epoch": 13.727560718057022,
"eval_steps": 1000,
"global_step": 13000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.06,
"learning_rate": 1.0559662090813095e-06,
"loss": 4.6112,
"step": 1000
},
{
"epoch": 1.06,
"eval_accuracy": 0.034851485148514855,
"eval_loss": 4.575930118560791,
"eval_runtime": 157.053,
"eval_samples_per_second": 96.464,
"eval_steps_per_second": 6.03,
"step": 1000
},
{
"epoch": 2.11,
"learning_rate": 2.111932418162619e-06,
"loss": 4.4899,
"step": 2000
},
{
"epoch": 2.11,
"eval_accuracy": 0.3103630363036304,
"eval_loss": 4.3788862228393555,
"eval_runtime": 154.3948,
"eval_samples_per_second": 98.125,
"eval_steps_per_second": 6.134,
"step": 2000
},
{
"epoch": 3.17,
"learning_rate": 3.167898627243928e-06,
"loss": 4.2111,
"step": 3000
},
{
"epoch": 3.17,
"eval_accuracy": 0.5498349834983498,
"eval_loss": 4.030922889709473,
"eval_runtime": 155.2257,
"eval_samples_per_second": 97.6,
"eval_steps_per_second": 6.101,
"step": 3000
},
{
"epoch": 4.22,
"learning_rate": 4.223864836325238e-06,
"loss": 3.8257,
"step": 4000
},
{
"epoch": 4.22,
"eval_accuracy": 0.6111551155115511,
"eval_loss": 3.634243965148926,
"eval_runtime": 156.2293,
"eval_samples_per_second": 96.973,
"eval_steps_per_second": 6.062,
"step": 4000
},
{
"epoch": 5.28,
"learning_rate": 5.279831045406547e-06,
"loss": 3.4182,
"step": 5000
},
{
"epoch": 5.28,
"eval_accuracy": 0.6514851485148515,
"eval_loss": 3.225186586380005,
"eval_runtime": 154.6911,
"eval_samples_per_second": 97.937,
"eval_steps_per_second": 6.122,
"step": 5000
},
{
"epoch": 6.34,
"learning_rate": 6.335797254487856e-06,
"loss": 2.9962,
"step": 6000
},
{
"epoch": 6.34,
"eval_accuracy": 0.687062706270627,
"eval_loss": 2.805878162384033,
"eval_runtime": 154.6826,
"eval_samples_per_second": 97.943,
"eval_steps_per_second": 6.122,
"step": 6000
},
{
"epoch": 7.39,
"learning_rate": 7.3917634635691666e-06,
"loss": 2.5605,
"step": 7000
},
{
"epoch": 7.39,
"eval_accuracy": 0.7070627062706271,
"eval_loss": 2.382246494293213,
"eval_runtime": 155.5967,
"eval_samples_per_second": 97.367,
"eval_steps_per_second": 6.086,
"step": 7000
},
{
"epoch": 8.45,
"learning_rate": 8.447729672650476e-06,
"loss": 2.1397,
"step": 8000
},
{
"epoch": 8.45,
"eval_accuracy": 0.7335973597359736,
"eval_loss": 1.975380539894104,
"eval_runtime": 156.8793,
"eval_samples_per_second": 96.571,
"eval_steps_per_second": 6.036,
"step": 8000
},
{
"epoch": 9.5,
"learning_rate": 9.503695881731786e-06,
"loss": 1.7383,
"step": 9000
},
{
"epoch": 9.5,
"eval_accuracy": 0.7576897689768977,
"eval_loss": 1.608676552772522,
"eval_runtime": 154.5661,
"eval_samples_per_second": 98.016,
"eval_steps_per_second": 6.127,
"step": 9000
},
{
"epoch": 10.56,
"learning_rate": 1.0559662090813093e-05,
"loss": 1.3909,
"step": 10000
},
{
"epoch": 10.56,
"eval_accuracy": 0.7758415841584159,
"eval_loss": 1.3203929662704468,
"eval_runtime": 155.6061,
"eval_samples_per_second": 97.361,
"eval_steps_per_second": 6.086,
"step": 10000
},
{
"epoch": 11.62,
"learning_rate": 1.1615628299894405e-05,
"loss": 1.1223,
"step": 11000
},
{
"epoch": 11.62,
"eval_accuracy": 0.7831683168316832,
"eval_loss": 1.1283260583877563,
"eval_runtime": 153.8564,
"eval_samples_per_second": 98.468,
"eval_steps_per_second": 6.155,
"step": 11000
},
{
"epoch": 12.67,
"learning_rate": 1.2671594508975712e-05,
"loss": 0.9312,
"step": 12000
},
{
"epoch": 12.67,
"eval_accuracy": 0.7946534653465347,
"eval_loss": 0.9766868352890015,
"eval_runtime": 155.0819,
"eval_samples_per_second": 97.69,
"eval_steps_per_second": 6.106,
"step": 12000
},
{
"epoch": 13.73,
"learning_rate": 1.3727560718057022e-05,
"loss": 0.7817,
"step": 13000
},
{
"epoch": 13.73,
"eval_accuracy": 0.7984158415841585,
"eval_loss": 0.8917332887649536,
"eval_runtime": 154.0819,
"eval_samples_per_second": 98.324,
"eval_steps_per_second": 6.146,
"step": 13000
}
],
"logging_steps": 1000,
"max_steps": 473500,
"num_input_tokens_seen": 0,
"num_train_epochs": 500,
"save_steps": 1000,
"total_flos": 6.452247659853865e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
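
For reference, a minimal sketch (an assumption, not part of the original file) of how the log_history above can be consumed, assuming Python with the file saved locally as trainer_state.json: it splits the entries into training-loss and evaluation logs and locates the best eval_accuracy.

import json

# Load the Trainer state written alongside the checkpoints.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry is either a training log (has "loss")
# or an evaluation log (has "eval_accuracy").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

# Best observed eval accuracy in the logged history.
best = max(eval_logs, key=lambda e: e["eval_accuracy"])
print(f"best eval_accuracy {best['eval_accuracy']:.4f} at step {best['step']}")

# Value recorded by the Trainer itself (may lag the latest eval step,
# since best_model_checkpoint is only updated when a checkpoint is saved as best).
print(f"recorded best_metric {state['best_metric']} "
      f"from {state['best_model_checkpoint']}")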