|
{
  "best_metric": 0.7309236947791165,
  "best_model_checkpoint": "vit-base-patch16-224-finetuned-for-agricultural/checkpoint-140",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 350,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.29,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 1.0138,
      "step": 10
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.9245,
      "step": 20
    },
    {
      "epoch": 0.86,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.9131,
      "step": 30
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6847389558232931,
      "eval_loss": 1.0877727270126343,
      "eval_runtime": 8.0762,
      "eval_samples_per_second": 61.662,
      "eval_steps_per_second": 1.981,
      "step": 35
    },
    {
      "epoch": 1.14,
      "learning_rate": 4.9206349206349204e-05,
      "loss": 0.8061,
      "step": 40
    },
    {
      "epoch": 1.43,
      "learning_rate": 4.761904761904762e-05,
      "loss": 0.8195,
      "step": 50
    },
    {
      "epoch": 1.71,
      "learning_rate": 4.603174603174603e-05,
      "loss": 0.7651,
      "step": 60
    },
    {
      "epoch": 2.0,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.8066,
      "step": 70
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7188755020080321,
      "eval_loss": 0.9932681322097778,
      "eval_runtime": 7.6803,
      "eval_samples_per_second": 64.841,
      "eval_steps_per_second": 2.083,
      "step": 70
    },
    {
      "epoch": 2.29,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.7147,
      "step": 80
    },
    {
      "epoch": 2.57,
      "learning_rate": 4.126984126984127e-05,
      "loss": 0.7225,
      "step": 90
    },
    {
      "epoch": 2.86,
      "learning_rate": 3.968253968253968e-05,
      "loss": 0.7259,
      "step": 100
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7248995983935743,
      "eval_loss": 0.9445154666900635,
      "eval_runtime": 7.2886,
      "eval_samples_per_second": 68.326,
      "eval_steps_per_second": 2.195,
      "step": 105
    },
    {
      "epoch": 3.14,
      "learning_rate": 3.809523809523809e-05,
      "loss": 0.7531,
      "step": 110
    },
    {
      "epoch": 3.43,
      "learning_rate": 3.650793650793651e-05,
      "loss": 0.6886,
      "step": 120
    },
    {
      "epoch": 3.71,
      "learning_rate": 3.492063492063492e-05,
      "loss": 0.6582,
      "step": 130
    },
    {
      "epoch": 4.0,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6719,
      "step": 140
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7309236947791165,
      "eval_loss": 0.9245957136154175,
      "eval_runtime": 7.3939,
      "eval_samples_per_second": 67.352,
      "eval_steps_per_second": 2.164,
      "step": 140
    },
    {
      "epoch": 4.29,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 0.5603,
      "step": 150
    },
    {
      "epoch": 4.57,
      "learning_rate": 3.0158730158730158e-05,
      "loss": 0.6393,
      "step": 160
    },
    {
      "epoch": 4.86,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.6056,
      "step": 170
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7228915662650602,
      "eval_loss": 0.9257926344871521,
      "eval_runtime": 7.3009,
      "eval_samples_per_second": 68.211,
      "eval_steps_per_second": 2.192,
      "step": 175
    },
    {
      "epoch": 5.14,
      "learning_rate": 2.6984126984126984e-05,
      "loss": 0.5759,
      "step": 180
    },
    {
      "epoch": 5.43,
      "learning_rate": 2.5396825396825397e-05,
      "loss": 0.5775,
      "step": 190
    },
    {
      "epoch": 5.71,
      "learning_rate": 2.380952380952381e-05,
      "loss": 0.5253,
      "step": 200
    },
    {
      "epoch": 6.0,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.5576,
      "step": 210
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7309236947791165,
      "eval_loss": 0.9229769706726074,
      "eval_runtime": 7.6266,
      "eval_samples_per_second": 65.297,
      "eval_steps_per_second": 2.098,
      "step": 210
    },
    {
      "epoch": 6.29,
      "learning_rate": 2.0634920634920636e-05,
      "loss": 0.5068,
      "step": 220
    },
    {
      "epoch": 6.57,
      "learning_rate": 1.9047619047619046e-05,
      "loss": 0.4967,
      "step": 230
    },
    {
      "epoch": 6.86,
      "learning_rate": 1.746031746031746e-05,
      "loss": 0.5113,
      "step": 240
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.7168674698795181,
      "eval_loss": 0.915241003036499,
      "eval_runtime": 7.9518,
      "eval_samples_per_second": 62.627,
      "eval_steps_per_second": 2.012,
      "step": 245
    },
    {
      "epoch": 7.14,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 0.5246,
      "step": 250
    },
    {
      "epoch": 7.43,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.4652,
      "step": 260
    },
    {
      "epoch": 7.71,
      "learning_rate": 1.2698412698412699e-05,
      "loss": 0.4524,
      "step": 270
    },
    {
      "epoch": 8.0,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.488,
      "step": 280
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7208835341365462,
      "eval_loss": 0.9118531942367554,
      "eval_runtime": 8.0283,
      "eval_samples_per_second": 62.03,
      "eval_steps_per_second": 1.993,
      "step": 280
    },
    {
      "epoch": 8.29,
      "learning_rate": 9.523809523809523e-06,
      "loss": 0.4497,
      "step": 290
    },
    {
      "epoch": 8.57,
      "learning_rate": 7.936507936507936e-06,
      "loss": 0.4589,
      "step": 300
    },
    {
      "epoch": 8.86,
      "learning_rate": 6.349206349206349e-06,
      "loss": 0.4822,
      "step": 310
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.7269076305220884,
      "eval_loss": 0.9061241745948792,
      "eval_runtime": 7.749,
      "eval_samples_per_second": 64.267,
      "eval_steps_per_second": 2.065,
      "step": 315
    },
    {
      "epoch": 9.14,
      "learning_rate": 4.7619047619047615e-06,
      "loss": 0.4558,
      "step": 320
    },
    {
      "epoch": 9.43,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 0.4418,
      "step": 330
    },
    {
      "epoch": 9.71,
      "learning_rate": 1.5873015873015873e-06,
      "loss": 0.4369,
      "step": 340
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.0,
      "loss": 0.4163,
      "step": 350
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.7289156626506024,
      "eval_loss": 0.9038876295089722,
      "eval_runtime": 7.7862,
      "eval_samples_per_second": 63.96,
      "eval_steps_per_second": 2.055,
      "step": 350
    },
    {
      "epoch": 10.0,
      "step": 350,
      "total_flos": 3.4674577185952973e+18,
      "train_loss": 0.6174789919172015,
      "train_runtime": 1725.4363,
      "train_samples_per_second": 25.93,
      "train_steps_per_second": 0.203
    }
  ],
  "logging_steps": 10,
  "max_steps": 350,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 3.4674577185952973e+18,
  "trial_name": null,
  "trial_params": null
}
|
|