{
  "best_metric": 0.85,
  "best_model_checkpoint": "vit-base-patch16-224-RU2-10\\checkpoint-308",
  "epoch": 9.87012987012987,
  "eval_steps": 500,
  "global_step": 380,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.26,
      "learning_rate": 2.894736842105263e-05,
      "loss": 1.3769,
      "step": 10
    },
    {
      "epoch": 0.52,
      "learning_rate": 5.484764542936288e-05,
      "loss": 1.3067,
      "step": 20
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.332409972299169e-05,
      "loss": 1.1641,
      "step": 30
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 0.9789325594902039,
      "eval_runtime": 1.301,
      "eval_samples_per_second": 46.117,
      "eval_steps_per_second": 1.537,
      "step": 38
    },
    {
      "epoch": 1.04,
      "learning_rate": 5.18005540166205e-05,
      "loss": 1.0123,
      "step": 40
    },
    {
      "epoch": 1.3,
      "learning_rate": 5.027700831024931e-05,
      "loss": 0.87,
      "step": 50
    },
    {
      "epoch": 1.56,
      "learning_rate": 4.8753462603878116e-05,
      "loss": 0.7091,
      "step": 60
    },
    {
      "epoch": 1.82,
      "learning_rate": 4.7229916897506926e-05,
      "loss": 0.5847,
      "step": 70
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.637053370475769,
      "eval_runtime": 1.0799,
      "eval_samples_per_second": 55.56,
      "eval_steps_per_second": 1.852,
      "step": 77
    },
    {
      "epoch": 2.08,
      "learning_rate": 4.570637119113573e-05,
      "loss": 0.5235,
      "step": 80
    },
    {
      "epoch": 2.34,
      "learning_rate": 4.418282548476455e-05,
      "loss": 0.4276,
      "step": 90
    },
    {
      "epoch": 2.6,
      "learning_rate": 4.265927977839336e-05,
      "loss": 0.3653,
      "step": 100
    },
    {
      "epoch": 2.86,
      "learning_rate": 4.113573407202216e-05,
      "loss": 0.2844,
      "step": 110
    },
    {
      "epoch": 2.99,
      "eval_accuracy": 0.75,
      "eval_loss": 0.6706024408340454,
      "eval_runtime": 1.113,
      "eval_samples_per_second": 53.906,
      "eval_steps_per_second": 1.797,
      "step": 115
    },
    {
      "epoch": 3.12,
      "learning_rate": 3.961218836565097e-05,
      "loss": 0.2144,
      "step": 120
    },
    {
      "epoch": 3.38,
      "learning_rate": 3.808864265927978e-05,
      "loss": 0.2352,
      "step": 130
    },
    {
      "epoch": 3.64,
      "learning_rate": 3.6565096952908585e-05,
      "loss": 0.2413,
      "step": 140
    },
    {
      "epoch": 3.9,
      "learning_rate": 3.5041551246537395e-05,
      "loss": 0.2275,
      "step": 150
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.535870373249054,
      "eval_runtime": 1.0699,
      "eval_samples_per_second": 56.079,
      "eval_steps_per_second": 1.869,
      "step": 154
    },
    {
      "epoch": 4.16,
      "learning_rate": 3.3518005540166206e-05,
      "loss": 0.1779,
      "step": 160
    },
    {
      "epoch": 4.42,
      "learning_rate": 3.1994459833795016e-05,
      "loss": 0.1763,
      "step": 170
    },
    {
      "epoch": 4.68,
      "learning_rate": 3.0470914127423827e-05,
      "loss": 0.1581,
      "step": 180
    },
    {
      "epoch": 4.94,
      "learning_rate": 2.894736842105263e-05,
      "loss": 0.1539,
      "step": 190
    },
    {
      "epoch": 4.99,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.6066630482673645,
      "eval_runtime": 1.0523,
      "eval_samples_per_second": 57.02,
      "eval_steps_per_second": 1.901,
      "step": 192
    },
    {
      "epoch": 5.19,
      "learning_rate": 2.742382271468144e-05,
      "loss": 0.1287,
      "step": 200
    },
    {
      "epoch": 5.45,
      "learning_rate": 2.590027700831025e-05,
      "loss": 0.1306,
      "step": 210
    },
    {
      "epoch": 5.71,
      "learning_rate": 2.4376731301939058e-05,
      "loss": 0.1339,
      "step": 220
    },
    {
      "epoch": 5.97,
      "learning_rate": 2.2853185595567865e-05,
      "loss": 0.1113,
      "step": 230
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.7886819839477539,
      "eval_runtime": 1.095,
      "eval_samples_per_second": 54.792,
      "eval_steps_per_second": 1.826,
      "step": 231
    },
    {
      "epoch": 6.23,
      "learning_rate": 2.132963988919668e-05,
      "loss": 0.0957,
      "step": 240
    },
    {
      "epoch": 6.49,
      "learning_rate": 1.9806094182825486e-05,
      "loss": 0.1304,
      "step": 250
    },
    {
      "epoch": 6.75,
      "learning_rate": 1.8282548476454293e-05,
      "loss": 0.1117,
      "step": 260
    },
    {
      "epoch": 6.99,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.6443384885787964,
      "eval_runtime": 1.142,
      "eval_samples_per_second": 52.54,
      "eval_steps_per_second": 1.751,
      "step": 269
    },
    {
      "epoch": 7.01,
      "learning_rate": 1.6759002770083103e-05,
      "loss": 0.1033,
      "step": 270
    },
    {
      "epoch": 7.27,
      "learning_rate": 1.5235457063711913e-05,
      "loss": 0.0903,
      "step": 280
    },
    {
      "epoch": 7.53,
      "learning_rate": 1.371191135734072e-05,
      "loss": 0.086,
      "step": 290
    },
    {
      "epoch": 7.79,
      "learning_rate": 1.2188365650969529e-05,
      "loss": 0.1088,
      "step": 300
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.85,
      "eval_loss": 0.642919659614563,
      "eval_runtime": 1.1112,
      "eval_samples_per_second": 53.994,
      "eval_steps_per_second": 1.8,
      "step": 308
    },
    {
      "epoch": 8.05,
      "learning_rate": 1.066481994459834e-05,
      "loss": 0.0725,
      "step": 310
    },
    {
      "epoch": 8.31,
      "learning_rate": 9.141274238227146e-06,
      "loss": 0.0761,
      "step": 320
    },
    {
      "epoch": 8.57,
      "learning_rate": 7.617728531855957e-06,
      "loss": 0.0665,
      "step": 330
    },
    {
      "epoch": 8.83,
      "learning_rate": 6.0941828254847645e-06,
      "loss": 0.0824,
      "step": 340
    },
    {
      "epoch": 8.99,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.6498779654502869,
      "eval_runtime": 1.0629,
      "eval_samples_per_second": 56.449,
      "eval_steps_per_second": 1.882,
      "step": 346
    },
    {
      "epoch": 9.09,
      "learning_rate": 4.570637119113573e-06,
      "loss": 0.0749,
      "step": 350
    },
    {
      "epoch": 9.35,
      "learning_rate": 3.0470914127423822e-06,
      "loss": 0.0808,
      "step": 360
    },
    {
      "epoch": 9.61,
      "learning_rate": 1.5235457063711911e-06,
      "loss": 0.077,
      "step": 370
    },
    {
      "epoch": 9.87,
      "learning_rate": 0.0,
      "loss": 0.0834,
      "step": 380
    },
    {
      "epoch": 9.87,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.6802073121070862,
      "eval_runtime": 1.0862,
      "eval_samples_per_second": 55.236,
      "eval_steps_per_second": 1.841,
      "step": 380
    },
    {
      "epoch": 9.87,
      "step": 380,
      "total_flos": 3.7497496079053947e+18,
      "train_loss": 0.31720146521141657,
      "train_runtime": 770.452,
      "train_samples_per_second": 63.599,
      "train_steps_per_second": 0.493
    }
  ],
  "logging_steps": 10,
  "max_steps": 380,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 3.7497496079053947e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}