{
  "best_metric": 0.6851490665923655,
  "best_model_checkpoint": "beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-8e-05/checkpoint-672",
  "epoch": 2.997772828507795,
  "global_step": 672,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 1.9257,
      "step": 10
    },
    {
      "epoch": 0.09,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 1.7335,
      "step": 20
    },
    {
      "epoch": 0.13,
      "learning_rate": 3.529411764705883e-05,
      "loss": 1.5648,
      "step": 30
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.705882352941177e-05,
      "loss": 1.4534,
      "step": 40
    },
    {
      "epoch": 0.22,
      "learning_rate": 5.8823529411764714e-05,
      "loss": 1.4039,
      "step": 50
    },
    {
      "epoch": 0.27,
      "learning_rate": 7.058823529411765e-05,
      "loss": 1.3592,
      "step": 60
    },
    {
      "epoch": 0.31,
      "learning_rate": 7.973509933774835e-05,
      "loss": 1.3827,
      "step": 70
    },
    {
      "epoch": 0.36,
      "learning_rate": 7.841059602649007e-05,
      "loss": 1.2943,
      "step": 80
    },
    {
      "epoch": 0.4,
      "learning_rate": 7.70860927152318e-05,
      "loss": 1.2908,
      "step": 90
    },
    {
      "epoch": 0.45,
      "learning_rate": 7.576158940397351e-05,
      "loss": 1.3016,
      "step": 100
    },
    {
      "epoch": 0.49,
      "learning_rate": 7.443708609271524e-05,
      "loss": 1.2757,
      "step": 110
    },
    {
      "epoch": 0.53,
      "learning_rate": 7.311258278145696e-05,
      "loss": 1.2541,
      "step": 120
    },
    {
      "epoch": 0.58,
      "learning_rate": 7.178807947019869e-05,
      "loss": 1.2023,
      "step": 130
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.04635761589404e-05,
      "loss": 1.3072,
      "step": 140
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.913907284768213e-05,
      "loss": 1.2497,
      "step": 150
    },
    {
      "epoch": 0.71,
      "learning_rate": 6.781456953642385e-05,
      "loss": 1.2927,
      "step": 160
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.649006622516556e-05,
      "loss": 1.2041,
      "step": 170
    },
    {
      "epoch": 0.8,
      "learning_rate": 6.516556291390729e-05,
      "loss": 1.199,
      "step": 180
    },
    {
      "epoch": 0.85,
      "learning_rate": 6.3841059602649e-05,
      "loss": 1.2165,
      "step": 190
    },
    {
      "epoch": 0.89,
      "learning_rate": 6.251655629139073e-05,
      "loss": 1.1911,
      "step": 200
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.119205298013245e-05,
      "loss": 1.1666,
      "step": 210
    },
    {
      "epoch": 0.98,
      "learning_rate": 5.986754966887418e-05,
      "loss": 1.1538,
      "step": 220
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6173028698801895,
      "eval_loss": 1.0146794319152832,
      "eval_runtime": 61.7913,
      "eval_samples_per_second": 116.165,
      "eval_steps_per_second": 3.641,
      "step": 224
    },
    {
      "epoch": 1.03,
      "learning_rate": 5.85430463576159e-05,
      "loss": 1.2208,
      "step": 230
    },
    {
      "epoch": 1.07,
      "learning_rate": 5.721854304635762e-05,
      "loss": 1.1085,
      "step": 240
    },
    {
      "epoch": 1.12,
      "learning_rate": 5.589403973509934e-05,
      "loss": 1.1218,
      "step": 250
    },
    {
      "epoch": 1.16,
      "learning_rate": 5.456953642384107e-05,
      "loss": 1.1537,
      "step": 260
    },
    {
      "epoch": 1.2,
      "learning_rate": 5.324503311258278e-05,
      "loss": 1.1485,
      "step": 270
    },
    {
      "epoch": 1.25,
      "learning_rate": 5.1920529801324506e-05,
      "loss": 1.1285,
      "step": 280
    },
    {
      "epoch": 1.29,
      "learning_rate": 5.059602649006623e-05,
      "loss": 1.1556,
      "step": 290
    },
    {
      "epoch": 1.34,
      "learning_rate": 4.927152317880795e-05,
      "loss": 1.0654,
      "step": 300
    },
    {
      "epoch": 1.38,
      "learning_rate": 4.794701986754967e-05,
      "loss": 1.1231,
      "step": 310
    },
    {
      "epoch": 1.43,
      "learning_rate": 4.6622516556291395e-05,
      "loss": 1.192,
      "step": 320
    },
    {
      "epoch": 1.47,
      "learning_rate": 4.5298013245033114e-05,
      "loss": 1.0347,
      "step": 330
    },
    {
      "epoch": 1.52,
      "learning_rate": 4.397350993377484e-05,
      "loss": 1.0308,
      "step": 340
    },
    {
      "epoch": 1.56,
      "learning_rate": 4.264900662251656e-05,
      "loss": 1.1021,
      "step": 350
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.132450331125828e-05,
      "loss": 1.0571,
      "step": 360
    },
    {
      "epoch": 1.65,
      "learning_rate": 4e-05,
      "loss": 1.0717,
      "step": 370
    },
    {
      "epoch": 1.69,
      "learning_rate": 3.867549668874173e-05,
      "loss": 1.1333,
      "step": 380
    },
    {
      "epoch": 1.74,
      "learning_rate": 3.735099337748345e-05,
      "loss": 1.1149,
      "step": 390
    },
    {
      "epoch": 1.78,
      "learning_rate": 3.6026490066225173e-05,
      "loss": 1.0476,
      "step": 400
    },
    {
      "epoch": 1.83,
      "learning_rate": 3.470198675496689e-05,
      "loss": 1.0465,
      "step": 410
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.337748344370861e-05,
      "loss": 1.0967,
      "step": 420
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.205298013245033e-05,
      "loss": 1.0408,
      "step": 430
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.0728476821192056e-05,
      "loss": 1.03,
      "step": 440
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6588186124268598,
      "eval_loss": 0.9184884428977966,
      "eval_runtime": 65.4725,
      "eval_samples_per_second": 109.634,
      "eval_steps_per_second": 3.437,
      "step": 448
    },
    {
      "epoch": 2.01,
      "learning_rate": 2.9403973509933778e-05,
      "loss": 1.0675,
      "step": 450
    },
    {
      "epoch": 2.05,
      "learning_rate": 2.80794701986755e-05,
      "loss": 1.0017,
      "step": 460
    },
    {
      "epoch": 2.1,
      "learning_rate": 2.675496688741722e-05,
      "loss": 1.0437,
      "step": 470
    },
    {
      "epoch": 2.14,
      "learning_rate": 2.5430463576158945e-05,
      "loss": 0.9995,
      "step": 480
    },
    {
      "epoch": 2.19,
      "learning_rate": 2.4105960264900664e-05,
      "loss": 1.0433,
      "step": 490
    },
    {
      "epoch": 2.23,
      "learning_rate": 2.2781456953642386e-05,
      "loss": 1.0132,
      "step": 500
    },
    {
      "epoch": 2.28,
      "learning_rate": 2.1456953642384105e-05,
      "loss": 1.0186,
      "step": 510
    },
    {
      "epoch": 2.32,
      "learning_rate": 2.013245033112583e-05,
      "loss": 1.0136,
      "step": 520
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.8807947019867553e-05,
      "loss": 1.0207,
      "step": 530
    },
    {
      "epoch": 2.41,
      "learning_rate": 1.7483443708609275e-05,
      "loss": 0.9965,
      "step": 540
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.6158940397350994e-05,
      "loss": 0.9838,
      "step": 550
    },
    {
      "epoch": 2.5,
      "learning_rate": 1.4834437086092716e-05,
      "loss": 1.0405,
      "step": 560
    },
    {
      "epoch": 2.54,
      "learning_rate": 1.3509933774834438e-05,
      "loss": 0.9921,
      "step": 570
    },
    {
      "epoch": 2.59,
      "learning_rate": 1.218543046357616e-05,
      "loss": 1.0087,
      "step": 580
    },
    {
      "epoch": 2.63,
      "learning_rate": 1.0860927152317881e-05,
      "loss": 1.0011,
      "step": 590
    },
    {
      "epoch": 2.68,
      "learning_rate": 9.536423841059603e-06,
      "loss": 1.0205,
      "step": 600
    },
    {
      "epoch": 2.72,
      "learning_rate": 8.211920529801326e-06,
      "loss": 1.0155,
      "step": 610
    },
    {
      "epoch": 2.77,
      "learning_rate": 6.887417218543046e-06,
      "loss": 0.9777,
      "step": 620
    },
    {
      "epoch": 2.81,
      "learning_rate": 5.5629139072847685e-06,
      "loss": 0.9669,
      "step": 630
    },
    {
      "epoch": 2.86,
      "learning_rate": 4.238410596026491e-06,
      "loss": 0.9621,
      "step": 640
    },
    {
      "epoch": 2.9,
      "learning_rate": 2.913907284768212e-06,
      "loss": 0.9538,
      "step": 650
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.589403973509934e-06,
      "loss": 1.0232,
      "step": 660
    },
    {
      "epoch": 2.99,
      "learning_rate": 2.649006622516557e-07,
      "loss": 0.9692,
      "step": 670
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6851490665923655,
      "eval_loss": 0.8512730002403259,
      "eval_runtime": 67.5959,
      "eval_samples_per_second": 106.19,
      "eval_steps_per_second": 3.329,
      "step": 672
    },
    {
      "epoch": 3.0,
      "step": 672,
      "total_flos": 6.668732964123095e+18,
      "train_loss": 1.145610089103381,
      "train_runtime": 2105.9038,
      "train_samples_per_second": 40.898,
      "train_steps_per_second": 0.319
    }
  ],
  "max_steps": 672,
  "num_train_epochs": 3,
  "total_flos": 6.668732964123095e+18,
  "trial_name": null,
  "trial_params": null
}