{
  "best_metric": 0.6415436054611312,
  "best_model_checkpoint": "beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-9e-05/checkpoint-224",
  "epoch": 0.9977728285077951,
  "global_step": 224,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 3.91304347826087e-05,
      "loss": 1.9178,
      "step": 10
    },
    {
      "epoch": 0.09,
      "learning_rate": 7.82608695652174e-05,
      "loss": 1.6833,
      "step": 20
    },
    {
      "epoch": 0.13,
      "learning_rate": 8.686567164179105e-05,
      "loss": 1.5051,
      "step": 30
    },
    {
      "epoch": 0.18,
      "learning_rate": 8.238805970149254e-05,
      "loss": 1.4696,
      "step": 40
    },
    {
      "epoch": 0.22,
      "learning_rate": 7.791044776119403e-05,
      "loss": 1.3784,
      "step": 50
    },
    {
      "epoch": 0.27,
      "learning_rate": 7.343283582089552e-05,
      "loss": 1.3547,
      "step": 60
    },
    {
      "epoch": 0.31,
      "learning_rate": 6.895522388059701e-05,
      "loss": 1.3355,
      "step": 70
    },
    {
      "epoch": 0.36,
      "learning_rate": 6.447761194029851e-05,
      "loss": 1.2555,
      "step": 80
    },
    {
      "epoch": 0.4,
      "learning_rate": 6e-05,
      "loss": 1.2671,
      "step": 90
    },
    {
      "epoch": 0.45,
      "learning_rate": 5.5522388059701494e-05,
      "loss": 1.2823,
      "step": 100
    },
    {
      "epoch": 0.49,
      "learning_rate": 5.1044776119402986e-05,
      "loss": 1.2462,
      "step": 110
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.656716417910448e-05,
      "loss": 1.207,
      "step": 120
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.208955223880597e-05,
      "loss": 1.1757,
      "step": 130
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.761194029850746e-05,
      "loss": 1.2294,
      "step": 140
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.3134328358208955e-05,
      "loss": 1.1943,
      "step": 150
    },
    {
      "epoch": 0.71,
      "learning_rate": 2.865671641791045e-05,
      "loss": 1.233,
      "step": 160
    },
    {
      "epoch": 0.76,
      "learning_rate": 2.4179104477611943e-05,
      "loss": 1.1816,
      "step": 170
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.9701492537313435e-05,
      "loss": 1.1394,
      "step": 180
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.5223880597014927e-05,
      "loss": 1.1697,
      "step": 190
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.0746268656716418e-05,
      "loss": 1.1656,
      "step": 200
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.268656716417911e-06,
      "loss": 1.1379,
      "step": 210
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.7910447761194032e-06,
      "loss": 1.1175,
      "step": 220
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6415436054611312,
      "eval_loss": 0.9517616629600525,
      "eval_runtime": 77.1312,
      "eval_samples_per_second": 93.062,
      "eval_steps_per_second": 2.917,
      "step": 224
    },
    {
      "epoch": 1.0,
      "step": 224,
      "total_flos": 2.2210002502885048e+18,
      "train_loss": 1.2996089096580232,
      "train_runtime": 698.1963,
      "train_samples_per_second": 41.119,
      "train_steps_per_second": 0.321
    }
  ],
  "max_steps": 224,
  "num_train_epochs": 1,
  "total_flos": 2.2210002502885048e+18,
  "trial_name": null,
  "trial_params": null
}