{
"best_metric": 1.0,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-mobile-eye-tracking-dataset-v2/checkpoint-370",
"epoch": 14.91891891891892,
"eval_steps": 500,
"global_step": 690,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.43,
"learning_rate": 1.4492753623188407e-05,
"loss": 1.3219,
"step": 20
},
{
"epoch": 0.86,
"learning_rate": 2.8985507246376814e-05,
"loss": 0.8881,
"step": 40
},
{
"epoch": 0.99,
"eval_accuracy": 0.8282504012841091,
"eval_loss": 0.4645065367221832,
"eval_runtime": 6.9111,
"eval_samples_per_second": 90.145,
"eval_steps_per_second": 1.447,
"step": 46
},
{
"epoch": 1.3,
"learning_rate": 4.347826086956522e-05,
"loss": 0.4619,
"step": 60
},
{
"epoch": 1.73,
"learning_rate": 4.911433172302738e-05,
"loss": 0.257,
"step": 80
},
{
"epoch": 1.99,
"eval_accuracy": 0.9823434991974318,
"eval_loss": 0.07187946140766144,
"eval_runtime": 7.3374,
"eval_samples_per_second": 84.908,
"eval_steps_per_second": 1.363,
"step": 92
},
{
"epoch": 2.16,
"learning_rate": 4.7504025764895335e-05,
"loss": 0.1702,
"step": 100
},
{
"epoch": 2.59,
"learning_rate": 4.589371980676328e-05,
"loss": 0.1419,
"step": 120
},
{
"epoch": 2.98,
"eval_accuracy": 0.9823434991974318,
"eval_loss": 0.04622809961438179,
"eval_runtime": 7.1362,
"eval_samples_per_second": 87.301,
"eval_steps_per_second": 1.401,
"step": 138
},
{
"epoch": 3.03,
"learning_rate": 4.428341384863124e-05,
"loss": 0.1013,
"step": 140
},
{
"epoch": 3.46,
"learning_rate": 4.26731078904992e-05,
"loss": 0.1057,
"step": 160
},
{
"epoch": 3.89,
"learning_rate": 4.106280193236715e-05,
"loss": 0.0888,
"step": 180
},
{
"epoch": 4.0,
"eval_accuracy": 0.9983948635634029,
"eval_loss": 0.00559239462018013,
"eval_runtime": 7.1577,
"eval_samples_per_second": 87.04,
"eval_steps_per_second": 1.397,
"step": 185
},
{
"epoch": 4.32,
"learning_rate": 3.9452495974235105e-05,
"loss": 0.0777,
"step": 200
},
{
"epoch": 4.76,
"learning_rate": 3.784219001610306e-05,
"loss": 0.0706,
"step": 220
},
{
"epoch": 4.99,
"eval_accuracy": 0.9935794542536116,
"eval_loss": 0.018903154879808426,
"eval_runtime": 6.9404,
"eval_samples_per_second": 89.764,
"eval_steps_per_second": 1.441,
"step": 231
},
{
"epoch": 5.19,
"learning_rate": 3.6231884057971014e-05,
"loss": 0.0621,
"step": 240
},
{
"epoch": 5.62,
"learning_rate": 3.462157809983897e-05,
"loss": 0.057,
"step": 260
},
{
"epoch": 5.99,
"eval_accuracy": 0.9967897271268058,
"eval_loss": 0.006798300892114639,
"eval_runtime": 7.1017,
"eval_samples_per_second": 87.726,
"eval_steps_per_second": 1.408,
"step": 277
},
{
"epoch": 6.05,
"learning_rate": 3.301127214170693e-05,
"loss": 0.0566,
"step": 280
},
{
"epoch": 6.49,
"learning_rate": 3.140096618357488e-05,
"loss": 0.0542,
"step": 300
},
{
"epoch": 6.92,
"learning_rate": 2.9790660225442833e-05,
"loss": 0.0592,
"step": 320
},
{
"epoch": 6.98,
"eval_accuracy": 0.9983948635634029,
"eval_loss": 0.004527484066784382,
"eval_runtime": 7.0776,
"eval_samples_per_second": 88.024,
"eval_steps_per_second": 1.413,
"step": 323
},
{
"epoch": 7.35,
"learning_rate": 2.8180354267310787e-05,
"loss": 0.0534,
"step": 340
},
{
"epoch": 7.78,
"learning_rate": 2.6570048309178748e-05,
"loss": 0.0475,
"step": 360
},
{
"epoch": 8.0,
"eval_accuracy": 1.0,
"eval_loss": 0.0018792045302689075,
"eval_runtime": 6.8298,
"eval_samples_per_second": 91.218,
"eval_steps_per_second": 1.464,
"step": 370
},
{
"epoch": 8.22,
"learning_rate": 2.49597423510467e-05,
"loss": 0.0432,
"step": 380
},
{
"epoch": 8.65,
"learning_rate": 2.3349436392914656e-05,
"loss": 0.0472,
"step": 400
},
{
"epoch": 8.99,
"eval_accuracy": 1.0,
"eval_loss": 0.002520466921851039,
"eval_runtime": 6.961,
"eval_samples_per_second": 89.498,
"eval_steps_per_second": 1.437,
"step": 416
},
{
"epoch": 9.08,
"learning_rate": 2.173913043478261e-05,
"loss": 0.0426,
"step": 420
},
{
"epoch": 9.51,
"learning_rate": 2.0128824476650564e-05,
"loss": 0.0458,
"step": 440
},
{
"epoch": 9.95,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.0404,
"step": 460
},
{
"epoch": 9.99,
"eval_accuracy": 1.0,
"eval_loss": 0.00030820202664472163,
"eval_runtime": 6.9828,
"eval_samples_per_second": 89.219,
"eval_steps_per_second": 1.432,
"step": 462
},
{
"epoch": 10.38,
"learning_rate": 1.6908212560386476e-05,
"loss": 0.0475,
"step": 480
},
{
"epoch": 10.81,
"learning_rate": 1.529790660225443e-05,
"loss": 0.0392,
"step": 500
},
{
"epoch": 10.98,
"eval_accuracy": 1.0,
"eval_loss": 0.0003477015416137874,
"eval_runtime": 6.8849,
"eval_samples_per_second": 90.488,
"eval_steps_per_second": 1.452,
"step": 508
},
{
"epoch": 11.24,
"learning_rate": 1.3687600644122384e-05,
"loss": 0.0341,
"step": 520
},
{
"epoch": 11.68,
"learning_rate": 1.2077294685990338e-05,
"loss": 0.0337,
"step": 540
},
{
"epoch": 12.0,
"eval_accuracy": 1.0,
"eval_loss": 0.0006900139851495624,
"eval_runtime": 6.8733,
"eval_samples_per_second": 90.641,
"eval_steps_per_second": 1.455,
"step": 555
},
{
"epoch": 12.11,
"learning_rate": 1.0466988727858294e-05,
"loss": 0.0321,
"step": 560
},
{
"epoch": 12.54,
"learning_rate": 8.85668276972625e-06,
"loss": 0.0279,
"step": 580
},
{
"epoch": 12.97,
"learning_rate": 7.246376811594203e-06,
"loss": 0.0279,
"step": 600
},
{
"epoch": 12.99,
"eval_accuracy": 1.0,
"eval_loss": 0.0003141628985758871,
"eval_runtime": 6.9013,
"eval_samples_per_second": 90.273,
"eval_steps_per_second": 1.449,
"step": 601
},
{
"epoch": 13.41,
"learning_rate": 5.636070853462158e-06,
"loss": 0.0259,
"step": 620
},
{
"epoch": 13.84,
"learning_rate": 4.025764895330112e-06,
"loss": 0.0273,
"step": 640
},
{
"epoch": 13.99,
"eval_accuracy": 1.0,
"eval_loss": 0.00027547089848667383,
"eval_runtime": 6.8982,
"eval_samples_per_second": 90.313,
"eval_steps_per_second": 1.45,
"step": 647
},
{
"epoch": 14.27,
"learning_rate": 2.4154589371980677e-06,
"loss": 0.0265,
"step": 660
},
{
"epoch": 14.7,
"learning_rate": 8.051529790660226e-07,
"loss": 0.0236,
"step": 680
},
{
"epoch": 14.92,
"eval_accuracy": 1.0,
"eval_loss": 0.0002015855861827731,
"eval_runtime": 6.9691,
"eval_samples_per_second": 89.394,
"eval_steps_per_second": 1.435,
"step": 690
},
{
"epoch": 14.92,
"step": 690,
"total_flos": 4.3848912042296525e+18,
"train_loss": 0.13484098049609558,
"train_runtime": 2784.1229,
"train_samples_per_second": 63.699,
"train_steps_per_second": 0.248
}
],
"logging_steps": 20,
"max_steps": 690,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"total_flos": 4.3848912042296525e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
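
For reference, a minimal sketch of reading this trainer state back in Python. It assumes the JSON above is saved locally as trainer_state.json (the filename written by the Hugging Face Trainer inside each checkpoint directory; the path here is an assumption, adjust as needed) and simply splits log_history into training-loss entries and evaluation entries using the keys present in the file.

import json

# Path is an assumption; point it at the trainer_state.json shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_accuracy".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

print(f"best_metric (eval accuracy): {state['best_metric']}")
print(f"best checkpoint: {state['best_model_checkpoint']}")

# Print the evaluation curve: accuracy and loss per logged epoch.
for e in eval_logs:
    print(f"epoch {e['epoch']:>6}: eval_accuracy={e['eval_accuracy']:.4f} "
          f"eval_loss={e['eval_loss']:.6f}")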