{
"best_metric": 0.8418079096045198,
"best_model_checkpoint": "convnextv2-base-22k-224-finetuned-eurosat/checkpoint-396",
"epoch": 2.9962168978562422,
"eval_steps": 500,
"global_step": 594,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 8.333333333333334e-06,
"loss": 1.2525,
"step": 10
},
{
"epoch": 0.1,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.9977,
"step": 20
},
{
"epoch": 0.15,
"learning_rate": 2.5e-05,
"loss": 0.9665,
"step": 30
},
{
"epoch": 0.2,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.7669,
"step": 40
},
{
"epoch": 0.25,
"learning_rate": 4.166666666666667e-05,
"loss": 0.7126,
"step": 50
},
{
"epoch": 0.3,
"learning_rate": 5e-05,
"loss": 0.728,
"step": 60
},
{
"epoch": 0.35,
"learning_rate": 4.906367041198502e-05,
"loss": 0.7651,
"step": 70
},
{
"epoch": 0.4,
"learning_rate": 4.812734082397004e-05,
"loss": 0.7771,
"step": 80
},
{
"epoch": 0.45,
"learning_rate": 4.719101123595506e-05,
"loss": 0.6614,
"step": 90
},
{
"epoch": 0.5,
"learning_rate": 4.625468164794008e-05,
"loss": 0.6218,
"step": 100
},
{
"epoch": 0.55,
"learning_rate": 4.531835205992509e-05,
"loss": 0.6318,
"step": 110
},
{
"epoch": 0.61,
"learning_rate": 4.438202247191011e-05,
"loss": 0.5905,
"step": 120
},
{
"epoch": 0.66,
"learning_rate": 4.344569288389513e-05,
"loss": 0.6299,
"step": 130
},
{
"epoch": 0.71,
"learning_rate": 4.250936329588015e-05,
"loss": 0.6222,
"step": 140
},
{
"epoch": 0.76,
"learning_rate": 4.157303370786517e-05,
"loss": 0.5438,
"step": 150
},
{
"epoch": 0.81,
"learning_rate": 4.063670411985019e-05,
"loss": 0.5207,
"step": 160
},
{
"epoch": 0.86,
"learning_rate": 3.970037453183521e-05,
"loss": 0.5481,
"step": 170
},
{
"epoch": 0.91,
"learning_rate": 3.876404494382023e-05,
"loss": 0.5603,
"step": 180
},
{
"epoch": 0.96,
"learning_rate": 3.782771535580524e-05,
"loss": 0.5909,
"step": 190
},
{
"epoch": 1.0,
"eval_accuracy": 0.7966101694915254,
"eval_loss": 0.5286521315574646,
"eval_runtime": 5.9941,
"eval_samples_per_second": 29.529,
"eval_steps_per_second": 14.848,
"step": 198
},
{
"epoch": 1.01,
"learning_rate": 3.689138576779027e-05,
"loss": 0.3812,
"step": 200
},
{
"epoch": 1.06,
"learning_rate": 3.595505617977528e-05,
"loss": 0.3082,
"step": 210
},
{
"epoch": 1.11,
"learning_rate": 3.5018726591760305e-05,
"loss": 0.3588,
"step": 220
},
{
"epoch": 1.16,
"learning_rate": 3.408239700374532e-05,
"loss": 0.4855,
"step": 230
},
{
"epoch": 1.21,
"learning_rate": 3.314606741573034e-05,
"loss": 0.3966,
"step": 240
},
{
"epoch": 1.26,
"learning_rate": 3.220973782771536e-05,
"loss": 0.2686,
"step": 250
},
{
"epoch": 1.31,
"learning_rate": 3.1273408239700376e-05,
"loss": 0.4901,
"step": 260
},
{
"epoch": 1.36,
"learning_rate": 3.0337078651685396e-05,
"loss": 0.4859,
"step": 270
},
{
"epoch": 1.41,
"learning_rate": 2.940074906367041e-05,
"loss": 0.4439,
"step": 280
},
{
"epoch": 1.46,
"learning_rate": 2.846441947565543e-05,
"loss": 0.3942,
"step": 290
},
{
"epoch": 1.51,
"learning_rate": 2.752808988764045e-05,
"loss": 0.3957,
"step": 300
},
{
"epoch": 1.56,
"learning_rate": 2.6591760299625466e-05,
"loss": 0.4238,
"step": 310
},
{
"epoch": 1.61,
"learning_rate": 2.565543071161049e-05,
"loss": 0.4087,
"step": 320
},
{
"epoch": 1.66,
"learning_rate": 2.4719101123595505e-05,
"loss": 0.3913,
"step": 330
},
{
"epoch": 1.72,
"learning_rate": 2.3782771535580524e-05,
"loss": 0.301,
"step": 340
},
{
"epoch": 1.77,
"learning_rate": 2.2846441947565544e-05,
"loss": 0.3868,
"step": 350
},
{
"epoch": 1.82,
"learning_rate": 2.1910112359550563e-05,
"loss": 0.3086,
"step": 360
},
{
"epoch": 1.87,
"learning_rate": 2.0973782771535582e-05,
"loss": 0.3019,
"step": 370
},
{
"epoch": 1.92,
"learning_rate": 2.00374531835206e-05,
"loss": 0.3578,
"step": 380
},
{
"epoch": 1.97,
"learning_rate": 1.9101123595505618e-05,
"loss": 0.2224,
"step": 390
},
{
"epoch": 2.0,
"eval_accuracy": 0.8418079096045198,
"eval_loss": 0.4224812686443329,
"eval_runtime": 6.0015,
"eval_samples_per_second": 29.493,
"eval_steps_per_second": 14.83,
"step": 396
},
{
"epoch": 2.02,
"learning_rate": 1.8164794007490637e-05,
"loss": 0.2681,
"step": 400
},
{
"epoch": 2.07,
"learning_rate": 1.7228464419475657e-05,
"loss": 0.2608,
"step": 410
},
{
"epoch": 2.12,
"learning_rate": 1.6292134831460676e-05,
"loss": 0.2281,
"step": 420
},
{
"epoch": 2.17,
"learning_rate": 1.5355805243445692e-05,
"loss": 0.1541,
"step": 430
},
{
"epoch": 2.22,
"learning_rate": 1.4419475655430711e-05,
"loss": 0.4201,
"step": 440
},
{
"epoch": 2.27,
"learning_rate": 1.348314606741573e-05,
"loss": 0.2288,
"step": 450
},
{
"epoch": 2.32,
"learning_rate": 1.254681647940075e-05,
"loss": 0.23,
"step": 460
},
{
"epoch": 2.37,
"learning_rate": 1.161048689138577e-05,
"loss": 0.2587,
"step": 470
},
{
"epoch": 2.42,
"learning_rate": 1.0674157303370787e-05,
"loss": 0.0679,
"step": 480
},
{
"epoch": 2.47,
"learning_rate": 9.737827715355806e-06,
"loss": 0.3435,
"step": 490
},
{
"epoch": 2.52,
"learning_rate": 8.801498127340826e-06,
"loss": 0.2798,
"step": 500
},
{
"epoch": 2.57,
"learning_rate": 7.865168539325843e-06,
"loss": 0.2222,
"step": 510
},
{
"epoch": 2.62,
"learning_rate": 6.928838951310862e-06,
"loss": 0.2538,
"step": 520
},
{
"epoch": 2.67,
"learning_rate": 5.9925093632958805e-06,
"loss": 0.2586,
"step": 530
},
{
"epoch": 2.72,
"learning_rate": 5.056179775280899e-06,
"loss": 0.1283,
"step": 540
},
{
"epoch": 2.77,
"learning_rate": 4.1198501872659175e-06,
"loss": 0.349,
"step": 550
},
{
"epoch": 2.82,
"learning_rate": 3.1835205992509364e-06,
"loss": 0.189,
"step": 560
},
{
"epoch": 2.88,
"learning_rate": 2.247191011235955e-06,
"loss": 0.1864,
"step": 570
},
{
"epoch": 2.93,
"learning_rate": 1.310861423220974e-06,
"loss": 0.2067,
"step": 580
},
{
"epoch": 2.98,
"learning_rate": 3.7453183520599253e-07,
"loss": 0.1975,
"step": 590
},
{
"epoch": 3.0,
"eval_accuracy": 0.8418079096045198,
"eval_loss": 0.4207219183444977,
"eval_runtime": 6.0523,
"eval_samples_per_second": 29.245,
"eval_steps_per_second": 14.705,
"step": 594
},
{
"epoch": 3.0,
"step": 594,
"total_flos": 3.76219513152e+17,
"train_loss": 0.4336023674368457,
"train_runtime": 545.8523,
"train_samples_per_second": 8.711,
"train_steps_per_second": 1.088
}
],
"logging_steps": 10,
"max_steps": 594,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 3.76219513152e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}