{
"best_metric": 0.9365918097754293,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-landscape/checkpoint-213",
"epoch": 9.68421052631579,
"eval_steps": 500,
"global_step": 230,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.42105263157894735,
"grad_norm": 70.45781707763672,
"learning_rate": 2.173913043478261e-05,
"loss": 7.8634,
"step": 10
},
{
"epoch": 0.8421052631578947,
"grad_norm": 27.836477279663086,
"learning_rate": 4.347826086956522e-05,
"loss": 3.6193,
"step": 20
},
{
"epoch": 0.968421052631579,
"eval_accuracy": 0.6618229854689565,
"eval_loss": 1.0203161239624023,
"eval_runtime": 3.846,
"eval_samples_per_second": 196.827,
"eval_steps_per_second": 6.24,
"step": 23
},
{
"epoch": 1.263157894736842,
"grad_norm": 14.402323722839355,
"learning_rate": 4.830917874396135e-05,
"loss": 1.2754,
"step": 30
},
{
"epoch": 1.6842105263157894,
"grad_norm": 17.051239013671875,
"learning_rate": 4.589371980676328e-05,
"loss": 0.5876,
"step": 40
},
{
"epoch": 1.9789473684210526,
"eval_accuracy": 0.8903566710700133,
"eval_loss": 0.3358216881752014,
"eval_runtime": 3.7597,
"eval_samples_per_second": 201.344,
"eval_steps_per_second": 6.383,
"step": 47
},
{
"epoch": 2.1052631578947367,
"grad_norm": 12.79457950592041,
"learning_rate": 4.347826086956522e-05,
"loss": 0.3806,
"step": 50
},
{
"epoch": 2.526315789473684,
"grad_norm": 7.795621395111084,
"learning_rate": 4.106280193236715e-05,
"loss": 0.3839,
"step": 60
},
{
"epoch": 2.9473684210526314,
"grad_norm": 8.323637008666992,
"learning_rate": 3.864734299516908e-05,
"loss": 0.3585,
"step": 70
},
{
"epoch": 2.9894736842105263,
"eval_accuracy": 0.9207397622192867,
"eval_loss": 0.2493603378534317,
"eval_runtime": 3.5421,
"eval_samples_per_second": 213.718,
"eval_steps_per_second": 6.776,
"step": 71
},
{
"epoch": 3.3684210526315788,
"grad_norm": 5.737844944000244,
"learning_rate": 3.6231884057971014e-05,
"loss": 0.2795,
"step": 80
},
{
"epoch": 3.7894736842105265,
"grad_norm": 5.775590896606445,
"learning_rate": 3.381642512077295e-05,
"loss": 0.2906,
"step": 90
},
{
"epoch": 4.0,
"eval_accuracy": 0.9207397622192867,
"eval_loss": 0.23325380682945251,
"eval_runtime": 4.0394,
"eval_samples_per_second": 187.403,
"eval_steps_per_second": 5.941,
"step": 95
},
{
"epoch": 4.2105263157894735,
"grad_norm": 7.07818603515625,
"learning_rate": 3.140096618357488e-05,
"loss": 0.3048,
"step": 100
},
{
"epoch": 4.631578947368421,
"grad_norm": 7.080397605895996,
"learning_rate": 2.8985507246376814e-05,
"loss": 0.2591,
"step": 110
},
{
"epoch": 4.968421052631579,
"eval_accuracy": 0.9313077939233818,
"eval_loss": 0.2077917903661728,
"eval_runtime": 3.7706,
"eval_samples_per_second": 200.761,
"eval_steps_per_second": 6.365,
"step": 118
},
{
"epoch": 5.052631578947368,
"grad_norm": 5.3396124839782715,
"learning_rate": 2.6570048309178748e-05,
"loss": 0.2752,
"step": 120
},
{
"epoch": 5.473684210526316,
"grad_norm": 5.555719375610352,
"learning_rate": 2.4154589371980676e-05,
"loss": 0.2328,
"step": 130
},
{
"epoch": 5.894736842105263,
"grad_norm": 5.745518684387207,
"learning_rate": 2.173913043478261e-05,
"loss": 0.2458,
"step": 140
},
{
"epoch": 5.978947368421053,
"eval_accuracy": 0.9233817701453104,
"eval_loss": 0.22294138371944427,
"eval_runtime": 3.6849,
"eval_samples_per_second": 205.433,
"eval_steps_per_second": 6.513,
"step": 142
},
{
"epoch": 6.315789473684211,
"grad_norm": 6.548435688018799,
"learning_rate": 1.932367149758454e-05,
"loss": 0.2252,
"step": 150
},
{
"epoch": 6.7368421052631575,
"grad_norm": 6.568725109100342,
"learning_rate": 1.6908212560386476e-05,
"loss": 0.1936,
"step": 160
},
{
"epoch": 6.989473684210527,
"eval_accuracy": 0.9220607661822986,
"eval_loss": 0.20947237312793732,
"eval_runtime": 3.5585,
"eval_samples_per_second": 212.729,
"eval_steps_per_second": 6.744,
"step": 166
},
{
"epoch": 7.157894736842105,
"grad_norm": 5.143336772918701,
"learning_rate": 1.4492753623188407e-05,
"loss": 0.2081,
"step": 170
},
{
"epoch": 7.578947368421053,
"grad_norm": 7.358247756958008,
"learning_rate": 1.2077294685990338e-05,
"loss": 0.2262,
"step": 180
},
{
"epoch": 8.0,
"grad_norm": 5.925826549530029,
"learning_rate": 9.66183574879227e-06,
"loss": 0.1765,
"step": 190
},
{
"epoch": 8.0,
"eval_accuracy": 0.9247027741083224,
"eval_loss": 0.19408579170703888,
"eval_runtime": 3.8932,
"eval_samples_per_second": 194.44,
"eval_steps_per_second": 6.165,
"step": 190
},
{
"epoch": 8.421052631578947,
"grad_norm": 4.0063252449035645,
"learning_rate": 7.246376811594203e-06,
"loss": 0.1819,
"step": 200
},
{
"epoch": 8.842105263157894,
"grad_norm": 4.924877166748047,
"learning_rate": 4.830917874396135e-06,
"loss": 0.1915,
"step": 210
},
{
"epoch": 8.968421052631578,
"eval_accuracy": 0.9365918097754293,
"eval_loss": 0.18896903097629547,
"eval_runtime": 4.0115,
"eval_samples_per_second": 188.708,
"eval_steps_per_second": 5.983,
"step": 213
},
{
"epoch": 9.263157894736842,
"grad_norm": 5.235773086547852,
"learning_rate": 2.4154589371980677e-06,
"loss": 0.1798,
"step": 220
},
{
"epoch": 9.68421052631579,
"grad_norm": 4.68380880355835,
"learning_rate": 0.0,
"loss": 0.1824,
"step": 230
},
{
"epoch": 9.68421052631579,
"eval_accuracy": 0.9365918097754293,
"eval_loss": 0.18814285099506378,
"eval_runtime": 3.7835,
"eval_samples_per_second": 200.08,
"eval_steps_per_second": 6.343,
"step": 230
},
{
"epoch": 9.68421052631579,
"step": 230,
"total_flos": 7.494072602457047e+17,
"train_loss": 0.7879104811212291,
"train_runtime": 284.9792,
"train_samples_per_second": 106.253,
"train_steps_per_second": 0.807
}
],
"logging_steps": 10,
"max_steps": 230,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.494072602457047e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}