{
  "best_metric": 0.9949179046129789,
  "best_model_checkpoint": "Brain_Tumor_Classification_using_swin_transformer/checkpoint-540",
  "epoch": 3.0,
  "global_step": 540,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 9.259259259259259e-06,
      "loss": 1.1369,
      "step": 10
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.9198,
      "step": 20
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.6536,
      "step": 30
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.454,
      "step": 40
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.3619,
      "step": 50
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.938271604938271e-05,
      "loss": 0.3045,
      "step": 60
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.835390946502058e-05,
      "loss": 0.2705,
      "step": 70
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.732510288065844e-05,
      "loss": 0.2496,
      "step": 80
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.2166,
      "step": 90
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.5267489711934157e-05,
      "loss": 0.2754,
      "step": 100
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.423868312757202e-05,
      "loss": 0.1692,
      "step": 110
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.3209876543209875e-05,
      "loss": 0.1326,
      "step": 120
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.2181069958847744e-05,
      "loss": 0.1449,
      "step": 130
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.11522633744856e-05,
      "loss": 0.1632,
      "step": 140
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.012345679012346e-05,
      "loss": 0.1412,
      "step": 150
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.909465020576132e-05,
      "loss": 0.1427,
      "step": 160
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.806584362139918e-05,
      "loss": 0.127,
      "step": 170
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.081,
      "step": 180
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9831899921813917,
      "eval_f1": 0.9831899921813917,
      "eval_loss": 0.055702339857816696,
      "eval_precision": 0.9831899921813917,
      "eval_recall": 0.9831899921813917,
      "eval_runtime": 438.8809,
      "eval_samples_per_second": 5.828,
      "eval_steps_per_second": 0.182,
      "step": 180
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.60082304526749e-05,
      "loss": 0.1341,
      "step": 190
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.497942386831276e-05,
      "loss": 0.1193,
      "step": 200
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.395061728395062e-05,
      "loss": 0.1082,
      "step": 210
    },
    {
      "epoch": 1.22,
      "learning_rate": 3.292181069958848e-05,
      "loss": 0.0874,
      "step": 220
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.1893004115226336e-05,
      "loss": 0.0927,
      "step": 230
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.08641975308642e-05,
      "loss": 0.1024,
      "step": 240
    },
    {
      "epoch": 1.39,
      "learning_rate": 2.9835390946502057e-05,
      "loss": 0.1166,
      "step": 250
    },
    {
      "epoch": 1.44,
      "learning_rate": 2.880658436213992e-05,
      "loss": 0.1043,
      "step": 260
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.087,
      "step": 270
    },
    {
      "epoch": 1.56,
      "learning_rate": 2.6748971193415638e-05,
      "loss": 0.071,
      "step": 280
    },
    {
      "epoch": 1.61,
      "learning_rate": 2.5720164609053497e-05,
      "loss": 0.0836,
      "step": 290
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.4691358024691357e-05,
      "loss": 0.0942,
      "step": 300
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.366255144032922e-05,
      "loss": 0.118,
      "step": 310
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.2633744855967078e-05,
      "loss": 0.069,
      "step": 320
    },
    {
      "epoch": 1.83,
      "learning_rate": 2.1604938271604937e-05,
      "loss": 0.072,
      "step": 330
    },
    {
      "epoch": 1.89,
      "learning_rate": 2.05761316872428e-05,
      "loss": 0.1062,
      "step": 340
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.954732510288066e-05,
      "loss": 0.0815,
      "step": 350
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.0816,
      "step": 360
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9937451133698202,
      "eval_f1": 0.9937451133698202,
      "eval_loss": 0.01865515671670437,
      "eval_precision": 0.9937451133698202,
      "eval_recall": 0.9937451133698202,
      "eval_runtime": 444.8168,
      "eval_samples_per_second": 5.751,
      "eval_steps_per_second": 0.18,
      "step": 360
    },
    {
      "epoch": 2.06,
      "learning_rate": 1.748971193415638e-05,
      "loss": 0.0759,
      "step": 370
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.646090534979424e-05,
      "loss": 0.0788,
      "step": 380
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.54320987654321e-05,
      "loss": 0.0719,
      "step": 390
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.440329218106996e-05,
      "loss": 0.069,
      "step": 400
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.3374485596707819e-05,
      "loss": 0.0615,
      "step": 410
    },
    {
      "epoch": 2.33,
      "learning_rate": 1.2345679012345678e-05,
      "loss": 0.0574,
      "step": 420
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.1316872427983539e-05,
      "loss": 0.0683,
      "step": 430
    },
    {
      "epoch": 2.44,
      "learning_rate": 1.02880658436214e-05,
      "loss": 0.0651,
      "step": 440
    },
    {
      "epoch": 2.5,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.0749,
      "step": 450
    },
    {
      "epoch": 2.56,
      "learning_rate": 8.23045267489712e-06,
      "loss": 0.052,
      "step": 460
    },
    {
      "epoch": 2.61,
      "learning_rate": 7.20164609053498e-06,
      "loss": 0.0653,
      "step": 470
    },
    {
      "epoch": 2.67,
      "learning_rate": 6.172839506172839e-06,
      "loss": 0.0687,
      "step": 480
    },
    {
      "epoch": 2.72,
      "learning_rate": 5.1440329218107e-06,
      "loss": 0.0538,
      "step": 490
    },
    {
      "epoch": 2.78,
      "learning_rate": 4.11522633744856e-06,
      "loss": 0.0577,
      "step": 500
    },
    {
      "epoch": 2.83,
      "learning_rate": 3.0864197530864196e-06,
      "loss": 0.0579,
      "step": 510
    },
    {
      "epoch": 2.89,
      "learning_rate": 2.05761316872428e-06,
      "loss": 0.0571,
      "step": 520
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.02880658436214e-06,
      "loss": 0.0548,
      "step": 530
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0,
      "loss": 0.0543,
      "step": 540
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9949179046129789,
      "eval_f1": 0.9949179046129789,
      "eval_loss": 0.011820005252957344,
      "eval_precision": 0.9949179046129789,
      "eval_recall": 0.9949179046129789,
      "eval_runtime": 467.8126,
      "eval_samples_per_second": 5.468,
      "eval_steps_per_second": 0.171,
      "step": 540
    },
    {
      "epoch": 3.0,
      "step": 540,
      "total_flos": 5.409667845997019e+18,
      "train_loss": 0.1632997931153686,
      "train_runtime": 53634.0894,
      "train_samples_per_second": 1.287,
      "train_steps_per_second": 0.01
    }
  ],
  "max_steps": 540,
  "num_train_epochs": 3,
  "total_flos": 5.409667845997019e+18,
  "trial_name": null,
  "trial_params": null
}