{
  "best_metric": 0.9075907590759076,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-lungs-disease/checkpoint-105",
  "epoch": 4.883720930232558,
  "eval_steps": 500,
  "global_step": 105,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.47,
      "grad_norm": 6.489995002746582,
      "learning_rate": 4.545454545454546e-05,
      "loss": 1.4339,
      "step": 10
    },
    {
      "epoch": 0.93,
      "grad_norm": 6.888110637664795,
      "learning_rate": 4.5212765957446815e-05,
      "loss": 0.814,
      "step": 20
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.7491749174917491,
      "eval_loss": 0.5313370227813721,
      "eval_runtime": 66.4067,
      "eval_samples_per_second": 4.563,
      "eval_steps_per_second": 0.151,
      "step": 21
    },
    {
      "epoch": 1.4,
      "grad_norm": 15.805723190307617,
      "learning_rate": 3.9893617021276594e-05,
      "loss": 0.5569,
      "step": 30
    },
    {
      "epoch": 1.86,
      "grad_norm": 13.49924087524414,
      "learning_rate": 3.4574468085106386e-05,
      "loss": 0.4444,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8910891089108911,
      "eval_loss": 0.3199963867664337,
      "eval_runtime": 66.8668,
      "eval_samples_per_second": 4.531,
      "eval_steps_per_second": 0.15,
      "step": 43
    },
    {
      "epoch": 2.33,
      "grad_norm": 8.2146577835083,
      "learning_rate": 2.925531914893617e-05,
      "loss": 0.3767,
      "step": 50
    },
    {
      "epoch": 2.79,
      "grad_norm": 12.676996231079102,
      "learning_rate": 2.393617021276596e-05,
      "loss": 0.3322,
      "step": 60
    },
    {
      "epoch": 2.98,
      "eval_accuracy": 0.8910891089108911,
      "eval_loss": 0.31477591395378113,
      "eval_runtime": 66.222,
      "eval_samples_per_second": 4.576,
      "eval_steps_per_second": 0.151,
      "step": 64
    },
    {
      "epoch": 3.26,
      "grad_norm": 9.924786567687988,
      "learning_rate": 1.8617021276595745e-05,
      "loss": 0.2842,
      "step": 70
    },
    {
      "epoch": 3.72,
      "grad_norm": 8.528675079345703,
      "learning_rate": 1.3297872340425532e-05,
      "loss": 0.2975,
      "step": 80
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8976897689768977,
      "eval_loss": 0.28364694118499756,
      "eval_runtime": 66.122,
      "eval_samples_per_second": 4.582,
      "eval_steps_per_second": 0.151,
      "step": 86
    },
    {
      "epoch": 4.19,
      "grad_norm": 16.571998596191406,
      "learning_rate": 7.97872340425532e-06,
      "loss": 0.2764,
      "step": 90
    },
    {
      "epoch": 4.65,
      "grad_norm": 6.849820613861084,
      "learning_rate": 2.6595744680851065e-06,
      "loss": 0.254,
      "step": 100
    },
    {
      "epoch": 4.88,
      "eval_accuracy": 0.9075907590759076,
      "eval_loss": 0.24336053431034088,
      "eval_runtime": 66.2106,
      "eval_samples_per_second": 4.576,
      "eval_steps_per_second": 0.151,
      "step": 105
    },
    {
      "epoch": 4.88,
      "step": 105,
      "total_flos": 3.313083915385897e+17,
      "train_loss": 0.4951313120978219,
      "train_runtime": 9028.2693,
      "train_samples_per_second": 1.509,
      "train_steps_per_second": 0.012
    }
  ],
  "logging_steps": 10,
  "max_steps": 105,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 3.313083915385897e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}