{
  "best_metric": 0.7951807228915663,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-finetuned-gardner-icm-max/checkpoint-11",
  "epoch": 18.72340425531915,
  "eval_steps": 500,
  "global_step": 220,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.85,
      "learning_rate": 2.272727272727273e-05,
      "loss": 1.0925,
      "step": 10
    },
    {
      "epoch": 0.94,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 1.0630700588226318,
      "eval_runtime": 8.4566,
      "eval_samples_per_second": 19.63,
      "eval_steps_per_second": 0.71,
      "step": 11
    },
    {
      "epoch": 1.7,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.9552,
      "step": 20
    },
    {
      "epoch": 1.96,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.633553683757782,
      "eval_runtime": 2.9014,
      "eval_samples_per_second": 57.213,
      "eval_steps_per_second": 2.068,
      "step": 23
    },
    {
      "epoch": 2.55,
      "learning_rate": 4.797979797979798e-05,
      "loss": 0.6566,
      "step": 30
    },
    {
      "epoch": 2.98,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5356065630912781,
      "eval_runtime": 2.9308,
      "eval_samples_per_second": 56.639,
      "eval_steps_per_second": 2.047,
      "step": 35
    },
    {
      "epoch": 3.4,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.5686,
      "step": 40
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5150089263916016,
      "eval_runtime": 3.1556,
      "eval_samples_per_second": 52.605,
      "eval_steps_per_second": 1.901,
      "step": 47
    },
    {
      "epoch": 4.26,
      "learning_rate": 4.292929292929293e-05,
      "loss": 0.5703,
      "step": 50
    },
    {
      "epoch": 4.94,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5129297971725464,
      "eval_runtime": 2.9359,
      "eval_samples_per_second": 56.542,
      "eval_steps_per_second": 2.044,
      "step": 58
    },
    {
      "epoch": 5.11,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 0.5361,
      "step": 60
    },
    {
      "epoch": 5.96,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.5726,
      "step": 70
    },
    {
      "epoch": 5.96,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5154421925544739,
      "eval_runtime": 2.9132,
      "eval_samples_per_second": 56.982,
      "eval_steps_per_second": 2.06,
      "step": 70
    },
    {
      "epoch": 6.81,
      "learning_rate": 3.535353535353535e-05,
      "loss": 0.5482,
      "step": 80
    },
    {
      "epoch": 6.98,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5141979455947876,
      "eval_runtime": 2.9168,
      "eval_samples_per_second": 56.912,
      "eval_steps_per_second": 2.057,
      "step": 82
    },
    {
      "epoch": 7.66,
      "learning_rate": 3.282828282828283e-05,
      "loss": 0.568,
      "step": 90
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5108516812324524,
      "eval_runtime": 2.9147,
      "eval_samples_per_second": 56.952,
      "eval_steps_per_second": 2.059,
      "step": 94
    },
    {
      "epoch": 8.51,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.5245,
      "step": 100
    },
    {
      "epoch": 8.94,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5134214162826538,
      "eval_runtime": 2.9007,
      "eval_samples_per_second": 57.227,
      "eval_steps_per_second": 2.068,
      "step": 105
    },
    {
      "epoch": 9.36,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.5979,
      "step": 110
    },
    {
      "epoch": 9.96,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5237765908241272,
      "eval_runtime": 2.9436,
      "eval_samples_per_second": 56.394,
      "eval_steps_per_second": 2.038,
      "step": 117
    },
    {
      "epoch": 10.21,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.5442,
      "step": 120
    },
    {
      "epoch": 10.98,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.507612943649292,
      "eval_runtime": 2.9562,
      "eval_samples_per_second": 56.154,
      "eval_steps_per_second": 2.03,
      "step": 129
    },
    {
      "epoch": 11.06,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.5451,
      "step": 130
    },
    {
      "epoch": 11.91,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.545,
      "step": 140
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5061953067779541,
      "eval_runtime": 2.9818,
      "eval_samples_per_second": 55.671,
      "eval_steps_per_second": 2.012,
      "step": 141
    },
    {
      "epoch": 12.77,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.5514,
      "step": 150
    },
    {
      "epoch": 12.94,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5012802481651306,
      "eval_runtime": 3.0403,
      "eval_samples_per_second": 54.599,
      "eval_steps_per_second": 1.973,
      "step": 152
    },
    {
      "epoch": 13.62,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.5377,
      "step": 160
    },
    {
      "epoch": 13.96,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5044662356376648,
      "eval_runtime": 3.0226,
      "eval_samples_per_second": 54.919,
      "eval_steps_per_second": 1.985,
      "step": 164
    },
    {
      "epoch": 14.47,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.5282,
      "step": 170
    },
    {
      "epoch": 14.98,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.5037835836410522,
      "eval_runtime": 2.9367,
      "eval_samples_per_second": 56.525,
      "eval_steps_per_second": 2.043,
      "step": 176
    },
    {
      "epoch": 15.32,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.5389,
      "step": 180
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.4994117021560669,
      "eval_runtime": 2.9413,
      "eval_samples_per_second": 56.437,
      "eval_steps_per_second": 2.04,
      "step": 188
    },
    {
      "epoch": 16.17,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.5039,
      "step": 190
    },
    {
      "epoch": 16.94,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.49963268637657166,
      "eval_runtime": 3.0102,
      "eval_samples_per_second": 55.146,
      "eval_steps_per_second": 1.993,
      "step": 199
    },
    {
      "epoch": 17.02,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.5449,
      "step": 200
    },
    {
      "epoch": 17.87,
      "learning_rate": 2.5252525252525253e-06,
      "loss": 0.5348,
      "step": 210
    },
    {
      "epoch": 17.96,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.493960976600647,
      "eval_runtime": 2.9022,
      "eval_samples_per_second": 57.197,
      "eval_steps_per_second": 2.067,
      "step": 211
    },
    {
      "epoch": 18.72,
      "learning_rate": 0.0,
      "loss": 0.5426,
      "step": 220
    },
    {
      "epoch": 18.72,
      "eval_accuracy": 0.7951807228915663,
      "eval_loss": 0.49473580718040466,
      "eval_runtime": 2.8726,
      "eval_samples_per_second": 57.787,
      "eval_steps_per_second": 2.089,
      "step": 220
    },
    {
      "epoch": 18.72,
      "step": 220,
      "total_flos": 9.056508197685166e+17,
      "train_loss": 0.595783019065857,
      "train_runtime": 1248.9487,
      "train_samples_per_second": 23.796,
      "train_steps_per_second": 0.176
    }
  ],
  "logging_steps": 10,
  "max_steps": 220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 9.056508197685166e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}