{
  "best_metric": 0.7170383055043567,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-art/checkpoint-171",
  "epoch": 1.9941690962099126,
  "eval_steps": 500,
  "global_step": 171,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12,
      "learning_rate": 1.96078431372549e-06,
      "loss": 2.0347,
      "step": 10
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.92156862745098e-06,
      "loss": 1.9855,
      "step": 20
    },
    {
      "epoch": 0.35,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.8774,
      "step": 30
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.84313725490196e-06,
      "loss": 1.7658,
      "step": 40
    },
    {
      "epoch": 0.58,
      "learning_rate": 9.803921568627451e-06,
      "loss": 1.6138,
      "step": 50
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 1.422,
      "step": 60
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.3725490196078432e-05,
      "loss": 1.3298,
      "step": 70
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.568627450980392e-05,
      "loss": 1.262,
      "step": 80
    },
    {
      "epoch": 0.99,
      "eval_f1": 0.5746968921742681,
      "eval_loss": 1.1490696668624878,
      "eval_runtime": 23.7551,
      "eval_samples_per_second": 115.302,
      "eval_steps_per_second": 3.62,
      "step": 85
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 1.1287,
      "step": 90
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.9607843137254903e-05,
      "loss": 1.0018,
      "step": 100
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.1568627450980395e-05,
      "loss": 1.01,
      "step": 110
    },
    {
      "epoch": 1.4,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 1.032,
      "step": 120
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.5490196078431373e-05,
      "loss": 0.9039,
      "step": 130
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.7450980392156865e-05,
      "loss": 0.8492,
      "step": 140
    },
    {
      "epoch": 1.75,
      "learning_rate": 2.9411764705882354e-05,
      "loss": 0.8667,
      "step": 150
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.137254901960784e-05,
      "loss": 0.7881,
      "step": 160
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.7538,
      "step": 170
    },
    {
      "epoch": 1.99,
      "eval_f1": 0.7170383055043567,
      "eval_loss": 0.7177160978317261,
      "eval_runtime": 27.6306,
      "eval_samples_per_second": 99.129,
      "eval_steps_per_second": 3.112,
      "step": 171
    }
  ],
  "logging_steps": 10,
  "max_steps": 2550,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 500,
  "total_flos": 5.4452164834993766e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}