{
  "best_metric": 0.2781514525413513,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-crack-detectorMAIN50epochsFINAL\\checkpoint-1115",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 1115,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1.7953321364452426e-07,
      "loss": 1.4257,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 3.590664272890485e-07,
      "loss": 1.4227,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 5.385996409335728e-07,
      "loss": 1.4112,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 7.18132854578097e-07,
      "loss": 1.4052,
      "step": 40
    },
    {
      "epoch": 0.09,
      "learning_rate": 8.976660682226213e-07,
      "loss": 1.3954,
      "step": 50
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.0771992818671456e-06,
      "loss": 1.3978,
      "step": 60
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.2567324955116697e-06,
      "loss": 1.3862,
      "step": 70
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.436265709156194e-06,
      "loss": 1.3651,
      "step": 80
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.6157989228007182e-06,
      "loss": 1.359,
      "step": 90
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.7953321364452425e-06,
      "loss": 1.3476,
      "step": 100
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.9748653500897667e-06,
      "loss": 1.3303,
      "step": 110
    },
    {
      "epoch": 0.22,
      "learning_rate": 2.1543985637342912e-06,
      "loss": 1.3047,
      "step": 120
    },
    {
      "epoch": 0.23,
      "learning_rate": 2.333931777378815e-06,
      "loss": 1.2944,
      "step": 130
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.5134649910233395e-06,
      "loss": 1.284,
      "step": 140
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.6929982046678636e-06,
      "loss": 1.23,
      "step": 150
    },
    {
      "epoch": 0.29,
      "learning_rate": 2.872531418312388e-06,
      "loss": 1.2103,
      "step": 160
    },
    {
      "epoch": 0.3,
      "learning_rate": 3.0520646319569123e-06,
      "loss": 1.187,
      "step": 170
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.2315978456014364e-06,
      "loss": 1.1591,
      "step": 180
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.411131059245961e-06,
      "loss": 1.144,
      "step": 190
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.590664272890485e-06,
      "loss": 1.0904,
      "step": 200
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.770197486535009e-06,
      "loss": 1.0401,
      "step": 210
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.949730700179533e-06,
      "loss": 1.0281,
      "step": 220
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.129263913824058e-06,
      "loss": 0.9577,
      "step": 230
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.3087971274685824e-06,
      "loss": 0.9251,
      "step": 240
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.488330341113106e-06,
      "loss": 0.8964,
      "step": 250
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.66786355475763e-06,
      "loss": 0.8897,
      "step": 260
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.847396768402154e-06,
      "loss": 0.8481,
      "step": 270
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.026929982046679e-06,
      "loss": 0.8048,
      "step": 280
    },
    {
      "epoch": 0.52,
      "learning_rate": 5.206463195691203e-06,
      "loss": 0.8014,
      "step": 290
    },
    {
      "epoch": 0.54,
      "learning_rate": 5.385996409335727e-06,
      "loss": 0.7511,
      "step": 300
    },
    {
      "epoch": 0.56,
      "learning_rate": 5.565529622980251e-06,
      "loss": 0.741,
      "step": 310
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.745062836624776e-06,
      "loss": 0.7327,
      "step": 320
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.9245960502693004e-06,
      "loss": 0.7492,
      "step": 330
    },
    {
      "epoch": 0.61,
      "learning_rate": 6.1041292639138246e-06,
      "loss": 0.7613,
      "step": 340
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.283662477558349e-06,
      "loss": 0.6902,
      "step": 350
    },
    {
      "epoch": 0.65,
      "learning_rate": 6.463195691202873e-06,
      "loss": 0.6687,
      "step": 360
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.642728904847396e-06,
      "loss": 0.6623,
      "step": 370
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.822262118491922e-06,
      "loss": 0.6901,
      "step": 380
    },
    {
      "epoch": 0.7,
      "learning_rate": 7.001795332136446e-06,
      "loss": 0.6638,
      "step": 390
    },
    {
      "epoch": 0.72,
      "learning_rate": 7.18132854578097e-06,
      "loss": 0.6414,
      "step": 400
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.360861759425494e-06,
      "loss": 0.6877,
      "step": 410
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.540394973070018e-06,
      "loss": 0.6249,
      "step": 420
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.719928186714543e-06,
      "loss": 0.6099,
      "step": 430
    },
    {
      "epoch": 0.79,
      "learning_rate": 7.899461400359067e-06,
      "loss": 0.6321,
      "step": 440
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.07899461400359e-06,
      "loss": 0.6431,
      "step": 450
    },
    {
      "epoch": 0.83,
      "learning_rate": 8.258527827648117e-06,
      "loss": 0.6404,
      "step": 460
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.43806104129264e-06,
      "loss": 0.6148,
      "step": 470
    },
    {
      "epoch": 0.86,
      "learning_rate": 8.617594254937165e-06,
      "loss": 0.5844,
      "step": 480
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.797127468581689e-06,
      "loss": 0.6316,
      "step": 490
    },
    {
      "epoch": 0.9,
      "learning_rate": 8.976660682226211e-06,
      "loss": 0.6018,
      "step": 500
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.156193895870736e-06,
      "loss": 0.5843,
      "step": 510
    },
    {
      "epoch": 0.93,
      "learning_rate": 9.33572710951526e-06,
      "loss": 0.5986,
      "step": 520
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.515260323159784e-06,
      "loss": 0.5718,
      "step": 530
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.694793536804308e-06,
      "loss": 0.5163,
      "step": 540
    },
    {
      "epoch": 0.99,
      "learning_rate": 9.874326750448834e-06,
      "loss": 0.5448,
      "step": 550
    },
    {
      "epoch": 1.0,
      "eval_accuracy": {
        "accuracy": 0.8387530836510428
      },
      "eval_f1": {
        "f1": 0.8242528613153339
      },
      "eval_loss": 0.43474748730659485,
      "eval_precision": {
        "precision": 0.8238035334510331
      },
      "eval_recall": {
        "recall": 0.8266051811271123
      },
      "eval_runtime": 171.551,
      "eval_samples_per_second": 103.969,
      "eval_steps_per_second": 3.253,
      "step": 557
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.0053859964093358e-05,
      "loss": 0.58,
      "step": 560
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.0233393177737882e-05,
      "loss": 0.5298,
      "step": 570
    },
    {
      "epoch": 1.04,
      "learning_rate": 1.0412926391382406e-05,
      "loss": 0.5592,
      "step": 580
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.059245960502693e-05,
      "loss": 0.5352,
      "step": 590
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.0771992818671454e-05,
      "loss": 0.5683,
      "step": 600
    },
    {
      "epoch": 1.09,
      "learning_rate": 1.0951526032315979e-05,
      "loss": 0.571,
      "step": 610
    },
    {
      "epoch": 1.11,
      "learning_rate": 1.1131059245960503e-05,
      "loss": 0.5603,
      "step": 620
    },
    {
      "epoch": 1.13,
      "learning_rate": 1.1310592459605028e-05,
      "loss": 0.5157,
      "step": 630
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.1490125673249553e-05,
      "loss": 0.4885,
      "step": 640
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.1669658886894077e-05,
      "loss": 0.5005,
      "step": 650
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.1849192100538601e-05,
      "loss": 0.4951,
      "step": 660
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.2028725314183125e-05,
      "loss": 0.5197,
      "step": 670
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.2208258527827649e-05,
      "loss": 0.4746,
      "step": 680
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.2387791741472173e-05,
      "loss": 0.486,
      "step": 690
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.2567324955116697e-05,
      "loss": 0.4932,
      "step": 700
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.2746858168761221e-05,
      "loss": 0.5222,
      "step": 710
    },
    {
      "epoch": 1.29,
      "learning_rate": 1.2926391382405746e-05,
      "loss": 0.434,
      "step": 720
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.310592459605027e-05,
      "loss": 0.4817,
      "step": 730
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.3285457809694792e-05,
      "loss": 0.487,
      "step": 740
    },
    {
      "epoch": 1.35,
      "learning_rate": 1.3464991023339318e-05,
      "loss": 0.4478,
      "step": 750
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.3644524236983844e-05,
      "loss": 0.4564,
      "step": 760
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.3824057450628366e-05,
      "loss": 0.4455,
      "step": 770
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.4003590664272892e-05,
      "loss": 0.4701,
      "step": 780
    },
    {
      "epoch": 1.42,
      "learning_rate": 1.4183123877917415e-05,
      "loss": 0.4333,
      "step": 790
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.436265709156194e-05,
      "loss": 0.4409,
      "step": 800
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.4542190305206463e-05,
      "loss": 0.4639,
      "step": 810
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.4721723518850989e-05,
      "loss": 0.4552,
      "step": 820
    },
    {
      "epoch": 1.49,
      "learning_rate": 1.4901256732495511e-05,
      "loss": 0.4759,
      "step": 830
    },
    {
      "epoch": 1.51,
      "learning_rate": 1.5080789946140037e-05,
      "loss": 0.4584,
      "step": 840
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.5260323159784563e-05,
      "loss": 0.4502,
      "step": 850
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.5439856373429085e-05,
      "loss": 0.4519,
      "step": 860
    },
    {
      "epoch": 1.56,
      "learning_rate": 1.561938958707361e-05,
      "loss": 0.4695,
      "step": 870
    },
    {
      "epoch": 1.58,
      "learning_rate": 1.5798922800718133e-05,
      "loss": 0.4466,
      "step": 880
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.597845601436266e-05,
      "loss": 0.4531,
      "step": 890
    },
    {
      "epoch": 1.61,
      "learning_rate": 1.615798922800718e-05,
      "loss": 0.4674,
      "step": 900
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.6337522441651707e-05,
      "loss": 0.4262,
      "step": 910
    },
    {
      "epoch": 1.65,
      "learning_rate": 1.6517055655296233e-05,
      "loss": 0.3945,
      "step": 920
    },
    {
      "epoch": 1.67,
      "learning_rate": 1.6696588868940756e-05,
      "loss": 0.4318,
      "step": 930
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.687612208258528e-05,
      "loss": 0.439,
      "step": 940
    },
    {
      "epoch": 1.7,
      "learning_rate": 1.7055655296229804e-05,
      "loss": 0.4036,
      "step": 950
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.723518850987433e-05,
      "loss": 0.422,
      "step": 960
    },
    {
      "epoch": 1.74,
      "learning_rate": 1.7414721723518852e-05,
      "loss": 0.4315,
      "step": 970
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.7594254937163378e-05,
      "loss": 0.4516,
      "step": 980
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.77737881508079e-05,
      "loss": 0.4092,
      "step": 990
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.7953321364452423e-05,
      "loss": 0.4367,
      "step": 1000
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.813285457809695e-05,
      "loss": 0.4321,
      "step": 1010
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.831238779174147e-05,
      "loss": 0.3859,
      "step": 1020
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.8491921005385997e-05,
      "loss": 0.415,
      "step": 1030
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.867145421903052e-05,
      "loss": 0.4388,
      "step": 1040
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.8850987432675045e-05,
      "loss": 0.3968,
      "step": 1050
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.9030520646319568e-05,
      "loss": 0.4172,
      "step": 1060
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.9210053859964093e-05,
      "loss": 0.4593,
      "step": 1070
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.9389587073608616e-05,
      "loss": 0.3924,
      "step": 1080
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.9569120287253142e-05,
      "loss": 0.4067,
      "step": 1090
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.9748653500897668e-05,
      "loss": 0.3906,
      "step": 1100
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.992818671454219e-05,
      "loss": 0.3915,
      "step": 1110
    },
    {
      "epoch": 2.0,
      "eval_accuracy": {
        "accuracy": 0.8952679973088137
      },
      "eval_f1": {
        "f1": 0.8895212539619177
      },
      "eval_loss": 0.2781514525413513,
      "eval_precision": {
        "precision": 0.8895590270096669
      },
      "eval_recall": {
        "recall": 0.892156947982043
      },
      "eval_runtime": 99.0662,
      "eval_samples_per_second": 180.041,
      "eval_steps_per_second": 5.633,
      "step": 1115
    }
  ],
  "logging_steps": 10,
  "max_steps": 27850,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 3.546650701349683e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}