{
  "best_metric": 0.14285185933113098,
  "best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGBD-b5_6/checkpoint-240",
  "epoch": 120.0,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 1.2935,
      "step": 1
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.3048,
      "step": 2
    },
    {
      "epoch": 1.5,
      "learning_rate": 5e-06,
      "loss": 1.2961,
      "step": 3
    },
    {
      "epoch": 2.0,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.2755,
      "step": 4
    },
    {
      "epoch": 2.5,
      "learning_rate": 8.333333333333334e-06,
      "loss": 1.2586,
      "step": 5
    },
    {
      "epoch": 3.0,
      "learning_rate": 1e-05,
      "loss": 1.246,
      "step": 6
    },
    {
      "epoch": 3.5,
      "learning_rate": 1.1666666666666668e-05,
      "loss": 1.2275,
      "step": 7
    },
    {
      "epoch": 4.0,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.2057,
      "step": 8
    },
    {
      "epoch": 4.5,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 1.1914,
      "step": 9
    },
    {
      "epoch": 5.0,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.159,
      "step": 10
    },
    {
      "epoch": 5.0,
      "eval_accuracy_dropoff": 0.5030895204018441,
      "eval_accuracy_undropoff": 0.6320608332329993,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.06443018550460354,
      "eval_iou_undropoff": 0.6203390578678918,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 1.003960132598877,
      "eval_mean_accuracy": 0.5675751768174218,
      "eval_mean_iou": 0.22825641445749845,
      "eval_overall_accuracy": 0.6266983032226563,
      "eval_runtime": 2.8797,
      "eval_samples_per_second": 6.945,
      "eval_steps_per_second": 0.695,
      "step": 10
    },
    {
      "epoch": 5.5,
      "learning_rate": 1.8333333333333333e-05,
      "loss": 1.138,
      "step": 11
    },
    {
      "epoch": 6.0,
      "learning_rate": 2e-05,
      "loss": 1.1,
      "step": 12
    },
    {
      "epoch": 6.5,
      "learning_rate": 1.9912280701754387e-05,
      "loss": 1.0765,
      "step": 13
    },
    {
      "epoch": 7.0,
      "learning_rate": 1.9824561403508773e-05,
      "loss": 1.0466,
      "step": 14
    },
    {
      "epoch": 7.5,
      "learning_rate": 1.9736842105263158e-05,
      "loss": 1.0032,
      "step": 15
    },
    {
      "epoch": 8.0,
      "learning_rate": 1.9649122807017544e-05,
      "loss": 0.9971,
      "step": 16
    },
    {
      "epoch": 8.5,
      "learning_rate": 1.9561403508771933e-05,
      "loss": 0.9459,
      "step": 17
    },
    {
      "epoch": 9.0,
      "learning_rate": 1.9473684210526318e-05,
      "loss": 0.9112,
      "step": 18
    },
    {
      "epoch": 9.5,
      "learning_rate": 1.9385964912280704e-05,
      "loss": 0.8852,
      "step": 19
    },
    {
      "epoch": 10.0,
      "learning_rate": 1.929824561403509e-05,
      "loss": 0.8345,
      "step": 20
    },
    {
      "epoch": 10.0,
      "eval_accuracy_dropoff": 0.11338333448014863,
      "eval_accuracy_undropoff": 0.9505815555977898,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.05546940396815957,
      "eval_iou_undropoff": 0.9154202339604258,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.747998058795929,
      "eval_mean_accuracy": 0.5319824450389692,
      "eval_mean_iou": 0.32362987930952847,
      "eval_overall_accuracy": 0.915771484375,
      "eval_runtime": 2.8511,
      "eval_samples_per_second": 7.015,
      "eval_steps_per_second": 0.701,
      "step": 20
    },
    {
      "epoch": 10.5,
      "learning_rate": 1.9210526315789474e-05,
      "loss": 0.8052,
      "step": 21
    },
    {
      "epoch": 11.0,
      "learning_rate": 1.912280701754386e-05,
      "loss": 0.7648,
      "step": 22
    },
    {
      "epoch": 11.5,
      "learning_rate": 1.9035087719298245e-05,
      "loss": 0.7405,
      "step": 23
    },
    {
      "epoch": 12.0,
      "learning_rate": 1.894736842105263e-05,
      "loss": 0.7066,
      "step": 24
    },
    {
      "epoch": 12.5,
      "learning_rate": 1.885964912280702e-05,
      "loss": 0.6675,
      "step": 25
    },
    {
      "epoch": 13.0,
      "learning_rate": 1.8771929824561405e-05,
      "loss": 0.6358,
      "step": 26
    },
    {
      "epoch": 13.5,
      "learning_rate": 1.868421052631579e-05,
      "loss": 0.6127,
      "step": 27
    },
    {
      "epoch": 14.0,
      "learning_rate": 1.8596491228070176e-05,
      "loss": 0.5935,
      "step": 28
    },
    {
      "epoch": 14.5,
      "learning_rate": 1.8508771929824562e-05,
      "loss": 0.5549,
      "step": 29
    },
    {
      "epoch": 15.0,
      "learning_rate": 1.8421052631578947e-05,
      "loss": 0.5406,
      "step": 30
    },
    {
      "epoch": 15.0,
      "eval_accuracy_dropoff": 0.017945365719397234,
      "eval_accuracy_undropoff": 0.9917655826949273,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.015735932390196418,
      "eval_iou_undropoff": 0.9512822485309885,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.5476881265640259,
      "eval_mean_accuracy": 0.5048554742071623,
      "eval_mean_iou": 0.322339393640395,
      "eval_overall_accuracy": 0.9512748718261719,
      "eval_runtime": 5.6484,
      "eval_samples_per_second": 3.541,
      "eval_steps_per_second": 0.354,
      "step": 30
    },
    {
      "epoch": 15.5,
      "learning_rate": 1.8333333333333333e-05,
      "loss": 0.53,
      "step": 31
    },
    {
      "epoch": 16.0,
      "learning_rate": 1.824561403508772e-05,
      "loss": 0.5004,
      "step": 32
    },
    {
      "epoch": 16.5,
      "learning_rate": 1.8157894736842107e-05,
      "loss": 0.4965,
      "step": 33
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.8070175438596493e-05,
      "loss": 0.4653,
      "step": 34
    },
    {
      "epoch": 17.5,
      "learning_rate": 1.7982456140350878e-05,
      "loss": 0.4434,
      "step": 35
    },
    {
      "epoch": 18.0,
      "learning_rate": 1.7894736842105264e-05,
      "loss": 0.4414,
      "step": 36
    },
    {
      "epoch": 18.5,
      "learning_rate": 1.780701754385965e-05,
      "loss": 0.3915,
      "step": 37
    },
    {
      "epoch": 19.0,
      "learning_rate": 1.7719298245614035e-05,
      "loss": 0.4001,
      "step": 38
    },
    {
      "epoch": 19.5,
      "learning_rate": 1.763157894736842e-05,
      "loss": 0.4204,
      "step": 39
    },
    {
      "epoch": 20.0,
      "learning_rate": 1.754385964912281e-05,
      "loss": 0.3695,
      "step": 40
    },
    {
      "epoch": 20.0,
      "eval_accuracy_dropoff": 0.014605839583476686,
      "eval_accuracy_undropoff": 0.9925813227566402,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.012491320023381993,
      "eval_iou_undropoff": 0.951889892701647,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.45899271965026855,
      "eval_mean_accuracy": 0.5035935811700585,
      "eval_mean_iou": 0.3214604042416764,
      "eval_overall_accuracy": 0.9519178390502929,
      "eval_runtime": 3.3683,
      "eval_samples_per_second": 5.938,
      "eval_steps_per_second": 0.594,
      "step": 40
    },
    {
      "epoch": 20.5,
      "learning_rate": 1.7456140350877195e-05,
      "loss": 0.3526,
      "step": 41
    },
    {
      "epoch": 21.0,
      "learning_rate": 1.736842105263158e-05,
      "loss": 0.3346,
      "step": 42
    },
    {
      "epoch": 21.5,
      "learning_rate": 1.7280701754385966e-05,
      "loss": 0.3645,
      "step": 43
    },
    {
      "epoch": 22.0,
      "learning_rate": 1.719298245614035e-05,
      "loss": 0.3017,
      "step": 44
    },
    {
      "epoch": 22.5,
      "learning_rate": 1.7105263157894737e-05,
      "loss": 0.3384,
      "step": 45
    },
    {
      "epoch": 23.0,
      "learning_rate": 1.7017543859649125e-05,
      "loss": 0.3197,
      "step": 46
    },
    {
      "epoch": 23.5,
      "learning_rate": 1.692982456140351e-05,
      "loss": 0.3031,
      "step": 47
    },
    {
      "epoch": 24.0,
      "learning_rate": 1.6842105263157896e-05,
      "loss": 0.3068,
      "step": 48
    },
    {
      "epoch": 24.5,
      "learning_rate": 1.6754385964912282e-05,
      "loss": 0.2974,
      "step": 49
    },
    {
      "epoch": 25.0,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.3053,
      "step": 50
    },
    {
      "epoch": 25.0,
      "eval_accuracy_dropoff": 0.002344090460790385,
      "eval_accuracy_undropoff": 0.9979102009299715,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.002236568552358026,
      "eval_iou_undropoff": 0.9565110735242025,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.3790256679058075,
      "eval_mean_accuracy": 0.500127145695381,
      "eval_mean_iou": 0.3195825473588535,
      "eval_overall_accuracy": 0.9565153121948242,
      "eval_runtime": 2.7928,
      "eval_samples_per_second": 7.161,
      "eval_steps_per_second": 0.716,
      "step": 50
    },
    {
      "epoch": 25.5,
      "learning_rate": 1.6578947368421053e-05,
      "loss": 0.2815,
      "step": 51
    },
    {
      "epoch": 26.0,
      "learning_rate": 1.649122807017544e-05,
      "loss": 0.2835,
      "step": 52
    },
    {
      "epoch": 26.5,
      "learning_rate": 1.6403508771929827e-05,
      "loss": 0.2771,
      "step": 53
    },
    {
      "epoch": 27.0,
      "learning_rate": 1.6315789473684213e-05,
      "loss": 0.2843,
      "step": 54
    },
    {
      "epoch": 27.5,
      "learning_rate": 1.62280701754386e-05,
      "loss": 0.2831,
      "step": 55
    },
    {
      "epoch": 28.0,
      "learning_rate": 1.6140350877192984e-05,
      "loss": 0.2437,
      "step": 56
    },
    {
      "epoch": 28.5,
      "learning_rate": 1.605263157894737e-05,
      "loss": 0.2769,
      "step": 57
    },
    {
      "epoch": 29.0,
      "learning_rate": 1.5964912280701755e-05,
      "loss": 0.2448,
      "step": 58
    },
    {
      "epoch": 29.5,
      "learning_rate": 1.5877192982456144e-05,
      "loss": 0.2692,
      "step": 59
    },
    {
      "epoch": 30.0,
      "learning_rate": 1.578947368421053e-05,
      "loss": 0.2436,
      "step": 60
    },
    {
      "epoch": 30.0,
      "eval_accuracy_dropoff": 0.00587169430491525,
      "eval_accuracy_undropoff": 0.9980554778865586,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.0056198027800461875,
      "eval_iou_undropoff": 0.9567906746031746,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.3303200304508209,
      "eval_mean_accuracy": 0.5019635860957369,
      "eval_mean_iou": 0.48120523869161036,
      "eval_overall_accuracy": 0.9568012237548829,
      "eval_runtime": 2.7601,
      "eval_samples_per_second": 7.246,
      "eval_steps_per_second": 0.725,
      "step": 60
    },
    {
      "epoch": 30.5,
      "learning_rate": 1.5701754385964915e-05,
      "loss": 0.2546,
      "step": 61
    },
    {
      "epoch": 31.0,
      "learning_rate": 1.56140350877193e-05,
      "loss": 0.2305,
      "step": 62
    },
    {
      "epoch": 31.5,
      "learning_rate": 1.5526315789473686e-05,
      "loss": 0.2386,
      "step": 63
    },
    {
      "epoch": 32.0,
      "learning_rate": 1.543859649122807e-05,
      "loss": 0.2412,
      "step": 64
    },
    {
      "epoch": 32.5,
      "learning_rate": 1.535087719298246e-05,
      "loss": 0.2459,
      "step": 65
    },
    {
      "epoch": 33.0,
      "learning_rate": 1.5263157894736846e-05,
      "loss": 0.2444,
      "step": 66
    },
    {
      "epoch": 33.5,
      "learning_rate": 1.517543859649123e-05,
      "loss": 0.2415,
      "step": 67
    },
    {
      "epoch": 34.0,
      "learning_rate": 1.5087719298245615e-05,
      "loss": 0.2233,
      "step": 68
    },
    {
      "epoch": 34.5,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.2269,
      "step": 69
    },
    {
      "epoch": 35.0,
      "learning_rate": 1.4912280701754388e-05,
      "loss": 0.2148,
      "step": 70
    },
    {
      "epoch": 35.0,
      "eval_accuracy_dropoff": 0.0008394687951558522,
      "eval_accuracy_undropoff": 0.9995707364447146,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.0008312438678731058,
      "eval_iou_undropoff": 0.9580427783638841,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.2739110589027405,
      "eval_mean_accuracy": 0.5002051026199352,
      "eval_mean_iou": 0.4794370111158786,
      "eval_overall_accuracy": 0.9580442428588867,
      "eval_runtime": 2.7182,
      "eval_samples_per_second": 7.358,
      "eval_steps_per_second": 0.736,
      "step": 70
    },
    {
      "epoch": 35.5,
      "learning_rate": 1.4824561403508773e-05,
      "loss": 0.2208,
      "step": 71
    },
    {
      "epoch": 36.0,
      "learning_rate": 1.4736842105263159e-05,
      "loss": 0.2169,
      "step": 72
    },
    {
      "epoch": 36.5,
      "learning_rate": 1.4649122807017544e-05,
      "loss": 0.2418,
      "step": 73
    },
    {
      "epoch": 37.0,
      "learning_rate": 1.4561403508771931e-05,
      "loss": 0.1911,
      "step": 74
    },
    {
      "epoch": 37.5,
      "learning_rate": 1.4473684210526317e-05,
      "loss": 0.2089,
      "step": 75
    },
    {
      "epoch": 38.0,
      "learning_rate": 1.4385964912280704e-05,
      "loss": 0.2248,
      "step": 76
    },
    {
      "epoch": 38.5,
      "learning_rate": 1.429824561403509e-05,
      "loss": 0.2164,
      "step": 77
    },
    {
      "epoch": 39.0,
      "learning_rate": 1.4210526315789475e-05,
      "loss": 0.2224,
      "step": 78
    },
    {
      "epoch": 39.5,
      "learning_rate": 1.412280701754386e-05,
      "loss": 0.2239,
      "step": 79
    },
    {
      "epoch": 40.0,
      "learning_rate": 1.4035087719298246e-05,
      "loss": 0.1983,
      "step": 80
    },
    {
      "epoch": 40.0,
      "eval_accuracy_dropoff": 0.05823528062570243,
      "eval_accuracy_undropoff": 0.998591012530635,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.0564034210818616,
      "eval_iou_undropoff": 0.9593934057781895,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.23478250205516815,
      "eval_mean_accuracy": 0.5284131465781687,
      "eval_mean_iou": 0.5078984134300256,
      "eval_overall_accuracy": 0.9594917297363281,
      "eval_runtime": 2.9691,
      "eval_samples_per_second": 6.736,
      "eval_steps_per_second": 0.674,
      "step": 80
    },
    {
      "epoch": 40.5,
      "learning_rate": 1.3947368421052631e-05,
      "loss": 0.2082,
      "step": 81
    },
    {
      "epoch": 41.0,
      "learning_rate": 1.385964912280702e-05,
      "loss": 0.1867,
      "step": 82
    },
    {
      "epoch": 41.5,
      "learning_rate": 1.3771929824561406e-05,
      "loss": 0.189,
      "step": 83
    },
    {
      "epoch": 42.0,
      "learning_rate": 1.3684210526315791e-05,
      "loss": 0.2007,
      "step": 84
    },
    {
      "epoch": 42.5,
      "learning_rate": 1.3596491228070177e-05,
      "loss": 0.1998,
      "step": 85
    },
    {
      "epoch": 43.0,
      "learning_rate": 1.3508771929824562e-05,
      "loss": 0.1833,
      "step": 86
    },
    {
      "epoch": 43.5,
      "learning_rate": 1.3421052631578948e-05,
      "loss": 0.2017,
      "step": 87
    },
    {
      "epoch": 44.0,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.1757,
      "step": 88
    },
    {
      "epoch": 44.5,
      "learning_rate": 1.3245614035087719e-05,
      "loss": 0.1922,
      "step": 89
    },
    {
      "epoch": 45.0,
      "learning_rate": 1.3157894736842108e-05,
      "loss": 0.1784,
      "step": 90
    },
    {
      "epoch": 45.0,
      "eval_accuracy_dropoff": 0.29602513819124293,
      "eval_accuracy_undropoff": 0.9920426039600906,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2501434219706954,
      "eval_iou_undropoff": 0.9626429123728263,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.21779485046863556,
      "eval_mean_accuracy": 0.6440338710756668,
      "eval_mean_iou": 0.6063931671717608,
      "eval_overall_accuracy": 0.9631027221679688,
      "eval_runtime": 2.7873,
      "eval_samples_per_second": 7.175,
      "eval_steps_per_second": 0.718,
      "step": 90
    },
    {
      "epoch": 45.5,
      "learning_rate": 1.3070175438596493e-05,
      "loss": 0.184,
      "step": 91
    },
    {
      "epoch": 46.0,
      "learning_rate": 1.2982456140350879e-05,
      "loss": 0.1835,
      "step": 92
    },
    {
      "epoch": 46.5,
      "learning_rate": 1.2894736842105264e-05,
      "loss": 0.1654,
      "step": 93
    },
    {
      "epoch": 47.0,
      "learning_rate": 1.280701754385965e-05,
      "loss": 0.181,
      "step": 94
    },
    {
      "epoch": 47.5,
      "learning_rate": 1.2719298245614035e-05,
      "loss": 0.1782,
      "step": 95
    },
    {
      "epoch": 48.0,
      "learning_rate": 1.263157894736842e-05,
      "loss": 0.172,
      "step": 96
    },
    {
      "epoch": 48.5,
      "learning_rate": 1.2543859649122808e-05,
      "loss": 0.1819,
      "step": 97
    },
    {
      "epoch": 49.0,
      "learning_rate": 1.2456140350877195e-05,
      "loss": 0.1685,
      "step": 98
    },
    {
      "epoch": 49.5,
      "learning_rate": 1.236842105263158e-05,
      "loss": 0.1616,
      "step": 99
    },
    {
      "epoch": 50.0,
      "learning_rate": 1.2280701754385966e-05,
      "loss": 0.1631,
      "step": 100
    },
    {
      "epoch": 50.0,
      "eval_accuracy_dropoff": 0.3760499094015918,
      "eval_accuracy_undropoff": 0.9860599794821175,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2846008568194916,
      "eval_iou_undropoff": 0.9600719098963882,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.1942574679851532,
      "eval_mean_accuracy": 0.6810549444418547,
      "eval_mean_iou": 0.62233638335794,
      "eval_overall_accuracy": 0.9606962203979492,
      "eval_runtime": 2.9092,
      "eval_samples_per_second": 6.875,
      "eval_steps_per_second": 0.687,
      "step": 100
    },
    {
      "epoch": 50.5,
      "learning_rate": 1.2192982456140352e-05,
      "loss": 0.153,
      "step": 101
    },
    {
      "epoch": 51.0,
      "learning_rate": 1.2105263157894737e-05,
      "loss": 0.172,
      "step": 102
    },
    {
      "epoch": 51.5,
      "learning_rate": 1.2017543859649124e-05,
      "loss": 0.1695,
      "step": 103
    },
    {
      "epoch": 52.0,
      "learning_rate": 1.192982456140351e-05,
      "loss": 0.1743,
      "step": 104
    },
    {
      "epoch": 52.5,
      "learning_rate": 1.1842105263157895e-05,
      "loss": 0.1628,
      "step": 105
    },
    {
      "epoch": 53.0,
      "learning_rate": 1.1754385964912282e-05,
      "loss": 0.1585,
      "step": 106
    },
    {
      "epoch": 53.5,
      "learning_rate": 1.1666666666666668e-05,
      "loss": 0.1494,
      "step": 107
    },
    {
      "epoch": 54.0,
      "learning_rate": 1.1578947368421053e-05,
      "loss": 0.15,
      "step": 108
    },
    {
      "epoch": 54.5,
      "learning_rate": 1.1491228070175439e-05,
      "loss": 0.1556,
      "step": 109
    },
    {
      "epoch": 55.0,
      "learning_rate": 1.1403508771929826e-05,
      "loss": 0.1468,
      "step": 110
    },
    {
      "epoch": 55.0,
      "eval_accuracy_dropoff": 0.358338494002156,
      "eval_accuracy_undropoff": 0.9878841008301682,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2801101564853196,
      "eval_iou_undropoff": 0.9611289112145298,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.17585495114326477,
      "eval_mean_accuracy": 0.6731112974161622,
      "eval_mean_iou": 0.6206195338499247,
      "eval_overall_accuracy": 0.9617080688476562,
      "eval_runtime": 5.2724,
      "eval_samples_per_second": 3.793,
      "eval_steps_per_second": 0.379,
      "step": 110
    },
    {
      "epoch": 55.5,
      "learning_rate": 1.1315789473684212e-05,
      "loss": 0.1559,
      "step": 111
    },
    {
      "epoch": 56.0,
      "learning_rate": 1.1228070175438597e-05,
      "loss": 0.1603,
      "step": 112
    },
    {
      "epoch": 56.5,
      "learning_rate": 1.1140350877192983e-05,
      "loss": 0.1488,
      "step": 113
    },
    {
      "epoch": 57.0,
      "learning_rate": 1.105263157894737e-05,
      "loss": 0.1414,
      "step": 114
    },
    {
      "epoch": 57.5,
      "learning_rate": 1.0964912280701755e-05,
      "loss": 0.1461,
      "step": 115
    },
    {
      "epoch": 58.0,
      "learning_rate": 1.0877192982456142e-05,
      "loss": 0.1519,
      "step": 116
    },
    {
      "epoch": 58.5,
      "learning_rate": 1.0789473684210528e-05,
      "loss": 0.1509,
      "step": 117
    },
    {
      "epoch": 59.0,
      "learning_rate": 1.0701754385964913e-05,
      "loss": 0.1393,
      "step": 118
    },
    {
      "epoch": 59.5,
      "learning_rate": 1.0614035087719299e-05,
      "loss": 0.1369,
      "step": 119
    },
    {
      "epoch": 60.0,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.1353,
      "step": 120
    },
    {
      "epoch": 60.0,
      "eval_accuracy_dropoff": 0.2731347049244249,
      "eval_accuracy_undropoff": 0.9938581678983698,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.23926190543163342,
      "eval_iou_undropoff": 0.9634762386709105,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.16572275757789612,
      "eval_mean_accuracy": 0.6334964364113973,
      "eval_mean_iou": 0.601369072051272,
      "eval_overall_accuracy": 0.9638910293579102,
      "eval_runtime": 2.7535,
      "eval_samples_per_second": 7.264,
      "eval_steps_per_second": 0.726,
      "step": 120
    },
    {
      "epoch": 60.5,
      "learning_rate": 1.043859649122807e-05,
      "loss": 0.1456,
      "step": 121
    },
    {
      "epoch": 61.0,
      "learning_rate": 1.0350877192982459e-05,
      "loss": 0.1355,
      "step": 122
    },
    {
      "epoch": 61.5,
      "learning_rate": 1.0263157894736844e-05,
      "loss": 0.1299,
      "step": 123
    },
    {
      "epoch": 62.0,
      "learning_rate": 1.017543859649123e-05,
      "loss": 0.1451,
      "step": 124
    },
    {
      "epoch": 62.5,
      "learning_rate": 1.0087719298245615e-05,
      "loss": 0.1869,
      "step": 125
    },
    {
      "epoch": 63.0,
      "learning_rate": 1e-05,
      "loss": 0.1365,
      "step": 126
    },
    {
      "epoch": 63.5,
      "learning_rate": 9.912280701754386e-06,
      "loss": 0.1427,
      "step": 127
    },
    {
      "epoch": 64.0,
      "learning_rate": 9.824561403508772e-06,
      "loss": 0.1475,
      "step": 128
    },
    {
      "epoch": 64.5,
      "learning_rate": 9.736842105263159e-06,
      "loss": 0.1361,
      "step": 129
    },
    {
      "epoch": 65.0,
      "learning_rate": 9.649122807017545e-06,
      "loss": 0.1474,
      "step": 130
    },
    {
      "epoch": 65.0,
      "eval_accuracy_dropoff": 0.25048739649991975,
      "eval_accuracy_undropoff": 0.9950542151710935,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2248535085877116,
      "eval_iou_undropoff": 0.9637178060145232,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.1590222418308258,
      "eval_mean_accuracy": 0.6227708058355066,
      "eval_mean_iou": 0.5942856573011174,
      "eval_overall_accuracy": 0.9640956878662109,
      "eval_runtime": 2.8192,
      "eval_samples_per_second": 7.094,
      "eval_steps_per_second": 0.709,
      "step": 130
    },
    {
      "epoch": 65.5,
      "learning_rate": 9.56140350877193e-06,
      "loss": 0.1335,
      "step": 131
    },
    {
      "epoch": 66.0,
      "learning_rate": 9.473684210526315e-06,
      "loss": 0.1261,
      "step": 132
    },
    {
      "epoch": 66.5,
      "learning_rate": 9.385964912280703e-06,
      "loss": 0.1367,
      "step": 133
    },
    {
      "epoch": 67.0,
      "learning_rate": 9.298245614035088e-06,
      "loss": 0.1237,
      "step": 134
    },
    {
      "epoch": 67.5,
      "learning_rate": 9.210526315789474e-06,
      "loss": 0.1247,
      "step": 135
    },
    {
      "epoch": 68.0,
      "learning_rate": 9.12280701754386e-06,
      "loss": 0.1635,
      "step": 136
    },
    {
      "epoch": 68.5,
      "learning_rate": 9.035087719298246e-06,
      "loss": 0.1204,
      "step": 137
    },
    {
      "epoch": 69.0,
      "learning_rate": 8.947368421052632e-06,
      "loss": 0.1264,
      "step": 138
    },
    {
      "epoch": 69.5,
      "learning_rate": 8.859649122807017e-06,
      "loss": 0.1248,
      "step": 139
    },
    {
      "epoch": 70.0,
      "learning_rate": 8.771929824561405e-06,
      "loss": 0.1172,
      "step": 140
    },
    {
      "epoch": 70.0,
      "eval_accuracy_dropoff": 0.3399986238216473,
      "eval_accuracy_undropoff": 0.9924475883527683,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.28958565316767276,
      "eval_iou_undropoff": 0.9648219438232984,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.15622450411319733,
      "eval_mean_accuracy": 0.6662231060872078,
      "eval_mean_iou": 0.6272037984954856,
      "eval_overall_accuracy": 0.9653192520141601,
      "eval_runtime": 2.8324,
      "eval_samples_per_second": 7.061,
      "eval_steps_per_second": 0.706,
      "step": 140
    },
    {
      "epoch": 70.5,
      "learning_rate": 8.68421052631579e-06,
      "loss": 0.1217,
      "step": 141
    },
    {
      "epoch": 71.0,
      "learning_rate": 8.596491228070176e-06,
      "loss": 0.117,
      "step": 142
    },
    {
      "epoch": 71.5,
      "learning_rate": 8.508771929824563e-06,
      "loss": 0.1138,
      "step": 143
    },
    {
      "epoch": 72.0,
      "learning_rate": 8.421052631578948e-06,
      "loss": 0.1261,
      "step": 144
    },
    {
      "epoch": 72.5,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.1172,
      "step": 145
    },
    {
      "epoch": 73.0,
      "learning_rate": 8.24561403508772e-06,
      "loss": 0.1237,
      "step": 146
    },
    {
      "epoch": 73.5,
      "learning_rate": 8.157894736842106e-06,
      "loss": 0.1122,
      "step": 147
    },
    {
      "epoch": 74.0,
      "learning_rate": 8.070175438596492e-06,
      "loss": 0.1352,
      "step": 148
    },
    {
      "epoch": 74.5,
      "learning_rate": 7.982456140350877e-06,
      "loss": 0.1129,
      "step": 149
    },
    {
      "epoch": 75.0,
      "learning_rate": 7.894736842105265e-06,
      "loss": 0.1169,
      "step": 150
    },
    {
      "epoch": 75.0,
      "eval_accuracy_dropoff": 0.3466868506158398,
      "eval_accuracy_undropoff": 0.9924615190198383,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2953629702002931,
      "eval_iou_undropoff": 0.9651077234859803,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.15383288264274597,
      "eval_mean_accuracy": 0.669574184817839,
      "eval_mean_iou": 0.6302353468431368,
      "eval_overall_accuracy": 0.9656106948852539,
      "eval_runtime": 2.904,
      "eval_samples_per_second": 6.887,
      "eval_steps_per_second": 0.689,
      "step": 150
    },
    {
      "epoch": 75.5,
      "learning_rate": 7.80701754385965e-06,
      "loss": 0.1075,
      "step": 151
    },
    {
      "epoch": 76.0,
      "learning_rate": 7.719298245614036e-06,
      "loss": 0.1224,
      "step": 152
    },
    {
      "epoch": 76.5,
      "learning_rate": 7.631578947368423e-06,
      "loss": 0.1101,
      "step": 153
    },
    {
      "epoch": 77.0,
      "learning_rate": 7.5438596491228074e-06,
      "loss": 0.1276,
      "step": 154
    },
    {
      "epoch": 77.5,
      "learning_rate": 7.456140350877194e-06,
      "loss": 0.1119,
      "step": 155
    },
    {
      "epoch": 78.0,
      "learning_rate": 7.368421052631579e-06,
      "loss": 0.1088,
      "step": 156
    },
    {
      "epoch": 78.5,
      "learning_rate": 7.280701754385966e-06,
      "loss": 0.1119,
      "step": 157
    },
    {
      "epoch": 79.0,
      "learning_rate": 7.192982456140352e-06,
      "loss": 0.1174,
      "step": 158
    },
    {
      "epoch": 79.5,
      "learning_rate": 7.1052631578947375e-06,
      "loss": 0.1087,
      "step": 159
    },
    {
      "epoch": 80.0,
      "learning_rate": 7.017543859649123e-06,
      "loss": 0.1263,
      "step": 160
    },
    {
      "epoch": 80.0,
      "eval_accuracy_dropoff": 0.36454964563407416,
      "eval_accuracy_undropoff": 0.9921878809166776,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3089212827988338,
      "eval_iou_undropoff": 0.9655692652723684,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.15396572649478912,
      "eval_mean_accuracy": 0.6783687632753759,
      "eval_mean_iou": 0.6372452740356012,
      "eval_overall_accuracy": 0.9660911560058594,
      "eval_runtime": 2.7842,
      "eval_samples_per_second": 7.183,
      "eval_steps_per_second": 0.718,
      "step": 160
    },
    {
      "epoch": 80.5,
      "learning_rate": 6.92982456140351e-06,
      "loss": 0.108,
      "step": 161
    },
    {
      "epoch": 81.0,
      "learning_rate": 6.842105263157896e-06,
      "loss": 0.1088,
      "step": 162
    },
    {
      "epoch": 81.5,
      "learning_rate": 6.754385964912281e-06,
      "loss": 0.1062,
      "step": 163
    },
    {
      "epoch": 82.0,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.1077,
      "step": 164
    },
    {
      "epoch": 82.5,
      "learning_rate": 6.578947368421054e-06,
      "loss": 0.1095,
      "step": 165
    },
    {
      "epoch": 83.0,
      "learning_rate": 6.491228070175439e-06,
      "loss": 0.1041,
      "step": 166
    },
    {
      "epoch": 83.5,
      "learning_rate": 6.403508771929825e-06,
      "loss": 0.1064,
      "step": 167
    },
    {
      "epoch": 84.0,
      "learning_rate": 6.31578947368421e-06,
      "loss": 0.1226,
      "step": 168
    },
    {
      "epoch": 84.5,
      "learning_rate": 6.2280701754385975e-06,
      "loss": 0.12,
      "step": 169
    },
    {
      "epoch": 85.0,
      "learning_rate": 6.140350877192983e-06,
      "loss": 0.1028,
      "step": 170
    },
    {
      "epoch": 85.0,
      "eval_accuracy_dropoff": 0.3991651184660199,
      "eval_accuracy_undropoff": 0.9904479405996356,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3271364284022512,
      "eval_iou_undropoff": 0.9652867104330137,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.15117403864860535,
      "eval_mean_accuracy": 0.6948065295328277,
      "eval_mean_iou": 0.6462115694176325,
      "eval_overall_accuracy": 0.9658628463745117,
      "eval_runtime": 2.7735,
      "eval_samples_per_second": 7.211,
      "eval_steps_per_second": 0.721,
      "step": 170
    },
    {
      "epoch": 85.5,
      "learning_rate": 6.0526315789473685e-06,
      "loss": 0.1076,
      "step": 171
    },
    {
      "epoch": 86.0,
      "learning_rate": 5.964912280701755e-06,
      "loss": 0.1055,
      "step": 172
    },
    {
      "epoch": 86.5,
      "learning_rate": 5.877192982456141e-06,
      "loss": 0.1103,
      "step": 173
    },
    {
      "epoch": 87.0,
      "learning_rate": 5.789473684210527e-06,
      "loss": 0.1052,
      "step": 174
    },
    {
      "epoch": 87.5,
      "learning_rate": 5.701754385964913e-06,
      "loss": 0.1101,
      "step": 175
    },
    {
      "epoch": 88.0,
      "learning_rate": 5.6140350877192985e-06,
      "loss": 0.1048,
      "step": 176
    },
    {
      "epoch": 88.5,
      "learning_rate": 5.526315789473685e-06,
      "loss": 0.1062,
      "step": 177
    },
    {
      "epoch": 89.0,
      "learning_rate": 5.438596491228071e-06,
      "loss": 0.0989,
      "step": 178
    },
    {
      "epoch": 89.5,
      "learning_rate": 5.350877192982457e-06,
      "loss": 0.1038,
      "step": 179
    },
    {
      "epoch": 90.0,
      "learning_rate": 5.263157894736842e-06,
      "loss": 0.1163,
      "step": 180
    },
    {
      "epoch": 90.0,
      "eval_accuracy_dropoff": 0.39533934264547355,
      "eval_accuracy_undropoff": 0.9910939255326241,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.32800374504751717,
      "eval_iou_undropoff": 0.9657600658405825,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.14928457140922546,
      "eval_mean_accuracy": 0.6932166340890489,
      "eval_mean_iou": 0.6468819054440498,
      "eval_overall_accuracy": 0.9663228988647461,
      "eval_runtime": 2.7734,
      "eval_samples_per_second": 7.211,
      "eval_steps_per_second": 0.721,
      "step": 180
    },
    {
      "epoch": 90.5,
      "learning_rate": 5.175438596491229e-06,
      "loss": 0.0987,
      "step": 181
    },
    {
      "epoch": 91.0,
      "learning_rate": 5.087719298245615e-06,
      "loss": 0.1142,
      "step": 182
    },
    {
      "epoch": 91.5,
      "learning_rate": 5e-06,
      "loss": 0.1002,
      "step": 183
    },
    {
      "epoch": 92.0,
      "learning_rate": 4.912280701754386e-06,
      "loss": 0.0975,
      "step": 184
    },
    {
      "epoch": 92.5,
      "learning_rate": 4.824561403508772e-06,
      "loss": 0.1074,
      "step": 185
    },
    {
      "epoch": 93.0,
      "learning_rate": 4.736842105263158e-06,
      "loss": 0.0931,
      "step": 186
    },
    {
      "epoch": 93.5,
      "learning_rate": 4.649122807017544e-06,
      "loss": 0.1008,
      "step": 187
    },
    {
      "epoch": 94.0,
      "learning_rate": 4.56140350877193e-06,
      "loss": 0.0978,
      "step": 188
    },
    {
      "epoch": 94.5,
      "learning_rate": 4.473684210526316e-06,
      "loss": 0.1001,
      "step": 189
    },
    {
      "epoch": 95.0,
      "learning_rate": 4.385964912280702e-06,
      "loss": 0.0998,
      "step": 190
    },
    {
      "epoch": 95.0,
      "eval_accuracy_dropoff": 0.38693089291038785,
      "eval_accuracy_undropoff": 0.9917826975144705,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.32531258918722955,
      "eval_iou_undropoff": 0.9660878261660151,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.14812232553958893,
      "eval_mean_accuracy": 0.6893567952124291,
      "eval_mean_iou": 0.6457002076766223,
      "eval_overall_accuracy": 0.966633415222168,
      "eval_runtime": 4.5756,
      "eval_samples_per_second": 4.371,
      "eval_steps_per_second": 0.437,
      "step": 190
    },
    {
      "epoch": 95.5,
      "learning_rate": 4.298245614035088e-06,
      "loss": 0.1011,
      "step": 191
    },
    {
      "epoch": 96.0,
      "learning_rate": 4.210526315789474e-06,
      "loss": 0.0942,
      "step": 192
    },
    {
      "epoch": 96.5,
      "learning_rate": 4.12280701754386e-06,
      "loss": 0.1065,
      "step": 193
    },
    {
      "epoch": 97.0,
      "learning_rate": 4.035087719298246e-06,
      "loss": 0.1003,
      "step": 194
    },
    {
      "epoch": 97.5,
      "learning_rate": 3.947368421052632e-06,
      "loss": 0.0949,
      "step": 195
    },
    {
      "epoch": 98.0,
      "learning_rate": 3.859649122807018e-06,
      "loss": 0.1001,
      "step": 196
    },
    {
      "epoch": 98.5,
      "learning_rate": 3.7719298245614037e-06,
      "loss": 0.099,
      "step": 197
    },
    {
      "epoch": 99.0,
      "learning_rate": 3.6842105263157896e-06,
      "loss": 0.1054,
      "step": 198
    },
    {
      "epoch": 99.5,
      "learning_rate": 3.596491228070176e-06,
      "loss": 0.0948,
      "step": 199
    },
    {
      "epoch": 100.0,
      "learning_rate": 3.5087719298245615e-06,
      "loss": 0.0997,
      "step": 200
    },
    {
      "epoch": 100.0,
      "eval_accuracy_dropoff": 0.3868896075598064,
      "eval_accuracy_undropoff": 0.9916963273786366,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.32473432927768364,
      "eval_iou_undropoff": 0.9660020083201836,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.1464928388595581,
      "eval_mean_accuracy": 0.6892929674692214,
      "eval_mean_iou": 0.6453681687989337,
      "eval_overall_accuracy": 0.9665489196777344,
      "eval_runtime": 2.84,
      "eval_samples_per_second": 7.042,
      "eval_steps_per_second": 0.704,
      "step": 200
    },
    {
      "epoch": 100.5,
      "learning_rate": 3.421052631578948e-06,
      "loss": 0.0948,
      "step": 201
    },
    {
      "epoch": 101.0,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0931,
      "step": 202
    },
    {
      "epoch": 101.5,
      "learning_rate": 3.2456140350877197e-06,
      "loss": 0.0964,
      "step": 203
    },
    {
      "epoch": 102.0,
      "learning_rate": 3.157894736842105e-06,
      "loss": 0.0978,
      "step": 204
    },
    {
      "epoch": 102.5,
      "learning_rate": 3.0701754385964915e-06,
      "loss": 0.1001,
      "step": 205
    },
    {
      "epoch": 103.0,
      "learning_rate": 2.9824561403508774e-06,
      "loss": 0.0979,
      "step": 206
    },
    {
      "epoch": 103.5,
      "learning_rate": 2.8947368421052634e-06,
      "loss": 0.099,
      "step": 207
    },
    {
      "epoch": 104.0,
      "learning_rate": 2.8070175438596493e-06,
      "loss": 0.0976,
      "step": 208
    },
    {
      "epoch": 104.5,
      "learning_rate": 2.7192982456140356e-06,
      "loss": 0.0998,
      "step": 209
    },
    {
      "epoch": 105.0,
      "learning_rate": 2.631578947368421e-06,
      "loss": 0.0998,
      "step": 210
    },
    {
      "epoch": 105.0,
      "eval_accuracy_dropoff": 0.39580724328539646,
      "eval_accuracy_undropoff": 0.9915522444792269,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3312957871941761,
      "eval_iou_undropoff": 0.9662257815039224,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.14729923009872437,
      "eval_mean_accuracy": 0.6936797438823117,
      "eval_mean_iou": 0.6487607843490493,
      "eval_overall_accuracy": 0.9667816162109375,
      "eval_runtime": 2.8077,
      "eval_samples_per_second": 7.123,
      "eval_steps_per_second": 0.712,
      "step": 210
    },
    {
      "epoch": 105.5,
      "learning_rate": 2.5438596491228075e-06,
      "loss": 0.1039,
      "step": 211
    },
    {
      "epoch": 106.0,
      "learning_rate": 2.456140350877193e-06,
      "loss": 0.1002,
      "step": 212
    },
    {
      "epoch": 106.5,
      "learning_rate": 2.368421052631579e-06,
      "loss": 0.0959,
      "step": 213
    },
    {
      "epoch": 107.0,
      "learning_rate": 2.280701754385965e-06,
      "loss": 0.0939,
      "step": 214
    },
    {
      "epoch": 107.5,
      "learning_rate": 2.192982456140351e-06,
      "loss": 0.0918,
      "step": 215
    },
    {
      "epoch": 108.0,
      "learning_rate": 2.105263157894737e-06,
      "loss": 0.102,
      "step": 216
    },
    {
      "epoch": 108.5,
      "learning_rate": 2.017543859649123e-06,
      "loss": 0.0915,
      "step": 217
    },
    {
      "epoch": 109.0,
      "learning_rate": 1.929824561403509e-06,
      "loss": 0.0962,
      "step": 218
    },
    {
      "epoch": 109.5,
      "learning_rate": 1.8421052631578948e-06,
      "loss": 0.0897,
      "step": 219
    },
    {
      "epoch": 110.0,
      "learning_rate": 1.7543859649122807e-06,
      "loss": 0.1003,
      "step": 220
    },
    {
      "epoch": 110.0,
      "eval_accuracy_dropoff": 0.3614486570792908,
      "eval_accuracy_undropoff": 0.9933879083799928,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.31364541039726135,
      "eval_iou_undropoff": 0.9666105483138739,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.14373770356178284,
      "eval_mean_accuracy": 0.6774182827296418,
      "eval_mean_iou": 0.6401279793555676,
      "eval_overall_accuracy": 0.9671123504638672,
      "eval_runtime": 2.7413,
      "eval_samples_per_second": 7.296,
      "eval_steps_per_second": 0.73,
      "step": 220
    },
    {
      "epoch": 110.5,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.0975,
      "step": 221
    },
    {
      "epoch": 111.0,
      "learning_rate": 1.5789473684210526e-06,
      "loss": 0.1052,
      "step": 222
    },
    {
      "epoch": 111.5,
      "learning_rate": 1.4912280701754387e-06,
      "loss": 0.0958,
      "step": 223
    },
    {
      "epoch": 112.0,
      "learning_rate": 1.4035087719298246e-06,
      "loss": 0.1006,
      "step": 224
    },
    {
      "epoch": 112.5,
      "learning_rate": 1.3157894736842106e-06,
      "loss": 0.0882,
      "step": 225
    },
    {
      "epoch": 113.0,
      "learning_rate": 1.2280701754385965e-06,
      "loss": 0.1023,
      "step": 226
    },
    {
      "epoch": 113.5,
      "learning_rate": 1.1403508771929824e-06,
      "loss": 0.0941,
      "step": 227
    },
    {
      "epoch": 114.0,
      "learning_rate": 1.0526315789473685e-06,
      "loss": 0.0942,
      "step": 228
    },
    {
      "epoch": 114.5,
      "learning_rate": 9.649122807017545e-07,
      "loss": 0.0896,
      "step": 229
    },
    {
      "epoch": 115.0,
      "learning_rate": 8.771929824561404e-07,
      "loss": 0.0932,
      "step": 230
    },
    {
      "epoch": 115.0,
      "eval_accuracy_dropoff": 0.3876235693479208,
      "eval_accuracy_undropoff": 0.9920292703216094,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.32745972632892456,
      "eval_iou_undropoff": 0.9663562979074826,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.1433604657649994,
      "eval_mean_accuracy": 0.6898264198347651,
      "eval_mean_iou": 0.6469080121182036,
      "eval_overall_accuracy": 0.9668985366821289,
      "eval_runtime": 2.7102,
      "eval_samples_per_second": 7.38,
      "eval_steps_per_second": 0.738,
      "step": 230
    },
    {
      "epoch": 115.5,
      "learning_rate": 7.894736842105263e-07,
      "loss": 0.0888,
      "step": 231
    },
    {
      "epoch": 116.0,
      "learning_rate": 7.017543859649123e-07,
      "loss": 0.1018,
      "step": 232
    },
    {
      "epoch": 116.5,
      "learning_rate": 6.140350877192982e-07,
      "loss": 0.0975,
      "step": 233
    },
    {
      "epoch": 117.0,
      "learning_rate": 5.263157894736843e-07,
      "loss": 0.0908,
      "step": 234
    },
    {
      "epoch": 117.5,
      "learning_rate": 4.385964912280702e-07,
      "loss": 0.0911,
      "step": 235
    },
    {
      "epoch": 118.0,
      "learning_rate": 3.5087719298245616e-07,
      "loss": 0.0916,
      "step": 236
    },
    {
      "epoch": 118.5,
      "learning_rate": 2.6315789473684213e-07,
      "loss": 0.0999,
      "step": 237
    },
    {
      "epoch": 119.0,
      "learning_rate": 1.7543859649122808e-07,
      "loss": 0.0915,
      "step": 238
    },
    {
      "epoch": 119.5,
      "learning_rate": 8.771929824561404e-08,
      "loss": 0.091,
      "step": 239
    },
    {
      "epoch": 120.0,
      "learning_rate": 0.0,
      "loss": 0.0942,
      "step": 240
    },
    {
      "epoch": 120.0,
      "eval_accuracy_dropoff": 0.37821050941535356,
      "eval_accuracy_undropoff": 0.9924722655344351,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3222878497074126,
      "eval_iou_undropoff": 0.9664033941344445,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.14285185933113098,
      "eval_mean_accuracy": 0.6853413874748944,
      "eval_mean_iou": 0.6443456219209286,
      "eval_overall_accuracy": 0.9669317245483399,
      "eval_runtime": 2.8324,
      "eval_samples_per_second": 7.061,
      "eval_steps_per_second": 0.706,
      "step": 240
    }
  ],
  "max_steps": 240,
  "num_train_epochs": 120,
  "total_flos": 2.778548611716219e+18,
  "trial_name": null,
  "trial_params": null
}