|
{ |
|
"best_metric": 0.6278509497642517, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b5_1/checkpoint-240", |
|
"epoch": 120.0, |
|
"global_step": 240, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.6666666666666665e-07, |
|
"loss": 1.0244, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 3.333333333333333e-07, |
|
"loss": 1.0185, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 5e-07, |
|
"loss": 1.0196, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 6.666666666666666e-07, |
|
"loss": 1.0239, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 8.333333333333333e-07, |
|
"loss": 1.0206, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 1e-06, |
|
"loss": 1.0123, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 1.1666666666666668e-06, |
|
"loss": 1.0125, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 1.3333333333333332e-06, |
|
"loss": 1.016, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 1.5e-06, |
|
"loss": 1.007, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 1.6666666666666667e-06, |
|
"loss": 1.0071, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.025546457487557055, |
|
"eval_accuracy_undropoff": 0.5241011485834999, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.014703669184605108, |
|
"eval_iou_undropoff": 0.5086504735858094, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0205577611923218, |
|
"eval_mean_accuracy": 0.27482380303552845, |
|
"eval_mean_iou": 0.17445138092347148, |
|
"eval_overall_accuracy": 0.5033716201782227, |
|
"eval_runtime": 2.8831, |
|
"eval_samples_per_second": 6.937, |
|
"eval_steps_per_second": 0.694, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 1.833333333333333e-06, |
|
"loss": 1.002, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 2e-06, |
|
"loss": 0.9999, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 1.9912280701754384e-06, |
|
"loss": 0.9963, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 1.982456140350877e-06, |
|
"loss": 0.9889, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 1.9736842105263157e-06, |
|
"loss": 0.9863, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 1.964912280701754e-06, |
|
"loss": 0.9881, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 1.956140350877193e-06, |
|
"loss": 0.9856, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 1.9473684210526315e-06, |
|
"loss": 0.9763, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 1.93859649122807e-06, |
|
"loss": 0.9789, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 1.929824561403509e-06, |
|
"loss": 0.9688, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.09924998279777059, |
|
"eval_accuracy_undropoff": 0.5978783594052401, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.05823063486894339, |
|
"eval_iou_undropoff": 0.5837983317382666, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9873144030570984, |
|
"eval_mean_accuracy": 0.3485641711015054, |
|
"eval_mean_iou": 0.21400965553573667, |
|
"eval_overall_accuracy": 0.577145767211914, |
|
"eval_runtime": 2.4113, |
|
"eval_samples_per_second": 8.294, |
|
"eval_steps_per_second": 0.829, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 1.9210526315789474e-06, |
|
"loss": 0.9671, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 1.912280701754386e-06, |
|
"loss": 0.9643, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 1.9035087719298243e-06, |
|
"loss": 0.9621, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 1.894736842105263e-06, |
|
"loss": 0.9631, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 1.8859649122807019e-06, |
|
"loss": 0.958, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 1.8771929824561403e-06, |
|
"loss": 0.9523, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 1.868421052631579e-06, |
|
"loss": 0.9526, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 1.8596491228070175e-06, |
|
"loss": 0.9428, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 1.850877192982456e-06, |
|
"loss": 0.947, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 1.8421052631578946e-06, |
|
"loss": 0.9406, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.2037844904699649, |
|
"eval_accuracy_undropoff": 0.6855052404184374, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.1135240392724076, |
|
"eval_iou_undropoff": 0.6704558637017982, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9312955141067505, |
|
"eval_mean_accuracy": 0.4446448654442012, |
|
"eval_mean_iou": 0.2613266343247353, |
|
"eval_overall_accuracy": 0.6654756546020508, |
|
"eval_runtime": 2.3961, |
|
"eval_samples_per_second": 8.347, |
|
"eval_steps_per_second": 0.835, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 1.833333333333333e-06, |
|
"loss": 0.9434, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 1.8245614035087718e-06, |
|
"loss": 0.9337, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 1.8157894736842106e-06, |
|
"loss": 0.9365, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 1.807017543859649e-06, |
|
"loss": 0.9311, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 1.7982456140350878e-06, |
|
"loss": 0.9313, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 1.7894736842105262e-06, |
|
"loss": 0.9281, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 1.7807017543859647e-06, |
|
"loss": 0.927, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 1.7719298245614034e-06, |
|
"loss": 0.9209, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 1.7631578947368419e-06, |
|
"loss": 0.9161, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 1.7543859649122805e-06, |
|
"loss": 0.9278, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.30091515860455514, |
|
"eval_accuracy_undropoff": 0.7288984723033463, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.16481371211493093, |
|
"eval_iou_undropoff": 0.7141641818866753, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.88507080078125, |
|
"eval_mean_accuracy": 0.5149068154539507, |
|
"eval_mean_iou": 0.2929926313338687, |
|
"eval_overall_accuracy": 0.7111032485961915, |
|
"eval_runtime": 2.3253, |
|
"eval_samples_per_second": 8.601, |
|
"eval_steps_per_second": 0.86, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 1.7456140350877194e-06, |
|
"loss": 0.9123, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 1.7368421052631579e-06, |
|
"loss": 0.9098, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 1.7280701754385966e-06, |
|
"loss": 0.9114, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 1.719298245614035e-06, |
|
"loss": 0.9087, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 1.7105263157894735e-06, |
|
"loss": 0.9003, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 1.7017543859649122e-06, |
|
"loss": 0.9087, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 1.6929824561403506e-06, |
|
"loss": 0.901, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 1.6842105263157893e-06, |
|
"loss": 0.8953, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 1.6754385964912282e-06, |
|
"loss": 0.8924, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 1.6666666666666667e-06, |
|
"loss": 0.8956, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.3769627743755591, |
|
"eval_accuracy_undropoff": 0.7513628670108868, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.19850953583032382, |
|
"eval_iou_undropoff": 0.7370141563659725, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8563140630722046, |
|
"eval_mean_accuracy": 0.564162820693223, |
|
"eval_mean_iou": 0.31184123073209874, |
|
"eval_overall_accuracy": 0.7357955932617187, |
|
"eval_runtime": 2.2641, |
|
"eval_samples_per_second": 8.834, |
|
"eval_steps_per_second": 0.883, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 1.6578947368421053e-06, |
|
"loss": 0.8909, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 1.6491228070175438e-06, |
|
"loss": 0.8821, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 1.6403508771929823e-06, |
|
"loss": 0.8869, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 1.631578947368421e-06, |
|
"loss": 0.8838, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 1.6228070175438594e-06, |
|
"loss": 0.8769, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 1.614035087719298e-06, |
|
"loss": 0.8819, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 1.605263157894737e-06, |
|
"loss": 0.8742, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 1.5964912280701754e-06, |
|
"loss": 0.8858, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 1.5877192982456141e-06, |
|
"loss": 0.8734, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 1.5789473684210526e-06, |
|
"loss": 0.8674, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.43656047157044886, |
|
"eval_accuracy_undropoff": 0.7806618459924953, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2245523958736421, |
|
"eval_iou_undropoff": 0.7664355612520527, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8260032534599304, |
|
"eval_mean_accuracy": 0.6086111587814721, |
|
"eval_mean_iou": 0.3303293190418983, |
|
"eval_overall_accuracy": 0.7663543701171875, |
|
"eval_runtime": 2.6403, |
|
"eval_samples_per_second": 7.575, |
|
"eval_steps_per_second": 0.757, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 1.570175438596491e-06, |
|
"loss": 0.8687, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 1.5614035087719297e-06, |
|
"loss": 0.8671, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 1.5526315789473682e-06, |
|
"loss": 0.866, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 1.5438596491228069e-06, |
|
"loss": 0.853, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 1.5350877192982458e-06, |
|
"loss": 0.8616, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 1.5263157894736842e-06, |
|
"loss": 0.8555, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 1.517543859649123e-06, |
|
"loss": 0.856, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 1.5087719298245614e-06, |
|
"loss": 0.8452, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 1.5e-06, |
|
"loss": 0.8559, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 1.4912280701754385e-06, |
|
"loss": 0.8438, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.4920571572742494, |
|
"eval_accuracy_undropoff": 0.7790013104777522, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.23805307181361615, |
|
"eval_iou_undropoff": 0.7660370749081444, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8148576617240906, |
|
"eval_mean_accuracy": 0.6355292338760008, |
|
"eval_mean_iou": 0.3346967155739202, |
|
"eval_overall_accuracy": 0.7670703887939453, |
|
"eval_runtime": 2.6339, |
|
"eval_samples_per_second": 7.593, |
|
"eval_steps_per_second": 0.759, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 1.482456140350877e-06, |
|
"loss": 0.8387, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 1.4736842105263156e-06, |
|
"loss": 0.8475, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 1.4649122807017541e-06, |
|
"loss": 0.8298, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 1.456140350877193e-06, |
|
"loss": 0.8521, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 1.4473684210526317e-06, |
|
"loss": 0.8356, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 1.4385964912280701e-06, |
|
"loss": 0.8229, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 1.4298245614035088e-06, |
|
"loss": 0.8277, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 1.4210526315789473e-06, |
|
"loss": 0.8268, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 1.4122807017543857e-06, |
|
"loss": 0.8204, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 1.4035087719298244e-06, |
|
"loss": 0.8309, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.49719488979105025, |
|
"eval_accuracy_undropoff": 0.797183020108918, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.25387240561121116, |
|
"eval_iou_undropoff": 0.7839036228326106, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7881223559379578, |
|
"eval_mean_accuracy": 0.6471889549499841, |
|
"eval_mean_iou": 0.34592534281460724, |
|
"eval_overall_accuracy": 0.7847097396850586, |
|
"eval_runtime": 2.3528, |
|
"eval_samples_per_second": 8.501, |
|
"eval_steps_per_second": 0.85, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 1.3947368421052629e-06, |
|
"loss": 0.8164, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 1.3859649122807018e-06, |
|
"loss": 0.8311, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 1.3771929824561405e-06, |
|
"loss": 0.8189, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 1.368421052631579e-06, |
|
"loss": 0.8179, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 1.3596491228070176e-06, |
|
"loss": 0.8202, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 1.350877192982456e-06, |
|
"loss": 0.8104, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 1.3421052631578945e-06, |
|
"loss": 0.8061, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 1.3333333333333332e-06, |
|
"loss": 0.817, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 1.3245614035087717e-06, |
|
"loss": 0.8078, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 1.3157894736842106e-06, |
|
"loss": 0.8069, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_dropoff": 0.5063373013142504, |
|
"eval_accuracy_undropoff": 0.816973124758079, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2667705916473318, |
|
"eval_iou_undropoff": 0.8033164328582806, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7640160322189331, |
|
"eval_mean_accuracy": 0.6616552130361647, |
|
"eval_mean_iou": 0.35669567483520415, |
|
"eval_overall_accuracy": 0.8040571212768555, |
|
"eval_runtime": 2.4378, |
|
"eval_samples_per_second": 8.204, |
|
"eval_steps_per_second": 0.82, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 1.3070175438596492e-06, |
|
"loss": 0.796, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 1.2982456140350877e-06, |
|
"loss": 0.8078, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 1.2894736842105264e-06, |
|
"loss": 0.8064, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 1.2807017543859648e-06, |
|
"loss": 0.7821, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 1.2719298245614033e-06, |
|
"loss": 0.7981, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 1.263157894736842e-06, |
|
"loss": 0.7868, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 1.2543859649122804e-06, |
|
"loss": 0.7887, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 1.2456140350877193e-06, |
|
"loss": 0.7935, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 1.236842105263158e-06, |
|
"loss": 0.7878, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 1.2280701754385965e-06, |
|
"loss": 0.7779, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.5315901740865616, |
|
"eval_accuracy_undropoff": 0.8268033994807842, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.27780135060302485, |
|
"eval_iou_undropoff": 0.8132247439558549, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7485586404800415, |
|
"eval_mean_accuracy": 0.6791967867836729, |
|
"eval_mean_iou": 0.36367536485295987, |
|
"eval_overall_accuracy": 0.8145286560058593, |
|
"eval_runtime": 2.3681, |
|
"eval_samples_per_second": 8.446, |
|
"eval_steps_per_second": 0.845, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 1.2192982456140352e-06, |
|
"loss": 0.7784, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 1.2105263157894736e-06, |
|
"loss": 0.7842, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 1.201754385964912e-06, |
|
"loss": 0.7646, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 1.1929824561403508e-06, |
|
"loss": 0.7897, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 1.1842105263157892e-06, |
|
"loss": 0.7794, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 1.1754385964912281e-06, |
|
"loss": 0.7655, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 1.1666666666666668e-06, |
|
"loss": 0.7677, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 1.1578947368421053e-06, |
|
"loss": 0.7643, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 1.149122807017544e-06, |
|
"loss": 0.7651, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 1.1403508771929824e-06, |
|
"loss": 0.7695, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_dropoff": 0.5541870226381339, |
|
"eval_accuracy_undropoff": 0.8329422464394708, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.28578390091121564, |
|
"eval_iou_undropoff": 0.8195285806313918, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7353506684303284, |
|
"eval_mean_accuracy": 0.6935646345388023, |
|
"eval_mean_iou": 0.3684374938475358, |
|
"eval_overall_accuracy": 0.8213518142700196, |
|
"eval_runtime": 2.4084, |
|
"eval_samples_per_second": 8.304, |
|
"eval_steps_per_second": 0.83, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 1.1315789473684209e-06, |
|
"loss": 0.7605, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 1.1228070175438595e-06, |
|
"loss": 0.7552, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 1.114035087719298e-06, |
|
"loss": 0.7735, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 1.1052631578947369e-06, |
|
"loss": 0.7486, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 1.0964912280701756e-06, |
|
"loss": 0.7472, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 1.087719298245614e-06, |
|
"loss": 0.7678, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 1.0789473684210527e-06, |
|
"loss": 0.7578, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 1.0701754385964912e-06, |
|
"loss": 0.7411, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 1.0614035087719296e-06, |
|
"loss": 0.751, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 1.0526315789473683e-06, |
|
"loss": 0.7568, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.5577375627881374, |
|
"eval_accuracy_undropoff": 0.848569867768118, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2924013736977288, |
|
"eval_iou_undropoff": 0.8346954991270276, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7164114117622375, |
|
"eval_mean_accuracy": 0.7031537152781278, |
|
"eval_mean_iou": 0.3756989576082521, |
|
"eval_overall_accuracy": 0.8364772796630859, |
|
"eval_runtime": 2.284, |
|
"eval_samples_per_second": 8.757, |
|
"eval_steps_per_second": 0.876, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 1.0438596491228068e-06, |
|
"loss": 0.7328, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 1.0350877192982457e-06, |
|
"loss": 0.7637, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 1.0263157894736843e-06, |
|
"loss": 0.7493, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 1.0175438596491228e-06, |
|
"loss": 0.7258, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 1.0087719298245615e-06, |
|
"loss": 0.7437, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 1e-06, |
|
"loss": 0.733, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 9.912280701754384e-07, |
|
"loss": 0.7313, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 9.82456140350877e-07, |
|
"loss": 0.7433, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 9.736842105263158e-07, |
|
"loss": 0.7263, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 9.649122807017545e-07, |
|
"loss": 0.7285, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_dropoff": 0.5629578660061011, |
|
"eval_accuracy_undropoff": 0.8607761172643752, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3042259247231594, |
|
"eval_iou_undropoff": 0.8466948800351104, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6976498365402222, |
|
"eval_mean_accuracy": 0.7118669916352381, |
|
"eval_mean_iou": 0.3836402682527566, |
|
"eval_overall_accuracy": 0.8483930587768554, |
|
"eval_runtime": 2.403, |
|
"eval_samples_per_second": 8.323, |
|
"eval_steps_per_second": 0.832, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 9.56140350877193e-07, |
|
"loss": 0.7262, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 9.473684210526315e-07, |
|
"loss": 0.7348, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 9.385964912280702e-07, |
|
"loss": 0.7368, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 9.298245614035087e-07, |
|
"loss": 0.7111, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 9.210526315789473e-07, |
|
"loss": 0.7316, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 9.122807017543859e-07, |
|
"loss": 0.722, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 9.035087719298246e-07, |
|
"loss": 0.7184, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 8.947368421052631e-07, |
|
"loss": 0.7344, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 8.859649122807017e-07, |
|
"loss": 0.7296, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 8.771929824561403e-07, |
|
"loss": 0.7217, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.5817381132594784, |
|
"eval_accuracy_undropoff": 0.8615813098210208, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.30911656953148425, |
|
"eval_iou_undropoff": 0.8479873812981374, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6922430992126465, |
|
"eval_mean_accuracy": 0.7216597115402497, |
|
"eval_mean_iou": 0.38570131694320725, |
|
"eval_overall_accuracy": 0.8499456405639648, |
|
"eval_runtime": 2.4409, |
|
"eval_samples_per_second": 8.194, |
|
"eval_steps_per_second": 0.819, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 8.684210526315789e-07, |
|
"loss": 0.7193, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 8.596491228070175e-07, |
|
"loss": 0.7183, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 8.508771929824561e-07, |
|
"loss": 0.7078, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 8.421052631578947e-07, |
|
"loss": 0.7164, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 8.333333333333333e-07, |
|
"loss": 0.7079, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 8.245614035087719e-07, |
|
"loss": 0.714, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 8.157894736842105e-07, |
|
"loss": 0.6951, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 8.07017543859649e-07, |
|
"loss": 0.7092, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 7.982456140350877e-07, |
|
"loss": 0.7053, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 7.894736842105263e-07, |
|
"loss": 0.7095, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_dropoff": 0.5827794215463658, |
|
"eval_accuracy_undropoff": 0.8745350391103478, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3172149465037017, |
|
"eval_iou_undropoff": 0.8605078409241332, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6707638502120972, |
|
"eval_mean_accuracy": 0.7286572303283567, |
|
"eval_mean_iou": 0.39257426247594496, |
|
"eval_overall_accuracy": 0.8624040603637695, |
|
"eval_runtime": 2.5432, |
|
"eval_samples_per_second": 7.864, |
|
"eval_steps_per_second": 0.786, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 7.807017543859649e-07, |
|
"loss": 0.7204, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 7.719298245614034e-07, |
|
"loss": 0.7071, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 7.631578947368421e-07, |
|
"loss": 0.6954, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 7.543859649122807e-07, |
|
"loss": 0.7101, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 7.456140350877193e-07, |
|
"loss": 0.7127, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 7.368421052631578e-07, |
|
"loss": 0.683, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 7.280701754385965e-07, |
|
"loss": 0.6881, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 7.192982456140351e-07, |
|
"loss": 0.6876, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 7.105263157894736e-07, |
|
"loss": 0.7081, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 7.017543859649122e-07, |
|
"loss": 0.6944, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.5858207757058648, |
|
"eval_accuracy_undropoff": 0.8781162155949838, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3212115419441817, |
|
"eval_iou_undropoff": 0.8640945209406817, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6636559963226318, |
|
"eval_mean_accuracy": 0.7319684956504242, |
|
"eval_mean_iou": 0.39510202096162117, |
|
"eval_overall_accuracy": 0.8659627914428711, |
|
"eval_runtime": 2.517, |
|
"eval_samples_per_second": 7.946, |
|
"eval_steps_per_second": 0.795, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 6.929824561403509e-07, |
|
"loss": 0.6867, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 6.842105263157895e-07, |
|
"loss": 0.7011, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 6.75438596491228e-07, |
|
"loss": 0.6953, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 6.666666666666666e-07, |
|
"loss": 0.6797, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 6.578947368421053e-07, |
|
"loss": 0.6792, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 6.491228070175438e-07, |
|
"loss": 0.7, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 6.403508771929824e-07, |
|
"loss": 0.686, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 6.31578947368421e-07, |
|
"loss": 0.6837, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 6.228070175438597e-07, |
|
"loss": 0.6896, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 6.140350877192982e-07, |
|
"loss": 0.6878, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_dropoff": 0.6005045987293287, |
|
"eval_accuracy_undropoff": 0.8788236944726098, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.31748884361660845, |
|
"eval_iou_undropoff": 0.8651941216012483, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6632260084152222, |
|
"eval_mean_accuracy": 0.7396641466009692, |
|
"eval_mean_iou": 0.3942276550726189, |
|
"eval_overall_accuracy": 0.8672513961791992, |
|
"eval_runtime": 2.7068, |
|
"eval_samples_per_second": 7.389, |
|
"eval_steps_per_second": 0.739, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 6.052631578947368e-07, |
|
"loss": 0.6892, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 5.964912280701754e-07, |
|
"loss": 0.6707, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 5.877192982456141e-07, |
|
"loss": 0.6824, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 5.789473684210526e-07, |
|
"loss": 0.6725, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 5.701754385964912e-07, |
|
"loss": 0.6696, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 5.614035087719298e-07, |
|
"loss": 0.6839, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 5.526315789473684e-07, |
|
"loss": 0.6838, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 5.43859649122807e-07, |
|
"loss": 0.6648, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 5.350877192982456e-07, |
|
"loss": 0.6693, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 5.263157894736842e-07, |
|
"loss": 0.6868, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.5902061973898484, |
|
"eval_accuracy_undropoff": 0.8879755457090063, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.325670517480535, |
|
"eval_iou_undropoff": 0.8738749678072607, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6468318700790405, |
|
"eval_mean_accuracy": 0.7390908715494273, |
|
"eval_mean_iou": 0.39984849509593196, |
|
"eval_overall_accuracy": 0.8755945205688477, |
|
"eval_runtime": 2.6683, |
|
"eval_samples_per_second": 7.495, |
|
"eval_steps_per_second": 0.75, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 5.175438596491228e-07, |
|
"loss": 0.6574, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 5.087719298245614e-07, |
|
"loss": 0.6836, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 5e-07, |
|
"loss": 0.6629, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 4.912280701754385e-07, |
|
"loss": 0.6804, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 4.824561403508772e-07, |
|
"loss": 0.681, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 4.7368421052631574e-07, |
|
"loss": 0.6714, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 4.6491228070175437e-07, |
|
"loss": 0.6893, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 4.5614035087719294e-07, |
|
"loss": 0.648, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 4.4736842105263156e-07, |
|
"loss": 0.6745, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 4.3859649122807013e-07, |
|
"loss": 0.6581, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_dropoff": 0.5942200509185991, |
|
"eval_accuracy_undropoff": 0.8899351925466951, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3249399847986575, |
|
"eval_iou_undropoff": 0.8759081395906847, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6444216370582581, |
|
"eval_mean_accuracy": 0.742077621732647, |
|
"eval_mean_iou": 0.4002827081297807, |
|
"eval_overall_accuracy": 0.8776395797729493, |
|
"eval_runtime": 2.4463, |
|
"eval_samples_per_second": 8.176, |
|
"eval_steps_per_second": 0.818, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 4.2982456140350876e-07, |
|
"loss": 0.6779, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 4.2105263157894733e-07, |
|
"loss": 0.6504, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 4.1228070175438595e-07, |
|
"loss": 0.6653, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 4.035087719298245e-07, |
|
"loss": 0.6627, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 3.9473684210526315e-07, |
|
"loss": 0.6524, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 3.859649122807017e-07, |
|
"loss": 0.6682, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 3.7719298245614034e-07, |
|
"loss": 0.6396, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 3.684210526315789e-07, |
|
"loss": 0.6903, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 3.5964912280701754e-07, |
|
"loss": 0.6529, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 3.508771929824561e-07, |
|
"loss": 0.6587, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.5913759489896557, |
|
"eval_accuracy_undropoff": 0.8939941909118319, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.32814914180842586, |
|
"eval_iou_undropoff": 0.879741009921869, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.638337254524231, |
|
"eval_mean_accuracy": 0.7426850699507438, |
|
"eval_mean_iou": 0.402630050576765, |
|
"eval_overall_accuracy": 0.8814115524291992, |
|
"eval_runtime": 2.3566, |
|
"eval_samples_per_second": 8.487, |
|
"eval_steps_per_second": 0.849, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.5, |
|
"learning_rate": 3.4210526315789473e-07, |
|
"loss": 0.6522, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"learning_rate": 3.333333333333333e-07, |
|
"loss": 0.6649, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 101.5, |
|
"learning_rate": 3.245614035087719e-07, |
|
"loss": 0.672, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"learning_rate": 3.157894736842105e-07, |
|
"loss": 0.6523, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 102.5, |
|
"learning_rate": 3.070175438596491e-07, |
|
"loss": 0.6533, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"learning_rate": 2.982456140350877e-07, |
|
"loss": 0.6615, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 103.5, |
|
"learning_rate": 2.894736842105263e-07, |
|
"loss": 0.6647, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"learning_rate": 2.807017543859649e-07, |
|
"loss": 0.6435, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 104.5, |
|
"learning_rate": 2.719298245614035e-07, |
|
"loss": 0.6511, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"learning_rate": 2.631578947368421e-07, |
|
"loss": 0.6525, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"eval_accuracy_dropoff": 0.5917979770178216, |
|
"eval_accuracy_undropoff": 0.8950694394001056, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3289015454744775, |
|
"eval_iou_undropoff": 0.8807744496703084, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.633358359336853, |
|
"eval_mean_accuracy": 0.7434337082089636, |
|
"eval_mean_iou": 0.40322533171492864, |
|
"eval_overall_accuracy": 0.8824596405029297, |
|
"eval_runtime": 2.376, |
|
"eval_samples_per_second": 8.418, |
|
"eval_steps_per_second": 0.842, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 105.5, |
|
"learning_rate": 2.543859649122807e-07, |
|
"loss": 0.6613, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"learning_rate": 2.456140350877193e-07, |
|
"loss": 0.6392, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 106.5, |
|
"learning_rate": 2.3684210526315787e-07, |
|
"loss": 0.6579, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 107.0, |
|
"learning_rate": 2.2807017543859647e-07, |
|
"loss": 0.6415, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 107.5, |
|
"learning_rate": 2.1929824561403507e-07, |
|
"loss": 0.6327, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 108.0, |
|
"learning_rate": 2.1052631578947366e-07, |
|
"loss": 0.6781, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 108.5, |
|
"learning_rate": 2.0175438596491226e-07, |
|
"loss": 0.6501, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 109.0, |
|
"learning_rate": 1.9298245614035086e-07, |
|
"loss": 0.6548, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 109.5, |
|
"learning_rate": 1.8421052631578946e-07, |
|
"loss": 0.6426, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"learning_rate": 1.7543859649122805e-07, |
|
"loss": 0.658, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"eval_accuracy_dropoff": 0.5968164407440537, |
|
"eval_accuracy_undropoff": 0.8934095009139512, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3284923282019689, |
|
"eval_iou_undropoff": 0.879300818272647, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6345422267913818, |
|
"eval_mean_accuracy": 0.7451129708290025, |
|
"eval_mean_iou": 0.4025977154915386, |
|
"eval_overall_accuracy": 0.8810773849487304, |
|
"eval_runtime": 2.4124, |
|
"eval_samples_per_second": 8.291, |
|
"eval_steps_per_second": 0.829, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 110.5, |
|
"learning_rate": 1.6666666666666665e-07, |
|
"loss": 0.6447, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 111.0, |
|
"learning_rate": 1.5789473684210525e-07, |
|
"loss": 0.6614, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 111.5, |
|
"learning_rate": 1.4912280701754385e-07, |
|
"loss": 0.6471, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 112.0, |
|
"learning_rate": 1.4035087719298244e-07, |
|
"loss": 0.666, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 112.5, |
|
"learning_rate": 1.3157894736842104e-07, |
|
"loss": 0.6325, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 113.0, |
|
"learning_rate": 1.2280701754385964e-07, |
|
"loss": 0.6846, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 113.5, |
|
"learning_rate": 1.1403508771929823e-07, |
|
"loss": 0.6417, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 114.0, |
|
"learning_rate": 1.0526315789473683e-07, |
|
"loss": 0.6601, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 114.5, |
|
"learning_rate": 9.649122807017543e-08, |
|
"loss": 0.655, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"learning_rate": 8.771929824561403e-08, |
|
"loss": 0.6575, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"eval_accuracy_dropoff": 0.5947659349985092, |
|
"eval_accuracy_undropoff": 0.8977401472869528, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3314162436288719, |
|
"eval_iou_undropoff": 0.8834775915808935, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.629997193813324, |
|
"eval_mean_accuracy": 0.746253041142731, |
|
"eval_mean_iou": 0.40496461173658843, |
|
"eval_overall_accuracy": 0.885142707824707, |
|
"eval_runtime": 2.3255, |
|
"eval_samples_per_second": 8.6, |
|
"eval_steps_per_second": 0.86, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 115.5, |
|
"learning_rate": 7.894736842105262e-08, |
|
"loss": 0.6416, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 116.0, |
|
"learning_rate": 7.017543859649122e-08, |
|
"loss": 0.658, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 116.5, |
|
"learning_rate": 6.140350877192982e-08, |
|
"loss": 0.6671, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 117.0, |
|
"learning_rate": 5.2631578947368416e-08, |
|
"loss": 0.6314, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 117.5, |
|
"learning_rate": 4.385964912280701e-08, |
|
"loss": 0.6438, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 118.0, |
|
"learning_rate": 3.508771929824561e-08, |
|
"loss": 0.6678, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 118.5, |
|
"learning_rate": 2.6315789473684208e-08, |
|
"loss": 0.6558, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 119.0, |
|
"learning_rate": 1.7543859649122805e-08, |
|
"loss": 0.6432, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 119.5, |
|
"learning_rate": 8.771929824561403e-09, |
|
"loss": 0.6433, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"learning_rate": 0.0, |
|
"loss": 0.6625, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"eval_accuracy_dropoff": 0.5956191655771922, |
|
"eval_accuracy_undropoff": 0.8985765843397411, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.33180686807149173, |
|
"eval_iou_undropoff": 0.884310092282515, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6278509497642517, |
|
"eval_mean_accuracy": 0.7470978749584667, |
|
"eval_mean_iou": 0.4053723201180022, |
|
"eval_overall_accuracy": 0.8859798431396484, |
|
"eval_runtime": 2.3839, |
|
"eval_samples_per_second": 8.39, |
|
"eval_steps_per_second": 0.839, |
|
"step": 240 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 1.3891198199110042e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|