|
{ |
|
"best_metric": 0.2344692200422287, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGBD-b0_6/checkpoint-200", |
|
"epoch": 100.0, |
|
"global_step": 200, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 5.833333333333332e-06, |
|
"loss": 1.079, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.1666666666666665e-05, |
|
"loss": 1.0795, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 1.75e-05, |
|
"loss": 1.0738, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 2.333333333333333e-05, |
|
"loss": 1.0662, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 2.9166666666666666e-05, |
|
"loss": 1.0593, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 3.5e-05, |
|
"loss": 1.0514, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 4.0833333333333334e-05, |
|
"loss": 1.0365, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 4.666666666666666e-05, |
|
"loss": 1.0287, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 5.2499999999999995e-05, |
|
"loss": 1.0123, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 5.833333333333333e-05, |
|
"loss": 0.9975, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.6267483199155944, |
|
"eval_accuracy_undropoff": 0.7225725563868626, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.128993031460863, |
|
"eval_iou_undropoff": 0.7166682161261027, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.047046422958374, |
|
"eval_mean_accuracy": 0.6746604381512284, |
|
"eval_mean_iou": 0.28188708252898853, |
|
"eval_overall_accuracy": 0.7185882568359375, |
|
"eval_runtime": 2.9658, |
|
"eval_samples_per_second": 6.744, |
|
"eval_steps_per_second": 0.674, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 6.416666666666666e-05, |
|
"loss": 0.9837, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 7e-05, |
|
"loss": 0.9765, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 6.969298245614035e-05, |
|
"loss": 0.9655, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 6.938596491228069e-05, |
|
"loss": 0.9246, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 6.907894736842105e-05, |
|
"loss": 0.8986, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 6.877192982456139e-05, |
|
"loss": 0.8794, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 6.846491228070175e-05, |
|
"loss": 0.8586, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 6.81578947368421e-05, |
|
"loss": 0.8485, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 6.785087719298245e-05, |
|
"loss": 0.8307, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 6.75438596491228e-05, |
|
"loss": 0.8329, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.01171586504277621, |
|
"eval_accuracy_undropoff": 0.9934380587814448, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.010567823996491185, |
|
"eval_iou_undropoff": 0.9526087857573317, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8435441255569458, |
|
"eval_mean_accuracy": 0.5025769619121104, |
|
"eval_mean_iou": 0.3210588699179409, |
|
"eval_overall_accuracy": 0.9526187896728515, |
|
"eval_runtime": 2.8522, |
|
"eval_samples_per_second": 7.012, |
|
"eval_steps_per_second": 0.701, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 6.723684210526316e-05, |
|
"loss": 0.8067, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 6.69298245614035e-05, |
|
"loss": 0.7975, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 6.662280701754386e-05, |
|
"loss": 0.7694, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 6.63157894736842e-05, |
|
"loss": 0.7727, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 6.600877192982455e-05, |
|
"loss": 0.757, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 6.570175438596491e-05, |
|
"loss": 0.7564, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 6.539473684210525e-05, |
|
"loss": 0.7134, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 6.508771929824561e-05, |
|
"loss": 0.7425, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 6.478070175438596e-05, |
|
"loss": 0.7061, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 6.447368421052631e-05, |
|
"loss": 0.6857, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.0006468038257758205, |
|
"eval_accuracy_undropoff": 0.998134683679328, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.0006266750223336311, |
|
"eval_iou_undropoff": 0.9566587236175594, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6183835864067078, |
|
"eval_mean_accuracy": 0.49939074375255194, |
|
"eval_mean_iou": 0.3190951328799643, |
|
"eval_overall_accuracy": 0.9566598892211914, |
|
"eval_runtime": 3.0847, |
|
"eval_samples_per_second": 6.484, |
|
"eval_steps_per_second": 0.648, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 6.416666666666666e-05, |
|
"loss": 0.6817, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 6.3859649122807e-05, |
|
"loss": 0.7188, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 6.355263157894736e-05, |
|
"loss": 0.6683, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 6.324561403508772e-05, |
|
"loss": 0.6371, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 6.293859649122806e-05, |
|
"loss": 0.651, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 6.263157894736842e-05, |
|
"loss": 0.6381, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 6.232456140350877e-05, |
|
"loss": 0.6075, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 6.201754385964911e-05, |
|
"loss": 0.6391, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 6.171052631578947e-05, |
|
"loss": 0.6282, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 6.140350877192981e-05, |
|
"loss": 0.5913, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.0005458840799100897, |
|
"eval_accuracy_undropoff": 0.9988294259470615, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.0005386297079178567, |
|
"eval_iou_undropoff": 0.957320579747961, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.47930747270584106, |
|
"eval_mean_accuracy": 0.4996876550134858, |
|
"eval_mean_iou": 0.3192864031519596, |
|
"eval_overall_accuracy": 0.9573215484619141, |
|
"eval_runtime": 2.984, |
|
"eval_samples_per_second": 6.702, |
|
"eval_steps_per_second": 0.67, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 6.109649122807017e-05, |
|
"loss": 0.6276, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 6.0789473684210525e-05, |
|
"loss": 0.5505, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 6.0482456140350875e-05, |
|
"loss": 0.6116, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 6.0175438596491224e-05, |
|
"loss": 0.6011, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 5.986842105263157e-05, |
|
"loss": 0.5694, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 5.956140350877192e-05, |
|
"loss": 0.5327, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 5.925438596491227e-05, |
|
"loss": 0.5521, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 5.894736842105262e-05, |
|
"loss": 0.5535, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 5.8640350877192984e-05, |
|
"loss": 0.5362, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 5.833333333333333e-05, |
|
"loss": 0.5299, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.09108924516617353, |
|
"eval_accuracy_undropoff": 0.9973127743221984, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.08687453788975855, |
|
"eval_iou_undropoff": 0.9594810504970981, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4528784155845642, |
|
"eval_mean_accuracy": 0.544201009744186, |
|
"eval_mean_iou": 0.34878519612895226, |
|
"eval_overall_accuracy": 0.959632682800293, |
|
"eval_runtime": 2.8843, |
|
"eval_samples_per_second": 6.934, |
|
"eval_steps_per_second": 0.693, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 5.802631578947368e-05, |
|
"loss": 0.5134, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 5.771929824561403e-05, |
|
"loss": 0.5267, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 5.741228070175438e-05, |
|
"loss": 0.5084, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 5.710526315789473e-05, |
|
"loss": 0.5228, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 5.679824561403508e-05, |
|
"loss": 0.4761, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 5.649122807017543e-05, |
|
"loss": 0.4738, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 5.618421052631579e-05, |
|
"loss": 0.5078, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 5.587719298245614e-05, |
|
"loss": 0.4839, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 5.557017543859649e-05, |
|
"loss": 0.5064, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 5.526315789473684e-05, |
|
"loss": 0.4922, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.4050918599050437, |
|
"eval_accuracy_undropoff": 0.9915056762493072, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.33902157179657477, |
|
"eval_iou_undropoff": 0.9665597842071078, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4037347435951233, |
|
"eval_mean_accuracy": 0.6982987680771755, |
|
"eval_mean_iou": 0.4351937853345609, |
|
"eval_overall_accuracy": 0.9671230316162109, |
|
"eval_runtime": 2.9628, |
|
"eval_samples_per_second": 6.75, |
|
"eval_steps_per_second": 0.675, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 5.495614035087719e-05, |
|
"loss": 0.4806, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 5.464912280701754e-05, |
|
"loss": 0.4603, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 5.4342105263157886e-05, |
|
"loss": 0.4779, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 5.4035087719298236e-05, |
|
"loss": 0.4506, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 5.37280701754386e-05, |
|
"loss": 0.413, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 5.342105263157895e-05, |
|
"loss": 0.4503, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 5.31140350877193e-05, |
|
"loss": 0.4412, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 5.2807017543859646e-05, |
|
"loss": 0.4465, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 5.2499999999999995e-05, |
|
"loss": 0.4392, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 5.2192982456140345e-05, |
|
"loss": 0.4769, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.5524438633913622, |
|
"eval_accuracy_undropoff": 0.9595162874374239, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.28580378904201265, |
|
"eval_iou_undropoff": 0.9412408125018302, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4160759449005127, |
|
"eval_mean_accuracy": 0.755980075414393, |
|
"eval_mean_iou": 0.409014867181281, |
|
"eval_overall_accuracy": 0.9425905227661133, |
|
"eval_runtime": 2.7982, |
|
"eval_samples_per_second": 7.148, |
|
"eval_steps_per_second": 0.715, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 5.1885964912280694e-05, |
|
"loss": 0.4447, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 5.157894736842104e-05, |
|
"loss": 0.4664, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 5.127192982456139e-05, |
|
"loss": 0.4283, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 5.0964912280701755e-05, |
|
"loss": 0.4043, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 5.0657894736842104e-05, |
|
"loss": 0.4003, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 5.0350877192982454e-05, |
|
"loss": 0.3996, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 5.00438596491228e-05, |
|
"loss": 0.403, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 4.973684210526315e-05, |
|
"loss": 0.4097, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 4.94298245614035e-05, |
|
"loss": 0.3745, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 4.912280701754385e-05, |
|
"loss": 0.3916, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.4036193490676392, |
|
"eval_accuracy_undropoff": 0.9856426564986064, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3032580935344783, |
|
"eval_iou_undropoff": 0.9607844430524553, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3343408703804016, |
|
"eval_mean_accuracy": 0.6946310027831228, |
|
"eval_mean_iou": 0.6320212682934667, |
|
"eval_overall_accuracy": 0.9614425659179687, |
|
"eval_runtime": 2.924, |
|
"eval_samples_per_second": 6.84, |
|
"eval_steps_per_second": 0.684, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 4.88157894736842e-05, |
|
"loss": 0.388, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 4.850877192982456e-05, |
|
"loss": 0.3773, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 4.820175438596491e-05, |
|
"loss": 0.3627, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 4.789473684210526e-05, |
|
"loss": 0.3741, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 4.758771929824561e-05, |
|
"loss": 0.3713, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 4.728070175438596e-05, |
|
"loss": 0.3504, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 4.697368421052631e-05, |
|
"loss": 0.3656, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 4.666666666666666e-05, |
|
"loss": 0.3492, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 4.635964912280701e-05, |
|
"loss": 0.3917, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 4.605263157894737e-05, |
|
"loss": 0.3567, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_dropoff": 0.44580380283951465, |
|
"eval_accuracy_undropoff": 0.9821179987203688, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3156829624817281, |
|
"eval_iou_undropoff": 0.9590595792950887, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3371790051460266, |
|
"eval_mean_accuracy": 0.7139609007799417, |
|
"eval_mean_iou": 0.6373712708884084, |
|
"eval_overall_accuracy": 0.9598184585571289, |
|
"eval_runtime": 3.0037, |
|
"eval_samples_per_second": 6.658, |
|
"eval_steps_per_second": 0.666, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 4.574561403508772e-05, |
|
"loss": 0.3213, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 4.543859649122807e-05, |
|
"loss": 0.3371, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 4.513157894736842e-05, |
|
"loss": 0.3338, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 4.482456140350877e-05, |
|
"loss": 0.3045, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 4.4517543859649116e-05, |
|
"loss": 0.3385, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 4.4210526315789466e-05, |
|
"loss": 0.3173, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 4.3903508771929815e-05, |
|
"loss": 0.3285, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 4.359649122807018e-05, |
|
"loss": 0.3121, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 4.328947368421053e-05, |
|
"loss": 0.3049, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 4.2982456140350876e-05, |
|
"loss": 0.3234, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.3863024381293149, |
|
"eval_accuracy_undropoff": 0.9902968923666909, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.31569397792706333, |
|
"eval_iou_undropoff": 0.9646148967136478, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3073747456073761, |
|
"eval_mean_accuracy": 0.6882996652480029, |
|
"eval_mean_iou": 0.6401544373203556, |
|
"eval_overall_accuracy": 0.9651832580566406, |
|
"eval_runtime": 3.0588, |
|
"eval_samples_per_second": 6.539, |
|
"eval_steps_per_second": 0.654, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 4.2675438596491225e-05, |
|
"loss": 0.3488, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 4.2368421052631575e-05, |
|
"loss": 0.3084, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 4.2061403508771924e-05, |
|
"loss": 0.29, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 4.175438596491227e-05, |
|
"loss": 0.3101, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 4.144736842105262e-05, |
|
"loss": 0.3028, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 4.1140350877192985e-05, |
|
"loss": 0.314, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 4.0833333333333334e-05, |
|
"loss": 0.2912, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 4.0526315789473684e-05, |
|
"loss": 0.3274, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 4.021929824561403e-05, |
|
"loss": 0.2956, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 3.991228070175438e-05, |
|
"loss": 0.3181, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_dropoff": 0.4446202894561802, |
|
"eval_accuracy_undropoff": 0.9829880683836546, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3193806470320748, |
|
"eval_iou_undropoff": 0.9598610947822125, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3043469786643982, |
|
"eval_mean_accuracy": 0.7138041789199174, |
|
"eval_mean_iou": 0.6396208709071436, |
|
"eval_overall_accuracy": 0.960603141784668, |
|
"eval_runtime": 2.8365, |
|
"eval_samples_per_second": 7.051, |
|
"eval_steps_per_second": 0.705, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 3.960526315789473e-05, |
|
"loss": 0.2801, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 3.929824561403508e-05, |
|
"loss": 0.2676, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 3.899122807017543e-05, |
|
"loss": 0.309, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 3.868421052631579e-05, |
|
"loss": 0.2612, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 3.837719298245614e-05, |
|
"loss": 0.2854, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 3.807017543859649e-05, |
|
"loss": 0.2848, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 3.776315789473684e-05, |
|
"loss": 0.2815, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 3.745614035087719e-05, |
|
"loss": 0.2925, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 3.714912280701754e-05, |
|
"loss": 0.2696, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 3.684210526315789e-05, |
|
"loss": 0.2584, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.4576160003669809, |
|
"eval_accuracy_undropoff": 0.9831222008065856, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.329447431341726, |
|
"eval_iou_undropoff": 0.9605208665087082, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.30685028433799744, |
|
"eval_mean_accuracy": 0.7203691005867833, |
|
"eval_mean_iou": 0.644984148925217, |
|
"eval_overall_accuracy": 0.9612720489501954, |
|
"eval_runtime": 2.8737, |
|
"eval_samples_per_second": 6.96, |
|
"eval_steps_per_second": 0.696, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 3.653508771929824e-05, |
|
"loss": 0.2541, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 3.62280701754386e-05, |
|
"loss": 0.2704, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 3.592105263157895e-05, |
|
"loss": 0.2804, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 3.56140350877193e-05, |
|
"loss": 0.283, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 3.530701754385965e-05, |
|
"loss": 0.2669, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 3.5e-05, |
|
"loss": 0.247, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 3.4692982456140346e-05, |
|
"loss": 0.2767, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 3.4385964912280695e-05, |
|
"loss": 0.2632, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 3.407894736842105e-05, |
|
"loss": 0.2451, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 3.37719298245614e-05, |
|
"loss": 0.2566, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_dropoff": 0.4262666574921443, |
|
"eval_accuracy_undropoff": 0.9862876463839471, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3238921149676889, |
|
"eval_iou_undropoff": 0.9623348238315046, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.2823648154735565, |
|
"eval_mean_accuracy": 0.7062771519380457, |
|
"eval_mean_iou": 0.6431134693995968, |
|
"eval_overall_accuracy": 0.9630023956298828, |
|
"eval_runtime": 2.9223, |
|
"eval_samples_per_second": 6.844, |
|
"eval_steps_per_second": 0.684, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 3.346491228070175e-05, |
|
"loss": 0.2521, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 3.31578947368421e-05, |
|
"loss": 0.2365, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 3.2850877192982455e-05, |
|
"loss": 0.2561, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 3.2543859649122804e-05, |
|
"loss": 0.2301, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 3.2236842105263154e-05, |
|
"loss": 0.2501, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 3.19298245614035e-05, |
|
"loss": 0.2413, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 3.162280701754386e-05, |
|
"loss": 0.2298, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 3.131578947368421e-05, |
|
"loss": 0.2468, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 3.100877192982456e-05, |
|
"loss": 0.2407, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 3.070175438596491e-05, |
|
"loss": 0.2353, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.42118855937062777, |
|
"eval_accuracy_undropoff": 0.9880347510440537, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.33013566135359323, |
|
"eval_iou_undropoff": 0.9638323196590685, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.27631157636642456, |
|
"eval_mean_accuracy": 0.7046116552073407, |
|
"eval_mean_iou": 0.6469839905063308, |
|
"eval_overall_accuracy": 0.9644657135009765, |
|
"eval_runtime": 2.8477, |
|
"eval_samples_per_second": 7.023, |
|
"eval_steps_per_second": 0.702, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 3.0394736842105263e-05, |
|
"loss": 0.2337, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 3.0087719298245612e-05, |
|
"loss": 0.244, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 2.978070175438596e-05, |
|
"loss": 0.23, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 2.947368421052631e-05, |
|
"loss": 0.2381, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 2.9166666666666666e-05, |
|
"loss": 0.2376, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 2.8859649122807016e-05, |
|
"loss": 0.2279, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 2.8552631578947365e-05, |
|
"loss": 0.2189, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 2.8245614035087714e-05, |
|
"loss": 0.225, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 2.793859649122807e-05, |
|
"loss": 0.2154, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 2.763157894736842e-05, |
|
"loss": 0.2368, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_dropoff": 0.40438083442280787, |
|
"eval_accuracy_undropoff": 0.9901615658865824, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3296277184480541, |
|
"eval_iou_undropoff": 0.9652204665736905, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.26441723108291626, |
|
"eval_mean_accuracy": 0.6972712001546951, |
|
"eval_mean_iou": 0.6474240925108723, |
|
"eval_overall_accuracy": 0.9658052444458007, |
|
"eval_runtime": 2.8488, |
|
"eval_samples_per_second": 7.02, |
|
"eval_steps_per_second": 0.702, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 2.732456140350877e-05, |
|
"loss": 0.238, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 2.7017543859649118e-05, |
|
"loss": 0.2203, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 2.6710526315789474e-05, |
|
"loss": 0.2107, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 2.6403508771929823e-05, |
|
"loss": 0.2256, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 2.6096491228070172e-05, |
|
"loss": 0.2311, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 2.578947368421052e-05, |
|
"loss": 0.2081, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 2.5482456140350878e-05, |
|
"loss": 0.2142, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 2.5175438596491227e-05, |
|
"loss": 0.2102, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 2.4868421052631576e-05, |
|
"loss": 0.2177, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 2.4561403508771925e-05, |
|
"loss": 0.2225, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.4312667721736737, |
|
"eval_accuracy_undropoff": 0.9866235744698635, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3296307983591038, |
|
"eval_iou_undropoff": 0.9628663865014161, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.26733678579330444, |
|
"eval_mean_accuracy": 0.7089451733217687, |
|
"eval_mean_iou": 0.6462485924302599, |
|
"eval_overall_accuracy": 0.9635322570800782, |
|
"eval_runtime": 3.0269, |
|
"eval_samples_per_second": 6.607, |
|
"eval_steps_per_second": 0.661, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 2.425438596491228e-05, |
|
"loss": 0.2138, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 2.394736842105263e-05, |
|
"loss": 0.2049, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 2.364035087719298e-05, |
|
"loss": 0.2031, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 2.333333333333333e-05, |
|
"loss": 0.2172, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 2.3026315789473685e-05, |
|
"loss": 0.1964, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 2.2719298245614034e-05, |
|
"loss": 0.211, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 2.2412280701754384e-05, |
|
"loss": 0.2032, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 2.2105263157894733e-05, |
|
"loss": 0.2194, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 2.179824561403509e-05, |
|
"loss": 0.2043, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 2.1491228070175438e-05, |
|
"loss": 0.1976, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_dropoff": 0.4243675313653983, |
|
"eval_accuracy_undropoff": 0.9870488578345574, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.32680615954188497, |
|
"eval_iou_undropoff": 0.9630001339709574, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.25681835412979126, |
|
"eval_mean_accuracy": 0.7057081945999779, |
|
"eval_mean_iou": 0.6449031467564212, |
|
"eval_overall_accuracy": 0.9636529922485352, |
|
"eval_runtime": 2.9331, |
|
"eval_samples_per_second": 6.819, |
|
"eval_steps_per_second": 0.682, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 2.1184210526315787e-05, |
|
"loss": 0.2023, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 2.0877192982456137e-05, |
|
"loss": 0.1936, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 2.0570175438596493e-05, |
|
"loss": 0.2093, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 2.0263157894736842e-05, |
|
"loss": 0.1947, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 1.995614035087719e-05, |
|
"loss": 0.2036, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 1.964912280701754e-05, |
|
"loss": 0.212, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 1.9342105263157896e-05, |
|
"loss": 0.2043, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 1.9035087719298245e-05, |
|
"loss": 0.1904, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 1.8728070175438595e-05, |
|
"loss": 0.1993, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 1.8421052631578944e-05, |
|
"loss": 0.1981, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.4364641390857589, |
|
"eval_accuracy_undropoff": 0.9854585726837529, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.32689374158260726, |
|
"eval_iou_undropoff": 0.9619411107763359, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.25717592239379883, |
|
"eval_mean_accuracy": 0.7109613558847558, |
|
"eval_mean_iou": 0.6444174261794716, |
|
"eval_overall_accuracy": 0.9626317977905273, |
|
"eval_runtime": 2.8139, |
|
"eval_samples_per_second": 7.108, |
|
"eval_steps_per_second": 0.711, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 1.81140350877193e-05, |
|
"loss": 0.1837, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 1.780701754385965e-05, |
|
"loss": 0.198, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 1.75e-05, |
|
"loss": 0.1923, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 1.7192982456140348e-05, |
|
"loss": 0.1936, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 1.68859649122807e-05, |
|
"loss": 0.1893, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 1.657894736842105e-05, |
|
"loss": 0.2052, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 1.6271929824561402e-05, |
|
"loss": 0.1878, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 1.596491228070175e-05, |
|
"loss": 0.1862, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 1.5657894736842104e-05, |
|
"loss": 0.1868, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 1.5350877192982453e-05, |
|
"loss": 0.1857, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_dropoff": 0.41567467143741826, |
|
"eval_accuracy_undropoff": 0.9896526985194686, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3356247847134513, |
|
"eval_iou_undropoff": 0.965185408747415, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.25026845932006836, |
|
"eval_mean_accuracy": 0.7026636849784434, |
|
"eval_mean_iou": 0.6504050967304331, |
|
"eval_overall_accuracy": 0.965787124633789, |
|
"eval_runtime": 3.0421, |
|
"eval_samples_per_second": 6.574, |
|
"eval_steps_per_second": 0.657, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 1.5043859649122806e-05, |
|
"loss": 0.193, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 1.4736842105263155e-05, |
|
"loss": 0.1832, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 1.4429824561403508e-05, |
|
"loss": 0.1883, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 1.4122807017543857e-05, |
|
"loss": 0.1873, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 1.381578947368421e-05, |
|
"loss": 0.1945, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 1.3508771929824559e-05, |
|
"loss": 0.186, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 1.3201754385964912e-05, |
|
"loss": 0.1763, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 1.289473684210526e-05, |
|
"loss": 0.1973, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 1.2587719298245613e-05, |
|
"loss": 0.1743, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 1.2280701754385963e-05, |
|
"loss": 0.1826, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.40587169430491526, |
|
"eval_accuracy_undropoff": 0.9909383000804993, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3357429353126221, |
|
"eval_iou_undropoff": 0.9660385433654574, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.2344692200422287, |
|
"eval_mean_accuracy": 0.6984049971927073, |
|
"eval_mean_iou": 0.6508907393390397, |
|
"eval_overall_accuracy": 0.966611671447754, |
|
"eval_runtime": 2.9534, |
|
"eval_samples_per_second": 6.772, |
|
"eval_steps_per_second": 0.677, |
|
"step": 200 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 1.017979138473984e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|