|
{ |
|
"best_metric": 0.17315250635147095, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b5_7/checkpoint-120", |
|
"epoch": 120.0, |
|
"global_step": 240, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 1.017, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 1.025, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 1.25e-05, |
|
"loss": 1.0124, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.9968, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 2.0833333333333336e-05, |
|
"loss": 0.9685, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.9552, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 2.916666666666667e-05, |
|
"loss": 0.9261, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.8902, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 3.7500000000000003e-05, |
|
"loss": 0.8624, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 0.8255, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.61789949310764, |
|
"eval_accuracy_undropoff": 0.9532616169325269, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.30068083396580203, |
|
"eval_iou_undropoff": 0.9377191953832399, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7949008941650391, |
|
"eval_mean_accuracy": 0.7855805550200834, |
|
"eval_mean_iou": 0.41280000978301395, |
|
"eval_overall_accuracy": 0.9393175125122071, |
|
"eval_runtime": 2.0065, |
|
"eval_samples_per_second": 9.968, |
|
"eval_steps_per_second": 0.997, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 4.5833333333333334e-05, |
|
"loss": 0.7939, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 5e-05, |
|
"loss": 0.7364, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 4.9780701754385966e-05, |
|
"loss": 0.6993, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 4.956140350877193e-05, |
|
"loss": 0.6614, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 4.9342105263157894e-05, |
|
"loss": 0.5986, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 4.912280701754386e-05, |
|
"loss": 0.5876, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 4.890350877192983e-05, |
|
"loss": 0.5312, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 4.868421052631579e-05, |
|
"loss": 0.5088, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 4.8464912280701755e-05, |
|
"loss": 0.462, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 4.824561403508772e-05, |
|
"loss": 0.4434, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.41871602559691734, |
|
"eval_accuracy_undropoff": 0.9944325094007127, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3713824207926633, |
|
"eval_iou_undropoff": 0.9699718650625573, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.42470020055770874, |
|
"eval_mean_accuracy": 0.706574267498815, |
|
"eval_mean_iou": 0.4471180952850735, |
|
"eval_overall_accuracy": 0.9704946517944336, |
|
"eval_runtime": 2.0254, |
|
"eval_samples_per_second": 9.875, |
|
"eval_steps_per_second": 0.987, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 4.802631578947368e-05, |
|
"loss": 0.3698, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 4.780701754385965e-05, |
|
"loss": 0.4431, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 4.758771929824562e-05, |
|
"loss": 0.3667, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.3123, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 4.714912280701755e-05, |
|
"loss": 0.3021, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 4.6929824561403515e-05, |
|
"loss": 0.2765, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 4.671052631578948e-05, |
|
"loss": 0.2547, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 4.649122807017544e-05, |
|
"loss": 0.2615, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 4.6271929824561406e-05, |
|
"loss": 0.2296, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 4.605263157894737e-05, |
|
"loss": 0.2107, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.40458267391453934, |
|
"eval_accuracy_undropoff": 0.9961075726111145, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3712713740875759, |
|
"eval_iou_undropoff": 0.9710249883455491, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.2725875973701477, |
|
"eval_mean_accuracy": 0.7003451232628269, |
|
"eval_mean_iou": 0.6711481812165625, |
|
"eval_overall_accuracy": 0.9715124130249023, |
|
"eval_runtime": 2.0394, |
|
"eval_samples_per_second": 9.807, |
|
"eval_steps_per_second": 0.981, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 4.5833333333333334e-05, |
|
"loss": 0.205, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 4.56140350877193e-05, |
|
"loss": 0.1816, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 4.539473684210527e-05, |
|
"loss": 0.1842, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 4.517543859649123e-05, |
|
"loss": 0.149, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 4.4956140350877196e-05, |
|
"loss": 0.1361, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.1646, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 4.451754385964912e-05, |
|
"loss": 0.1288, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 4.429824561403509e-05, |
|
"loss": 0.1178, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 4.407894736842105e-05, |
|
"loss": 0.1103, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 4.3859649122807014e-05, |
|
"loss": 0.1678, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.47821739030711713, |
|
"eval_accuracy_undropoff": 0.9904238604465575, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.39174564284479585, |
|
"eval_iou_undropoff": 0.9685003651750826, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.23879647254943848, |
|
"eval_mean_accuracy": 0.7343206253768373, |
|
"eval_mean_iou": 0.6801230040099393, |
|
"eval_overall_accuracy": 0.9691267013549805, |
|
"eval_runtime": 2.833, |
|
"eval_samples_per_second": 7.06, |
|
"eval_steps_per_second": 0.706, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 4.3640350877192985e-05, |
|
"loss": 0.0934, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 4.342105263157895e-05, |
|
"loss": 0.1108, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 4.320175438596491e-05, |
|
"loss": 0.0932, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 4.298245614035088e-05, |
|
"loss": 0.097, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 4.2763157894736847e-05, |
|
"loss": 0.0813, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 4.254385964912281e-05, |
|
"loss": 0.1387, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 4.2324561403508774e-05, |
|
"loss": 0.0835, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.0745, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 4.188596491228071e-05, |
|
"loss": 0.0736, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 0.0972, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.42408770843367966, |
|
"eval_accuracy_undropoff": 0.9951975020323848, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3818202843962053, |
|
"eval_iou_undropoff": 0.9709387427964766, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.18494448065757751, |
|
"eval_mean_accuracy": 0.7096426052330322, |
|
"eval_mean_iou": 0.676379513596341, |
|
"eval_overall_accuracy": 0.9714511871337891, |
|
"eval_runtime": 2.046, |
|
"eval_samples_per_second": 9.775, |
|
"eval_steps_per_second": 0.978, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 4.1447368421052636e-05, |
|
"loss": 0.0686, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 4.12280701754386e-05, |
|
"loss": 0.0779, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 4.100877192982456e-05, |
|
"loss": 0.0723, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 4.078947368421053e-05, |
|
"loss": 0.0685, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.057017543859649e-05, |
|
"loss": 0.0662, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 4.0350877192982455e-05, |
|
"loss": 0.0641, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 4.0131578947368425e-05, |
|
"loss": 0.0567, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 3.991228070175439e-05, |
|
"loss": 0.0769, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 3.969298245614035e-05, |
|
"loss": 0.0618, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 3.9473684210526316e-05, |
|
"loss": 0.0604, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.5239248606619418, |
|
"eval_accuracy_undropoff": 0.9897283221407057, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.42364186677942717, |
|
"eval_iou_undropoff": 0.9697005089236638, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.20193985104560852, |
|
"eval_mean_accuracy": 0.7568265914013237, |
|
"eval_mean_iou": 0.464447458567697, |
|
"eval_overall_accuracy": 0.9703605651855469, |
|
"eval_runtime": 2.0527, |
|
"eval_samples_per_second": 9.743, |
|
"eval_steps_per_second": 0.974, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 3.925438596491228e-05, |
|
"loss": 0.0556, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 3.9035087719298244e-05, |
|
"loss": 0.0561, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 3.8815789473684214e-05, |
|
"loss": 0.055, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 3.859649122807018e-05, |
|
"loss": 0.0524, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 3.837719298245614e-05, |
|
"loss": 0.051, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 3.815789473684211e-05, |
|
"loss": 0.0542, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 3.7938596491228076e-05, |
|
"loss": 0.0516, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 3.771929824561404e-05, |
|
"loss": 0.0466, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 3.7500000000000003e-05, |
|
"loss": 0.0502, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 3.728070175438597e-05, |
|
"loss": 0.0497, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.4775384756531113, |
|
"eval_accuracy_undropoff": 0.9913645784928411, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.39826387693343585, |
|
"eval_iou_undropoff": 0.9693923400870129, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.1793430596590042, |
|
"eval_mean_accuracy": 0.7344515270729762, |
|
"eval_mean_iou": 0.6838281085102244, |
|
"eval_overall_accuracy": 0.9700000762939454, |
|
"eval_runtime": 6.1591, |
|
"eval_samples_per_second": 3.247, |
|
"eval_steps_per_second": 0.325, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 3.706140350877193e-05, |
|
"loss": 0.0442, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 3.6842105263157895e-05, |
|
"loss": 0.0477, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 3.662280701754386e-05, |
|
"loss": 0.0418, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 3.640350877192983e-05, |
|
"loss": 0.0829, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 3.618421052631579e-05, |
|
"loss": 0.0458, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 3.5964912280701756e-05, |
|
"loss": 0.0465, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 3.574561403508772e-05, |
|
"loss": 0.0445, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 3.5526315789473684e-05, |
|
"loss": 0.0509, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 3.530701754385965e-05, |
|
"loss": 0.0429, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 3.508771929824561e-05, |
|
"loss": 0.0492, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.5238606390054817, |
|
"eval_accuracy_undropoff": 0.9895599600786884, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.42225237750136807, |
|
"eval_iou_undropoff": 0.969532907193877, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.1999509036540985, |
|
"eval_mean_accuracy": 0.7567102995420851, |
|
"eval_mean_iou": 0.46392842823174835, |
|
"eval_overall_accuracy": 0.970196533203125, |
|
"eval_runtime": 2.1001, |
|
"eval_samples_per_second": 9.523, |
|
"eval_steps_per_second": 0.952, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 3.4868421052631575e-05, |
|
"loss": 0.0405, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 3.4649122807017546e-05, |
|
"loss": 0.0504, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 3.442982456140351e-05, |
|
"loss": 0.0391, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 3.421052631578947e-05, |
|
"loss": 0.0491, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 3.3991228070175444e-05, |
|
"loss": 0.0453, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 3.377192982456141e-05, |
|
"loss": 0.0471, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 3.355263157894737e-05, |
|
"loss": 0.0401, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.0405, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 3.31140350877193e-05, |
|
"loss": 0.0438, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 3.289473684210527e-05, |
|
"loss": 0.0409, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_dropoff": 0.5687102915204477, |
|
"eval_accuracy_undropoff": 0.9869465669363577, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.4371709563555452, |
|
"eval_iou_undropoff": 0.9688193019443224, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.18931810557842255, |
|
"eval_mean_accuracy": 0.7778284292284028, |
|
"eval_mean_iou": 0.7029951291499338, |
|
"eval_overall_accuracy": 0.9695566177368165, |
|
"eval_runtime": 2.0628, |
|
"eval_samples_per_second": 9.696, |
|
"eval_steps_per_second": 0.97, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 3.267543859649123e-05, |
|
"loss": 0.0392, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 3.24561403508772e-05, |
|
"loss": 0.0384, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 3.223684210526316e-05, |
|
"loss": 0.0429, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 3.2017543859649124e-05, |
|
"loss": 0.0351, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 3.179824561403509e-05, |
|
"loss": 0.0372, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 3.157894736842105e-05, |
|
"loss": 0.0429, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 3.1359649122807015e-05, |
|
"loss": 0.0358, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 3.1140350877192986e-05, |
|
"loss": 0.0388, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 3.092105263157895e-05, |
|
"loss": 0.0356, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 3.0701754385964913e-05, |
|
"loss": 0.0328, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.5544622583086768, |
|
"eval_accuracy_undropoff": 0.9884865026761807, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.438174508517341, |
|
"eval_iou_undropoff": 0.969742542478441, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.18416434526443481, |
|
"eval_mean_accuracy": 0.7714743804924287, |
|
"eval_mean_iou": 0.7039585254978911, |
|
"eval_overall_accuracy": 0.9704401016235351, |
|
"eval_runtime": 2.0828, |
|
"eval_samples_per_second": 9.602, |
|
"eval_steps_per_second": 0.96, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 3.048245614035088e-05, |
|
"loss": 0.0341, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 3.0263157894736844e-05, |
|
"loss": 0.0357, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 3.0043859649122808e-05, |
|
"loss": 0.0339, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 2.9824561403508772e-05, |
|
"loss": 0.0363, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 2.9605263157894735e-05, |
|
"loss": 0.0337, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 2.9385964912280706e-05, |
|
"loss": 0.0319, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 2.916666666666667e-05, |
|
"loss": 0.0321, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 2.8947368421052634e-05, |
|
"loss": 0.0358, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 2.8728070175438597e-05, |
|
"loss": 0.032, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 2.850877192982456e-05, |
|
"loss": 0.0332, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_dropoff": 0.5215807702011515, |
|
"eval_accuracy_undropoff": 0.991029446445043, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.43221031584629244, |
|
"eval_iou_undropoff": 0.9708785696265562, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.17805632948875427, |
|
"eval_mean_accuracy": 0.7563051083230972, |
|
"eval_mean_iou": 0.7015444427364244, |
|
"eval_overall_accuracy": 0.9715101242065429, |
|
"eval_runtime": 4.0759, |
|
"eval_samples_per_second": 4.907, |
|
"eval_steps_per_second": 0.491, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 2.8289473684210528e-05, |
|
"loss": 0.0333, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 2.8070175438596492e-05, |
|
"loss": 0.0332, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 2.7850877192982456e-05, |
|
"loss": 0.0337, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 2.7631578947368426e-05, |
|
"loss": 0.0291, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 2.741228070175439e-05, |
|
"loss": 0.0306, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 2.7192982456140354e-05, |
|
"loss": 0.0382, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 2.6973684210526317e-05, |
|
"loss": 0.0322, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 2.675438596491228e-05, |
|
"loss": 0.0289, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 2.6535087719298245e-05, |
|
"loss": 0.0326, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 2.6315789473684212e-05, |
|
"loss": 0.0314, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.4674969609394711, |
|
"eval_accuracy_undropoff": 0.9935327873175207, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.40684732446545196, |
|
"eval_iou_undropoff": 0.9710988280806261, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.17315250635147095, |
|
"eval_mean_accuracy": 0.7305148741284959, |
|
"eval_mean_iou": 0.688973076273039, |
|
"eval_overall_accuracy": 0.9716606140136719, |
|
"eval_runtime": 5.7933, |
|
"eval_samples_per_second": 3.452, |
|
"eval_steps_per_second": 0.345, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 2.6096491228070176e-05, |
|
"loss": 0.0296, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 2.5877192982456143e-05, |
|
"loss": 0.0372, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 2.565789473684211e-05, |
|
"loss": 0.0304, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 2.5438596491228074e-05, |
|
"loss": 0.029, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 2.5219298245614037e-05, |
|
"loss": 0.0296, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.0292, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 2.4780701754385965e-05, |
|
"loss": 0.0294, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 2.456140350877193e-05, |
|
"loss": 0.0306, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 2.4342105263157896e-05, |
|
"loss": 0.0292, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 2.412280701754386e-05, |
|
"loss": 0.0318, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_dropoff": 0.5036904516158628, |
|
"eval_accuracy_undropoff": 0.9917598114185697, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.42329058099236316, |
|
"eval_iou_undropoff": 0.9708558880266382, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.17862842977046967, |
|
"eval_mean_accuracy": 0.7477251315172162, |
|
"eval_mean_iou": 0.6970732345095008, |
|
"eval_overall_accuracy": 0.9714662551879882, |
|
"eval_runtime": 2.0816, |
|
"eval_samples_per_second": 9.608, |
|
"eval_steps_per_second": 0.961, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 2.3903508771929827e-05, |
|
"loss": 0.0288, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 2.368421052631579e-05, |
|
"loss": 0.0291, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 2.3464912280701758e-05, |
|
"loss": 0.031, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 2.324561403508772e-05, |
|
"loss": 0.0272, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 2.3026315789473685e-05, |
|
"loss": 0.0294, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 2.280701754385965e-05, |
|
"loss": 0.0303, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 2.2587719298245616e-05, |
|
"loss": 0.0271, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 2.236842105263158e-05, |
|
"loss": 0.0317, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 2.2149122807017543e-05, |
|
"loss": 0.0287, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 2.1929824561403507e-05, |
|
"loss": 0.0291, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.5465584073029198, |
|
"eval_accuracy_undropoff": 0.9909323297946122, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.4520695555833798, |
|
"eval_iou_undropoff": 0.9718150809185412, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.18141531944274902, |
|
"eval_mean_accuracy": 0.768745368548766, |
|
"eval_mean_iou": 0.7119423182509605, |
|
"eval_overall_accuracy": 0.9724555969238281, |
|
"eval_runtime": 4.4125, |
|
"eval_samples_per_second": 4.533, |
|
"eval_steps_per_second": 0.453, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 2.1710526315789474e-05, |
|
"loss": 0.0268, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 2.149122807017544e-05, |
|
"loss": 0.0275, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 2.1271929824561405e-05, |
|
"loss": 0.0271, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.0283, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 2.0833333333333336e-05, |
|
"loss": 0.0266, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 2.06140350877193e-05, |
|
"loss": 0.0263, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 2.0394736842105264e-05, |
|
"loss": 0.0264, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 2.0175438596491227e-05, |
|
"loss": 0.0281, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 1.9956140350877194e-05, |
|
"loss": 0.0257, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 1.9736842105263158e-05, |
|
"loss": 0.0273, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_dropoff": 0.5446271703479437, |
|
"eval_accuracy_undropoff": 0.9907217777123257, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.44867109822876083, |
|
"eval_iou_undropoff": 0.9715287635773632, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.1755458563566208, |
|
"eval_mean_accuracy": 0.7676744740301347, |
|
"eval_mean_iou": 0.710099930903062, |
|
"eval_overall_accuracy": 0.9721735000610352, |
|
"eval_runtime": 4.8978, |
|
"eval_samples_per_second": 4.083, |
|
"eval_steps_per_second": 0.408, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 1.9517543859649122e-05, |
|
"loss": 0.0272, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 1.929824561403509e-05, |
|
"loss": 0.0251, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 1.9078947368421056e-05, |
|
"loss": 0.0261, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 1.885964912280702e-05, |
|
"loss": 0.0271, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 1.8640350877192984e-05, |
|
"loss": 0.0292, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.0256, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 1.8201754385964914e-05, |
|
"loss": 0.0258, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 1.7982456140350878e-05, |
|
"loss": 0.0265, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 1.7763157894736842e-05, |
|
"loss": 0.0264, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 1.7543859649122806e-05, |
|
"loss": 0.0274, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.5065529025895089, |
|
"eval_accuracy_undropoff": 0.992240618442014, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.42969823141428487, |
|
"eval_iou_undropoff": 0.9714446539608538, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.17860735952854156, |
|
"eval_mean_accuracy": 0.7493967605157614, |
|
"eval_mean_iou": 0.7005714426875693, |
|
"eval_overall_accuracy": 0.9720460891723632, |
|
"eval_runtime": 2.1071, |
|
"eval_samples_per_second": 9.492, |
|
"eval_steps_per_second": 0.949, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 1.7324561403508773e-05, |
|
"loss": 0.0257, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 1.7105263157894737e-05, |
|
"loss": 0.0254, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 1.6885964912280704e-05, |
|
"loss": 0.0246, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.0261, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 1.6447368421052635e-05, |
|
"loss": 0.0245, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 1.62280701754386e-05, |
|
"loss": 0.0284, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 1.6008771929824562e-05, |
|
"loss": 0.0244, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.0256, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 1.5570175438596493e-05, |
|
"loss": 0.0248, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 1.5350877192982457e-05, |
|
"loss": 0.0248, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_dropoff": 0.5130576389366729, |
|
"eval_accuracy_undropoff": 0.9921148444193251, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.43414836753785657, |
|
"eval_iou_undropoff": 0.9715899479169509, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.17410005629062653, |
|
"eval_mean_accuracy": 0.7525862416779989, |
|
"eval_mean_iou": 0.7028691577274038, |
|
"eval_overall_accuracy": 0.9721960067749024, |
|
"eval_runtime": 2.0503, |
|
"eval_samples_per_second": 9.755, |
|
"eval_steps_per_second": 0.975, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 1.5131578947368422e-05, |
|
"loss": 0.0259, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 1.4912280701754386e-05, |
|
"loss": 0.0243, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 1.4692982456140353e-05, |
|
"loss": 0.0256, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 1.4473684210526317e-05, |
|
"loss": 0.0244, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 1.425438596491228e-05, |
|
"loss": 0.0243, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 1.4035087719298246e-05, |
|
"loss": 0.0264, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 1.3815789473684213e-05, |
|
"loss": 0.0254, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 1.3596491228070177e-05, |
|
"loss": 0.0262, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 1.337719298245614e-05, |
|
"loss": 0.0242, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.0248, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.527768985527191, |
|
"eval_accuracy_undropoff": 0.9911968134594125, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.4387408000610151, |
|
"eval_iou_undropoff": 0.9712979898510656, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.18321658670902252, |
|
"eval_mean_accuracy": 0.7594828994933017, |
|
"eval_mean_iou": 0.7050193949560404, |
|
"eval_overall_accuracy": 0.9719278335571289, |
|
"eval_runtime": 2.0666, |
|
"eval_samples_per_second": 9.678, |
|
"eval_steps_per_second": 0.968, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 1.2938596491228071e-05, |
|
"loss": 0.0239, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 1.2719298245614037e-05, |
|
"loss": 0.0251, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 1.25e-05, |
|
"loss": 0.0256, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 1.2280701754385964e-05, |
|
"loss": 0.0231, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 1.206140350877193e-05, |
|
"loss": 0.0252, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 1.1842105263157895e-05, |
|
"loss": 0.0225, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 1.162280701754386e-05, |
|
"loss": 0.0261, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 1.1403508771929824e-05, |
|
"loss": 0.0233, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 1.118421052631579e-05, |
|
"loss": 0.0248, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 1.0964912280701754e-05, |
|
"loss": 0.0242, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_dropoff": 0.5159980733503062, |
|
"eval_accuracy_undropoff": 0.9918171261630864, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.434115617510507, |
|
"eval_iou_undropoff": 0.9714197444836119, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.18081368505954742, |
|
"eval_mean_accuracy": 0.7539075997566963, |
|
"eval_mean_iou": 0.7027676809970594, |
|
"eval_overall_accuracy": 0.9720329284667969, |
|
"eval_runtime": 2.0418, |
|
"eval_samples_per_second": 9.795, |
|
"eval_steps_per_second": 0.98, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 1.074561403508772e-05, |
|
"loss": 0.0245, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.0235, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 1.030701754385965e-05, |
|
"loss": 0.024, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 1.0087719298245614e-05, |
|
"loss": 0.0235, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 9.868421052631579e-06, |
|
"loss": 0.0234, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 9.649122807017545e-06, |
|
"loss": 0.025, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 9.42982456140351e-06, |
|
"loss": 0.0227, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 9.210526315789474e-06, |
|
"loss": 0.0308, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 8.991228070175439e-06, |
|
"loss": 0.0229, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 8.771929824561403e-06, |
|
"loss": 0.024, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.5077364159728435, |
|
"eval_accuracy_undropoff": 0.9924722655344351, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.4326619003131096, |
|
"eval_iou_undropoff": 0.971720292833711, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.1796456277370453, |
|
"eval_mean_accuracy": 0.7501043407536393, |
|
"eval_mean_iou": 0.7021910965734103, |
|
"eval_overall_accuracy": 0.9723173141479492, |
|
"eval_runtime": 7.6434, |
|
"eval_samples_per_second": 2.617, |
|
"eval_steps_per_second": 0.262, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.5, |
|
"learning_rate": 8.552631578947368e-06, |
|
"loss": 0.0228, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 0.0252, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 101.5, |
|
"learning_rate": 8.1140350877193e-06, |
|
"loss": 0.0229, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.0235, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 102.5, |
|
"learning_rate": 7.675438596491228e-06, |
|
"loss": 0.0249, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"learning_rate": 7.456140350877193e-06, |
|
"loss": 0.023, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 103.5, |
|
"learning_rate": 7.236842105263158e-06, |
|
"loss": 0.023, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"learning_rate": 7.017543859649123e-06, |
|
"loss": 0.0245, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 104.5, |
|
"learning_rate": 6.798245614035088e-06, |
|
"loss": 0.0229, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"learning_rate": 6.578947368421053e-06, |
|
"loss": 0.0231, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"eval_accuracy_dropoff": 0.5556962315649442, |
|
"eval_accuracy_undropoff": 0.9904648564096492, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.4555673390796817, |
|
"eval_iou_undropoff": 0.9717344160901538, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.18349342048168182, |
|
"eval_mean_accuracy": 0.7730805439872968, |
|
"eval_mean_iou": 0.7136508775849177, |
|
"eval_overall_accuracy": 0.9723875045776367, |
|
"eval_runtime": 2.0357, |
|
"eval_samples_per_second": 9.824, |
|
"eval_steps_per_second": 0.982, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 105.5, |
|
"learning_rate": 6.3596491228070184e-06, |
|
"loss": 0.0244, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"learning_rate": 6.140350877192982e-06, |
|
"loss": 0.0236, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 106.5, |
|
"learning_rate": 5.921052631578948e-06, |
|
"loss": 0.0245, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 107.0, |
|
"learning_rate": 5.701754385964912e-06, |
|
"loss": 0.0223, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 107.5, |
|
"learning_rate": 5.482456140350877e-06, |
|
"loss": 0.0216, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 108.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.0294, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 108.5, |
|
"learning_rate": 5.043859649122807e-06, |
|
"loss": 0.0225, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 109.0, |
|
"learning_rate": 4.824561403508772e-06, |
|
"loss": 0.0233, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 109.5, |
|
"learning_rate": 4.605263157894737e-06, |
|
"loss": 0.0217, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"learning_rate": 4.3859649122807014e-06, |
|
"loss": 0.0238, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"eval_accuracy_dropoff": 0.5214064542764743, |
|
"eval_accuracy_undropoff": 0.9916911531308676, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.43759672911228664, |
|
"eval_iou_undropoff": 0.9715196240517069, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.18225832283496857, |
|
"eval_mean_accuracy": 0.756548803703671, |
|
"eval_mean_iou": 0.7045581765819968, |
|
"eval_overall_accuracy": 0.9721370697021484, |
|
"eval_runtime": 7.7597, |
|
"eval_samples_per_second": 2.577, |
|
"eval_steps_per_second": 0.258, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 110.5, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 0.0235, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 111.0, |
|
"learning_rate": 3.9473684210526315e-06, |
|
"loss": 0.0229, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 111.5, |
|
"learning_rate": 3.7280701754385965e-06, |
|
"loss": 0.0218, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 112.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.0258, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 112.5, |
|
"learning_rate": 3.2894736842105265e-06, |
|
"loss": 0.0222, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 113.0, |
|
"learning_rate": 3.070175438596491e-06, |
|
"loss": 0.0329, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 113.5, |
|
"learning_rate": 2.850877192982456e-06, |
|
"loss": 0.023, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 114.0, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.0241, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 114.5, |
|
"learning_rate": 2.412280701754386e-06, |
|
"loss": 0.0219, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"learning_rate": 2.1929824561403507e-06, |
|
"loss": 0.0228, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"eval_accuracy_dropoff": 0.5087593752150279, |
|
"eval_accuracy_undropoff": 0.9921096701715562, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.43046774024522305, |
|
"eval_iou_undropoff": 0.9714074879193636, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.1833326816558838, |
|
"eval_mean_accuracy": 0.750434522693292, |
|
"eval_mean_iou": 0.7009376140822934, |
|
"eval_overall_accuracy": 0.9720123291015625, |
|
"eval_runtime": 6.3105, |
|
"eval_samples_per_second": 3.169, |
|
"eval_steps_per_second": 0.317, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 115.5, |
|
"learning_rate": 1.9736842105263157e-06, |
|
"loss": 0.0224, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 116.0, |
|
"learning_rate": 1.7543859649122807e-06, |
|
"loss": 0.0247, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 116.5, |
|
"learning_rate": 1.5350877192982455e-06, |
|
"loss": 0.0247, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 117.0, |
|
"learning_rate": 1.3157894736842106e-06, |
|
"loss": 0.0217, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 117.5, |
|
"learning_rate": 1.0964912280701754e-06, |
|
"loss": 0.0228, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 118.0, |
|
"learning_rate": 8.771929824561404e-07, |
|
"loss": 0.0244, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 118.5, |
|
"learning_rate": 6.578947368421053e-07, |
|
"loss": 0.0243, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 119.0, |
|
"learning_rate": 4.385964912280702e-07, |
|
"loss": 0.0232, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 119.5, |
|
"learning_rate": 2.192982456140351e-07, |
|
"loss": 0.0226, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"learning_rate": 0.0, |
|
"loss": 0.0255, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"eval_accuracy_dropoff": 0.5144705153787931, |
|
"eval_accuracy_undropoff": 0.9919130487563397, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.43363711233378827, |
|
"eval_iou_undropoff": 0.9714506401430753, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.18413399159908295, |
|
"eval_mean_accuracy": 0.7531917820675664, |
|
"eval_mean_iou": 0.7025438762384317, |
|
"eval_overall_accuracy": 0.9720613479614257, |
|
"eval_runtime": 2.0379, |
|
"eval_samples_per_second": 9.814, |
|
"eval_steps_per_second": 0.981, |
|
"step": 240 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 1.3891198199110042e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|