{
  "best_metric": 0.17315250635147095,
  "best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b5_7/checkpoint-120",
  "epoch": 80.0,
  "global_step": 160,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "learning_rate": 4.166666666666667e-06,
      "loss": 1.017,
      "step": 1
    },
    {
      "epoch": 1.0,
      "learning_rate": 8.333333333333334e-06,
      "loss": 1.025,
      "step": 2
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.25e-05,
      "loss": 1.0124,
      "step": 3
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.9968,
      "step": 4
    },
    {
      "epoch": 2.5,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.9685,
      "step": 5
    },
    {
      "epoch": 3.0,
      "learning_rate": 2.5e-05,
      "loss": 0.9552,
      "step": 6
    },
    {
      "epoch": 3.5,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.9261,
      "step": 7
    },
    {
      "epoch": 4.0,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.8902,
      "step": 8
    },
    {
      "epoch": 4.5,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.8624,
      "step": 9
    },
    {
      "epoch": 5.0,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.8255,
      "step": 10
    },
    {
      "epoch": 5.0,
      "eval_accuracy_dropoff": 0.61789949310764,
      "eval_accuracy_undropoff": 0.9532616169325269,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.30068083396580203,
      "eval_iou_undropoff": 0.9377191953832399,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.7949008941650391,
      "eval_mean_accuracy": 0.7855805550200834,
      "eval_mean_iou": 0.41280000978301395,
      "eval_overall_accuracy": 0.9393175125122071,
      "eval_runtime": 2.0065,
      "eval_samples_per_second": 9.968,
      "eval_steps_per_second": 0.997,
      "step": 10
    },
    {
      "epoch": 5.5,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.7939,
      "step": 11
    },
    {
      "epoch": 6.0,
      "learning_rate": 5e-05,
      "loss": 0.7364,
      "step": 12
    },
    {
      "epoch": 6.5,
      "learning_rate": 4.9780701754385966e-05,
      "loss": 0.6993,
      "step": 13
    },
    {
      "epoch": 7.0,
      "learning_rate": 4.956140350877193e-05,
      "loss": 0.6614,
      "step": 14
    },
    {
      "epoch": 7.5,
      "learning_rate": 4.9342105263157894e-05,
      "loss": 0.5986,
      "step": 15
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.912280701754386e-05,
      "loss": 0.5876,
      "step": 16
    },
    {
      "epoch": 8.5,
      "learning_rate": 4.890350877192983e-05,
      "loss": 0.5312,
      "step": 17
    },
    {
      "epoch": 9.0,
      "learning_rate": 4.868421052631579e-05,
      "loss": 0.5088,
      "step": 18
    },
    {
      "epoch": 9.5,
      "learning_rate": 4.8464912280701755e-05,
      "loss": 0.462,
      "step": 19
    },
    {
      "epoch": 10.0,
      "learning_rate": 4.824561403508772e-05,
      "loss": 0.4434,
      "step": 20
    },
    {
      "epoch": 10.0,
      "eval_accuracy_dropoff": 0.41871602559691734,
      "eval_accuracy_undropoff": 0.9944325094007127,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3713824207926633,
      "eval_iou_undropoff": 0.9699718650625573,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.42470020055770874,
      "eval_mean_accuracy": 0.706574267498815,
      "eval_mean_iou": 0.4471180952850735,
      "eval_overall_accuracy": 0.9704946517944336,
      "eval_runtime": 2.0254,
      "eval_samples_per_second": 9.875,
      "eval_steps_per_second": 0.987,
      "step": 20
    },
    {
      "epoch": 10.5,
      "learning_rate": 4.802631578947368e-05,
      "loss": 0.3698,
      "step": 21
    },
    {
      "epoch": 11.0,
      "learning_rate": 4.780701754385965e-05,
      "loss": 0.4431,
      "step": 22
    },
    {
      "epoch": 11.5,
      "learning_rate": 4.758771929824562e-05,
      "loss": 0.3667,
      "step": 23
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.736842105263158e-05,
      "loss": 0.3123,
      "step": 24
    },
    {
      "epoch": 12.5,
      "learning_rate": 4.714912280701755e-05,
      "loss": 0.3021,
      "step": 25
    },
    {
      "epoch": 13.0,
      "learning_rate": 4.6929824561403515e-05,
      "loss": 0.2765,
      "step": 26
    },
    {
      "epoch": 13.5,
      "learning_rate": 4.671052631578948e-05,
      "loss": 0.2547,
      "step": 27
    },
    {
      "epoch": 14.0,
      "learning_rate": 4.649122807017544e-05,
      "loss": 0.2615,
      "step": 28
    },
    {
      "epoch": 14.5,
      "learning_rate": 4.6271929824561406e-05,
      "loss": 0.2296,
      "step": 29
    },
    {
      "epoch": 15.0,
      "learning_rate": 4.605263157894737e-05,
      "loss": 0.2107,
      "step": 30
    },
    {
      "epoch": 15.0,
      "eval_accuracy_dropoff": 0.40458267391453934,
      "eval_accuracy_undropoff": 0.9961075726111145,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3712713740875759,
      "eval_iou_undropoff": 0.9710249883455491,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.2725875973701477,
      "eval_mean_accuracy": 0.7003451232628269,
      "eval_mean_iou": 0.6711481812165625,
      "eval_overall_accuracy": 0.9715124130249023,
      "eval_runtime": 2.0394,
      "eval_samples_per_second": 9.807,
      "eval_steps_per_second": 0.981,
      "step": 30
    },
    {
      "epoch": 15.5,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.205,
      "step": 31
    },
    {
      "epoch": 16.0,
      "learning_rate": 4.56140350877193e-05,
      "loss": 0.1816,
      "step": 32
    },
    {
      "epoch": 16.5,
      "learning_rate": 4.539473684210527e-05,
      "loss": 0.1842,
      "step": 33
    },
    {
      "epoch": 17.0,
      "learning_rate": 4.517543859649123e-05,
      "loss": 0.149,
      "step": 34
    },
    {
      "epoch": 17.5,
      "learning_rate": 4.4956140350877196e-05,
      "loss": 0.1361,
      "step": 35
    },
    {
      "epoch": 18.0,
      "learning_rate": 4.473684210526316e-05,
      "loss": 0.1646,
      "step": 36
    },
    {
      "epoch": 18.5,
      "learning_rate": 4.451754385964912e-05,
      "loss": 0.1288,
      "step": 37
    },
    {
      "epoch": 19.0,
      "learning_rate": 4.429824561403509e-05,
      "loss": 0.1178,
      "step": 38
    },
    {
      "epoch": 19.5,
      "learning_rate": 4.407894736842105e-05,
      "loss": 0.1103,
      "step": 39
    },
    {
      "epoch": 20.0,
      "learning_rate": 4.3859649122807014e-05,
      "loss": 0.1678,
      "step": 40
    },
    {
      "epoch": 20.0,
      "eval_accuracy_dropoff": 0.47821739030711713,
      "eval_accuracy_undropoff": 0.9904238604465575,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.39174564284479585,
      "eval_iou_undropoff": 0.9685003651750826,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.23879647254943848,
      "eval_mean_accuracy": 0.7343206253768373,
      "eval_mean_iou": 0.6801230040099393,
      "eval_overall_accuracy": 0.9691267013549805,
      "eval_runtime": 2.833,
      "eval_samples_per_second": 7.06,
      "eval_steps_per_second": 0.706,
      "step": 40
    },
    {
      "epoch": 20.5,
      "learning_rate": 4.3640350877192985e-05,
      "loss": 0.0934,
      "step": 41
    },
    {
      "epoch": 21.0,
      "learning_rate": 4.342105263157895e-05,
      "loss": 0.1108,
      "step": 42
    },
    {
      "epoch": 21.5,
      "learning_rate": 4.320175438596491e-05,
      "loss": 0.0932,
      "step": 43
    },
    {
      "epoch": 22.0,
      "learning_rate": 4.298245614035088e-05,
      "loss": 0.097,
      "step": 44
    },
    {
      "epoch": 22.5,
      "learning_rate": 4.2763157894736847e-05,
      "loss": 0.0813,
      "step": 45
    },
    {
      "epoch": 23.0,
      "learning_rate": 4.254385964912281e-05,
      "loss": 0.1387,
      "step": 46
    },
    {
      "epoch": 23.5,
      "learning_rate": 4.2324561403508774e-05,
      "loss": 0.0835,
      "step": 47
    },
    {
      "epoch": 24.0,
      "learning_rate": 4.210526315789474e-05,
      "loss": 0.0745,
      "step": 48
    },
    {
      "epoch": 24.5,
      "learning_rate": 4.188596491228071e-05,
      "loss": 0.0736,
      "step": 49
    },
    {
      "epoch": 25.0,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.0972,
      "step": 50
    },
    {
      "epoch": 25.0,
      "eval_accuracy_dropoff": 0.42408770843367966,
      "eval_accuracy_undropoff": 0.9951975020323848,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3818202843962053,
      "eval_iou_undropoff": 0.9709387427964766,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.18494448065757751,
      "eval_mean_accuracy": 0.7096426052330322,
      "eval_mean_iou": 0.676379513596341,
      "eval_overall_accuracy": 0.9714511871337891,
      "eval_runtime": 2.046,
      "eval_samples_per_second": 9.775,
      "eval_steps_per_second": 0.978,
      "step": 50
    },
    {
      "epoch": 25.5,
      "learning_rate": 4.1447368421052636e-05,
      "loss": 0.0686,
      "step": 51
    },
    {
      "epoch": 26.0,
      "learning_rate": 4.12280701754386e-05,
      "loss": 0.0779,
      "step": 52
    },
    {
      "epoch": 26.5,
      "learning_rate": 4.100877192982456e-05,
      "loss": 0.0723,
      "step": 53
    },
    {
      "epoch": 27.0,
      "learning_rate": 4.078947368421053e-05,
      "loss": 0.0685,
      "step": 54
    },
    {
      "epoch": 27.5,
      "learning_rate": 4.057017543859649e-05,
      "loss": 0.0662,
      "step": 55
    },
    {
      "epoch": 28.0,
      "learning_rate": 4.0350877192982455e-05,
      "loss": 0.0641,
      "step": 56
    },
    {
      "epoch": 28.5,
      "learning_rate": 4.0131578947368425e-05,
      "loss": 0.0567,
      "step": 57
    },
    {
      "epoch": 29.0,
      "learning_rate": 3.991228070175439e-05,
      "loss": 0.0769,
      "step": 58
    },
    {
      "epoch": 29.5,
      "learning_rate": 3.969298245614035e-05,
      "loss": 0.0618,
      "step": 59
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.9473684210526316e-05,
      "loss": 0.0604,
      "step": 60
    },
    {
      "epoch": 30.0,
      "eval_accuracy_dropoff": 0.5239248606619418,
      "eval_accuracy_undropoff": 0.9897283221407057,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.42364186677942717,
      "eval_iou_undropoff": 0.9697005089236638,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.20193985104560852,
      "eval_mean_accuracy": 0.7568265914013237,
      "eval_mean_iou": 0.464447458567697,
      "eval_overall_accuracy": 0.9703605651855469,
      "eval_runtime": 2.0527,
      "eval_samples_per_second": 9.743,
      "eval_steps_per_second": 0.974,
      "step": 60
    },
    {
      "epoch": 30.5,
      "learning_rate": 3.925438596491228e-05,
      "loss": 0.0556,
      "step": 61
    },
    {
      "epoch": 31.0,
      "learning_rate": 3.9035087719298244e-05,
      "loss": 0.0561,
      "step": 62
    },
    {
      "epoch": 31.5,
      "learning_rate": 3.8815789473684214e-05,
      "loss": 0.055,
      "step": 63
    },
    {
      "epoch": 32.0,
      "learning_rate": 3.859649122807018e-05,
      "loss": 0.0524,
      "step": 64
    },
    {
      "epoch": 32.5,
      "learning_rate": 3.837719298245614e-05,
      "loss": 0.051,
      "step": 65
    },
    {
      "epoch": 33.0,
      "learning_rate": 3.815789473684211e-05,
      "loss": 0.0542,
      "step": 66
    },
    {
      "epoch": 33.5,
      "learning_rate": 3.7938596491228076e-05,
      "loss": 0.0516,
      "step": 67
    },
    {
      "epoch": 34.0,
      "learning_rate": 3.771929824561404e-05,
      "loss": 0.0466,
      "step": 68
    },
    {
      "epoch": 34.5,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0502,
      "step": 69
    },
    {
      "epoch": 35.0,
      "learning_rate": 3.728070175438597e-05,
      "loss": 0.0497,
      "step": 70
    },
    {
      "epoch": 35.0,
      "eval_accuracy_dropoff": 0.4775384756531113,
      "eval_accuracy_undropoff": 0.9913645784928411,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.39826387693343585,
      "eval_iou_undropoff": 0.9693923400870129,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.1793430596590042,
      "eval_mean_accuracy": 0.7344515270729762,
      "eval_mean_iou": 0.6838281085102244,
      "eval_overall_accuracy": 0.9700000762939454,
      "eval_runtime": 6.1591,
      "eval_samples_per_second": 3.247,
      "eval_steps_per_second": 0.325,
      "step": 70
    },
    {
      "epoch": 35.5,
      "learning_rate": 3.706140350877193e-05,
      "loss": 0.0442,
      "step": 71
    },
    {
      "epoch": 36.0,
      "learning_rate": 3.6842105263157895e-05,
      "loss": 0.0477,
      "step": 72
    },
    {
      "epoch": 36.5,
      "learning_rate": 3.662280701754386e-05,
      "loss": 0.0418,
      "step": 73
    },
    {
      "epoch": 37.0,
      "learning_rate": 3.640350877192983e-05,
      "loss": 0.0829,
      "step": 74
    },
    {
      "epoch": 37.5,
      "learning_rate": 3.618421052631579e-05,
      "loss": 0.0458,
      "step": 75
    },
    {
      "epoch": 38.0,
      "learning_rate": 3.5964912280701756e-05,
      "loss": 0.0465,
      "step": 76
    },
    {
      "epoch": 38.5,
      "learning_rate": 3.574561403508772e-05,
      "loss": 0.0445,
      "step": 77
    },
    {
      "epoch": 39.0,
      "learning_rate": 3.5526315789473684e-05,
      "loss": 0.0509,
      "step": 78
    },
    {
      "epoch": 39.5,
      "learning_rate": 3.530701754385965e-05,
      "loss": 0.0429,
      "step": 79
    },
    {
      "epoch": 40.0,
      "learning_rate": 3.508771929824561e-05,
      "loss": 0.0492,
      "step": 80
    },
    {
      "epoch": 40.0,
      "eval_accuracy_dropoff": 0.5238606390054817,
      "eval_accuracy_undropoff": 0.9895599600786884,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.42225237750136807,
      "eval_iou_undropoff": 0.969532907193877,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.1999509036540985,
      "eval_mean_accuracy": 0.7567102995420851,
      "eval_mean_iou": 0.46392842823174835,
      "eval_overall_accuracy": 0.970196533203125,
      "eval_runtime": 2.1001,
      "eval_samples_per_second": 9.523,
      "eval_steps_per_second": 0.952,
      "step": 80
    },
    {
      "epoch": 40.5,
      "learning_rate": 3.4868421052631575e-05,
      "loss": 0.0405,
      "step": 81
    },
    {
      "epoch": 41.0,
      "learning_rate": 3.4649122807017546e-05,
      "loss": 0.0504,
      "step": 82
    },
    {
      "epoch": 41.5,
      "learning_rate": 3.442982456140351e-05,
      "loss": 0.0391,
      "step": 83
    },
    {
      "epoch": 42.0,
      "learning_rate": 3.421052631578947e-05,
      "loss": 0.0491,
      "step": 84
    },
    {
      "epoch": 42.5,
      "learning_rate": 3.3991228070175444e-05,
      "loss": 0.0453,
      "step": 85
    },
    {
      "epoch": 43.0,
      "learning_rate": 3.377192982456141e-05,
      "loss": 0.0471,
      "step": 86
    },
    {
      "epoch": 43.5,
      "learning_rate": 3.355263157894737e-05,
      "loss": 0.0401,
      "step": 87
    },
    {
      "epoch": 44.0,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.0405,
      "step": 88
    },
    {
      "epoch": 44.5,
      "learning_rate": 3.31140350877193e-05,
      "loss": 0.0438,
      "step": 89
    },
    {
      "epoch": 45.0,
      "learning_rate": 3.289473684210527e-05,
      "loss": 0.0409,
      "step": 90
    },
    {
      "epoch": 45.0,
      "eval_accuracy_dropoff": 0.5687102915204477,
      "eval_accuracy_undropoff": 0.9869465669363577,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.4371709563555452,
      "eval_iou_undropoff": 0.9688193019443224,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.18931810557842255,
      "eval_mean_accuracy": 0.7778284292284028,
      "eval_mean_iou": 0.7029951291499338,
      "eval_overall_accuracy": 0.9695566177368165,
      "eval_runtime": 2.0628,
      "eval_samples_per_second": 9.696,
      "eval_steps_per_second": 0.97,
      "step": 90
    },
    {
      "epoch": 45.5,
      "learning_rate": 3.267543859649123e-05,
      "loss": 0.0392,
      "step": 91
    },
    {
      "epoch": 46.0,
      "learning_rate": 3.24561403508772e-05,
      "loss": 0.0384,
      "step": 92
    },
    {
      "epoch": 46.5,
      "learning_rate": 3.223684210526316e-05,
      "loss": 0.0429,
      "step": 93
    },
    {
      "epoch": 47.0,
      "learning_rate": 3.2017543859649124e-05,
      "loss": 0.0351,
      "step": 94
    },
    {
      "epoch": 47.5,
      "learning_rate": 3.179824561403509e-05,
      "loss": 0.0372,
      "step": 95
    },
    {
      "epoch": 48.0,
      "learning_rate": 3.157894736842105e-05,
      "loss": 0.0429,
      "step": 96
    },
    {
      "epoch": 48.5,
      "learning_rate": 3.1359649122807015e-05,
      "loss": 0.0358,
      "step": 97
    },
    {
      "epoch": 49.0,
      "learning_rate": 3.1140350877192986e-05,
      "loss": 0.0388,
      "step": 98
    },
    {
      "epoch": 49.5,
      "learning_rate": 3.092105263157895e-05,
      "loss": 0.0356,
      "step": 99
    },
    {
      "epoch": 50.0,
      "learning_rate": 3.0701754385964913e-05,
      "loss": 0.0328,
      "step": 100
    },
    {
      "epoch": 50.0,
      "eval_accuracy_dropoff": 0.5544622583086768,
      "eval_accuracy_undropoff": 0.9884865026761807,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.438174508517341,
      "eval_iou_undropoff": 0.969742542478441,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.18416434526443481,
      "eval_mean_accuracy": 0.7714743804924287,
      "eval_mean_iou": 0.7039585254978911,
      "eval_overall_accuracy": 0.9704401016235351,
      "eval_runtime": 2.0828,
      "eval_samples_per_second": 9.602,
      "eval_steps_per_second": 0.96,
      "step": 100
    },
    {
      "epoch": 50.5,
      "learning_rate": 3.048245614035088e-05,
      "loss": 0.0341,
      "step": 101
    },
    {
      "epoch": 51.0,
      "learning_rate": 3.0263157894736844e-05,
      "loss": 0.0357,
      "step": 102
    },
    {
      "epoch": 51.5,
      "learning_rate": 3.0043859649122808e-05,
      "loss": 0.0339,
      "step": 103
    },
    {
      "epoch": 52.0,
      "learning_rate": 2.9824561403508772e-05,
      "loss": 0.0363,
      "step": 104
    },
    {
      "epoch": 52.5,
      "learning_rate": 2.9605263157894735e-05,
      "loss": 0.0337,
      "step": 105
    },
    {
      "epoch": 53.0,
      "learning_rate": 2.9385964912280706e-05,
      "loss": 0.0319,
      "step": 106
    },
    {
      "epoch": 53.5,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.0321,
      "step": 107
    },
    {
      "epoch": 54.0,
      "learning_rate": 2.8947368421052634e-05,
      "loss": 0.0358,
      "step": 108
    },
    {
      "epoch": 54.5,
      "learning_rate": 2.8728070175438597e-05,
      "loss": 0.032,
      "step": 109
    },
    {
      "epoch": 55.0,
      "learning_rate": 2.850877192982456e-05,
      "loss": 0.0332,
      "step": 110
    },
    {
      "epoch": 55.0,
      "eval_accuracy_dropoff": 0.5215807702011515,
      "eval_accuracy_undropoff": 0.991029446445043,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.43221031584629244,
      "eval_iou_undropoff": 0.9708785696265562,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.17805632948875427,
      "eval_mean_accuracy": 0.7563051083230972,
      "eval_mean_iou": 0.7015444427364244,
      "eval_overall_accuracy": 0.9715101242065429,
      "eval_runtime": 4.0759,
      "eval_samples_per_second": 4.907,
      "eval_steps_per_second": 0.491,
      "step": 110
    },
    {
      "epoch": 55.5,
      "learning_rate": 2.8289473684210528e-05,
      "loss": 0.0333,
      "step": 111
    },
    {
      "epoch": 56.0,
      "learning_rate": 2.8070175438596492e-05,
      "loss": 0.0332,
      "step": 112
    },
    {
      "epoch": 56.5,
      "learning_rate": 2.7850877192982456e-05,
      "loss": 0.0337,
      "step": 113
    },
    {
      "epoch": 57.0,
      "learning_rate": 2.7631578947368426e-05,
      "loss": 0.0291,
      "step": 114
    },
    {
      "epoch": 57.5,
      "learning_rate": 2.741228070175439e-05,
      "loss": 0.0306,
      "step": 115
    },
    {
      "epoch": 58.0,
      "learning_rate": 2.7192982456140354e-05,
      "loss": 0.0382,
      "step": 116
    },
    {
      "epoch": 58.5,
      "learning_rate": 2.6973684210526317e-05,
      "loss": 0.0322,
      "step": 117
    },
    {
      "epoch": 59.0,
      "learning_rate": 2.675438596491228e-05,
      "loss": 0.0289,
      "step": 118
    },
    {
      "epoch": 59.5,
      "learning_rate": 2.6535087719298245e-05,
      "loss": 0.0326,
      "step": 119
    },
    {
      "epoch": 60.0,
      "learning_rate": 2.6315789473684212e-05,
      "loss": 0.0314,
      "step": 120
    },
    {
      "epoch": 60.0,
      "eval_accuracy_dropoff": 0.4674969609394711,
      "eval_accuracy_undropoff": 0.9935327873175207,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.40684732446545196,
      "eval_iou_undropoff": 0.9710988280806261,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.17315250635147095,
      "eval_mean_accuracy": 0.7305148741284959,
      "eval_mean_iou": 0.688973076273039,
      "eval_overall_accuracy": 0.9716606140136719,
      "eval_runtime": 5.7933,
      "eval_samples_per_second": 3.452,
      "eval_steps_per_second": 0.345,
      "step": 120
    },
    {
      "epoch": 60.5,
      "learning_rate": 2.6096491228070176e-05,
      "loss": 0.0296,
      "step": 121
    },
    {
      "epoch": 61.0,
      "learning_rate": 2.5877192982456143e-05,
      "loss": 0.0372,
      "step": 122
    },
    {
      "epoch": 61.5,
      "learning_rate": 2.565789473684211e-05,
      "loss": 0.0304,
      "step": 123
    },
    {
      "epoch": 62.0,
      "learning_rate": 2.5438596491228074e-05,
      "loss": 0.029,
      "step": 124
    },
    {
      "epoch": 62.5,
      "learning_rate": 2.5219298245614037e-05,
      "loss": 0.0296,
      "step": 125
    },
    {
      "epoch": 63.0,
      "learning_rate": 2.5e-05,
      "loss": 0.0292,
      "step": 126
    },
    {
      "epoch": 63.5,
      "learning_rate": 2.4780701754385965e-05,
      "loss": 0.0294,
      "step": 127
    },
    {
      "epoch": 64.0,
      "learning_rate": 2.456140350877193e-05,
      "loss": 0.0306,
      "step": 128
    },
    {
      "epoch": 64.5,
      "learning_rate": 2.4342105263157896e-05,
      "loss": 0.0292,
      "step": 129
    },
    {
      "epoch": 65.0,
      "learning_rate": 2.412280701754386e-05,
      "loss": 0.0318,
      "step": 130
    },
    {
      "epoch": 65.0,
      "eval_accuracy_dropoff": 0.5036904516158628,
      "eval_accuracy_undropoff": 0.9917598114185697,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.42329058099236316,
      "eval_iou_undropoff": 0.9708558880266382,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.17862842977046967,
      "eval_mean_accuracy": 0.7477251315172162,
      "eval_mean_iou": 0.6970732345095008,
      "eval_overall_accuracy": 0.9714662551879882,
      "eval_runtime": 2.0816,
      "eval_samples_per_second": 9.608,
      "eval_steps_per_second": 0.961,
      "step": 130
    },
    {
      "epoch": 65.5,
      "learning_rate": 2.3903508771929827e-05,
      "loss": 0.0288,
      "step": 131
    },
    {
      "epoch": 66.0,
      "learning_rate": 2.368421052631579e-05,
      "loss": 0.0291,
      "step": 132
    },
    {
      "epoch": 66.5,
      "learning_rate": 2.3464912280701758e-05,
      "loss": 0.031,
      "step": 133
    },
    {
      "epoch": 67.0,
      "learning_rate": 2.324561403508772e-05,
      "loss": 0.0272,
      "step": 134
    },
    {
      "epoch": 67.5,
      "learning_rate": 2.3026315789473685e-05,
      "loss": 0.0294,
      "step": 135
    },
    {
      "epoch": 68.0,
      "learning_rate": 2.280701754385965e-05,
      "loss": 0.0303,
      "step": 136
    },
    {
      "epoch": 68.5,
      "learning_rate": 2.2587719298245616e-05,
      "loss": 0.0271,
      "step": 137
    },
    {
      "epoch": 69.0,
      "learning_rate": 2.236842105263158e-05,
      "loss": 0.0317,
      "step": 138
    },
    {
      "epoch": 69.5,
      "learning_rate": 2.2149122807017543e-05,
      "loss": 0.0287,
      "step": 139
    },
    {
      "epoch": 70.0,
      "learning_rate": 2.1929824561403507e-05,
      "loss": 0.0291,
      "step": 140
    },
    {
      "epoch": 70.0,
      "eval_accuracy_dropoff": 0.5465584073029198,
      "eval_accuracy_undropoff": 0.9909323297946122,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.4520695555833798,
      "eval_iou_undropoff": 0.9718150809185412,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.18141531944274902,
      "eval_mean_accuracy": 0.768745368548766,
      "eval_mean_iou": 0.7119423182509605,
      "eval_overall_accuracy": 0.9724555969238281,
      "eval_runtime": 4.4125,
      "eval_samples_per_second": 4.533,
      "eval_steps_per_second": 0.453,
      "step": 140
    },
    {
      "epoch": 70.5,
      "learning_rate": 2.1710526315789474e-05,
      "loss": 0.0268,
      "step": 141
    },
    {
      "epoch": 71.0,
      "learning_rate": 2.149122807017544e-05,
      "loss": 0.0275,
      "step": 142
    },
    {
      "epoch": 71.5,
      "learning_rate": 2.1271929824561405e-05,
      "loss": 0.0271,
      "step": 143
    },
    {
      "epoch": 72.0,
      "learning_rate": 2.105263157894737e-05,
      "loss": 0.0283,
      "step": 144
    },
    {
      "epoch": 72.5,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.0266,
      "step": 145
    },
    {
      "epoch": 73.0,
      "learning_rate": 2.06140350877193e-05,
      "loss": 0.0263,
      "step": 146
    },
    {
      "epoch": 73.5,
      "learning_rate": 2.0394736842105264e-05,
      "loss": 0.0264,
      "step": 147
    },
    {
      "epoch": 74.0,
      "learning_rate": 2.0175438596491227e-05,
      "loss": 0.0281,
      "step": 148
    },
    {
      "epoch": 74.5,
      "learning_rate": 1.9956140350877194e-05,
      "loss": 0.0257,
      "step": 149
    },
    {
      "epoch": 75.0,
      "learning_rate": 1.9736842105263158e-05,
      "loss": 0.0273,
      "step": 150
    },
    {
      "epoch": 75.0,
      "eval_accuracy_dropoff": 0.5446271703479437,
      "eval_accuracy_undropoff": 0.9907217777123257,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.44867109822876083,
      "eval_iou_undropoff": 0.9715287635773632,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.1755458563566208,
      "eval_mean_accuracy": 0.7676744740301347,
      "eval_mean_iou": 0.710099930903062,
      "eval_overall_accuracy": 0.9721735000610352,
      "eval_runtime": 4.8978,
      "eval_samples_per_second": 4.083,
      "eval_steps_per_second": 0.408,
      "step": 150
    },
    {
      "epoch": 75.5,
      "learning_rate": 1.9517543859649122e-05,
      "loss": 0.0272,
      "step": 151
    },
    {
      "epoch": 76.0,
      "learning_rate": 1.929824561403509e-05,
      "loss": 0.0251,
      "step": 152
    },
    {
      "epoch": 76.5,
      "learning_rate": 1.9078947368421056e-05,
      "loss": 0.0261,
      "step": 153
    },
    {
      "epoch": 77.0,
      "learning_rate": 1.885964912280702e-05,
      "loss": 0.0271,
      "step": 154
    },
    {
      "epoch": 77.5,
      "learning_rate": 1.8640350877192984e-05,
      "loss": 0.0292,
      "step": 155
    },
    {
      "epoch": 78.0,
      "learning_rate": 1.8421052631578947e-05,
      "loss": 0.0256,
      "step": 156
    },
    {
      "epoch": 78.5,
      "learning_rate": 1.8201754385964914e-05,
      "loss": 0.0258,
      "step": 157
    },
    {
      "epoch": 79.0,
      "learning_rate": 1.7982456140350878e-05,
      "loss": 0.0265,
      "step": 158
    },
    {
      "epoch": 79.5,
      "learning_rate": 1.7763157894736842e-05,
      "loss": 0.0264,
      "step": 159
    },
    {
      "epoch": 80.0,
      "learning_rate": 1.7543859649122806e-05,
      "loss": 0.0274,
      "step": 160
    },
    {
      "epoch": 80.0,
      "eval_accuracy_dropoff": 0.5065529025895089,
      "eval_accuracy_undropoff": 0.992240618442014,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.42969823141428487,
      "eval_iou_undropoff": 0.9714446539608538,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.17860735952854156,
      "eval_mean_accuracy": 0.7493967605157614,
      "eval_mean_iou": 0.7005714426875693,
      "eval_overall_accuracy": 0.9720460891723632,
      "eval_runtime": 2.1071,
      "eval_samples_per_second": 9.492,
      "eval_steps_per_second": 0.949,
      "step": 160
    }
  ],
  "max_steps": 240,
  "num_train_epochs": 120,
  "total_flos": 9.260798799406694e+17,
  "trial_name": null,
  "trial_params": null
}