|
{
  "best_metric": 0.1999509036540985,
  "best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b5_7/checkpoint-80",
  "epoch": 40.0,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "learning_rate": 4.166666666666667e-06,
      "loss": 1.017,
      "step": 1
    },
    {
      "epoch": 1.0,
      "learning_rate": 8.333333333333334e-06,
      "loss": 1.025,
      "step": 2
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.25e-05,
      "loss": 1.0124,
      "step": 3
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.9968,
      "step": 4
    },
    {
      "epoch": 2.5,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.9685,
      "step": 5
    },
    {
      "epoch": 3.0,
      "learning_rate": 2.5e-05,
      "loss": 0.9552,
      "step": 6
    },
    {
      "epoch": 3.5,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.9261,
      "step": 7
    },
    {
      "epoch": 4.0,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.8902,
      "step": 8
    },
    {
      "epoch": 4.5,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.8624,
      "step": 9
    },
    {
      "epoch": 5.0,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.8255,
      "step": 10
    },
    {
      "epoch": 5.0,
      "eval_accuracy_dropoff": 0.61789949310764,
      "eval_accuracy_undropoff": 0.9532616169325269,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.30068083396580203,
      "eval_iou_undropoff": 0.9377191953832399,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.7949008941650391,
      "eval_mean_accuracy": 0.7855805550200834,
      "eval_mean_iou": 0.41280000978301395,
      "eval_overall_accuracy": 0.9393175125122071,
      "eval_runtime": 2.0065,
      "eval_samples_per_second": 9.968,
      "eval_steps_per_second": 0.997,
      "step": 10
    },
    {
      "epoch": 5.5,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.7939,
      "step": 11
    },
    {
      "epoch": 6.0,
      "learning_rate": 5e-05,
      "loss": 0.7364,
      "step": 12
    },
    {
      "epoch": 6.5,
      "learning_rate": 4.9780701754385966e-05,
      "loss": 0.6993,
      "step": 13
    },
    {
      "epoch": 7.0,
      "learning_rate": 4.956140350877193e-05,
      "loss": 0.6614,
      "step": 14
    },
    {
      "epoch": 7.5,
      "learning_rate": 4.9342105263157894e-05,
      "loss": 0.5986,
      "step": 15
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.912280701754386e-05,
      "loss": 0.5876,
      "step": 16
    },
    {
      "epoch": 8.5,
      "learning_rate": 4.890350877192983e-05,
      "loss": 0.5312,
      "step": 17
    },
    {
      "epoch": 9.0,
      "learning_rate": 4.868421052631579e-05,
      "loss": 0.5088,
      "step": 18
    },
    {
      "epoch": 9.5,
      "learning_rate": 4.8464912280701755e-05,
      "loss": 0.462,
      "step": 19
    },
    {
      "epoch": 10.0,
      "learning_rate": 4.824561403508772e-05,
      "loss": 0.4434,
      "step": 20
    },
    {
      "epoch": 10.0,
      "eval_accuracy_dropoff": 0.41871602559691734,
      "eval_accuracy_undropoff": 0.9944325094007127,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3713824207926633,
      "eval_iou_undropoff": 0.9699718650625573,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.42470020055770874,
      "eval_mean_accuracy": 0.706574267498815,
      "eval_mean_iou": 0.4471180952850735,
      "eval_overall_accuracy": 0.9704946517944336,
      "eval_runtime": 2.0254,
      "eval_samples_per_second": 9.875,
      "eval_steps_per_second": 0.987,
      "step": 20
    },
    {
      "epoch": 10.5,
      "learning_rate": 4.802631578947368e-05,
      "loss": 0.3698,
      "step": 21
    },
    {
      "epoch": 11.0,
      "learning_rate": 4.780701754385965e-05,
      "loss": 0.4431,
      "step": 22
    },
    {
      "epoch": 11.5,
      "learning_rate": 4.758771929824562e-05,
      "loss": 0.3667,
      "step": 23
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.736842105263158e-05,
      "loss": 0.3123,
      "step": 24
    },
    {
      "epoch": 12.5,
      "learning_rate": 4.714912280701755e-05,
      "loss": 0.3021,
      "step": 25
    },
    {
      "epoch": 13.0,
      "learning_rate": 4.6929824561403515e-05,
      "loss": 0.2765,
      "step": 26
    },
    {
      "epoch": 13.5,
      "learning_rate": 4.671052631578948e-05,
      "loss": 0.2547,
      "step": 27
    },
    {
      "epoch": 14.0,
      "learning_rate": 4.649122807017544e-05,
      "loss": 0.2615,
      "step": 28
    },
    {
      "epoch": 14.5,
      "learning_rate": 4.6271929824561406e-05,
      "loss": 0.2296,
      "step": 29
    },
    {
      "epoch": 15.0,
      "learning_rate": 4.605263157894737e-05,
      "loss": 0.2107,
      "step": 30
    },
    {
      "epoch": 15.0,
      "eval_accuracy_dropoff": 0.40458267391453934,
      "eval_accuracy_undropoff": 0.9961075726111145,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3712713740875759,
      "eval_iou_undropoff": 0.9710249883455491,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.2725875973701477,
      "eval_mean_accuracy": 0.7003451232628269,
      "eval_mean_iou": 0.6711481812165625,
      "eval_overall_accuracy": 0.9715124130249023,
      "eval_runtime": 2.0394,
      "eval_samples_per_second": 9.807,
      "eval_steps_per_second": 0.981,
      "step": 30
    },
    {
      "epoch": 15.5,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.205,
      "step": 31
    },
    {
      "epoch": 16.0,
      "learning_rate": 4.56140350877193e-05,
      "loss": 0.1816,
      "step": 32
    },
    {
      "epoch": 16.5,
      "learning_rate": 4.539473684210527e-05,
      "loss": 0.1842,
      "step": 33
    },
    {
      "epoch": 17.0,
      "learning_rate": 4.517543859649123e-05,
      "loss": 0.149,
      "step": 34
    },
    {
      "epoch": 17.5,
      "learning_rate": 4.4956140350877196e-05,
      "loss": 0.1361,
      "step": 35
    },
    {
      "epoch": 18.0,
      "learning_rate": 4.473684210526316e-05,
      "loss": 0.1646,
      "step": 36
    },
    {
      "epoch": 18.5,
      "learning_rate": 4.451754385964912e-05,
      "loss": 0.1288,
      "step": 37
    },
    {
      "epoch": 19.0,
      "learning_rate": 4.429824561403509e-05,
      "loss": 0.1178,
      "step": 38
    },
    {
      "epoch": 19.5,
      "learning_rate": 4.407894736842105e-05,
      "loss": 0.1103,
      "step": 39
    },
    {
      "epoch": 20.0,
      "learning_rate": 4.3859649122807014e-05,
      "loss": 0.1678,
      "step": 40
    },
    {
      "epoch": 20.0,
      "eval_accuracy_dropoff": 0.47821739030711713,
      "eval_accuracy_undropoff": 0.9904238604465575,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.39174564284479585,
      "eval_iou_undropoff": 0.9685003651750826,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.23879647254943848,
      "eval_mean_accuracy": 0.7343206253768373,
      "eval_mean_iou": 0.6801230040099393,
      "eval_overall_accuracy": 0.9691267013549805,
      "eval_runtime": 2.833,
      "eval_samples_per_second": 7.06,
      "eval_steps_per_second": 0.706,
      "step": 40
    },
    {
      "epoch": 20.5,
      "learning_rate": 4.3640350877192985e-05,
      "loss": 0.0934,
      "step": 41
    },
    {
      "epoch": 21.0,
      "learning_rate": 4.342105263157895e-05,
      "loss": 0.1108,
      "step": 42
    },
    {
      "epoch": 21.5,
      "learning_rate": 4.320175438596491e-05,
      "loss": 0.0932,
      "step": 43
    },
    {
      "epoch": 22.0,
      "learning_rate": 4.298245614035088e-05,
      "loss": 0.097,
      "step": 44
    },
    {
      "epoch": 22.5,
      "learning_rate": 4.2763157894736847e-05,
      "loss": 0.0813,
      "step": 45
    },
    {
      "epoch": 23.0,
      "learning_rate": 4.254385964912281e-05,
      "loss": 0.1387,
      "step": 46
    },
    {
      "epoch": 23.5,
      "learning_rate": 4.2324561403508774e-05,
      "loss": 0.0835,
      "step": 47
    },
    {
      "epoch": 24.0,
      "learning_rate": 4.210526315789474e-05,
      "loss": 0.0745,
      "step": 48
    },
    {
      "epoch": 24.5,
      "learning_rate": 4.188596491228071e-05,
      "loss": 0.0736,
      "step": 49
    },
    {
      "epoch": 25.0,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.0972,
      "step": 50
    },
    {
      "epoch": 25.0,
      "eval_accuracy_dropoff": 0.42408770843367966,
      "eval_accuracy_undropoff": 0.9951975020323848,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3818202843962053,
      "eval_iou_undropoff": 0.9709387427964766,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.18494448065757751,
      "eval_mean_accuracy": 0.7096426052330322,
      "eval_mean_iou": 0.676379513596341,
      "eval_overall_accuracy": 0.9714511871337891,
      "eval_runtime": 2.046,
      "eval_samples_per_second": 9.775,
      "eval_steps_per_second": 0.978,
      "step": 50
    },
    {
      "epoch": 25.5,
      "learning_rate": 4.1447368421052636e-05,
      "loss": 0.0686,
      "step": 51
    },
    {
      "epoch": 26.0,
      "learning_rate": 4.12280701754386e-05,
      "loss": 0.0779,
      "step": 52
    },
    {
      "epoch": 26.5,
      "learning_rate": 4.100877192982456e-05,
      "loss": 0.0723,
      "step": 53
    },
    {
      "epoch": 27.0,
      "learning_rate": 4.078947368421053e-05,
      "loss": 0.0685,
      "step": 54
    },
    {
      "epoch": 27.5,
      "learning_rate": 4.057017543859649e-05,
      "loss": 0.0662,
      "step": 55
    },
    {
      "epoch": 28.0,
      "learning_rate": 4.0350877192982455e-05,
      "loss": 0.0641,
      "step": 56
    },
    {
      "epoch": 28.5,
      "learning_rate": 4.0131578947368425e-05,
      "loss": 0.0567,
      "step": 57
    },
    {
      "epoch": 29.0,
      "learning_rate": 3.991228070175439e-05,
      "loss": 0.0769,
      "step": 58
    },
    {
      "epoch": 29.5,
      "learning_rate": 3.969298245614035e-05,
      "loss": 0.0618,
      "step": 59
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.9473684210526316e-05,
      "loss": 0.0604,
      "step": 60
    },
    {
      "epoch": 30.0,
      "eval_accuracy_dropoff": 0.5239248606619418,
      "eval_accuracy_undropoff": 0.9897283221407057,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.42364186677942717,
      "eval_iou_undropoff": 0.9697005089236638,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.20193985104560852,
      "eval_mean_accuracy": 0.7568265914013237,
      "eval_mean_iou": 0.464447458567697,
      "eval_overall_accuracy": 0.9703605651855469,
      "eval_runtime": 2.0527,
      "eval_samples_per_second": 9.743,
      "eval_steps_per_second": 0.974,
      "step": 60
    },
    {
      "epoch": 30.5,
      "learning_rate": 3.925438596491228e-05,
      "loss": 0.0556,
      "step": 61
    },
    {
      "epoch": 31.0,
      "learning_rate": 3.9035087719298244e-05,
      "loss": 0.0561,
      "step": 62
    },
    {
      "epoch": 31.5,
      "learning_rate": 3.8815789473684214e-05,
      "loss": 0.055,
      "step": 63
    },
    {
      "epoch": 32.0,
      "learning_rate": 3.859649122807018e-05,
      "loss": 0.0524,
      "step": 64
    },
    {
      "epoch": 32.5,
      "learning_rate": 3.837719298245614e-05,
      "loss": 0.051,
      "step": 65
    },
    {
      "epoch": 33.0,
      "learning_rate": 3.815789473684211e-05,
      "loss": 0.0542,
      "step": 66
    },
    {
      "epoch": 33.5,
      "learning_rate": 3.7938596491228076e-05,
      "loss": 0.0516,
      "step": 67
    },
    {
      "epoch": 34.0,
      "learning_rate": 3.771929824561404e-05,
      "loss": 0.0466,
      "step": 68
    },
    {
      "epoch": 34.5,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0502,
      "step": 69
    },
    {
      "epoch": 35.0,
      "learning_rate": 3.728070175438597e-05,
      "loss": 0.0497,
      "step": 70
    },
    {
      "epoch": 35.0,
      "eval_accuracy_dropoff": 0.4775384756531113,
      "eval_accuracy_undropoff": 0.9913645784928411,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.39826387693343585,
      "eval_iou_undropoff": 0.9693923400870129,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.1793430596590042,
      "eval_mean_accuracy": 0.7344515270729762,
      "eval_mean_iou": 0.6838281085102244,
      "eval_overall_accuracy": 0.9700000762939454,
      "eval_runtime": 6.1591,
      "eval_samples_per_second": 3.247,
      "eval_steps_per_second": 0.325,
      "step": 70
    },
    {
      "epoch": 35.5,
      "learning_rate": 3.706140350877193e-05,
      "loss": 0.0442,
      "step": 71
    },
    {
      "epoch": 36.0,
      "learning_rate": 3.6842105263157895e-05,
      "loss": 0.0477,
      "step": 72
    },
    {
      "epoch": 36.5,
      "learning_rate": 3.662280701754386e-05,
      "loss": 0.0418,
      "step": 73
    },
    {
      "epoch": 37.0,
      "learning_rate": 3.640350877192983e-05,
      "loss": 0.0829,
      "step": 74
    },
    {
      "epoch": 37.5,
      "learning_rate": 3.618421052631579e-05,
      "loss": 0.0458,
      "step": 75
    },
    {
      "epoch": 38.0,
      "learning_rate": 3.5964912280701756e-05,
      "loss": 0.0465,
      "step": 76
    },
    {
      "epoch": 38.5,
      "learning_rate": 3.574561403508772e-05,
      "loss": 0.0445,
      "step": 77
    },
    {
      "epoch": 39.0,
      "learning_rate": 3.5526315789473684e-05,
      "loss": 0.0509,
      "step": 78
    },
    {
      "epoch": 39.5,
      "learning_rate": 3.530701754385965e-05,
      "loss": 0.0429,
      "step": 79
    },
    {
      "epoch": 40.0,
      "learning_rate": 3.508771929824561e-05,
      "loss": 0.0492,
      "step": 80
    },
    {
      "epoch": 40.0,
      "eval_accuracy_dropoff": 0.5238606390054817,
      "eval_accuracy_undropoff": 0.9895599600786884,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.42225237750136807,
      "eval_iou_undropoff": 0.969532907193877,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.1999509036540985,
      "eval_mean_accuracy": 0.7567102995420851,
      "eval_mean_iou": 0.46392842823174835,
      "eval_overall_accuracy": 0.970196533203125,
      "eval_runtime": 2.1001,
      "eval_samples_per_second": 9.523,
      "eval_steps_per_second": 0.952,
      "step": 80
    }
  ],
  "max_steps": 240,
  "num_train_epochs": 120,
  "total_flos": 4.630399399703347e+17,
  "trial_name": null,
  "trial_params": null
}
|
|