{
  "best_metric": 0.22809401154518127,
  "best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b5_4/checkpoint-200",
  "epoch": 100.0,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.5, "learning_rate": 5.833333333333333e-07, "loss": 0.9908, "step": 1 },
    { "epoch": 1.0, "learning_rate": 1.1666666666666666e-06, "loss": 0.9755, "step": 2 },
    { "epoch": 1.5, "learning_rate": 1.75e-06, "loss": 0.9832, "step": 3 },
    { "epoch": 2.0, "learning_rate": 2.333333333333333e-06, "loss": 0.9833, "step": 4 },
    { "epoch": 2.5, "learning_rate": 2.9166666666666666e-06, "loss": 0.9793, "step": 5 },
    { "epoch": 3.0, "learning_rate": 3.5e-06, "loss": 0.975, "step": 6 },
    { "epoch": 3.5, "learning_rate": 4.083333333333334e-06, "loss": 0.9728, "step": 7 },
    { "epoch": 4.0, "learning_rate": 4.666666666666666e-06, "loss": 0.9563, "step": 8 },
    { "epoch": 4.5, "learning_rate": 5.25e-06, "loss": 0.952, "step": 9 },
    { "epoch": 5.0, "learning_rate": 5.833333333333333e-06, "loss": 0.9465, "step": 10 },
    { "epoch": 5.0, "eval_accuracy_dropoff": 0.30711254845294617, "eval_accuracy_undropoff": 0.6931472063539763, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.12608002952896777, "eval_iou_undropoff": 0.6824276188640029, "eval_iou_unlabeled": 0.0, "eval_loss": 0.9973907470703125, "eval_mean_accuracy": 0.5001298774034613, "eval_mean_iou": 0.26950254946432356, "eval_overall_accuracy": 0.677096176147461, "eval_runtime": 2.6009, "eval_samples_per_second": 7.69, "eval_steps_per_second": 0.769, "step": 10 },
    { "epoch": 5.5, "learning_rate": 6.4166666666666665e-06, "loss": 0.9364, "step": 11 },
    { "epoch": 6.0, "learning_rate": 7e-06, "loss": 0.9277, "step": 12 },
    { "epoch": 6.5, "learning_rate": 6.969298245614035e-06, "loss": 0.9148, "step": 13 },
    { "epoch": 7.0, "learning_rate": 6.9385964912280696e-06, "loss": 0.9038, "step": 14 },
    { "epoch": 7.5, "learning_rate": 6.907894736842105e-06, "loss": 0.8925, "step": 15 },
    { "epoch": 8.0, "learning_rate": 6.87719298245614e-06, "loss": 0.8917, "step": 16 },
    { "epoch": 8.5, "learning_rate": 6.846491228070175e-06, "loss": 0.8802, "step": 17 },
    { "epoch": 9.0, "learning_rate": 6.815789473684211e-06, "loss": 0.8682, "step": 18 },
    { "epoch": 9.5, "learning_rate": 6.785087719298246e-06, "loss": 0.8584, "step": 19 },
    { "epoch": 10.0, "learning_rate": 6.75438596491228e-06, "loss": 0.8558, "step": 20 },
    { "epoch": 10.0, "eval_accuracy_dropoff": 0.543416133397555, "eval_accuracy_undropoff": 0.8804402488813177, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.2787406762512059, "eval_iou_undropoff": 0.8677574035561078, "eval_iou_unlabeled": 0.0, "eval_loss": 0.8237255215644836, "eval_mean_accuracy": 0.7119281911394364, "eval_mean_iou": 0.38216602660243787, "eval_overall_accuracy": 0.8664270401000976, "eval_runtime": 2.0893, "eval_samples_per_second": 9.573, "eval_steps_per_second": 0.957, "step": 20 },
    { "epoch": 10.5, "learning_rate": 6.7236842105263156e-06, "loss": 0.8292, "step": 21 },
    { "epoch": 11.0, "learning_rate": 6.692982456140351e-06, "loss": 0.8484, "step": 22 },
    { "epoch": 11.5, "learning_rate": 6.662280701754385e-06, "loss": 0.8316, "step": 23 },
    { "epoch": 12.0, "learning_rate": 6.6315789473684205e-06, "loss": 0.8077, "step": 24 },
    { "epoch": 12.5, "learning_rate": 6.600877192982457e-06, "loss": 0.8079, "step": 25 },
    { "epoch": 13.0, "learning_rate": 6.570175438596491e-06, "loss": 0.7915, "step": 26 },
    { "epoch": 13.5, "learning_rate": 6.539473684210526e-06, "loss": 0.792, "step": 27 },
    { "epoch": 14.0, "learning_rate": 6.5087719298245616e-06, "loss": 0.7818, "step": 28 },
    { "epoch": 14.5, "learning_rate": 6.478070175438596e-06, "loss": 0.7716, "step": 29 },
    { "epoch": 15.0, "learning_rate": 6.447368421052631e-06, "loss": 0.7585, "step": 30 },
    { "epoch": 15.0, "eval_accuracy_dropoff": 0.5624716163214752, "eval_accuracy_undropoff": 0.9348810967813194, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.349393058642503, "eval_iou_undropoff": 0.9201710251519329, "eval_iou_unlabeled": 0.0, "eval_loss": 0.680091917514801, "eval_mean_accuracy": 0.7486763565513973, "eval_mean_iou": 0.42318802793147864, "eval_overall_accuracy": 0.9193965911865234, "eval_runtime": 2.1213, "eval_samples_per_second": 9.428, "eval_steps_per_second": 0.943, "step": 30 },
    { "epoch": 15.5, "learning_rate": 6.4166666666666665e-06, "loss": 0.7533, "step": 31 },
    { "epoch": 16.0, "learning_rate": 6.385964912280701e-06, "loss": 0.7398, "step": 32 },
    { "epoch": 16.5, "learning_rate": 6.355263157894737e-06, "loss": 0.7511, "step": 33 },
    { "epoch": 17.0, "learning_rate": 6.324561403508772e-06, "loss": 0.7172, "step": 34 },
    { "epoch": 17.5, "learning_rate": 6.293859649122807e-06, "loss": 0.708, "step": 35 },
    { "epoch": 18.0, "learning_rate": 6.263157894736842e-06, "loss": 0.7258, "step": 36 },
    { "epoch": 18.5, "learning_rate": 6.232456140350877e-06, "loss": 0.7048, "step": 37 },
    { "epoch": 19.0, "learning_rate": 6.201754385964912e-06, "loss": 0.6919, "step": 38 },
    { "epoch": 19.5, "learning_rate": 6.171052631578947e-06, "loss": 0.6687, "step": 39 },
    { "epoch": 20.0, "learning_rate": 6.140350877192982e-06, "loss": 0.715, "step": 40 },
    { "epoch": 20.0, "eval_accuracy_dropoff": 0.5952063120713778, "eval_accuracy_undropoff": 0.9374720018468085, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.3660574736640166, "eval_iou_undropoff": 0.9233451410139708, "eval_iou_unlabeled": 0.0, "eval_loss": 0.6076321601867676, "eval_mean_accuracy": 0.7663391569590932, "eval_mean_iou": 0.4298008715593291, "eval_overall_accuracy": 0.923240852355957, "eval_runtime": 2.0795, "eval_samples_per_second": 9.618, "eval_steps_per_second": 0.962, "step": 40 },
    { "epoch": 20.5, "learning_rate": 6.1096491228070174e-06, "loss": 0.663, "step": 41 },
    { "epoch": 21.0, "learning_rate": 6.078947368421053e-06, "loss": 0.6637, "step": 42 },
    { "epoch": 21.5, "learning_rate": 6.048245614035088e-06, "loss": 0.64, "step": 43 },
    { "epoch": 22.0, "learning_rate": 6.017543859649122e-06, "loss": 0.6611, "step": 44 },
    { "epoch": 22.5, "learning_rate": 5.986842105263158e-06, "loss": 0.6221, "step": 45 },
    { "epoch": 23.0, "learning_rate": 5.956140350877193e-06, "loss": 0.6652, "step": 46 },
    { "epoch": 23.5, "learning_rate": 5.925438596491228e-06, "loss": 0.6329, "step": 47 },
    { "epoch": 24.0, "learning_rate": 5.894736842105263e-06, "loss": 0.6025, "step": 48 },
    { "epoch": 24.5, "learning_rate": 5.864035087719299e-06, "loss": 0.6005, "step": 49 },
    { "epoch": 25.0, "learning_rate": 5.833333333333333e-06, "loss": 0.6145, "step": 50 },
    { "epoch": 25.0, "eval_accuracy_dropoff": 0.5993531961742242, "eval_accuracy_undropoff": 0.95265662796263, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.38194797677722625, "eval_iou_undropoff": 0.9374606493852639, "eval_iou_unlabeled": 0.0, "eval_loss": 0.5297996997833252, "eval_mean_accuracy": 0.7760049120684271, "eval_mean_iou": 0.4398028753874967, "eval_overall_accuracy": 0.9379665374755859, "eval_runtime": 5.0194, "eval_samples_per_second": 3.985, "eval_steps_per_second": 0.398, "step": 50 },
    { "epoch": 25.5, "learning_rate": 5.802631578947368e-06, "loss": 0.5857, "step": 51 },
    { "epoch": 26.0, "learning_rate": 5.771929824561404e-06, "loss": 0.5952, "step": 52 },
    { "epoch": 26.5, "learning_rate": 5.741228070175439e-06, "loss": 0.5949, "step": 53 },
    { "epoch": 27.0, "learning_rate": 5.710526315789473e-06, "loss": 0.5617, "step": 54 },
    { "epoch": 27.5, "learning_rate": 5.6798245614035086e-06, "loss": 0.5524, "step": 55 },
    { "epoch": 28.0, "learning_rate": 5.649122807017544e-06, "loss": 0.5645, "step": 56 },
    { "epoch": 28.5, "learning_rate": 5.618421052631579e-06, "loss": 0.5292, "step": 57 },
    { "epoch": 29.0, "learning_rate": 5.587719298245614e-06, "loss": 0.5744, "step": 58 },
    { "epoch": 29.5, "learning_rate": 5.55701754385965e-06, "loss": 0.5422, "step": 59 },
    { "epoch": 30.0, "learning_rate": 5.526315789473684e-06, "loss": 0.5355, "step": 60 },
    { "epoch": 30.0, "eval_accuracy_dropoff": 0.5917658661895915, "eval_accuracy_undropoff": 0.9580567515475479, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.3856504387079416, "eval_iou_undropoff": 0.9421913855333733, "eval_iou_unlabeled": 0.0, "eval_loss": 0.4821084141731262, "eval_mean_accuracy": 0.7749113088685697, "eval_mean_iou": 0.4426139414137717, "eval_overall_accuracy": 0.9428266525268555, "eval_runtime": 2.0486, "eval_samples_per_second": 9.763, "eval_steps_per_second": 0.976, "step": 60 },
    { "epoch": 30.5, "learning_rate": 5.495614035087719e-06, "loss": 0.5131, "step": 61 },
    { "epoch": 31.0, "learning_rate": 5.4649122807017546e-06, "loss": 0.528, "step": 62 },
    { "epoch": 31.5, "learning_rate": 5.434210526315789e-06, "loss": 0.5148, "step": 63 },
    { "epoch": 32.0, "learning_rate": 5.403508771929824e-06, "loss": 0.497, "step": 64 },
    { "epoch": 32.5, "learning_rate": 5.37280701754386e-06, "loss": 0.5018, "step": 65 },
    { "epoch": 33.0, "learning_rate": 5.342105263157895e-06, "loss": 0.4883, "step": 66 },
    { "epoch": 33.5, "learning_rate": 5.31140350877193e-06, "loss": 0.4913, "step": 67 },
    { "epoch": 34.0, "learning_rate": 5.280701754385965e-06, "loss": 0.479, "step": 68 },
    { "epoch": 34.5, "learning_rate": 5.25e-06, "loss": 0.4777, "step": 69 },
    { "epoch": 35.0, "learning_rate": 5.219298245614035e-06, "loss": 0.4619, "step": 70 },
    { "epoch": 35.0, "eval_accuracy_dropoff": 0.57430675015482, "eval_accuracy_undropoff": 0.9688024701062811, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.3961785898502891, "eval_iou_undropoff": 0.9516637965632654, "eval_iou_unlabeled": 0.0, "eval_loss": 0.426582396030426, "eval_mean_accuracy": 0.7715546101305506, "eval_mean_iou": 0.4492807954711848, "eval_overall_accuracy": 0.9523996353149414, "eval_runtime": 2.1625, "eval_samples_per_second": 9.248, "eval_steps_per_second": 0.925, "step": 70 },
    { "epoch": 35.5, "learning_rate": 5.18859649122807e-06, "loss": 0.4508, "step": 71 },
    { "epoch": 36.0, "learning_rate": 5.157894736842105e-06, "loss": 0.465, "step": 72 },
    { "epoch": 36.5, "learning_rate": 5.12719298245614e-06, "loss": 0.4234, "step": 73 },
    { "epoch": 37.0, "learning_rate": 5.096491228070176e-06, "loss": 0.5107, "step": 74 },
    { "epoch": 37.5, "learning_rate": 5.0657894736842104e-06, "loss": 0.4482, "step": 75 },
    { "epoch": 38.0, "learning_rate": 5.035087719298246e-06, "loss": 0.4299, "step": 76 },
    { "epoch": 38.5, "learning_rate": 5.004385964912281e-06, "loss": 0.4463, "step": 77 },
    { "epoch": 39.0, "learning_rate": 4.973684210526315e-06, "loss": 0.4152, "step": 78 },
    { "epoch": 39.5, "learning_rate": 4.942982456140351e-06, "loss": 0.4062, "step": 79 },
    { "epoch": 40.0, "learning_rate": 4.912280701754386e-06, "loss": 0.4367, "step": 80 },
    { "epoch": 40.0, "eval_accuracy_dropoff": 0.57421041767013, "eval_accuracy_undropoff": 0.9734218793066906, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.39970303668933804, "eval_iou_undropoff": 0.9559435963272845, "eval_iou_unlabeled": 0.0, "eval_loss": 0.3941105604171753, "eval_mean_accuracy": 0.7738161484884103, "eval_mean_iou": 0.4518822110055409, "eval_overall_accuracy": 0.9568229675292969, "eval_runtime": 2.0471, "eval_samples_per_second": 9.77, "eval_steps_per_second": 0.977, "step": 80 },
    { "epoch": 40.5, "learning_rate": 4.88157894736842e-06, "loss": 0.3861, "step": 81 },
    { "epoch": 41.0, "learning_rate": 4.8508771929824564e-06, "loss": 0.4259, "step": 82 },
    { "epoch": 41.5, "learning_rate": 4.820175438596492e-06, "loss": 0.4131, "step": 83 },
    { "epoch": 42.0, "learning_rate": 4.789473684210526e-06, "loss": 0.3905, "step": 84 },
    { "epoch": 42.5, "learning_rate": 4.758771929824561e-06, "loss": 0.3853, "step": 85 },
    { "epoch": 43.0, "learning_rate": 4.728070175438597e-06, "loss": 0.405, "step": 86 },
    { "epoch": 43.5, "learning_rate": 4.697368421052631e-06, "loss": 0.3566, "step": 87 },
    { "epoch": 44.0, "learning_rate": 4.666666666666666e-06, "loss": 0.4075, "step": 88 },
    { "epoch": 44.5, "learning_rate": 4.635964912280702e-06, "loss": 0.3765, "step": 89 },
    { "epoch": 45.0, "learning_rate": 4.605263157894737e-06, "loss": 0.3839, "step": 90 },
    { "epoch": 45.0, "eval_accuracy_dropoff": 0.5852886534094819, "eval_accuracy_undropoff": 0.9738350230900806, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.40173932756916064, "eval_iou_undropoff": 0.9567496536398221, "eval_iou_unlabeled": 0.0, "eval_loss": 0.3801003098487854, "eval_mean_accuracy": 0.7795618382497813, "eval_mean_iou": 0.45282966040299427, "eval_overall_accuracy": 0.957679557800293, "eval_runtime": 2.0345, "eval_samples_per_second": 9.83, "eval_steps_per_second": 0.983, "step": 90 },
    { "epoch": 45.5, "learning_rate": 4.574561403508772e-06, "loss": 0.3629, "step": 91 },
    { "epoch": 46.0, "learning_rate": 4.543859649122807e-06, "loss": 0.3631, "step": 92 },
    { "epoch": 46.5, "learning_rate": 4.513157894736842e-06, "loss": 0.3893, "step": 93 },
    { "epoch": 47.0, "learning_rate": 4.482456140350877e-06, "loss": 0.3262, "step": 94 },
    { "epoch": 47.5, "learning_rate": 4.451754385964912e-06, "loss": 0.3453, "step": 95 },
    { "epoch": 48.0, "learning_rate": 4.421052631578947e-06, "loss": 0.3552, "step": 96 },
    { "epoch": 48.5, "learning_rate": 4.390350877192982e-06, "loss": 0.3294, "step": 97 },
    { "epoch": 49.0, "learning_rate": 4.359649122807018e-06, "loss": 0.3545, "step": 98 },
    { "epoch": 49.5, "learning_rate": 4.3289473684210525e-06, "loss": 0.3348, "step": 99 },
    { "epoch": 50.0, "learning_rate": 4.298245614035088e-06, "loss": 0.3164, "step": 100 },
    { "epoch": 50.0, "eval_accuracy_dropoff": 0.579715131080988, "eval_accuracy_undropoff": 0.9773475412870145, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.402982790123693, "eval_iou_undropoff": 0.9598959470439332, "eval_iou_unlabeled": 0.0, "eval_loss": 0.35493195056915283, "eval_mean_accuracy": 0.7785313361840013, "eval_mean_iou": 0.45429291238920877, "eval_overall_accuracy": 0.9608142852783204, "eval_runtime": 4.9884, "eval_samples_per_second": 4.009, "eval_steps_per_second": 0.401, "step": 100 },
    { "epoch": 50.5, "learning_rate": 4.267543859649123e-06, "loss": 0.3212, "step": 101 },
    { "epoch": 51.0, "learning_rate": 4.2368421052631575e-06, "loss": 0.317, "step": 102 },
    { "epoch": 51.5, "learning_rate": 4.206140350877193e-06, "loss": 0.3036, "step": 103 },
    { "epoch": 52.0, "learning_rate": 4.175438596491228e-06, "loss": 0.3359, "step": 104 },
    { "epoch": 52.5, "learning_rate": 4.144736842105262e-06, "loss": 0.3258, "step": 105 },
    { "epoch": 53.0, "learning_rate": 4.1140350877192985e-06, "loss": 0.2865, "step": 106 },
    { "epoch": 53.5, "learning_rate": 4.083333333333334e-06, "loss": 0.302, "step": 107 },
    { "epoch": 54.0, "learning_rate": 4.052631578947368e-06, "loss": 0.2893, "step": 108 },
    { "epoch": 54.5, "learning_rate": 4.0219298245614034e-06, "loss": 0.2898, "step": 109 },
    { "epoch": 55.0, "learning_rate": 3.991228070175439e-06, "loss": 0.3018, "step": 110 },
    { "epoch": 55.0, "eval_accuracy_dropoff": 0.56500378449047, "eval_accuracy_undropoff": 0.9812415607521366, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.40872075659532103, "eval_iou_undropoff": 0.9631125878264742, "eval_iou_unlabeled": 0.0, "eval_loss": 0.3326951861381531, "eval_mean_accuracy": 0.7731226726213033, "eval_mean_iou": 0.4572777814739317, "eval_overall_accuracy": 0.9639347076416016, "eval_runtime": 2.119, "eval_samples_per_second": 9.438, "eval_steps_per_second": 0.944, "step": 110 },
    { "epoch": 55.5, "learning_rate": 3.960526315789473e-06, "loss": 0.3035, "step": 111 },
    { "epoch": 56.0, "learning_rate": 3.929824561403508e-06, "loss": 0.2833, "step": 112 },
    { "epoch": 56.5, "learning_rate": 3.899122807017544e-06, "loss": 0.3054, "step": 113 },
    { "epoch": 57.0, "learning_rate": 3.868421052631579e-06, "loss": 0.2554, "step": 114 },
    { "epoch": 57.5, "learning_rate": 3.837719298245614e-06, "loss": 0.2572, "step": 115 },
    { "epoch": 58.0, "learning_rate": 3.8070175438596494e-06, "loss": 0.3069, "step": 116 },
    { "epoch": 58.5, "learning_rate": 3.7763157894736843e-06, "loss": 0.2838, "step": 117 },
    { "epoch": 59.0, "learning_rate": 3.745614035087719e-06, "loss": 0.2495, "step": 118 },
    { "epoch": 59.5, "learning_rate": 3.7149122807017544e-06, "loss": 0.2681, "step": 119 },
    { "epoch": 60.0, "learning_rate": 3.6842105263157892e-06, "loss": 0.2646, "step": 120 },
    { "epoch": 60.0, "eval_accuracy_dropoff": 0.5570815844400101, "eval_accuracy_undropoff": 0.9835134535417228, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.412100255185145, "eval_iou_undropoff": 0.9650224323826128, "eval_iou_unlabeled": 0.0, "eval_loss": 0.3126855492591858, "eval_mean_accuracy": 0.7702975189908665, "eval_mean_iou": 0.4590408958559193, "eval_overall_accuracy": 0.9657827377319336, "eval_runtime": 2.0509, "eval_samples_per_second": 9.752, "eval_steps_per_second": 0.975, "step": 120 },
    { "epoch": 60.5, "learning_rate": 3.653508771929824e-06, "loss": 0.2256, "step": 121 },
    { "epoch": 61.0, "learning_rate": 3.62280701754386e-06, "loss": 0.3095, "step": 122 },
    { "epoch": 61.5, "learning_rate": 3.592105263157895e-06, "loss": 0.2611, "step": 123 },
    { "epoch": 62.0, "learning_rate": 3.56140350877193e-06, "loss": 0.2418, "step": 124 },
    { "epoch": 62.5, "learning_rate": 3.530701754385965e-06, "loss": 0.2557, "step": 125 },
    { "epoch": 63.0, "learning_rate": 3.5e-06, "loss": 0.2477, "step": 126 },
    { "epoch": 63.5, "learning_rate": 3.4692982456140348e-06, "loss": 0.2424, "step": 127 },
    { "epoch": 64.0, "learning_rate": 3.43859649122807e-06, "loss": 0.2324, "step": 128 },
    { "epoch": 64.5, "learning_rate": 3.4078947368421053e-06, "loss": 0.2386, "step": 129 },
    { "epoch": 65.0, "learning_rate": 3.37719298245614e-06, "loss": 0.2378, "step": 130 },
    { "epoch": 65.0, "eval_accuracy_dropoff": 0.5606688226794192, "eval_accuracy_undropoff": 0.984957665697822, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.4217334745748091, "eval_iou_undropoff": 0.9665563204257981, "eval_iou_unlabeled": 0.0, "eval_loss": 0.2958146035671234, "eval_mean_accuracy": 0.7728132441886206, "eval_mean_iou": 0.46276326500020243, "eval_overall_accuracy": 0.9673160552978516, "eval_runtime": 2.3341, "eval_samples_per_second": 8.568, "eval_steps_per_second": 0.857, "step": 130 },
    { "epoch": 65.5, "learning_rate": 3.3464912280701754e-06, "loss": 0.2187, "step": 131 },
    { "epoch": 66.0, "learning_rate": 3.3157894736842103e-06, "loss": 0.2415, "step": 132 },
    { "epoch": 66.5, "learning_rate": 3.2850877192982455e-06, "loss": 0.2579, "step": 133 },
    { "epoch": 67.0, "learning_rate": 3.2543859649122808e-06, "loss": 0.2052, "step": 134 },
    { "epoch": 67.5, "learning_rate": 3.2236842105263156e-06, "loss": 0.234, "step": 135 },
    { "epoch": 68.0, "learning_rate": 3.1929824561403505e-06, "loss": 0.2178, "step": 136 },
    { "epoch": 68.5, "learning_rate": 3.162280701754386e-06, "loss": 0.2077, "step": 137 },
    { "epoch": 69.0, "learning_rate": 3.131578947368421e-06, "loss": 0.2241, "step": 138 },
    { "epoch": 69.5, "learning_rate": 3.100877192982456e-06, "loss": 0.2022, "step": 139 },
    { "epoch": 70.0, "learning_rate": 3.070175438596491e-06, "loss": 0.2076, "step": 140 },
    { "epoch": 70.0, "eval_accuracy_dropoff": 0.5586183169338746, "eval_accuracy_undropoff": 0.9871487606184022, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.43398741259738133, "eval_iou_undropoff": 0.9686073602153299, "eval_iou_unlabeled": 0.0, "eval_loss": 0.277849018573761, "eval_mean_accuracy": 0.7728835387761384, "eval_mean_iou": 0.4675315909375704, "eval_overall_accuracy": 0.9693307876586914, "eval_runtime": 2.1472, "eval_samples_per_second": 9.314, "eval_steps_per_second": 0.931, "step": 140 },
    { "epoch": 70.5, "learning_rate": 3.0394736842105263e-06, "loss": 0.2043, "step": 141 },
    { "epoch": 71.0, "learning_rate": 3.008771929824561e-06, "loss": 0.1992, "step": 142 },
    { "epoch": 71.5, "learning_rate": 2.9780701754385965e-06, "loss": 0.1925, "step": 143 },
    { "epoch": 72.0, "learning_rate": 2.9473684210526313e-06, "loss": 0.216, "step": 144 },
    { "epoch": 72.5, "learning_rate": 2.9166666666666666e-06, "loss": 0.2052, "step": 145 },
    { "epoch": 73.0, "learning_rate": 2.885964912280702e-06, "loss": 0.1942, "step": 146 },
    { "epoch": 73.5, "learning_rate": 2.8552631578947367e-06, "loss": 0.1871, "step": 147 },
    { "epoch": 74.0, "learning_rate": 2.824561403508772e-06, "loss": 0.2154, "step": 148 },
    { "epoch": 74.5, "learning_rate": 2.793859649122807e-06, "loss": 0.1965, "step": 149 },
    { "epoch": 75.0, "learning_rate": 2.763157894736842e-06, "loss": 0.1951, "step": 150 },
    { "epoch": 75.0, "eval_accuracy_dropoff": 0.5567237780683043, "eval_accuracy_undropoff": 0.9871250784843832, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.43141931883218215, "eval_iou_undropoff": 0.968521144388865, "eval_iou_unlabeled": 0.0, "eval_loss": 0.2648250460624695, "eval_mean_accuracy": 0.7719244282763438, "eval_mean_iou": 0.46664682107368244, "eval_overall_accuracy": 0.9692293167114258, "eval_runtime": 2.1475, "eval_samples_per_second": 9.313, "eval_steps_per_second": 0.931, "step": 150 },
    { "epoch": 75.5, "learning_rate": 2.7324561403508773e-06, "loss": 0.1975, "step": 151 },
    { "epoch": 76.0, "learning_rate": 2.701754385964912e-06, "loss": 0.1756, "step": 152 },
    { "epoch": 76.5, "learning_rate": 2.6710526315789474e-06, "loss": 0.1888, "step": 153 },
    { "epoch": 77.0, "learning_rate": 2.6403508771929826e-06, "loss": 0.1835, "step": 154 },
    { "epoch": 77.5, "learning_rate": 2.6096491228070175e-06, "loss": 0.2126, "step": 155 },
    { "epoch": 78.0, "learning_rate": 2.5789473684210523e-06, "loss": 0.1656, "step": 156 },
    { "epoch": 78.5, "learning_rate": 2.548245614035088e-06, "loss": 0.1864, "step": 157 },
    { "epoch": 79.0, "learning_rate": 2.517543859649123e-06, "loss": 0.1706, "step": 158 },
    { "epoch": 79.5, "learning_rate": 2.4868421052631577e-06, "loss": 0.1853, "step": 159 },
    { "epoch": 80.0, "learning_rate": 2.456140350877193e-06, "loss": 0.1734, "step": 160 },
    { "epoch": 80.0, "eval_accuracy_dropoff": 0.539682102800523, "eval_accuracy_undropoff": 0.9889820364048132, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.4321544542235413, "eval_iou_undropoff": 0.9696399303821179, "eval_iou_unlabeled": 0.0, "eval_loss": 0.25215277075767517, "eval_mean_accuracy": 0.7643320696026681, "eval_mean_iou": 0.4672647948685531, "eval_overall_accuracy": 0.9703004837036133, "eval_runtime": 2.1141, "eval_samples_per_second": 9.46, "eval_steps_per_second": 0.946, "step": 160 },
    { "epoch": 80.5, "learning_rate": 2.4254385964912282e-06, "loss": 0.1723, "step": 161 },
    { "epoch": 81.0, "learning_rate": 2.394736842105263e-06, "loss": 0.1727, "step": 162 },
    { "epoch": 81.5, "learning_rate": 2.3640350877192983e-06, "loss": 0.1721, "step": 163 },
    { "epoch": 82.0, "learning_rate": 2.333333333333333e-06, "loss": 0.1671, "step": 164 },
    { "epoch": 82.5, "learning_rate": 2.3026315789473684e-06, "loss": 0.1593, "step": 165 },
    { "epoch": 83.0, "learning_rate": 2.2719298245614037e-06, "loss": 0.1843, "step": 166 },
    { "epoch": 83.5, "learning_rate": 2.2412280701754385e-06, "loss": 0.1642, "step": 167 },
    { "epoch": 84.0, "learning_rate": 2.2105263157894734e-06, "loss": 0.1664, "step": 168 },
    { "epoch": 84.5, "learning_rate": 2.179824561403509e-06, "loss": 0.1688, "step": 169 },
    { "epoch": 85.0, "learning_rate": 2.149122807017544e-06, "loss": 0.1569, "step": 170 },
    { "epoch": 85.0, "eval_accuracy_dropoff": 0.5312094314089773, "eval_accuracy_undropoff": 0.9893838366450177, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.4282074606555437, "eval_iou_undropoff": 0.969664935333436, "eval_iou_unlabeled": 0.0, "eval_loss": 0.2435857504606247, "eval_mean_accuracy": 0.7602966340269974, "eval_mean_iou": 0.4659574653296599, "eval_overall_accuracy": 0.9703332901000976, "eval_runtime": 2.1386, "eval_samples_per_second": 9.352, "eval_steps_per_second": 0.935, "step": 170 },
    { "epoch": 85.5, "learning_rate": 2.1184210526315787e-06, "loss": 0.1727, "step": 171 },
    { "epoch": 86.0, "learning_rate": 2.087719298245614e-06, "loss": 0.1484, "step": 172 },
    { "epoch": 86.5, "learning_rate": 2.0570175438596493e-06, "loss": 0.1735, "step": 173 },
    { "epoch": 87.0, "learning_rate": 2.026315789473684e-06, "loss": 0.1573, "step": 174 },
    { "epoch": 87.5, "learning_rate": 1.9956140350877194e-06, "loss": 0.1487, "step": 175 },
    { "epoch": 88.0, "learning_rate": 1.964912280701754e-06, "loss": 0.1786, "step": 176 },
    { "epoch": 88.5, "learning_rate": 1.9342105263157895e-06, "loss": 0.1642, "step": 177 },
    { "epoch": 89.0, "learning_rate": 1.9035087719298247e-06, "loss": 0.1373, "step": 178 },
    { "epoch": 89.5, "learning_rate": 1.8728070175438596e-06, "loss": 0.1549, "step": 179 },
    { "epoch": 90.0, "learning_rate": 1.8421052631578946e-06, "loss": 0.1691, "step": 180 },
    { "epoch": 90.0, "eval_accuracy_dropoff": 0.536333402142251, "eval_accuracy_undropoff": 0.9884882937619468, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.42495702016145154, "eval_iou_undropoff": 0.968999484387958, "eval_iou_unlabeled": 0.0, "eval_loss": 0.24105700850486755, "eval_mean_accuracy": 0.7624108479520989, "eval_mean_iou": 0.46465216818313654, "eval_overall_accuracy": 0.9696880340576172, "eval_runtime": 2.1127, "eval_samples_per_second": 9.467, "eval_steps_per_second": 0.947, "step": 180 },
    { "epoch": 90.5, "learning_rate": 1.81140350877193e-06, "loss": 0.1479, "step": 181 },
    { "epoch": 91.0, "learning_rate": 1.780701754385965e-06, "loss": 0.17, "step": 182 },
    { "epoch": 91.5, "learning_rate": 1.75e-06, "loss": 0.147, "step": 183 },
    { "epoch": 92.0, "learning_rate": 1.719298245614035e-06, "loss": 0.1518, "step": 184 },
    { "epoch": 92.5, "learning_rate": 1.68859649122807e-06, "loss": 0.1644, "step": 185 },
    { "epoch": 93.0, "learning_rate": 1.6578947368421051e-06, "loss": 0.1347, "step": 186 },
    { "epoch": 93.5, "learning_rate": 1.6271929824561404e-06, "loss": 0.1816, "step": 187 },
    { "epoch": 94.0, "learning_rate": 1.5964912280701752e-06, "loss": 0.1317, "step": 188 },
    { "epoch": 94.5, "learning_rate": 1.5657894736842105e-06, "loss": 0.149, "step": 189 },
    { "epoch": 95.0, "learning_rate": 1.5350877192982455e-06, "loss": 0.1498, "step": 190 },
    { "epoch": 95.0, "eval_accuracy_dropoff": 0.5179109612605793, "eval_accuracy_undropoff": 0.9894966750482846, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.4175833117579613, "eval_iou_undropoff": 0.9692314619953263, "eval_iou_unlabeled": 0.0, "eval_loss": 0.23353056609630585, "eval_mean_accuracy": 0.753703818154432, "eval_mean_iou": 0.4622715912510959, "eval_overall_accuracy": 0.9698884963989258, "eval_runtime": 2.0928, "eval_samples_per_second": 9.557, "eval_steps_per_second": 0.956, "step": 190 },
    { "epoch": 95.5, "learning_rate": 1.5043859649122806e-06, "loss": 0.1655, "step": 191 },
    { "epoch": 96.0, "learning_rate": 1.4736842105263156e-06, "loss": 0.1366, "step": 192 },
    { "epoch": 96.5, "learning_rate": 1.442982456140351e-06, "loss": 0.1521, "step": 193 },
    { "epoch": 97.0, "learning_rate": 1.412280701754386e-06, "loss": 0.1423, "step": 194 },
    { "epoch": 97.5, "learning_rate": 1.381578947368421e-06, "loss": 0.1314, "step": 195 },
    { "epoch": 98.0, "learning_rate": 1.350877192982456e-06, "loss": 0.1636, "step": 196 },
    { "epoch": 98.5, "learning_rate": 1.3201754385964913e-06, "loss": 0.1308, "step": 197 },
    { "epoch": 99.0, "learning_rate": 1.2894736842105262e-06, "loss": 0.2098, "step": 198 },
    { "epoch": 99.5, "learning_rate": 1.2587719298245614e-06, "loss": 0.1392, "step": 199 },
    { "epoch": 100.0, "learning_rate": 1.2280701754385965e-06, "loss": 0.1478, "step": 200 },
    { "epoch": 100.0, "eval_accuracy_dropoff": 0.49344251014931534, "eval_accuracy_undropoff": 0.9906238650237766, "eval_accuracy_unlabeled": NaN, "eval_iou_dropoff": 0.4062143607017968, "eval_iou_undropoff": 0.9693348553642241, "eval_iou_unlabeled": 0.0, "eval_loss": 0.22809401154518127, "eval_mean_accuracy": 0.742033187586546, "eval_mean_iou": 0.45851640535534033, "eval_overall_accuracy": 0.9699514389038086, "eval_runtime": 2.0787, "eval_samples_per_second": 9.621, "eval_steps_per_second": 0.962, "step": 200 }
  ],
  "max_steps": 240,
  "num_train_epochs": 120,
  "total_flos": 1.1575998499258368e+18,
  "trial_name": null,
  "trial_params": null
}