{
"best_metric": 0.29069358110427856,
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b0_5/checkpoint-240",
"epoch": 80.0,
"global_step": 240,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 2.777777777777778e-06, |
|
"loss": 1.249, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 5.555555555555556e-06, |
|
"loss": 1.2488, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 1.2553, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 1.1111111111111112e-05, |
|
"loss": 1.2475, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 1.388888888888889e-05, |
|
"loss": 1.2427, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 1.2497, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 1.9444444444444445e-05, |
|
"loss": 1.2339, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 2.2222222222222223e-05, |
|
"loss": 1.2302, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.5e-05, |
|
"loss": 1.2196, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 2.777777777777778e-05, |
|
"loss": 1.2123, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"eval_accuracy_dropoff": 0.1908240861993705, |
|
"eval_accuracy_undropoff": 0.18874432817853504, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.04942539850194509, |
|
"eval_iou_undropoff": 0.188595832344016, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.1206141710281372, |
|
"eval_mean_accuracy": 0.18978420718895278, |
|
"eval_mean_iou": 0.07934041028198703, |
|
"eval_overall_accuracy": 0.18883641560872397, |
|
"eval_runtime": 1.7856, |
|
"eval_samples_per_second": 8.401, |
|
"eval_steps_per_second": 0.56, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 3.055555555555556e-05, |
|
"loss": 1.2065, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 1.1867, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 3.611111111111111e-05, |
|
"loss": 1.1956, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 3.888888888888889e-05, |
|
"loss": 1.1767, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 1.1659, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 4.4444444444444447e-05, |
|
"loss": 1.1494, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 4.722222222222222e-05, |
|
"loss": 1.1325, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 5e-05, |
|
"loss": 1.1703, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 4.985380116959065e-05, |
|
"loss": 1.1172, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"learning_rate": 4.970760233918128e-05, |
|
"loss": 1.0927, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"eval_accuracy_dropoff": 0.6449789785650286, |
|
"eval_accuracy_undropoff": 0.5299809582198437, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.12901550653889501, |
|
"eval_iou_undropoff": 0.529796420189495, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0984647274017334, |
|
"eval_mean_accuracy": 0.5874799683924361, |
|
"eval_mean_iou": 0.21960397557613, |
|
"eval_overall_accuracy": 0.5350728352864583, |
|
"eval_runtime": 1.7244, |
|
"eval_samples_per_second": 8.699, |
|
"eval_steps_per_second": 0.58, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 4.956140350877193e-05, |
|
"loss": 1.1371, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 4.941520467836258e-05, |
|
"loss": 1.064, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 4.926900584795322e-05, |
|
"loss": 1.0567, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 4.912280701754386e-05, |
|
"loss": 1.0585, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 8.33, |
|
"learning_rate": 4.8976608187134504e-05, |
|
"loss": 1.0397, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"learning_rate": 4.883040935672515e-05, |
|
"loss": 1.0011, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 4.868421052631579e-05, |
|
"loss": 0.9476, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"learning_rate": 4.853801169590643e-05, |
|
"loss": 0.9694, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 9.67, |
|
"learning_rate": 4.839181286549708e-05, |
|
"loss": 0.9674, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 4.824561403508772e-05, |
|
"loss": 1.0578, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.6399533622808832, |
|
"eval_accuracy_undropoff": 0.8725462021281238, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.23667468886542886, |
|
"eval_iou_undropoff": 0.8620580507674747, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9785888195037842, |
|
"eval_mean_accuracy": 0.7562497822045036, |
|
"eval_mean_iou": 0.36624424654430116, |
|
"eval_overall_accuracy": 0.8622474670410156, |
|
"eval_runtime": 1.8276, |
|
"eval_samples_per_second": 8.208, |
|
"eval_steps_per_second": 0.547, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 10.33, |
|
"learning_rate": 4.8099415204678366e-05, |
|
"loss": 0.9272, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 10.67, |
|
"learning_rate": 4.7953216374269006e-05, |
|
"loss": 0.9222, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 4.780701754385965e-05, |
|
"loss": 0.8411, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 11.33, |
|
"learning_rate": 4.7660818713450294e-05, |
|
"loss": 0.889, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 11.67, |
|
"learning_rate": 4.751461988304094e-05, |
|
"loss": 0.8438, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.86, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 12.33, |
|
"learning_rate": 4.722222222222222e-05, |
|
"loss": 0.8268, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 12.67, |
|
"learning_rate": 4.707602339181287e-05, |
|
"loss": 0.8049, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 4.6929824561403515e-05, |
|
"loss": 1.0057, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"learning_rate": 4.678362573099415e-05, |
|
"loss": 0.788, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"eval_accuracy_dropoff": 0.536460128196292, |
|
"eval_accuracy_undropoff": 0.964590165330336, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3397820203137278, |
|
"eval_iou_undropoff": 0.9467918336729614, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7939841747283936, |
|
"eval_mean_accuracy": 0.750525146763314, |
|
"eval_mean_iou": 0.42885795132889637, |
|
"eval_overall_accuracy": 0.9456334431966146, |
|
"eval_runtime": 1.891, |
|
"eval_samples_per_second": 7.932, |
|
"eval_steps_per_second": 0.529, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 13.67, |
|
"learning_rate": 4.6637426900584796e-05, |
|
"loss": 0.7872, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 4.649122807017544e-05, |
|
"loss": 0.8197, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 14.33, |
|
"learning_rate": 4.634502923976608e-05, |
|
"loss": 0.7351, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 14.67, |
|
"learning_rate": 4.619883040935672e-05, |
|
"loss": 0.7447, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 4.605263157894737e-05, |
|
"loss": 0.6917, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 15.33, |
|
"learning_rate": 4.590643274853802e-05, |
|
"loss": 0.7277, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 15.67, |
|
"learning_rate": 4.576023391812866e-05, |
|
"loss": 0.6602, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 4.56140350877193e-05, |
|
"loss": 0.6809, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 16.33, |
|
"learning_rate": 4.5467836257309945e-05, |
|
"loss": 0.6865, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 16.67, |
|
"learning_rate": 4.5321637426900585e-05, |
|
"loss": 0.6353, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 16.67, |
|
"eval_accuracy_dropoff": 0.38302088358949615, |
|
"eval_accuracy_undropoff": 0.9849956839341233, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2966305629072793, |
|
"eval_iou_undropoff": 0.9581110717398175, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.620606541633606, |
|
"eval_mean_accuracy": 0.6840082837618098, |
|
"eval_mean_iou": 0.41824721154903227, |
|
"eval_overall_accuracy": 0.9583414713541667, |
|
"eval_runtime": 1.7715, |
|
"eval_samples_per_second": 8.467, |
|
"eval_steps_per_second": 0.564, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 4.517543859649123e-05, |
|
"loss": 0.6601, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 17.33, |
|
"learning_rate": 4.502923976608187e-05, |
|
"loss": 0.6353, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 17.67, |
|
"learning_rate": 4.488304093567251e-05, |
|
"loss": 0.6038, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.5588, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 18.33, |
|
"learning_rate": 4.4590643274853806e-05, |
|
"loss": 0.5965, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 18.67, |
|
"learning_rate": 4.4444444444444447e-05, |
|
"loss": 0.5992, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 4.429824561403509e-05, |
|
"loss": 0.7979, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 19.33, |
|
"learning_rate": 4.4152046783625734e-05, |
|
"loss": 0.5471, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 19.67, |
|
"learning_rate": 4.400584795321638e-05, |
|
"loss": 0.5725, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 4.3859649122807014e-05, |
|
"loss": 0.6944, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.36309072529694214, |
|
"eval_accuracy_undropoff": 0.9900834262005954, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.30135334188209384, |
|
"eval_iou_undropoff": 0.9619544548614083, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.52126544713974, |
|
"eval_mean_accuracy": 0.6765870757487688, |
|
"eval_mean_iou": 0.4211025989145007, |
|
"eval_overall_accuracy": 0.9623214721679687, |
|
"eval_runtime": 1.857, |
|
"eval_samples_per_second": 8.078, |
|
"eval_steps_per_second": 0.539, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 20.33, |
|
"learning_rate": 4.371345029239766e-05, |
|
"loss": 0.5386, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 20.67, |
|
"learning_rate": 4.356725146198831e-05, |
|
"loss": 0.5411, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 4.342105263157895e-05, |
|
"loss": 0.8263, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 21.33, |
|
"learning_rate": 4.327485380116959e-05, |
|
"loss": 0.556, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 21.67, |
|
"learning_rate": 4.3128654970760236e-05, |
|
"loss": 0.5597, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 4.298245614035088e-05, |
|
"loss": 0.5996, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 22.33, |
|
"learning_rate": 4.283625730994152e-05, |
|
"loss": 0.5711, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 22.67, |
|
"learning_rate": 4.269005847953216e-05, |
|
"loss": 0.5204, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 4.254385964912281e-05, |
|
"loss": 0.8124, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 23.33, |
|
"learning_rate": 4.239766081871345e-05, |
|
"loss": 0.5046, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 23.33, |
|
"eval_accuracy_dropoff": 0.36825418705630986, |
|
"eval_accuracy_undropoff": 0.9909756437643757, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.30895554752439464, |
|
"eval_iou_undropoff": 0.9628048952316295, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4764857590198517, |
|
"eval_mean_accuracy": 0.6796149154103428, |
|
"eval_mean_iou": 0.42392014758534136, |
|
"eval_overall_accuracy": 0.9634028116861979, |
|
"eval_runtime": 1.754, |
|
"eval_samples_per_second": 8.552, |
|
"eval_steps_per_second": 0.57, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 23.67, |
|
"learning_rate": 4.22514619883041e-05, |
|
"loss": 0.5107, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.5692, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 24.33, |
|
"learning_rate": 4.195906432748538e-05, |
|
"loss": 0.4992, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 24.67, |
|
"learning_rate": 4.1812865497076025e-05, |
|
"loss": 0.514, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 0.7924, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 25.33, |
|
"learning_rate": 4.152046783625731e-05, |
|
"loss": 0.4816, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 25.67, |
|
"learning_rate": 4.137426900584795e-05, |
|
"loss": 0.4831, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 4.12280701754386e-05, |
|
"loss": 0.8194, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 26.33, |
|
"learning_rate": 4.1081871345029247e-05, |
|
"loss": 0.5093, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 26.67, |
|
"learning_rate": 4.093567251461988e-05, |
|
"loss": 0.4684, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 26.67, |
|
"eval_accuracy_dropoff": 0.27794242653984885, |
|
"eval_accuracy_undropoff": 0.9914216194986126, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.23522092062411898, |
|
"eval_iou_undropoff": 0.9593306905362942, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4643230140209198, |
|
"eval_mean_accuracy": 0.6346820230192307, |
|
"eval_mean_iou": 0.39818387038680436, |
|
"eval_overall_accuracy": 0.9598302205403646, |
|
"eval_runtime": 1.7927, |
|
"eval_samples_per_second": 8.367, |
|
"eval_steps_per_second": 0.558, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 4.078947368421053e-05, |
|
"loss": 0.5992, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 27.33, |
|
"learning_rate": 4.0643274853801174e-05, |
|
"loss": 0.505, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 27.67, |
|
"learning_rate": 4.0497076023391814e-05, |
|
"loss": 0.4917, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 4.0350877192982455e-05, |
|
"loss": 0.4608, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 28.33, |
|
"learning_rate": 4.02046783625731e-05, |
|
"loss": 0.4843, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 28.67, |
|
"learning_rate": 4.005847953216375e-05, |
|
"loss": 0.4416, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 3.991228070175439e-05, |
|
"loss": 0.7439, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 29.33, |
|
"learning_rate": 3.976608187134503e-05, |
|
"loss": 0.4523, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 29.67, |
|
"learning_rate": 3.9619883040935676e-05, |
|
"loss": 0.4525, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 3.9473684210526316e-05, |
|
"loss": 0.4401, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.3077400234337308, |
|
"eval_accuracy_undropoff": 0.9935777897698063, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2703275413210631, |
|
"eval_iou_undropoff": 0.9627020713766379, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.44831418991088867, |
|
"eval_mean_accuracy": 0.6506589066017685, |
|
"eval_mean_iou": 0.41100987089923363, |
|
"eval_overall_accuracy": 0.9632102966308593, |
|
"eval_runtime": 1.8561, |
|
"eval_samples_per_second": 8.082, |
|
"eval_steps_per_second": 0.539, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 30.33, |
|
"learning_rate": 3.932748538011696e-05, |
|
"loss": 0.4473, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 30.67, |
|
"learning_rate": 3.9181286549707604e-05, |
|
"loss": 0.4753, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 3.9035087719298244e-05, |
|
"loss": 0.7232, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 31.33, |
|
"learning_rate": 3.888888888888889e-05, |
|
"loss": 0.4391, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 31.67, |
|
"learning_rate": 3.874269005847954e-05, |
|
"loss": 0.4428, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 3.859649122807018e-05, |
|
"loss": 0.457, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 32.33, |
|
"learning_rate": 3.845029239766082e-05, |
|
"loss": 0.4326, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 32.67, |
|
"learning_rate": 3.8304093567251465e-05, |
|
"loss": 0.4359, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 3.815789473684211e-05, |
|
"loss": 0.7037, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"learning_rate": 3.8011695906432746e-05, |
|
"loss": 0.4268, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"eval_accuracy_dropoff": 0.4108484388999931, |
|
"eval_accuracy_undropoff": 0.9894514498468887, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.33465263157894737, |
|
"eval_iou_undropoff": 0.9631619518896926, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.4365720748901367, |
|
"eval_mean_accuracy": 0.7001499443734409, |
|
"eval_mean_iou": 0.6489072917343199, |
|
"eval_overall_accuracy": 0.9638320922851562, |
|
"eval_runtime": 1.7768, |
|
"eval_samples_per_second": 8.442, |
|
"eval_steps_per_second": 0.563, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 33.67, |
|
"learning_rate": 3.786549707602339e-05, |
|
"loss": 0.4203, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 3.771929824561404e-05, |
|
"loss": 0.6864, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 34.33, |
|
"learning_rate": 3.757309941520468e-05, |
|
"loss": 0.4093, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 34.67, |
|
"learning_rate": 3.742690058479532e-05, |
|
"loss": 0.4149, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 3.728070175438597e-05, |
|
"loss": 0.3847, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 35.33, |
|
"learning_rate": 3.713450292397661e-05, |
|
"loss": 0.4227, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 35.67, |
|
"learning_rate": 3.6988304093567254e-05, |
|
"loss": 0.3916, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 3.6842105263157895e-05, |
|
"loss": 0.6716, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 36.33, |
|
"learning_rate": 3.669590643274854e-05, |
|
"loss": 0.4096, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 36.67, |
|
"learning_rate": 3.654970760233918e-05, |
|
"loss": 0.3939, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 36.67, |
|
"eval_accuracy_dropoff": 0.36697911641050385, |
|
"eval_accuracy_undropoff": 0.9927164392616175, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3171232876712329, |
|
"eval_iou_undropoff": 0.9644338706379278, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4026946723461151, |
|
"eval_mean_accuracy": 0.6798477778360607, |
|
"eval_mean_iou": 0.4271857194363869, |
|
"eval_overall_accuracy": 0.9650100708007813, |
|
"eval_runtime": 1.777, |
|
"eval_samples_per_second": 8.441, |
|
"eval_steps_per_second": 0.563, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 3.640350877192983e-05, |
|
"loss": 0.4204, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 37.33, |
|
"learning_rate": 3.625730994152047e-05, |
|
"loss": 0.4005, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 37.67, |
|
"learning_rate": 3.611111111111111e-05, |
|
"loss": 0.4024, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 3.5964912280701756e-05, |
|
"loss": 0.6594, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 38.33, |
|
"learning_rate": 3.5818713450292403e-05, |
|
"loss": 0.3816, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 38.67, |
|
"learning_rate": 3.5672514619883044e-05, |
|
"loss": 0.4391, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 3.5526315789473684e-05, |
|
"loss": 0.4499, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 39.33, |
|
"learning_rate": 3.538011695906433e-05, |
|
"loss": 0.3772, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 39.67, |
|
"learning_rate": 3.523391812865498e-05, |
|
"loss": 0.3751, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 3.508771929824561e-05, |
|
"loss": 0.4472, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.38870700944241504, |
|
"eval_accuracy_undropoff": 0.9904841657326722, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3224725900442657, |
|
"eval_iou_undropoff": 0.963205427478915, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.4159059226512909, |
|
"eval_mean_accuracy": 0.6895955875875436, |
|
"eval_mean_iou": 0.6428390087615904, |
|
"eval_overall_accuracy": 0.9638387044270833, |
|
"eval_runtime": 1.8087, |
|
"eval_samples_per_second": 8.293, |
|
"eval_steps_per_second": 0.553, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 40.33, |
|
"learning_rate": 3.494152046783626e-05, |
|
"loss": 0.3815, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 40.67, |
|
"learning_rate": 3.4795321637426905e-05, |
|
"loss": 0.3682, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 3.4649122807017546e-05, |
|
"loss": 0.6125, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 41.33, |
|
"learning_rate": 3.4502923976608186e-05, |
|
"loss": 0.365, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 41.67, |
|
"learning_rate": 3.435672514619883e-05, |
|
"loss": 0.3718, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 3.421052631578947e-05, |
|
"loss": 0.3577, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 42.33, |
|
"learning_rate": 3.406432748538012e-05, |
|
"loss": 0.3627, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 42.67, |
|
"learning_rate": 3.391812865497076e-05, |
|
"loss": 0.363, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 3.377192982456141e-05, |
|
"loss": 0.5795, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"learning_rate": 3.362573099415205e-05, |
|
"loss": 0.3618, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"eval_accuracy_dropoff": 0.3402313506559147, |
|
"eval_accuracy_undropoff": 0.9938960397567675, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3006237090643349, |
|
"eval_iou_undropoff": 0.964417056574582, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.37652963399887085, |
|
"eval_mean_accuracy": 0.6670636952063411, |
|
"eval_mean_iou": 0.6325203828194584, |
|
"eval_overall_accuracy": 0.9649531046549479, |
|
"eval_runtime": 1.8655, |
|
"eval_samples_per_second": 8.041, |
|
"eval_steps_per_second": 0.536, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 43.67, |
|
"learning_rate": 3.3479532163742695e-05, |
|
"loss": 0.3473, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.5669, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 44.33, |
|
"learning_rate": 3.3187134502923975e-05, |
|
"loss": 0.3485, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 44.67, |
|
"learning_rate": 3.304093567251462e-05, |
|
"loss": 0.3792, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 3.289473684210527e-05, |
|
"loss": 0.37, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 45.33, |
|
"learning_rate": 3.274853801169591e-05, |
|
"loss": 0.3561, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 45.67, |
|
"learning_rate": 3.260233918128655e-05, |
|
"loss": 0.3428, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 3.24561403508772e-05, |
|
"loss": 0.3342, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 46.33, |
|
"learning_rate": 3.230994152046784e-05, |
|
"loss": 0.3489, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 46.67, |
|
"learning_rate": 3.216374269005848e-05, |
|
"loss": 0.3456, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 46.67, |
|
"eval_accuracy_dropoff": 0.37153950421577414, |
|
"eval_accuracy_undropoff": 0.9917358780559715, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3152974435210684, |
|
"eval_iou_undropoff": 0.9636773065196076, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3671184182167053, |
|
"eval_mean_accuracy": 0.6816376911358728, |
|
"eval_mean_iou": 0.639487375020338, |
|
"eval_overall_accuracy": 0.9642748514811198, |
|
"eval_runtime": 1.7082, |
|
"eval_samples_per_second": 8.781, |
|
"eval_steps_per_second": 0.585, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 3.2017543859649124e-05, |
|
"loss": 0.4194, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 47.33, |
|
"learning_rate": 3.187134502923977e-05, |
|
"loss": 0.3389, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 47.67, |
|
"learning_rate": 3.172514619883041e-05, |
|
"loss": 0.3342, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 3.157894736842105e-05, |
|
"loss": 0.5441, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 48.33, |
|
"learning_rate": 3.14327485380117e-05, |
|
"loss": 0.3351, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 48.67, |
|
"learning_rate": 3.128654970760234e-05, |
|
"loss": 0.3305, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 3.1140350877192986e-05, |
|
"loss": 0.3938, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 49.33, |
|
"learning_rate": 3.0994152046783626e-05, |
|
"loss": 0.3272, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 49.67, |
|
"learning_rate": 3.084795321637427e-05, |
|
"loss": 0.3305, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 3.0701754385964913e-05, |
|
"loss": 0.3352, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.3754623566981414, |
|
"eval_accuracy_undropoff": 0.9922707296226875, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3217789372646501, |
|
"eval_iou_undropoff": 0.9643673342403736, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3572177588939667, |
|
"eval_mean_accuracy": 0.6838665431604144, |
|
"eval_mean_iou": 0.6430731357525119, |
|
"eval_overall_accuracy": 0.964959716796875, |
|
"eval_runtime": 1.8157, |
|
"eval_samples_per_second": 8.261, |
|
"eval_steps_per_second": 0.551, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 50.33, |
|
"learning_rate": 3.055555555555556e-05, |
|
"loss": 0.3312, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 50.67, |
|
"learning_rate": 3.0409356725146197e-05, |
|
"loss": 0.321, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 3.0263157894736844e-05, |
|
"loss": 0.3254, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 51.33, |
|
"learning_rate": 3.0116959064327488e-05, |
|
"loss": 0.3363, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 51.67, |
|
"learning_rate": 2.997076023391813e-05, |
|
"loss": 0.309, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 2.9824561403508772e-05, |
|
"loss": 0.5117, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 52.33, |
|
"learning_rate": 2.9678362573099415e-05, |
|
"loss": 0.3175, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 52.67, |
|
"learning_rate": 2.9532163742690062e-05, |
|
"loss": 0.3249, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 2.9385964912280706e-05, |
|
"loss": 0.3119, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"learning_rate": 2.9239766081871346e-05, |
|
"loss": 0.3143, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"eval_accuracy_dropoff": 0.34666988306108854, |
|
"eval_accuracy_undropoff": 0.9937688462001057, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3055714466523225, |
|
"eval_iou_undropoff": 0.9645728268129413, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.34511667490005493, |
|
"eval_mean_accuracy": 0.6702193646305972, |
|
"eval_mean_iou": 0.635072136732632, |
|
"eval_overall_accuracy": 0.9651166280110677, |
|
"eval_runtime": 1.7723, |
|
"eval_samples_per_second": 8.464, |
|
"eval_steps_per_second": 0.564, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 53.67, |
|
"learning_rate": 2.909356725146199e-05, |
|
"loss": 0.3077, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 2.8947368421052634e-05, |
|
"loss": 0.487, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 54.33, |
|
"learning_rate": 2.8801169590643277e-05, |
|
"loss": 0.3072, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 54.67, |
|
"learning_rate": 2.8654970760233917e-05, |
|
"loss": 0.3049, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 2.850877192982456e-05, |
|
"loss": 0.3264, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 55.33, |
|
"learning_rate": 2.8362573099415208e-05, |
|
"loss": 0.2989, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 55.67, |
|
"learning_rate": 2.821637426900585e-05, |
|
"loss": 0.3035, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 2.8070175438596492e-05, |
|
"loss": 0.4488, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 56.33, |
|
"learning_rate": 2.7923976608187135e-05, |
|
"loss": 0.2993, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 56.67, |
|
"learning_rate": 2.777777777777778e-05, |
|
"loss": 0.3009, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 56.67, |
|
"eval_accuracy_dropoff": 0.3984366025685207, |
|
"eval_accuracy_undropoff": 0.989834893184022, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.32674545120555415, |
|
"eval_iou_undropoff": 0.9629961667593027, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3357206881046295, |
|
"eval_mean_accuracy": 0.6941357478762713, |
|
"eval_mean_iou": 0.6448708089824284, |
|
"eval_overall_accuracy": 0.9636489868164062, |
|
"eval_runtime": 1.7591, |
|
"eval_samples_per_second": 8.527, |
|
"eval_steps_per_second": 0.568, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 2.7631578947368426e-05, |
|
"loss": 0.4793, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 57.33, |
|
"learning_rate": 2.7485380116959063e-05, |
|
"loss": 0.2956, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 57.67, |
|
"learning_rate": 2.733918128654971e-05, |
|
"loss": 0.296, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 2.7192982456140354e-05, |
|
"loss": 0.4863, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 58.33, |
|
"learning_rate": 2.7046783625730997e-05, |
|
"loss": 0.2851, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 58.67, |
|
"learning_rate": 2.6900584795321637e-05, |
|
"loss": 0.3046, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 2.675438596491228e-05, |
|
"loss": 0.3018, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 59.33, |
|
"learning_rate": 2.6608187134502928e-05, |
|
"loss": 0.2937, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 59.67, |
|
"learning_rate": 2.6461988304093572e-05, |
|
"loss": 0.2863, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 2.6315789473684212e-05, |
|
"loss": 0.2765, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.39654122728421437, |
|
"eval_accuracy_undropoff": 0.9903465944590442, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.32816347095339043, |
|
"eval_iou_undropoff": 0.9634116888452051, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.31876522302627563, |
|
"eval_mean_accuracy": 0.6934439108716293, |
|
"eval_mean_iou": 0.6457875798992978, |
|
"eval_overall_accuracy": 0.9640541076660156, |
|
"eval_runtime": 1.8131, |
|
"eval_samples_per_second": 8.273, |
|
"eval_steps_per_second": 0.552, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 60.33, |
|
"learning_rate": 2.6169590643274856e-05, |
|
"loss": 0.287, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 60.67, |
|
"learning_rate": 2.60233918128655e-05, |
|
"loss": 0.288, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 2.5877192982456143e-05, |
|
"loss": 0.3018, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 61.33, |
|
"learning_rate": 2.5730994152046783e-05, |
|
"loss": 0.2837, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 61.67, |
|
"learning_rate": 2.5584795321637427e-05, |
|
"loss": 0.2924, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 2.5438596491228074e-05, |
|
"loss": 0.2696, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 62.33, |
|
"learning_rate": 2.5292397660818717e-05, |
|
"loss": 0.2786, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 62.67, |
|
"learning_rate": 2.5146198830409358e-05, |
|
"loss": 0.2827, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.4671, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 63.33, |
|
"learning_rate": 2.485380116959064e-05, |
|
"loss": 0.2703, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 63.33, |
|
"eval_accuracy_dropoff": 0.3524651365818917, |
|
"eval_accuracy_undropoff": 0.9939758683488148, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.31190818665595915, |
|
"eval_iou_undropoff": 0.9650252545150309, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3179321587085724, |
|
"eval_mean_accuracy": 0.6732205024653533, |
|
"eval_mean_iou": 0.638466720585495, |
|
"eval_overall_accuracy": 0.9655710856119791, |
|
"eval_runtime": 1.773, |
|
"eval_samples_per_second": 8.46, |
|
"eval_steps_per_second": 0.564, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 63.67, |
|
"learning_rate": 2.470760233918129e-05, |
|
"loss": 0.2893, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 2.456140350877193e-05, |
|
"loss": 0.4278, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 64.33, |
|
"learning_rate": 2.4415204678362576e-05, |
|
"loss": 0.2735, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 64.67, |
|
"learning_rate": 2.4269005847953216e-05, |
|
"loss": 0.2705, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 2.412280701754386e-05, |
|
"loss": 0.266, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 65.33, |
|
"learning_rate": 2.3976608187134503e-05, |
|
"loss": 0.2685, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 65.67, |
|
"learning_rate": 2.3830409356725147e-05, |
|
"loss": 0.2656, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 2.368421052631579e-05, |
|
"loss": 0.2716, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 66.33, |
|
"learning_rate": 2.3538011695906434e-05, |
|
"loss": 0.2551, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 66.67, |
|
"learning_rate": 2.3391812865497074e-05, |
|
"loss": 0.2746, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 66.67, |
|
"eval_accuracy_dropoff": 0.3455728628207779, |
|
"eval_accuracy_undropoff": 0.9949042748743232, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3113299320079893, |
|
"eval_iou_undropoff": 0.9656272622393687, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.30669257044792175, |
|
"eval_mean_accuracy": 0.6702385688475505, |
|
"eval_mean_iou": 0.638478597123679, |
|
"eval_overall_accuracy": 0.9661532084147135, |
|
"eval_runtime": 1.696, |
|
"eval_samples_per_second": 8.844, |
|
"eval_steps_per_second": 0.59, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 2.324561403508772e-05, |
|
"loss": 0.4201, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 67.33, |
|
"learning_rate": 2.309941520467836e-05, |
|
"loss": 0.2592, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 67.67, |
|
"learning_rate": 2.295321637426901e-05, |
|
"loss": 0.2628, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 2.280701754385965e-05, |
|
"loss": 0.2498, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 68.33, |
|
"learning_rate": 2.2660818713450292e-05, |
|
"loss": 0.2683, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 68.67, |
|
"learning_rate": 2.2514619883040936e-05, |
|
"loss": 0.26, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 2.236842105263158e-05, |
|
"loss": 0.4197, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 69.33, |
|
"learning_rate": 2.2222222222222223e-05, |
|
"loss": 0.257, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 69.67, |
|
"learning_rate": 2.2076023391812867e-05, |
|
"loss": 0.2665, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 2.1929824561403507e-05, |
|
"loss": 0.2516, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.400757001401429, |
|
"eval_accuracy_undropoff": 0.9929330408413721, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3477171020501729, |
|
"eval_iou_undropoff": 0.9661113534771909, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.29922232031822205, |
|
"eval_mean_accuracy": 0.6968450211214006, |
|
"eval_mean_iou": 0.6569142277636819, |
|
"eval_overall_accuracy": 0.9667126973470052, |
|
"eval_runtime": 1.8044, |
|
"eval_samples_per_second": 8.313, |
|
"eval_steps_per_second": 0.554, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 70.33, |
|
"learning_rate": 2.1783625730994154e-05, |
|
"loss": 0.2516, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 70.67, |
|
"learning_rate": 2.1637426900584794e-05, |
|
"loss": 0.2601, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 2.149122807017544e-05, |
|
"loss": 0.407, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 71.33, |
|
"learning_rate": 2.134502923976608e-05, |
|
"loss": 0.2538, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 71.67, |
|
"learning_rate": 2.1198830409356725e-05, |
|
"loss": 0.2517, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.433, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 72.33, |
|
"learning_rate": 2.0906432748538013e-05, |
|
"loss": 0.2498, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 72.67, |
|
"learning_rate": 2.0760233918128656e-05, |
|
"loss": 0.2528, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 2.06140350877193e-05, |
|
"loss": 0.3325, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 73.33, |
|
"learning_rate": 2.046783625730994e-05, |
|
"loss": 0.2503, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 73.33, |
|
"eval_accuracy_dropoff": 0.44967491442093416, |
|
"eval_accuracy_undropoff": 0.9898551164273406, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.36889644871438465, |
|
"eval_iou_undropoff": 0.9652450473135042, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.29994916915893555, |
|
"eval_mean_accuracy": 0.7197650154241374, |
|
"eval_mean_iou": 0.6670707480139444, |
|
"eval_overall_accuracy": 0.9659370422363281, |
|
"eval_runtime": 1.939, |
|
"eval_samples_per_second": 7.736, |
|
"eval_steps_per_second": 0.516, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 73.67, |
|
"learning_rate": 2.0321637426900587e-05, |
|
"loss": 0.2474, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 2.0175438596491227e-05, |
|
"loss": 0.2447, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 74.33, |
|
"learning_rate": 2.0029239766081874e-05, |
|
"loss": 0.2487, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 74.67, |
|
"learning_rate": 1.9883040935672515e-05, |
|
"loss": 0.2432, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 1.9736842105263158e-05, |
|
"loss": 0.2436, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 75.33, |
|
"learning_rate": 1.9590643274853802e-05, |
|
"loss": 0.2536, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 75.67, |
|
"learning_rate": 1.9444444444444445e-05, |
|
"loss": 0.2392, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 1.929824561403509e-05, |
|
"loss": 0.2453, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 76.33, |
|
"learning_rate": 1.9152046783625733e-05, |
|
"loss": 0.2452, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 76.67, |
|
"learning_rate": 1.9005847953216373e-05, |
|
"loss": 0.2443, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 76.67, |
|
"eval_accuracy_dropoff": 0.35473384336159164, |
|
"eval_accuracy_undropoff": 0.9952078896194092, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3214811807388206, |
|
"eval_iou_undropoff": 0.9663200011988431, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.2815561890602112, |
|
"eval_mean_accuracy": 0.6749708664905004, |
|
"eval_mean_iou": 0.6439005909688318, |
|
"eval_overall_accuracy": 0.9668490091959635, |
|
"eval_runtime": 1.7007, |
|
"eval_samples_per_second": 8.82, |
|
"eval_steps_per_second": 0.588, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 1.885964912280702e-05, |
|
"loss": 0.2319, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 77.33, |
|
"learning_rate": 1.871345029239766e-05, |
|
"loss": 0.2409, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 77.67, |
|
"learning_rate": 1.8567251461988304e-05, |
|
"loss": 0.238, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.3025, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 78.33, |
|
"learning_rate": 1.827485380116959e-05, |
|
"loss": 0.2339, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 78.67, |
|
"learning_rate": 1.8128654970760235e-05, |
|
"loss": 0.2509, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 1.7982456140350878e-05, |
|
"loss": 0.2269, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 79.33, |
|
"learning_rate": 1.7836257309941522e-05, |
|
"loss": 0.2356, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 79.67, |
|
"learning_rate": 1.7690058479532165e-05, |
|
"loss": 0.2378, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 1.7543859649122806e-05, |
|
"loss": 0.3757, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.42150274542238153, |
|
"eval_accuracy_undropoff": 0.9910844767448668, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3534797917278783, |
|
"eval_iou_undropoff": 0.9652153676829396, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.29069358110427856, |
|
"eval_mean_accuracy": 0.7062936110836242, |
|
"eval_mean_iou": 0.6593475797054089, |
|
"eval_overall_accuracy": 0.9658645629882813, |
|
"eval_runtime": 1.8247, |
|
"eval_samples_per_second": 8.221, |
|
"eval_steps_per_second": 0.548, |
|
"step": 240 |
|
}
],
"max_steps": 360,
"num_train_epochs": 120,
"total_flos": 4.76793375031296e+16,
"trial_name": null,
"trial_params": null
}