|
{ |
|
"best_metric": 0.10267864167690277, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/safety-utcustom-train-SF-RGBD-b0/checkpoint-1280", |
|
"epoch": 128.0, |
|
"global_step": 1280, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 1.0566, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 1.0568, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1e-05, |
|
"loss": 1.0517, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 1.0462, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 1.0401, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 2e-05, |
|
"loss": 1.0392, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"loss": 1.025, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 1.0164, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 3e-05, |
|
"loss": 1.0188, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 1.0084, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_accuracy_safe": 0.03683871689006596, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.7845366863390846, |
|
"eval_iou_safe": 0.016300672001855917, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.766643132432382, |
|
"eval_loss": 1.0688321590423584, |
|
"eval_mean_accuracy": 0.41068770161457524, |
|
"eval_mean_iou": 0.260981268144746, |
|
"eval_overall_accuracy": 0.7624508871960995, |
|
"eval_runtime": 17.0867, |
|
"eval_samples_per_second": 3.921, |
|
"eval_steps_per_second": 0.293, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"loss": 1.0016, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4e-05, |
|
"loss": 0.9714, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.9643, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.951, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 5e-05, |
|
"loss": 0.9372, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 4.9824561403508773e-05, |
|
"loss": 0.9318, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 4.9649122807017544e-05, |
|
"loss": 0.9234, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 4.9473684210526315e-05, |
|
"loss": 0.8955, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 4.9298245614035086e-05, |
|
"loss": 0.8359, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 4.912280701754386e-05, |
|
"loss": 0.8483, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_accuracy_safe": 0.00020817190373205964, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9980423994443833, |
|
"eval_iou_safe": 0.00020692073211620515, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9686639129241729, |
|
"eval_loss": 0.873957633972168, |
|
"eval_mean_accuracy": 0.4991252856740577, |
|
"eval_mean_iou": 0.3229569445520964, |
|
"eval_overall_accuracy": 0.9685679763110716, |
|
"eval_runtime": 9.9346, |
|
"eval_samples_per_second": 6.744, |
|
"eval_steps_per_second": 0.503, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.8947368421052635e-05, |
|
"loss": 0.814, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 4.8771929824561406e-05, |
|
"loss": 0.8311, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 4.859649122807018e-05, |
|
"loss": 0.8158, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 4.842105263157895e-05, |
|
"loss": 0.8113, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 4.824561403508772e-05, |
|
"loss": 0.7571, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 4.807017543859649e-05, |
|
"loss": 0.7934, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.789473684210526e-05, |
|
"loss": 0.7214, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 4.771929824561404e-05, |
|
"loss": 0.7534, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 4.754385964912281e-05, |
|
"loss": 0.7021, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.7058, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_accuracy_safe": 0.0008673829322169151, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.992971247730839, |
|
"eval_iou_safe": 0.0008545401719334826, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9641277576335231, |
|
"eval_loss": 0.74162358045578, |
|
"eval_mean_accuracy": 0.49691931533152794, |
|
"eval_mean_iou": 0.3216607659351522, |
|
"eval_overall_accuracy": 0.9636660903247435, |
|
"eval_runtime": 9.5465, |
|
"eval_samples_per_second": 7.018, |
|
"eval_steps_per_second": 0.524, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 4.719298245614036e-05, |
|
"loss": 0.6607, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 4.701754385964913e-05, |
|
"loss": 0.6468, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 4.68421052631579e-05, |
|
"loss": 0.6593, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.6234, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 4.649122807017544e-05, |
|
"loss": 0.6841, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 4.6315789473684214e-05, |
|
"loss": 0.6508, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 4.6140350877192985e-05, |
|
"loss": 0.6758, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 4.5964912280701756e-05, |
|
"loss": 0.5821, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 4.5789473684210527e-05, |
|
"loss": 0.6304, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 4.56140350877193e-05, |
|
"loss": 0.578, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_accuracy_safe": 0.0007363117335708035, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9952830902666999, |
|
"eval_iou_safe": 0.0007249429724180075, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.966194230792341, |
|
"eval_loss": 0.5968723297119141, |
|
"eval_mean_accuracy": 0.49800970100013536, |
|
"eval_mean_iou": 0.32230639125491967, |
|
"eval_overall_accuracy": 0.965905773105906, |
|
"eval_runtime": 9.4884, |
|
"eval_samples_per_second": 7.061, |
|
"eval_steps_per_second": 0.527, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 4.5438596491228075e-05, |
|
"loss": 0.6387, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 4.5263157894736846e-05, |
|
"loss": 0.5828, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 4.508771929824562e-05, |
|
"loss": 0.5795, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 4.491228070175439e-05, |
|
"loss": 0.5699, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.5602, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 4.456140350877193e-05, |
|
"loss": 0.5501, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 4.43859649122807e-05, |
|
"loss": 0.608, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 4.421052631578947e-05, |
|
"loss": 0.5565, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 4.403508771929824e-05, |
|
"loss": 0.5368, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4.3859649122807014e-05, |
|
"loss": 0.5531, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_safe": 0.0060851731489084465, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9974240893698892, |
|
"eval_iou_safe": 0.005865184362233192, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.968219331241184, |
|
"eval_loss": 0.5068420767784119, |
|
"eval_mean_accuracy": 0.5017546312593989, |
|
"eval_mean_iou": 0.3246948385344724, |
|
"eval_overall_accuracy": 0.9681415273182428, |
|
"eval_runtime": 9.555, |
|
"eval_samples_per_second": 7.012, |
|
"eval_steps_per_second": 0.523, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 4.368421052631579e-05, |
|
"loss": 0.5522, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 4.350877192982456e-05, |
|
"loss": 0.5304, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.5472, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 4.3157894736842105e-05, |
|
"loss": 0.5052, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 4.298245614035088e-05, |
|
"loss": 0.5344, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 4.2807017543859654e-05, |
|
"loss": 0.4878, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 4.2631578947368425e-05, |
|
"loss": 0.558, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 4.2456140350877196e-05, |
|
"loss": 0.4542, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 4.228070175438597e-05, |
|
"loss": 0.4765, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.4786, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_accuracy_safe": 0.009651080759133542, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.996145227712823, |
|
"eval_iou_safe": 0.00919338047228307, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.967087021440742, |
|
"eval_loss": 0.45749950408935547, |
|
"eval_mean_accuracy": 0.5028981542359783, |
|
"eval_mean_iou": 0.325426800637675, |
|
"eval_overall_accuracy": 0.967005772377128, |
|
"eval_runtime": 9.3868, |
|
"eval_samples_per_second": 7.138, |
|
"eval_steps_per_second": 0.533, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 4.1929824561403516e-05, |
|
"loss": 0.4643, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 4.1754385964912287e-05, |
|
"loss": 0.5082, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 4.157894736842106e-05, |
|
"loss": 0.4564, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 4.140350877192983e-05, |
|
"loss": 0.4646, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 4.12280701754386e-05, |
|
"loss": 0.5312, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 4.105263157894737e-05, |
|
"loss": 0.4676, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 4.087719298245614e-05, |
|
"loss": 0.4669, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 4.070175438596491e-05, |
|
"loss": 0.4406, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 4.0526315789473684e-05, |
|
"loss": 0.4704, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 4.0350877192982455e-05, |
|
"loss": 0.4681, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_accuracy_safe": 0.006705833824850328, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.998296845861793, |
|
"eval_iou_safe": 0.006371969925913715, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.969000696975069, |
|
"eval_loss": 0.4381723403930664, |
|
"eval_mean_accuracy": 0.5025013398433217, |
|
"eval_mean_iou": 0.32512422230032756, |
|
"eval_overall_accuracy": 0.9690068373039588, |
|
"eval_runtime": 10.1282, |
|
"eval_samples_per_second": 6.615, |
|
"eval_steps_per_second": 0.494, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 4.017543859649123e-05, |
|
"loss": 0.4417, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 4e-05, |
|
"loss": 0.4517, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 3.9824561403508774e-05, |
|
"loss": 0.4441, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"learning_rate": 3.9649122807017545e-05, |
|
"loss": 0.4982, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 3.9473684210526316e-05, |
|
"loss": 0.479, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"learning_rate": 3.929824561403509e-05, |
|
"loss": 0.4123, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"learning_rate": 3.912280701754386e-05, |
|
"loss": 0.4351, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 7.8, |
|
"learning_rate": 3.894736842105263e-05, |
|
"loss": 0.4318, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"learning_rate": 3.877192982456141e-05, |
|
"loss": 0.4846, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 3.859649122807018e-05, |
|
"loss": 0.4139, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_accuracy_safe": 0.0016518826064664362, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9980280842666458, |
|
"eval_iou_safe": 0.0015664870476928589, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.968595196515178, |
|
"eval_loss": 0.3973134160041809, |
|
"eval_mean_accuracy": 0.4998399834365561, |
|
"eval_mean_iou": 0.3233872278542903, |
|
"eval_overall_accuracy": 0.968596728880014, |
|
"eval_runtime": 11.1226, |
|
"eval_samples_per_second": 6.024, |
|
"eval_steps_per_second": 0.45, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 8.1, |
|
"learning_rate": 3.842105263157895e-05, |
|
"loss": 0.433, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 8.2, |
|
"learning_rate": 3.824561403508773e-05, |
|
"loss": 0.394, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 8.3, |
|
"learning_rate": 3.80701754385965e-05, |
|
"loss": 0.444, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"learning_rate": 3.789473684210527e-05, |
|
"loss": 0.4225, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 3.771929824561404e-05, |
|
"loss": 0.3741, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 3.754385964912281e-05, |
|
"loss": 0.4193, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"learning_rate": 3.736842105263158e-05, |
|
"loss": 0.399, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"learning_rate": 3.719298245614035e-05, |
|
"loss": 0.4082, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 8.9, |
|
"learning_rate": 3.7017543859649124e-05, |
|
"loss": 0.3906, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 3.6842105263157895e-05, |
|
"loss": 0.4275, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_accuracy_safe": 0.007727418167239139, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9994094402495628, |
|
"eval_iou_safe": 0.007580341408442182, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9701099208246821, |
|
"eval_loss": 0.3982888162136078, |
|
"eval_mean_accuracy": 0.503568429208401, |
|
"eval_mean_iou": 0.48884513111656214, |
|
"eval_overall_accuracy": 0.9701167434009154, |
|
"eval_runtime": 9.6246, |
|
"eval_samples_per_second": 6.961, |
|
"eval_steps_per_second": 0.52, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"loss": 0.4041, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 3.6491228070175443e-05, |
|
"loss": 0.3714, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"learning_rate": 3.6315789473684214e-05, |
|
"loss": 0.4078, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"learning_rate": 3.6140350877192985e-05, |
|
"loss": 0.4167, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 3.5964912280701756e-05, |
|
"loss": 0.4058, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"learning_rate": 3.578947368421053e-05, |
|
"loss": 0.3794, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 9.7, |
|
"learning_rate": 3.56140350877193e-05, |
|
"loss": 0.3786, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 3.543859649122807e-05, |
|
"loss": 0.3545, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"learning_rate": 3.526315789473684e-05, |
|
"loss": 0.3598, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 3.508771929824561e-05, |
|
"loss": 0.3975, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_safe": 0.0008095574034024542, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9997554686032364, |
|
"eval_iou_safe": 0.0008031575565652393, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9702474966446942, |
|
"eval_loss": 0.33975234627723694, |
|
"eval_mean_accuracy": 0.5002825130033194, |
|
"eval_mean_iou": 0.3236835514004198, |
|
"eval_overall_accuracy": 0.9702482081171292, |
|
"eval_runtime": 9.7288, |
|
"eval_samples_per_second": 6.887, |
|
"eval_steps_per_second": 0.514, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 10.1, |
|
"learning_rate": 3.491228070175438e-05, |
|
"loss": 0.3621, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 10.2, |
|
"learning_rate": 3.473684210526316e-05, |
|
"loss": 0.37, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 10.3, |
|
"learning_rate": 3.456140350877193e-05, |
|
"loss": 0.359, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 10.4, |
|
"learning_rate": 3.43859649122807e-05, |
|
"loss": 0.3523, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 3.421052631578947e-05, |
|
"loss": 0.3318, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 10.6, |
|
"learning_rate": 3.403508771929825e-05, |
|
"loss": 0.3661, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 10.7, |
|
"learning_rate": 3.385964912280702e-05, |
|
"loss": 0.3563, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 10.8, |
|
"learning_rate": 3.368421052631579e-05, |
|
"loss": 0.3595, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 10.9, |
|
"learning_rate": 3.3508771929824564e-05, |
|
"loss": 0.3554, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.4325, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"eval_accuracy_safe": 0.09409370048689095, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9992699846041437, |
|
"eval_iou_safe": 0.09189049790961165, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9724559733145028, |
|
"eval_loss": 0.37850186228752136, |
|
"eval_mean_accuracy": 0.5466818425455173, |
|
"eval_mean_iou": 0.3547821570747048, |
|
"eval_overall_accuracy": 0.9725325285498776, |
|
"eval_runtime": 10.4486, |
|
"eval_samples_per_second": 6.412, |
|
"eval_steps_per_second": 0.479, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 11.1, |
|
"learning_rate": 3.3157894736842106e-05, |
|
"loss": 0.3409, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 11.2, |
|
"learning_rate": 3.2982456140350884e-05, |
|
"loss": 0.3633, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 11.3, |
|
"learning_rate": 3.2807017543859655e-05, |
|
"loss": 0.3483, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 11.4, |
|
"learning_rate": 3.2631578947368426e-05, |
|
"loss": 0.3548, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 3.24561403508772e-05, |
|
"loss": 0.3474, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 11.6, |
|
"learning_rate": 3.228070175438597e-05, |
|
"loss": 0.3529, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 11.7, |
|
"learning_rate": 3.210526315789474e-05, |
|
"loss": 0.3309, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 11.8, |
|
"learning_rate": 3.192982456140351e-05, |
|
"loss": 0.3306, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 11.9, |
|
"learning_rate": 3.175438596491228e-05, |
|
"loss": 0.3194, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 3.157894736842105e-05, |
|
"loss": 0.3239, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"eval_accuracy_safe": 0.07718166082628826, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9994698690736191, |
|
"eval_iou_safe": 0.07586054028042748, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9721634919205556, |
|
"eval_loss": 0.33375129103660583, |
|
"eval_mean_accuracy": 0.5383257649499537, |
|
"eval_mean_iou": 0.34934134406699435, |
|
"eval_overall_accuracy": 0.972226954218167, |
|
"eval_runtime": 9.4146, |
|
"eval_samples_per_second": 7.117, |
|
"eval_steps_per_second": 0.531, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 12.1, |
|
"learning_rate": 3.140350877192982e-05, |
|
"loss": 0.3293, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 12.2, |
|
"learning_rate": 3.12280701754386e-05, |
|
"loss": 0.3444, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 12.3, |
|
"learning_rate": 3.105263157894737e-05, |
|
"loss": 0.3658, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 12.4, |
|
"learning_rate": 3.087719298245614e-05, |
|
"loss": 0.3282, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 3.0701754385964913e-05, |
|
"loss": 0.3397, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 12.6, |
|
"learning_rate": 3.0526315789473684e-05, |
|
"loss": 0.3385, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 12.7, |
|
"learning_rate": 3.035087719298246e-05, |
|
"loss": 0.3236, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 12.8, |
|
"learning_rate": 3.017543859649123e-05, |
|
"loss": 0.3293, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 12.9, |
|
"learning_rate": 3e-05, |
|
"loss": 0.3317, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 2.9824561403508772e-05, |
|
"loss": 0.3733, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"eval_accuracy_safe": 0.07628151009440981, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9995119932441748, |
|
"eval_iou_safe": 0.07507778115040219, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9721785568914258, |
|
"eval_loss": 0.3012864589691162, |
|
"eval_mean_accuracy": 0.5378967516692923, |
|
"eval_mean_iou": 0.523628169020914, |
|
"eval_overall_accuracy": 0.972241245098968, |
|
"eval_runtime": 9.4095, |
|
"eval_samples_per_second": 7.12, |
|
"eval_steps_per_second": 0.531, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 13.1, |
|
"learning_rate": 2.9649122807017543e-05, |
|
"loss": 0.3196, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 13.2, |
|
"learning_rate": 2.9473684210526314e-05, |
|
"loss": 0.3177, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 13.3, |
|
"learning_rate": 2.929824561403509e-05, |
|
"loss": 0.3177, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 13.4, |
|
"learning_rate": 2.9122807017543863e-05, |
|
"loss": 0.3131, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 2.8947368421052634e-05, |
|
"loss": 0.3246, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 13.6, |
|
"learning_rate": 2.8771929824561404e-05, |
|
"loss": 0.3093, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 13.7, |
|
"learning_rate": 2.8596491228070175e-05, |
|
"loss": 0.3346, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 13.8, |
|
"learning_rate": 2.842105263157895e-05, |
|
"loss": 0.3108, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 13.9, |
|
"learning_rate": 2.824561403508772e-05, |
|
"loss": 0.3331, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 2.8070175438596492e-05, |
|
"loss": 0.3165, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"eval_accuracy_safe": 0.08003631443209548, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9994350198294546, |
|
"eval_iou_safe": 0.07857775456444727, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9722117608452171, |
|
"eval_loss": 0.28493982553482056, |
|
"eval_mean_accuracy": 0.539735667130775, |
|
"eval_mean_iou": 0.5253947577048321, |
|
"eval_overall_accuracy": 0.9722774562551014, |
|
"eval_runtime": 10.0221, |
|
"eval_samples_per_second": 6.685, |
|
"eval_steps_per_second": 0.499, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 14.1, |
|
"learning_rate": 2.7894736842105263e-05, |
|
"loss": 0.3037, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 14.2, |
|
"learning_rate": 2.7719298245614034e-05, |
|
"loss": 0.3372, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 14.3, |
|
"learning_rate": 2.754385964912281e-05, |
|
"loss": 0.3018, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 14.4, |
|
"learning_rate": 2.7368421052631583e-05, |
|
"loss": 0.3047, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 2.7192982456140354e-05, |
|
"loss": 0.3158, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 14.6, |
|
"learning_rate": 2.7017543859649125e-05, |
|
"loss": 0.3036, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 14.7, |
|
"learning_rate": 2.6842105263157896e-05, |
|
"loss": 0.3219, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 14.8, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 0.2892, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 14.9, |
|
"learning_rate": 2.6491228070175438e-05, |
|
"loss": 0.3252, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 2.6315789473684212e-05, |
|
"loss": 0.3329, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_safe": 0.11177481968072599, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.999028621320486, |
|
"eval_iou_safe": 0.10831796981091193, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9727305324523915, |
|
"eval_loss": 0.3002106547355652, |
|
"eval_mean_accuracy": 0.555401720500606, |
|
"eval_mean_iou": 0.5405242511316517, |
|
"eval_overall_accuracy": 0.9728205666613223, |
|
"eval_runtime": 10.0001, |
|
"eval_samples_per_second": 6.7, |
|
"eval_steps_per_second": 0.5, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 15.1, |
|
"learning_rate": 2.6140350877192983e-05, |
|
"loss": 0.3213, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 15.2, |
|
"learning_rate": 2.5964912280701754e-05, |
|
"loss": 0.2808, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 15.3, |
|
"learning_rate": 2.578947368421053e-05, |
|
"loss": 0.2909, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 15.4, |
|
"learning_rate": 2.5614035087719303e-05, |
|
"loss": 0.2886, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 2.5438596491228074e-05, |
|
"loss": 0.2776, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 15.6, |
|
"learning_rate": 2.5263157894736845e-05, |
|
"loss": 0.2945, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 15.7, |
|
"learning_rate": 2.5087719298245616e-05, |
|
"loss": 0.3205, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 15.8, |
|
"learning_rate": 2.4912280701754387e-05, |
|
"loss": 0.2827, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 15.9, |
|
"learning_rate": 2.4736842105263158e-05, |
|
"loss": 0.3261, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 2.456140350877193e-05, |
|
"loss": 0.3214, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"eval_accuracy_safe": 0.09076873258005945, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9994798427630265, |
|
"eval_iou_safe": 0.08924361530356209, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9725644143423847, |
|
"eval_loss": 0.27253061532974243, |
|
"eval_mean_accuracy": 0.545124287671543, |
|
"eval_mean_iou": 0.5309040148229734, |
|
"eval_overall_accuracy": 0.972637973614593, |
|
"eval_runtime": 9.3554, |
|
"eval_samples_per_second": 7.162, |
|
"eval_steps_per_second": 0.534, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 16.1, |
|
"learning_rate": 2.4385964912280703e-05, |
|
"loss": 0.2676, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 16.2, |
|
"learning_rate": 2.4210526315789474e-05, |
|
"loss": 0.2881, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 16.3, |
|
"learning_rate": 2.4035087719298245e-05, |
|
"loss": 0.2869, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 16.4, |
|
"learning_rate": 2.385964912280702e-05, |
|
"loss": 0.3117, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 2.368421052631579e-05, |
|
"loss": 0.2852, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 16.6, |
|
"learning_rate": 2.3508771929824565e-05, |
|
"loss": 0.3013, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 16.7, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"loss": 0.3106, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 16.8, |
|
"learning_rate": 2.3157894736842107e-05, |
|
"loss": 0.3104, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 16.9, |
|
"learning_rate": 2.2982456140350878e-05, |
|
"loss": 0.2777, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 2.280701754385965e-05, |
|
"loss": 0.2744, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"eval_accuracy_safe": 0.15732206121024978, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9985794532845882, |
|
"eval_iou_safe": 0.15030708175648924, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9736074113594023, |
|
"eval_loss": 0.2896406948566437, |
|
"eval_mean_accuracy": 0.577950757247419, |
|
"eval_mean_iou": 0.5619572465579458, |
|
"eval_overall_accuracy": 0.9737300588123834, |
|
"eval_runtime": 9.4906, |
|
"eval_samples_per_second": 7.06, |
|
"eval_steps_per_second": 0.527, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 17.1, |
|
"learning_rate": 2.2631578947368423e-05, |
|
"loss": 0.2712, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 17.2, |
|
"learning_rate": 2.2456140350877194e-05, |
|
"loss": 0.2807, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 17.3, |
|
"learning_rate": 2.2280701754385965e-05, |
|
"loss": 0.2618, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 17.4, |
|
"learning_rate": 2.2105263157894736e-05, |
|
"loss": 0.2755, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 2.1929824561403507e-05, |
|
"loss": 0.2889, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 17.6, |
|
"learning_rate": 2.175438596491228e-05, |
|
"loss": 0.277, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 17.7, |
|
"learning_rate": 2.1578947368421053e-05, |
|
"loss": 0.2669, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 17.8, |
|
"learning_rate": 2.1403508771929827e-05, |
|
"loss": 0.2867, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 17.9, |
|
"learning_rate": 2.1228070175438598e-05, |
|
"loss": 0.3369, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.2948, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"eval_accuracy_safe": 0.13295631088546306, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9988779599416738, |
|
"eval_iou_safe": 0.12822929505304623, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9731947501724811, |
|
"eval_loss": 0.2564152479171753, |
|
"eval_mean_accuracy": 0.5659171354135685, |
|
"eval_mean_iou": 0.5507120226127636, |
|
"eval_overall_accuracy": 0.9733000228654093, |
|
"eval_runtime": 9.4795, |
|
"eval_samples_per_second": 7.068, |
|
"eval_steps_per_second": 0.527, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 18.1, |
|
"learning_rate": 2.0877192982456143e-05, |
|
"loss": 0.2978, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 18.2, |
|
"learning_rate": 2.0701754385964914e-05, |
|
"loss": 0.2467, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 18.3, |
|
"learning_rate": 2.0526315789473685e-05, |
|
"loss": 0.2906, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 18.4, |
|
"learning_rate": 2.0350877192982456e-05, |
|
"loss": 0.2572, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 2.0175438596491227e-05, |
|
"loss": 0.2631, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 18.6, |
|
"learning_rate": 2e-05, |
|
"loss": 0.2838, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 18.7, |
|
"learning_rate": 1.9824561403508773e-05, |
|
"loss": 0.2543, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 18.8, |
|
"learning_rate": 1.9649122807017544e-05, |
|
"loss": 0.2629, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 18.9, |
|
"learning_rate": 1.9473684210526315e-05, |
|
"loss": 0.2622, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 1.929824561403509e-05, |
|
"loss": 0.2653, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"eval_accuracy_safe": 0.17323757425761657, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9986642883133119, |
|
"eval_iou_safe": 0.1659548460122348, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.97415022926933, |
|
"eval_loss": 0.25176867842674255, |
|
"eval_mean_accuracy": 0.5859509312854643, |
|
"eval_mean_iou": 0.5700525376407825, |
|
"eval_overall_accuracy": 0.974282506686538, |
|
"eval_runtime": 11.4222, |
|
"eval_samples_per_second": 5.866, |
|
"eval_steps_per_second": 0.438, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 19.1, |
|
"learning_rate": 1.9122807017543863e-05, |
|
"loss": 0.2738, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 19.2, |
|
"learning_rate": 1.8947368421052634e-05, |
|
"loss": 0.2741, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 19.3, |
|
"learning_rate": 1.8771929824561405e-05, |
|
"loss": 0.2625, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 19.4, |
|
"learning_rate": 1.8596491228070176e-05, |
|
"loss": 0.2632, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.247, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 19.6, |
|
"learning_rate": 1.8245614035087722e-05, |
|
"loss": 0.2735, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 19.7, |
|
"learning_rate": 1.8070175438596493e-05, |
|
"loss": 0.2583, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 19.8, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.2458, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 19.9, |
|
"learning_rate": 1.7719298245614035e-05, |
|
"loss": 0.2588, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 1.7543859649122806e-05, |
|
"loss": 0.3026, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_safe": 0.14083407542761978, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9990070312163571, |
|
"eval_iou_safe": 0.13638476313495493, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9735479371956898, |
|
"eval_loss": 0.2531002461910248, |
|
"eval_mean_accuracy": 0.5699205533219884, |
|
"eval_mean_iou": 0.5549663501653224, |
|
"eval_overall_accuracy": 0.9736579781147972, |
|
"eval_runtime": 9.5606, |
|
"eval_samples_per_second": 7.008, |
|
"eval_steps_per_second": 0.523, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 20.1, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.306, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 20.2, |
|
"learning_rate": 1.719298245614035e-05, |
|
"loss": 0.2678, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 20.3, |
|
"learning_rate": 1.7017543859649125e-05, |
|
"loss": 0.2485, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 20.4, |
|
"learning_rate": 1.6842105263157896e-05, |
|
"loss": 0.2974, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.2612, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 20.6, |
|
"learning_rate": 1.6491228070175442e-05, |
|
"loss": 0.2509, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 20.7, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.2503, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 20.8, |
|
"learning_rate": 1.6140350877192984e-05, |
|
"loss": 0.2584, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 20.9, |
|
"learning_rate": 1.5964912280701755e-05, |
|
"loss": 0.2552, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.2649, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"eval_accuracy_safe": 0.18018242026823336, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9985888402863834, |
|
"eval_iou_safe": 0.17219883762699062, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974277524161329, |
|
"eval_loss": 0.2383996993303299, |
|
"eval_mean_accuracy": 0.5893856302773084, |
|
"eval_mean_iou": 0.5732381808941598, |
|
"eval_overall_accuracy": 0.9744144268889925, |
|
"eval_runtime": 9.6539, |
|
"eval_samples_per_second": 6.94, |
|
"eval_steps_per_second": 0.518, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 21.1, |
|
"learning_rate": 1.56140350877193e-05, |
|
"loss": 0.2475, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 21.2, |
|
"learning_rate": 1.543859649122807e-05, |
|
"loss": 0.254, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 21.3, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.2623, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 21.4, |
|
"learning_rate": 1.5087719298245615e-05, |
|
"loss": 0.2521, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 1.4912280701754386e-05, |
|
"loss": 0.233, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 21.6, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.249, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 21.7, |
|
"learning_rate": 1.4561403508771931e-05, |
|
"loss": 0.2474, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 21.8, |
|
"learning_rate": 1.4385964912280702e-05, |
|
"loss": 0.2573, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 21.9, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.2623, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 1.4035087719298246e-05, |
|
"loss": 0.2431, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"eval_accuracy_safe": 0.19926677229463263, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9983434875269627, |
|
"eval_iou_safe": 0.1889817324970706, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974590481658128, |
|
"eval_loss": 0.2389637678861618, |
|
"eval_mean_accuracy": 0.5988051299107977, |
|
"eval_mean_iou": 0.5817861070775993, |
|
"eval_overall_accuracy": 0.9747400426152927, |
|
"eval_runtime": 10.415, |
|
"eval_samples_per_second": 6.433, |
|
"eval_steps_per_second": 0.48, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 22.1, |
|
"learning_rate": 1.3859649122807017e-05, |
|
"loss": 0.2517, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 22.2, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.2393, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 22.3, |
|
"learning_rate": 1.3508771929824562e-05, |
|
"loss": 0.2455, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 22.4, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 0.2367, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.2516, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 22.6, |
|
"learning_rate": 1.2982456140350877e-05, |
|
"loss": 0.2405, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 22.7, |
|
"learning_rate": 1.2807017543859651e-05, |
|
"loss": 0.2342, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 22.8, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.2365, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 22.9, |
|
"learning_rate": 1.2456140350877193e-05, |
|
"loss": 0.2546, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 1.2280701754385964e-05, |
|
"loss": 0.2608, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"eval_accuracy_safe": 0.23167027112462943, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9980992494740052, |
|
"eval_iou_safe": 0.2180533381712627, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9752910790561756, |
|
"eval_loss": 0.2354857325553894, |
|
"eval_mean_accuracy": 0.6148847602993173, |
|
"eval_mean_iou": 0.5966722086137192, |
|
"eval_overall_accuracy": 0.9754601663617946, |
|
"eval_runtime": 9.0986, |
|
"eval_samples_per_second": 7.364, |
|
"eval_steps_per_second": 0.55, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 23.1, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.2717, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 23.2, |
|
"learning_rate": 1.192982456140351e-05, |
|
"loss": 0.2398, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 23.3, |
|
"learning_rate": 1.1754385964912282e-05, |
|
"loss": 0.232, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 23.4, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.2343, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 1.1403508771929824e-05, |
|
"loss": 0.2494, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 23.6, |
|
"learning_rate": 1.1228070175438597e-05, |
|
"loss": 0.2579, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 23.7, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.2554, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 23.8, |
|
"learning_rate": 1.087719298245614e-05, |
|
"loss": 0.2516, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 23.9, |
|
"learning_rate": 1.0701754385964913e-05, |
|
"loss": 0.248, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.223, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"eval_accuracy_safe": 0.16970828948230732, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9988791333168983, |
|
"eval_iou_safe": 0.1636807106061085, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974257712308983, |
|
"eval_loss": 0.22904758155345917, |
|
"eval_mean_accuracy": 0.5842937113996027, |
|
"eval_mean_iou": 0.5689692114575458, |
|
"eval_overall_accuracy": 0.9743867560998717, |
|
"eval_runtime": 9.2103, |
|
"eval_samples_per_second": 7.274, |
|
"eval_steps_per_second": 0.543, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 24.1, |
|
"learning_rate": 1.0350877192982457e-05, |
|
"loss": 0.2361, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 24.2, |
|
"learning_rate": 1.0175438596491228e-05, |
|
"loss": 0.2487, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 24.3, |
|
"learning_rate": 1e-05, |
|
"loss": 0.2318, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 24.4, |
|
"learning_rate": 9.824561403508772e-06, |
|
"loss": 0.2492, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 9.649122807017545e-06, |
|
"loss": 0.2644, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 24.6, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.2297, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 24.7, |
|
"learning_rate": 9.298245614035088e-06, |
|
"loss": 0.2757, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 24.8, |
|
"learning_rate": 9.122807017543861e-06, |
|
"loss": 0.2373, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 24.9, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.2465, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 8.771929824561403e-06, |
|
"loss": 0.2448, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_safe": 0.21411444057655907, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9984515553851293, |
|
"eval_iou_safe": 0.2037491172883097, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9751261777280302, |
|
"eval_loss": 0.22617702186107635, |
|
"eval_mean_accuracy": 0.6062829979808442, |
|
"eval_mean_iou": 0.58943764750817, |
|
"eval_overall_accuracy": 0.975283494636194, |
|
"eval_runtime": 9.8675, |
|
"eval_samples_per_second": 6.79, |
|
"eval_steps_per_second": 0.507, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 25.1, |
|
"learning_rate": 8.596491228070176e-06, |
|
"loss": 0.2223, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 25.2, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.2382, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 25.3, |
|
"learning_rate": 8.245614035087721e-06, |
|
"loss": 0.2313, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 25.4, |
|
"learning_rate": 8.070175438596492e-06, |
|
"loss": 0.2753, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.2313, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 25.6, |
|
"learning_rate": 7.719298245614036e-06, |
|
"loss": 0.2392, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 25.7, |
|
"learning_rate": 7.5438596491228074e-06, |
|
"loss": 0.2287, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 25.8, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.2145, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 25.9, |
|
"learning_rate": 7.192982456140351e-06, |
|
"loss": 0.2461, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 7.017543859649123e-06, |
|
"loss": 0.2547, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"eval_accuracy_safe": 0.2736573875968096, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.997840402899504, |
|
"eval_iou_safe": 0.25552723843257524, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9762572758507593, |
|
"eval_loss": 0.22811570763587952, |
|
"eval_mean_accuracy": 0.6357488952481568, |
|
"eval_mean_iou": 0.6158922571416673, |
|
"eval_overall_accuracy": 0.976449197797633, |
|
"eval_runtime": 9.8195, |
|
"eval_samples_per_second": 6.823, |
|
"eval_steps_per_second": 0.509, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 26.1, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.232, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 26.2, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 0.2341, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 26.3, |
|
"learning_rate": 6.4912280701754385e-06, |
|
"loss": 0.235, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 26.4, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.2276, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 6.140350877192982e-06, |
|
"loss": 0.2661, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 26.6, |
|
"learning_rate": 5.964912280701755e-06, |
|
"loss": 0.2416, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 26.7, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.2499, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 26.8, |
|
"learning_rate": 5.6140350877192985e-06, |
|
"loss": 0.223, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 26.9, |
|
"learning_rate": 5.43859649122807e-06, |
|
"loss": 0.2192, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.2266, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"eval_accuracy_safe": 0.2391143442006777, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9981174954587445, |
|
"eval_iou_safe": 0.22518692513373836, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9755248903429237, |
|
"eval_loss": 0.21912012994289398, |
|
"eval_mean_accuracy": 0.6186159198297111, |
|
"eval_mean_iou": 0.600355907738331, |
|
"eval_overall_accuracy": 0.9756977593720849, |
|
"eval_runtime": 9.2527, |
|
"eval_samples_per_second": 7.241, |
|
"eval_steps_per_second": 0.54, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 27.1, |
|
"learning_rate": 5.087719298245614e-06, |
|
"loss": 0.235, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 27.2, |
|
"learning_rate": 4.912280701754386e-06, |
|
"loss": 0.2273, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 27.3, |
|
"learning_rate": 4.736842105263159e-06, |
|
"loss": 0.2235, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 27.4, |
|
"learning_rate": 4.5614035087719304e-06, |
|
"loss": 0.2364, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.3859649122807014e-06, |
|
"loss": 0.2449, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 27.6, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.253, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 27.7, |
|
"learning_rate": 4.035087719298246e-06, |
|
"loss": 0.247, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 27.8, |
|
"learning_rate": 3.859649122807018e-06, |
|
"loss": 0.2271, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 27.9, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.2359, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.2357, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"eval_accuracy_safe": 0.2226726188410993, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9984927995242667, |
|
"eval_iou_safe": 0.21216656994042152, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9754146075867751, |
|
"eval_loss": 0.22175094485282898, |
|
"eval_mean_accuracy": 0.610582709182683, |
|
"eval_mean_iou": 0.5937905887635984, |
|
"eval_overall_accuracy": 0.9755763153531658, |
|
"eval_runtime": 9.6277, |
|
"eval_samples_per_second": 6.959, |
|
"eval_steps_per_second": 0.519, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 28.1, |
|
"learning_rate": 2.305263157894737e-05, |
|
"loss": 0.213, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 28.2, |
|
"learning_rate": 2.294736842105263e-05, |
|
"loss": 0.2174, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 28.3, |
|
"learning_rate": 2.2842105263157897e-05, |
|
"loss": 0.2389, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 28.4, |
|
"learning_rate": 2.273684210526316e-05, |
|
"loss": 0.2463, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 2.2631578947368423e-05, |
|
"loss": 0.2331, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 28.6, |
|
"learning_rate": 2.2526315789473683e-05, |
|
"loss": 0.2407, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 28.7, |
|
"learning_rate": 2.242105263157895e-05, |
|
"loss": 0.2369, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 28.8, |
|
"learning_rate": 2.2315789473684213e-05, |
|
"loss": 0.2691, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 28.9, |
|
"learning_rate": 2.2210526315789476e-05, |
|
"loss": 0.2388, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 2.2105263157894736e-05, |
|
"loss": 0.2563, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"eval_accuracy_safe": 0.1852113137574643, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9987948263070255, |
|
"eval_iou_safe": 0.17815716870865347, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9746240459457417, |
|
"eval_loss": 0.20956651866436005, |
|
"eval_mean_accuracy": 0.592003070032245, |
|
"eval_mean_iou": 0.5763906073271976, |
|
"eval_overall_accuracy": 0.9747628738631063, |
|
"eval_runtime": 9.3361, |
|
"eval_samples_per_second": 7.176, |
|
"eval_steps_per_second": 0.536, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 29.1, |
|
"learning_rate": 2.2000000000000003e-05, |
|
"loss": 0.2531, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 29.2, |
|
"learning_rate": 2.1894736842105266e-05, |
|
"loss": 0.2131, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 29.3, |
|
"learning_rate": 2.1789473684210526e-05, |
|
"loss": 0.2521, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 29.4, |
|
"learning_rate": 2.168421052631579e-05, |
|
"loss": 0.2286, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 2.1578947368421053e-05, |
|
"loss": 0.2359, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 29.6, |
|
"learning_rate": 2.147368421052632e-05, |
|
"loss": 0.2319, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 29.7, |
|
"learning_rate": 2.136842105263158e-05, |
|
"loss": 0.2117, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 29.8, |
|
"learning_rate": 2.1263157894736842e-05, |
|
"loss": 0.2248, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 29.9, |
|
"learning_rate": 2.1157894736842106e-05, |
|
"loss": 0.2189, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.226, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_safe": 0.2843975158152821, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9976802371813743, |
|
"eval_iou_safe": 0.2642573906315484, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9764128620744182, |
|
"eval_loss": 0.21209673583507538, |
|
"eval_mean_accuracy": 0.6410388764983282, |
|
"eval_mean_iou": 0.6203351263529833, |
|
"eval_overall_accuracy": 0.9766110092846315, |
|
"eval_runtime": 10.6202, |
|
"eval_samples_per_second": 6.309, |
|
"eval_steps_per_second": 0.471, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 30.1, |
|
"learning_rate": 2.0947368421052632e-05, |
|
"loss": 0.2109, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 30.2, |
|
"learning_rate": 2.0842105263157895e-05, |
|
"loss": 0.2058, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 30.3, |
|
"learning_rate": 2.073684210526316e-05, |
|
"loss": 0.2244, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 30.4, |
|
"learning_rate": 2.0631578947368422e-05, |
|
"loss": 0.2107, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 2.0526315789473685e-05, |
|
"loss": 0.2341, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 30.6, |
|
"learning_rate": 2.042105263157895e-05, |
|
"loss": 0.2223, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 30.7, |
|
"learning_rate": 2.0315789473684212e-05, |
|
"loss": 0.2399, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 30.8, |
|
"learning_rate": 2.0210526315789475e-05, |
|
"loss": 0.2152, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 30.9, |
|
"learning_rate": 2.010526315789474e-05, |
|
"loss": 0.2303, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 2e-05, |
|
"loss": 0.2221, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"eval_accuracy_safe": 0.2718031156394925, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9977821448196129, |
|
"eval_iou_safe": 0.253343035006872, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9761463764725383, |
|
"eval_loss": 0.20163898169994354, |
|
"eval_mean_accuracy": 0.6347926302295527, |
|
"eval_mean_iou": 0.6147447057397052, |
|
"eval_overall_accuracy": 0.9763378883475688, |
|
"eval_runtime": 9.6143, |
|
"eval_samples_per_second": 6.969, |
|
"eval_steps_per_second": 0.52, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 31.1, |
|
"learning_rate": 1.9894736842105265e-05, |
|
"loss": 0.2184, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 31.2, |
|
"learning_rate": 1.9789473684210528e-05, |
|
"loss": 0.2253, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 31.3, |
|
"learning_rate": 1.968421052631579e-05, |
|
"loss": 0.2304, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 31.4, |
|
"learning_rate": 1.957894736842105e-05, |
|
"loss": 0.2157, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 1.9473684210526315e-05, |
|
"loss": 0.2134, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 31.6, |
|
"learning_rate": 1.936842105263158e-05, |
|
"loss": 0.1973, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 31.7, |
|
"learning_rate": 1.9263157894736845e-05, |
|
"loss": 0.2071, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 31.8, |
|
"learning_rate": 1.9157894736842104e-05, |
|
"loss": 0.2047, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 31.9, |
|
"learning_rate": 1.9052631578947368e-05, |
|
"loss": 0.2216, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 1.8947368421052634e-05, |
|
"loss": 0.2317, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"eval_accuracy_safe": 0.2649045300519273, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.99817452149465, |
|
"eval_iou_safe": 0.2499158963989111, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9763296849028891, |
|
"eval_loss": 0.20076872408390045, |
|
"eval_mean_accuracy": 0.6315395257732886, |
|
"eval_mean_iou": 0.6131227906509, |
|
"eval_overall_accuracy": 0.9765149016878498, |
|
"eval_runtime": 10.0363, |
|
"eval_samples_per_second": 6.676, |
|
"eval_steps_per_second": 0.498, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 32.1, |
|
"learning_rate": 1.8842105263157894e-05, |
|
"loss": 0.2222, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 32.2, |
|
"learning_rate": 1.8736842105263158e-05, |
|
"loss": 0.2257, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 32.3, |
|
"learning_rate": 1.863157894736842e-05, |
|
"loss": 0.2139, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 32.4, |
|
"learning_rate": 1.8526315789473687e-05, |
|
"loss": 0.1901, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.2044, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 32.6, |
|
"learning_rate": 1.831578947368421e-05, |
|
"loss": 0.2069, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 32.7, |
|
"learning_rate": 1.8210526315789474e-05, |
|
"loss": 0.2133, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 32.8, |
|
"learning_rate": 1.810526315789474e-05, |
|
"loss": 0.2049, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 32.9, |
|
"learning_rate": 1.8e-05, |
|
"loss": 0.2138, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.2643, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"eval_accuracy_safe": 0.32538617815659926, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9975752200987912, |
|
"eval_iou_safe": 0.30137717538008896, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.977503612135026, |
|
"eval_loss": 0.19892987608909607, |
|
"eval_mean_accuracy": 0.6614806991276952, |
|
"eval_mean_iou": 0.6394403937575575, |
|
"eval_overall_accuracy": 0.9777198336017665, |
|
"eval_runtime": 10.0482, |
|
"eval_samples_per_second": 6.668, |
|
"eval_steps_per_second": 0.498, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 33.1, |
|
"learning_rate": 1.7789473684210527e-05, |
|
"loss": 0.2032, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 33.2, |
|
"learning_rate": 1.768421052631579e-05, |
|
"loss": 0.2138, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 33.3, |
|
"learning_rate": 1.7578947368421054e-05, |
|
"loss": 0.225, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 33.4, |
|
"learning_rate": 1.7473684210526317e-05, |
|
"loss": 0.1898, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.2084, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 33.6, |
|
"learning_rate": 1.7263157894736843e-05, |
|
"loss": 0.2185, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 33.7, |
|
"learning_rate": 1.7157894736842107e-05, |
|
"loss": 0.2103, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 33.8, |
|
"learning_rate": 1.705263157894737e-05, |
|
"loss": 0.192, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 33.9, |
|
"learning_rate": 1.694736842105263e-05, |
|
"loss": 0.2015, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 1.6842105263157896e-05, |
|
"loss": 0.2118, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"eval_accuracy_safe": 0.3347327111306433, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.997749583657136, |
|
"eval_iou_safe": 0.3116878455021897, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9779470808145468, |
|
"eval_loss": 0.1900891214609146, |
|
"eval_mean_accuracy": 0.6662411473938896, |
|
"eval_mean_iou": 0.6448174631583683, |
|
"eval_overall_accuracy": 0.9781651283378032, |
|
"eval_runtime": 9.3476, |
|
"eval_samples_per_second": 7.168, |
|
"eval_steps_per_second": 0.535, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 34.1, |
|
"learning_rate": 1.673684210526316e-05, |
|
"loss": 0.1939, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 34.2, |
|
"learning_rate": 1.6631578947368423e-05, |
|
"loss": 0.1964, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 34.3, |
|
"learning_rate": 1.6526315789473683e-05, |
|
"loss": 0.2013, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 34.4, |
|
"learning_rate": 1.642105263157895e-05, |
|
"loss": 0.2023, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.1977, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 34.6, |
|
"learning_rate": 1.6210526315789473e-05, |
|
"loss": 0.2127, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 34.7, |
|
"learning_rate": 1.6105263157894736e-05, |
|
"loss": 0.2226, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 34.8, |
|
"learning_rate": 1.6000000000000003e-05, |
|
"loss": 0.2097, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 34.9, |
|
"learning_rate": 1.5894736842105266e-05, |
|
"loss": 0.1962, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.2133, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_safe": 0.3618933620147956, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9975545100260806, |
|
"eval_iou_safe": 0.3349795266599463, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9785487908911983, |
|
"eval_loss": 0.19170759618282318, |
|
"eval_mean_accuracy": 0.6797239360204381, |
|
"eval_mean_iou": 0.6567641587755723, |
|
"eval_overall_accuracy": 0.9787780989461871, |
|
"eval_runtime": 10.6094, |
|
"eval_samples_per_second": 6.315, |
|
"eval_steps_per_second": 0.471, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 35.1, |
|
"learning_rate": 1.568421052631579e-05, |
|
"loss": 0.1896, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 35.2, |
|
"learning_rate": 1.5578947368421056e-05, |
|
"loss": 0.1961, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 35.3, |
|
"learning_rate": 1.5473684210526316e-05, |
|
"loss": 0.1909, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 35.4, |
|
"learning_rate": 1.536842105263158e-05, |
|
"loss": 0.2388, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.2134, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 35.6, |
|
"learning_rate": 1.5157894736842107e-05, |
|
"loss": 0.1938, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 35.7, |
|
"learning_rate": 1.5052631578947369e-05, |
|
"loss": 0.1976, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 35.8, |
|
"learning_rate": 1.4947368421052632e-05, |
|
"loss": 0.21, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 35.9, |
|
"learning_rate": 1.4842105263157895e-05, |
|
"loss": 0.1986, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.2064, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"eval_accuracy_safe": 0.3401316880042868, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9978212768833464, |
|
"eval_iou_safe": 0.31741129406516555, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9781749054634312, |
|
"eval_loss": 0.1860196888446808, |
|
"eval_mean_accuracy": 0.6689764824438166, |
|
"eval_mean_iou": 0.6477930997642984, |
|
"eval_overall_accuracy": 0.9783941809810809, |
|
"eval_runtime": 9.7715, |
|
"eval_samples_per_second": 6.857, |
|
"eval_steps_per_second": 0.512, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 36.1, |
|
"learning_rate": 1.4631578947368422e-05, |
|
"loss": 0.1955, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 36.2, |
|
"learning_rate": 1.4526315789473685e-05, |
|
"loss": 0.2, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 36.3, |
|
"learning_rate": 1.4421052631578948e-05, |
|
"loss": 0.1799, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 36.4, |
|
"learning_rate": 1.431578947368421e-05, |
|
"loss": 0.2033, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.1931, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 36.6, |
|
"learning_rate": 1.4105263157894738e-05, |
|
"loss": 0.1955, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 36.7, |
|
"learning_rate": 1.4000000000000001e-05, |
|
"loss": 0.1978, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 36.8, |
|
"learning_rate": 1.3894736842105263e-05, |
|
"loss": 0.1951, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 36.9, |
|
"learning_rate": 1.3789473684210526e-05, |
|
"loss": 0.1876, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.2341, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"eval_accuracy_safe": 0.2703825351482839, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9982565404228352, |
|
"eval_iou_safe": 0.2557340766682649, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.976569175210793, |
|
"eval_loss": 0.17752937972545624, |
|
"eval_mean_accuracy": 0.6343195377855595, |
|
"eval_mean_iou": 0.616151625939529, |
|
"eval_overall_accuracy": 0.9767563093954058, |
|
"eval_runtime": 9.2963, |
|
"eval_samples_per_second": 7.207, |
|
"eval_steps_per_second": 0.538, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 37.1, |
|
"learning_rate": 1.3578947368421053e-05, |
|
"loss": 0.1998, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 37.2, |
|
"learning_rate": 1.3473684210526316e-05, |
|
"loss": 0.1972, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 37.3, |
|
"learning_rate": 1.336842105263158e-05, |
|
"loss": 0.1904, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 37.4, |
|
"learning_rate": 1.3263157894736844e-05, |
|
"loss": 0.1917, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.1875, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 37.6, |
|
"learning_rate": 1.305263157894737e-05, |
|
"loss": 0.1848, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 37.7, |
|
"learning_rate": 1.2947368421052633e-05, |
|
"loss": 0.1964, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 37.8, |
|
"learning_rate": 1.2842105263157894e-05, |
|
"loss": 0.1845, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 37.9, |
|
"learning_rate": 1.2736842105263157e-05, |
|
"loss": 0.1866, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.2093, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"eval_accuracy_safe": 0.3552260785424883, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9928191196329964, |
|
"eval_iou_safe": 0.28741779073267426, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9737097853338179, |
|
"eval_loss": 0.1933618187904358, |
|
"eval_mean_accuracy": 0.6740225990877423, |
|
"eval_mean_iou": 0.6305637880332461, |
|
"eval_overall_accuracy": 0.9739856435291803, |
|
"eval_runtime": 9.4354, |
|
"eval_samples_per_second": 7.101, |
|
"eval_steps_per_second": 0.53, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 38.1, |
|
"learning_rate": 1.2526315789473686e-05, |
|
"loss": 0.2115, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 38.2, |
|
"learning_rate": 1.2421052631578949e-05, |
|
"loss": 0.1946, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 38.3, |
|
"learning_rate": 1.231578947368421e-05, |
|
"loss": 0.1942, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 38.4, |
|
"learning_rate": 1.2210526315789474e-05, |
|
"loss": 0.1908, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.206, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 38.6, |
|
"learning_rate": 1.2e-05, |
|
"loss": 0.1776, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 38.7, |
|
"learning_rate": 1.1894736842105264e-05, |
|
"loss": 0.1942, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 38.8, |
|
"learning_rate": 1.1789473684210527e-05, |
|
"loss": 0.178, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 38.9, |
|
"learning_rate": 1.168421052631579e-05, |
|
"loss": 0.2015, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.1958, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"eval_accuracy_safe": 0.30012027709993405, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9980261481975256, |
|
"eval_iou_safe": 0.28184293125481946, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9772090847024161, |
|
"eval_loss": 0.17546288669109344, |
|
"eval_mean_accuracy": 0.6490732126487297, |
|
"eval_mean_iou": 0.6295260079786178, |
|
"eval_overall_accuracy": 0.9774111278021513, |
|
"eval_runtime": 10.0143, |
|
"eval_samples_per_second": 6.69, |
|
"eval_steps_per_second": 0.499, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 39.1, |
|
"learning_rate": 1.1473684210526315e-05, |
|
"loss": 0.1823, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 39.2, |
|
"learning_rate": 1.136842105263158e-05, |
|
"loss": 0.2134, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 39.3, |
|
"learning_rate": 1.1263157894736842e-05, |
|
"loss": 0.1747, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 39.4, |
|
"learning_rate": 1.1157894736842106e-05, |
|
"loss": 0.1852, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.1861, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 39.6, |
|
"learning_rate": 1.0947368421052633e-05, |
|
"loss": 0.1801, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 39.7, |
|
"learning_rate": 1.0842105263157895e-05, |
|
"loss": 0.2033, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 39.8, |
|
"learning_rate": 1.073684210526316e-05, |
|
"loss": 0.1907, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 39.9, |
|
"learning_rate": 1.0631578947368421e-05, |
|
"loss": 0.1898, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.1886, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_safe": 0.3881249494026623, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9968947211374042, |
|
"eval_iou_safe": 0.35219360153638685, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9786680764654981, |
|
"eval_loss": 0.17675457894802094, |
|
"eval_mean_accuracy": 0.6925098352700332, |
|
"eval_mean_iou": 0.6654308390009425, |
|
"eval_overall_accuracy": 0.9789126381945255, |
|
"eval_runtime": 9.5568, |
|
"eval_samples_per_second": 7.011, |
|
"eval_steps_per_second": 0.523, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 40.1, |
|
"learning_rate": 1.0421052631578948e-05, |
|
"loss": 0.1862, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 40.2, |
|
"learning_rate": 1.0315789473684211e-05, |
|
"loss": 0.1751, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 40.3, |
|
"learning_rate": 1.0210526315789474e-05, |
|
"loss": 0.1777, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 40.4, |
|
"learning_rate": 1.0105263157894738e-05, |
|
"loss": 0.1875, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 1e-05, |
|
"loss": 0.1964, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 40.6, |
|
"learning_rate": 9.894736842105264e-06, |
|
"loss": 0.2019, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 40.7, |
|
"learning_rate": 9.789473684210526e-06, |
|
"loss": 0.1781, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 40.8, |
|
"learning_rate": 9.68421052631579e-06, |
|
"loss": 0.1974, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 40.9, |
|
"learning_rate": 9.578947368421052e-06, |
|
"loss": 0.1903, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.1734, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"eval_accuracy_safe": 0.3947960879102239, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9972934340386531, |
|
"eval_iou_safe": 0.3625567543168683, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9792547042493136, |
|
"eval_loss": 0.17450523376464844, |
|
"eval_mean_accuracy": 0.6960447609744385, |
|
"eval_mean_iou": 0.6709057292830909, |
|
"eval_overall_accuracy": 0.9794966284908465, |
|
"eval_runtime": 10.0572, |
|
"eval_samples_per_second": 6.662, |
|
"eval_steps_per_second": 0.497, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 41.1, |
|
"learning_rate": 9.368421052631579e-06, |
|
"loss": 0.2101, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 41.2, |
|
"learning_rate": 9.263157894736844e-06, |
|
"loss": 0.1794, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 41.3, |
|
"learning_rate": 9.157894736842105e-06, |
|
"loss": 0.2051, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 41.4, |
|
"learning_rate": 9.05263157894737e-06, |
|
"loss": 0.1762, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.1851, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 41.6, |
|
"learning_rate": 8.842105263157895e-06, |
|
"loss": 0.1835, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 41.7, |
|
"learning_rate": 8.736842105263158e-06, |
|
"loss": 0.2018, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 41.8, |
|
"learning_rate": 8.631578947368422e-06, |
|
"loss": 0.1997, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 41.9, |
|
"learning_rate": 8.526315789473685e-06, |
|
"loss": 0.1848, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.1795, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"eval_accuracy_safe": 0.41681412176514354, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9969580247307602, |
|
"eval_iou_safe": 0.3789419821537469, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9795699690858644, |
|
"eval_loss": 0.17103362083435059, |
|
"eval_mean_accuracy": 0.7068860732479518, |
|
"eval_mean_iou": 0.6792559756198057, |
|
"eval_overall_accuracy": 0.9798215040520056, |
|
"eval_runtime": 9.5328, |
|
"eval_samples_per_second": 7.028, |
|
"eval_steps_per_second": 0.525, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 42.1, |
|
"learning_rate": 8.315789473684212e-06, |
|
"loss": 0.1863, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 42.2, |
|
"learning_rate": 8.210526315789475e-06, |
|
"loss": 0.1977, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 42.3, |
|
"learning_rate": 8.105263157894736e-06, |
|
"loss": 0.1796, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 42.4, |
|
"learning_rate": 8.000000000000001e-06, |
|
"loss": 0.1833, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.1844, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 42.6, |
|
"learning_rate": 7.789473684210528e-06, |
|
"loss": 0.1899, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 42.7, |
|
"learning_rate": 7.68421052631579e-06, |
|
"loss": 0.189, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 42.8, |
|
"learning_rate": 7.578947368421054e-06, |
|
"loss": 0.1717, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 42.9, |
|
"learning_rate": 7.473684210526316e-06, |
|
"loss": 0.1709, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.222, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"eval_accuracy_safe": 0.404142620884268, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.997192699775639, |
|
"eval_iou_safe": 0.3700154592236505, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9794293850090163, |
|
"eval_loss": 0.17061825096607208, |
|
"eval_mean_accuracy": 0.7006676603299535, |
|
"eval_mean_iou": 0.6747224221163334, |
|
"eval_overall_accuracy": 0.9796749513540695, |
|
"eval_runtime": 9.3364, |
|
"eval_samples_per_second": 7.176, |
|
"eval_steps_per_second": 0.536, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 43.1, |
|
"learning_rate": 7.2631578947368426e-06, |
|
"loss": 0.1925, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 43.2, |
|
"learning_rate": 7.157894736842105e-06, |
|
"loss": 0.179, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 43.3, |
|
"learning_rate": 7.052631578947369e-06, |
|
"loss": 0.1774, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 43.4, |
|
"learning_rate": 6.9473684210526315e-06, |
|
"loss": 0.1743, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.1815, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 43.6, |
|
"learning_rate": 6.736842105263158e-06, |
|
"loss": 0.1784, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 43.7, |
|
"learning_rate": 6.631578947368422e-06, |
|
"loss": 0.1719, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 43.8, |
|
"learning_rate": 6.526315789473685e-06, |
|
"loss": 0.1847, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 43.9, |
|
"learning_rate": 6.421052631578947e-06, |
|
"loss": 0.1777, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.1831, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"eval_accuracy_safe": 0.4044336760459674, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.997242392216392, |
|
"eval_iou_safe": 0.3708362421682382, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9794867149476597, |
|
"eval_loss": 0.1686682254076004, |
|
"eval_mean_accuracy": 0.7008380341311797, |
|
"eval_mean_iou": 0.675161478557949, |
|
"eval_overall_accuracy": 0.9797317732625933, |
|
"eval_runtime": 9.4019, |
|
"eval_samples_per_second": 7.126, |
|
"eval_steps_per_second": 0.532, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 44.1, |
|
"learning_rate": 6.2105263157894745e-06, |
|
"loss": 0.1826, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 44.2, |
|
"learning_rate": 6.105263157894737e-06, |
|
"loss": 0.1878, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 44.3, |
|
"learning_rate": 6e-06, |
|
"loss": 0.1669, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 44.4, |
|
"learning_rate": 5.8947368421052634e-06, |
|
"loss": 0.1728, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.2053, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 44.6, |
|
"learning_rate": 5.68421052631579e-06, |
|
"loss": 0.1774, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 44.7, |
|
"learning_rate": 5.578947368421053e-06, |
|
"loss": 0.1788, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 44.8, |
|
"learning_rate": 5.4736842105263165e-06, |
|
"loss": 0.1749, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 44.9, |
|
"learning_rate": 5.36842105263158e-06, |
|
"loss": 0.1795, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.1935, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_safe": 0.4346667900277948, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9964187414776291, |
|
"eval_iou_safe": 0.38890805113099386, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9795630927466161, |
|
"eval_loss": 0.1710895597934723, |
|
"eval_mean_accuracy": 0.7155427657527119, |
|
"eval_mean_iou": 0.684235571938805, |
|
"eval_overall_accuracy": 0.9798254895566115, |
|
"eval_runtime": 10.0694, |
|
"eval_samples_per_second": 6.654, |
|
"eval_steps_per_second": 0.497, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 45.1, |
|
"learning_rate": 5.1578947368421055e-06, |
|
"loss": 0.1838, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 45.2, |
|
"learning_rate": 5.052631578947369e-06, |
|
"loss": 0.1726, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 45.3, |
|
"learning_rate": 4.947368421052632e-06, |
|
"loss": 0.1731, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 45.4, |
|
"learning_rate": 4.842105263157895e-06, |
|
"loss": 0.1724, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 4.736842105263159e-06, |
|
"loss": 0.1789, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 45.6, |
|
"learning_rate": 4.631578947368422e-06, |
|
"loss": 0.1706, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 45.7, |
|
"learning_rate": 4.526315789473685e-06, |
|
"loss": 0.179, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 45.8, |
|
"learning_rate": 4.4210526315789476e-06, |
|
"loss": 0.2259, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 45.9, |
|
"learning_rate": 4.315789473684211e-06, |
|
"loss": 0.2031, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.1728, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"eval_accuracy_safe": 0.4207558953126626, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9969068069022154, |
|
"eval_iou_safe": 0.38194129740606275, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9796351289125812, |
|
"eval_loss": 0.17141538858413696, |
|
"eval_mean_accuracy": 0.708831351107439, |
|
"eval_mean_iou": 0.6807882131593219, |
|
"eval_overall_accuracy": 0.979888232786264, |
|
"eval_runtime": 9.7002, |
|
"eval_samples_per_second": 6.907, |
|
"eval_steps_per_second": 0.515, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 46.1, |
|
"learning_rate": 4.105263157894737e-06, |
|
"loss": 0.1766, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 46.2, |
|
"learning_rate": 4.000000000000001e-06, |
|
"loss": 0.1703, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 46.3, |
|
"learning_rate": 3.894736842105264e-06, |
|
"loss": 0.1648, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 46.4, |
|
"learning_rate": 3.789473684210527e-06, |
|
"loss": 0.1851, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.178, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 46.6, |
|
"learning_rate": 3.5789473684210525e-06, |
|
"loss": 0.1785, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 46.7, |
|
"learning_rate": 3.4736842105263158e-06, |
|
"loss": 0.2019, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 46.8, |
|
"learning_rate": 3.368421052631579e-06, |
|
"loss": 0.1876, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 46.9, |
|
"learning_rate": 3.2631578947368423e-06, |
|
"loss": 0.1748, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 3.1578947368421056e-06, |
|
"loss": 0.1742, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"eval_accuracy_safe": 0.38982501994980745, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9973832559120804, |
|
"eval_iou_safe": 0.35896443759717717, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9791974221442664, |
|
"eval_loss": 0.16703662276268005, |
|
"eval_mean_accuracy": 0.6936041379309439, |
|
"eval_mean_iou": 0.6690809298707218, |
|
"eval_overall_accuracy": 0.9794369597933186, |
|
"eval_runtime": 9.6852, |
|
"eval_samples_per_second": 6.918, |
|
"eval_steps_per_second": 0.516, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 47.1, |
|
"learning_rate": 3.0526315789473684e-06, |
|
"loss": 0.1799, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 47.2, |
|
"learning_rate": 2.9473684210526317e-06, |
|
"loss": 0.1994, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 47.3, |
|
"learning_rate": 2.842105263157895e-06, |
|
"loss": 0.1865, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 47.4, |
|
"learning_rate": 2.7368421052631583e-06, |
|
"loss": 0.1713, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.1622, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 47.6, |
|
"learning_rate": 2.5263157894736844e-06, |
|
"loss": 0.1643, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 47.7, |
|
"learning_rate": 2.4210526315789477e-06, |
|
"loss": 0.1772, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 47.8, |
|
"learning_rate": 2.315789473684211e-06, |
|
"loss": 0.1819, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 47.9, |
|
"learning_rate": 2.2105263157894738e-06, |
|
"loss": 0.19, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 2.105263157894737e-06, |
|
"loss": 0.2064, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"eval_accuracy_safe": 0.4209312994167332, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9969624248878517, |
|
"eval_iou_safe": 0.3827353713872098, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.979694923134079, |
|
"eval_loss": 0.16833512485027313, |
|
"eval_mean_accuracy": 0.7089468621522924, |
|
"eval_mean_iou": 0.6812151472606445, |
|
"eval_overall_accuracy": 0.9799473890617713, |
|
"eval_runtime": 9.8036, |
|
"eval_samples_per_second": 6.834, |
|
"eval_steps_per_second": 0.51, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 48.1, |
|
"learning_rate": 1.6466165413533834e-05, |
|
"loss": 0.1721, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 48.2, |
|
"learning_rate": 1.6390977443609023e-05, |
|
"loss": 0.1789, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 48.3, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.1666, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 48.4, |
|
"learning_rate": 1.62406015037594e-05, |
|
"loss": 0.1764, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 1.6165413533834585e-05, |
|
"loss": 0.1802, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 48.6, |
|
"learning_rate": 1.6090225563909775e-05, |
|
"loss": 0.1667, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 48.7, |
|
"learning_rate": 1.6015037593984964e-05, |
|
"loss": 0.1788, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 48.8, |
|
"learning_rate": 1.5939849624060154e-05, |
|
"loss": 0.2033, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 48.9, |
|
"learning_rate": 1.586466165413534e-05, |
|
"loss": 0.1924, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.1946, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"eval_accuracy_safe": 0.37460341324821417, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9975849591131536, |
|
"eval_iou_safe": 0.347065714704107, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9789501628246108, |
|
"eval_loss": 0.1659359484910965, |
|
"eval_mean_accuracy": 0.6860941861806839, |
|
"eval_mean_iou": 0.6630079387643589, |
|
"eval_overall_accuracy": 0.9791830831499242, |
|
"eval_runtime": 16.317, |
|
"eval_samples_per_second": 4.106, |
|
"eval_steps_per_second": 0.306, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 49.1, |
|
"learning_rate": 1.5714285714285715e-05, |
|
"loss": 0.2351, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 49.2, |
|
"learning_rate": 1.5639097744360905e-05, |
|
"loss": 0.1724, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 49.3, |
|
"learning_rate": 1.556390977443609e-05, |
|
"loss": 0.1938, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 49.4, |
|
"learning_rate": 1.548872180451128e-05, |
|
"loss": 0.1825, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 1.5413533834586467e-05, |
|
"loss": 0.167, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 49.6, |
|
"learning_rate": 1.5338345864661656e-05, |
|
"loss": 0.1594, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 49.7, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.1752, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 49.8, |
|
"learning_rate": 1.5187969924812032e-05, |
|
"loss": 0.1783, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 49.9, |
|
"learning_rate": 1.511278195488722e-05, |
|
"loss": 0.1806, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 1.5037593984962406e-05, |
|
"loss": 0.1836, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_safe": 0.44868755324767445, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9964670258681129, |
|
"eval_iou_safe": 0.402023415385625, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.98002171616563, |
|
"eval_loss": 0.16180779039859772, |
|
"eval_mean_accuracy": 0.7225772895578937, |
|
"eval_mean_iou": 0.6910225657756275, |
|
"eval_overall_accuracy": 0.9802864985679512, |
|
"eval_runtime": 14.3683, |
|
"eval_samples_per_second": 4.663, |
|
"eval_steps_per_second": 0.348, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 50.1, |
|
"learning_rate": 1.4962406015037595e-05, |
|
"loss": 0.1755, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 50.2, |
|
"learning_rate": 1.4887218045112783e-05, |
|
"loss": 0.1764, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 50.3, |
|
"learning_rate": 1.481203007518797e-05, |
|
"loss": 0.1642, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 50.4, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.208, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 1.4661654135338346e-05, |
|
"loss": 0.1609, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 50.6, |
|
"learning_rate": 1.4586466165413534e-05, |
|
"loss": 0.1684, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 50.7, |
|
"learning_rate": 1.4511278195488722e-05, |
|
"loss": 0.1795, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 50.8, |
|
"learning_rate": 1.4436090225563912e-05, |
|
"loss": 0.1653, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 50.9, |
|
"learning_rate": 1.4360902255639098e-05, |
|
"loss": 0.1681, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 1.4285714285714285e-05, |
|
"loss": 0.1786, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"eval_accuracy_safe": 0.4326756643189502, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9966320610934238, |
|
"eval_iou_safe": 0.3895693916085858, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9797144327801063, |
|
"eval_loss": 0.1594761162996292, |
|
"eval_mean_accuracy": 0.714653862706187, |
|
"eval_mean_iou": 0.684641912194346, |
|
"eval_overall_accuracy": 0.97997369339217, |
|
"eval_runtime": 14.553, |
|
"eval_samples_per_second": 4.604, |
|
"eval_steps_per_second": 0.344, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 51.1, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.1714, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 51.2, |
|
"learning_rate": 1.4135338345864663e-05, |
|
"loss": 0.1736, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 51.3, |
|
"learning_rate": 1.406015037593985e-05, |
|
"loss": 0.1555, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 51.4, |
|
"learning_rate": 1.3984962406015037e-05, |
|
"loss": 0.1724, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 1.3909774436090226e-05, |
|
"loss": 0.1612, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 51.6, |
|
"learning_rate": 1.3834586466165414e-05, |
|
"loss": 0.1639, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 51.7, |
|
"learning_rate": 1.3759398496240602e-05, |
|
"loss": 0.1732, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 51.8, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.1685, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 51.9, |
|
"learning_rate": 1.3609022556390977e-05, |
|
"loss": 0.177, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 1.3533834586466165e-05, |
|
"loss": 0.1867, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"eval_accuracy_safe": 0.4539978643104691, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9965902302666741, |
|
"eval_iou_safe": 0.40826213641035775, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9802987203511426, |
|
"eval_loss": 0.1555413454771042, |
|
"eval_mean_accuracy": 0.7252940472885716, |
|
"eval_mean_iou": 0.6942804283807502, |
|
"eval_overall_accuracy": 0.9805629217802588, |
|
"eval_runtime": 18.6158, |
|
"eval_samples_per_second": 3.599, |
|
"eval_steps_per_second": 0.269, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 52.1, |
|
"learning_rate": 1.3458646616541353e-05, |
|
"loss": 0.1934, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 52.2, |
|
"learning_rate": 1.3383458646616543e-05, |
|
"loss": 0.1734, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 52.3, |
|
"learning_rate": 1.330827067669173e-05, |
|
"loss": 0.1766, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 52.4, |
|
"learning_rate": 1.3233082706766916e-05, |
|
"loss": 0.152, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.1553, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 52.6, |
|
"learning_rate": 1.3082706766917294e-05, |
|
"loss": 0.16, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 52.7, |
|
"learning_rate": 1.3007518796992482e-05, |
|
"loss": 0.1743, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 52.8, |
|
"learning_rate": 1.2932330827067671e-05, |
|
"loss": 0.1708, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 52.9, |
|
"learning_rate": 1.2857142857142857e-05, |
|
"loss": 0.1588, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 1.2781954887218045e-05, |
|
"loss": 0.1824, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"eval_accuracy_safe": 0.4386297662692125, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9965735096697266, |
|
"eval_iou_safe": 0.3942474610451035, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9798314336927032, |
|
"eval_loss": 0.15635764598846436, |
|
"eval_mean_accuracy": 0.7176016379694695, |
|
"eval_mean_iou": 0.6870394473689033, |
|
"eval_overall_accuracy": 0.9800927461083255, |
|
"eval_runtime": 25.5349, |
|
"eval_samples_per_second": 2.624, |
|
"eval_steps_per_second": 0.196, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 53.1, |
|
"learning_rate": 1.2706766917293233e-05, |
|
"loss": 0.1698, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 53.2, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.1703, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 53.3, |
|
"learning_rate": 1.255639097744361e-05, |
|
"loss": 0.1558, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 53.4, |
|
"learning_rate": 1.2481203007518798e-05, |
|
"loss": 0.1752, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 1.2406015037593984e-05, |
|
"loss": 0.1496, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 53.6, |
|
"learning_rate": 1.2330827067669174e-05, |
|
"loss": 0.1542, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 53.7, |
|
"learning_rate": 1.2255639097744361e-05, |
|
"loss": 0.1897, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 53.8, |
|
"learning_rate": 1.218045112781955e-05, |
|
"loss": 0.181, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 53.9, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.1629, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 1.2030075187969925e-05, |
|
"loss": 0.1494, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"eval_accuracy_safe": 0.4920258595764858, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9956031283591532, |
|
"eval_iou_safe": 0.4299212121007973, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9804440437651487, |
|
"eval_loss": 0.1539766639471054, |
|
"eval_mean_accuracy": 0.7438144939678195, |
|
"eval_mean_iou": 0.705182627932973, |
|
"eval_overall_accuracy": 0.9807282632856227, |
|
"eval_runtime": 20.2085, |
|
"eval_samples_per_second": 3.315, |
|
"eval_steps_per_second": 0.247, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 54.1, |
|
"learning_rate": 1.1954887218045113e-05, |
|
"loss": 0.1891, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 54.2, |
|
"learning_rate": 1.1879699248120302e-05, |
|
"loss": 0.176, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 54.3, |
|
"learning_rate": 1.1804511278195488e-05, |
|
"loss": 0.1554, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 54.4, |
|
"learning_rate": 1.1729323308270678e-05, |
|
"loss": 0.1469, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 1.1654135338345864e-05, |
|
"loss": 0.1646, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 54.6, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.1596, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 54.7, |
|
"learning_rate": 1.1503759398496241e-05, |
|
"loss": 0.175, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 54.8, |
|
"learning_rate": 1.1428571428571429e-05, |
|
"loss": 0.1619, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 54.9, |
|
"learning_rate": 1.1353383458646617e-05, |
|
"loss": 0.1554, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 1.1278195488721805e-05, |
|
"loss": 0.1583, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_safe": 0.4557634704569373, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9963986767612919, |
|
"eval_iou_safe": 0.4075434429648423, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9801621116214008, |
|
"eval_loss": 0.1501733511686325, |
|
"eval_mean_accuracy": 0.7260810736091146, |
|
"eval_mean_iou": 0.6938527772931216, |
|
"eval_overall_accuracy": 0.9804291796328416, |
|
"eval_runtime": 18.9217, |
|
"eval_samples_per_second": 3.541, |
|
"eval_steps_per_second": 0.264, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 55.1, |
|
"learning_rate": 1.1203007518796992e-05, |
|
"loss": 0.1617, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 55.2, |
|
"learning_rate": 1.1127819548872182e-05, |
|
"loss": 0.1719, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 55.3, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.1641, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 55.4, |
|
"learning_rate": 1.0977443609022558e-05, |
|
"loss": 0.1823, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 1.0902255639097744e-05, |
|
"loss": 0.1654, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 55.6, |
|
"learning_rate": 1.0827067669172933e-05, |
|
"loss": 0.1518, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 55.7, |
|
"learning_rate": 1.0751879699248121e-05, |
|
"loss": 0.1463, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 55.8, |
|
"learning_rate": 1.0676691729323309e-05, |
|
"loss": 0.148, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 55.9, |
|
"learning_rate": 1.0601503759398497e-05, |
|
"loss": 0.1515, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.1648, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"eval_accuracy_safe": 0.47907294112204657, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9957864682379647, |
|
"eval_iou_safe": 0.42081808808017324, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9802440112401288, |
|
"eval_loss": 0.15234236419200897, |
|
"eval_mean_accuracy": 0.7374297046800056, |
|
"eval_mean_iou": 0.700531049660151, |
|
"eval_overall_accuracy": 0.9805235791562209, |
|
"eval_runtime": 23.4099, |
|
"eval_samples_per_second": 2.862, |
|
"eval_steps_per_second": 0.214, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 56.1, |
|
"learning_rate": 1.0451127819548872e-05, |
|
"loss": 0.1528, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 56.2, |
|
"learning_rate": 1.0375939849624062e-05, |
|
"loss": 0.1529, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 56.3, |
|
"learning_rate": 1.0300751879699248e-05, |
|
"loss": 0.1472, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 56.4, |
|
"learning_rate": 1.0225563909774437e-05, |
|
"loss": 0.1677, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 1.0150375939849624e-05, |
|
"loss": 0.1472, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 56.6, |
|
"learning_rate": 1.0075187969924813e-05, |
|
"loss": 0.1868, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 56.7, |
|
"learning_rate": 1e-05, |
|
"loss": 0.1579, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 56.8, |
|
"learning_rate": 9.924812030075189e-06, |
|
"loss": 0.1549, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 56.9, |
|
"learning_rate": 9.849624060150376e-06, |
|
"loss": 0.1538, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 9.774436090225564e-06, |
|
"loss": 0.1993, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"eval_accuracy_safe": 0.45862197909799884, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9964170400835537, |
|
"eval_iou_safe": 0.4103208806066156, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9802640745426133, |
|
"eval_loss": 0.15015093982219696, |
|
"eval_mean_accuracy": 0.7275195095907763, |
|
"eval_mean_iou": 0.6952924775746144, |
|
"eval_overall_accuracy": 0.9805314362938724, |
|
"eval_runtime": 22.6151, |
|
"eval_samples_per_second": 2.963, |
|
"eval_steps_per_second": 0.221, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 57.1, |
|
"learning_rate": 9.699248120300752e-06, |
|
"loss": 0.1521, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 57.2, |
|
"learning_rate": 9.624060150375942e-06, |
|
"loss": 0.1808, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 57.3, |
|
"learning_rate": 9.548872180451128e-06, |
|
"loss": 0.1622, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 57.4, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.1626, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 9.398496240601503e-06, |
|
"loss": 0.1647, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 57.6, |
|
"learning_rate": 9.323308270676693e-06, |
|
"loss": 0.1704, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 57.7, |
|
"learning_rate": 9.24812030075188e-06, |
|
"loss": 0.1771, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 57.8, |
|
"learning_rate": 9.172932330827068e-06, |
|
"loss": 0.1453, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 57.9, |
|
"learning_rate": 9.097744360902256e-06, |
|
"loss": 0.1824, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 9.022556390977444e-06, |
|
"loss": 0.2243, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"eval_accuracy_safe": 0.3919915497627226, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9972870978124414, |
|
"eval_iou_safe": 0.3599124314432707, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9791664098544022, |
|
"eval_loss": 0.14736422896385193, |
|
"eval_mean_accuracy": 0.694639323787582, |
|
"eval_mean_iou": 0.6695394206488364, |
|
"eval_overall_accuracy": 0.9794076378665754, |
|
"eval_runtime": 23.6296, |
|
"eval_samples_per_second": 2.835, |
|
"eval_steps_per_second": 0.212, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 58.1, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.1836, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 58.2, |
|
"learning_rate": 8.872180451127821e-06, |
|
"loss": 0.1469, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 58.3, |
|
"learning_rate": 8.796992481203007e-06, |
|
"loss": 0.1728, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 58.4, |
|
"learning_rate": 8.721804511278197e-06, |
|
"loss": 0.1744, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 8.646616541353383e-06, |
|
"loss": 0.138, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 58.6, |
|
"learning_rate": 8.571428571428573e-06, |
|
"loss": 0.1424, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 58.7, |
|
"learning_rate": 8.49624060150376e-06, |
|
"loss": 0.161, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 58.8, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.1621, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 58.9, |
|
"learning_rate": 8.345864661654136e-06, |
|
"loss": 0.174, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 8.270676691729324e-06, |
|
"loss": 0.1551, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"eval_accuracy_safe": 0.4686585633825621, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.996123578939933, |
|
"eval_iou_safe": 0.41571446890361186, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9802699770855438, |
|
"eval_loss": 0.14445674419403076, |
|
"eval_mean_accuracy": 0.7323910711612476, |
|
"eval_mean_iou": 0.6979922229945779, |
|
"eval_overall_accuracy": 0.9805431081287896, |
|
"eval_runtime": 20.355, |
|
"eval_samples_per_second": 3.292, |
|
"eval_steps_per_second": 0.246, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 59.1, |
|
"learning_rate": 8.195488721804512e-06, |
|
"loss": 0.1517, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 59.2, |
|
"learning_rate": 8.1203007518797e-06, |
|
"loss": 0.1525, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 59.3, |
|
"learning_rate": 8.045112781954887e-06, |
|
"loss": 0.1456, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 59.4, |
|
"learning_rate": 7.969924812030077e-06, |
|
"loss": 0.1556, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.1628, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 59.6, |
|
"learning_rate": 7.819548872180452e-06, |
|
"loss": 0.16, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 59.7, |
|
"learning_rate": 7.74436090225564e-06, |
|
"loss": 0.1565, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 59.8, |
|
"learning_rate": 7.669172932330828e-06, |
|
"loss": 0.172, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 59.9, |
|
"learning_rate": 7.593984962406016e-06, |
|
"loss": 0.1578, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 7.518796992481203e-06, |
|
"loss": 0.1666, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_safe": 0.4460487816161079, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9963782013636263, |
|
"eval_iou_safe": 0.3986167931304745, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9798569567217434, |
|
"eval_loss": 0.14441508054733276, |
|
"eval_mean_accuracy": 0.7212134914898671, |
|
"eval_mean_iou": 0.689236874926109, |
|
"eval_overall_accuracy": 0.9801223527139692, |
|
"eval_runtime": 20.2732, |
|
"eval_samples_per_second": 3.305, |
|
"eval_steps_per_second": 0.247, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 60.1, |
|
"learning_rate": 7.4436090225563915e-06, |
|
"loss": 0.1501, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 60.2, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.1478, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 60.3, |
|
"learning_rate": 7.293233082706767e-06, |
|
"loss": 0.1636, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 60.4, |
|
"learning_rate": 7.218045112781956e-06, |
|
"loss": 0.1505, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 7.142857142857143e-06, |
|
"loss": 0.1494, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 60.6, |
|
"learning_rate": 7.067669172932331e-06, |
|
"loss": 0.1579, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 60.7, |
|
"learning_rate": 6.992481203007518e-06, |
|
"loss": 0.1575, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 60.8, |
|
"learning_rate": 6.917293233082707e-06, |
|
"loss": 0.1672, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 60.9, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.1442, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 6.766917293233083e-06, |
|
"loss": 0.1632, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"eval_accuracy_safe": 0.5119718119822205, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9951075533331307, |
|
"eval_iou_safe": 0.44107453922579637, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805422419053816, |
|
"eval_loss": 0.15042045712471008, |
|
"eval_mean_accuracy": 0.7535396826576757, |
|
"eval_mean_iou": 0.710808390565589, |
|
"eval_overall_accuracy": 0.9808364982035622, |
|
"eval_runtime": 23.2018, |
|
"eval_samples_per_second": 2.888, |
|
"eval_steps_per_second": 0.216, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 61.1, |
|
"learning_rate": 6.691729323308271e-06, |
|
"loss": 0.1545, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 61.2, |
|
"learning_rate": 6.616541353383458e-06, |
|
"loss": 0.1553, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 61.3, |
|
"learning_rate": 6.541353383458647e-06, |
|
"loss": 0.151, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 61.4, |
|
"learning_rate": 6.4661654135338356e-06, |
|
"loss": 0.1437, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 6.3909774436090225e-06, |
|
"loss": 0.1447, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 61.6, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.1787, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 61.7, |
|
"learning_rate": 6.240601503759399e-06, |
|
"loss": 0.1452, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 61.8, |
|
"learning_rate": 6.165413533834587e-06, |
|
"loss": 0.1741, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 61.9, |
|
"learning_rate": 6.090225563909775e-06, |
|
"loss": 0.1482, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 6.015037593984962e-06, |
|
"loss": 0.1589, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"eval_accuracy_safe": 0.405906299513109, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9970843972424274, |
|
"eval_iou_safe": 0.3704234667492234, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9793746498816952, |
|
"eval_loss": 0.14296869933605194, |
|
"eval_mean_accuracy": 0.7014953483777682, |
|
"eval_mean_iou": 0.6748990583154593, |
|
"eval_overall_accuracy": 0.9796219441428113, |
|
"eval_runtime": 23.0241, |
|
"eval_samples_per_second": 2.91, |
|
"eval_steps_per_second": 0.217, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 62.1, |
|
"learning_rate": 5.939849624060151e-06, |
|
"loss": 0.1638, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 62.2, |
|
"learning_rate": 5.864661654135339e-06, |
|
"loss": 0.1501, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 62.3, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.1434, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 62.4, |
|
"learning_rate": 5.7142857142857145e-06, |
|
"loss": 0.1469, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 5.639097744360902e-06, |
|
"loss": 0.149, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 62.6, |
|
"learning_rate": 5.563909774436091e-06, |
|
"loss": 0.1446, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 62.7, |
|
"learning_rate": 5.488721804511279e-06, |
|
"loss": 0.1566, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 62.8, |
|
"learning_rate": 5.413533834586467e-06, |
|
"loss": 0.1554, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 62.9, |
|
"learning_rate": 5.338345864661654e-06, |
|
"loss": 0.2087, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.1454, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"eval_accuracy_safe": 0.4835100866997429, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.995890429282846, |
|
"eval_iou_safe": 0.42599376070945183, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9804767015760938, |
|
"eval_loss": 0.1422625035047531, |
|
"eval_mean_accuracy": 0.7397002579912945, |
|
"eval_mean_iou": 0.7032352311427728, |
|
"eval_overall_accuracy": 0.9807555355242829, |
|
"eval_runtime": 21.807, |
|
"eval_samples_per_second": 3.072, |
|
"eval_steps_per_second": 0.229, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 63.1, |
|
"learning_rate": 5.187969924812031e-06, |
|
"loss": 0.1521, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 63.2, |
|
"learning_rate": 5.112781954887219e-06, |
|
"loss": 0.1503, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 63.3, |
|
"learning_rate": 5.0375939849624065e-06, |
|
"loss": 0.1595, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 63.4, |
|
"learning_rate": 4.962406015037594e-06, |
|
"loss": 0.1531, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 4.887218045112782e-06, |
|
"loss": 0.1396, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 63.6, |
|
"learning_rate": 4.812030075187971e-06, |
|
"loss": 0.1479, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 63.7, |
|
"learning_rate": 4.736842105263159e-06, |
|
"loss": 0.1574, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 63.8, |
|
"learning_rate": 4.661654135338346e-06, |
|
"loss": 0.1508, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 63.9, |
|
"learning_rate": 4.586466165413534e-06, |
|
"loss": 0.1508, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 4.511278195488722e-06, |
|
"loss": 0.1635, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"eval_accuracy_safe": 0.49020628293645746, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9957314956087019, |
|
"eval_iou_safe": 0.4299155788612444, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.980516979053144, |
|
"eval_loss": 0.14236415922641754, |
|
"eval_mean_accuracy": 0.7429688892725796, |
|
"eval_mean_iou": 0.7052162789571942, |
|
"eval_overall_accuracy": 0.9807990913960472, |
|
"eval_runtime": 21.5855, |
|
"eval_samples_per_second": 3.104, |
|
"eval_steps_per_second": 0.232, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 64.1, |
|
"learning_rate": 4.436090225563911e-06, |
|
"loss": 0.1545, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 64.2, |
|
"learning_rate": 4.3609022556390985e-06, |
|
"loss": 0.1461, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 64.3, |
|
"learning_rate": 4.285714285714286e-06, |
|
"loss": 0.1462, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 64.4, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.1401, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 4.135338345864662e-06, |
|
"loss": 0.1609, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 64.6, |
|
"learning_rate": 4.06015037593985e-06, |
|
"loss": 0.1584, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 64.7, |
|
"learning_rate": 3.984962406015038e-06, |
|
"loss": 0.1526, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 64.8, |
|
"learning_rate": 3.909774436090226e-06, |
|
"loss": 0.1604, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 64.9, |
|
"learning_rate": 3.834586466165414e-06, |
|
"loss": 0.1448, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 3.7593984962406014e-06, |
|
"loss": 0.1515, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_safe": 0.4774518987976145, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9961566094524996, |
|
"eval_iou_safe": 0.4239224934623968, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805607490668283, |
|
"eval_loss": 0.14221766591072083, |
|
"eval_mean_accuracy": 0.736804254125057, |
|
"eval_mean_iou": 0.7022416212646125, |
|
"eval_overall_accuracy": 0.9808349040017199, |
|
"eval_runtime": 22.283, |
|
"eval_samples_per_second": 3.007, |
|
"eval_steps_per_second": 0.224, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 65.1, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.1606, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 65.2, |
|
"learning_rate": 3.609022556390978e-06, |
|
"loss": 0.1578, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 65.3, |
|
"learning_rate": 3.5338345864661657e-06, |
|
"loss": 0.1409, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 65.4, |
|
"learning_rate": 3.4586466165413535e-06, |
|
"loss": 0.142, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 3.3834586466165413e-06, |
|
"loss": 0.1564, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 65.6, |
|
"learning_rate": 3.308270676691729e-06, |
|
"loss": 0.1412, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 65.7, |
|
"learning_rate": 3.2330827067669178e-06, |
|
"loss": 0.1794, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 65.8, |
|
"learning_rate": 3.1578947368421056e-06, |
|
"loss": 0.141, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 65.9, |
|
"learning_rate": 3.0827067669172934e-06, |
|
"loss": 0.1577, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 3.007518796992481e-06, |
|
"loss": 0.151, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"eval_accuracy_safe": 0.4717927070443059, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9962058325431629, |
|
"eval_iou_safe": 0.41950013111146534, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.980442962798809, |
|
"eval_loss": 0.1422828882932663, |
|
"eval_mean_accuracy": 0.7339992697937344, |
|
"eval_mean_iou": 0.6999715469551372, |
|
"eval_overall_accuracy": 0.9807155096708838, |
|
"eval_runtime": 22.7162, |
|
"eval_samples_per_second": 2.949, |
|
"eval_steps_per_second": 0.22, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 66.1, |
|
"learning_rate": 2.9323308270676694e-06, |
|
"loss": 0.1474, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 66.2, |
|
"learning_rate": 2.8571428571428573e-06, |
|
"loss": 0.1435, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 66.3, |
|
"learning_rate": 2.7819548872180455e-06, |
|
"loss": 0.1392, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 66.4, |
|
"learning_rate": 2.7067669172932333e-06, |
|
"loss": 0.1462, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.1822, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 66.6, |
|
"learning_rate": 2.5563909774436093e-06, |
|
"loss": 0.157, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 66.7, |
|
"learning_rate": 2.481203007518797e-06, |
|
"loss": 0.1467, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 66.8, |
|
"learning_rate": 2.4060150375939854e-06, |
|
"loss": 0.1562, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 66.9, |
|
"learning_rate": 2.330827067669173e-06, |
|
"loss": 0.1624, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 2.255639097744361e-06, |
|
"loss": 0.166, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"eval_accuracy_safe": 0.4720953273117683, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9962893768591397, |
|
"eval_iou_safe": 0.42079618726258444, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805340739883608, |
|
"eval_loss": 0.14265188574790955, |
|
"eval_mean_accuracy": 0.734192352085454, |
|
"eval_mean_iou": 0.7006651306254726, |
|
"eval_overall_accuracy": 0.9808055251391966, |
|
"eval_runtime": 20.5005, |
|
"eval_samples_per_second": 3.268, |
|
"eval_steps_per_second": 0.244, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 67.1, |
|
"learning_rate": 2.1804511278195492e-06, |
|
"loss": 0.1524, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 67.2, |
|
"learning_rate": 2.105263157894737e-06, |
|
"loss": 0.1599, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 67.3, |
|
"learning_rate": 2.030075187969925e-06, |
|
"loss": 0.1492, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 67.4, |
|
"learning_rate": 1.954887218045113e-06, |
|
"loss": 0.1374, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 1.8796992481203007e-06, |
|
"loss": 0.1415, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 67.6, |
|
"learning_rate": 1.804511278195489e-06, |
|
"loss": 0.139, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 67.7, |
|
"learning_rate": 1.7293233082706767e-06, |
|
"loss": 0.1544, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 67.8, |
|
"learning_rate": 1.6541353383458646e-06, |
|
"loss": 0.1454, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 67.9, |
|
"learning_rate": 1.5789473684210528e-06, |
|
"loss": 0.1681, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 1.5037593984962406e-06, |
|
"loss": 0.1561, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"eval_accuracy_safe": 0.49159216811037737, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9958987602469391, |
|
"eval_iou_safe": 0.4332189017626765, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9807224254704696, |
|
"eval_loss": 0.14203643798828125, |
|
"eval_mean_accuracy": 0.7437454641786583, |
|
"eval_mean_iou": 0.7069706636165731, |
|
"eval_overall_accuracy": 0.9810023521309468, |
|
"eval_runtime": 24.1843, |
|
"eval_samples_per_second": 2.77, |
|
"eval_steps_per_second": 0.207, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 68.1, |
|
"learning_rate": 1.047087980173482e-05, |
|
"loss": 0.1439, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 68.2, |
|
"learning_rate": 1.0408921933085503e-05, |
|
"loss": 0.1547, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 68.3, |
|
"learning_rate": 1.0346964064436184e-05, |
|
"loss": 0.1357, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 68.4, |
|
"learning_rate": 1.0285006195786865e-05, |
|
"loss": 0.1621, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 1.0223048327137546e-05, |
|
"loss": 0.1519, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 68.6, |
|
"learning_rate": 1.0161090458488229e-05, |
|
"loss": 0.1583, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 68.7, |
|
"learning_rate": 1.009913258983891e-05, |
|
"loss": 0.1496, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 68.8, |
|
"learning_rate": 1.0037174721189593e-05, |
|
"loss": 0.1493, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 68.9, |
|
"learning_rate": 9.975216852540274e-06, |
|
"loss": 0.1489, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 9.913258983890955e-06, |
|
"loss": 0.1501, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"eval_accuracy_safe": 0.49060913412053153, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9957952685521476, |
|
"eval_iou_safe": 0.43106096658430304, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805916176323012, |
|
"eval_loss": 0.14368486404418945, |
|
"eval_mean_accuracy": 0.7432022013363395, |
|
"eval_mean_iou": 0.7058262921083021, |
|
"eval_overall_accuracy": 0.9808728801670359, |
|
"eval_runtime": 33.1735, |
|
"eval_samples_per_second": 2.02, |
|
"eval_steps_per_second": 0.151, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 69.1, |
|
"learning_rate": 9.851301115241636e-06, |
|
"loss": 0.1885, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 69.2, |
|
"learning_rate": 9.789343246592318e-06, |
|
"loss": 0.1545, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 69.3, |
|
"learning_rate": 9.727385377943e-06, |
|
"loss": 0.1513, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 69.4, |
|
"learning_rate": 9.665427509293682e-06, |
|
"loss": 0.1867, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 9.603469640644363e-06, |
|
"loss": 0.1561, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 69.6, |
|
"learning_rate": 9.541511771995044e-06, |
|
"loss": 0.1596, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 69.7, |
|
"learning_rate": 9.479553903345725e-06, |
|
"loss": 0.1342, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 69.8, |
|
"learning_rate": 9.417596034696406e-06, |
|
"loss": 0.1491, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 69.9, |
|
"learning_rate": 9.355638166047089e-06, |
|
"loss": 0.1512, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 9.29368029739777e-06, |
|
"loss": 0.1598, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_safe": 0.3444782402535071, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9977165531445693, |
|
"eval_iou_safe": 0.3204386406872229, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9781991094356883, |
|
"eval_loss": 0.13794589042663574, |
|
"eval_mean_accuracy": 0.6710973966990382, |
|
"eval_mean_iou": 0.6493188750614556, |
|
"eval_overall_accuracy": 0.9784209407977204, |
|
"eval_runtime": 20.8869, |
|
"eval_samples_per_second": 3.208, |
|
"eval_steps_per_second": 0.239, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 70.1, |
|
"learning_rate": 9.231722428748452e-06, |
|
"loss": 0.1522, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 70.2, |
|
"learning_rate": 9.169764560099132e-06, |
|
"loss": 0.165, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 70.3, |
|
"learning_rate": 9.107806691449814e-06, |
|
"loss": 0.1419, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 70.4, |
|
"learning_rate": 9.045848822800495e-06, |
|
"loss": 0.157, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 8.983890954151178e-06, |
|
"loss": 0.1561, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 70.6, |
|
"learning_rate": 8.921933085501859e-06, |
|
"loss": 0.144, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 70.7, |
|
"learning_rate": 8.859975216852542e-06, |
|
"loss": 0.1353, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 70.8, |
|
"learning_rate": 8.798017348203221e-06, |
|
"loss": 0.1631, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 70.9, |
|
"learning_rate": 8.736059479553904e-06, |
|
"loss": 0.1466, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 8.674101610904585e-06, |
|
"loss": 0.1431, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"eval_accuracy_safe": 0.48976295388221325, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9959733869112106, |
|
"eval_iou_safe": 0.4325414726735724, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9807421425781063, |
|
"eval_loss": 0.1400483250617981, |
|
"eval_mean_accuracy": 0.7428681703967119, |
|
"eval_mean_iou": 0.7066418076258394, |
|
"eval_overall_accuracy": 0.9810207423879139, |
|
"eval_runtime": 19.4833, |
|
"eval_samples_per_second": 3.439, |
|
"eval_steps_per_second": 0.257, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 71.1, |
|
"learning_rate": 8.612143742255268e-06, |
|
"loss": 0.1456, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 71.2, |
|
"learning_rate": 8.550185873605949e-06, |
|
"loss": 0.1665, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 71.3, |
|
"learning_rate": 8.488228004956631e-06, |
|
"loss": 0.1558, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 71.4, |
|
"learning_rate": 8.42627013630731e-06, |
|
"loss": 0.1391, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 8.364312267657993e-06, |
|
"loss": 0.1484, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 71.6, |
|
"learning_rate": 8.302354399008674e-06, |
|
"loss": 0.1439, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 71.7, |
|
"learning_rate": 8.240396530359357e-06, |
|
"loss": 0.1571, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 71.8, |
|
"learning_rate": 8.178438661710038e-06, |
|
"loss": 0.1634, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 71.9, |
|
"learning_rate": 8.116480793060719e-06, |
|
"loss": 0.1503, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 8.0545229244114e-06, |
|
"loss": 0.164, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"eval_accuracy_safe": 0.4698439867232586, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9963593686912747, |
|
"eval_iou_safe": 0.4196496205600011, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.980536830137278, |
|
"eval_loss": 0.134708434343338, |
|
"eval_mean_accuracy": 0.7331016777072666, |
|
"eval_mean_iou": 0.7000932253486396, |
|
"eval_overall_accuracy": 0.9808069485336987, |
|
"eval_runtime": 19.8575, |
|
"eval_samples_per_second": 3.374, |
|
"eval_steps_per_second": 0.252, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 72.1, |
|
"learning_rate": 7.992565055762081e-06, |
|
"loss": 0.1546, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 72.2, |
|
"learning_rate": 7.930607187112764e-06, |
|
"loss": 0.1389, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 72.3, |
|
"learning_rate": 7.868649318463445e-06, |
|
"loss": 0.1454, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 72.4, |
|
"learning_rate": 7.806691449814127e-06, |
|
"loss": 0.1507, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 7.744733581164808e-06, |
|
"loss": 0.144, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 72.6, |
|
"learning_rate": 7.68277571251549e-06, |
|
"loss": 0.1469, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 72.7, |
|
"learning_rate": 7.620817843866171e-06, |
|
"loss": 0.1488, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 72.8, |
|
"learning_rate": 7.558859975216853e-06, |
|
"loss": 0.1521, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 72.9, |
|
"learning_rate": 7.496902106567534e-06, |
|
"loss": 0.148, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 7.434944237918216e-06, |
|
"loss": 0.1555, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"eval_accuracy_safe": 0.5271298106021179, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9936756835468036, |
|
"eval_iou_safe": 0.43644499911426604, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9795766635519878, |
|
"eval_loss": 0.136807382106781, |
|
"eval_mean_accuracy": 0.7604027470744608, |
|
"eval_mean_iou": 0.7080108313331269, |
|
"eval_overall_accuracy": 0.9798946665294135, |
|
"eval_runtime": 22.6871, |
|
"eval_samples_per_second": 2.953, |
|
"eval_steps_per_second": 0.22, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 73.1, |
|
"learning_rate": 7.372986369268897e-06, |
|
"loss": 0.1576, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 73.2, |
|
"learning_rate": 7.31102850061958e-06, |
|
"loss": 0.1501, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 73.3, |
|
"learning_rate": 7.249070631970261e-06, |
|
"loss": 0.1374, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 73.4, |
|
"learning_rate": 7.1871127633209425e-06, |
|
"loss": 0.1479, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 7.1251548946716235e-06, |
|
"loss": 0.1385, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 73.6, |
|
"learning_rate": 7.063197026022305e-06, |
|
"loss": 0.1477, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 73.7, |
|
"learning_rate": 7.001239157372986e-06, |
|
"loss": 0.144, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 73.8, |
|
"learning_rate": 6.939281288723669e-06, |
|
"loss": 0.1363, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 73.9, |
|
"learning_rate": 6.87732342007435e-06, |
|
"loss": 0.1296, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 6.815365551425032e-06, |
|
"loss": 0.1924, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"eval_accuracy_safe": 0.4637819437858759, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.996495656223588, |
|
"eval_iou_safe": 0.41589848807241764, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9804929124148382, |
|
"eval_loss": 0.1311986744403839, |
|
"eval_mean_accuracy": 0.730138800004732, |
|
"eval_mean_iou": 0.6981957002436279, |
|
"eval_overall_accuracy": 0.9807601473224696, |
|
"eval_runtime": 20.8062, |
|
"eval_samples_per_second": 3.22, |
|
"eval_steps_per_second": 0.24, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 74.1, |
|
"learning_rate": 6.753407682775713e-06, |
|
"loss": 0.133, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 74.2, |
|
"learning_rate": 6.691449814126394e-06, |
|
"loss": 0.1258, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 74.3, |
|
"learning_rate": 6.629491945477076e-06, |
|
"loss": 0.1859, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 74.4, |
|
"learning_rate": 6.567534076827757e-06, |
|
"loss": 0.1428, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 6.5055762081784395e-06, |
|
"loss": 0.1348, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 74.6, |
|
"learning_rate": 6.44361833952912e-06, |
|
"loss": 0.1371, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 74.7, |
|
"learning_rate": 6.381660470879802e-06, |
|
"loss": 0.1312, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 74.8, |
|
"learning_rate": 6.319702602230483e-06, |
|
"loss": 0.1389, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 74.9, |
|
"learning_rate": 6.257744733581165e-06, |
|
"loss": 0.1492, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 6.195786864931847e-06, |
|
"loss": 0.1612, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_safe": 0.5052293553224544, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.995557366725402, |
|
"eval_iou_safe": 0.4408789523082254, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9807871371405564, |
|
"eval_loss": 0.13402974605560303, |
|
"eval_mean_accuracy": 0.7503933610239282, |
|
"eval_mean_iou": 0.7108330447243909, |
|
"eval_overall_accuracy": 0.9810738634707322, |
|
"eval_runtime": 24.6564, |
|
"eval_samples_per_second": 2.717, |
|
"eval_steps_per_second": 0.203, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 75.1, |
|
"learning_rate": 6.133828996282529e-06, |
|
"loss": 0.135, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 75.2, |
|
"learning_rate": 6.071871127633209e-06, |
|
"loss": 0.1421, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 75.3, |
|
"learning_rate": 6.009913258983891e-06, |
|
"loss": 0.155, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 75.4, |
|
"learning_rate": 5.947955390334573e-06, |
|
"loss": 0.1435, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 5.885997521685254e-06, |
|
"loss": 0.1424, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 75.6, |
|
"learning_rate": 5.824039653035936e-06, |
|
"loss": 0.1373, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 75.7, |
|
"learning_rate": 5.7620817843866174e-06, |
|
"loss": 0.1646, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 75.8, |
|
"learning_rate": 5.7001239157372985e-06, |
|
"loss": 0.1537, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 75.9, |
|
"learning_rate": 5.63816604708798e-06, |
|
"loss": 0.1333, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 5.576208178438662e-06, |
|
"loss": 0.1234, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"eval_accuracy_safe": 0.5301444481709785, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9945833479516331, |
|
"eval_iou_safe": 0.4500530167166289, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805601468897901, |
|
"eval_loss": 0.13541895151138306, |
|
"eval_mean_accuracy": 0.7623638980613058, |
|
"eval_mean_iou": 0.7153065818032095, |
|
"eval_overall_accuracy": 0.9808645675431437, |
|
"eval_runtime": 25.5302, |
|
"eval_samples_per_second": 2.624, |
|
"eval_steps_per_second": 0.196, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 76.1, |
|
"learning_rate": 5.514250309789343e-06, |
|
"loss": 0.1355, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 76.2, |
|
"learning_rate": 5.452292441140025e-06, |
|
"loss": 0.1363, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 76.3, |
|
"learning_rate": 5.390334572490707e-06, |
|
"loss": 0.1321, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 76.4, |
|
"learning_rate": 5.328376703841388e-06, |
|
"loss": 0.1391, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 5.26641883519207e-06, |
|
"loss": 0.1345, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 76.6, |
|
"learning_rate": 5.2044609665427516e-06, |
|
"loss": 0.1428, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 76.7, |
|
"learning_rate": 5.1425030978934326e-06, |
|
"loss": 0.1348, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 76.8, |
|
"learning_rate": 5.0805452292441144e-06, |
|
"loss": 0.1649, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 76.9, |
|
"learning_rate": 5.018587360594796e-06, |
|
"loss": 0.1385, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 4.956629491945477e-06, |
|
"loss": 0.1679, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"eval_accuracy_safe": 0.4644064594970721, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9964220269282573, |
|
"eval_iou_safe": 0.41555706568607836, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9804388030904859, |
|
"eval_loss": 0.13231320679187775, |
|
"eval_mean_accuracy": 0.7304142432126647, |
|
"eval_mean_iou": 0.6979979343882822, |
|
"eval_overall_accuracy": 0.9807071401112115, |
|
"eval_runtime": 22.9487, |
|
"eval_samples_per_second": 2.92, |
|
"eval_steps_per_second": 0.218, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 77.1, |
|
"learning_rate": 4.894671623296159e-06, |
|
"loss": 0.1413, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 77.2, |
|
"learning_rate": 4.832713754646841e-06, |
|
"loss": 0.1337, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 77.3, |
|
"learning_rate": 4.770755885997522e-06, |
|
"loss": 0.1354, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 77.4, |
|
"learning_rate": 4.708798017348203e-06, |
|
"loss": 0.1626, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 4.646840148698885e-06, |
|
"loss": 0.1414, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 77.6, |
|
"learning_rate": 4.584882280049566e-06, |
|
"loss": 0.143, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 77.7, |
|
"learning_rate": 4.522924411400248e-06, |
|
"loss": 0.1493, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 77.8, |
|
"learning_rate": 4.4609665427509296e-06, |
|
"loss": 0.1534, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 77.9, |
|
"learning_rate": 4.3990086741016106e-06, |
|
"loss": 0.1324, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 4.337050805452292e-06, |
|
"loss": 0.1375, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"eval_accuracy_safe": 0.4804087108376606, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9961355473672218, |
|
"eval_iou_safe": 0.4262858941182306, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9806268893582346, |
|
"eval_loss": 0.13545650243759155, |
|
"eval_mean_accuracy": 0.7382721291024412, |
|
"eval_mean_iou": 0.7034563917382326, |
|
"eval_overall_accuracy": 0.9809018035433186, |
|
"eval_runtime": 21.5828, |
|
"eval_samples_per_second": 3.104, |
|
"eval_steps_per_second": 0.232, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 78.1, |
|
"learning_rate": 4.275092936802974e-06, |
|
"loss": 0.1339, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 78.2, |
|
"learning_rate": 4.213135068153655e-06, |
|
"loss": 0.1292, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 78.3, |
|
"learning_rate": 4.151177199504337e-06, |
|
"loss": 0.1346, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 78.4, |
|
"learning_rate": 4.089219330855019e-06, |
|
"loss": 0.1421, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 4.0272614622057e-06, |
|
"loss": 0.1394, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 78.6, |
|
"learning_rate": 3.965303593556382e-06, |
|
"loss": 0.1361, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 78.7, |
|
"learning_rate": 3.903345724907064e-06, |
|
"loss": 0.1292, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 78.8, |
|
"learning_rate": 3.841387856257745e-06, |
|
"loss": 0.1457, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 78.9, |
|
"learning_rate": 3.7794299876084265e-06, |
|
"loss": 0.1498, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 3.717472118959108e-06, |
|
"loss": 0.1839, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"eval_accuracy_safe": 0.5069506285634983, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9955435795665153, |
|
"eval_iou_safe": 0.4422061971111085, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9808241787110663, |
|
"eval_loss": 0.13185860216617584, |
|
"eval_mean_accuracy": 0.7512471040650068, |
|
"eval_mean_iou": 0.7115151879110874, |
|
"eval_overall_accuracy": 0.9811113272140275, |
|
"eval_runtime": 20.4128, |
|
"eval_samples_per_second": 3.282, |
|
"eval_steps_per_second": 0.245, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 79.1, |
|
"learning_rate": 3.65551425030979e-06, |
|
"loss": 0.135, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 79.2, |
|
"learning_rate": 3.5935563816604712e-06, |
|
"loss": 0.1396, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 79.3, |
|
"learning_rate": 3.5315985130111527e-06, |
|
"loss": 0.1413, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 79.4, |
|
"learning_rate": 3.4696406443618345e-06, |
|
"loss": 0.1609, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 3.407682775712516e-06, |
|
"loss": 0.1377, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 79.6, |
|
"learning_rate": 3.345724907063197e-06, |
|
"loss": 0.1328, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 79.7, |
|
"learning_rate": 3.2837670384138784e-06, |
|
"loss": 0.1416, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 79.8, |
|
"learning_rate": 3.22180916976456e-06, |
|
"loss": 0.1443, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 79.9, |
|
"learning_rate": 3.1598513011152417e-06, |
|
"loss": 0.1379, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 3.0978934324659235e-06, |
|
"loss": 0.155, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_safe": 0.48455287373603034, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9960956526095923, |
|
"eval_iou_safe": 0.42946368930778284, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9807093943419638, |
|
"eval_loss": 0.12976635992527008, |
|
"eval_mean_accuracy": 0.7403242631728113, |
|
"eval_mean_iou": 0.7050865418248733, |
|
"eval_overall_accuracy": 0.980985499140042, |
|
"eval_runtime": 20.7172, |
|
"eval_samples_per_second": 3.234, |
|
"eval_steps_per_second": 0.241, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 80.1, |
|
"learning_rate": 3.0359355638166045e-06, |
|
"loss": 0.1268, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 80.2, |
|
"learning_rate": 2.9739776951672864e-06, |
|
"loss": 0.1498, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 80.3, |
|
"learning_rate": 2.912019826517968e-06, |
|
"loss": 0.1779, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 80.4, |
|
"learning_rate": 2.8500619578686492e-06, |
|
"loss": 0.1434, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 2.788104089219331e-06, |
|
"loss": 0.1526, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 80.6, |
|
"learning_rate": 2.7261462205700125e-06, |
|
"loss": 0.1374, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 80.7, |
|
"learning_rate": 2.664188351920694e-06, |
|
"loss": 0.1359, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 80.8, |
|
"learning_rate": 2.6022304832713758e-06, |
|
"loss": 0.1213, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 80.9, |
|
"learning_rate": 2.5402726146220572e-06, |
|
"loss": 0.161, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 2.4783147459727386e-06, |
|
"loss": 0.1219, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"eval_accuracy_safe": 0.4670857089988088, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9963235220781695, |
|
"eval_iou_safe": 0.41674764002084386, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9804205487075093, |
|
"eval_loss": 0.1302366703748703, |
|
"eval_mean_accuracy": 0.7317046155384892, |
|
"eval_mean_iou": 0.6985840943641766, |
|
"eval_overall_accuracy": 0.9806906856707672, |
|
"eval_runtime": 21.1758, |
|
"eval_samples_per_second": 3.164, |
|
"eval_steps_per_second": 0.236, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 81.1, |
|
"learning_rate": 2.4163568773234205e-06, |
|
"loss": 0.1313, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 81.2, |
|
"learning_rate": 2.3543990086741015e-06, |
|
"loss": 0.1277, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 81.3, |
|
"learning_rate": 2.292441140024783e-06, |
|
"loss": 0.138, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 81.4, |
|
"learning_rate": 2.2304832713754648e-06, |
|
"loss": 0.1392, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 2.168525402726146e-06, |
|
"loss": 0.1464, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 81.6, |
|
"learning_rate": 2.1065675340768276e-06, |
|
"loss": 0.1554, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 81.7, |
|
"learning_rate": 2.0446096654275095e-06, |
|
"loss": 0.1641, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 81.8, |
|
"learning_rate": 1.982651796778191e-06, |
|
"loss": 0.1309, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 81.9, |
|
"learning_rate": 1.9206939281288723e-06, |
|
"loss": 0.125, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 1.858736059479554e-06, |
|
"loss": 0.1218, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"eval_accuracy_safe": 0.48643413094012744, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.996007414792718, |
|
"eval_iou_safe": 0.4300261563759361, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9806778064135621, |
|
"eval_loss": 0.13131824135780334, |
|
"eval_mean_accuracy": 0.7412207728664227, |
|
"eval_mean_iou": 0.7053519813947491, |
|
"eval_overall_accuracy": 0.9809554370481577, |
|
"eval_runtime": 23.7593, |
|
"eval_samples_per_second": 2.82, |
|
"eval_steps_per_second": 0.21, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 82.1, |
|
"learning_rate": 1.7967781908302356e-06, |
|
"loss": 0.1455, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 82.2, |
|
"learning_rate": 1.7348203221809173e-06, |
|
"loss": 0.1631, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 82.3, |
|
"learning_rate": 1.6728624535315985e-06, |
|
"loss": 0.1352, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 82.4, |
|
"learning_rate": 1.61090458488228e-06, |
|
"loss": 0.1363, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 1.5489467162329618e-06, |
|
"loss": 0.1328, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 82.6, |
|
"learning_rate": 1.4869888475836432e-06, |
|
"loss": 0.1254, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 82.7, |
|
"learning_rate": 1.4250309789343246e-06, |
|
"loss": 0.149, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 82.8, |
|
"learning_rate": 1.3630731102850063e-06, |
|
"loss": 0.1236, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 82.9, |
|
"learning_rate": 1.3011152416356879e-06, |
|
"loss": 0.1354, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 1.2391573729863693e-06, |
|
"loss": 0.138, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"eval_accuracy_safe": 0.5097050512526937, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9955362459713628, |
|
"eval_iou_safe": 0.4445154197610971, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9808979738499072, |
|
"eval_loss": 0.1318480670452118, |
|
"eval_mean_accuracy": 0.7526206486120283, |
|
"eval_mean_iou": 0.7127066968055021, |
|
"eval_overall_accuracy": 0.981185571471257, |
|
"eval_runtime": 23.7783, |
|
"eval_samples_per_second": 2.818, |
|
"eval_steps_per_second": 0.21, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 83.1, |
|
"learning_rate": 1.1771995043370507e-06, |
|
"loss": 0.1215, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 83.2, |
|
"learning_rate": 1.1152416356877324e-06, |
|
"loss": 0.1519, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 83.3, |
|
"learning_rate": 1.0532837670384138e-06, |
|
"loss": 0.1582, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 83.4, |
|
"learning_rate": 9.913258983890955e-07, |
|
"loss": 0.1445, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 9.29368029739777e-07, |
|
"loss": 0.1427, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 83.6, |
|
"learning_rate": 8.674101610904586e-07, |
|
"loss": 0.1311, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 83.7, |
|
"learning_rate": 8.0545229244114e-07, |
|
"loss": 0.2, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 83.8, |
|
"learning_rate": 7.434944237918216e-07, |
|
"loss": 0.1395, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 83.9, |
|
"learning_rate": 6.815365551425031e-07, |
|
"loss": 0.1336, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 6.195786864931847e-07, |
|
"loss": 0.1399, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"eval_accuracy_safe": 0.5067039063072232, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9957111962173198, |
|
"eval_iou_safe": 0.44412438503541113, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9809820592367888, |
|
"eval_loss": 0.12903741002082825, |
|
"eval_mean_accuracy": 0.7512075512622716, |
|
"eval_mean_iou": 0.7125532221361, |
|
"eval_overall_accuracy": 0.9812667049578766, |
|
"eval_runtime": 17.5403, |
|
"eval_samples_per_second": 3.82, |
|
"eval_steps_per_second": 0.285, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 84.1, |
|
"learning_rate": 1.574561403508772e-05, |
|
"loss": 0.1331, |
|
"step": 841 |
|
}, |
|
{ |
|
"epoch": 84.2, |
|
"learning_rate": 1.570175438596491e-05, |
|
"loss": 0.1282, |
|
"step": 842 |
|
}, |
|
{ |
|
"epoch": 84.3, |
|
"learning_rate": 1.5657894736842104e-05, |
|
"loss": 0.134, |
|
"step": 843 |
|
}, |
|
{ |
|
"epoch": 84.4, |
|
"learning_rate": 1.56140350877193e-05, |
|
"loss": 0.1366, |
|
"step": 844 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 1.5570175438596493e-05, |
|
"loss": 0.1243, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 84.6, |
|
"learning_rate": 1.5526315789473686e-05, |
|
"loss": 0.1528, |
|
"step": 846 |
|
}, |
|
{ |
|
"epoch": 84.7, |
|
"learning_rate": 1.548245614035088e-05, |
|
"loss": 0.1302, |
|
"step": 847 |
|
}, |
|
{ |
|
"epoch": 84.8, |
|
"learning_rate": 1.543859649122807e-05, |
|
"loss": 0.1396, |
|
"step": 848 |
|
}, |
|
{ |
|
"epoch": 84.9, |
|
"learning_rate": 1.5394736842105264e-05, |
|
"loss": 0.1465, |
|
"step": 849 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 1.5350877192982457e-05, |
|
"loss": 0.1455, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_safe": 0.502411324551563, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9957116655674096, |
|
"eval_iou_safe": 0.4403679024693527, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9808562632737128, |
|
"eval_loss": 0.12765273451805115, |
|
"eval_mean_accuracy": 0.7490614950594863, |
|
"eval_mean_iou": 0.7106120828715328, |
|
"eval_overall_accuracy": 0.9811403644618704, |
|
"eval_runtime": 27.1945, |
|
"eval_samples_per_second": 2.464, |
|
"eval_steps_per_second": 0.184, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 85.1, |
|
"learning_rate": 1.530701754385965e-05, |
|
"loss": 0.1528, |
|
"step": 851 |
|
}, |
|
{ |
|
"epoch": 85.2, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.1349, |
|
"step": 852 |
|
}, |
|
{ |
|
"epoch": 85.3, |
|
"learning_rate": 1.5219298245614037e-05, |
|
"loss": 0.1384, |
|
"step": 853 |
|
}, |
|
{ |
|
"epoch": 85.4, |
|
"learning_rate": 1.517543859649123e-05, |
|
"loss": 0.1252, |
|
"step": 854 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 1.5131578947368422e-05, |
|
"loss": 0.1472, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 85.6, |
|
"learning_rate": 1.5087719298245615e-05, |
|
"loss": 0.1444, |
|
"step": 856 |
|
}, |
|
{ |
|
"epoch": 85.7, |
|
"learning_rate": 1.5043859649122808e-05, |
|
"loss": 0.1227, |
|
"step": 857 |
|
}, |
|
{ |
|
"epoch": 85.8, |
|
"learning_rate": 1.5e-05, |
|
"loss": 0.1421, |
|
"step": 858 |
|
}, |
|
{ |
|
"epoch": 85.9, |
|
"learning_rate": 1.4956140350877193e-05, |
|
"loss": 0.1247, |
|
"step": 859 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 1.4912280701754386e-05, |
|
"loss": 0.1466, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"eval_accuracy_safe": 0.492041279717503, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9959391243546583, |
|
"eval_iou_safe": 0.43412202666920624, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9807753771931, |
|
"eval_loss": 0.1242642030119896, |
|
"eval_mean_accuracy": 0.7439902020360807, |
|
"eval_mean_iou": 0.7074487019311532, |
|
"eval_overall_accuracy": 0.9810547899844041, |
|
"eval_runtime": 17.7681, |
|
"eval_samples_per_second": 3.771, |
|
"eval_steps_per_second": 0.281, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 86.1, |
|
"learning_rate": 1.4868421052631579e-05, |
|
"loss": 0.1354, |
|
"step": 861 |
|
}, |
|
{ |
|
"epoch": 86.2, |
|
"learning_rate": 1.4824561403508771e-05, |
|
"loss": 0.1308, |
|
"step": 862 |
|
}, |
|
{ |
|
"epoch": 86.3, |
|
"learning_rate": 1.4780701754385964e-05, |
|
"loss": 0.1233, |
|
"step": 863 |
|
}, |
|
{ |
|
"epoch": 86.4, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.1533, |
|
"step": 864 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 1.4692982456140353e-05, |
|
"loss": 0.1413, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 86.6, |
|
"learning_rate": 1.4649122807017546e-05, |
|
"loss": 0.1384, |
|
"step": 866 |
|
}, |
|
{ |
|
"epoch": 86.7, |
|
"learning_rate": 1.4605263157894739e-05, |
|
"loss": 0.1346, |
|
"step": 867 |
|
}, |
|
{ |
|
"epoch": 86.8, |
|
"learning_rate": 1.4561403508771931e-05, |
|
"loss": 0.1308, |
|
"step": 868 |
|
}, |
|
{ |
|
"epoch": 86.9, |
|
"learning_rate": 1.4517543859649124e-05, |
|
"loss": 0.1421, |
|
"step": 869 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 1.4473684210526317e-05, |
|
"loss": 0.1769, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"eval_accuracy_safe": 0.5737448968970821, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9924047421725019, |
|
"eval_iou_safe": 0.4591662013198367, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9796940667397661, |
|
"eval_loss": 0.13170459866523743, |
|
"eval_mean_accuracy": 0.783074819534792, |
|
"eval_mean_iou": 0.7194301340298014, |
|
"eval_overall_accuracy": 0.9800382016310051, |
|
"eval_runtime": 18.2024, |
|
"eval_samples_per_second": 3.681, |
|
"eval_steps_per_second": 0.275, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 87.1, |
|
"learning_rate": 1.442982456140351e-05, |
|
"loss": 0.128, |
|
"step": 871 |
|
}, |
|
{ |
|
"epoch": 87.2, |
|
"learning_rate": 1.4385964912280702e-05, |
|
"loss": 0.1425, |
|
"step": 872 |
|
}, |
|
{ |
|
"epoch": 87.3, |
|
"learning_rate": 1.4342105263157895e-05, |
|
"loss": 0.1304, |
|
"step": 873 |
|
}, |
|
{ |
|
"epoch": 87.4, |
|
"learning_rate": 1.4298245614035088e-05, |
|
"loss": 0.1368, |
|
"step": 874 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 1.425438596491228e-05, |
|
"loss": 0.1372, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 87.6, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.1296, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 87.7, |
|
"learning_rate": 1.4166666666666668e-05, |
|
"loss": 0.1435, |
|
"step": 877 |
|
}, |
|
{ |
|
"epoch": 87.8, |
|
"learning_rate": 1.412280701754386e-05, |
|
"loss": 0.1258, |
|
"step": 878 |
|
}, |
|
{ |
|
"epoch": 87.9, |
|
"learning_rate": 1.4078947368421053e-05, |
|
"loss": 0.1254, |
|
"step": 879 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 1.4035087719298246e-05, |
|
"loss": 0.1453, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"eval_accuracy_safe": 0.33410241286656567, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9977892437397206, |
|
"eval_iou_safe": 0.31147885653495877, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9779675641298045, |
|
"eval_loss": 0.12544433772563934, |
|
"eval_mean_accuracy": 0.6659458283031431, |
|
"eval_mean_iou": 0.6447232103323817, |
|
"eval_overall_accuracy": 0.9781849989250525, |
|
"eval_runtime": 17.2538, |
|
"eval_samples_per_second": 3.883, |
|
"eval_steps_per_second": 0.29, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 88.1, |
|
"learning_rate": 1.3991228070175439e-05, |
|
"loss": 0.1388, |
|
"step": 881 |
|
}, |
|
{ |
|
"epoch": 88.2, |
|
"learning_rate": 1.3947368421052631e-05, |
|
"loss": 0.1517, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 88.3, |
|
"learning_rate": 1.3903508771929824e-05, |
|
"loss": 0.1554, |
|
"step": 883 |
|
}, |
|
{ |
|
"epoch": 88.4, |
|
"learning_rate": 1.3859649122807017e-05, |
|
"loss": 0.1403, |
|
"step": 884 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 1.3815789473684213e-05, |
|
"loss": 0.1562, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 88.6, |
|
"learning_rate": 1.3771929824561406e-05, |
|
"loss": 0.1317, |
|
"step": 886 |
|
}, |
|
{ |
|
"epoch": 88.7, |
|
"learning_rate": 1.3728070175438599e-05, |
|
"loss": 0.1659, |
|
"step": 887 |
|
}, |
|
{ |
|
"epoch": 88.8, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.1442, |
|
"step": 888 |
|
}, |
|
{ |
|
"epoch": 88.9, |
|
"learning_rate": 1.3640350877192984e-05, |
|
"loss": 0.1227, |
|
"step": 889 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 1.3596491228070177e-05, |
|
"loss": 0.133, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"eval_accuracy_safe": 0.5256726072759935, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9950238330108703, |
|
"eval_iou_safe": 0.4518074285146284, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9808627969534082, |
|
"eval_loss": 0.1283280849456787, |
|
"eval_mean_accuracy": 0.760348220143432, |
|
"eval_mean_iou": 0.7163351127340183, |
|
"eval_overall_accuracy": 0.9811599503702192, |
|
"eval_runtime": 19.2209, |
|
"eval_samples_per_second": 3.486, |
|
"eval_steps_per_second": 0.26, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 89.1, |
|
"learning_rate": 1.355263157894737e-05, |
|
"loss": 0.1368, |
|
"step": 891 |
|
}, |
|
{ |
|
"epoch": 89.2, |
|
"learning_rate": 1.3508771929824562e-05, |
|
"loss": 0.1169, |
|
"step": 892 |
|
}, |
|
{ |
|
"epoch": 89.3, |
|
"learning_rate": 1.3464912280701755e-05, |
|
"loss": 0.1393, |
|
"step": 893 |
|
}, |
|
{ |
|
"epoch": 89.4, |
|
"learning_rate": 1.3421052631578948e-05, |
|
"loss": 0.1361, |
|
"step": 894 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 1.337719298245614e-05, |
|
"loss": 0.1236, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 89.6, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 0.1337, |
|
"step": 896 |
|
}, |
|
{ |
|
"epoch": 89.7, |
|
"learning_rate": 1.3289473684210526e-05, |
|
"loss": 0.1374, |
|
"step": 897 |
|
}, |
|
{ |
|
"epoch": 89.8, |
|
"learning_rate": 1.3245614035087719e-05, |
|
"loss": 0.1266, |
|
"step": 898 |
|
}, |
|
{ |
|
"epoch": 89.9, |
|
"learning_rate": 1.3201754385964912e-05, |
|
"loss": 0.1272, |
|
"step": 899 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.1288, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_safe": 0.5048592719380419, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.995672768178721, |
|
"eval_iou_safe": 0.44201843191985946, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9808899412694433, |
|
"eval_loss": 0.12205161154270172, |
|
"eval_mean_accuracy": 0.7502660200583815, |
|
"eval_mean_iou": 0.7114541865946513, |
|
"eval_overall_accuracy": 0.9811749244803813, |
|
"eval_runtime": 18.0737, |
|
"eval_samples_per_second": 3.707, |
|
"eval_steps_per_second": 0.277, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 90.1, |
|
"learning_rate": 1.3114035087719299e-05, |
|
"loss": 0.1401, |
|
"step": 901 |
|
}, |
|
{ |
|
"epoch": 90.2, |
|
"learning_rate": 1.3070175438596491e-05, |
|
"loss": 0.1164, |
|
"step": 902 |
|
}, |
|
{ |
|
"epoch": 90.3, |
|
"learning_rate": 1.3026315789473684e-05, |
|
"loss": 0.129, |
|
"step": 903 |
|
}, |
|
{ |
|
"epoch": 90.4, |
|
"learning_rate": 1.2982456140350877e-05, |
|
"loss": 0.1278, |
|
"step": 904 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 1.2938596491228071e-05, |
|
"loss": 0.136, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 90.6, |
|
"learning_rate": 1.2894736842105264e-05, |
|
"loss": 0.133, |
|
"step": 906 |
|
}, |
|
{ |
|
"epoch": 90.7, |
|
"learning_rate": 1.2850877192982459e-05, |
|
"loss": 0.1241, |
|
"step": 907 |
|
}, |
|
{ |
|
"epoch": 90.8, |
|
"learning_rate": 1.2807017543859651e-05, |
|
"loss": 0.1189, |
|
"step": 908 |
|
}, |
|
{ |
|
"epoch": 90.9, |
|
"learning_rate": 1.2763157894736844e-05, |
|
"loss": 0.1336, |
|
"step": 909 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 1.2719298245614037e-05, |
|
"loss": 0.1318, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"eval_accuracy_safe": 0.48381463448483236, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9961137812568093, |
|
"eval_iou_safe": 0.42903586409180877, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9807055467668601, |
|
"eval_loss": 0.12193302810192108, |
|
"eval_mean_accuracy": 0.7399642078708208, |
|
"eval_mean_iou": 0.7048707054293344, |
|
"eval_overall_accuracy": 0.9809812858923158, |
|
"eval_runtime": 17.5221, |
|
"eval_samples_per_second": 3.824, |
|
"eval_steps_per_second": 0.285, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 91.1, |
|
"learning_rate": 1.267543859649123e-05, |
|
"loss": 0.1287, |
|
"step": 911 |
|
}, |
|
{ |
|
"epoch": 91.2, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.1326, |
|
"step": 912 |
|
}, |
|
{ |
|
"epoch": 91.3, |
|
"learning_rate": 1.2587719298245615e-05, |
|
"loss": 0.1311, |
|
"step": 913 |
|
}, |
|
{ |
|
"epoch": 91.4, |
|
"learning_rate": 1.2543859649122808e-05, |
|
"loss": 0.146, |
|
"step": 914 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 1.25e-05, |
|
"loss": 0.1249, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 91.6, |
|
"learning_rate": 1.2456140350877193e-05, |
|
"loss": 0.1406, |
|
"step": 916 |
|
}, |
|
{ |
|
"epoch": 91.7, |
|
"learning_rate": 1.2412280701754386e-05, |
|
"loss": 0.1393, |
|
"step": 917 |
|
}, |
|
{ |
|
"epoch": 91.8, |
|
"learning_rate": 1.2368421052631579e-05, |
|
"loss": 0.1216, |
|
"step": 918 |
|
}, |
|
{ |
|
"epoch": 91.9, |
|
"learning_rate": 1.2324561403508772e-05, |
|
"loss": 0.134, |
|
"step": 919 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 1.2280701754385964e-05, |
|
"loss": 0.1211, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"eval_accuracy_safe": 0.5354682518571632, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9949727911886092, |
|
"eval_iou_safe": 0.45956426078181606, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811008382233649, |
|
"eval_loss": 0.1242252066731453, |
|
"eval_mean_accuracy": 0.7652205215228862, |
|
"eval_mean_iou": 0.7203325495025905, |
|
"eval_overall_accuracy": 0.9813997638759329, |
|
"eval_runtime": 17.0852, |
|
"eval_samples_per_second": 3.922, |
|
"eval_steps_per_second": 0.293, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 92.1, |
|
"learning_rate": 1.2236842105263159e-05, |
|
"loss": 0.1204, |
|
"step": 921 |
|
}, |
|
{ |
|
"epoch": 92.2, |
|
"learning_rate": 1.2192982456140352e-05, |
|
"loss": 0.1165, |
|
"step": 922 |
|
}, |
|
{ |
|
"epoch": 92.3, |
|
"learning_rate": 1.2149122807017544e-05, |
|
"loss": 0.1266, |
|
"step": 923 |
|
}, |
|
{ |
|
"epoch": 92.4, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.1406, |
|
"step": 924 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 1.206140350877193e-05, |
|
"loss": 0.1394, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 92.6, |
|
"learning_rate": 1.2017543859649123e-05, |
|
"loss": 0.1291, |
|
"step": 926 |
|
}, |
|
{ |
|
"epoch": 92.7, |
|
"learning_rate": 1.1973684210526315e-05, |
|
"loss": 0.1194, |
|
"step": 927 |
|
}, |
|
{ |
|
"epoch": 92.8, |
|
"learning_rate": 1.192982456140351e-05, |
|
"loss": 0.1345, |
|
"step": 928 |
|
}, |
|
{ |
|
"epoch": 92.9, |
|
"learning_rate": 1.1885964912280702e-05, |
|
"loss": 0.1218, |
|
"step": 929 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 1.1842105263157895e-05, |
|
"loss": 0.1137, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"eval_accuracy_safe": 0.5135099710486852, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9958312911715366, |
|
"eval_iou_safe": 0.4516518410394384, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.981300656208812, |
|
"eval_loss": 0.11808586120605469, |
|
"eval_mean_accuracy": 0.7546706311101109, |
|
"eval_mean_iou": 0.7164762486241252, |
|
"eval_overall_accuracy": 0.9815842927391849, |
|
"eval_runtime": 19.3338, |
|
"eval_samples_per_second": 3.465, |
|
"eval_steps_per_second": 0.259, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 93.1, |
|
"learning_rate": 1.179824561403509e-05, |
|
"loss": 0.1216, |
|
"step": 931 |
|
}, |
|
{ |
|
"epoch": 93.2, |
|
"learning_rate": 1.1754385964912282e-05, |
|
"loss": 0.1282, |
|
"step": 932 |
|
}, |
|
{ |
|
"epoch": 93.3, |
|
"learning_rate": 1.1710526315789475e-05, |
|
"loss": 0.1266, |
|
"step": 933 |
|
}, |
|
{ |
|
"epoch": 93.4, |
|
"learning_rate": 1.1666666666666668e-05, |
|
"loss": 0.1267, |
|
"step": 934 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 1.162280701754386e-05, |
|
"loss": 0.166, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 93.6, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.1248, |
|
"step": 936 |
|
}, |
|
{ |
|
"epoch": 93.7, |
|
"learning_rate": 1.1535087719298246e-05, |
|
"loss": 0.1241, |
|
"step": 937 |
|
}, |
|
{ |
|
"epoch": 93.8, |
|
"learning_rate": 1.1491228070175439e-05, |
|
"loss": 0.1206, |
|
"step": 938 |
|
}, |
|
{ |
|
"epoch": 93.9, |
|
"learning_rate": 1.1447368421052632e-05, |
|
"loss": 0.1313, |
|
"step": 939 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 1.1403508771929824e-05, |
|
"loss": 0.1312, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"eval_accuracy_safe": 0.47751165184405614, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9963331437550096, |
|
"eval_iou_safe": 0.4261701728702587, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9807362753129867, |
|
"eval_loss": 0.11989368498325348, |
|
"eval_mean_accuracy": 0.7369223977995328, |
|
"eval_mean_iou": 0.7034532240916227, |
|
"eval_overall_accuracy": 0.9810079887731752, |
|
"eval_runtime": 18.3423, |
|
"eval_samples_per_second": 3.653, |
|
"eval_steps_per_second": 0.273, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 94.1, |
|
"learning_rate": 1.1359649122807019e-05, |
|
"loss": 0.1204, |
|
"step": 941 |
|
}, |
|
{ |
|
"epoch": 94.2, |
|
"learning_rate": 1.1315789473684212e-05, |
|
"loss": 0.1054, |
|
"step": 942 |
|
}, |
|
{ |
|
"epoch": 94.3, |
|
"learning_rate": 1.1271929824561404e-05, |
|
"loss": 0.1257, |
|
"step": 943 |
|
}, |
|
{ |
|
"epoch": 94.4, |
|
"learning_rate": 1.1228070175438597e-05, |
|
"loss": 0.1242, |
|
"step": 944 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 1.118421052631579e-05, |
|
"loss": 0.1173, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 94.6, |
|
"learning_rate": 1.1140350877192983e-05, |
|
"loss": 0.1194, |
|
"step": 946 |
|
}, |
|
{ |
|
"epoch": 94.7, |
|
"learning_rate": 1.1096491228070175e-05, |
|
"loss": 0.1233, |
|
"step": 947 |
|
}, |
|
{ |
|
"epoch": 94.8, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.13, |
|
"step": 948 |
|
}, |
|
{ |
|
"epoch": 94.9, |
|
"learning_rate": 1.100877192982456e-05, |
|
"loss": 0.1517, |
|
"step": 949 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 1.0964912280701754e-05, |
|
"loss": 0.1591, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_safe": 0.5115014976811963, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9956283559264777, |
|
"eval_iou_safe": 0.44726250429787834, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9810415833250473, |
|
"eval_loss": 0.11822065711021423, |
|
"eval_mean_accuracy": 0.753564926803837, |
|
"eval_mean_iou": 0.7141520438114628, |
|
"eval_overall_accuracy": 0.981328024793027, |
|
"eval_runtime": 19.5493, |
|
"eval_samples_per_second": 3.427, |
|
"eval_steps_per_second": 0.256, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 95.1, |
|
"learning_rate": 1.0921052631578948e-05, |
|
"loss": 0.1191, |
|
"step": 951 |
|
}, |
|
{ |
|
"epoch": 95.2, |
|
"learning_rate": 1.087719298245614e-05, |
|
"loss": 0.1158, |
|
"step": 952 |
|
}, |
|
{ |
|
"epoch": 95.3, |
|
"learning_rate": 1.0833333333333334e-05, |
|
"loss": 0.1534, |
|
"step": 953 |
|
}, |
|
{ |
|
"epoch": 95.4, |
|
"learning_rate": 1.0789473684210526e-05, |
|
"loss": 0.1281, |
|
"step": 954 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 1.074561403508772e-05, |
|
"loss": 0.1206, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 95.6, |
|
"learning_rate": 1.0701754385964913e-05, |
|
"loss": 0.1271, |
|
"step": 956 |
|
}, |
|
{ |
|
"epoch": 95.7, |
|
"learning_rate": 1.0657894736842106e-05, |
|
"loss": 0.1086, |
|
"step": 957 |
|
}, |
|
{ |
|
"epoch": 95.8, |
|
"learning_rate": 1.0614035087719299e-05, |
|
"loss": 0.1212, |
|
"step": 958 |
|
}, |
|
{ |
|
"epoch": 95.9, |
|
"learning_rate": 1.0570175438596492e-05, |
|
"loss": 0.1386, |
|
"step": 959 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.1207, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"eval_accuracy_safe": 0.5205762506698124, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9955665777209134, |
|
"eval_iou_safe": 0.45439112739729026, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9812477722936591, |
|
"eval_loss": 0.11563575267791748, |
|
"eval_mean_accuracy": 0.7580714141953628, |
|
"eval_mean_iou": 0.7178194498454746, |
|
"eval_overall_accuracy": 0.9815361250692339, |
|
"eval_runtime": 18.6427, |
|
"eval_samples_per_second": 3.594, |
|
"eval_steps_per_second": 0.268, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 96.1, |
|
"learning_rate": 1.0482456140350879e-05, |
|
"loss": 0.1266, |
|
"step": 961 |
|
}, |
|
{ |
|
"epoch": 96.2, |
|
"learning_rate": 1.0438596491228072e-05, |
|
"loss": 0.1123, |
|
"step": 962 |
|
}, |
|
{ |
|
"epoch": 96.3, |
|
"learning_rate": 1.0394736842105264e-05, |
|
"loss": 0.1281, |
|
"step": 963 |
|
}, |
|
{ |
|
"epoch": 96.4, |
|
"learning_rate": 1.0350877192982457e-05, |
|
"loss": 0.1292, |
|
"step": 964 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 1.030701754385965e-05, |
|
"loss": 0.1297, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 96.6, |
|
"learning_rate": 1.0263157894736843e-05, |
|
"loss": 0.1127, |
|
"step": 966 |
|
}, |
|
{ |
|
"epoch": 96.7, |
|
"learning_rate": 1.0219298245614035e-05, |
|
"loss": 0.1264, |
|
"step": 967 |
|
}, |
|
{ |
|
"epoch": 96.8, |
|
"learning_rate": 1.0175438596491228e-05, |
|
"loss": 0.1411, |
|
"step": 968 |
|
}, |
|
{ |
|
"epoch": 96.9, |
|
"learning_rate": 1.0131578947368421e-05, |
|
"loss": 0.131, |
|
"step": 969 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 1.0087719298245614e-05, |
|
"loss": 0.1203, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"eval_accuracy_safe": 0.505391266803135, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9957803080180366, |
|
"eval_iou_safe": 0.4438572197600918, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9810115337153822, |
|
"eval_loss": 0.11649709939956665, |
|
"eval_mean_accuracy": 0.7505857874105858, |
|
"eval_mean_iou": 0.712434376737737, |
|
"eval_overall_accuracy": 0.9812950020405784, |
|
"eval_runtime": 16.6836, |
|
"eval_samples_per_second": 4.016, |
|
"eval_steps_per_second": 0.3, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 97.1, |
|
"learning_rate": 1.0043859649122808e-05, |
|
"loss": 0.116, |
|
"step": 971 |
|
}, |
|
{ |
|
"epoch": 97.2, |
|
"learning_rate": 1e-05, |
|
"loss": 0.1208, |
|
"step": 972 |
|
}, |
|
{ |
|
"epoch": 97.3, |
|
"learning_rate": 9.956140350877194e-06, |
|
"loss": 0.1224, |
|
"step": 973 |
|
}, |
|
{ |
|
"epoch": 97.4, |
|
"learning_rate": 9.912280701754386e-06, |
|
"loss": 0.1179, |
|
"step": 974 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 9.868421052631579e-06, |
|
"loss": 0.138, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 97.6, |
|
"learning_rate": 9.824561403508772e-06, |
|
"loss": 0.1348, |
|
"step": 976 |
|
}, |
|
{ |
|
"epoch": 97.7, |
|
"learning_rate": 9.780701754385965e-06, |
|
"loss": 0.1465, |
|
"step": 977 |
|
}, |
|
{ |
|
"epoch": 97.8, |
|
"learning_rate": 9.736842105263157e-06, |
|
"loss": 0.1131, |
|
"step": 978 |
|
}, |
|
{ |
|
"epoch": 97.9, |
|
"learning_rate": 9.692982456140352e-06, |
|
"loss": 0.1338, |
|
"step": 979 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 9.649122807017545e-06, |
|
"loss": 0.1196, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"eval_accuracy_safe": 0.5295970331648683, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9952786314408473, |
|
"eval_iou_safe": 0.4584791340231646, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9812295085191116, |
|
"eval_loss": 0.11311475187540054, |
|
"eval_mean_accuracy": 0.7624378323028578, |
|
"eval_mean_iou": 0.7198543212711381, |
|
"eval_overall_accuracy": 0.9815231437113747, |
|
"eval_runtime": 19.8625, |
|
"eval_samples_per_second": 3.373, |
|
"eval_steps_per_second": 0.252, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 98.1, |
|
"learning_rate": 9.605263157894737e-06, |
|
"loss": 0.1089, |
|
"step": 981 |
|
}, |
|
{ |
|
"epoch": 98.2, |
|
"learning_rate": 9.561403508771932e-06, |
|
"loss": 0.1449, |
|
"step": 982 |
|
}, |
|
{ |
|
"epoch": 98.3, |
|
"learning_rate": 9.517543859649124e-06, |
|
"loss": 0.1128, |
|
"step": 983 |
|
}, |
|
{ |
|
"epoch": 98.4, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.1272, |
|
"step": 984 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 9.42982456140351e-06, |
|
"loss": 0.1172, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 98.6, |
|
"learning_rate": 9.385964912280703e-06, |
|
"loss": 0.1287, |
|
"step": 986 |
|
}, |
|
{ |
|
"epoch": 98.7, |
|
"learning_rate": 9.342105263157895e-06, |
|
"loss": 0.1128, |
|
"step": 987 |
|
}, |
|
{ |
|
"epoch": 98.8, |
|
"learning_rate": 9.298245614035088e-06, |
|
"loss": 0.118, |
|
"step": 988 |
|
}, |
|
{ |
|
"epoch": 98.9, |
|
"learning_rate": 9.254385964912281e-06, |
|
"loss": 0.127, |
|
"step": 989 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 9.210526315789474e-06, |
|
"loss": 0.1304, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"eval_accuracy_safe": 0.5269370588394031, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9953231023618518, |
|
"eval_iou_safe": 0.45675408800723116, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.981195032675021, |
|
"eval_loss": 0.11546216160058975, |
|
"eval_mean_accuracy": 0.7611300806006275, |
|
"eval_mean_iou": 0.7189745603411261, |
|
"eval_overall_accuracy": 0.9814877296561626, |
|
"eval_runtime": 18.4586, |
|
"eval_samples_per_second": 3.63, |
|
"eval_steps_per_second": 0.271, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 99.1, |
|
"learning_rate": 9.166666666666666e-06, |
|
"loss": 0.1096, |
|
"step": 991 |
|
}, |
|
{ |
|
"epoch": 99.2, |
|
"learning_rate": 9.122807017543861e-06, |
|
"loss": 0.1245, |
|
"step": 992 |
|
}, |
|
{ |
|
"epoch": 99.3, |
|
"learning_rate": 9.078947368421054e-06, |
|
"loss": 0.1343, |
|
"step": 993 |
|
}, |
|
{ |
|
"epoch": 99.4, |
|
"learning_rate": 9.035087719298246e-06, |
|
"loss": 0.1265, |
|
"step": 994 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 8.991228070175439e-06, |
|
"loss": 0.1263, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 99.6, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.111, |
|
"step": 996 |
|
}, |
|
{ |
|
"epoch": 99.7, |
|
"learning_rate": 8.903508771929825e-06, |
|
"loss": 0.1493, |
|
"step": 997 |
|
}, |
|
{ |
|
"epoch": 99.8, |
|
"learning_rate": 8.859649122807017e-06, |
|
"loss": 0.1205, |
|
"step": 998 |
|
}, |
|
{ |
|
"epoch": 99.9, |
|
"learning_rate": 8.81578947368421e-06, |
|
"loss": 0.1235, |
|
"step": 999 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 8.771929824561403e-06, |
|
"loss": 0.1058, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_safe": 0.5162855964317794, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9954840894426386, |
|
"eval_iou_safe": 0.4495824766061013, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9810401925369387, |
|
"eval_loss": 0.11435040831565857, |
|
"eval_mean_accuracy": 0.7558848429372089, |
|
"eval_mean_iou": 0.7153113345715201, |
|
"eval_overall_accuracy": 0.981329334315969, |
|
"eval_runtime": 20.0728, |
|
"eval_samples_per_second": 3.338, |
|
"eval_steps_per_second": 0.249, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 100.1, |
|
"learning_rate": 8.728070175438596e-06, |
|
"loss": 0.1198, |
|
"step": 1001 |
|
}, |
|
{ |
|
"epoch": 100.2, |
|
"learning_rate": 8.68421052631579e-06, |
|
"loss": 0.126, |
|
"step": 1002 |
|
}, |
|
{ |
|
"epoch": 100.3, |
|
"learning_rate": 8.640350877192983e-06, |
|
"loss": 0.1215, |
|
"step": 1003 |
|
}, |
|
{ |
|
"epoch": 100.4, |
|
"learning_rate": 8.596491228070176e-06, |
|
"loss": 0.1391, |
|
"step": 1004 |
|
}, |
|
{ |
|
"epoch": 100.5, |
|
"learning_rate": 8.552631578947368e-06, |
|
"loss": 0.1263, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 100.6, |
|
"learning_rate": 8.508771929824563e-06, |
|
"loss": 0.1219, |
|
"step": 1006 |
|
}, |
|
{ |
|
"epoch": 100.7, |
|
"learning_rate": 8.464912280701755e-06, |
|
"loss": 0.1176, |
|
"step": 1007 |
|
}, |
|
{ |
|
"epoch": 100.8, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.1459, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 100.9, |
|
"learning_rate": 8.377192982456141e-06, |
|
"loss": 0.1112, |
|
"step": 1009 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 0.1135, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"eval_accuracy_safe": 0.49336548432735416, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9960561685332915, |
|
"eval_iou_safe": 0.43677221410727207, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9809295741332075, |
|
"eval_loss": 0.11129783093929291, |
|
"eval_mean_accuracy": 0.7447108264303228, |
|
"eval_mean_iou": 0.7088508941202398, |
|
"eval_overall_accuracy": 0.9812074917465893, |
|
"eval_runtime": 19.0445, |
|
"eval_samples_per_second": 3.518, |
|
"eval_steps_per_second": 0.263, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 101.1, |
|
"learning_rate": 8.289473684210526e-06, |
|
"loss": 0.1253, |
|
"step": 1011 |
|
}, |
|
{ |
|
"epoch": 101.2, |
|
"learning_rate": 8.245614035087721e-06, |
|
"loss": 0.124, |
|
"step": 1012 |
|
}, |
|
{ |
|
"epoch": 101.3, |
|
"learning_rate": 8.201754385964914e-06, |
|
"loss": 0.1127, |
|
"step": 1013 |
|
}, |
|
{ |
|
"epoch": 101.4, |
|
"learning_rate": 8.157894736842106e-06, |
|
"loss": 0.1103, |
|
"step": 1014 |
|
}, |
|
{ |
|
"epoch": 101.5, |
|
"learning_rate": 8.1140350877193e-06, |
|
"loss": 0.1101, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 101.6, |
|
"learning_rate": 8.070175438596492e-06, |
|
"loss": 0.1305, |
|
"step": 1016 |
|
}, |
|
{ |
|
"epoch": 101.7, |
|
"learning_rate": 8.026315789473685e-06, |
|
"loss": 0.1192, |
|
"step": 1017 |
|
}, |
|
{ |
|
"epoch": 101.8, |
|
"learning_rate": 7.982456140350877e-06, |
|
"loss": 0.1208, |
|
"step": 1018 |
|
}, |
|
{ |
|
"epoch": 101.9, |
|
"learning_rate": 7.93859649122807e-06, |
|
"loss": 0.1385, |
|
"step": 1019 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.1116, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"eval_accuracy_safe": 0.5877965003989961, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9931559956599197, |
|
"eval_iou_safe": 0.4798908502070164, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9808498300393228, |
|
"eval_loss": 0.11282429099082947, |
|
"eval_mean_accuracy": 0.7904762480294579, |
|
"eval_mean_iou": 0.7303703401231696, |
|
"eval_overall_accuracy": 0.9811823261317922, |
|
"eval_runtime": 16.4189, |
|
"eval_samples_per_second": 4.081, |
|
"eval_steps_per_second": 0.305, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 102.1, |
|
"learning_rate": 7.850877192982456e-06, |
|
"loss": 0.1283, |
|
"step": 1021 |
|
}, |
|
{ |
|
"epoch": 102.2, |
|
"learning_rate": 7.80701754385965e-06, |
|
"loss": 0.1271, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 102.3, |
|
"learning_rate": 7.763157894736843e-06, |
|
"loss": 0.1075, |
|
"step": 1023 |
|
}, |
|
{ |
|
"epoch": 102.4, |
|
"learning_rate": 7.719298245614036e-06, |
|
"loss": 0.1209, |
|
"step": 1024 |
|
}, |
|
{ |
|
"epoch": 102.5, |
|
"learning_rate": 7.675438596491228e-06, |
|
"loss": 0.1059, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 102.6, |
|
"learning_rate": 7.631578947368421e-06, |
|
"loss": 0.119, |
|
"step": 1026 |
|
}, |
|
{ |
|
"epoch": 102.7, |
|
"learning_rate": 7.587719298245615e-06, |
|
"loss": 0.1087, |
|
"step": 1027 |
|
}, |
|
{ |
|
"epoch": 102.8, |
|
"learning_rate": 7.5438596491228074e-06, |
|
"loss": 0.1259, |
|
"step": 1028 |
|
}, |
|
{ |
|
"epoch": 102.9, |
|
"learning_rate": 7.5e-06, |
|
"loss": 0.1367, |
|
"step": 1029 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"learning_rate": 7.456140350877193e-06, |
|
"loss": 0.1036, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"eval_accuracy_safe": 0.4825713856153215, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9963090895629095, |
|
"eval_iou_safe": 0.43038233630673545, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9808612910002253, |
|
"eval_loss": 0.10781022161245346, |
|
"eval_mean_accuracy": 0.7394402375891155, |
|
"eval_mean_iou": 0.7056218136534804, |
|
"eval_overall_accuracy": 0.9811341015260611, |
|
"eval_runtime": 19.3266, |
|
"eval_samples_per_second": 3.467, |
|
"eval_steps_per_second": 0.259, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 103.1, |
|
"learning_rate": 7.412280701754386e-06, |
|
"loss": 0.1083, |
|
"step": 1031 |
|
}, |
|
{ |
|
"epoch": 103.2, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.1114, |
|
"step": 1032 |
|
}, |
|
{ |
|
"epoch": 103.3, |
|
"learning_rate": 7.324561403508773e-06, |
|
"loss": 0.1182, |
|
"step": 1033 |
|
}, |
|
{ |
|
"epoch": 103.4, |
|
"learning_rate": 7.280701754385966e-06, |
|
"loss": 0.1093, |
|
"step": 1034 |
|
}, |
|
{ |
|
"epoch": 103.5, |
|
"learning_rate": 7.236842105263158e-06, |
|
"loss": 0.1197, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 103.6, |
|
"learning_rate": 7.192982456140351e-06, |
|
"loss": 0.1255, |
|
"step": 1036 |
|
}, |
|
{ |
|
"epoch": 103.7, |
|
"learning_rate": 7.149122807017544e-06, |
|
"loss": 0.1124, |
|
"step": 1037 |
|
}, |
|
{ |
|
"epoch": 103.8, |
|
"learning_rate": 7.1052631578947375e-06, |
|
"loss": 0.1398, |
|
"step": 1038 |
|
}, |
|
{ |
|
"epoch": 103.9, |
|
"learning_rate": 7.06140350877193e-06, |
|
"loss": 0.1537, |
|
"step": 1039 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"learning_rate": 7.017543859649123e-06, |
|
"loss": 0.1195, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"eval_accuracy_safe": 0.43636493305731283, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9966433841643392, |
|
"eval_iou_safe": 0.3930227511436334, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9798337234468582, |
|
"eval_loss": 0.11099809408187866, |
|
"eval_mean_accuracy": 0.7165041586108261, |
|
"eval_mean_iou": 0.6864282372952457, |
|
"eval_overall_accuracy": 0.9800936570808069, |
|
"eval_runtime": 17.9205, |
|
"eval_samples_per_second": 3.739, |
|
"eval_steps_per_second": 0.279, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 104.1, |
|
"learning_rate": 6.973684210526316e-06, |
|
"loss": 0.1218, |
|
"step": 1041 |
|
}, |
|
{ |
|
"epoch": 104.2, |
|
"learning_rate": 6.9298245614035085e-06, |
|
"loss": 0.107, |
|
"step": 1042 |
|
}, |
|
{ |
|
"epoch": 104.3, |
|
"learning_rate": 6.885964912280703e-06, |
|
"loss": 0.1203, |
|
"step": 1043 |
|
}, |
|
{ |
|
"epoch": 104.4, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.1182, |
|
"step": 1044 |
|
}, |
|
{ |
|
"epoch": 104.5, |
|
"learning_rate": 6.798245614035088e-06, |
|
"loss": 0.1221, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 104.6, |
|
"learning_rate": 6.754385964912281e-06, |
|
"loss": 0.1434, |
|
"step": 1046 |
|
}, |
|
{ |
|
"epoch": 104.7, |
|
"learning_rate": 6.710526315789474e-06, |
|
"loss": 0.1148, |
|
"step": 1047 |
|
}, |
|
{ |
|
"epoch": 104.8, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 0.111, |
|
"step": 1048 |
|
}, |
|
{ |
|
"epoch": 104.9, |
|
"learning_rate": 6.622807017543859e-06, |
|
"loss": 0.1081, |
|
"step": 1049 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"learning_rate": 6.578947368421053e-06, |
|
"loss": 0.1205, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"eval_accuracy_safe": 0.5793289154629319, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9934076259767909, |
|
"eval_iou_safe": 0.47619175877104036, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.980848678837411, |
|
"eval_loss": 0.11199548095464706, |
|
"eval_mean_accuracy": 0.7863682707198614, |
|
"eval_mean_iou": 0.7285202188042257, |
|
"eval_overall_accuracy": 0.9811764048106635, |
|
"eval_runtime": 20.878, |
|
"eval_samples_per_second": 3.209, |
|
"eval_steps_per_second": 0.239, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 105.1, |
|
"learning_rate": 6.535087719298246e-06, |
|
"loss": 0.1122, |
|
"step": 1051 |
|
}, |
|
{ |
|
"epoch": 105.2, |
|
"learning_rate": 6.4912280701754385e-06, |
|
"loss": 0.1133, |
|
"step": 1052 |
|
}, |
|
{ |
|
"epoch": 105.3, |
|
"learning_rate": 6.447368421052632e-06, |
|
"loss": 0.1254, |
|
"step": 1053 |
|
}, |
|
{ |
|
"epoch": 105.4, |
|
"learning_rate": 6.403508771929826e-06, |
|
"loss": 0.1119, |
|
"step": 1054 |
|
}, |
|
{ |
|
"epoch": 105.5, |
|
"learning_rate": 6.3596491228070184e-06, |
|
"loss": 0.1219, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 105.6, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.1116, |
|
"step": 1056 |
|
}, |
|
{ |
|
"epoch": 105.7, |
|
"learning_rate": 6.271929824561404e-06, |
|
"loss": 0.1246, |
|
"step": 1057 |
|
}, |
|
{ |
|
"epoch": 105.8, |
|
"learning_rate": 6.228070175438597e-06, |
|
"loss": 0.1108, |
|
"step": 1058 |
|
}, |
|
{ |
|
"epoch": 105.9, |
|
"learning_rate": 6.184210526315789e-06, |
|
"loss": 0.1106, |
|
"step": 1059 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"learning_rate": 6.140350877192982e-06, |
|
"loss": 0.1453, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"eval_accuracy_safe": 0.47073642738462845, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9963649422235906, |
|
"eval_iou_safe": 0.42051549604742744, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805685279809166, |
|
"eval_loss": 0.11096663773059845, |
|
"eval_mean_accuracy": 0.7335506848041096, |
|
"eval_mean_iou": 0.7005420120141721, |
|
"eval_overall_accuracy": 0.9808387186989855, |
|
"eval_runtime": 17.4824, |
|
"eval_samples_per_second": 3.832, |
|
"eval_steps_per_second": 0.286, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 106.1, |
|
"learning_rate": 6.096491228070176e-06, |
|
"loss": 0.1014, |
|
"step": 1061 |
|
}, |
|
{ |
|
"epoch": 106.2, |
|
"learning_rate": 6.0526315789473685e-06, |
|
"loss": 0.1013, |
|
"step": 1062 |
|
}, |
|
{ |
|
"epoch": 106.3, |
|
"learning_rate": 6.008771929824561e-06, |
|
"loss": 0.1278, |
|
"step": 1063 |
|
}, |
|
{ |
|
"epoch": 106.4, |
|
"learning_rate": 5.964912280701755e-06, |
|
"loss": 0.1197, |
|
"step": 1064 |
|
}, |
|
{ |
|
"epoch": 106.5, |
|
"learning_rate": 5.921052631578948e-06, |
|
"loss": 0.1369, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 106.6, |
|
"learning_rate": 5.877192982456141e-06, |
|
"loss": 0.1188, |
|
"step": 1066 |
|
}, |
|
{ |
|
"epoch": 106.7, |
|
"learning_rate": 5.833333333333334e-06, |
|
"loss": 0.1079, |
|
"step": 1067 |
|
}, |
|
{ |
|
"epoch": 106.8, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.124, |
|
"step": 1068 |
|
}, |
|
{ |
|
"epoch": 106.9, |
|
"learning_rate": 5.7456140350877194e-06, |
|
"loss": 0.1202, |
|
"step": 1069 |
|
}, |
|
{ |
|
"epoch": 107.0, |
|
"learning_rate": 5.701754385964912e-06, |
|
"loss": 0.0965, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 107.0, |
|
"eval_accuracy_safe": 0.5637545730355704, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9941070749480517, |
|
"eval_iou_safe": 0.47231148848761234, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9810800900529558, |
|
"eval_loss": 0.10906389355659485, |
|
"eval_mean_accuracy": 0.7789308239918111, |
|
"eval_mean_iou": 0.7266957892702841, |
|
"eval_overall_accuracy": 0.9813951520777461, |
|
"eval_runtime": 18.5937, |
|
"eval_samples_per_second": 3.603, |
|
"eval_steps_per_second": 0.269, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 107.1, |
|
"learning_rate": 5.657894736842106e-06, |
|
"loss": 0.1237, |
|
"step": 1071 |
|
}, |
|
{ |
|
"epoch": 107.2, |
|
"learning_rate": 5.6140350877192985e-06, |
|
"loss": 0.1172, |
|
"step": 1072 |
|
}, |
|
{ |
|
"epoch": 107.3, |
|
"learning_rate": 5.570175438596491e-06, |
|
"loss": 0.1145, |
|
"step": 1073 |
|
}, |
|
{ |
|
"epoch": 107.4, |
|
"learning_rate": 5.526315789473684e-06, |
|
"loss": 0.1006, |
|
"step": 1074 |
|
}, |
|
{ |
|
"epoch": 107.5, |
|
"learning_rate": 5.482456140350877e-06, |
|
"loss": 0.112, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 107.6, |
|
"learning_rate": 5.43859649122807e-06, |
|
"loss": 0.1282, |
|
"step": 1076 |
|
}, |
|
{ |
|
"epoch": 107.7, |
|
"learning_rate": 5.394736842105263e-06, |
|
"loss": 0.1131, |
|
"step": 1077 |
|
}, |
|
{ |
|
"epoch": 107.8, |
|
"learning_rate": 5.350877192982457e-06, |
|
"loss": 0.1172, |
|
"step": 1078 |
|
}, |
|
{ |
|
"epoch": 107.9, |
|
"learning_rate": 5.3070175438596495e-06, |
|
"loss": 0.1197, |
|
"step": 1079 |
|
}, |
|
{ |
|
"epoch": 108.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.1058, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 108.0, |
|
"eval_accuracy_safe": 0.4881419115577812, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.996181015657167, |
|
"eval_iou_safe": 0.4337227862496061, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9808989382624861, |
|
"eval_loss": 0.10848396271467209, |
|
"eval_mean_accuracy": 0.742161463607474, |
|
"eval_mean_iou": 0.7073108622560461, |
|
"eval_overall_accuracy": 0.9811743551225804, |
|
"eval_runtime": 16.6681, |
|
"eval_samples_per_second": 4.02, |
|
"eval_steps_per_second": 0.3, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 108.1, |
|
"learning_rate": 5.219298245614036e-06, |
|
"loss": 0.1041, |
|
"step": 1081 |
|
}, |
|
{ |
|
"epoch": 108.2, |
|
"learning_rate": 5.1754385964912286e-06, |
|
"loss": 0.1122, |
|
"step": 1082 |
|
}, |
|
{ |
|
"epoch": 108.3, |
|
"learning_rate": 5.131578947368421e-06, |
|
"loss": 0.1104, |
|
"step": 1083 |
|
}, |
|
{ |
|
"epoch": 108.4, |
|
"learning_rate": 5.087719298245614e-06, |
|
"loss": 0.1273, |
|
"step": 1084 |
|
}, |
|
{ |
|
"epoch": 108.5, |
|
"learning_rate": 5.043859649122807e-06, |
|
"loss": 0.1075, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 108.6, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1164, |
|
"step": 1086 |
|
}, |
|
{ |
|
"epoch": 108.7, |
|
"learning_rate": 4.956140350877193e-06, |
|
"loss": 0.1248, |
|
"step": 1087 |
|
}, |
|
{ |
|
"epoch": 108.8, |
|
"learning_rate": 4.912280701754386e-06, |
|
"loss": 0.116, |
|
"step": 1088 |
|
}, |
|
{ |
|
"epoch": 108.9, |
|
"learning_rate": 4.868421052631579e-06, |
|
"loss": 0.1061, |
|
"step": 1089 |
|
}, |
|
{ |
|
"epoch": 109.0, |
|
"learning_rate": 4.824561403508772e-06, |
|
"loss": 0.1163, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 109.0, |
|
"eval_accuracy_safe": 0.5127910069737588, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9956981717523291, |
|
"eval_iou_safe": 0.44929119815714896, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811483215803616, |
|
"eval_loss": 0.10774041712284088, |
|
"eval_mean_accuracy": 0.754244589363044, |
|
"eval_mean_iou": 0.7152197598687553, |
|
"eval_overall_accuracy": 0.9814338684082031, |
|
"eval_runtime": 18.1041, |
|
"eval_samples_per_second": 3.701, |
|
"eval_steps_per_second": 0.276, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 109.1, |
|
"learning_rate": 4.780701754385966e-06, |
|
"loss": 0.1082, |
|
"step": 1091 |
|
}, |
|
{ |
|
"epoch": 109.2, |
|
"learning_rate": 4.736842105263159e-06, |
|
"loss": 0.106, |
|
"step": 1092 |
|
}, |
|
{ |
|
"epoch": 109.3, |
|
"learning_rate": 4.692982456140351e-06, |
|
"loss": 0.107, |
|
"step": 1093 |
|
}, |
|
{ |
|
"epoch": 109.4, |
|
"learning_rate": 4.649122807017544e-06, |
|
"loss": 0.1144, |
|
"step": 1094 |
|
}, |
|
{ |
|
"epoch": 109.5, |
|
"learning_rate": 4.605263157894737e-06, |
|
"loss": 0.1318, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 109.6, |
|
"learning_rate": 4.5614035087719304e-06, |
|
"loss": 0.1208, |
|
"step": 1096 |
|
}, |
|
{ |
|
"epoch": 109.7, |
|
"learning_rate": 4.517543859649123e-06, |
|
"loss": 0.104, |
|
"step": 1097 |
|
}, |
|
{ |
|
"epoch": 109.8, |
|
"learning_rate": 4.473684210526316e-06, |
|
"loss": 0.1042, |
|
"step": 1098 |
|
}, |
|
{ |
|
"epoch": 109.9, |
|
"learning_rate": 4.429824561403509e-06, |
|
"loss": 0.114, |
|
"step": 1099 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"learning_rate": 4.3859649122807014e-06, |
|
"loss": 0.1145, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"eval_accuracy_safe": 0.5227524180708633, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9954438426724419, |
|
"eval_iou_safe": 0.45469024797933144, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811908589202809, |
|
"eval_loss": 0.10809645056724548, |
|
"eval_mean_accuracy": 0.7590981303716526, |
|
"eval_mean_iou": 0.7179405534498062, |
|
"eval_overall_accuracy": 0.981481295913013, |
|
"eval_runtime": 18.597, |
|
"eval_samples_per_second": 3.603, |
|
"eval_steps_per_second": 0.269, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 110.1, |
|
"learning_rate": 4.342105263157895e-06, |
|
"loss": 0.11, |
|
"step": 1101 |
|
}, |
|
{ |
|
"epoch": 110.2, |
|
"learning_rate": 4.298245614035088e-06, |
|
"loss": 0.1131, |
|
"step": 1102 |
|
}, |
|
{ |
|
"epoch": 110.3, |
|
"learning_rate": 4.254385964912281e-06, |
|
"loss": 0.1218, |
|
"step": 1103 |
|
}, |
|
{ |
|
"epoch": 110.4, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.1157, |
|
"step": 1104 |
|
}, |
|
{ |
|
"epoch": 110.5, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 0.1113, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 110.6, |
|
"learning_rate": 4.1228070175438605e-06, |
|
"loss": 0.1096, |
|
"step": 1106 |
|
}, |
|
{ |
|
"epoch": 110.7, |
|
"learning_rate": 4.078947368421053e-06, |
|
"loss": 0.1602, |
|
"step": 1107 |
|
}, |
|
{ |
|
"epoch": 110.8, |
|
"learning_rate": 4.035087719298246e-06, |
|
"loss": 0.1037, |
|
"step": 1108 |
|
}, |
|
{ |
|
"epoch": 110.9, |
|
"learning_rate": 3.991228070175439e-06, |
|
"loss": 0.1077, |
|
"step": 1109 |
|
}, |
|
{ |
|
"epoch": 111.0, |
|
"learning_rate": 3.9473684210526315e-06, |
|
"loss": 0.1031, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 111.0, |
|
"eval_accuracy_safe": 0.5521894672726783, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9944743413932868, |
|
"eval_iou_safe": 0.46734672570009755, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811017093018296, |
|
"eval_loss": 0.10733948647975922, |
|
"eval_mean_accuracy": 0.7733319043329825, |
|
"eval_mean_iou": 0.7242242175009636, |
|
"eval_overall_accuracy": 0.9814099553805679, |
|
"eval_runtime": 18.7402, |
|
"eval_samples_per_second": 3.575, |
|
"eval_steps_per_second": 0.267, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 111.1, |
|
"learning_rate": 3.903508771929825e-06, |
|
"loss": 0.1205, |
|
"step": 1111 |
|
}, |
|
{ |
|
"epoch": 111.2, |
|
"learning_rate": 3.859649122807018e-06, |
|
"loss": 0.1161, |
|
"step": 1112 |
|
}, |
|
{ |
|
"epoch": 111.3, |
|
"learning_rate": 3.8157894736842105e-06, |
|
"loss": 0.1093, |
|
"step": 1113 |
|
}, |
|
{ |
|
"epoch": 111.4, |
|
"learning_rate": 3.7719298245614037e-06, |
|
"loss": 0.1335, |
|
"step": 1114 |
|
}, |
|
{ |
|
"epoch": 111.5, |
|
"learning_rate": 3.7280701754385965e-06, |
|
"loss": 0.0984, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 111.6, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.1212, |
|
"step": 1116 |
|
}, |
|
{ |
|
"epoch": 111.7, |
|
"learning_rate": 3.640350877192983e-06, |
|
"loss": 0.1044, |
|
"step": 1117 |
|
}, |
|
{ |
|
"epoch": 111.8, |
|
"learning_rate": 3.5964912280701756e-06, |
|
"loss": 0.1292, |
|
"step": 1118 |
|
}, |
|
{ |
|
"epoch": 111.9, |
|
"learning_rate": 3.5526315789473687e-06, |
|
"loss": 0.1126, |
|
"step": 1119 |
|
}, |
|
{ |
|
"epoch": 112.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.1042, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 112.0, |
|
"eval_accuracy_safe": 0.5489878604939842, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9946506996895131, |
|
"eval_iou_safe": 0.4669267844847372, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811813668886878, |
|
"eval_loss": 0.1064290776848793, |
|
"eval_mean_accuracy": 0.7718192800917487, |
|
"eval_mean_iou": 0.7240540756867125, |
|
"eval_overall_accuracy": 0.9814865340047808, |
|
"eval_runtime": 19.0521, |
|
"eval_samples_per_second": 3.517, |
|
"eval_steps_per_second": 0.262, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 112.1, |
|
"learning_rate": 3.4649122807017542e-06, |
|
"loss": 0.1091, |
|
"step": 1121 |
|
}, |
|
{ |
|
"epoch": 112.2, |
|
"learning_rate": 3.421052631578948e-06, |
|
"loss": 0.1145, |
|
"step": 1122 |
|
}, |
|
{ |
|
"epoch": 112.3, |
|
"learning_rate": 3.3771929824561406e-06, |
|
"loss": 0.1097, |
|
"step": 1123 |
|
}, |
|
{ |
|
"epoch": 112.4, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 0.1126, |
|
"step": 1124 |
|
}, |
|
{ |
|
"epoch": 112.5, |
|
"learning_rate": 3.2894736842105265e-06, |
|
"loss": 0.1211, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 112.6, |
|
"learning_rate": 3.2456140350877192e-06, |
|
"loss": 0.1137, |
|
"step": 1126 |
|
}, |
|
{ |
|
"epoch": 112.7, |
|
"learning_rate": 3.201754385964913e-06, |
|
"loss": 0.1051, |
|
"step": 1127 |
|
}, |
|
{ |
|
"epoch": 112.8, |
|
"learning_rate": 3.1578947368421056e-06, |
|
"loss": 0.1179, |
|
"step": 1128 |
|
}, |
|
{ |
|
"epoch": 112.9, |
|
"learning_rate": 3.1140350877192983e-06, |
|
"loss": 0.1099, |
|
"step": 1129 |
|
}, |
|
{ |
|
"epoch": 113.0, |
|
"learning_rate": 3.070175438596491e-06, |
|
"loss": 0.1119, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 113.0, |
|
"eval_accuracy_safe": 0.5063627356872179, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9957986126715372, |
|
"eval_iou_safe": 0.44494541118604913, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9810581456066044, |
|
"eval_loss": 0.10626911371946335, |
|
"eval_mean_accuracy": 0.7510806741793776, |
|
"eval_mean_iou": 0.7130017783963267, |
|
"eval_overall_accuracy": 0.9813414616371269, |
|
"eval_runtime": 18.2981, |
|
"eval_samples_per_second": 3.662, |
|
"eval_steps_per_second": 0.273, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 113.1, |
|
"learning_rate": 3.0263157894736843e-06, |
|
"loss": 0.1019, |
|
"step": 1131 |
|
}, |
|
{ |
|
"epoch": 113.2, |
|
"learning_rate": 2.9824561403508774e-06, |
|
"loss": 0.1266, |
|
"step": 1132 |
|
}, |
|
{ |
|
"epoch": 113.3, |
|
"learning_rate": 2.9385964912280706e-06, |
|
"loss": 0.1021, |
|
"step": 1133 |
|
}, |
|
{ |
|
"epoch": 113.4, |
|
"learning_rate": 2.8947368421052634e-06, |
|
"loss": 0.1287, |
|
"step": 1134 |
|
}, |
|
{ |
|
"epoch": 113.5, |
|
"learning_rate": 2.850877192982456e-06, |
|
"loss": 0.1162, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 113.6, |
|
"learning_rate": 2.8070175438596493e-06, |
|
"loss": 0.0998, |
|
"step": 1136 |
|
}, |
|
{ |
|
"epoch": 113.7, |
|
"learning_rate": 2.763157894736842e-06, |
|
"loss": 0.1063, |
|
"step": 1137 |
|
}, |
|
{ |
|
"epoch": 113.8, |
|
"learning_rate": 2.719298245614035e-06, |
|
"loss": 0.1311, |
|
"step": 1138 |
|
}, |
|
{ |
|
"epoch": 113.9, |
|
"learning_rate": 2.6754385964912284e-06, |
|
"loss": 0.1056, |
|
"step": 1139 |
|
}, |
|
{ |
|
"epoch": 114.0, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.1116, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 114.0, |
|
"eval_accuracy_safe": 0.5172242975162008, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9956040083905715, |
|
"eval_iou_safe": 0.45195046241479725, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811859989103405, |
|
"eval_loss": 0.10742120444774628, |
|
"eval_mean_accuracy": 0.7564141529533861, |
|
"eval_mean_iou": 0.7165682306625689, |
|
"eval_overall_accuracy": 0.9814734387753615, |
|
"eval_runtime": 17.678, |
|
"eval_samples_per_second": 3.79, |
|
"eval_steps_per_second": 0.283, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 114.1, |
|
"learning_rate": 2.5877192982456143e-06, |
|
"loss": 0.1122, |
|
"step": 1141 |
|
}, |
|
{ |
|
"epoch": 114.2, |
|
"learning_rate": 2.543859649122807e-06, |
|
"loss": 0.1195, |
|
"step": 1142 |
|
}, |
|
{ |
|
"epoch": 114.3, |
|
"learning_rate": 2.5e-06, |
|
"loss": 0.1197, |
|
"step": 1143 |
|
}, |
|
{ |
|
"epoch": 114.4, |
|
"learning_rate": 2.456140350877193e-06, |
|
"loss": 0.1142, |
|
"step": 1144 |
|
}, |
|
{ |
|
"epoch": 114.5, |
|
"learning_rate": 2.412280701754386e-06, |
|
"loss": 0.1083, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 114.6, |
|
"learning_rate": 2.3684210526315793e-06, |
|
"loss": 0.113, |
|
"step": 1146 |
|
}, |
|
{ |
|
"epoch": 114.7, |
|
"learning_rate": 2.324561403508772e-06, |
|
"loss": 0.131, |
|
"step": 1147 |
|
}, |
|
{ |
|
"epoch": 114.8, |
|
"learning_rate": 2.2807017543859652e-06, |
|
"loss": 0.1079, |
|
"step": 1148 |
|
}, |
|
{ |
|
"epoch": 114.9, |
|
"learning_rate": 2.236842105263158e-06, |
|
"loss": 0.1083, |
|
"step": 1149 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"learning_rate": 2.1929824561403507e-06, |
|
"loss": 0.1063, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"eval_accuracy_safe": 0.5163414944429667, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9955996669022413, |
|
"eval_iou_safe": 0.45112284335766795, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811557381461328, |
|
"eval_loss": 0.10722891986370087, |
|
"eval_mean_accuracy": 0.755970580672604, |
|
"eval_mean_iou": 0.7161392907519004, |
|
"eval_overall_accuracy": 0.9814431489403568, |
|
"eval_runtime": 19.0504, |
|
"eval_samples_per_second": 3.517, |
|
"eval_steps_per_second": 0.262, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 115.1, |
|
"learning_rate": 2.149122807017544e-06, |
|
"loss": 0.1114, |
|
"step": 1151 |
|
}, |
|
{ |
|
"epoch": 115.2, |
|
"learning_rate": 2.105263157894737e-06, |
|
"loss": 0.1079, |
|
"step": 1152 |
|
}, |
|
{ |
|
"epoch": 115.3, |
|
"learning_rate": 2.0614035087719302e-06, |
|
"loss": 0.1131, |
|
"step": 1153 |
|
}, |
|
{ |
|
"epoch": 115.4, |
|
"learning_rate": 2.017543859649123e-06, |
|
"loss": 0.1024, |
|
"step": 1154 |
|
}, |
|
{ |
|
"epoch": 115.5, |
|
"learning_rate": 1.9736842105263157e-06, |
|
"loss": 0.1079, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 115.6, |
|
"learning_rate": 1.929824561403509e-06, |
|
"loss": 0.1255, |
|
"step": 1156 |
|
}, |
|
{ |
|
"epoch": 115.7, |
|
"learning_rate": 1.8859649122807019e-06, |
|
"loss": 0.107, |
|
"step": 1157 |
|
}, |
|
{ |
|
"epoch": 115.8, |
|
"learning_rate": 1.8421052631578946e-06, |
|
"loss": 0.1091, |
|
"step": 1158 |
|
}, |
|
{ |
|
"epoch": 115.9, |
|
"learning_rate": 1.7982456140350878e-06, |
|
"loss": 0.1332, |
|
"step": 1159 |
|
}, |
|
{ |
|
"epoch": 116.0, |
|
"learning_rate": 1.7543859649122807e-06, |
|
"loss": 0.1054, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 116.0, |
|
"eval_accuracy_safe": 0.4994313822999911, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9959546129076203, |
|
"eval_iou_safe": 0.440840145979192, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.981007934217502, |
|
"eval_loss": 0.10647319257259369, |
|
"eval_mean_accuracy": 0.7476929976038057, |
|
"eval_mean_iou": 0.710924040098347, |
|
"eval_overall_accuracy": 0.9812881128111882, |
|
"eval_runtime": 19.8728, |
|
"eval_samples_per_second": 3.371, |
|
"eval_steps_per_second": 0.252, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 116.1, |
|
"learning_rate": 1.710526315789474e-06, |
|
"loss": 0.1007, |
|
"step": 1161 |
|
}, |
|
{ |
|
"epoch": 116.2, |
|
"learning_rate": 1.6666666666666667e-06, |
|
"loss": 0.104, |
|
"step": 1162 |
|
}, |
|
{ |
|
"epoch": 116.3, |
|
"learning_rate": 1.6228070175438596e-06, |
|
"loss": 0.1211, |
|
"step": 1163 |
|
}, |
|
{ |
|
"epoch": 116.4, |
|
"learning_rate": 1.5789473684210528e-06, |
|
"loss": 0.1136, |
|
"step": 1164 |
|
}, |
|
{ |
|
"epoch": 116.5, |
|
"learning_rate": 1.5350877192982455e-06, |
|
"loss": 0.1072, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 116.6, |
|
"learning_rate": 1.4912280701754387e-06, |
|
"loss": 0.1163, |
|
"step": 1166 |
|
}, |
|
{ |
|
"epoch": 116.7, |
|
"learning_rate": 1.4473684210526317e-06, |
|
"loss": 0.1028, |
|
"step": 1167 |
|
}, |
|
{ |
|
"epoch": 116.8, |
|
"learning_rate": 1.4035087719298246e-06, |
|
"loss": 0.1056, |
|
"step": 1168 |
|
}, |
|
{ |
|
"epoch": 116.9, |
|
"learning_rate": 1.3596491228070176e-06, |
|
"loss": 0.1079, |
|
"step": 1169 |
|
}, |
|
{ |
|
"epoch": 117.0, |
|
"learning_rate": 1.3157894736842106e-06, |
|
"loss": 0.1613, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 117.0, |
|
"eval_accuracy_safe": 0.5251425399285277, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9955069702595142, |
|
"eval_iou_safe": 0.4575946656365209, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9813234510056935, |
|
"eval_loss": 0.1059972271323204, |
|
"eval_mean_accuracy": 0.7603247550940209, |
|
"eval_mean_iou": 0.7194590583211072, |
|
"eval_overall_accuracy": 0.9816131591796875, |
|
"eval_runtime": 19.4796, |
|
"eval_samples_per_second": 3.44, |
|
"eval_steps_per_second": 0.257, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 117.1, |
|
"learning_rate": 1.2719298245614035e-06, |
|
"loss": 0.1029, |
|
"step": 1171 |
|
}, |
|
{ |
|
"epoch": 117.2, |
|
"learning_rate": 1.2280701754385965e-06, |
|
"loss": 0.1146, |
|
"step": 1172 |
|
}, |
|
{ |
|
"epoch": 117.3, |
|
"learning_rate": 1.1842105263157896e-06, |
|
"loss": 0.1123, |
|
"step": 1173 |
|
}, |
|
{ |
|
"epoch": 117.4, |
|
"learning_rate": 1.1403508771929826e-06, |
|
"loss": 0.1031, |
|
"step": 1174 |
|
}, |
|
{ |
|
"epoch": 117.5, |
|
"learning_rate": 1.0964912280701754e-06, |
|
"loss": 0.0943, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 117.6, |
|
"learning_rate": 1.0526315789473685e-06, |
|
"loss": 0.1081, |
|
"step": 1176 |
|
}, |
|
{ |
|
"epoch": 117.7, |
|
"learning_rate": 1.0087719298245615e-06, |
|
"loss": 0.1203, |
|
"step": 1177 |
|
}, |
|
{ |
|
"epoch": 117.8, |
|
"learning_rate": 9.649122807017545e-07, |
|
"loss": 0.1192, |
|
"step": 1178 |
|
}, |
|
{ |
|
"epoch": 117.9, |
|
"learning_rate": 9.210526315789473e-07, |
|
"loss": 0.1143, |
|
"step": 1179 |
|
}, |
|
{ |
|
"epoch": 118.0, |
|
"learning_rate": 8.771929824561404e-07, |
|
"loss": 0.1542, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 118.0, |
|
"eval_accuracy_safe": 0.5454296629542678, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9947301371922046, |
|
"eval_iou_safe": 0.4649324872746153, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811549060748658, |
|
"eval_loss": 0.10577701032161713, |
|
"eval_mean_accuracy": 0.7700799000732361, |
|
"eval_mean_iou": 0.7230436966747406, |
|
"eval_overall_accuracy": 0.9814585216009795, |
|
"eval_runtime": 17.036, |
|
"eval_samples_per_second": 3.933, |
|
"eval_steps_per_second": 0.293, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 118.1, |
|
"learning_rate": 8.333333333333333e-07, |
|
"loss": 0.0966, |
|
"step": 1181 |
|
}, |
|
{ |
|
"epoch": 118.2, |
|
"learning_rate": 7.894736842105264e-07, |
|
"loss": 0.1002, |
|
"step": 1182 |
|
}, |
|
{ |
|
"epoch": 118.3, |
|
"learning_rate": 7.456140350877194e-07, |
|
"loss": 0.1074, |
|
"step": 1183 |
|
}, |
|
{ |
|
"epoch": 118.4, |
|
"learning_rate": 7.017543859649123e-07, |
|
"loss": 0.0941, |
|
"step": 1184 |
|
}, |
|
{ |
|
"epoch": 118.5, |
|
"learning_rate": 6.578947368421053e-07, |
|
"loss": 0.1305, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 118.6, |
|
"learning_rate": 6.140350877192982e-07, |
|
"loss": 0.1193, |
|
"step": 1186 |
|
}, |
|
{ |
|
"epoch": 118.7, |
|
"learning_rate": 5.701754385964913e-07, |
|
"loss": 0.1081, |
|
"step": 1187 |
|
}, |
|
{ |
|
"epoch": 118.8, |
|
"learning_rate": 5.263157894736843e-07, |
|
"loss": 0.108, |
|
"step": 1188 |
|
}, |
|
{ |
|
"epoch": 118.9, |
|
"learning_rate": 4.824561403508772e-07, |
|
"loss": 0.1275, |
|
"step": 1189 |
|
}, |
|
{ |
|
"epoch": 119.0, |
|
"learning_rate": 4.385964912280702e-07, |
|
"loss": 0.1226, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 119.0, |
|
"eval_accuracy_safe": 0.5469389092563252, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9947000987864602, |
|
"eval_iou_safe": 0.46582711963463125, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811697351170248, |
|
"eval_loss": 0.10644111037254333, |
|
"eval_mean_accuracy": 0.7708195040213927, |
|
"eval_mean_iou": 0.723498427375828, |
|
"eval_overall_accuracy": 0.9814739511973822, |
|
"eval_runtime": 17.2915, |
|
"eval_samples_per_second": 3.875, |
|
"eval_steps_per_second": 0.289, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 119.1, |
|
"learning_rate": 3.947368421052632e-07, |
|
"loss": 0.1264, |
|
"step": 1191 |
|
}, |
|
{ |
|
"epoch": 119.2, |
|
"learning_rate": 3.5087719298245616e-07, |
|
"loss": 0.115, |
|
"step": 1192 |
|
}, |
|
{ |
|
"epoch": 119.3, |
|
"learning_rate": 3.070175438596491e-07, |
|
"loss": 0.1002, |
|
"step": 1193 |
|
}, |
|
{ |
|
"epoch": 119.4, |
|
"learning_rate": 2.6315789473684213e-07, |
|
"loss": 0.1337, |
|
"step": 1194 |
|
}, |
|
{ |
|
"epoch": 119.5, |
|
"learning_rate": 2.192982456140351e-07, |
|
"loss": 0.1204, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 119.6, |
|
"learning_rate": 1.7543859649122808e-07, |
|
"loss": 0.0902, |
|
"step": 1196 |
|
}, |
|
{ |
|
"epoch": 119.7, |
|
"learning_rate": 1.3157894736842107e-07, |
|
"loss": 0.1183, |
|
"step": 1197 |
|
}, |
|
{ |
|
"epoch": 119.8, |
|
"learning_rate": 8.771929824561404e-08, |
|
"loss": 0.0965, |
|
"step": 1198 |
|
}, |
|
{ |
|
"epoch": 119.9, |
|
"learning_rate": 4.385964912280702e-08, |
|
"loss": 0.1192, |
|
"step": 1199 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"learning_rate": 0.0, |
|
"loss": 0.1295, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"eval_accuracy_safe": 0.5437141722661054, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9948176123151832, |
|
"eval_iou_safe": 0.4646083655200244, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811906535332905, |
|
"eval_loss": 0.10602644830942154, |
|
"eval_mean_accuracy": 0.7692658922906443, |
|
"eval_mean_iou": 0.7228995095266575, |
|
"eval_overall_accuracy": 0.9814927400048099, |
|
"eval_runtime": 19.9015, |
|
"eval_samples_per_second": 3.367, |
|
"eval_steps_per_second": 0.251, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 120.1, |
|
"learning_rate": 4.008097165991903e-06, |
|
"loss": 0.1068, |
|
"step": 1201 |
|
}, |
|
{ |
|
"epoch": 120.2, |
|
"learning_rate": 3.967611336032389e-06, |
|
"loss": 0.1127, |
|
"step": 1202 |
|
}, |
|
{ |
|
"epoch": 120.3, |
|
"learning_rate": 3.9271255060728745e-06, |
|
"loss": 0.1137, |
|
"step": 1203 |
|
}, |
|
{ |
|
"epoch": 120.4, |
|
"learning_rate": 3.886639676113361e-06, |
|
"loss": 0.1092, |
|
"step": 1204 |
|
}, |
|
{ |
|
"epoch": 120.5, |
|
"learning_rate": 3.846153846153847e-06, |
|
"loss": 0.1072, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 120.6, |
|
"learning_rate": 3.8056680161943325e-06, |
|
"loss": 0.1076, |
|
"step": 1206 |
|
}, |
|
{ |
|
"epoch": 120.7, |
|
"learning_rate": 3.7651821862348182e-06, |
|
"loss": 0.1059, |
|
"step": 1207 |
|
}, |
|
{ |
|
"epoch": 120.8, |
|
"learning_rate": 3.724696356275304e-06, |
|
"loss": 0.1152, |
|
"step": 1208 |
|
}, |
|
{ |
|
"epoch": 120.9, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.1227, |
|
"step": 1209 |
|
}, |
|
{ |
|
"epoch": 121.0, |
|
"learning_rate": 3.6437246963562754e-06, |
|
"loss": 0.1438, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 121.0, |
|
"eval_accuracy_safe": 0.4908770590707052, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9961502145575266, |
|
"eval_iou_safe": 0.43576120639059857, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9809490218372198, |
|
"eval_loss": 0.10755152255296707, |
|
"eval_mean_accuracy": 0.743513636814116, |
|
"eval_mean_iou": 0.7083551141139092, |
|
"eval_overall_accuracy": 0.9812252557099755, |
|
"eval_runtime": 20.1329, |
|
"eval_samples_per_second": 3.328, |
|
"eval_steps_per_second": 0.248, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 121.1, |
|
"learning_rate": 3.603238866396761e-06, |
|
"loss": 0.13, |
|
"step": 1211 |
|
}, |
|
{ |
|
"epoch": 121.2, |
|
"learning_rate": 3.5627530364372468e-06, |
|
"loss": 0.0933, |
|
"step": 1212 |
|
}, |
|
{ |
|
"epoch": 121.3, |
|
"learning_rate": 3.522267206477733e-06, |
|
"loss": 0.0936, |
|
"step": 1213 |
|
}, |
|
{ |
|
"epoch": 121.4, |
|
"learning_rate": 3.4817813765182186e-06, |
|
"loss": 0.1164, |
|
"step": 1214 |
|
}, |
|
{ |
|
"epoch": 121.5, |
|
"learning_rate": 3.4412955465587043e-06, |
|
"loss": 0.1013, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 121.6, |
|
"learning_rate": 3.4008097165991905e-06, |
|
"loss": 0.1025, |
|
"step": 1216 |
|
}, |
|
{ |
|
"epoch": 121.7, |
|
"learning_rate": 3.360323886639676e-06, |
|
"loss": 0.1096, |
|
"step": 1217 |
|
}, |
|
{ |
|
"epoch": 121.8, |
|
"learning_rate": 3.319838056680162e-06, |
|
"loss": 0.1262, |
|
"step": 1218 |
|
}, |
|
{ |
|
"epoch": 121.9, |
|
"learning_rate": 3.279352226720648e-06, |
|
"loss": 0.1008, |
|
"step": 1219 |
|
}, |
|
{ |
|
"epoch": 122.0, |
|
"learning_rate": 3.2388663967611337e-06, |
|
"loss": 0.1391, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 122.0, |
|
"eval_accuracy_safe": 0.5416767861342092, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9948285833735312, |
|
"eval_iou_safe": 0.46301001074227116, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811414643319519, |
|
"eval_loss": 0.10808227956295013, |
|
"eval_mean_accuracy": 0.7682526847538702, |
|
"eval_mean_iou": 0.7220757375371115, |
|
"eval_overall_accuracy": 0.9814432058761369, |
|
"eval_runtime": 23.1408, |
|
"eval_samples_per_second": 2.895, |
|
"eval_steps_per_second": 0.216, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 122.1, |
|
"learning_rate": 3.1983805668016195e-06, |
|
"loss": 0.1, |
|
"step": 1221 |
|
}, |
|
{ |
|
"epoch": 122.2, |
|
"learning_rate": 3.1578947368421056e-06, |
|
"loss": 0.0993, |
|
"step": 1222 |
|
}, |
|
{ |
|
"epoch": 122.3, |
|
"learning_rate": 3.1174089068825913e-06, |
|
"loss": 0.1111, |
|
"step": 1223 |
|
}, |
|
{ |
|
"epoch": 122.4, |
|
"learning_rate": 3.0769230769230774e-06, |
|
"loss": 0.1048, |
|
"step": 1224 |
|
}, |
|
{ |
|
"epoch": 122.5, |
|
"learning_rate": 3.0364372469635627e-06, |
|
"loss": 0.0984, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 122.6, |
|
"learning_rate": 2.9959514170040484e-06, |
|
"loss": 0.1017, |
|
"step": 1226 |
|
}, |
|
{ |
|
"epoch": 122.7, |
|
"learning_rate": 2.9554655870445346e-06, |
|
"loss": 0.1201, |
|
"step": 1227 |
|
}, |
|
{ |
|
"epoch": 122.8, |
|
"learning_rate": 2.9149797570850203e-06, |
|
"loss": 0.1078, |
|
"step": 1228 |
|
}, |
|
{ |
|
"epoch": 122.9, |
|
"learning_rate": 2.874493927125506e-06, |
|
"loss": 0.1181, |
|
"step": 1229 |
|
}, |
|
{ |
|
"epoch": 123.0, |
|
"learning_rate": 2.834008097165992e-06, |
|
"loss": 0.1756, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 123.0, |
|
"eval_accuracy_safe": 0.5473244127817549, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9946531637774844, |
|
"eval_iou_safe": 0.46554403890231466, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811347943352009, |
|
"eval_loss": 0.10414864122867584, |
|
"eval_mean_accuracy": 0.7709887882796196, |
|
"eval_mean_iou": 0.7233394166187578, |
|
"eval_overall_accuracy": 0.9814397897293319, |
|
"eval_runtime": 20.7037, |
|
"eval_samples_per_second": 3.236, |
|
"eval_steps_per_second": 0.242, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 123.1, |
|
"learning_rate": 2.793522267206478e-06, |
|
"loss": 0.1058, |
|
"step": 1231 |
|
}, |
|
{ |
|
"epoch": 123.2, |
|
"learning_rate": 2.753036437246964e-06, |
|
"loss": 0.1117, |
|
"step": 1232 |
|
}, |
|
{ |
|
"epoch": 123.3, |
|
"learning_rate": 2.7125506072874497e-06, |
|
"loss": 0.115, |
|
"step": 1233 |
|
}, |
|
{ |
|
"epoch": 123.4, |
|
"learning_rate": 2.6720647773279354e-06, |
|
"loss": 0.1188, |
|
"step": 1234 |
|
}, |
|
{ |
|
"epoch": 123.5, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.0973, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 123.6, |
|
"learning_rate": 2.591093117408907e-06, |
|
"loss": 0.1178, |
|
"step": 1236 |
|
}, |
|
{ |
|
"epoch": 123.7, |
|
"learning_rate": 2.550607287449393e-06, |
|
"loss": 0.1139, |
|
"step": 1237 |
|
}, |
|
{ |
|
"epoch": 123.8, |
|
"learning_rate": 2.5101214574898787e-06, |
|
"loss": 0.0987, |
|
"step": 1238 |
|
}, |
|
{ |
|
"epoch": 123.9, |
|
"learning_rate": 2.4696356275303644e-06, |
|
"loss": 0.1087, |
|
"step": 1239 |
|
}, |
|
{ |
|
"epoch": 124.0, |
|
"learning_rate": 2.4291497975708505e-06, |
|
"loss": 0.1174, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 124.0, |
|
"eval_accuracy_safe": 0.5275442268919549, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9952722365458744, |
|
"eval_iou_safe": 0.4566189400486498, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9811627640253819, |
|
"eval_loss": 0.10285481065511703, |
|
"eval_mean_accuracy": 0.7614082317189146, |
|
"eval_mean_iou": 0.7188908520370159, |
|
"eval_overall_accuracy": 0.9814563011055562, |
|
"eval_runtime": 18.179, |
|
"eval_samples_per_second": 3.686, |
|
"eval_steps_per_second": 0.275, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 124.1, |
|
"learning_rate": 2.3886639676113362e-06, |
|
"loss": 0.1142, |
|
"step": 1241 |
|
}, |
|
{ |
|
"epoch": 124.2, |
|
"learning_rate": 2.348178137651822e-06, |
|
"loss": 0.097, |
|
"step": 1242 |
|
}, |
|
{ |
|
"epoch": 124.3, |
|
"learning_rate": 2.307692307692308e-06, |
|
"loss": 0.1176, |
|
"step": 1243 |
|
}, |
|
{ |
|
"epoch": 124.4, |
|
"learning_rate": 2.267206477732794e-06, |
|
"loss": 0.1262, |
|
"step": 1244 |
|
}, |
|
{ |
|
"epoch": 124.5, |
|
"learning_rate": 2.2267206477732795e-06, |
|
"loss": 0.1089, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 124.6, |
|
"learning_rate": 2.1862348178137652e-06, |
|
"loss": 0.1077, |
|
"step": 1246 |
|
}, |
|
{ |
|
"epoch": 124.7, |
|
"learning_rate": 2.145748987854251e-06, |
|
"loss": 0.0997, |
|
"step": 1247 |
|
}, |
|
{ |
|
"epoch": 124.8, |
|
"learning_rate": 2.105263157894737e-06, |
|
"loss": 0.1111, |
|
"step": 1248 |
|
}, |
|
{ |
|
"epoch": 124.9, |
|
"learning_rate": 2.0647773279352228e-06, |
|
"loss": 0.1299, |
|
"step": 1249 |
|
}, |
|
{ |
|
"epoch": 125.0, |
|
"learning_rate": 2.0242914979757085e-06, |
|
"loss": 0.1025, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 125.0, |
|
"eval_accuracy_safe": 0.49800309173827395, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9959171822379621, |
|
"eval_iou_safe": 0.43910277654560814, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9809290607960587, |
|
"eval_loss": 0.10430267453193665, |
|
"eval_mean_accuracy": 0.746960136988118, |
|
"eval_mean_iou": 0.7100159186708335, |
|
"eval_overall_accuracy": 0.9812095983704524, |
|
"eval_runtime": 19.8688, |
|
"eval_samples_per_second": 3.372, |
|
"eval_steps_per_second": 0.252, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 125.1, |
|
"learning_rate": 1.9838056680161946e-06, |
|
"loss": 0.1114, |
|
"step": 1251 |
|
}, |
|
{ |
|
"epoch": 125.2, |
|
"learning_rate": 1.9433198380566803e-06, |
|
"loss": 0.1115, |
|
"step": 1252 |
|
}, |
|
{ |
|
"epoch": 125.3, |
|
"learning_rate": 1.9028340080971663e-06, |
|
"loss": 0.1105, |
|
"step": 1253 |
|
}, |
|
{ |
|
"epoch": 125.4, |
|
"learning_rate": 1.862348178137652e-06, |
|
"loss": 0.0976, |
|
"step": 1254 |
|
}, |
|
{ |
|
"epoch": 125.5, |
|
"learning_rate": 1.8218623481781377e-06, |
|
"loss": 0.1028, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 125.6, |
|
"learning_rate": 1.7813765182186234e-06, |
|
"loss": 0.1074, |
|
"step": 1256 |
|
}, |
|
{ |
|
"epoch": 125.7, |
|
"learning_rate": 1.7408906882591093e-06, |
|
"loss": 0.1044, |
|
"step": 1257 |
|
}, |
|
{ |
|
"epoch": 125.8, |
|
"learning_rate": 1.7004048582995952e-06, |
|
"loss": 0.124, |
|
"step": 1258 |
|
}, |
|
{ |
|
"epoch": 125.9, |
|
"learning_rate": 1.659919028340081e-06, |
|
"loss": 0.1212, |
|
"step": 1259 |
|
}, |
|
{ |
|
"epoch": 126.0, |
|
"learning_rate": 1.6194331983805669e-06, |
|
"loss": 0.0997, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 126.0, |
|
"eval_accuracy_safe": 0.532251224937452, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9952876077613139, |
|
"eval_iou_safe": 0.46089457274287127, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9813165165727984, |
|
"eval_loss": 0.10380012542009354, |
|
"eval_mean_accuracy": 0.7637694163493829, |
|
"eval_mean_iou": 0.7211055446578348, |
|
"eval_overall_accuracy": 0.9816102554549032, |
|
"eval_runtime": 21.7085, |
|
"eval_samples_per_second": 3.086, |
|
"eval_steps_per_second": 0.23, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 126.1, |
|
"learning_rate": 1.5789473684210528e-06, |
|
"loss": 0.1142, |
|
"step": 1261 |
|
}, |
|
{ |
|
"epoch": 126.2, |
|
"learning_rate": 1.5384615384615387e-06, |
|
"loss": 0.1306, |
|
"step": 1262 |
|
}, |
|
{ |
|
"epoch": 126.3, |
|
"learning_rate": 1.4979757085020242e-06, |
|
"loss": 0.109, |
|
"step": 1263 |
|
}, |
|
{ |
|
"epoch": 126.4, |
|
"learning_rate": 1.4574898785425101e-06, |
|
"loss": 0.1471, |
|
"step": 1264 |
|
}, |
|
{ |
|
"epoch": 126.5, |
|
"learning_rate": 1.417004048582996e-06, |
|
"loss": 0.1115, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 126.6, |
|
"learning_rate": 1.376518218623482e-06, |
|
"loss": 0.1181, |
|
"step": 1266 |
|
}, |
|
{ |
|
"epoch": 126.7, |
|
"learning_rate": 1.3360323886639677e-06, |
|
"loss": 0.111, |
|
"step": 1267 |
|
}, |
|
{ |
|
"epoch": 126.8, |
|
"learning_rate": 1.2955465587044534e-06, |
|
"loss": 0.101, |
|
"step": 1268 |
|
}, |
|
{ |
|
"epoch": 126.9, |
|
"learning_rate": 1.2550607287449393e-06, |
|
"loss": 0.1125, |
|
"step": 1269 |
|
}, |
|
{ |
|
"epoch": 127.0, |
|
"learning_rate": 1.2145748987854253e-06, |
|
"loss": 0.1768, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 127.0, |
|
"eval_accuracy_safe": 0.5278776874414517, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.995465784789138, |
|
"eval_iou_safe": 0.4594362903401685, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9813633877670189, |
|
"eval_loss": 0.10365582257509232, |
|
"eval_mean_accuracy": 0.7616717361152949, |
|
"eval_mean_iou": 0.7203998390535937, |
|
"eval_overall_accuracy": 0.9816539821340077, |
|
"eval_runtime": 18.3333, |
|
"eval_samples_per_second": 3.655, |
|
"eval_steps_per_second": 0.273, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 127.1, |
|
"learning_rate": 1.174089068825911e-06, |
|
"loss": 0.0945, |
|
"step": 1271 |
|
}, |
|
{ |
|
"epoch": 127.2, |
|
"learning_rate": 1.133603238866397e-06, |
|
"loss": 0.1141, |
|
"step": 1272 |
|
}, |
|
{ |
|
"epoch": 127.3, |
|
"learning_rate": 1.0931174089068826e-06, |
|
"loss": 0.1066, |
|
"step": 1273 |
|
}, |
|
{ |
|
"epoch": 127.4, |
|
"learning_rate": 1.0526315789473685e-06, |
|
"loss": 0.1334, |
|
"step": 1274 |
|
}, |
|
{ |
|
"epoch": 127.5, |
|
"learning_rate": 1.0121457489878542e-06, |
|
"loss": 0.1176, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 127.6, |
|
"learning_rate": 9.716599190283402e-07, |
|
"loss": 0.1162, |
|
"step": 1276 |
|
}, |
|
{ |
|
"epoch": 127.7, |
|
"learning_rate": 9.31174089068826e-07, |
|
"loss": 0.0927, |
|
"step": 1277 |
|
}, |
|
{ |
|
"epoch": 127.8, |
|
"learning_rate": 8.906882591093117e-07, |
|
"loss": 0.1288, |
|
"step": 1278 |
|
}, |
|
{ |
|
"epoch": 127.9, |
|
"learning_rate": 8.502024291497976e-07, |
|
"loss": 0.1166, |
|
"step": 1279 |
|
}, |
|
{ |
|
"epoch": 128.0, |
|
"learning_rate": 8.097165991902834e-07, |
|
"loss": 0.1527, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 128.0, |
|
"eval_accuracy_safe": 0.5170874437646732, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9956303506643592, |
|
"eval_iou_safe": 0.4521728284357186, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9812079316895111, |
|
"eval_loss": 0.10267864167690277, |
|
"eval_mean_accuracy": 0.7563588972145162, |
|
"eval_mean_iou": 0.7166903800626149, |
|
"eval_overall_accuracy": 0.9814949605002332, |
|
"eval_runtime": 24.9958, |
|
"eval_samples_per_second": 2.68, |
|
"eval_steps_per_second": 0.2, |
|
"step": 1280 |
|
} |
|
], |
|
"max_steps": 1300, |
|
"num_train_epochs": 130, |
|
"total_flos": 6.91944992331006e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|