|
{ |
|
"best_metric": 0.14203643798828125, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/safety-utcustom-train-SF-RGBD-b0/checkpoint-680", |
|
"epoch": 68.0, |
|
"global_step": 680, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 1.0566, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 1.0568, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1e-05, |
|
"loss": 1.0517, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 1.0462, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 1.0401, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 2e-05, |
|
"loss": 1.0392, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"loss": 1.025, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 1.0164, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 3e-05, |
|
"loss": 1.0188, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 1.0084, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_accuracy_safe": 0.03683871689006596, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.7845366863390846, |
|
"eval_iou_safe": 0.016300672001855917, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.766643132432382, |
|
"eval_loss": 1.0688321590423584, |
|
"eval_mean_accuracy": 0.41068770161457524, |
|
"eval_mean_iou": 0.260981268144746, |
|
"eval_overall_accuracy": 0.7624508871960995, |
|
"eval_runtime": 17.0867, |
|
"eval_samples_per_second": 3.921, |
|
"eval_steps_per_second": 0.293, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"loss": 1.0016, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4e-05, |
|
"loss": 0.9714, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.9643, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.951, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 5e-05, |
|
"loss": 0.9372, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 4.9824561403508773e-05, |
|
"loss": 0.9318, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 4.9649122807017544e-05, |
|
"loss": 0.9234, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 4.9473684210526315e-05, |
|
"loss": 0.8955, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 4.9298245614035086e-05, |
|
"loss": 0.8359, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 4.912280701754386e-05, |
|
"loss": 0.8483, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_accuracy_safe": 0.00020817190373205964, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9980423994443833, |
|
"eval_iou_safe": 0.00020692073211620515, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9686639129241729, |
|
"eval_loss": 0.873957633972168, |
|
"eval_mean_accuracy": 0.4991252856740577, |
|
"eval_mean_iou": 0.3229569445520964, |
|
"eval_overall_accuracy": 0.9685679763110716, |
|
"eval_runtime": 9.9346, |
|
"eval_samples_per_second": 6.744, |
|
"eval_steps_per_second": 0.503, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.8947368421052635e-05, |
|
"loss": 0.814, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 4.8771929824561406e-05, |
|
"loss": 0.8311, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 4.859649122807018e-05, |
|
"loss": 0.8158, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 4.842105263157895e-05, |
|
"loss": 0.8113, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 4.824561403508772e-05, |
|
"loss": 0.7571, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 4.807017543859649e-05, |
|
"loss": 0.7934, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.789473684210526e-05, |
|
"loss": 0.7214, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 4.771929824561404e-05, |
|
"loss": 0.7534, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 4.754385964912281e-05, |
|
"loss": 0.7021, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.7058, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_accuracy_safe": 0.0008673829322169151, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.992971247730839, |
|
"eval_iou_safe": 0.0008545401719334826, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9641277576335231, |
|
"eval_loss": 0.74162358045578, |
|
"eval_mean_accuracy": 0.49691931533152794, |
|
"eval_mean_iou": 0.3216607659351522, |
|
"eval_overall_accuracy": 0.9636660903247435, |
|
"eval_runtime": 9.5465, |
|
"eval_samples_per_second": 7.018, |
|
"eval_steps_per_second": 0.524, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 4.719298245614036e-05, |
|
"loss": 0.6607, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 4.701754385964913e-05, |
|
"loss": 0.6468, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 4.68421052631579e-05, |
|
"loss": 0.6593, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.6234, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 4.649122807017544e-05, |
|
"loss": 0.6841, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 4.6315789473684214e-05, |
|
"loss": 0.6508, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 4.6140350877192985e-05, |
|
"loss": 0.6758, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 4.5964912280701756e-05, |
|
"loss": 0.5821, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 4.5789473684210527e-05, |
|
"loss": 0.6304, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 4.56140350877193e-05, |
|
"loss": 0.578, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_accuracy_safe": 0.0007363117335708035, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9952830902666999, |
|
"eval_iou_safe": 0.0007249429724180075, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.966194230792341, |
|
"eval_loss": 0.5968723297119141, |
|
"eval_mean_accuracy": 0.49800970100013536, |
|
"eval_mean_iou": 0.32230639125491967, |
|
"eval_overall_accuracy": 0.965905773105906, |
|
"eval_runtime": 9.4884, |
|
"eval_samples_per_second": 7.061, |
|
"eval_steps_per_second": 0.527, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 4.5438596491228075e-05, |
|
"loss": 0.6387, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 4.5263157894736846e-05, |
|
"loss": 0.5828, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 4.508771929824562e-05, |
|
"loss": 0.5795, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 4.491228070175439e-05, |
|
"loss": 0.5699, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.5602, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 4.456140350877193e-05, |
|
"loss": 0.5501, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 4.43859649122807e-05, |
|
"loss": 0.608, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 4.421052631578947e-05, |
|
"loss": 0.5565, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 4.403508771929824e-05, |
|
"loss": 0.5368, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4.3859649122807014e-05, |
|
"loss": 0.5531, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_safe": 0.0060851731489084465, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9974240893698892, |
|
"eval_iou_safe": 0.005865184362233192, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.968219331241184, |
|
"eval_loss": 0.5068420767784119, |
|
"eval_mean_accuracy": 0.5017546312593989, |
|
"eval_mean_iou": 0.3246948385344724, |
|
"eval_overall_accuracy": 0.9681415273182428, |
|
"eval_runtime": 9.555, |
|
"eval_samples_per_second": 7.012, |
|
"eval_steps_per_second": 0.523, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 4.368421052631579e-05, |
|
"loss": 0.5522, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 4.350877192982456e-05, |
|
"loss": 0.5304, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.5472, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 4.3157894736842105e-05, |
|
"loss": 0.5052, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 4.298245614035088e-05, |
|
"loss": 0.5344, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 4.2807017543859654e-05, |
|
"loss": 0.4878, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 4.2631578947368425e-05, |
|
"loss": 0.558, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 4.2456140350877196e-05, |
|
"loss": 0.4542, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 4.228070175438597e-05, |
|
"loss": 0.4765, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.4786, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_accuracy_safe": 0.009651080759133542, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.996145227712823, |
|
"eval_iou_safe": 0.00919338047228307, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.967087021440742, |
|
"eval_loss": 0.45749950408935547, |
|
"eval_mean_accuracy": 0.5028981542359783, |
|
"eval_mean_iou": 0.325426800637675, |
|
"eval_overall_accuracy": 0.967005772377128, |
|
"eval_runtime": 9.3868, |
|
"eval_samples_per_second": 7.138, |
|
"eval_steps_per_second": 0.533, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 4.1929824561403516e-05, |
|
"loss": 0.4643, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 4.1754385964912287e-05, |
|
"loss": 0.5082, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 4.157894736842106e-05, |
|
"loss": 0.4564, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 4.140350877192983e-05, |
|
"loss": 0.4646, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 4.12280701754386e-05, |
|
"loss": 0.5312, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 4.105263157894737e-05, |
|
"loss": 0.4676, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 4.087719298245614e-05, |
|
"loss": 0.4669, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 4.070175438596491e-05, |
|
"loss": 0.4406, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 4.0526315789473684e-05, |
|
"loss": 0.4704, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 4.0350877192982455e-05, |
|
"loss": 0.4681, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_accuracy_safe": 0.006705833824850328, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.998296845861793, |
|
"eval_iou_safe": 0.006371969925913715, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.969000696975069, |
|
"eval_loss": 0.4381723403930664, |
|
"eval_mean_accuracy": 0.5025013398433217, |
|
"eval_mean_iou": 0.32512422230032756, |
|
"eval_overall_accuracy": 0.9690068373039588, |
|
"eval_runtime": 10.1282, |
|
"eval_samples_per_second": 6.615, |
|
"eval_steps_per_second": 0.494, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 4.017543859649123e-05, |
|
"loss": 0.4417, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 4e-05, |
|
"loss": 0.4517, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 3.9824561403508774e-05, |
|
"loss": 0.4441, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"learning_rate": 3.9649122807017545e-05, |
|
"loss": 0.4982, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 3.9473684210526316e-05, |
|
"loss": 0.479, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"learning_rate": 3.929824561403509e-05, |
|
"loss": 0.4123, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"learning_rate": 3.912280701754386e-05, |
|
"loss": 0.4351, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 7.8, |
|
"learning_rate": 3.894736842105263e-05, |
|
"loss": 0.4318, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"learning_rate": 3.877192982456141e-05, |
|
"loss": 0.4846, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 3.859649122807018e-05, |
|
"loss": 0.4139, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_accuracy_safe": 0.0016518826064664362, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9980280842666458, |
|
"eval_iou_safe": 0.0015664870476928589, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.968595196515178, |
|
"eval_loss": 0.3973134160041809, |
|
"eval_mean_accuracy": 0.4998399834365561, |
|
"eval_mean_iou": 0.3233872278542903, |
|
"eval_overall_accuracy": 0.968596728880014, |
|
"eval_runtime": 11.1226, |
|
"eval_samples_per_second": 6.024, |
|
"eval_steps_per_second": 0.45, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 8.1, |
|
"learning_rate": 3.842105263157895e-05, |
|
"loss": 0.433, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 8.2, |
|
"learning_rate": 3.824561403508773e-05, |
|
"loss": 0.394, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 8.3, |
|
"learning_rate": 3.80701754385965e-05, |
|
"loss": 0.444, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"learning_rate": 3.789473684210527e-05, |
|
"loss": 0.4225, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 3.771929824561404e-05, |
|
"loss": 0.3741, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 3.754385964912281e-05, |
|
"loss": 0.4193, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"learning_rate": 3.736842105263158e-05, |
|
"loss": 0.399, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"learning_rate": 3.719298245614035e-05, |
|
"loss": 0.4082, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 8.9, |
|
"learning_rate": 3.7017543859649124e-05, |
|
"loss": 0.3906, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 3.6842105263157895e-05, |
|
"loss": 0.4275, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_accuracy_safe": 0.007727418167239139, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9994094402495628, |
|
"eval_iou_safe": 0.007580341408442182, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9701099208246821, |
|
"eval_loss": 0.3982888162136078, |
|
"eval_mean_accuracy": 0.503568429208401, |
|
"eval_mean_iou": 0.48884513111656214, |
|
"eval_overall_accuracy": 0.9701167434009154, |
|
"eval_runtime": 9.6246, |
|
"eval_samples_per_second": 6.961, |
|
"eval_steps_per_second": 0.52, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"loss": 0.4041, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 3.6491228070175443e-05, |
|
"loss": 0.3714, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"learning_rate": 3.6315789473684214e-05, |
|
"loss": 0.4078, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"learning_rate": 3.6140350877192985e-05, |
|
"loss": 0.4167, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 3.5964912280701756e-05, |
|
"loss": 0.4058, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"learning_rate": 3.578947368421053e-05, |
|
"loss": 0.3794, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 9.7, |
|
"learning_rate": 3.56140350877193e-05, |
|
"loss": 0.3786, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 3.543859649122807e-05, |
|
"loss": 0.3545, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"learning_rate": 3.526315789473684e-05, |
|
"loss": 0.3598, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 3.508771929824561e-05, |
|
"loss": 0.3975, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_safe": 0.0008095574034024542, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9997554686032364, |
|
"eval_iou_safe": 0.0008031575565652393, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9702474966446942, |
|
"eval_loss": 0.33975234627723694, |
|
"eval_mean_accuracy": 0.5002825130033194, |
|
"eval_mean_iou": 0.3236835514004198, |
|
"eval_overall_accuracy": 0.9702482081171292, |
|
"eval_runtime": 9.7288, |
|
"eval_samples_per_second": 6.887, |
|
"eval_steps_per_second": 0.514, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 10.1, |
|
"learning_rate": 3.491228070175438e-05, |
|
"loss": 0.3621, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 10.2, |
|
"learning_rate": 3.473684210526316e-05, |
|
"loss": 0.37, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 10.3, |
|
"learning_rate": 3.456140350877193e-05, |
|
"loss": 0.359, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 10.4, |
|
"learning_rate": 3.43859649122807e-05, |
|
"loss": 0.3523, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 3.421052631578947e-05, |
|
"loss": 0.3318, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 10.6, |
|
"learning_rate": 3.403508771929825e-05, |
|
"loss": 0.3661, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 10.7, |
|
"learning_rate": 3.385964912280702e-05, |
|
"loss": 0.3563, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 10.8, |
|
"learning_rate": 3.368421052631579e-05, |
|
"loss": 0.3595, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 10.9, |
|
"learning_rate": 3.3508771929824564e-05, |
|
"loss": 0.3554, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.4325, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"eval_accuracy_safe": 0.09409370048689095, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9992699846041437, |
|
"eval_iou_safe": 0.09189049790961165, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9724559733145028, |
|
"eval_loss": 0.37850186228752136, |
|
"eval_mean_accuracy": 0.5466818425455173, |
|
"eval_mean_iou": 0.3547821570747048, |
|
"eval_overall_accuracy": 0.9725325285498776, |
|
"eval_runtime": 10.4486, |
|
"eval_samples_per_second": 6.412, |
|
"eval_steps_per_second": 0.479, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 11.1, |
|
"learning_rate": 3.3157894736842106e-05, |
|
"loss": 0.3409, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 11.2, |
|
"learning_rate": 3.2982456140350884e-05, |
|
"loss": 0.3633, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 11.3, |
|
"learning_rate": 3.2807017543859655e-05, |
|
"loss": 0.3483, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 11.4, |
|
"learning_rate": 3.2631578947368426e-05, |
|
"loss": 0.3548, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 3.24561403508772e-05, |
|
"loss": 0.3474, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 11.6, |
|
"learning_rate": 3.228070175438597e-05, |
|
"loss": 0.3529, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 11.7, |
|
"learning_rate": 3.210526315789474e-05, |
|
"loss": 0.3309, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 11.8, |
|
"learning_rate": 3.192982456140351e-05, |
|
"loss": 0.3306, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 11.9, |
|
"learning_rate": 3.175438596491228e-05, |
|
"loss": 0.3194, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 3.157894736842105e-05, |
|
"loss": 0.3239, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"eval_accuracy_safe": 0.07718166082628826, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9994698690736191, |
|
"eval_iou_safe": 0.07586054028042748, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9721634919205556, |
|
"eval_loss": 0.33375129103660583, |
|
"eval_mean_accuracy": 0.5383257649499537, |
|
"eval_mean_iou": 0.34934134406699435, |
|
"eval_overall_accuracy": 0.972226954218167, |
|
"eval_runtime": 9.4146, |
|
"eval_samples_per_second": 7.117, |
|
"eval_steps_per_second": 0.531, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 12.1, |
|
"learning_rate": 3.140350877192982e-05, |
|
"loss": 0.3293, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 12.2, |
|
"learning_rate": 3.12280701754386e-05, |
|
"loss": 0.3444, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 12.3, |
|
"learning_rate": 3.105263157894737e-05, |
|
"loss": 0.3658, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 12.4, |
|
"learning_rate": 3.087719298245614e-05, |
|
"loss": 0.3282, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 3.0701754385964913e-05, |
|
"loss": 0.3397, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 12.6, |
|
"learning_rate": 3.0526315789473684e-05, |
|
"loss": 0.3385, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 12.7, |
|
"learning_rate": 3.035087719298246e-05, |
|
"loss": 0.3236, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 12.8, |
|
"learning_rate": 3.017543859649123e-05, |
|
"loss": 0.3293, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 12.9, |
|
"learning_rate": 3e-05, |
|
"loss": 0.3317, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 2.9824561403508772e-05, |
|
"loss": 0.3733, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"eval_accuracy_safe": 0.07628151009440981, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9995119932441748, |
|
"eval_iou_safe": 0.07507778115040219, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9721785568914258, |
|
"eval_loss": 0.3012864589691162, |
|
"eval_mean_accuracy": 0.5378967516692923, |
|
"eval_mean_iou": 0.523628169020914, |
|
"eval_overall_accuracy": 0.972241245098968, |
|
"eval_runtime": 9.4095, |
|
"eval_samples_per_second": 7.12, |
|
"eval_steps_per_second": 0.531, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 13.1, |
|
"learning_rate": 2.9649122807017543e-05, |
|
"loss": 0.3196, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 13.2, |
|
"learning_rate": 2.9473684210526314e-05, |
|
"loss": 0.3177, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 13.3, |
|
"learning_rate": 2.929824561403509e-05, |
|
"loss": 0.3177, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 13.4, |
|
"learning_rate": 2.9122807017543863e-05, |
|
"loss": 0.3131, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 2.8947368421052634e-05, |
|
"loss": 0.3246, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 13.6, |
|
"learning_rate": 2.8771929824561404e-05, |
|
"loss": 0.3093, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 13.7, |
|
"learning_rate": 2.8596491228070175e-05, |
|
"loss": 0.3346, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 13.8, |
|
"learning_rate": 2.842105263157895e-05, |
|
"loss": 0.3108, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 13.9, |
|
"learning_rate": 2.824561403508772e-05, |
|
"loss": 0.3331, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 2.8070175438596492e-05, |
|
"loss": 0.3165, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"eval_accuracy_safe": 0.08003631443209548, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9994350198294546, |
|
"eval_iou_safe": 0.07857775456444727, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9722117608452171, |
|
"eval_loss": 0.28493982553482056, |
|
"eval_mean_accuracy": 0.539735667130775, |
|
"eval_mean_iou": 0.5253947577048321, |
|
"eval_overall_accuracy": 0.9722774562551014, |
|
"eval_runtime": 10.0221, |
|
"eval_samples_per_second": 6.685, |
|
"eval_steps_per_second": 0.499, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 14.1, |
|
"learning_rate": 2.7894736842105263e-05, |
|
"loss": 0.3037, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 14.2, |
|
"learning_rate": 2.7719298245614034e-05, |
|
"loss": 0.3372, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 14.3, |
|
"learning_rate": 2.754385964912281e-05, |
|
"loss": 0.3018, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 14.4, |
|
"learning_rate": 2.7368421052631583e-05, |
|
"loss": 0.3047, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 2.7192982456140354e-05, |
|
"loss": 0.3158, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 14.6, |
|
"learning_rate": 2.7017543859649125e-05, |
|
"loss": 0.3036, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 14.7, |
|
"learning_rate": 2.6842105263157896e-05, |
|
"loss": 0.3219, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 14.8, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 0.2892, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 14.9, |
|
"learning_rate": 2.6491228070175438e-05, |
|
"loss": 0.3252, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 2.6315789473684212e-05, |
|
"loss": 0.3329, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_safe": 0.11177481968072599, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.999028621320486, |
|
"eval_iou_safe": 0.10831796981091193, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9727305324523915, |
|
"eval_loss": 0.3002106547355652, |
|
"eval_mean_accuracy": 0.555401720500606, |
|
"eval_mean_iou": 0.5405242511316517, |
|
"eval_overall_accuracy": 0.9728205666613223, |
|
"eval_runtime": 10.0001, |
|
"eval_samples_per_second": 6.7, |
|
"eval_steps_per_second": 0.5, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 15.1, |
|
"learning_rate": 2.6140350877192983e-05, |
|
"loss": 0.3213, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 15.2, |
|
"learning_rate": 2.5964912280701754e-05, |
|
"loss": 0.2808, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 15.3, |
|
"learning_rate": 2.578947368421053e-05, |
|
"loss": 0.2909, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 15.4, |
|
"learning_rate": 2.5614035087719303e-05, |
|
"loss": 0.2886, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 2.5438596491228074e-05, |
|
"loss": 0.2776, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 15.6, |
|
"learning_rate": 2.5263157894736845e-05, |
|
"loss": 0.2945, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 15.7, |
|
"learning_rate": 2.5087719298245616e-05, |
|
"loss": 0.3205, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 15.8, |
|
"learning_rate": 2.4912280701754387e-05, |
|
"loss": 0.2827, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 15.9, |
|
"learning_rate": 2.4736842105263158e-05, |
|
"loss": 0.3261, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 2.456140350877193e-05, |
|
"loss": 0.3214, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"eval_accuracy_safe": 0.09076873258005945, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9994798427630265, |
|
"eval_iou_safe": 0.08924361530356209, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9725644143423847, |
|
"eval_loss": 0.27253061532974243, |
|
"eval_mean_accuracy": 0.545124287671543, |
|
"eval_mean_iou": 0.5309040148229734, |
|
"eval_overall_accuracy": 0.972637973614593, |
|
"eval_runtime": 9.3554, |
|
"eval_samples_per_second": 7.162, |
|
"eval_steps_per_second": 0.534, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 16.1, |
|
"learning_rate": 2.4385964912280703e-05, |
|
"loss": 0.2676, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 16.2, |
|
"learning_rate": 2.4210526315789474e-05, |
|
"loss": 0.2881, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 16.3, |
|
"learning_rate": 2.4035087719298245e-05, |
|
"loss": 0.2869, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 16.4, |
|
"learning_rate": 2.385964912280702e-05, |
|
"loss": 0.3117, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 2.368421052631579e-05, |
|
"loss": 0.2852, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 16.6, |
|
"learning_rate": 2.3508771929824565e-05, |
|
"loss": 0.3013, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 16.7, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"loss": 0.3106, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 16.8, |
|
"learning_rate": 2.3157894736842107e-05, |
|
"loss": 0.3104, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 16.9, |
|
"learning_rate": 2.2982456140350878e-05, |
|
"loss": 0.2777, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 2.280701754385965e-05, |
|
"loss": 0.2744, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"eval_accuracy_safe": 0.15732206121024978, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9985794532845882, |
|
"eval_iou_safe": 0.15030708175648924, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9736074113594023, |
|
"eval_loss": 0.2896406948566437, |
|
"eval_mean_accuracy": 0.577950757247419, |
|
"eval_mean_iou": 0.5619572465579458, |
|
"eval_overall_accuracy": 0.9737300588123834, |
|
"eval_runtime": 9.4906, |
|
"eval_samples_per_second": 7.06, |
|
"eval_steps_per_second": 0.527, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 17.1, |
|
"learning_rate": 2.2631578947368423e-05, |
|
"loss": 0.2712, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 17.2, |
|
"learning_rate": 2.2456140350877194e-05, |
|
"loss": 0.2807, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 17.3, |
|
"learning_rate": 2.2280701754385965e-05, |
|
"loss": 0.2618, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 17.4, |
|
"learning_rate": 2.2105263157894736e-05, |
|
"loss": 0.2755, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 2.1929824561403507e-05, |
|
"loss": 0.2889, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 17.6, |
|
"learning_rate": 2.175438596491228e-05, |
|
"loss": 0.277, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 17.7, |
|
"learning_rate": 2.1578947368421053e-05, |
|
"loss": 0.2669, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 17.8, |
|
"learning_rate": 2.1403508771929827e-05, |
|
"loss": 0.2867, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 17.9, |
|
"learning_rate": 2.1228070175438598e-05, |
|
"loss": 0.3369, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.2948, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"eval_accuracy_safe": 0.13295631088546306, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9988779599416738, |
|
"eval_iou_safe": 0.12822929505304623, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9731947501724811, |
|
"eval_loss": 0.2564152479171753, |
|
"eval_mean_accuracy": 0.5659171354135685, |
|
"eval_mean_iou": 0.5507120226127636, |
|
"eval_overall_accuracy": 0.9733000228654093, |
|
"eval_runtime": 9.4795, |
|
"eval_samples_per_second": 7.068, |
|
"eval_steps_per_second": 0.527, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 18.1, |
|
"learning_rate": 2.0877192982456143e-05, |
|
"loss": 0.2978, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 18.2, |
|
"learning_rate": 2.0701754385964914e-05, |
|
"loss": 0.2467, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 18.3, |
|
"learning_rate": 2.0526315789473685e-05, |
|
"loss": 0.2906, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 18.4, |
|
"learning_rate": 2.0350877192982456e-05, |
|
"loss": 0.2572, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 2.0175438596491227e-05, |
|
"loss": 0.2631, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 18.6, |
|
"learning_rate": 2e-05, |
|
"loss": 0.2838, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 18.7, |
|
"learning_rate": 1.9824561403508773e-05, |
|
"loss": 0.2543, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 18.8, |
|
"learning_rate": 1.9649122807017544e-05, |
|
"loss": 0.2629, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 18.9, |
|
"learning_rate": 1.9473684210526315e-05, |
|
"loss": 0.2622, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 1.929824561403509e-05, |
|
"loss": 0.2653, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"eval_accuracy_safe": 0.17323757425761657, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9986642883133119, |
|
"eval_iou_safe": 0.1659548460122348, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.97415022926933, |
|
"eval_loss": 0.25176867842674255, |
|
"eval_mean_accuracy": 0.5859509312854643, |
|
"eval_mean_iou": 0.5700525376407825, |
|
"eval_overall_accuracy": 0.974282506686538, |
|
"eval_runtime": 11.4222, |
|
"eval_samples_per_second": 5.866, |
|
"eval_steps_per_second": 0.438, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 19.1, |
|
"learning_rate": 1.9122807017543863e-05, |
|
"loss": 0.2738, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 19.2, |
|
"learning_rate": 1.8947368421052634e-05, |
|
"loss": 0.2741, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 19.3, |
|
"learning_rate": 1.8771929824561405e-05, |
|
"loss": 0.2625, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 19.4, |
|
"learning_rate": 1.8596491228070176e-05, |
|
"loss": 0.2632, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.247, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 19.6, |
|
"learning_rate": 1.8245614035087722e-05, |
|
"loss": 0.2735, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 19.7, |
|
"learning_rate": 1.8070175438596493e-05, |
|
"loss": 0.2583, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 19.8, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.2458, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 19.9, |
|
"learning_rate": 1.7719298245614035e-05, |
|
"loss": 0.2588, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 1.7543859649122806e-05, |
|
"loss": 0.3026, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_safe": 0.14083407542761978, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9990070312163571, |

"eval_iou_safe": 0.13638476313495493, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9735479371956898, |
|
"eval_loss": 0.2531002461910248, |
|
"eval_mean_accuracy": 0.5699205533219884, |
|
"eval_mean_iou": 0.5549663501653224, |
|
"eval_overall_accuracy": 0.9736579781147972, |
|
"eval_runtime": 9.5606, |
|
"eval_samples_per_second": 7.008, |
|
"eval_steps_per_second": 0.523, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 20.1, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.306, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 20.2, |
|
"learning_rate": 1.719298245614035e-05, |
|
"loss": 0.2678, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 20.3, |
|
"learning_rate": 1.7017543859649125e-05, |
|
"loss": 0.2485, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 20.4, |
|
"learning_rate": 1.6842105263157896e-05, |
|
"loss": 0.2974, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.2612, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 20.6, |
|
"learning_rate": 1.6491228070175442e-05, |
|
"loss": 0.2509, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 20.7, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.2503, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 20.8, |
|
"learning_rate": 1.6140350877192984e-05, |
|
"loss": 0.2584, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 20.9, |
|
"learning_rate": 1.5964912280701755e-05, |
|
"loss": 0.2552, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.2649, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"eval_accuracy_safe": 0.18018242026823336, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9985888402863834, |

"eval_iou_safe": 0.17219883762699062, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974277524161329, |
|
"eval_loss": 0.2383996993303299, |
|
"eval_mean_accuracy": 0.5893856302773084, |
|
"eval_mean_iou": 0.5732381808941598, |
|
"eval_overall_accuracy": 0.9744144268889925, |
|
"eval_runtime": 9.6539, |
|
"eval_samples_per_second": 6.94, |
|
"eval_steps_per_second": 0.518, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 21.1, |
|
"learning_rate": 1.56140350877193e-05, |
|
"loss": 0.2475, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 21.2, |
|
"learning_rate": 1.543859649122807e-05, |
|
"loss": 0.254, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 21.3, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.2623, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 21.4, |
|
"learning_rate": 1.5087719298245615e-05, |
|
"loss": 0.2521, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 1.4912280701754386e-05, |
|
"loss": 0.233, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 21.6, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.249, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 21.7, |
|
"learning_rate": 1.4561403508771931e-05, |
|
"loss": 0.2474, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 21.8, |
|
"learning_rate": 1.4385964912280702e-05, |
|
"loss": 0.2573, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 21.9, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.2623, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 1.4035087719298246e-05, |
|
"loss": 0.2431, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"eval_accuracy_safe": 0.19926677229463263, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9983434875269627, |

"eval_iou_safe": 0.1889817324970706, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974590481658128, |
|
"eval_loss": 0.2389637678861618, |
|
"eval_mean_accuracy": 0.5988051299107977, |
|
"eval_mean_iou": 0.5817861070775993, |
|
"eval_overall_accuracy": 0.9747400426152927, |
|
"eval_runtime": 10.415, |
|
"eval_samples_per_second": 6.433, |
|
"eval_steps_per_second": 0.48, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 22.1, |
|
"learning_rate": 1.3859649122807017e-05, |
|
"loss": 0.2517, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 22.2, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.2393, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 22.3, |
|
"learning_rate": 1.3508771929824562e-05, |
|
"loss": 0.2455, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 22.4, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 0.2367, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.2516, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 22.6, |
|
"learning_rate": 1.2982456140350877e-05, |
|
"loss": 0.2405, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 22.7, |
|
"learning_rate": 1.2807017543859651e-05, |
|
"loss": 0.2342, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 22.8, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.2365, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 22.9, |
|
"learning_rate": 1.2456140350877193e-05, |
|
"loss": 0.2546, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 1.2280701754385964e-05, |
|
"loss": 0.2608, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"eval_accuracy_safe": 0.23167027112462943, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9980992494740052, |

"eval_iou_safe": 0.2180533381712627, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9752910790561756, |
|
"eval_loss": 0.2354857325553894, |
|
"eval_mean_accuracy": 0.6148847602993173, |
|
"eval_mean_iou": 0.5966722086137192, |
|
"eval_overall_accuracy": 0.9754601663617946, |
|
"eval_runtime": 9.0986, |
|
"eval_samples_per_second": 7.364, |
|
"eval_steps_per_second": 0.55, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 23.1, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.2717, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 23.2, |
|
"learning_rate": 1.192982456140351e-05, |
|
"loss": 0.2398, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 23.3, |
|
"learning_rate": 1.1754385964912282e-05, |
|
"loss": 0.232, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 23.4, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.2343, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 1.1403508771929824e-05, |
|
"loss": 0.2494, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 23.6, |
|
"learning_rate": 1.1228070175438597e-05, |
|
"loss": 0.2579, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 23.7, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.2554, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 23.8, |
|
"learning_rate": 1.087719298245614e-05, |
|
"loss": 0.2516, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 23.9, |
|
"learning_rate": 1.0701754385964913e-05, |
|
"loss": 0.248, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.223, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"eval_accuracy_safe": 0.16970828948230732, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9988791333168983, |

"eval_iou_safe": 0.1636807106061085, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974257712308983, |
|
"eval_loss": 0.22904758155345917, |
|
"eval_mean_accuracy": 0.5842937113996027, |
|
"eval_mean_iou": 0.5689692114575458, |
|
"eval_overall_accuracy": 0.9743867560998717, |
|
"eval_runtime": 9.2103, |
|
"eval_samples_per_second": 7.274, |
|
"eval_steps_per_second": 0.543, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 24.1, |
|
"learning_rate": 1.0350877192982457e-05, |
|
"loss": 0.2361, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 24.2, |
|
"learning_rate": 1.0175438596491228e-05, |
|
"loss": 0.2487, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 24.3, |
|
"learning_rate": 1e-05, |
|
"loss": 0.2318, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 24.4, |
|
"learning_rate": 9.824561403508772e-06, |
|
"loss": 0.2492, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 9.649122807017545e-06, |
|
"loss": 0.2644, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 24.6, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.2297, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 24.7, |
|
"learning_rate": 9.298245614035088e-06, |
|
"loss": 0.2757, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 24.8, |
|
"learning_rate": 9.122807017543861e-06, |
|
"loss": 0.2373, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 24.9, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.2465, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 8.771929824561403e-06, |
|
"loss": 0.2448, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_safe": 0.21411444057655907, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9984515553851293, |

"eval_iou_safe": 0.2037491172883097, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9751261777280302, |
|
"eval_loss": 0.22617702186107635, |
|
"eval_mean_accuracy": 0.6062829979808442, |
|
"eval_mean_iou": 0.58943764750817, |
|
"eval_overall_accuracy": 0.975283494636194, |
|
"eval_runtime": 9.8675, |
|
"eval_samples_per_second": 6.79, |
|
"eval_steps_per_second": 0.507, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 25.1, |
|
"learning_rate": 8.596491228070176e-06, |
|
"loss": 0.2223, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 25.2, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.2382, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 25.3, |
|
"learning_rate": 8.245614035087721e-06, |
|
"loss": 0.2313, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 25.4, |
|
"learning_rate": 8.070175438596492e-06, |
|
"loss": 0.2753, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.2313, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 25.6, |
|
"learning_rate": 7.719298245614036e-06, |
|
"loss": 0.2392, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 25.7, |
|
"learning_rate": 7.5438596491228074e-06, |
|
"loss": 0.2287, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 25.8, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.2145, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 25.9, |
|
"learning_rate": 7.192982456140351e-06, |
|
"loss": 0.2461, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 7.017543859649123e-06, |
|
"loss": 0.2547, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"eval_accuracy_safe": 0.2736573875968096, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.997840402899504, |

"eval_iou_safe": 0.25552723843257524, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9762572758507593, |
|
"eval_loss": 0.22811570763587952, |
|
"eval_mean_accuracy": 0.6357488952481568, |
|
"eval_mean_iou": 0.6158922571416673, |
|
"eval_overall_accuracy": 0.976449197797633, |
|
"eval_runtime": 9.8195, |
|
"eval_samples_per_second": 6.823, |
|
"eval_steps_per_second": 0.509, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 26.1, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.232, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 26.2, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 0.2341, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 26.3, |
|
"learning_rate": 6.4912280701754385e-06, |
|
"loss": 0.235, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 26.4, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.2276, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 6.140350877192982e-06, |
|
"loss": 0.2661, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 26.6, |
|
"learning_rate": 5.964912280701755e-06, |
|
"loss": 0.2416, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 26.7, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.2499, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 26.8, |
|
"learning_rate": 5.6140350877192985e-06, |
|
"loss": 0.223, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 26.9, |
|
"learning_rate": 5.43859649122807e-06, |
|
"loss": 0.2192, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.2266, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"eval_accuracy_safe": 0.2391143442006777, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9981174954587445, |

"eval_iou_safe": 0.22518692513373836, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9755248903429237, |
|
"eval_loss": 0.21912012994289398, |
|
"eval_mean_accuracy": 0.6186159198297111, |
|
"eval_mean_iou": 0.600355907738331, |
|
"eval_overall_accuracy": 0.9756977593720849, |
|
"eval_runtime": 9.2527, |
|
"eval_samples_per_second": 7.241, |
|
"eval_steps_per_second": 0.54, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 27.1, |
|
"learning_rate": 5.087719298245614e-06, |
|
"loss": 0.235, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 27.2, |
|
"learning_rate": 4.912280701754386e-06, |
|
"loss": 0.2273, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 27.3, |
|
"learning_rate": 4.736842105263159e-06, |
|
"loss": 0.2235, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 27.4, |
|
"learning_rate": 4.5614035087719304e-06, |
|
"loss": 0.2364, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.3859649122807014e-06, |
|
"loss": 0.2449, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 27.6, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.253, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 27.7, |
|
"learning_rate": 4.035087719298246e-06, |
|
"loss": 0.247, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 27.8, |
|
"learning_rate": 3.859649122807018e-06, |
|
"loss": 0.2271, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 27.9, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.2359, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.2357, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"eval_accuracy_safe": 0.2226726188410993, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9984927995242667, |

"eval_iou_safe": 0.21216656994042152, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9754146075867751, |
|
"eval_loss": 0.22175094485282898, |
|
"eval_mean_accuracy": 0.610582709182683, |
|
"eval_mean_iou": 0.5937905887635984, |
|
"eval_overall_accuracy": 0.9755763153531658, |
|
"eval_runtime": 9.6277, |
|
"eval_samples_per_second": 6.959, |
|
"eval_steps_per_second": 0.519, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 28.1, |
|
"learning_rate": 2.305263157894737e-05, |
|
"loss": 0.213, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 28.2, |
|
"learning_rate": 2.294736842105263e-05, |
|
"loss": 0.2174, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 28.3, |
|
"learning_rate": 2.2842105263157897e-05, |
|
"loss": 0.2389, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 28.4, |
|
"learning_rate": 2.273684210526316e-05, |
|
"loss": 0.2463, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 2.2631578947368423e-05, |
|
"loss": 0.2331, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 28.6, |
|
"learning_rate": 2.2526315789473683e-05, |
|
"loss": 0.2407, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 28.7, |
|
"learning_rate": 2.242105263157895e-05, |
|
"loss": 0.2369, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 28.8, |
|
"learning_rate": 2.2315789473684213e-05, |
|
"loss": 0.2691, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 28.9, |
|
"learning_rate": 2.2210526315789476e-05, |
|
"loss": 0.2388, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 2.2105263157894736e-05, |
|
"loss": 0.2563, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"eval_accuracy_safe": 0.1852113137574643, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9987948263070255, |

"eval_iou_safe": 0.17815716870865347, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9746240459457417, |
|
"eval_loss": 0.20956651866436005, |
|
"eval_mean_accuracy": 0.592003070032245, |
|
"eval_mean_iou": 0.5763906073271976, |
|
"eval_overall_accuracy": 0.9747628738631063, |
|
"eval_runtime": 9.3361, |
|
"eval_samples_per_second": 7.176, |
|
"eval_steps_per_second": 0.536, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 29.1, |
|
"learning_rate": 2.2000000000000003e-05, |
|
"loss": 0.2531, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 29.2, |
|
"learning_rate": 2.1894736842105266e-05, |
|
"loss": 0.2131, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 29.3, |
|
"learning_rate": 2.1789473684210526e-05, |
|
"loss": 0.2521, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 29.4, |
|
"learning_rate": 2.168421052631579e-05, |
|
"loss": 0.2286, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 2.1578947368421053e-05, |
|
"loss": 0.2359, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 29.6, |
|
"learning_rate": 2.147368421052632e-05, |
|
"loss": 0.2319, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 29.7, |
|
"learning_rate": 2.136842105263158e-05, |
|
"loss": 0.2117, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 29.8, |
|
"learning_rate": 2.1263157894736842e-05, |
|
"loss": 0.2248, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 29.9, |
|
"learning_rate": 2.1157894736842106e-05, |
|
"loss": 0.2189, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.226, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_safe": 0.2843975158152821, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9976802371813743, |

"eval_iou_safe": 0.2642573906315484, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9764128620744182, |
|
"eval_loss": 0.21209673583507538, |
|
"eval_mean_accuracy": 0.6410388764983282, |
|
"eval_mean_iou": 0.6203351263529833, |
|
"eval_overall_accuracy": 0.9766110092846315, |
|
"eval_runtime": 10.6202, |
|
"eval_samples_per_second": 6.309, |
|
"eval_steps_per_second": 0.471, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 30.1, |
|
"learning_rate": 2.0947368421052632e-05, |
|
"loss": 0.2109, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 30.2, |
|
"learning_rate": 2.0842105263157895e-05, |
|
"loss": 0.2058, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 30.3, |
|
"learning_rate": 2.073684210526316e-05, |
|
"loss": 0.2244, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 30.4, |
|
"learning_rate": 2.0631578947368422e-05, |
|
"loss": 0.2107, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 2.0526315789473685e-05, |
|
"loss": 0.2341, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 30.6, |
|
"learning_rate": 2.042105263157895e-05, |
|
"loss": 0.2223, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 30.7, |
|
"learning_rate": 2.0315789473684212e-05, |
|
"loss": 0.2399, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 30.8, |
|
"learning_rate": 2.0210526315789475e-05, |
|
"loss": 0.2152, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 30.9, |
|
"learning_rate": 2.010526315789474e-05, |
|
"loss": 0.2303, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 2e-05, |
|
"loss": 0.2221, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"eval_accuracy_safe": 0.2718031156394925, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9977821448196129, |

"eval_iou_safe": 0.253343035006872, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9761463764725383, |
|
"eval_loss": 0.20163898169994354, |
|
"eval_mean_accuracy": 0.6347926302295527, |
|
"eval_mean_iou": 0.6147447057397052, |
|
"eval_overall_accuracy": 0.9763378883475688, |
|
"eval_runtime": 9.6143, |
|
"eval_samples_per_second": 6.969, |
|
"eval_steps_per_second": 0.52, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 31.1, |
|
"learning_rate": 1.9894736842105265e-05, |
|
"loss": 0.2184, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 31.2, |
|
"learning_rate": 1.9789473684210528e-05, |
|
"loss": 0.2253, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 31.3, |
|
"learning_rate": 1.968421052631579e-05, |
|
"loss": 0.2304, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 31.4, |
|
"learning_rate": 1.957894736842105e-05, |
|
"loss": 0.2157, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 1.9473684210526315e-05, |
|
"loss": 0.2134, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 31.6, |
|
"learning_rate": 1.936842105263158e-05, |
|
"loss": 0.1973, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 31.7, |
|
"learning_rate": 1.9263157894736845e-05, |
|
"loss": 0.2071, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 31.8, |
|
"learning_rate": 1.9157894736842104e-05, |
|
"loss": 0.2047, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 31.9, |
|
"learning_rate": 1.9052631578947368e-05, |
|
"loss": 0.2216, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 1.8947368421052634e-05, |
|
"loss": 0.2317, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"eval_accuracy_safe": 0.2649045300519273, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.99817452149465, |

"eval_iou_safe": 0.2499158963989111, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9763296849028891, |
|
"eval_loss": 0.20076872408390045, |
|
"eval_mean_accuracy": 0.6315395257732886, |
|
"eval_mean_iou": 0.6131227906509, |
|
"eval_overall_accuracy": 0.9765149016878498, |
|
"eval_runtime": 10.0363, |
|
"eval_samples_per_second": 6.676, |
|
"eval_steps_per_second": 0.498, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 32.1, |
|
"learning_rate": 1.8842105263157894e-05, |
|
"loss": 0.2222, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 32.2, |
|
"learning_rate": 1.8736842105263158e-05, |
|
"loss": 0.2257, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 32.3, |
|
"learning_rate": 1.863157894736842e-05, |
|
"loss": 0.2139, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 32.4, |
|
"learning_rate": 1.8526315789473687e-05, |
|
"loss": 0.1901, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.2044, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 32.6, |
|
"learning_rate": 1.831578947368421e-05, |
|
"loss": 0.2069, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 32.7, |
|
"learning_rate": 1.8210526315789474e-05, |
|
"loss": 0.2133, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 32.8, |
|
"learning_rate": 1.810526315789474e-05, |
|
"loss": 0.2049, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 32.9, |
|
"learning_rate": 1.8e-05, |
|
"loss": 0.2138, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.2643, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"eval_accuracy_safe": 0.32538617815659926, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9975752200987912, |

"eval_iou_safe": 0.30137717538008896, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.977503612135026, |
|
"eval_loss": 0.19892987608909607, |
|
"eval_mean_accuracy": 0.6614806991276952, |
|
"eval_mean_iou": 0.6394403937575575, |
|
"eval_overall_accuracy": 0.9777198336017665, |
|
"eval_runtime": 10.0482, |
|
"eval_samples_per_second": 6.668, |
|
"eval_steps_per_second": 0.498, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 33.1, |
|
"learning_rate": 1.7789473684210527e-05, |
|
"loss": 0.2032, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 33.2, |
|
"learning_rate": 1.768421052631579e-05, |
|
"loss": 0.2138, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 33.3, |
|
"learning_rate": 1.7578947368421054e-05, |
|
"loss": 0.225, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 33.4, |
|
"learning_rate": 1.7473684210526317e-05, |
|
"loss": 0.1898, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.2084, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 33.6, |
|
"learning_rate": 1.7263157894736843e-05, |
|
"loss": 0.2185, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 33.7, |
|
"learning_rate": 1.7157894736842107e-05, |
|
"loss": 0.2103, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 33.8, |
|
"learning_rate": 1.705263157894737e-05, |
|
"loss": 0.192, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 33.9, |
|
"learning_rate": 1.694736842105263e-05, |
|
"loss": 0.2015, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 1.6842105263157896e-05, |
|
"loss": 0.2118, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"eval_accuracy_safe": 0.3347327111306433, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.997749583657136, |

"eval_iou_safe": 0.3116878455021897, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9779470808145468, |
|
"eval_loss": 0.1900891214609146, |
|
"eval_mean_accuracy": 0.6662411473938896, |
|
"eval_mean_iou": 0.6448174631583683, |
|
"eval_overall_accuracy": 0.9781651283378032, |
|
"eval_runtime": 9.3476, |
|
"eval_samples_per_second": 7.168, |
|
"eval_steps_per_second": 0.535, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 34.1, |
|
"learning_rate": 1.673684210526316e-05, |
|
"loss": 0.1939, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 34.2, |
|
"learning_rate": 1.6631578947368423e-05, |
|
"loss": 0.1964, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 34.3, |
|
"learning_rate": 1.6526315789473683e-05, |
|
"loss": 0.2013, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 34.4, |
|
"learning_rate": 1.642105263157895e-05, |
|
"loss": 0.2023, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.1977, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 34.6, |
|
"learning_rate": 1.6210526315789473e-05, |
|
"loss": 0.2127, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 34.7, |
|
"learning_rate": 1.6105263157894736e-05, |
|
"loss": 0.2226, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 34.8, |
|
"learning_rate": 1.6000000000000003e-05, |
|
"loss": 0.2097, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 34.9, |
|
"learning_rate": 1.5894736842105266e-05, |
|
"loss": 0.1962, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.2133, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_safe": 0.3618933620147956, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9975545100260806, |

"eval_iou_safe": 0.3349795266599463, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9785487908911983, |
|
"eval_loss": 0.19170759618282318, |
|
"eval_mean_accuracy": 0.6797239360204381, |
|
"eval_mean_iou": 0.6567641587755723, |
|
"eval_overall_accuracy": 0.9787780989461871, |
|
"eval_runtime": 10.6094, |
|
"eval_samples_per_second": 6.315, |
|
"eval_steps_per_second": 0.471, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 35.1, |
|
"learning_rate": 1.568421052631579e-05, |
|
"loss": 0.1896, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 35.2, |
|
"learning_rate": 1.5578947368421056e-05, |
|
"loss": 0.1961, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 35.3, |
|
"learning_rate": 1.5473684210526316e-05, |
|
"loss": 0.1909, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 35.4, |
|
"learning_rate": 1.536842105263158e-05, |
|
"loss": 0.2388, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.2134, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 35.6, |
|
"learning_rate": 1.5157894736842107e-05, |
|
"loss": 0.1938, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 35.7, |
|
"learning_rate": 1.5052631578947369e-05, |
|
"loss": 0.1976, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 35.8, |
|
"learning_rate": 1.4947368421052632e-05, |
|
"loss": 0.21, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 35.9, |
|
"learning_rate": 1.4842105263157895e-05, |
|
"loss": 0.1986, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.2064, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"eval_accuracy_safe": 0.3401316880042868, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9978212768833464, |

"eval_iou_safe": 0.31741129406516555, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9781749054634312, |
|
"eval_loss": 0.1860196888446808, |
|
"eval_mean_accuracy": 0.6689764824438166, |
|
"eval_mean_iou": 0.6477930997642984, |
|
"eval_overall_accuracy": 0.9783941809810809, |
|
"eval_runtime": 9.7715, |
|
"eval_samples_per_second": 6.857, |
|
"eval_steps_per_second": 0.512, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 36.1, |
|
"learning_rate": 1.4631578947368422e-05, |
|
"loss": 0.1955, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 36.2, |
|
"learning_rate": 1.4526315789473685e-05, |
|
"loss": 0.2, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 36.3, |
|
"learning_rate": 1.4421052631578948e-05, |
|
"loss": 0.1799, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 36.4, |
|
"learning_rate": 1.431578947368421e-05, |
|
"loss": 0.2033, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.1931, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 36.6, |
|
"learning_rate": 1.4105263157894738e-05, |
|
"loss": 0.1955, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 36.7, |
|
"learning_rate": 1.4000000000000001e-05, |
|
"loss": 0.1978, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 36.8, |
|
"learning_rate": 1.3894736842105263e-05, |
|
"loss": 0.1951, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 36.9, |
|
"learning_rate": 1.3789473684210526e-05, |
|
"loss": 0.1876, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.2341, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"eval_accuracy_safe": 0.2703825351482839, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9982565404228352, |

"eval_iou_safe": 0.2557340766682649, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.976569175210793, |
|
"eval_loss": 0.17752937972545624, |
|
"eval_mean_accuracy": 0.6343195377855595, |
|
"eval_mean_iou": 0.616151625939529, |
|
"eval_overall_accuracy": 0.9767563093954058, |
|
"eval_runtime": 9.2963, |
|
"eval_samples_per_second": 7.207, |
|
"eval_steps_per_second": 0.538, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 37.1, |
|
"learning_rate": 1.3578947368421053e-05, |
|
"loss": 0.1998, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 37.2, |
|
"learning_rate": 1.3473684210526316e-05, |
|
"loss": 0.1972, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 37.3, |
|
"learning_rate": 1.336842105263158e-05, |
|
"loss": 0.1904, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 37.4, |
|
"learning_rate": 1.3263157894736844e-05, |
|
"loss": 0.1917, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.1875, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 37.6, |
|
"learning_rate": 1.305263157894737e-05, |
|
"loss": 0.1848, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 37.7, |
|
"learning_rate": 1.2947368421052633e-05, |
|
"loss": 0.1964, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 37.8, |
|
"learning_rate": 1.2842105263157894e-05, |
|
"loss": 0.1845, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 37.9, |
|
"learning_rate": 1.2736842105263157e-05, |
|
"loss": 0.1866, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.2093, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"eval_accuracy_safe": 0.3552260785424883, |
|
"eval_accuracy_unlabeled": null, |

"eval_accuracy_unsafe": 0.9928191196329964, |

"eval_iou_safe": 0.28741779073267426, |

"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9737097853338179, |
|
"eval_loss": 0.1933618187904358, |
|
"eval_mean_accuracy": 0.6740225990877423, |
|
"eval_mean_iou": 0.6305637880332461, |
|
"eval_overall_accuracy": 0.9739856435291803, |
|
"eval_runtime": 9.4354, |
|
"eval_samples_per_second": 7.101, |
|
"eval_steps_per_second": 0.53, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 38.1, |
|
"learning_rate": 1.2526315789473686e-05, |
|
"loss": 0.2115, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 38.2, |
|
"learning_rate": 1.2421052631578949e-05, |
|
"loss": 0.1946, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 38.3, |
|
"learning_rate": 1.231578947368421e-05, |
|
"loss": 0.1942, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 38.4, |
|
"learning_rate": 1.2210526315789474e-05, |
|
"loss": 0.1908, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.206, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 38.6, |
|
"learning_rate": 1.2e-05, |
|
"loss": 0.1776, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 38.7, |
|
"learning_rate": 1.1894736842105264e-05, |
|
"loss": 0.1942, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 38.8, |
|
"learning_rate": 1.1789473684210527e-05, |
|
"loss": 0.178, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 38.9, |
|
"learning_rate": 1.168421052631579e-05, |
|
"loss": 0.2015, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.1958, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"eval_accuracy_safe": 0.30012027709993405, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9980261481975256, |
|
"eval_iou_safe": 0.28184293125481946, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9772090847024161, |
|
"eval_loss": 0.17546288669109344, |
|
"eval_mean_accuracy": 0.6490732126487297, |
|
"eval_mean_iou": 0.6295260079786178, |
|
"eval_overall_accuracy": 0.9774111278021513, |
|
"eval_runtime": 10.0143, |
|
"eval_samples_per_second": 6.69, |
|
"eval_steps_per_second": 0.499, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 39.1, |
|
"learning_rate": 1.1473684210526315e-05, |
|
"loss": 0.1823, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 39.2, |
|
"learning_rate": 1.136842105263158e-05, |
|
"loss": 0.2134, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 39.3, |
|
"learning_rate": 1.1263157894736842e-05, |
|
"loss": 0.1747, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 39.4, |
|
"learning_rate": 1.1157894736842106e-05, |
|
"loss": 0.1852, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.1861, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 39.6, |
|
"learning_rate": 1.0947368421052633e-05, |
|
"loss": 0.1801, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 39.7, |
|
"learning_rate": 1.0842105263157895e-05, |
|
"loss": 0.2033, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 39.8, |
|
"learning_rate": 1.073684210526316e-05, |
|
"loss": 0.1907, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 39.9, |
|
"learning_rate": 1.0631578947368421e-05, |
|
"loss": 0.1898, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.1886, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_safe": 0.3881249494026623, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9968947211374042, |
|
"eval_iou_safe": 0.35219360153638685, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9786680764654981, |
|
"eval_loss": 0.17675457894802094, |
|
"eval_mean_accuracy": 0.6925098352700332, |
|
"eval_mean_iou": 0.6654308390009425, |
|
"eval_overall_accuracy": 0.9789126381945255, |
|
"eval_runtime": 9.5568, |
|
"eval_samples_per_second": 7.011, |
|
"eval_steps_per_second": 0.523, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 40.1, |
|
"learning_rate": 1.0421052631578948e-05, |
|
"loss": 0.1862, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 40.2, |
|
"learning_rate": 1.0315789473684211e-05, |
|
"loss": 0.1751, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 40.3, |
|
"learning_rate": 1.0210526315789474e-05, |
|
"loss": 0.1777, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 40.4, |
|
"learning_rate": 1.0105263157894738e-05, |
|
"loss": 0.1875, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 1e-05, |
|
"loss": 0.1964, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 40.6, |
|
"learning_rate": 9.894736842105264e-06, |
|
"loss": 0.2019, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 40.7, |
|
"learning_rate": 9.789473684210526e-06, |
|
"loss": 0.1781, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 40.8, |
|
"learning_rate": 9.68421052631579e-06, |
|
"loss": 0.1974, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 40.9, |
|
"learning_rate": 9.578947368421052e-06, |
|
"loss": 0.1903, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.1734, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"eval_accuracy_safe": 0.3947960879102239, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9972934340386531, |
|
"eval_iou_safe": 0.3625567543168683, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9792547042493136, |
|
"eval_loss": 0.17450523376464844, |
|
"eval_mean_accuracy": 0.6960447609744385, |
|
"eval_mean_iou": 0.6709057292830909, |
|
"eval_overall_accuracy": 0.9794966284908465, |
|
"eval_runtime": 10.0572, |
|
"eval_samples_per_second": 6.662, |
|
"eval_steps_per_second": 0.497, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 41.1, |
|
"learning_rate": 9.368421052631579e-06, |
|
"loss": 0.2101, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 41.2, |
|
"learning_rate": 9.263157894736844e-06, |
|
"loss": 0.1794, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 41.3, |
|
"learning_rate": 9.157894736842105e-06, |
|
"loss": 0.2051, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 41.4, |
|
"learning_rate": 9.05263157894737e-06, |
|
"loss": 0.1762, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.1851, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 41.6, |
|
"learning_rate": 8.842105263157895e-06, |
|
"loss": 0.1835, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 41.7, |
|
"learning_rate": 8.736842105263158e-06, |
|
"loss": 0.2018, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 41.8, |
|
"learning_rate": 8.631578947368422e-06, |
|
"loss": 0.1997, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 41.9, |
|
"learning_rate": 8.526315789473685e-06, |
|
"loss": 0.1848, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.1795, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"eval_accuracy_safe": 0.41681412176514354, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9969580247307602, |
|
"eval_iou_safe": 0.3789419821537469, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9795699690858644, |
|
"eval_loss": 0.17103362083435059, |
|
"eval_mean_accuracy": 0.7068860732479518, |
|
"eval_mean_iou": 0.6792559756198057, |
|
"eval_overall_accuracy": 0.9798215040520056, |
|
"eval_runtime": 9.5328, |
|
"eval_samples_per_second": 7.028, |
|
"eval_steps_per_second": 0.525, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 42.1, |
|
"learning_rate": 8.315789473684212e-06, |
|
"loss": 0.1863, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 42.2, |
|
"learning_rate": 8.210526315789475e-06, |
|
"loss": 0.1977, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 42.3, |
|
"learning_rate": 8.105263157894736e-06, |
|
"loss": 0.1796, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 42.4, |
|
"learning_rate": 8.000000000000001e-06, |
|
"loss": 0.1833, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.1844, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 42.6, |
|
"learning_rate": 7.789473684210528e-06, |
|
"loss": 0.1899, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 42.7, |
|
"learning_rate": 7.68421052631579e-06, |
|
"loss": 0.189, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 42.8, |
|
"learning_rate": 7.578947368421054e-06, |
|
"loss": 0.1717, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 42.9, |
|
"learning_rate": 7.473684210526316e-06, |
|
"loss": 0.1709, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.222, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"eval_accuracy_safe": 0.404142620884268, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.997192699775639, |
|
"eval_iou_safe": 0.3700154592236505, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9794293850090163, |
|
"eval_loss": 0.17061825096607208, |
|
"eval_mean_accuracy": 0.7006676603299535, |
|
"eval_mean_iou": 0.6747224221163334, |
|
"eval_overall_accuracy": 0.9796749513540695, |
|
"eval_runtime": 9.3364, |
|
"eval_samples_per_second": 7.176, |
|
"eval_steps_per_second": 0.536, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 43.1, |
|
"learning_rate": 7.2631578947368426e-06, |
|
"loss": 0.1925, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 43.2, |
|
"learning_rate": 7.157894736842105e-06, |
|
"loss": 0.179, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 43.3, |
|
"learning_rate": 7.052631578947369e-06, |
|
"loss": 0.1774, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 43.4, |
|
"learning_rate": 6.9473684210526315e-06, |
|
"loss": 0.1743, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.1815, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 43.6, |
|
"learning_rate": 6.736842105263158e-06, |
|
"loss": 0.1784, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 43.7, |
|
"learning_rate": 6.631578947368422e-06, |
|
"loss": 0.1719, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 43.8, |
|
"learning_rate": 6.526315789473685e-06, |
|
"loss": 0.1847, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 43.9, |
|
"learning_rate": 6.421052631578947e-06, |
|
"loss": 0.1777, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.1831, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"eval_accuracy_safe": 0.4044336760459674, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.997242392216392, |
|
"eval_iou_safe": 0.3708362421682382, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9794867149476597, |
|
"eval_loss": 0.1686682254076004, |
|
"eval_mean_accuracy": 0.7008380341311797, |
|
"eval_mean_iou": 0.675161478557949, |
|
"eval_overall_accuracy": 0.9797317732625933, |
|
"eval_runtime": 9.4019, |
|
"eval_samples_per_second": 7.126, |
|
"eval_steps_per_second": 0.532, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 44.1, |
|
"learning_rate": 6.2105263157894745e-06, |
|
"loss": 0.1826, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 44.2, |
|
"learning_rate": 6.105263157894737e-06, |
|
"loss": 0.1878, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 44.3, |
|
"learning_rate": 6e-06, |
|
"loss": 0.1669, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 44.4, |
|
"learning_rate": 5.8947368421052634e-06, |
|
"loss": 0.1728, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.2053, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 44.6, |
|
"learning_rate": 5.68421052631579e-06, |
|
"loss": 0.1774, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 44.7, |
|
"learning_rate": 5.578947368421053e-06, |
|
"loss": 0.1788, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 44.8, |
|
"learning_rate": 5.4736842105263165e-06, |
|
"loss": 0.1749, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 44.9, |
|
"learning_rate": 5.36842105263158e-06, |
|
"loss": 0.1795, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.1935, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_safe": 0.4346667900277948, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9964187414776291, |
|
"eval_iou_safe": 0.38890805113099386, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9795630927466161, |
|
"eval_loss": 0.1710895597934723, |
|
"eval_mean_accuracy": 0.7155427657527119, |
|
"eval_mean_iou": 0.684235571938805, |
|
"eval_overall_accuracy": 0.9798254895566115, |
|
"eval_runtime": 10.0694, |
|
"eval_samples_per_second": 6.654, |
|
"eval_steps_per_second": 0.497, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 45.1, |
|
"learning_rate": 5.1578947368421055e-06, |
|
"loss": 0.1838, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 45.2, |
|
"learning_rate": 5.052631578947369e-06, |
|
"loss": 0.1726, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 45.3, |
|
"learning_rate": 4.947368421052632e-06, |
|
"loss": 0.1731, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 45.4, |
|
"learning_rate": 4.842105263157895e-06, |
|
"loss": 0.1724, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 4.736842105263159e-06, |
|
"loss": 0.1789, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 45.6, |
|
"learning_rate": 4.631578947368422e-06, |
|
"loss": 0.1706, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 45.7, |
|
"learning_rate": 4.526315789473685e-06, |
|
"loss": 0.179, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 45.8, |
|
"learning_rate": 4.4210526315789476e-06, |
|
"loss": 0.2259, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 45.9, |
|
"learning_rate": 4.315789473684211e-06, |
|
"loss": 0.2031, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.1728, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"eval_accuracy_safe": 0.4207558953126626, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9969068069022154, |
|
"eval_iou_safe": 0.38194129740606275, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9796351289125812, |
|
"eval_loss": 0.17141538858413696, |
|
"eval_mean_accuracy": 0.708831351107439, |
|
"eval_mean_iou": 0.6807882131593219, |
|
"eval_overall_accuracy": 0.979888232786264, |
|
"eval_runtime": 9.7002, |
|
"eval_samples_per_second": 6.907, |
|
"eval_steps_per_second": 0.515, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 46.1, |
|
"learning_rate": 4.105263157894737e-06, |
|
"loss": 0.1766, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 46.2, |
|
"learning_rate": 4.000000000000001e-06, |
|
"loss": 0.1703, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 46.3, |
|
"learning_rate": 3.894736842105264e-06, |
|
"loss": 0.1648, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 46.4, |
|
"learning_rate": 3.789473684210527e-06, |
|
"loss": 0.1851, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.178, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 46.6, |
|
"learning_rate": 3.5789473684210525e-06, |
|
"loss": 0.1785, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 46.7, |
|
"learning_rate": 3.4736842105263158e-06, |
|
"loss": 0.2019, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 46.8, |
|
"learning_rate": 3.368421052631579e-06, |
|
"loss": 0.1876, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 46.9, |
|
"learning_rate": 3.2631578947368423e-06, |
|
"loss": 0.1748, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 3.1578947368421056e-06, |
|
"loss": 0.1742, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"eval_accuracy_safe": 0.38982501994980745, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9973832559120804, |
|
"eval_iou_safe": 0.35896443759717717, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9791974221442664, |
|
"eval_loss": 0.16703662276268005, |
|
"eval_mean_accuracy": 0.6936041379309439, |
|
"eval_mean_iou": 0.6690809298707218, |
|
"eval_overall_accuracy": 0.9794369597933186, |
|
"eval_runtime": 9.6852, |
|
"eval_samples_per_second": 6.918, |
|
"eval_steps_per_second": 0.516, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 47.1, |
|
"learning_rate": 3.0526315789473684e-06, |
|
"loss": 0.1799, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 47.2, |
|
"learning_rate": 2.9473684210526317e-06, |
|
"loss": 0.1994, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 47.3, |
|
"learning_rate": 2.842105263157895e-06, |
|
"loss": 0.1865, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 47.4, |
|
"learning_rate": 2.7368421052631583e-06, |
|
"loss": 0.1713, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.1622, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 47.6, |
|
"learning_rate": 2.5263157894736844e-06, |
|
"loss": 0.1643, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 47.7, |
|
"learning_rate": 2.4210526315789477e-06, |
|
"loss": 0.1772, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 47.8, |
|
"learning_rate": 2.315789473684211e-06, |
|
"loss": 0.1819, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 47.9, |
|
"learning_rate": 2.2105263157894738e-06, |
|
"loss": 0.19, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 2.105263157894737e-06, |
|
"loss": 0.2064, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"eval_accuracy_safe": 0.4209312994167332, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9969624248878517, |
|
"eval_iou_safe": 0.3827353713872098, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.979694923134079, |
|
"eval_loss": 0.16833512485027313, |
|
"eval_mean_accuracy": 0.7089468621522924, |
|
"eval_mean_iou": 0.6812151472606445, |
|
"eval_overall_accuracy": 0.9799473890617713, |
|
"eval_runtime": 9.8036, |
|
"eval_samples_per_second": 6.834, |
|
"eval_steps_per_second": 0.51, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 48.1, |
|
"learning_rate": 1.6466165413533834e-05, |
|
"loss": 0.1721, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 48.2, |
|
"learning_rate": 1.6390977443609023e-05, |
|
"loss": 0.1789, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 48.3, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.1666, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 48.4, |
|
"learning_rate": 1.62406015037594e-05, |
|
"loss": 0.1764, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 1.6165413533834585e-05, |
|
"loss": 0.1802, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 48.6, |
|
"learning_rate": 1.6090225563909775e-05, |
|
"loss": 0.1667, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 48.7, |
|
"learning_rate": 1.6015037593984964e-05, |
|
"loss": 0.1788, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 48.8, |
|
"learning_rate": 1.5939849624060154e-05, |
|
"loss": 0.2033, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 48.9, |
|
"learning_rate": 1.586466165413534e-05, |
|
"loss": 0.1924, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.1946, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"eval_accuracy_safe": 0.37460341324821417, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9975849591131536, |
|
"eval_iou_safe": 0.347065714704107, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9789501628246108, |
|
"eval_loss": 0.1659359484910965, |
|
"eval_mean_accuracy": 0.6860941861806839, |
|
"eval_mean_iou": 0.6630079387643589, |
|
"eval_overall_accuracy": 0.9791830831499242, |
|
"eval_runtime": 16.317, |
|
"eval_samples_per_second": 4.106, |
|
"eval_steps_per_second": 0.306, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 49.1, |
|
"learning_rate": 1.5714285714285715e-05, |
|
"loss": 0.2351, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 49.2, |
|
"learning_rate": 1.5639097744360905e-05, |
|
"loss": 0.1724, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 49.3, |
|
"learning_rate": 1.556390977443609e-05, |
|
"loss": 0.1938, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 49.4, |
|
"learning_rate": 1.548872180451128e-05, |
|
"loss": 0.1825, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 1.5413533834586467e-05, |
|
"loss": 0.167, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 49.6, |
|
"learning_rate": 1.5338345864661656e-05, |
|
"loss": 0.1594, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 49.7, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.1752, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 49.8, |
|
"learning_rate": 1.5187969924812032e-05, |
|
"loss": 0.1783, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 49.9, |
|
"learning_rate": 1.511278195488722e-05, |
|
"loss": 0.1806, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 1.5037593984962406e-05, |
|
"loss": 0.1836, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_safe": 0.44868755324767445, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9964670258681129, |
|
"eval_iou_safe": 0.402023415385625, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.98002171616563, |
|
"eval_loss": 0.16180779039859772, |
|
"eval_mean_accuracy": 0.7225772895578937, |
|
"eval_mean_iou": 0.6910225657756275, |
|
"eval_overall_accuracy": 0.9802864985679512, |
|
"eval_runtime": 14.3683, |
|
"eval_samples_per_second": 4.663, |
|
"eval_steps_per_second": 0.348, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 50.1, |
|
"learning_rate": 1.4962406015037595e-05, |
|
"loss": 0.1755, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 50.2, |
|
"learning_rate": 1.4887218045112783e-05, |
|
"loss": 0.1764, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 50.3, |
|
"learning_rate": 1.481203007518797e-05, |
|
"loss": 0.1642, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 50.4, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.208, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 1.4661654135338346e-05, |
|
"loss": 0.1609, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 50.6, |
|
"learning_rate": 1.4586466165413534e-05, |
|
"loss": 0.1684, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 50.7, |
|
"learning_rate": 1.4511278195488722e-05, |
|
"loss": 0.1795, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 50.8, |
|
"learning_rate": 1.4436090225563912e-05, |
|
"loss": 0.1653, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 50.9, |
|
"learning_rate": 1.4360902255639098e-05, |
|
"loss": 0.1681, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 1.4285714285714285e-05, |
|
"loss": 0.1786, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"eval_accuracy_safe": 0.4326756643189502, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9966320610934238, |
|
"eval_iou_safe": 0.3895693916085858, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9797144327801063, |
|
"eval_loss": 0.1594761162996292, |
|
"eval_mean_accuracy": 0.714653862706187, |
|
"eval_mean_iou": 0.684641912194346, |
|
"eval_overall_accuracy": 0.97997369339217, |
|
"eval_runtime": 14.553, |
|
"eval_samples_per_second": 4.604, |
|
"eval_steps_per_second": 0.344, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 51.1, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.1714, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 51.2, |
|
"learning_rate": 1.4135338345864663e-05, |
|
"loss": 0.1736, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 51.3, |
|
"learning_rate": 1.406015037593985e-05, |
|
"loss": 0.1555, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 51.4, |
|
"learning_rate": 1.3984962406015037e-05, |
|
"loss": 0.1724, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 1.3909774436090226e-05, |
|
"loss": 0.1612, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 51.6, |
|
"learning_rate": 1.3834586466165414e-05, |
|
"loss": 0.1639, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 51.7, |
|
"learning_rate": 1.3759398496240602e-05, |
|
"loss": 0.1732, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 51.8, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.1685, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 51.9, |
|
"learning_rate": 1.3609022556390977e-05, |
|
"loss": 0.177, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 1.3533834586466165e-05, |
|
"loss": 0.1867, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"eval_accuracy_safe": 0.4539978643104691, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9965902302666741, |
|
"eval_iou_safe": 0.40826213641035775, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9802987203511426, |
|
"eval_loss": 0.1555413454771042, |
|
"eval_mean_accuracy": 0.7252940472885716, |
|
"eval_mean_iou": 0.6942804283807502, |
|
"eval_overall_accuracy": 0.9805629217802588, |
|
"eval_runtime": 18.6158, |
|
"eval_samples_per_second": 3.599, |
|
"eval_steps_per_second": 0.269, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 52.1, |
|
"learning_rate": 1.3458646616541353e-05, |
|
"loss": 0.1934, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 52.2, |
|
"learning_rate": 1.3383458646616543e-05, |
|
"loss": 0.1734, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 52.3, |
|
"learning_rate": 1.330827067669173e-05, |
|
"loss": 0.1766, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 52.4, |
|
"learning_rate": 1.3233082706766916e-05, |
|
"loss": 0.152, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.1553, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 52.6, |
|
"learning_rate": 1.3082706766917294e-05, |
|
"loss": 0.16, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 52.7, |
|
"learning_rate": 1.3007518796992482e-05, |
|
"loss": 0.1743, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 52.8, |
|
"learning_rate": 1.2932330827067671e-05, |
|
"loss": 0.1708, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 52.9, |
|
"learning_rate": 1.2857142857142857e-05, |
|
"loss": 0.1588, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 1.2781954887218045e-05, |
|
"loss": 0.1824, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"eval_accuracy_safe": 0.4386297662692125, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9965735096697266, |
|
"eval_iou_safe": 0.3942474610451035, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9798314336927032, |
|
"eval_loss": 0.15635764598846436, |
|
"eval_mean_accuracy": 0.7176016379694695, |
|
"eval_mean_iou": 0.6870394473689033, |
|
"eval_overall_accuracy": 0.9800927461083255, |
|
"eval_runtime": 25.5349, |
|
"eval_samples_per_second": 2.624, |
|
"eval_steps_per_second": 0.196, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 53.1, |
|
"learning_rate": 1.2706766917293233e-05, |
|
"loss": 0.1698, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 53.2, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.1703, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 53.3, |
|
"learning_rate": 1.255639097744361e-05, |
|
"loss": 0.1558, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 53.4, |
|
"learning_rate": 1.2481203007518798e-05, |
|
"loss": 0.1752, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 1.2406015037593984e-05, |
|
"loss": 0.1496, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 53.6, |
|
"learning_rate": 1.2330827067669174e-05, |
|
"loss": 0.1542, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 53.7, |
|
"learning_rate": 1.2255639097744361e-05, |
|
"loss": 0.1897, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 53.8, |
|
"learning_rate": 1.218045112781955e-05, |
|
"loss": 0.181, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 53.9, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.1629, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 1.2030075187969925e-05, |
|
"loss": 0.1494, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"eval_accuracy_safe": 0.4920258595764858, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9956031283591532, |
|
"eval_iou_safe": 0.4299212121007973, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9804440437651487, |
|
"eval_loss": 0.1539766639471054, |
|
"eval_mean_accuracy": 0.7438144939678195, |
|
"eval_mean_iou": 0.705182627932973, |
|
"eval_overall_accuracy": 0.9807282632856227, |
|
"eval_runtime": 20.2085, |
|
"eval_samples_per_second": 3.315, |
|
"eval_steps_per_second": 0.247, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 54.1, |
|
"learning_rate": 1.1954887218045113e-05, |
|
"loss": 0.1891, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 54.2, |
|
"learning_rate": 1.1879699248120302e-05, |
|
"loss": 0.176, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 54.3, |
|
"learning_rate": 1.1804511278195488e-05, |
|
"loss": 0.1554, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 54.4, |
|
"learning_rate": 1.1729323308270678e-05, |
|
"loss": 0.1469, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 1.1654135338345864e-05, |
|
"loss": 0.1646, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 54.6, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.1596, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 54.7, |
|
"learning_rate": 1.1503759398496241e-05, |
|
"loss": 0.175, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 54.8, |
|
"learning_rate": 1.1428571428571429e-05, |
|
"loss": 0.1619, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 54.9, |
|
"learning_rate": 1.1353383458646617e-05, |
|
"loss": 0.1554, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 1.1278195488721805e-05, |
|
"loss": 0.1583, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_safe": 0.4557634704569373, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9963986767612919, |
|
"eval_iou_safe": 0.4075434429648423, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9801621116214008, |
|
"eval_loss": 0.1501733511686325, |
|
"eval_mean_accuracy": 0.7260810736091146, |
|
"eval_mean_iou": 0.6938527772931216, |
|
"eval_overall_accuracy": 0.9804291796328416, |
|
"eval_runtime": 18.9217, |
|
"eval_samples_per_second": 3.541, |
|
"eval_steps_per_second": 0.264, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 55.1, |
|
"learning_rate": 1.1203007518796992e-05, |
|
"loss": 0.1617, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 55.2, |
|
"learning_rate": 1.1127819548872182e-05, |
|
"loss": 0.1719, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 55.3, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.1641, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 55.4, |
|
"learning_rate": 1.0977443609022558e-05, |
|
"loss": 0.1823, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 1.0902255639097744e-05, |
|
"loss": 0.1654, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 55.6, |
|
"learning_rate": 1.0827067669172933e-05, |
|
"loss": 0.1518, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 55.7, |
|
"learning_rate": 1.0751879699248121e-05, |
|
"loss": 0.1463, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 55.8, |
|
"learning_rate": 1.0676691729323309e-05, |
|
"loss": 0.148, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 55.9, |
|
"learning_rate": 1.0601503759398497e-05, |
|
"loss": 0.1515, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.1648, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"eval_accuracy_safe": 0.47907294112204657, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9957864682379647, |
|
"eval_iou_safe": 0.42081808808017324, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9802440112401288, |
|
"eval_loss": 0.15234236419200897, |
|
"eval_mean_accuracy": 0.7374297046800056, |
|
"eval_mean_iou": 0.700531049660151, |
|
"eval_overall_accuracy": 0.9805235791562209, |
|
"eval_runtime": 23.4099, |
|
"eval_samples_per_second": 2.862, |
|
"eval_steps_per_second": 0.214, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 56.1, |
|
"learning_rate": 1.0451127819548872e-05, |
|
"loss": 0.1528, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 56.2, |
|
"learning_rate": 1.0375939849624062e-05, |
|
"loss": 0.1529, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 56.3, |
|
"learning_rate": 1.0300751879699248e-05, |
|
"loss": 0.1472, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 56.4, |
|
"learning_rate": 1.0225563909774437e-05, |
|
"loss": 0.1677, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 1.0150375939849624e-05, |
|
"loss": 0.1472, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 56.6, |
|
"learning_rate": 1.0075187969924813e-05, |
|
"loss": 0.1868, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 56.7, |
|
"learning_rate": 1e-05, |
|
"loss": 0.1579, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 56.8, |
|
"learning_rate": 9.924812030075189e-06, |
|
"loss": 0.1549, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 56.9, |
|
"learning_rate": 9.849624060150376e-06, |
|
"loss": 0.1538, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 9.774436090225564e-06, |
|
"loss": 0.1993, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"eval_accuracy_safe": 0.45862197909799884, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9964170400835537, |
|
"eval_iou_safe": 0.4103208806066156, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9802640745426133, |
|
"eval_loss": 0.15015093982219696, |
|
"eval_mean_accuracy": 0.7275195095907763, |
|
"eval_mean_iou": 0.6952924775746144, |
|
"eval_overall_accuracy": 0.9805314362938724, |
|
"eval_runtime": 22.6151, |
|
"eval_samples_per_second": 2.963, |
|
"eval_steps_per_second": 0.221, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 57.1, |
|
"learning_rate": 9.699248120300752e-06, |
|
"loss": 0.1521, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 57.2, |
|
"learning_rate": 9.624060150375942e-06, |
|
"loss": 0.1808, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 57.3, |
|
"learning_rate": 9.548872180451128e-06, |
|
"loss": 0.1622, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 57.4, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.1626, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 9.398496240601503e-06, |
|
"loss": 0.1647, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 57.6, |
|
"learning_rate": 9.323308270676693e-06, |
|
"loss": 0.1704, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 57.7, |
|
"learning_rate": 9.24812030075188e-06, |
|
"loss": 0.1771, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 57.8, |
|
"learning_rate": 9.172932330827068e-06, |
|
"loss": 0.1453, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 57.9, |
|
"learning_rate": 9.097744360902256e-06, |
|
"loss": 0.1824, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 9.022556390977444e-06, |
|
"loss": 0.2243, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"eval_accuracy_safe": 0.3919915497627226, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9972870978124414, |
|
"eval_iou_safe": 0.3599124314432707, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9791664098544022, |
|
"eval_loss": 0.14736422896385193, |
|
"eval_mean_accuracy": 0.694639323787582, |
|
"eval_mean_iou": 0.6695394206488364, |
|
"eval_overall_accuracy": 0.9794076378665754, |
|
"eval_runtime": 23.6296, |
|
"eval_samples_per_second": 2.835, |
|
"eval_steps_per_second": 0.212, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 58.1, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.1836, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 58.2, |
|
"learning_rate": 8.872180451127821e-06, |
|
"loss": 0.1469, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 58.3, |
|
"learning_rate": 8.796992481203007e-06, |
|
"loss": 0.1728, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 58.4, |
|
"learning_rate": 8.721804511278197e-06, |
|
"loss": 0.1744, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 8.646616541353383e-06, |
|
"loss": 0.138, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 58.6, |
|
"learning_rate": 8.571428571428573e-06, |
|
"loss": 0.1424, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 58.7, |
|
"learning_rate": 8.49624060150376e-06, |
|
"loss": 0.161, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 58.8, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.1621, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 58.9, |
|
"learning_rate": 8.345864661654136e-06, |
|
"loss": 0.174, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 8.270676691729324e-06, |
|
"loss": 0.1551, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"eval_accuracy_safe": 0.4686585633825621, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.996123578939933, |
|
"eval_iou_safe": 0.41571446890361186, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9802699770855438, |
|
"eval_loss": 0.14445674419403076, |
|
"eval_mean_accuracy": 0.7323910711612476, |
|
"eval_mean_iou": 0.6979922229945779, |
|
"eval_overall_accuracy": 0.9805431081287896, |
|
"eval_runtime": 20.355, |
|
"eval_samples_per_second": 3.292, |
|
"eval_steps_per_second": 0.246, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 59.1, |
|
"learning_rate": 8.195488721804512e-06, |
|
"loss": 0.1517, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 59.2, |
|
"learning_rate": 8.1203007518797e-06, |
|
"loss": 0.1525, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 59.3, |
|
"learning_rate": 8.045112781954887e-06, |
|
"loss": 0.1456, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 59.4, |
|
"learning_rate": 7.969924812030077e-06, |
|
"loss": 0.1556, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.1628, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 59.6, |
|
"learning_rate": 7.819548872180452e-06, |
|
"loss": 0.16, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 59.7, |
|
"learning_rate": 7.74436090225564e-06, |
|
"loss": 0.1565, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 59.8, |
|
"learning_rate": 7.669172932330828e-06, |
|
"loss": 0.172, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 59.9, |
|
"learning_rate": 7.593984962406016e-06, |
|
"loss": 0.1578, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 7.518796992481203e-06, |
|
"loss": 0.1666, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_safe": 0.4460487816161079, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9963782013636263, |
|
"eval_iou_safe": 0.3986167931304745, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9798569567217434, |
|
"eval_loss": 0.14441508054733276, |
|
"eval_mean_accuracy": 0.7212134914898671, |
|
"eval_mean_iou": 0.689236874926109, |
|
"eval_overall_accuracy": 0.9801223527139692, |
|
"eval_runtime": 20.2732, |
|
"eval_samples_per_second": 3.305, |
|
"eval_steps_per_second": 0.247, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 60.1, |
|
"learning_rate": 7.4436090225563915e-06, |
|
"loss": 0.1501, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 60.2, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.1478, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 60.3, |
|
"learning_rate": 7.293233082706767e-06, |
|
"loss": 0.1636, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 60.4, |
|
"learning_rate": 7.218045112781956e-06, |
|
"loss": 0.1505, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 7.142857142857143e-06, |
|
"loss": 0.1494, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 60.6, |
|
"learning_rate": 7.067669172932331e-06, |
|
"loss": 0.1579, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 60.7, |
|
"learning_rate": 6.992481203007518e-06, |
|
"loss": 0.1575, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 60.8, |
|
"learning_rate": 6.917293233082707e-06, |
|
"loss": 0.1672, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 60.9, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.1442, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 6.766917293233083e-06, |
|
"loss": 0.1632, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"eval_accuracy_safe": 0.5119718119822205, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9951075533331307, |
|
"eval_iou_safe": 0.44107453922579637, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805422419053816, |
|
"eval_loss": 0.15042045712471008, |
|
"eval_mean_accuracy": 0.7535396826576757, |
|
"eval_mean_iou": 0.710808390565589, |
|
"eval_overall_accuracy": 0.9808364982035622, |
|
"eval_runtime": 23.2018, |
|
"eval_samples_per_second": 2.888, |
|
"eval_steps_per_second": 0.216, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 61.1, |
|
"learning_rate": 6.691729323308271e-06, |
|
"loss": 0.1545, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 61.2, |
|
"learning_rate": 6.616541353383458e-06, |
|
"loss": 0.1553, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 61.3, |
|
"learning_rate": 6.541353383458647e-06, |
|
"loss": 0.151, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 61.4, |
|
"learning_rate": 6.4661654135338356e-06, |
|
"loss": 0.1437, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 6.3909774436090225e-06, |
|
"loss": 0.1447, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 61.6, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.1787, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 61.7, |
|
"learning_rate": 6.240601503759399e-06, |
|
"loss": 0.1452, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 61.8, |
|
"learning_rate": 6.165413533834587e-06, |
|
"loss": 0.1741, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 61.9, |
|
"learning_rate": 6.090225563909775e-06, |
|
"loss": 0.1482, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 6.015037593984962e-06, |
|
"loss": 0.1589, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"eval_accuracy_safe": 0.405906299513109, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9970843972424274, |
|
"eval_iou_safe": 0.3704234667492234, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9793746498816952, |
|
"eval_loss": 0.14296869933605194, |
|
"eval_mean_accuracy": 0.7014953483777682, |
|
"eval_mean_iou": 0.6748990583154593, |
|
"eval_overall_accuracy": 0.9796219441428113, |
|
"eval_runtime": 23.0241, |
|
"eval_samples_per_second": 2.91, |
|
"eval_steps_per_second": 0.217, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 62.1, |
|
"learning_rate": 5.939849624060151e-06, |
|
"loss": 0.1638, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 62.2, |
|
"learning_rate": 5.864661654135339e-06, |
|
"loss": 0.1501, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 62.3, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.1434, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 62.4, |
|
"learning_rate": 5.7142857142857145e-06, |
|
"loss": 0.1469, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 5.639097744360902e-06, |
|
"loss": 0.149, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 62.6, |
|
"learning_rate": 5.563909774436091e-06, |
|
"loss": 0.1446, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 62.7, |
|
"learning_rate": 5.488721804511279e-06, |
|
"loss": 0.1566, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 62.8, |
|
"learning_rate": 5.413533834586467e-06, |
|
"loss": 0.1554, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 62.9, |
|
"learning_rate": 5.338345864661654e-06, |
|
"loss": 0.2087, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.1454, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"eval_accuracy_safe": 0.4835100866997429, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.995890429282846, |
|
"eval_iou_safe": 0.42599376070945183, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9804767015760938, |
|
"eval_loss": 0.1422625035047531, |
|
"eval_mean_accuracy": 0.7397002579912945, |
|
"eval_mean_iou": 0.7032352311427728, |
|
"eval_overall_accuracy": 0.9807555355242829, |
|
"eval_runtime": 21.807, |
|
"eval_samples_per_second": 3.072, |
|
"eval_steps_per_second": 0.229, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 63.1, |
|
"learning_rate": 5.187969924812031e-06, |
|
"loss": 0.1521, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 63.2, |
|
"learning_rate": 5.112781954887219e-06, |
|
"loss": 0.1503, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 63.3, |
|
"learning_rate": 5.0375939849624065e-06, |
|
"loss": 0.1595, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 63.4, |
|
"learning_rate": 4.962406015037594e-06, |
|
"loss": 0.1531, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 4.887218045112782e-06, |
|
"loss": 0.1396, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 63.6, |
|
"learning_rate": 4.812030075187971e-06, |
|
"loss": 0.1479, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 63.7, |
|
"learning_rate": 4.736842105263159e-06, |
|
"loss": 0.1574, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 63.8, |
|
"learning_rate": 4.661654135338346e-06, |
|
"loss": 0.1508, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 63.9, |
|
"learning_rate": 4.586466165413534e-06, |
|
"loss": 0.1508, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 4.511278195488722e-06, |
|
"loss": 0.1635, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"eval_accuracy_safe": 0.49020628293645746, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9957314956087019, |
|
"eval_iou_safe": 0.4299155788612444, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.980516979053144, |
|
"eval_loss": 0.14236415922641754, |
|
"eval_mean_accuracy": 0.7429688892725796, |
|
"eval_mean_iou": 0.7052162789571942, |
|
"eval_overall_accuracy": 0.9807990913960472, |
|
"eval_runtime": 21.5855, |
|
"eval_samples_per_second": 3.104, |
|
"eval_steps_per_second": 0.232, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 64.1, |
|
"learning_rate": 4.436090225563911e-06, |
|
"loss": 0.1545, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 64.2, |
|
"learning_rate": 4.3609022556390985e-06, |
|
"loss": 0.1461, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 64.3, |
|
"learning_rate": 4.285714285714286e-06, |
|
"loss": 0.1462, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 64.4, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.1401, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 4.135338345864662e-06, |
|
"loss": 0.1609, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 64.6, |
|
"learning_rate": 4.06015037593985e-06, |
|
"loss": 0.1584, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 64.7, |
|
"learning_rate": 3.984962406015038e-06, |
|
"loss": 0.1526, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 64.8, |
|
"learning_rate": 3.909774436090226e-06, |
|
"loss": 0.1604, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 64.9, |
|
"learning_rate": 3.834586466165414e-06, |
|
"loss": 0.1448, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 3.7593984962406014e-06, |
|
"loss": 0.1515, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_safe": 0.4774518987976145, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9961566094524996, |
|
"eval_iou_safe": 0.4239224934623968, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805607490668283, |
|
"eval_loss": 0.14221766591072083, |
|
"eval_mean_accuracy": 0.736804254125057, |
|
"eval_mean_iou": 0.7022416212646125, |
|
"eval_overall_accuracy": 0.9808349040017199, |
|
"eval_runtime": 22.283, |
|
"eval_samples_per_second": 3.007, |
|
"eval_steps_per_second": 0.224, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 65.1, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.1606, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 65.2, |
|
"learning_rate": 3.609022556390978e-06, |
|
"loss": 0.1578, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 65.3, |
|
"learning_rate": 3.5338345864661657e-06, |
|
"loss": 0.1409, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 65.4, |
|
"learning_rate": 3.4586466165413535e-06, |
|
"loss": 0.142, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 3.3834586466165413e-06, |
|
"loss": 0.1564, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 65.6, |
|
"learning_rate": 3.308270676691729e-06, |
|
"loss": 0.1412, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 65.7, |
|
"learning_rate": 3.2330827067669178e-06, |
|
"loss": 0.1794, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 65.8, |
|
"learning_rate": 3.1578947368421056e-06, |
|
"loss": 0.141, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 65.9, |
|
"learning_rate": 3.0827067669172934e-06, |
|
"loss": 0.1577, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 3.007518796992481e-06, |
|
"loss": 0.151, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"eval_accuracy_safe": 0.4717927070443059, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9962058325431629, |
|
"eval_iou_safe": 0.41950013111146534, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.980442962798809, |
|
"eval_loss": 0.1422828882932663, |
|
"eval_mean_accuracy": 0.7339992697937344, |
|
"eval_mean_iou": 0.6999715469551372, |
|
"eval_overall_accuracy": 0.9807155096708838, |
|
"eval_runtime": 22.7162, |
|
"eval_samples_per_second": 2.949, |
|
"eval_steps_per_second": 0.22, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 66.1, |
|
"learning_rate": 2.9323308270676694e-06, |
|
"loss": 0.1474, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 66.2, |
|
"learning_rate": 2.8571428571428573e-06, |
|
"loss": 0.1435, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 66.3, |
|
"learning_rate": 2.7819548872180455e-06, |
|
"loss": 0.1392, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 66.4, |
|
"learning_rate": 2.7067669172932333e-06, |
|
"loss": 0.1462, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.1822, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 66.6, |
|
"learning_rate": 2.5563909774436093e-06, |
|
"loss": 0.157, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 66.7, |
|
"learning_rate": 2.481203007518797e-06, |
|
"loss": 0.1467, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 66.8, |
|
"learning_rate": 2.4060150375939854e-06, |
|
"loss": 0.1562, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 66.9, |
|
"learning_rate": 2.330827067669173e-06, |
|
"loss": 0.1624, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 2.255639097744361e-06, |
|
"loss": 0.166, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"eval_accuracy_safe": 0.4720953273117683, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9962893768591397, |
|
"eval_iou_safe": 0.42079618726258444, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805340739883608, |
|
"eval_loss": 0.14265188574790955, |
|
"eval_mean_accuracy": 0.734192352085454, |
|
"eval_mean_iou": 0.7006651306254726, |
|
"eval_overall_accuracy": 0.9808055251391966, |
|
"eval_runtime": 20.5005, |
|
"eval_samples_per_second": 3.268, |
|
"eval_steps_per_second": 0.244, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 67.1, |
|
"learning_rate": 2.1804511278195492e-06, |
|
"loss": 0.1524, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 67.2, |
|
"learning_rate": 2.105263157894737e-06, |
|
"loss": 0.1599, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 67.3, |
|
"learning_rate": 2.030075187969925e-06, |
|
"loss": 0.1492, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 67.4, |
|
"learning_rate": 1.954887218045113e-06, |
|
"loss": 0.1374, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 1.8796992481203007e-06, |
|
"loss": 0.1415, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 67.6, |
|
"learning_rate": 1.804511278195489e-06, |
|
"loss": 0.139, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 67.7, |
|
"learning_rate": 1.7293233082706767e-06, |
|
"loss": 0.1544, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 67.8, |
|
"learning_rate": 1.6541353383458646e-06, |
|
"loss": 0.1454, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 67.9, |
|
"learning_rate": 1.5789473684210528e-06, |
|
"loss": 0.1681, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 1.5037593984962406e-06, |
|
"loss": 0.1561, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"eval_accuracy_safe": 0.49159216811037737, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9958987602469391, |
|
"eval_iou_safe": 0.4332189017626765, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9807224254704696, |
|
"eval_loss": 0.14203643798828125, |
|
"eval_mean_accuracy": 0.7437454641786583, |
|
"eval_mean_iou": 0.7069706636165731, |
|
"eval_overall_accuracy": 0.9810023521309468, |
|
"eval_runtime": 24.1843, |
|
"eval_samples_per_second": 2.77, |
|
"eval_steps_per_second": 0.207, |
|
"step": 680 |
|
} |
|
], |
|
"max_steps": 700, |
|
"num_train_epochs": 70, |
|
"total_flos": 3.675957771758469e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|