|
{ |
|
"best_metric": 0.1686682254076004, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/safety-utcustom-train-SF-RGBD-b0/checkpoint-440", |
|
"epoch": 44.0, |
|
"global_step": 440, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 1.0566, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 1.0568, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1e-05, |
|
"loss": 1.0517, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 1.0462, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 1.0401, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 2e-05, |
|
"loss": 1.0392, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"loss": 1.025, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 1.0164, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 3e-05, |
|
"loss": 1.0188, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 1.0084, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_accuracy_safe": 0.03683871689006596, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.7845366863390846, |
|
"eval_iou_safe": 0.016300672001855917, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.766643132432382, |
|
"eval_loss": 1.0688321590423584, |
|
"eval_mean_accuracy": 0.41068770161457524, |
|
"eval_mean_iou": 0.260981268144746, |
|
"eval_overall_accuracy": 0.7624508871960995, |
|
"eval_runtime": 17.0867, |
|
"eval_samples_per_second": 3.921, |
|
"eval_steps_per_second": 0.293, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"loss": 1.0016, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4e-05, |
|
"loss": 0.9714, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.9643, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.951, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 5e-05, |
|
"loss": 0.9372, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 4.9824561403508773e-05, |
|
"loss": 0.9318, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 4.9649122807017544e-05, |
|
"loss": 0.9234, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 4.9473684210526315e-05, |
|
"loss": 0.8955, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 4.9298245614035086e-05, |
|
"loss": 0.8359, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 4.912280701754386e-05, |
|
"loss": 0.8483, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_accuracy_safe": 0.00020817190373205964, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9980423994443833, |
|
"eval_iou_safe": 0.00020692073211620515, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9686639129241729, |
|
"eval_loss": 0.873957633972168, |
|
"eval_mean_accuracy": 0.4991252856740577, |
|
"eval_mean_iou": 0.3229569445520964, |
|
"eval_overall_accuracy": 0.9685679763110716, |
|
"eval_runtime": 9.9346, |
|
"eval_samples_per_second": 6.744, |
|
"eval_steps_per_second": 0.503, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.8947368421052635e-05, |
|
"loss": 0.814, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 4.8771929824561406e-05, |
|
"loss": 0.8311, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 4.859649122807018e-05, |
|
"loss": 0.8158, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 4.842105263157895e-05, |
|
"loss": 0.8113, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 4.824561403508772e-05, |
|
"loss": 0.7571, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 4.807017543859649e-05, |
|
"loss": 0.7934, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.789473684210526e-05, |
|
"loss": 0.7214, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 4.771929824561404e-05, |
|
"loss": 0.7534, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 4.754385964912281e-05, |
|
"loss": 0.7021, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.7058, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_accuracy_safe": 0.0008673829322169151, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.992971247730839, |
|
"eval_iou_safe": 0.0008545401719334826, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9641277576335231, |
|
"eval_loss": 0.74162358045578, |
|
"eval_mean_accuracy": 0.49691931533152794, |
|
"eval_mean_iou": 0.3216607659351522, |
|
"eval_overall_accuracy": 0.9636660903247435, |
|
"eval_runtime": 9.5465, |
|
"eval_samples_per_second": 7.018, |
|
"eval_steps_per_second": 0.524, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 4.719298245614036e-05, |
|
"loss": 0.6607, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 4.701754385964913e-05, |
|
"loss": 0.6468, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 4.68421052631579e-05, |
|
"loss": 0.6593, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.6234, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 4.649122807017544e-05, |
|
"loss": 0.6841, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 4.6315789473684214e-05, |
|
"loss": 0.6508, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 4.6140350877192985e-05, |
|
"loss": 0.6758, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 4.5964912280701756e-05, |
|
"loss": 0.5821, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 4.5789473684210527e-05, |
|
"loss": 0.6304, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 4.56140350877193e-05, |
|
"loss": 0.578, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_accuracy_safe": 0.0007363117335708035, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9952830902666999, |
|
"eval_iou_safe": 0.0007249429724180075, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.966194230792341, |
|
"eval_loss": 0.5968723297119141, |
|
"eval_mean_accuracy": 0.49800970100013536, |
|
"eval_mean_iou": 0.32230639125491967, |
|
"eval_overall_accuracy": 0.965905773105906, |
|
"eval_runtime": 9.4884, |
|
"eval_samples_per_second": 7.061, |
|
"eval_steps_per_second": 0.527, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 4.5438596491228075e-05, |
|
"loss": 0.6387, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 4.5263157894736846e-05, |
|
"loss": 0.5828, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 4.508771929824562e-05, |
|
"loss": 0.5795, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 4.491228070175439e-05, |
|
"loss": 0.5699, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.5602, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 4.456140350877193e-05, |
|
"loss": 0.5501, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 4.43859649122807e-05, |
|
"loss": 0.608, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 4.421052631578947e-05, |
|
"loss": 0.5565, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 4.403508771929824e-05, |
|
"loss": 0.5368, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4.3859649122807014e-05, |
|
"loss": 0.5531, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_safe": 0.0060851731489084465, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9974240893698892, |
|
"eval_iou_safe": 0.005865184362233192, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.968219331241184, |
|
"eval_loss": 0.5068420767784119, |
|
"eval_mean_accuracy": 0.5017546312593989, |
|
"eval_mean_iou": 0.3246948385344724, |
|
"eval_overall_accuracy": 0.9681415273182428, |
|
"eval_runtime": 9.555, |
|
"eval_samples_per_second": 7.012, |
|
"eval_steps_per_second": 0.523, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 4.368421052631579e-05, |
|
"loss": 0.5522, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 4.350877192982456e-05, |
|
"loss": 0.5304, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.5472, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 4.3157894736842105e-05, |
|
"loss": 0.5052, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 4.298245614035088e-05, |
|
"loss": 0.5344, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 4.2807017543859654e-05, |
|
"loss": 0.4878, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 4.2631578947368425e-05, |
|
"loss": 0.558, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 4.2456140350877196e-05, |
|
"loss": 0.4542, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 4.228070175438597e-05, |
|
"loss": 0.4765, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.4786, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_accuracy_safe": 0.009651080759133542, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.996145227712823, |
|
"eval_iou_safe": 0.00919338047228307, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.967087021440742, |
|
"eval_loss": 0.45749950408935547, |
|
"eval_mean_accuracy": 0.5028981542359783, |
|
"eval_mean_iou": 0.325426800637675, |
|
"eval_overall_accuracy": 0.967005772377128, |
|
"eval_runtime": 9.3868, |
|
"eval_samples_per_second": 7.138, |
|
"eval_steps_per_second": 0.533, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 4.1929824561403516e-05, |
|
"loss": 0.4643, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 4.1754385964912287e-05, |
|
"loss": 0.5082, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 4.157894736842106e-05, |
|
"loss": 0.4564, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 4.140350877192983e-05, |
|
"loss": 0.4646, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 4.12280701754386e-05, |
|
"loss": 0.5312, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 4.105263157894737e-05, |
|
"loss": 0.4676, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 4.087719298245614e-05, |
|
"loss": 0.4669, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 4.070175438596491e-05, |
|
"loss": 0.4406, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 4.0526315789473684e-05, |
|
"loss": 0.4704, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 4.0350877192982455e-05, |
|
"loss": 0.4681, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_accuracy_safe": 0.006705833824850328, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.998296845861793, |
|
"eval_iou_safe": 0.006371969925913715, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.969000696975069, |
|
"eval_loss": 0.4381723403930664, |
|
"eval_mean_accuracy": 0.5025013398433217, |
|
"eval_mean_iou": 0.32512422230032756, |
|
"eval_overall_accuracy": 0.9690068373039588, |
|
"eval_runtime": 10.1282, |
|
"eval_samples_per_second": 6.615, |
|
"eval_steps_per_second": 0.494, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 4.017543859649123e-05, |
|
"loss": 0.4417, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 4e-05, |
|
"loss": 0.4517, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 3.9824561403508774e-05, |
|
"loss": 0.4441, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"learning_rate": 3.9649122807017545e-05, |
|
"loss": 0.4982, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 3.9473684210526316e-05, |
|
"loss": 0.479, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"learning_rate": 3.929824561403509e-05, |
|
"loss": 0.4123, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"learning_rate": 3.912280701754386e-05, |
|
"loss": 0.4351, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 7.8, |
|
"learning_rate": 3.894736842105263e-05, |
|
"loss": 0.4318, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"learning_rate": 3.877192982456141e-05, |
|
"loss": 0.4846, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 3.859649122807018e-05, |
|
"loss": 0.4139, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_accuracy_safe": 0.0016518826064664362, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9980280842666458, |
|
"eval_iou_safe": 0.0015664870476928589, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.968595196515178, |
|
"eval_loss": 0.3973134160041809, |
|
"eval_mean_accuracy": 0.4998399834365561, |
|
"eval_mean_iou": 0.3233872278542903, |
|
"eval_overall_accuracy": 0.968596728880014, |
|
"eval_runtime": 11.1226, |
|
"eval_samples_per_second": 6.024, |
|
"eval_steps_per_second": 0.45, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 8.1, |
|
"learning_rate": 3.842105263157895e-05, |
|
"loss": 0.433, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 8.2, |
|
"learning_rate": 3.824561403508773e-05, |
|
"loss": 0.394, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 8.3, |
|
"learning_rate": 3.80701754385965e-05, |
|
"loss": 0.444, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"learning_rate": 3.789473684210527e-05, |
|
"loss": 0.4225, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 3.771929824561404e-05, |
|
"loss": 0.3741, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 3.754385964912281e-05, |
|
"loss": 0.4193, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"learning_rate": 3.736842105263158e-05, |
|
"loss": 0.399, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"learning_rate": 3.719298245614035e-05, |
|
"loss": 0.4082, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 8.9, |
|
"learning_rate": 3.7017543859649124e-05, |
|
"loss": 0.3906, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 3.6842105263157895e-05, |
|
"loss": 0.4275, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_accuracy_safe": 0.007727418167239139, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9994094402495628, |
|
"eval_iou_safe": 0.007580341408442182, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9701099208246821, |
|
"eval_loss": 0.3982888162136078, |
|
"eval_mean_accuracy": 0.503568429208401, |
|
"eval_mean_iou": 0.48884513111656214, |
|
"eval_overall_accuracy": 0.9701167434009154, |
|
"eval_runtime": 9.6246, |
|
"eval_samples_per_second": 6.961, |
|
"eval_steps_per_second": 0.52, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"loss": 0.4041, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 3.6491228070175443e-05, |
|
"loss": 0.3714, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"learning_rate": 3.6315789473684214e-05, |
|
"loss": 0.4078, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"learning_rate": 3.6140350877192985e-05, |
|
"loss": 0.4167, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 3.5964912280701756e-05, |
|
"loss": 0.4058, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"learning_rate": 3.578947368421053e-05, |
|
"loss": 0.3794, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 9.7, |
|
"learning_rate": 3.56140350877193e-05, |
|
"loss": 0.3786, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 3.543859649122807e-05, |
|
"loss": 0.3545, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"learning_rate": 3.526315789473684e-05, |
|
"loss": 0.3598, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 3.508771929824561e-05, |
|
"loss": 0.3975, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_safe": 0.0008095574034024542, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9997554686032364, |
|
"eval_iou_safe": 0.0008031575565652393, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9702474966446942, |
|
"eval_loss": 0.33975234627723694, |
|
"eval_mean_accuracy": 0.5002825130033194, |
|
"eval_mean_iou": 0.3236835514004198, |
|
"eval_overall_accuracy": 0.9702482081171292, |
|
"eval_runtime": 9.7288, |
|
"eval_samples_per_second": 6.887, |
|
"eval_steps_per_second": 0.514, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 10.1, |
|
"learning_rate": 3.491228070175438e-05, |
|
"loss": 0.3621, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 10.2, |
|
"learning_rate": 3.473684210526316e-05, |
|
"loss": 0.37, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 10.3, |
|
"learning_rate": 3.456140350877193e-05, |
|
"loss": 0.359, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 10.4, |
|
"learning_rate": 3.43859649122807e-05, |
|
"loss": 0.3523, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 3.421052631578947e-05, |
|
"loss": 0.3318, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 10.6, |
|
"learning_rate": 3.403508771929825e-05, |
|
"loss": 0.3661, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 10.7, |
|
"learning_rate": 3.385964912280702e-05, |
|
"loss": 0.3563, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 10.8, |
|
"learning_rate": 3.368421052631579e-05, |
|
"loss": 0.3595, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 10.9, |
|
"learning_rate": 3.3508771929824564e-05, |
|
"loss": 0.3554, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.4325, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"eval_accuracy_safe": 0.09409370048689095, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9992699846041437, |
|
"eval_iou_safe": 0.09189049790961165, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9724559733145028, |
|
"eval_loss": 0.37850186228752136, |
|
"eval_mean_accuracy": 0.5466818425455173, |
|
"eval_mean_iou": 0.3547821570747048, |
|
"eval_overall_accuracy": 0.9725325285498776, |
|
"eval_runtime": 10.4486, |
|
"eval_samples_per_second": 6.412, |
|
"eval_steps_per_second": 0.479, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 11.1, |
|
"learning_rate": 3.3157894736842106e-05, |
|
"loss": 0.3409, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 11.2, |
|
"learning_rate": 3.2982456140350884e-05, |
|
"loss": 0.3633, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 11.3, |
|
"learning_rate": 3.2807017543859655e-05, |
|
"loss": 0.3483, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 11.4, |
|
"learning_rate": 3.2631578947368426e-05, |
|
"loss": 0.3548, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 3.24561403508772e-05, |
|
"loss": 0.3474, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 11.6, |
|
"learning_rate": 3.228070175438597e-05, |
|
"loss": 0.3529, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 11.7, |
|
"learning_rate": 3.210526315789474e-05, |
|
"loss": 0.3309, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 11.8, |
|
"learning_rate": 3.192982456140351e-05, |
|
"loss": 0.3306, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 11.9, |
|
"learning_rate": 3.175438596491228e-05, |
|
"loss": 0.3194, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 3.157894736842105e-05, |
|
"loss": 0.3239, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"eval_accuracy_safe": 0.07718166082628826, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9994698690736191, |
|
"eval_iou_safe": 0.07586054028042748, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9721634919205556, |
|
"eval_loss": 0.33375129103660583, |
|
"eval_mean_accuracy": 0.5383257649499537, |
|
"eval_mean_iou": 0.34934134406699435, |
|
"eval_overall_accuracy": 0.972226954218167, |
|
"eval_runtime": 9.4146, |
|
"eval_samples_per_second": 7.117, |
|
"eval_steps_per_second": 0.531, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 12.1, |
|
"learning_rate": 3.140350877192982e-05, |
|
"loss": 0.3293, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 12.2, |
|
"learning_rate": 3.12280701754386e-05, |
|
"loss": 0.3444, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 12.3, |
|
"learning_rate": 3.105263157894737e-05, |
|
"loss": 0.3658, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 12.4, |
|
"learning_rate": 3.087719298245614e-05, |
|
"loss": 0.3282, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 3.0701754385964913e-05, |
|
"loss": 0.3397, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 12.6, |
|
"learning_rate": 3.0526315789473684e-05, |
|
"loss": 0.3385, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 12.7, |
|
"learning_rate": 3.035087719298246e-05, |
|
"loss": 0.3236, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 12.8, |
|
"learning_rate": 3.017543859649123e-05, |
|
"loss": 0.3293, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 12.9, |
|
"learning_rate": 3e-05, |
|
"loss": 0.3317, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 2.9824561403508772e-05, |
|
"loss": 0.3733, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"eval_accuracy_safe": 0.07628151009440981, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9995119932441748, |
|
"eval_iou_safe": 0.07507778115040219, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9721785568914258, |
|
"eval_loss": 0.3012864589691162, |
|
"eval_mean_accuracy": 0.5378967516692923, |
|
"eval_mean_iou": 0.523628169020914, |
|
"eval_overall_accuracy": 0.972241245098968, |
|
"eval_runtime": 9.4095, |
|
"eval_samples_per_second": 7.12, |
|
"eval_steps_per_second": 0.531, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 13.1, |
|
"learning_rate": 2.9649122807017543e-05, |
|
"loss": 0.3196, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 13.2, |
|
"learning_rate": 2.9473684210526314e-05, |
|
"loss": 0.3177, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 13.3, |
|
"learning_rate": 2.929824561403509e-05, |
|
"loss": 0.3177, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 13.4, |
|
"learning_rate": 2.9122807017543863e-05, |
|
"loss": 0.3131, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 2.8947368421052634e-05, |
|
"loss": 0.3246, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 13.6, |
|
"learning_rate": 2.8771929824561404e-05, |
|
"loss": 0.3093, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 13.7, |
|
"learning_rate": 2.8596491228070175e-05, |
|
"loss": 0.3346, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 13.8, |
|
"learning_rate": 2.842105263157895e-05, |
|
"loss": 0.3108, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 13.9, |
|
"learning_rate": 2.824561403508772e-05, |
|
"loss": 0.3331, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 2.8070175438596492e-05, |
|
"loss": 0.3165, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"eval_accuracy_safe": 0.08003631443209548, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9994350198294546, |
|
"eval_iou_safe": 0.07857775456444727, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9722117608452171, |
|
"eval_loss": 0.28493982553482056, |
|
"eval_mean_accuracy": 0.539735667130775, |
|
"eval_mean_iou": 0.5253947577048321, |
|
"eval_overall_accuracy": 0.9722774562551014, |
|
"eval_runtime": 10.0221, |
|
"eval_samples_per_second": 6.685, |
|
"eval_steps_per_second": 0.499, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 14.1, |
|
"learning_rate": 2.7894736842105263e-05, |
|
"loss": 0.3037, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 14.2, |
|
"learning_rate": 2.7719298245614034e-05, |
|
"loss": 0.3372, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 14.3, |
|
"learning_rate": 2.754385964912281e-05, |
|
"loss": 0.3018, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 14.4, |
|
"learning_rate": 2.7368421052631583e-05, |
|
"loss": 0.3047, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 2.7192982456140354e-05, |
|
"loss": 0.3158, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 14.6, |
|
"learning_rate": 2.7017543859649125e-05, |
|
"loss": 0.3036, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 14.7, |
|
"learning_rate": 2.6842105263157896e-05, |
|
"loss": 0.3219, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 14.8, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 0.2892, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 14.9, |
|
"learning_rate": 2.6491228070175438e-05, |
|
"loss": 0.3252, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 2.6315789473684212e-05, |
|
"loss": 0.3329, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_safe": 0.11177481968072599, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.999028621320486, |
|
"eval_iou_safe": 0.10831796981091193, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9727305324523915, |
|
"eval_loss": 0.3002106547355652, |
|
"eval_mean_accuracy": 0.555401720500606, |
|
"eval_mean_iou": 0.5405242511316517, |
|
"eval_overall_accuracy": 0.9728205666613223, |
|
"eval_runtime": 10.0001, |
|
"eval_samples_per_second": 6.7, |
|
"eval_steps_per_second": 0.5, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 15.1, |
|
"learning_rate": 2.6140350877192983e-05, |
|
"loss": 0.3213, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 15.2, |
|
"learning_rate": 2.5964912280701754e-05, |
|
"loss": 0.2808, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 15.3, |
|
"learning_rate": 2.578947368421053e-05, |
|
"loss": 0.2909, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 15.4, |
|
"learning_rate": 2.5614035087719303e-05, |
|
"loss": 0.2886, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 2.5438596491228074e-05, |
|
"loss": 0.2776, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 15.6, |
|
"learning_rate": 2.5263157894736845e-05, |
|
"loss": 0.2945, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 15.7, |
|
"learning_rate": 2.5087719298245616e-05, |
|
"loss": 0.3205, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 15.8, |
|
"learning_rate": 2.4912280701754387e-05, |
|
"loss": 0.2827, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 15.9, |
|
"learning_rate": 2.4736842105263158e-05, |
|
"loss": 0.3261, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 2.456140350877193e-05, |
|
"loss": 0.3214, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"eval_accuracy_safe": 0.09076873258005945, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9994798427630265, |
|
"eval_iou_safe": 0.08924361530356209, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9725644143423847, |
|
"eval_loss": 0.27253061532974243, |
|
"eval_mean_accuracy": 0.545124287671543, |
|
"eval_mean_iou": 0.5309040148229734, |
|
"eval_overall_accuracy": 0.972637973614593, |
|
"eval_runtime": 9.3554, |
|
"eval_samples_per_second": 7.162, |
|
"eval_steps_per_second": 0.534, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 16.1, |
|
"learning_rate": 2.4385964912280703e-05, |
|
"loss": 0.2676, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 16.2, |
|
"learning_rate": 2.4210526315789474e-05, |
|
"loss": 0.2881, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 16.3, |
|
"learning_rate": 2.4035087719298245e-05, |
|
"loss": 0.2869, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 16.4, |
|
"learning_rate": 2.385964912280702e-05, |
|
"loss": 0.3117, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 2.368421052631579e-05, |
|
"loss": 0.2852, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 16.6, |
|
"learning_rate": 2.3508771929824565e-05, |
|
"loss": 0.3013, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 16.7, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"loss": 0.3106, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 16.8, |
|
"learning_rate": 2.3157894736842107e-05, |
|
"loss": 0.3104, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 16.9, |
|
"learning_rate": 2.2982456140350878e-05, |
|
"loss": 0.2777, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 2.280701754385965e-05, |
|
"loss": 0.2744, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"eval_accuracy_safe": 0.15732206121024978, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9985794532845882, |
|
"eval_iou_safe": 0.15030708175648924, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9736074113594023, |
|
"eval_loss": 0.2896406948566437, |
|
"eval_mean_accuracy": 0.577950757247419, |
|
"eval_mean_iou": 0.5619572465579458, |
|
"eval_overall_accuracy": 0.9737300588123834, |
|
"eval_runtime": 9.4906, |
|
"eval_samples_per_second": 7.06, |
|
"eval_steps_per_second": 0.527, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 17.1, |
|
"learning_rate": 2.2631578947368423e-05, |
|
"loss": 0.2712, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 17.2, |
|
"learning_rate": 2.2456140350877194e-05, |
|
"loss": 0.2807, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 17.3, |
|
"learning_rate": 2.2280701754385965e-05, |
|
"loss": 0.2618, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 17.4, |
|
"learning_rate": 2.2105263157894736e-05, |
|
"loss": 0.2755, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 2.1929824561403507e-05, |
|
"loss": 0.2889, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 17.6, |
|
"learning_rate": 2.175438596491228e-05, |
|
"loss": 0.277, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 17.7, |
|
"learning_rate": 2.1578947368421053e-05, |
|
"loss": 0.2669, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 17.8, |
|
"learning_rate": 2.1403508771929827e-05, |
|
"loss": 0.2867, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 17.9, |
|
"learning_rate": 2.1228070175438598e-05, |
|
"loss": 0.3369, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.2948, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"eval_accuracy_safe": 0.13295631088546306, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9988779599416738, |
|
"eval_iou_safe": 0.12822929505304623, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9731947501724811, |
|
"eval_loss": 0.2564152479171753, |
|
"eval_mean_accuracy": 0.5659171354135685, |
|
"eval_mean_iou": 0.5507120226127636, |
|
"eval_overall_accuracy": 0.9733000228654093, |
|
"eval_runtime": 9.4795, |
|
"eval_samples_per_second": 7.068, |
|
"eval_steps_per_second": 0.527, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 18.1, |
|
"learning_rate": 2.0877192982456143e-05, |
|
"loss": 0.2978, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 18.2, |
|
"learning_rate": 2.0701754385964914e-05, |
|
"loss": 0.2467, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 18.3, |
|
"learning_rate": 2.0526315789473685e-05, |
|
"loss": 0.2906, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 18.4, |
|
"learning_rate": 2.0350877192982456e-05, |
|
"loss": 0.2572, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 2.0175438596491227e-05, |
|
"loss": 0.2631, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 18.6, |
|
"learning_rate": 2e-05, |
|
"loss": 0.2838, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 18.7, |
|
"learning_rate": 1.9824561403508773e-05, |
|
"loss": 0.2543, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 18.8, |
|
"learning_rate": 1.9649122807017544e-05, |
|
"loss": 0.2629, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 18.9, |
|
"learning_rate": 1.9473684210526315e-05, |
|
"loss": 0.2622, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 1.929824561403509e-05, |
|
"loss": 0.2653, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"eval_accuracy_safe": 0.17323757425761657, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9986642883133119, |
|
"eval_iou_safe": 0.1659548460122348, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.97415022926933, |
|
"eval_loss": 0.25176867842674255, |
|
"eval_mean_accuracy": 0.5859509312854643, |
|
"eval_mean_iou": 0.5700525376407825, |
|
"eval_overall_accuracy": 0.974282506686538, |
|
"eval_runtime": 11.4222, |
|
"eval_samples_per_second": 5.866, |
|
"eval_steps_per_second": 0.438, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 19.1, |
|
"learning_rate": 1.9122807017543863e-05, |
|
"loss": 0.2738, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 19.2, |
|
"learning_rate": 1.8947368421052634e-05, |
|
"loss": 0.2741, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 19.3, |
|
"learning_rate": 1.8771929824561405e-05, |
|
"loss": 0.2625, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 19.4, |
|
"learning_rate": 1.8596491228070176e-05, |
|
"loss": 0.2632, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.247, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 19.6, |
|
"learning_rate": 1.8245614035087722e-05, |
|
"loss": 0.2735, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 19.7, |
|
"learning_rate": 1.8070175438596493e-05, |
|
"loss": 0.2583, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 19.8, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.2458, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 19.9, |
|
"learning_rate": 1.7719298245614035e-05, |
|
"loss": 0.2588, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 1.7543859649122806e-05, |
|
"loss": 0.3026, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_safe": 0.14083407542761978, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9990070312163571, |
|
"eval_iou_safe": 0.13638476313495493, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9735479371956898, |
|
"eval_loss": 0.2531002461910248, |
|
"eval_mean_accuracy": 0.5699205533219884, |
|
"eval_mean_iou": 0.5549663501653224, |
|
"eval_overall_accuracy": 0.9736579781147972, |
|
"eval_runtime": 9.5606, |
|
"eval_samples_per_second": 7.008, |
|
"eval_steps_per_second": 0.523, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 20.1, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.306, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 20.2, |
|
"learning_rate": 1.719298245614035e-05, |
|
"loss": 0.2678, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 20.3, |
|
"learning_rate": 1.7017543859649125e-05, |
|
"loss": 0.2485, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 20.4, |
|
"learning_rate": 1.6842105263157896e-05, |
|
"loss": 0.2974, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.2612, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 20.6, |
|
"learning_rate": 1.6491228070175442e-05, |
|
"loss": 0.2509, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 20.7, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.2503, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 20.8, |
|
"learning_rate": 1.6140350877192984e-05, |
|
"loss": 0.2584, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 20.9, |
|
"learning_rate": 1.5964912280701755e-05, |
|
"loss": 0.2552, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.2649, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"eval_accuracy_safe": 0.18018242026823336, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9985888402863834, |
|
"eval_iou_safe": 0.17219883762699062, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974277524161329, |
|
"eval_loss": 0.2383996993303299, |
|
"eval_mean_accuracy": 0.5893856302773084, |
|
"eval_mean_iou": 0.5732381808941598, |
|
"eval_overall_accuracy": 0.9744144268889925, |
|
"eval_runtime": 9.6539, |
|
"eval_samples_per_second": 6.94, |
|
"eval_steps_per_second": 0.518, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 21.1, |
|
"learning_rate": 1.56140350877193e-05, |
|
"loss": 0.2475, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 21.2, |
|
"learning_rate": 1.543859649122807e-05, |
|
"loss": 0.254, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 21.3, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.2623, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 21.4, |
|
"learning_rate": 1.5087719298245615e-05, |
|
"loss": 0.2521, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 1.4912280701754386e-05, |
|
"loss": 0.233, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 21.6, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.249, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 21.7, |
|
"learning_rate": 1.4561403508771931e-05, |
|
"loss": 0.2474, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 21.8, |
|
"learning_rate": 1.4385964912280702e-05, |
|
"loss": 0.2573, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 21.9, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.2623, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 1.4035087719298246e-05, |
|
"loss": 0.2431, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"eval_accuracy_safe": 0.19926677229463263, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9983434875269627, |
|
"eval_iou_safe": 0.1889817324970706, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974590481658128, |
|
"eval_loss": 0.2389637678861618, |
|
"eval_mean_accuracy": 0.5988051299107977, |
|
"eval_mean_iou": 0.5817861070775993, |
|
"eval_overall_accuracy": 0.9747400426152927, |
|
"eval_runtime": 10.415, |
|
"eval_samples_per_second": 6.433, |
|
"eval_steps_per_second": 0.48, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 22.1, |
|
"learning_rate": 1.3859649122807017e-05, |
|
"loss": 0.2517, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 22.2, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.2393, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 22.3, |
|
"learning_rate": 1.3508771929824562e-05, |
|
"loss": 0.2455, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 22.4, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 0.2367, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.2516, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 22.6, |
|
"learning_rate": 1.2982456140350877e-05, |
|
"loss": 0.2405, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 22.7, |
|
"learning_rate": 1.2807017543859651e-05, |
|
"loss": 0.2342, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 22.8, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.2365, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 22.9, |
|
"learning_rate": 1.2456140350877193e-05, |
|
"loss": 0.2546, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 1.2280701754385964e-05, |
|
"loss": 0.2608, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"eval_accuracy_safe": 0.23167027112462943, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9980992494740052, |
|
"eval_iou_safe": 0.2180533381712627, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9752910790561756, |
|
"eval_loss": 0.2354857325553894, |
|
"eval_mean_accuracy": 0.6148847602993173, |
|
"eval_mean_iou": 0.5966722086137192, |
|
"eval_overall_accuracy": 0.9754601663617946, |
|
"eval_runtime": 9.0986, |
|
"eval_samples_per_second": 7.364, |
|
"eval_steps_per_second": 0.55, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 23.1, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.2717, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 23.2, |
|
"learning_rate": 1.192982456140351e-05, |
|
"loss": 0.2398, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 23.3, |
|
"learning_rate": 1.1754385964912282e-05, |
|
"loss": 0.232, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 23.4, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.2343, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 1.1403508771929824e-05, |
|
"loss": 0.2494, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 23.6, |
|
"learning_rate": 1.1228070175438597e-05, |
|
"loss": 0.2579, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 23.7, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.2554, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 23.8, |
|
"learning_rate": 1.087719298245614e-05, |
|
"loss": 0.2516, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 23.9, |
|
"learning_rate": 1.0701754385964913e-05, |
|
"loss": 0.248, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.223, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"eval_accuracy_safe": 0.16970828948230732, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9988791333168983, |
|
"eval_iou_safe": 0.1636807106061085, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.974257712308983, |
|
"eval_loss": 0.22904758155345917, |
|
"eval_mean_accuracy": 0.5842937113996027, |
|
"eval_mean_iou": 0.5689692114575458, |
|
"eval_overall_accuracy": 0.9743867560998717, |
|
"eval_runtime": 9.2103, |
|
"eval_samples_per_second": 7.274, |
|
"eval_steps_per_second": 0.543, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 24.1, |
|
"learning_rate": 1.0350877192982457e-05, |
|
"loss": 0.2361, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 24.2, |
|
"learning_rate": 1.0175438596491228e-05, |
|
"loss": 0.2487, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 24.3, |
|
"learning_rate": 1e-05, |
|
"loss": 0.2318, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 24.4, |
|
"learning_rate": 9.824561403508772e-06, |
|
"loss": 0.2492, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 9.649122807017545e-06, |
|
"loss": 0.2644, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 24.6, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.2297, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 24.7, |
|
"learning_rate": 9.298245614035088e-06, |
|
"loss": 0.2757, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 24.8, |
|
"learning_rate": 9.122807017543861e-06, |
|
"loss": 0.2373, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 24.9, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.2465, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 8.771929824561403e-06, |
|
"loss": 0.2448, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_safe": 0.21411444057655907, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9984515553851293, |
|
"eval_iou_safe": 0.2037491172883097, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9751261777280302, |
|
"eval_loss": 0.22617702186107635, |
|
"eval_mean_accuracy": 0.6062829979808442, |
|
"eval_mean_iou": 0.58943764750817, |
|
"eval_overall_accuracy": 0.975283494636194, |
|
"eval_runtime": 9.8675, |
|
"eval_samples_per_second": 6.79, |
|
"eval_steps_per_second": 0.507, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 25.1, |
|
"learning_rate": 8.596491228070176e-06, |
|
"loss": 0.2223, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 25.2, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.2382, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 25.3, |
|
"learning_rate": 8.245614035087721e-06, |
|
"loss": 0.2313, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 25.4, |
|
"learning_rate": 8.070175438596492e-06, |
|
"loss": 0.2753, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.2313, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 25.6, |
|
"learning_rate": 7.719298245614036e-06, |
|
"loss": 0.2392, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 25.7, |
|
"learning_rate": 7.5438596491228074e-06, |
|
"loss": 0.2287, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 25.8, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.2145, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 25.9, |
|
"learning_rate": 7.192982456140351e-06, |
|
"loss": 0.2461, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 7.017543859649123e-06, |
|
"loss": 0.2547, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"eval_accuracy_safe": 0.2736573875968096, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.997840402899504, |
|
"eval_iou_safe": 0.25552723843257524, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9762572758507593, |
|
"eval_loss": 0.22811570763587952, |
|
"eval_mean_accuracy": 0.6357488952481568, |
|
"eval_mean_iou": 0.6158922571416673, |
|
"eval_overall_accuracy": 0.976449197797633, |
|
"eval_runtime": 9.8195, |
|
"eval_samples_per_second": 6.823, |
|
"eval_steps_per_second": 0.509, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 26.1, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.232, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 26.2, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 0.2341, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 26.3, |
|
"learning_rate": 6.4912280701754385e-06, |
|
"loss": 0.235, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 26.4, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.2276, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 6.140350877192982e-06, |
|
"loss": 0.2661, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 26.6, |
|
"learning_rate": 5.964912280701755e-06, |
|
"loss": 0.2416, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 26.7, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.2499, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 26.8, |
|
"learning_rate": 5.6140350877192985e-06, |
|
"loss": 0.223, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 26.9, |
|
"learning_rate": 5.43859649122807e-06, |
|
"loss": 0.2192, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.2266, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"eval_accuracy_safe": 0.2391143442006777, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9981174954587445, |
|
"eval_iou_safe": 0.22518692513373836, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9755248903429237, |
|
"eval_loss": 0.21912012994289398, |
|
"eval_mean_accuracy": 0.6186159198297111, |
|
"eval_mean_iou": 0.600355907738331, |
|
"eval_overall_accuracy": 0.9756977593720849, |
|
"eval_runtime": 9.2527, |
|
"eval_samples_per_second": 7.241, |
|
"eval_steps_per_second": 0.54, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 27.1, |
|
"learning_rate": 5.087719298245614e-06, |
|
"loss": 0.235, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 27.2, |
|
"learning_rate": 4.912280701754386e-06, |
|
"loss": 0.2273, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 27.3, |
|
"learning_rate": 4.736842105263159e-06, |
|
"loss": 0.2235, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 27.4, |
|
"learning_rate": 4.5614035087719304e-06, |
|
"loss": 0.2364, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.3859649122807014e-06, |
|
"loss": 0.2449, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 27.6, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.253, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 27.7, |
|
"learning_rate": 4.035087719298246e-06, |
|
"loss": 0.247, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 27.8, |
|
"learning_rate": 3.859649122807018e-06, |
|
"loss": 0.2271, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 27.9, |
|
"learning_rate": 3.6842105263157892e-06, |
|
"loss": 0.2359, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.2357, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"eval_accuracy_safe": 0.2226726188410993, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9984927995242667, |
|
"eval_iou_safe": 0.21216656994042152, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9754146075867751, |
|
"eval_loss": 0.22175094485282898, |
|
"eval_mean_accuracy": 0.610582709182683, |
|
"eval_mean_iou": 0.5937905887635984, |
|
"eval_overall_accuracy": 0.9755763153531658, |
|
"eval_runtime": 9.6277, |
|
"eval_samples_per_second": 6.959, |
|
"eval_steps_per_second": 0.519, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 28.1, |
|
"learning_rate": 2.305263157894737e-05, |
|
"loss": 0.213, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 28.2, |
|
"learning_rate": 2.294736842105263e-05, |
|
"loss": 0.2174, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 28.3, |
|
"learning_rate": 2.2842105263157897e-05, |
|
"loss": 0.2389, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 28.4, |
|
"learning_rate": 2.273684210526316e-05, |
|
"loss": 0.2463, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 2.2631578947368423e-05, |
|
"loss": 0.2331, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 28.6, |
|
"learning_rate": 2.2526315789473683e-05, |
|
"loss": 0.2407, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 28.7, |
|
"learning_rate": 2.242105263157895e-05, |
|
"loss": 0.2369, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 28.8, |
|
"learning_rate": 2.2315789473684213e-05, |
|
"loss": 0.2691, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 28.9, |
|
"learning_rate": 2.2210526315789476e-05, |
|
"loss": 0.2388, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 2.2105263157894736e-05, |
|
"loss": 0.2563, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"eval_accuracy_safe": 0.1852113137574643, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9987948263070255, |
|
"eval_iou_safe": 0.17815716870865347, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9746240459457417, |
|
"eval_loss": 0.20956651866436005, |
|
"eval_mean_accuracy": 0.592003070032245, |
|
"eval_mean_iou": 0.5763906073271976, |
|
"eval_overall_accuracy": 0.9747628738631063, |
|
"eval_runtime": 9.3361, |
|
"eval_samples_per_second": 7.176, |
|
"eval_steps_per_second": 0.536, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 29.1, |
|
"learning_rate": 2.2000000000000003e-05, |
|
"loss": 0.2531, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 29.2, |
|
"learning_rate": 2.1894736842105266e-05, |
|
"loss": 0.2131, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 29.3, |
|
"learning_rate": 2.1789473684210526e-05, |
|
"loss": 0.2521, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 29.4, |
|
"learning_rate": 2.168421052631579e-05, |
|
"loss": 0.2286, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 2.1578947368421053e-05, |
|
"loss": 0.2359, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 29.6, |
|
"learning_rate": 2.147368421052632e-05, |
|
"loss": 0.2319, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 29.7, |
|
"learning_rate": 2.136842105263158e-05, |
|
"loss": 0.2117, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 29.8, |
|
"learning_rate": 2.1263157894736842e-05, |
|
"loss": 0.2248, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 29.9, |
|
"learning_rate": 2.1157894736842106e-05, |
|
"loss": 0.2189, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.226, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_safe": 0.2843975158152821, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9976802371813743, |
|
"eval_iou_safe": 0.2642573906315484, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9764128620744182, |
|
"eval_loss": 0.21209673583507538, |
|
"eval_mean_accuracy": 0.6410388764983282, |
|
"eval_mean_iou": 0.6203351263529833, |
|
"eval_overall_accuracy": 0.9766110092846315, |
|
"eval_runtime": 10.6202, |
|
"eval_samples_per_second": 6.309, |
|
"eval_steps_per_second": 0.471, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 30.1, |
|
"learning_rate": 2.0947368421052632e-05, |
|
"loss": 0.2109, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 30.2, |
|
"learning_rate": 2.0842105263157895e-05, |
|
"loss": 0.2058, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 30.3, |
|
"learning_rate": 2.073684210526316e-05, |
|
"loss": 0.2244, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 30.4, |
|
"learning_rate": 2.0631578947368422e-05, |
|
"loss": 0.2107, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 2.0526315789473685e-05, |
|
"loss": 0.2341, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 30.6, |
|
"learning_rate": 2.042105263157895e-05, |
|
"loss": 0.2223, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 30.7, |
|
"learning_rate": 2.0315789473684212e-05, |
|
"loss": 0.2399, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 30.8, |
|
"learning_rate": 2.0210526315789475e-05, |
|
"loss": 0.2152, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 30.9, |
|
"learning_rate": 2.010526315789474e-05, |
|
"loss": 0.2303, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 2e-05, |
|
"loss": 0.2221, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"eval_accuracy_safe": 0.2718031156394925, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9977821448196129, |
|
"eval_iou_safe": 0.253343035006872, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9761463764725383, |
|
"eval_loss": 0.20163898169994354, |
|
"eval_mean_accuracy": 0.6347926302295527, |
|
"eval_mean_iou": 0.6147447057397052, |
|
"eval_overall_accuracy": 0.9763378883475688, |
|
"eval_runtime": 9.6143, |
|
"eval_samples_per_second": 6.969, |
|
"eval_steps_per_second": 0.52, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 31.1, |
|
"learning_rate": 1.9894736842105265e-05, |
|
"loss": 0.2184, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 31.2, |
|
"learning_rate": 1.9789473684210528e-05, |
|
"loss": 0.2253, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 31.3, |
|
"learning_rate": 1.968421052631579e-05, |
|
"loss": 0.2304, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 31.4, |
|
"learning_rate": 1.957894736842105e-05, |
|
"loss": 0.2157, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 1.9473684210526315e-05, |
|
"loss": 0.2134, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 31.6, |
|
"learning_rate": 1.936842105263158e-05, |
|
"loss": 0.1973, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 31.7, |
|
"learning_rate": 1.9263157894736845e-05, |
|
"loss": 0.2071, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 31.8, |
|
"learning_rate": 1.9157894736842104e-05, |
|
"loss": 0.2047, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 31.9, |
|
"learning_rate": 1.9052631578947368e-05, |
|
"loss": 0.2216, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 1.8947368421052634e-05, |
|
"loss": 0.2317, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"eval_accuracy_safe": 0.2649045300519273, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.99817452149465, |
|
"eval_iou_safe": 0.2499158963989111, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9763296849028891, |
|
"eval_loss": 0.20076872408390045, |
|
"eval_mean_accuracy": 0.6315395257732886, |
|
"eval_mean_iou": 0.6131227906509, |
|
"eval_overall_accuracy": 0.9765149016878498, |
|
"eval_runtime": 10.0363, |
|
"eval_samples_per_second": 6.676, |
|
"eval_steps_per_second": 0.498, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 32.1, |
|
"learning_rate": 1.8842105263157894e-05, |
|
"loss": 0.2222, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 32.2, |
|
"learning_rate": 1.8736842105263158e-05, |
|
"loss": 0.2257, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 32.3, |
|
"learning_rate": 1.863157894736842e-05, |
|
"loss": 0.2139, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 32.4, |
|
"learning_rate": 1.8526315789473687e-05, |
|
"loss": 0.1901, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.2044, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 32.6, |
|
"learning_rate": 1.831578947368421e-05, |
|
"loss": 0.2069, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 32.7, |
|
"learning_rate": 1.8210526315789474e-05, |
|
"loss": 0.2133, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 32.8, |
|
"learning_rate": 1.810526315789474e-05, |
|
"loss": 0.2049, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 32.9, |
|
"learning_rate": 1.8e-05, |
|
"loss": 0.2138, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.2643, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"eval_accuracy_safe": 0.32538617815659926, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9975752200987912, |
|
"eval_iou_safe": 0.30137717538008896, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.977503612135026, |
|
"eval_loss": 0.19892987608909607, |
|
"eval_mean_accuracy": 0.6614806991276952, |
|
"eval_mean_iou": 0.6394403937575575, |
|
"eval_overall_accuracy": 0.9777198336017665, |
|
"eval_runtime": 10.0482, |
|
"eval_samples_per_second": 6.668, |
|
"eval_steps_per_second": 0.498, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 33.1, |
|
"learning_rate": 1.7789473684210527e-05, |
|
"loss": 0.2032, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 33.2, |
|
"learning_rate": 1.768421052631579e-05, |
|
"loss": 0.2138, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 33.3, |
|
"learning_rate": 1.7578947368421054e-05, |
|
"loss": 0.225, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 33.4, |
|
"learning_rate": 1.7473684210526317e-05, |
|
"loss": 0.1898, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.2084, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 33.6, |
|
"learning_rate": 1.7263157894736843e-05, |
|
"loss": 0.2185, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 33.7, |
|
"learning_rate": 1.7157894736842107e-05, |
|
"loss": 0.2103, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 33.8, |
|
"learning_rate": 1.705263157894737e-05, |
|
"loss": 0.192, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 33.9, |
|
"learning_rate": 1.694736842105263e-05, |
|
"loss": 0.2015, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 1.6842105263157896e-05, |
|
"loss": 0.2118, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"eval_accuracy_safe": 0.3347327111306433, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.997749583657136, |
|
"eval_iou_safe": 0.3116878455021897, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9779470808145468, |
|
"eval_loss": 0.1900891214609146, |
|
"eval_mean_accuracy": 0.6662411473938896, |
|
"eval_mean_iou": 0.6448174631583683, |
|
"eval_overall_accuracy": 0.9781651283378032, |
|
"eval_runtime": 9.3476, |
|
"eval_samples_per_second": 7.168, |
|
"eval_steps_per_second": 0.535, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 34.1, |
|
"learning_rate": 1.673684210526316e-05, |
|
"loss": 0.1939, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 34.2, |
|
"learning_rate": 1.6631578947368423e-05, |
|
"loss": 0.1964, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 34.3, |
|
"learning_rate": 1.6526315789473683e-05, |
|
"loss": 0.2013, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 34.4, |
|
"learning_rate": 1.642105263157895e-05, |
|
"loss": 0.2023, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.1977, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 34.6, |
|
"learning_rate": 1.6210526315789473e-05, |
|
"loss": 0.2127, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 34.7, |
|
"learning_rate": 1.6105263157894736e-05, |
|
"loss": 0.2226, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 34.8, |
|
"learning_rate": 1.6000000000000003e-05, |
|
"loss": 0.2097, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 34.9, |
|
"learning_rate": 1.5894736842105266e-05, |
|
"loss": 0.1962, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.2133, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_safe": 0.3618933620147956, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9975545100260806, |
|
"eval_iou_safe": 0.3349795266599463, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9785487908911983, |
|
"eval_loss": 0.19170759618282318, |
|
"eval_mean_accuracy": 0.6797239360204381, |
|
"eval_mean_iou": 0.6567641587755723, |
|
"eval_overall_accuracy": 0.9787780989461871, |
|
"eval_runtime": 10.6094, |
|
"eval_samples_per_second": 6.315, |
|
"eval_steps_per_second": 0.471, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 35.1, |
|
"learning_rate": 1.568421052631579e-05, |
|
"loss": 0.1896, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 35.2, |
|
"learning_rate": 1.5578947368421056e-05, |
|
"loss": 0.1961, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 35.3, |
|
"learning_rate": 1.5473684210526316e-05, |
|
"loss": 0.1909, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 35.4, |
|
"learning_rate": 1.536842105263158e-05, |
|
"loss": 0.2388, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.2134, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 35.6, |
|
"learning_rate": 1.5157894736842107e-05, |
|
"loss": 0.1938, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 35.7, |
|
"learning_rate": 1.5052631578947369e-05, |
|
"loss": 0.1976, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 35.8, |
|
"learning_rate": 1.4947368421052632e-05, |
|
"loss": 0.21, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 35.9, |
|
"learning_rate": 1.4842105263157895e-05, |
|
"loss": 0.1986, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.2064, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"eval_accuracy_safe": 0.3401316880042868, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9978212768833464, |
|
"eval_iou_safe": 0.31741129406516555, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9781749054634312, |
|
"eval_loss": 0.1860196888446808, |
|
"eval_mean_accuracy": 0.6689764824438166, |
|
"eval_mean_iou": 0.6477930997642984, |
|
"eval_overall_accuracy": 0.9783941809810809, |
|
"eval_runtime": 9.7715, |
|
"eval_samples_per_second": 6.857, |
|
"eval_steps_per_second": 0.512, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 36.1, |
|
"learning_rate": 1.4631578947368422e-05, |
|
"loss": 0.1955, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 36.2, |
|
"learning_rate": 1.4526315789473685e-05, |
|
"loss": 0.2, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 36.3, |
|
"learning_rate": 1.4421052631578948e-05, |
|
"loss": 0.1799, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 36.4, |
|
"learning_rate": 1.431578947368421e-05, |
|
"loss": 0.2033, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.1931, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 36.6, |
|
"learning_rate": 1.4105263157894738e-05, |
|
"loss": 0.1955, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 36.7, |
|
"learning_rate": 1.4000000000000001e-05, |
|
"loss": 0.1978, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 36.8, |
|
"learning_rate": 1.3894736842105263e-05, |
|
"loss": 0.1951, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 36.9, |
|
"learning_rate": 1.3789473684210526e-05, |
|
"loss": 0.1876, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.2341, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"eval_accuracy_safe": 0.2703825351482839, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9982565404228352, |
|
"eval_iou_safe": 0.2557340766682649, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.976569175210793, |
|
"eval_loss": 0.17752937972545624, |
|
"eval_mean_accuracy": 0.6343195377855595, |
|
"eval_mean_iou": 0.616151625939529, |
|
"eval_overall_accuracy": 0.9767563093954058, |
|
"eval_runtime": 9.2963, |
|
"eval_samples_per_second": 7.207, |
|
"eval_steps_per_second": 0.538, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 37.1, |
|
"learning_rate": 1.3578947368421053e-05, |
|
"loss": 0.1998, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 37.2, |
|
"learning_rate": 1.3473684210526316e-05, |
|
"loss": 0.1972, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 37.3, |
|
"learning_rate": 1.336842105263158e-05, |
|
"loss": 0.1904, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 37.4, |
|
"learning_rate": 1.3263157894736844e-05, |
|
"loss": 0.1917, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.1875, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 37.6, |
|
"learning_rate": 1.305263157894737e-05, |
|
"loss": 0.1848, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 37.7, |
|
"learning_rate": 1.2947368421052633e-05, |
|
"loss": 0.1964, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 37.8, |
|
"learning_rate": 1.2842105263157894e-05, |
|
"loss": 0.1845, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 37.9, |
|
"learning_rate": 1.2736842105263157e-05, |
|
"loss": 0.1866, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 1.2631578947368422e-05, |
|
"loss": 0.2093, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"eval_accuracy_safe": 0.3552260785424883, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9928191196329964, |
|
"eval_iou_safe": 0.28741779073267426, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9737097853338179, |
|
"eval_loss": 0.1933618187904358, |
|
"eval_mean_accuracy": 0.6740225990877423, |
|
"eval_mean_iou": 0.6305637880332461, |
|
"eval_overall_accuracy": 0.9739856435291803, |
|
"eval_runtime": 9.4354, |
|
"eval_samples_per_second": 7.101, |
|
"eval_steps_per_second": 0.53, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 38.1, |
|
"learning_rate": 1.2526315789473686e-05, |
|
"loss": 0.2115, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 38.2, |
|
"learning_rate": 1.2421052631578949e-05, |
|
"loss": 0.1946, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 38.3, |
|
"learning_rate": 1.231578947368421e-05, |
|
"loss": 0.1942, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 38.4, |
|
"learning_rate": 1.2210526315789474e-05, |
|
"loss": 0.1908, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.206, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 38.6, |
|
"learning_rate": 1.2e-05, |
|
"loss": 0.1776, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 38.7, |
|
"learning_rate": 1.1894736842105264e-05, |
|
"loss": 0.1942, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 38.8, |
|
"learning_rate": 1.1789473684210527e-05, |
|
"loss": 0.178, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 38.9, |
|
"learning_rate": 1.168421052631579e-05, |
|
"loss": 0.2015, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.1958, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"eval_accuracy_safe": 0.30012027709993405, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9980261481975256, |
|
"eval_iou_safe": 0.28184293125481946, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9772090847024161, |
|
"eval_loss": 0.17546288669109344, |
|
"eval_mean_accuracy": 0.6490732126487297, |
|
"eval_mean_iou": 0.6295260079786178, |
|
"eval_overall_accuracy": 0.9774111278021513, |
|
"eval_runtime": 10.0143, |
|
"eval_samples_per_second": 6.69, |
|
"eval_steps_per_second": 0.499, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 39.1, |
|
"learning_rate": 1.1473684210526315e-05, |
|
"loss": 0.1823, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 39.2, |
|
"learning_rate": 1.136842105263158e-05, |
|
"loss": 0.2134, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 39.3, |
|
"learning_rate": 1.1263157894736842e-05, |
|
"loss": 0.1747, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 39.4, |
|
"learning_rate": 1.1157894736842106e-05, |
|
"loss": 0.1852, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.1861, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 39.6, |
|
"learning_rate": 1.0947368421052633e-05, |
|
"loss": 0.1801, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 39.7, |
|
"learning_rate": 1.0842105263157895e-05, |
|
"loss": 0.2033, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 39.8, |
|
"learning_rate": 1.073684210526316e-05, |
|
"loss": 0.1907, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 39.9, |
|
"learning_rate": 1.0631578947368421e-05, |
|
"loss": 0.1898, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.1886, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_safe": 0.3881249494026623, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9968947211374042, |
|
"eval_iou_safe": 0.35219360153638685, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9786680764654981, |
|
"eval_loss": 0.17675457894802094, |
|
"eval_mean_accuracy": 0.6925098352700332, |
|
"eval_mean_iou": 0.6654308390009425, |
|
"eval_overall_accuracy": 0.9789126381945255, |
|
"eval_runtime": 9.5568, |
|
"eval_samples_per_second": 7.011, |
|
"eval_steps_per_second": 0.523, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 40.1, |
|
"learning_rate": 1.0421052631578948e-05, |
|
"loss": 0.1862, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 40.2, |
|
"learning_rate": 1.0315789473684211e-05, |
|
"loss": 0.1751, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 40.3, |
|
"learning_rate": 1.0210526315789474e-05, |
|
"loss": 0.1777, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 40.4, |
|
"learning_rate": 1.0105263157894738e-05, |
|
"loss": 0.1875, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 1e-05, |
|
"loss": 0.1964, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 40.6, |
|
"learning_rate": 9.894736842105264e-06, |
|
"loss": 0.2019, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 40.7, |
|
"learning_rate": 9.789473684210526e-06, |
|
"loss": 0.1781, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 40.8, |
|
"learning_rate": 9.68421052631579e-06, |
|
"loss": 0.1974, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 40.9, |
|
"learning_rate": 9.578947368421052e-06, |
|
"loss": 0.1903, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 9.473684210526317e-06, |
|
"loss": 0.1734, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"eval_accuracy_safe": 0.3947960879102239, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9972934340386531, |
|
"eval_iou_safe": 0.3625567543168683, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9792547042493136, |
|
"eval_loss": 0.17450523376464844, |
|
"eval_mean_accuracy": 0.6960447609744385, |
|
"eval_mean_iou": 0.6709057292830909, |
|
"eval_overall_accuracy": 0.9794966284908465, |
|
"eval_runtime": 10.0572, |
|
"eval_samples_per_second": 6.662, |
|
"eval_steps_per_second": 0.497, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 41.1, |
|
"learning_rate": 9.368421052631579e-06, |
|
"loss": 0.2101, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 41.2, |
|
"learning_rate": 9.263157894736844e-06, |
|
"loss": 0.1794, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 41.3, |
|
"learning_rate": 9.157894736842105e-06, |
|
"loss": 0.2051, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 41.4, |
|
"learning_rate": 9.05263157894737e-06, |
|
"loss": 0.1762, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.1851, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 41.6, |
|
"learning_rate": 8.842105263157895e-06, |
|
"loss": 0.1835, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 41.7, |
|
"learning_rate": 8.736842105263158e-06, |
|
"loss": 0.2018, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 41.8, |
|
"learning_rate": 8.631578947368422e-06, |
|
"loss": 0.1997, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 41.9, |
|
"learning_rate": 8.526315789473685e-06, |
|
"loss": 0.1848, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.1795, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"eval_accuracy_safe": 0.41681412176514354, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9969580247307602, |
|
"eval_iou_safe": 0.3789419821537469, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9795699690858644, |
|
"eval_loss": 0.17103362083435059, |
|
"eval_mean_accuracy": 0.7068860732479518, |
|
"eval_mean_iou": 0.6792559756198057, |
|
"eval_overall_accuracy": 0.9798215040520056, |
|
"eval_runtime": 9.5328, |
|
"eval_samples_per_second": 7.028, |
|
"eval_steps_per_second": 0.525, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 42.1, |
|
"learning_rate": 8.315789473684212e-06, |
|
"loss": 0.1863, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 42.2, |
|
"learning_rate": 8.210526315789475e-06, |
|
"loss": 0.1977, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 42.3, |
|
"learning_rate": 8.105263157894736e-06, |
|
"loss": 0.1796, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 42.4, |
|
"learning_rate": 8.000000000000001e-06, |
|
"loss": 0.1833, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.1844, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 42.6, |
|
"learning_rate": 7.789473684210528e-06, |
|
"loss": 0.1899, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 42.7, |
|
"learning_rate": 7.68421052631579e-06, |
|
"loss": 0.189, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 42.8, |
|
"learning_rate": 7.578947368421054e-06, |
|
"loss": 0.1717, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 42.9, |
|
"learning_rate": 7.473684210526316e-06, |
|
"loss": 0.1709, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.222, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"eval_accuracy_safe": 0.404142620884268, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.997192699775639, |
|
"eval_iou_safe": 0.3700154592236505, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9794293850090163, |
|
"eval_loss": 0.17061825096607208, |
|
"eval_mean_accuracy": 0.7006676603299535, |
|
"eval_mean_iou": 0.6747224221163334, |
|
"eval_overall_accuracy": 0.9796749513540695, |
|
"eval_runtime": 9.3364, |
|
"eval_samples_per_second": 7.176, |
|
"eval_steps_per_second": 0.536, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 43.1, |
|
"learning_rate": 7.2631578947368426e-06, |
|
"loss": 0.1925, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 43.2, |
|
"learning_rate": 7.157894736842105e-06, |
|
"loss": 0.179, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 43.3, |
|
"learning_rate": 7.052631578947369e-06, |
|
"loss": 0.1774, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 43.4, |
|
"learning_rate": 6.9473684210526315e-06, |
|
"loss": 0.1743, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.1815, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 43.6, |
|
"learning_rate": 6.736842105263158e-06, |
|
"loss": 0.1784, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 43.7, |
|
"learning_rate": 6.631578947368422e-06, |
|
"loss": 0.1719, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 43.8, |
|
"learning_rate": 6.526315789473685e-06, |
|
"loss": 0.1847, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 43.9, |
|
"learning_rate": 6.421052631578947e-06, |
|
"loss": 0.1777, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 6.315789473684211e-06, |
|
"loss": 0.1831, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"eval_accuracy_safe": 0.4044336760459674, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.997242392216392, |
|
"eval_iou_safe": 0.3708362421682382, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9794867149476597, |
|
"eval_loss": 0.1686682254076004, |
|
"eval_mean_accuracy": 0.7008380341311797, |
|
"eval_mean_iou": 0.675161478557949, |
|
"eval_overall_accuracy": 0.9797317732625933, |
|
"eval_runtime": 9.4019, |
|
"eval_samples_per_second": 7.126, |
|
"eval_steps_per_second": 0.532, |
|
"step": 440 |
|
} |
|
], |
|
"max_steps": 500, |
|
"num_train_epochs": 50, |
|
"total_flos": 2.378560911137833e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|