|
{ |
|
"best_metric": 0.09308235347270966, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/safety-utcustom-train-SF-RGBD-b5/checkpoint-800", |
|
"epoch": 76.36363636363636, |
|
"global_step": 840, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 7.272727272727273e-08, |
|
"loss": 0.8005, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.4545454545454545e-07, |
|
"loss": 0.7952, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 2.1818181818181815e-07, |
|
"loss": 0.7806, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 2.909090909090909e-07, |
|
"loss": 0.7985, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 3.636363636363636e-07, |
|
"loss": 0.7916, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 4.363636363636363e-07, |
|
"loss": 0.7885, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 5.090909090909091e-07, |
|
"loss": 0.7869, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 5.818181818181818e-07, |
|
"loss": 0.7871, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 6.545454545454546e-07, |
|
"loss": 0.7895, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 7.272727272727272e-07, |
|
"loss": 0.789, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"eval_accuracy_safe": 0.020290978060994367, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.8956518586322223, |
|
"eval_iou_safe": 0.009542102362547317, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.872225496966486, |
|
"eval_loss": 0.9554787278175354, |
|
"eval_mean_accuracy": 0.4579714183466083, |
|
"eval_mean_iou": 0.29392253310967775, |
|
"eval_overall_accuracy": 0.8697951017920651, |
|
"eval_runtime": 10.4712, |
|
"eval_samples_per_second": 6.399, |
|
"eval_steps_per_second": 0.478, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 8e-07, |
|
"loss": 0.7808, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 8.727272727272726e-07, |
|
"loss": 0.7854, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 9.454545454545454e-07, |
|
"loss": 0.7698, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 1.0181818181818181e-06, |
|
"loss": 0.7788, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 1.0909090909090908e-06, |
|
"loss": 0.7783, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 1.1636363636363636e-06, |
|
"loss": 0.7696, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 1.2363636363636363e-06, |
|
"loss": 0.7638, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 1.3090909090909091e-06, |
|
"loss": 0.7611, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 1.3818181818181818e-06, |
|
"loss": 0.7597, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 1.4545454545454544e-06, |
|
"loss": 0.7579, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"eval_accuracy_safe": 0.011667264197131082, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9614483463212281, |
|
"eval_iou_safe": 0.006852036536648151, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9338335778122896, |
|
"eval_loss": 0.8321800827980042, |
|
"eval_mean_accuracy": 0.4865578052591796, |
|
"eval_mean_iou": 0.3135618714496459, |
|
"eval_overall_accuracy": 0.9333933360541045, |
|
"eval_runtime": 11.8686, |
|
"eval_samples_per_second": 5.645, |
|
"eval_steps_per_second": 0.421, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 1.5272727272727273e-06, |
|
"loss": 0.7626, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1.6e-06, |
|
"loss": 0.7399, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 1.6727272727272726e-06, |
|
"loss": 0.7658, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 1.7454545454545452e-06, |
|
"loss": 0.7404, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 1.818181818181818e-06, |
|
"loss": 0.742, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 1.8909090909090907e-06, |
|
"loss": 0.721, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 1.9636363636363636e-06, |
|
"loss": 0.7222, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 2.0363636363636362e-06, |
|
"loss": 0.7358, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 2.109090909090909e-06, |
|
"loss": 0.7427, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 2.1818181818181815e-06, |
|
"loss": 0.7103, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"eval_accuracy_safe": 0.005111776747198353, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9893036287919527, |
|
"eval_iou_safe": 0.004283838202726343, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9604181920132638, |
|
"eval_loss": 0.6728952527046204, |
|
"eval_mean_accuracy": 0.4972077027695755, |
|
"eval_mean_iou": 0.32156734340533005, |
|
"eval_overall_accuracy": 0.96023217955632, |
|
"eval_runtime": 12.232, |
|
"eval_samples_per_second": 5.477, |
|
"eval_steps_per_second": 0.409, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 2.254545454545454e-06, |
|
"loss": 0.7102, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 2.3272727272727272e-06, |
|
"loss": 0.7035, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.4e-06, |
|
"loss": 0.6845, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 2.4727272727272725e-06, |
|
"loss": 0.6932, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 2.545454545454545e-06, |
|
"loss": 0.6848, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 2.6181818181818183e-06, |
|
"loss": 0.6816, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 2.690909090909091e-06, |
|
"loss": 0.6617, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 2.7636363636363635e-06, |
|
"loss": 0.6924, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 2.836363636363636e-06, |
|
"loss": 0.6775, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 2.909090909090909e-06, |
|
"loss": 0.676, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"eval_accuracy_safe": 0.0021453271190165035, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9968958945126286, |
|
"eval_iou_safe": 0.0020376330502982305, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9675401930395611, |
|
"eval_loss": 0.5335736274719238, |
|
"eval_mean_accuracy": 0.49952061081582255, |
|
"eval_mean_iou": 0.32319260869661975, |
|
"eval_overall_accuracy": 0.9675125577556554, |
|
"eval_runtime": 10.8655, |
|
"eval_samples_per_second": 6.166, |
|
"eval_steps_per_second": 0.46, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 2.981818181818182e-06, |
|
"loss": 0.6708, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 3.0545454545454546e-06, |
|
"loss": 0.6587, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 3.127272727272727e-06, |
|
"loss": 0.6511, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.2e-06, |
|
"loss": 0.6533, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 3.272727272727273e-06, |
|
"loss": 0.6371, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 3.345454545454545e-06, |
|
"loss": 0.6289, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 3.418181818181818e-06, |
|
"loss": 0.6143, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 3.4909090909090904e-06, |
|
"loss": 0.6047, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 3.5636363636363635e-06, |
|
"loss": 0.6173, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 3.636363636363636e-06, |
|
"loss": 0.5955, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"eval_accuracy_safe": 0.00010023091661173242, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9992954468465131, |
|
"eval_iou_safe": 9.918496661090691e-05, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9697964694697941, |
|
"eval_loss": 0.4440336227416992, |
|
"eval_mean_accuracy": 0.4996978388815624, |
|
"eval_mean_iou": 0.32329855147880165, |
|
"eval_overall_accuracy": 0.96978082229842, |
|
"eval_runtime": 10.7685, |
|
"eval_samples_per_second": 6.222, |
|
"eval_steps_per_second": 0.464, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 3.709090909090909e-06, |
|
"loss": 0.6184, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 3.7818181818181815e-06, |
|
"loss": 0.6059, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 3.8545454545454545e-06, |
|
"loss": 0.5942, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 3.927272727272727e-06, |
|
"loss": 0.5764, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4e-06, |
|
"loss": 0.5745, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 3.9961722488038276e-06, |
|
"loss": 0.5496, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"learning_rate": 3.992344497607655e-06, |
|
"loss": 0.5785, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 3.988516746411483e-06, |
|
"loss": 0.5468, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 3.984688995215311e-06, |
|
"loss": 0.5697, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 3.980861244019139e-06, |
|
"loss": 0.5691, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"eval_accuracy_safe": 1.9275176271487e-05, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9997433241696639, |
|
"eval_iou_safe": 1.9217760285345306e-05, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9702130617813918, |
|
"eval_loss": 0.38122960925102234, |
|
"eval_mean_accuracy": 0.49988129967296774, |
|
"eval_mean_iou": 0.3234107598472257, |
|
"eval_overall_accuracy": 0.9702130787408174, |
|
"eval_runtime": 11.2754, |
|
"eval_samples_per_second": 5.942, |
|
"eval_steps_per_second": 0.443, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 3.977033492822966e-06, |
|
"loss": 0.5238, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 3.973205741626794e-06, |
|
"loss": 0.5514, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"learning_rate": 3.969377990430622e-06, |
|
"loss": 0.5261, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 3.96555023923445e-06, |
|
"loss": 0.5348, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 3.961722488038277e-06, |
|
"loss": 0.5614, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 3.957894736842105e-06, |
|
"loss": 0.5475, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"learning_rate": 3.954066985645933e-06, |
|
"loss": 0.5134, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 3.950239234449761e-06, |
|
"loss": 0.5047, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 3.946411483253588e-06, |
|
"loss": 0.4845, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"learning_rate": 3.942583732057416e-06, |
|
"loss": 0.5067, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.999641064518858, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9701133255524892, |
|
"eval_loss": 0.3590196967124939, |
|
"eval_mean_accuracy": 0.499820532259429, |
|
"eval_mean_iou": 0.3233711085174964, |
|
"eval_overall_accuracy": 0.9701132703183303, |
|
"eval_runtime": 10.8376, |
|
"eval_samples_per_second": 6.182, |
|
"eval_steps_per_second": 0.461, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 3.938755980861244e-06, |
|
"loss": 0.4808, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 3.934928229665072e-06, |
|
"loss": 0.5075, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 3.931100478468899e-06, |
|
"loss": 0.5245, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 3.927272727272727e-06, |
|
"loss": 0.4984, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 3.923444976076555e-06, |
|
"loss": 0.483, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 6.91, |
|
"learning_rate": 3.919617224880383e-06, |
|
"loss": 0.5161, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 3.9157894736842104e-06, |
|
"loss": 0.4869, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"learning_rate": 3.911961722488038e-06, |
|
"loss": 0.4928, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 7.18, |
|
"learning_rate": 3.908133971291866e-06, |
|
"loss": 0.4828, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"learning_rate": 3.904306220095694e-06, |
|
"loss": 0.4656, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.999869403337525, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.970334864374417, |
|
"eval_loss": 0.32469046115875244, |
|
"eval_mean_accuracy": 0.4999347016687625, |
|
"eval_mean_iou": 0.32344495479147234, |
|
"eval_overall_accuracy": 0.970334864374417, |
|
"eval_runtime": 11.3252, |
|
"eval_samples_per_second": 5.916, |
|
"eval_steps_per_second": 0.441, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 3.9004784688995215e-06, |
|
"loss": 0.4827, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"learning_rate": 3.896650717703349e-06, |
|
"loss": 0.4507, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 3.892822966507177e-06, |
|
"loss": 0.4652, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 7.64, |
|
"learning_rate": 3.888995215311005e-06, |
|
"loss": 0.4509, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 7.73, |
|
"learning_rate": 3.8851674641148325e-06, |
|
"loss": 0.4557, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 7.82, |
|
"learning_rate": 3.88133971291866e-06, |
|
"loss": 0.4792, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"learning_rate": 3.877511961722488e-06, |
|
"loss": 0.4415, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 3.873684210526316e-06, |
|
"loss": 0.4496, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"learning_rate": 3.8698564593301435e-06, |
|
"loss": 0.4443, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"learning_rate": 3.866028708133971e-06, |
|
"loss": 0.4227, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9997710158249596, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9702393830712162, |
|
"eval_loss": 0.3170846998691559, |
|
"eval_mean_accuracy": 0.4998855079124798, |
|
"eval_mean_iou": 0.32341312769040537, |
|
"eval_overall_accuracy": 0.9702393830712162, |
|
"eval_runtime": 10.5487, |
|
"eval_samples_per_second": 6.351, |
|
"eval_steps_per_second": 0.474, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 8.27, |
|
"learning_rate": 3.862200956937799e-06, |
|
"loss": 0.4439, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 8.36, |
|
"learning_rate": 3.858373205741627e-06, |
|
"loss": 0.462, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 8.45, |
|
"learning_rate": 3.8545454545454545e-06, |
|
"loss": 0.4303, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 8.55, |
|
"learning_rate": 3.850717703349282e-06, |
|
"loss": 0.4348, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 8.64, |
|
"learning_rate": 3.84688995215311e-06, |
|
"loss": 0.4616, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 8.73, |
|
"learning_rate": 3.843062200956938e-06, |
|
"loss": 0.4138, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 8.82, |
|
"learning_rate": 3.8392344497607655e-06, |
|
"loss": 0.4202, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 8.91, |
|
"learning_rate": 3.835406698564593e-06, |
|
"loss": 0.3929, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 3.831578947368421e-06, |
|
"loss": 0.399, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 9.09, |
|
"learning_rate": 3.827751196172249e-06, |
|
"loss": 0.3898, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 9.09, |
|
"eval_accuracy_safe": 0.00038357600780259137, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9996028711553041, |
|
"eval_iou_safe": 0.0003819299168198865, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9700920569815766, |
|
"eval_loss": 0.3121778070926666, |
|
"eval_mean_accuracy": 0.49999322358155335, |
|
"eval_mean_iou": 0.32349132896613214, |
|
"eval_overall_accuracy": 0.9700875353457322, |
|
"eval_runtime": 10.9682, |
|
"eval_samples_per_second": 6.109, |
|
"eval_steps_per_second": 0.456, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"learning_rate": 3.8239234449760766e-06, |
|
"loss": 0.3893, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 9.27, |
|
"learning_rate": 3.820095693779904e-06, |
|
"loss": 0.4505, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 9.36, |
|
"learning_rate": 3.816267942583732e-06, |
|
"loss": 0.4265, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"learning_rate": 3.8124401913875594e-06, |
|
"loss": 0.403, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 9.55, |
|
"learning_rate": 3.808612440191387e-06, |
|
"loss": 0.3879, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 9.64, |
|
"learning_rate": 3.804784688995215e-06, |
|
"loss": 0.405, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 9.73, |
|
"learning_rate": 3.800956937799043e-06, |
|
"loss": 0.3982, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"learning_rate": 3.797129186602871e-06, |
|
"loss": 0.3909, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 9.91, |
|
"learning_rate": 3.7933014354066986e-06, |
|
"loss": 0.3877, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 3.789473684210526e-06, |
|
"loss": 0.3513, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9998844812091585, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9703494968698986, |
|
"eval_loss": 0.2876318097114563, |
|
"eval_mean_accuracy": 0.49994224060457926, |
|
"eval_mean_iou": 0.3234498322899662, |
|
"eval_overall_accuracy": 0.9703494968698986, |
|
"eval_runtime": 10.9657, |
|
"eval_samples_per_second": 6.11, |
|
"eval_steps_per_second": 0.456, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 10.09, |
|
"learning_rate": 3.7856459330143537e-06, |
|
"loss": 0.4009, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 10.18, |
|
"learning_rate": 3.7818181818181815e-06, |
|
"loss": 0.3629, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 10.27, |
|
"learning_rate": 3.777990430622009e-06, |
|
"loss": 0.3697, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 10.36, |
|
"learning_rate": 3.7741626794258374e-06, |
|
"loss": 0.3489, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 10.45, |
|
"learning_rate": 3.770334928229665e-06, |
|
"loss": 0.369, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 10.55, |
|
"learning_rate": 3.766507177033493e-06, |
|
"loss": 0.3628, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 10.64, |
|
"learning_rate": 3.7626794258373202e-06, |
|
"loss": 0.365, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 10.73, |
|
"learning_rate": 3.758851674641148e-06, |
|
"loss": 0.3425, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 10.82, |
|
"learning_rate": 3.7550239234449757e-06, |
|
"loss": 0.3448, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 10.91, |
|
"learning_rate": 3.7511961722488035e-06, |
|
"loss": 0.4157, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 10.91, |
|
"eval_accuracy_safe": 1.9275176271487003e-06, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.999816777458711, |
|
"eval_iou_safe": 1.924298112263552e-06, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.970290201310306, |
|
"eval_loss": 0.2819870412349701, |
|
"eval_mean_accuracy": 0.4999093524881691, |
|
"eval_mean_iou": 0.3234307085361394, |
|
"eval_overall_accuracy": 0.9702838499154618, |
|
"eval_runtime": 10.472, |
|
"eval_samples_per_second": 6.398, |
|
"eval_steps_per_second": 0.477, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 3.7473684210526317e-06, |
|
"loss": 0.3494, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 11.09, |
|
"learning_rate": 3.7435406698564594e-06, |
|
"loss": 0.3522, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 11.18, |
|
"learning_rate": 3.7397129186602868e-06, |
|
"loss": 0.3473, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 11.27, |
|
"learning_rate": 3.7358851674641145e-06, |
|
"loss": 0.3539, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 11.36, |
|
"learning_rate": 3.7320574162679423e-06, |
|
"loss": 0.3872, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 11.45, |
|
"learning_rate": 3.72822966507177e-06, |
|
"loss": 0.3386, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 11.55, |
|
"learning_rate": 3.7244019138755978e-06, |
|
"loss": 0.347, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 11.64, |
|
"learning_rate": 3.720574162679426e-06, |
|
"loss": 0.356, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 11.73, |
|
"learning_rate": 3.7167464114832537e-06, |
|
"loss": 0.3492, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 11.82, |
|
"learning_rate": 3.712918660287081e-06, |
|
"loss": 0.3317, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 11.82, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9998623044174174, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9703347152502294, |
|
"eval_loss": 0.26932939887046814, |
|
"eval_mean_accuracy": 0.4999311522087087, |
|
"eval_mean_iou": 0.3234449050834098, |
|
"eval_overall_accuracy": 0.9703279751450268, |
|
"eval_runtime": 11.1056, |
|
"eval_samples_per_second": 6.033, |
|
"eval_steps_per_second": 0.45, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 11.91, |
|
"learning_rate": 3.709090909090909e-06, |
|
"loss": 0.3109, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 3.7052631578947366e-06, |
|
"loss": 0.3266, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 12.09, |
|
"learning_rate": 3.7014354066985643e-06, |
|
"loss": 0.3358, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 12.18, |
|
"learning_rate": 3.6976076555023925e-06, |
|
"loss": 0.3508, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 12.27, |
|
"learning_rate": 3.6937799043062203e-06, |
|
"loss": 0.3609, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 12.36, |
|
"learning_rate": 3.6899521531100476e-06, |
|
"loss": 0.3139, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 12.45, |
|
"learning_rate": 3.6861244019138753e-06, |
|
"loss": 0.3304, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 12.55, |
|
"learning_rate": 3.682296650717703e-06, |
|
"loss": 0.3292, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 12.64, |
|
"learning_rate": 3.678468899521531e-06, |
|
"loss": 0.3122, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 12.73, |
|
"learning_rate": 3.6746411483253586e-06, |
|
"loss": 0.321, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 12.73, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9998858892594278, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.970356332880103, |
|
"eval_loss": 0.2647153437137604, |
|
"eval_mean_accuracy": 0.4999429446297139, |
|
"eval_mean_iou": 0.32345211096003434, |
|
"eval_overall_accuracy": 0.9703508633286205, |
|
"eval_runtime": 11.6782, |
|
"eval_samples_per_second": 5.737, |
|
"eval_steps_per_second": 0.428, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 12.82, |
|
"learning_rate": 3.6708133971291868e-06, |
|
"loss": 0.3236, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 12.91, |
|
"learning_rate": 3.6669856459330145e-06, |
|
"loss": 0.3252, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 3.663157894736842e-06, |
|
"loss": 0.3171, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 13.09, |
|
"learning_rate": 3.6593301435406696e-06, |
|
"loss": 0.3249, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 13.18, |
|
"learning_rate": 3.6555023923444974e-06, |
|
"loss": 0.3035, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 13.27, |
|
"learning_rate": 3.651674641148325e-06, |
|
"loss": 0.3212, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 13.36, |
|
"learning_rate": 3.647846889952153e-06, |
|
"loss": 0.3009, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 13.45, |
|
"learning_rate": 3.644019138755981e-06, |
|
"loss": 0.3335, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 13.55, |
|
"learning_rate": 3.6401913875598084e-06, |
|
"loss": 0.2966, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 13.64, |
|
"learning_rate": 3.636363636363636e-06, |
|
"loss": 0.2887, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 13.64, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9999037832315998, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9703687812285815, |
|
"eval_loss": 0.2538875639438629, |
|
"eval_mean_accuracy": 0.4999518916157999, |
|
"eval_mean_iou": 0.3234562604095272, |
|
"eval_overall_accuracy": 0.9703682287415462, |
|
"eval_runtime": 10.4703, |
|
"eval_samples_per_second": 6.399, |
|
"eval_steps_per_second": 0.478, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 13.73, |
|
"learning_rate": 3.632535885167464e-06, |
|
"loss": 0.3162, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 13.82, |
|
"learning_rate": 3.6287081339712917e-06, |
|
"loss": 0.3097, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 13.91, |
|
"learning_rate": 3.6248803827751194e-06, |
|
"loss": 0.3177, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 3.621052631578947e-06, |
|
"loss": 0.2942, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 14.09, |
|
"learning_rate": 3.617224880382775e-06, |
|
"loss": 0.2976, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 14.18, |
|
"learning_rate": 3.6133971291866027e-06, |
|
"loss": 0.2883, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 14.27, |
|
"learning_rate": 3.6095693779904304e-06, |
|
"loss": 0.2901, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 14.36, |
|
"learning_rate": 3.605741626794258e-06, |
|
"loss": 0.2725, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 14.45, |
|
"learning_rate": 3.601913875598086e-06, |
|
"loss": 0.2876, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 14.55, |
|
"learning_rate": 3.5980861244019137e-06, |
|
"loss": 0.3008, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 14.55, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9999237892791757, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9703876438425548, |
|
"eval_loss": 0.2536369860172272, |
|
"eval_mean_accuracy": 0.49996189463958784, |
|
"eval_mean_iou": 0.3234625479475183, |
|
"eval_overall_accuracy": 0.9703876438425548, |
|
"eval_runtime": 11.5733, |
|
"eval_samples_per_second": 5.789, |
|
"eval_steps_per_second": 0.432, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 14.64, |
|
"learning_rate": 3.5942583732057415e-06, |
|
"loss": 0.3092, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 14.73, |
|
"learning_rate": 3.5904306220095692e-06, |
|
"loss": 0.2576, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 14.82, |
|
"learning_rate": 3.586602870813397e-06, |
|
"loss": 0.2861, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 14.91, |
|
"learning_rate": 3.5827751196172247e-06, |
|
"loss": 0.3094, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 3.5789473684210525e-06, |
|
"loss": 0.2665, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 15.09, |
|
"learning_rate": 3.5751196172248802e-06, |
|
"loss": 0.2972, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 15.18, |
|
"learning_rate": 3.571291866028708e-06, |
|
"loss": 0.2658, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 15.27, |
|
"learning_rate": 3.5674641148325353e-06, |
|
"loss": 0.2905, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 15.36, |
|
"learning_rate": 3.5636363636363635e-06, |
|
"loss": 0.2572, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 15.45, |
|
"learning_rate": 3.5598086124401913e-06, |
|
"loss": 0.2853, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 15.45, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.999935112350091, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.970398632448111, |
|
"eval_loss": 0.23974378407001495, |
|
"eval_mean_accuracy": 0.4999675561750455, |
|
"eval_mean_iou": 0.323466210816037, |
|
"eval_overall_accuracy": 0.970398632448111, |
|
"eval_runtime": 10.444, |
|
"eval_samples_per_second": 6.415, |
|
"eval_steps_per_second": 0.479, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 15.55, |
|
"learning_rate": 3.555980861244019e-06, |
|
"loss": 0.2842, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 15.64, |
|
"learning_rate": 3.5521531100478468e-06, |
|
"loss": 0.2953, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 15.73, |
|
"learning_rate": 3.5483253588516745e-06, |
|
"loss": 0.2651, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 15.82, |
|
"learning_rate": 3.5444976076555023e-06, |
|
"loss": 0.2886, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 15.91, |
|
"learning_rate": 3.5406698564593296e-06, |
|
"loss": 0.2602, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 3.536842105263158e-06, |
|
"loss": 0.2527, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 16.09, |
|
"learning_rate": 3.5330143540669856e-06, |
|
"loss": 0.2378, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 16.18, |
|
"learning_rate": 3.5291866028708133e-06, |
|
"loss": 0.2776, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 16.27, |
|
"learning_rate": 3.525358851674641e-06, |
|
"loss": 0.2698, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 16.36, |
|
"learning_rate": 3.521531100478469e-06, |
|
"loss": 0.2684, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 16.36, |
|
"eval_accuracy_safe": 0.0, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9999444406831249, |
|
"eval_iou_safe": 0.0, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9704076852371444, |
|
"eval_loss": 0.2320707142353058, |
|
"eval_mean_accuracy": 0.49997222034156247, |
|
"eval_mean_iou": 0.32346922841238146, |
|
"eval_overall_accuracy": 0.9704076852371444, |
|
"eval_runtime": 10.8231, |
|
"eval_samples_per_second": 6.19, |
|
"eval_steps_per_second": 0.462, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 16.45, |
|
"learning_rate": 3.517703349282296e-06, |
|
"loss": 0.2772, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 16.55, |
|
"learning_rate": 3.513875598086124e-06, |
|
"loss": 0.2784, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 16.64, |
|
"learning_rate": 3.510047846889952e-06, |
|
"loss": 0.2508, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 16.73, |
|
"learning_rate": 3.50622009569378e-06, |
|
"loss": 0.2542, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 16.82, |
|
"learning_rate": 3.5023923444976076e-06, |
|
"loss": 0.2578, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 16.91, |
|
"learning_rate": 3.4985645933014354e-06, |
|
"loss": 0.2542, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 3.494736842105263e-06, |
|
"loss": 0.2606, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 17.09, |
|
"learning_rate": 3.4909090909090904e-06, |
|
"loss": 0.2607, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 17.18, |
|
"learning_rate": 3.487081339712918e-06, |
|
"loss": 0.2417, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 17.27, |
|
"learning_rate": 3.4832535885167464e-06, |
|
"loss": 0.2585, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 17.27, |
|
"eval_accuracy_safe": 7.710070508594801e-06, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9999450860394984, |
|
"eval_iou_safe": 7.69974090371859e-06, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9704085325345925, |
|
"eval_loss": 0.2208096832036972, |
|
"eval_mean_accuracy": 0.4999763980550035, |
|
"eval_mean_iou": 0.3234720774251654, |
|
"eval_overall_accuracy": 0.9704085392738456, |
|
"eval_runtime": 11.7648, |
|
"eval_samples_per_second": 5.695, |
|
"eval_steps_per_second": 0.425, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 17.36, |
|
"learning_rate": 3.479425837320574e-06, |
|
"loss": 0.2631, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 17.45, |
|
"learning_rate": 3.475598086124402e-06, |
|
"loss": 0.2473, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 17.55, |
|
"learning_rate": 3.4717703349282296e-06, |
|
"loss": 0.2549, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 17.64, |
|
"learning_rate": 3.467942583732057e-06, |
|
"loss": 0.2364, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 17.73, |
|
"learning_rate": 3.4641148325358847e-06, |
|
"loss": 0.2363, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 17.82, |
|
"learning_rate": 3.460287081339713e-06, |
|
"loss": 0.2579, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 17.91, |
|
"learning_rate": 3.4564593301435407e-06, |
|
"loss": 0.2345, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 3.4526315789473684e-06, |
|
"loss": 0.2844, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 18.09, |
|
"learning_rate": 3.448803827751196e-06, |
|
"loss": 0.2902, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 18.18, |
|
"learning_rate": 3.444976076555024e-06, |
|
"loss": 0.2088, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 18.18, |
|
"eval_accuracy_safe": 0.008429034583521266, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9996887035529685, |
|
"eval_iou_safe": 0.00834470322205154, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9704011128022085, |
|
"eval_loss": 0.20112648606300354, |
|
"eval_mean_accuracy": 0.5040588690682448, |
|
"eval_mean_iou": 0.32624860534142003, |
|
"eval_overall_accuracy": 0.9704084823380655, |
|
"eval_runtime": 10.8546, |
|
"eval_samples_per_second": 6.173, |
|
"eval_steps_per_second": 0.461, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 18.27, |
|
"learning_rate": 3.4411483253588513e-06, |
|
"loss": 0.2356, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 18.36, |
|
"learning_rate": 3.437320574162679e-06, |
|
"loss": 0.234, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 18.45, |
|
"learning_rate": 3.433492822966507e-06, |
|
"loss": 0.2314, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 18.55, |
|
"learning_rate": 3.429665071770335e-06, |
|
"loss": 0.2514, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 18.64, |
|
"learning_rate": 3.4258373205741627e-06, |
|
"loss": 0.2345, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 18.73, |
|
"learning_rate": 3.4220095693779905e-06, |
|
"loss": 0.223, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 18.82, |
|
"learning_rate": 3.418181818181818e-06, |
|
"loss": 0.2321, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 18.91, |
|
"learning_rate": 3.4143540669856456e-06, |
|
"loss": 0.2277, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 3.4105263157894733e-06, |
|
"loss": 0.2287, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 19.09, |
|
"learning_rate": 3.4066985645933015e-06, |
|
"loss": 0.2518, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 19.09, |
|
"eval_accuracy_safe": 0.04677507025801751, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9988634100888913, |
|
"eval_iou_safe": 0.04509203479967148, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9706997641478909, |
|
"eval_loss": 0.2025505006313324, |
|
"eval_mean_accuracy": 0.5228192401734544, |
|
"eval_mean_iou": 0.33859726631585413, |
|
"eval_overall_accuracy": 0.9707402471286147, |
|
"eval_runtime": 11.7249, |
|
"eval_samples_per_second": 5.714, |
|
"eval_steps_per_second": 0.426, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 19.18, |
|
"learning_rate": 3.4028708133971292e-06, |
|
"loss": 0.2585, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 19.27, |
|
"learning_rate": 3.399043062200957e-06, |
|
"loss": 0.2251, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 19.36, |
|
"learning_rate": 3.3952153110047848e-06, |
|
"loss": 0.2321, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 19.45, |
|
"learning_rate": 3.391387559808612e-06, |
|
"loss": 0.2208, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 19.55, |
|
"learning_rate": 3.38755980861244e-06, |
|
"loss": 0.2056, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 19.64, |
|
"learning_rate": 3.3837320574162676e-06, |
|
"loss": 0.2267, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 19.73, |
|
"learning_rate": 3.3799043062200958e-06, |
|
"loss": 0.2294, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 19.82, |
|
"learning_rate": 3.3760765550239235e-06, |
|
"loss": 0.1943, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 19.91, |
|
"learning_rate": 3.3722488038277513e-06, |
|
"loss": 0.2065, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 3.3684210526315786e-06, |
|
"loss": 0.218, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_safe": 0.08791986152713367, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9983610294865674, |
|
"eval_iou_safe": 0.08342752835910436, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9713937664812292, |
|
"eval_loss": 0.18889041244983673, |
|
"eval_mean_accuracy": 0.5431404455068505, |
|
"eval_mean_iou": 0.5274106474201667, |
|
"eval_overall_accuracy": 0.971468057205428, |
|
"eval_runtime": 10.4226, |
|
"eval_samples_per_second": 6.428, |
|
"eval_steps_per_second": 0.48, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 20.09, |
|
"learning_rate": 3.3645933014354064e-06, |
|
"loss": 0.268, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 20.18, |
|
"learning_rate": 3.360765550239234e-06, |
|
"loss": 0.2117, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 20.27, |
|
"learning_rate": 3.356937799043062e-06, |
|
"loss": 0.1985, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 20.36, |
|
"learning_rate": 3.35311004784689e-06, |
|
"loss": 0.1991, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 20.45, |
|
"learning_rate": 3.349282296650718e-06, |
|
"loss": 0.2118, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 20.55, |
|
"learning_rate": 3.345454545454545e-06, |
|
"loss": 0.2384, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 20.64, |
|
"learning_rate": 3.341626794258373e-06, |
|
"loss": 0.2055, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 20.73, |
|
"learning_rate": 3.3377990430622007e-06, |
|
"loss": 0.2158, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 20.82, |
|
"learning_rate": 3.3339712918660284e-06, |
|
"loss": 0.2031, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 20.91, |
|
"learning_rate": 3.330143540669856e-06, |
|
"loss": 0.2046, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 20.91, |
|
"eval_accuracy_safe": 0.19307751319385816, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9968958358438674, |
|
"eval_iou_safe": 0.17520884641217957, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9729983355550175, |
|
"eval_loss": 0.18470104038715363, |
|
"eval_mean_accuracy": 0.5949866745188628, |
|
"eval_mean_iou": 0.5741035909835985, |
|
"eval_overall_accuracy": 0.9731523314518715, |
|
"eval_runtime": 11.9598, |
|
"eval_samples_per_second": 5.602, |
|
"eval_steps_per_second": 0.418, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 3.3263157894736843e-06, |
|
"loss": 0.2316, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 21.09, |
|
"learning_rate": 3.322488038277512e-06, |
|
"loss": 0.1925, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 21.18, |
|
"learning_rate": 3.3186602870813394e-06, |
|
"loss": 0.2185, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 21.27, |
|
"learning_rate": 3.314832535885167e-06, |
|
"loss": 0.2268, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 21.36, |
|
"learning_rate": 3.311004784688995e-06, |
|
"loss": 0.2062, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 21.45, |
|
"learning_rate": 3.3071770334928227e-06, |
|
"loss": 0.2012, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 21.55, |
|
"learning_rate": 3.3033492822966505e-06, |
|
"loss": 0.2023, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 21.64, |
|
"learning_rate": 3.2995215311004786e-06, |
|
"loss": 0.1949, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 21.73, |
|
"learning_rate": 3.295693779904306e-06, |
|
"loss": 0.2129, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 21.82, |
|
"learning_rate": 3.2918660287081337e-06, |
|
"loss": 0.2147, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 21.82, |
|
"eval_accuracy_safe": 0.20418001472623468, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9968364630575131, |
|
"eval_iou_safe": 0.18495645371941807, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9732613980096713, |
|
"eval_loss": 0.1766258329153061, |
|
"eval_mean_accuracy": 0.6005082388918739, |
|
"eval_mean_iou": 0.5791089258645447, |
|
"eval_overall_accuracy": 0.9734226625357101, |
|
"eval_runtime": 10.3683, |
|
"eval_samples_per_second": 6.462, |
|
"eval_steps_per_second": 0.482, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 21.91, |
|
"learning_rate": 3.2880382775119615e-06, |
|
"loss": 0.1957, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 3.2842105263157892e-06, |
|
"loss": 0.1871, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 22.09, |
|
"learning_rate": 3.280382775119617e-06, |
|
"loss": 0.2137, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 22.18, |
|
"learning_rate": 3.276555023923445e-06, |
|
"loss": 0.1965, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 22.27, |
|
"learning_rate": 3.272727272727273e-06, |
|
"loss": 0.2014, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 22.36, |
|
"learning_rate": 3.2688995215311003e-06, |
|
"loss": 0.1888, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 22.45, |
|
"learning_rate": 3.265071770334928e-06, |
|
"loss": 0.2214, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 22.55, |
|
"learning_rate": 3.2612440191387558e-06, |
|
"loss": 0.1947, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 22.64, |
|
"learning_rate": 3.2574162679425835e-06, |
|
"loss": 0.1854, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 22.73, |
|
"learning_rate": 3.2535885167464113e-06, |
|
"loss": 0.188, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 22.73, |
|
"eval_accuracy_safe": 0.20203854264247245, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.997185483518009, |
|
"eval_iou_safe": 0.18493758546160294, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9735402085935885, |
|
"eval_loss": 0.17257128655910492, |
|
"eval_mean_accuracy": 0.5996120130802407, |
|
"eval_mean_iou": 0.5792388970275957, |
|
"eval_overall_accuracy": 0.9736981178397563, |
|
"eval_runtime": 11.9222, |
|
"eval_samples_per_second": 5.62, |
|
"eval_steps_per_second": 0.419, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 22.82, |
|
"learning_rate": 3.2497607655502395e-06, |
|
"loss": 0.1837, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 22.91, |
|
"learning_rate": 3.245933014354067e-06, |
|
"loss": 0.22, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 3.2421052631578945e-06, |
|
"loss": 0.2045, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 23.09, |
|
"learning_rate": 3.2382775119617223e-06, |
|
"loss": 0.1979, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 23.18, |
|
"learning_rate": 3.23444976076555e-06, |
|
"loss": 0.2015, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 23.27, |
|
"learning_rate": 3.230622009569378e-06, |
|
"loss": 0.1811, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 23.36, |
|
"learning_rate": 3.2267942583732056e-06, |
|
"loss": 0.1812, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 23.45, |
|
"learning_rate": 3.2229665071770337e-06, |
|
"loss": 0.1955, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 23.55, |
|
"learning_rate": 3.219138755980861e-06, |
|
"loss": 0.1956, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 23.64, |
|
"learning_rate": 3.215311004784689e-06, |
|
"loss": 0.2175, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 23.64, |
|
"eval_accuracy_safe": 0.1897583278399081, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9973954590144141, |
|
"eval_iou_safe": 0.1748006022770048, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9733899999204132, |
|
"eval_loss": 0.17063008248806, |
|
"eval_mean_accuracy": 0.593576893427161, |
|
"eval_mean_iou": 0.574095301098709, |
|
"eval_overall_accuracy": 0.9735391531417619, |
|
"eval_runtime": 10.3294, |
|
"eval_samples_per_second": 6.486, |
|
"eval_steps_per_second": 0.484, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 23.73, |
|
"learning_rate": 3.2114832535885166e-06, |
|
"loss": 0.2014, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 23.82, |
|
"learning_rate": 3.2076555023923443e-06, |
|
"loss": 0.1943, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 23.91, |
|
"learning_rate": 3.203827751196172e-06, |
|
"loss": 0.1902, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 3.2e-06, |
|
"loss": 0.1767, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 24.09, |
|
"learning_rate": 3.1961722488038276e-06, |
|
"loss": 0.1836, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 24.18, |
|
"learning_rate": 3.1923444976076554e-06, |
|
"loss": 0.1745, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 24.27, |
|
"learning_rate": 3.188516746411483e-06, |
|
"loss": 0.1805, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 24.36, |
|
"learning_rate": 3.184688995215311e-06, |
|
"loss": 0.2039, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 24.45, |
|
"learning_rate": 3.1808612440191386e-06, |
|
"loss": 0.1827, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 24.55, |
|
"learning_rate": 3.1770334928229664e-06, |
|
"loss": 0.2059, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 24.55, |
|
"eval_accuracy_safe": 0.3006272142358742, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9961704552801475, |
|
"eval_iou_safe": 0.2670303179723187, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9754068347526109, |
|
"eval_loss": 0.16893711686134338, |
|
"eval_mean_accuracy": 0.6483988347580109, |
|
"eval_mean_iou": 0.6212185763624648, |
|
"eval_overall_accuracy": 0.975625223188258, |
|
"eval_runtime": 11.3681, |
|
"eval_samples_per_second": 5.894, |
|
"eval_steps_per_second": 0.44, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 24.64, |
|
"learning_rate": 3.173205741626794e-06, |
|
"loss": 0.1668, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 24.73, |
|
"learning_rate": 3.169377990430622e-06, |
|
"loss": 0.183, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 24.82, |
|
"learning_rate": 3.1655502392344497e-06, |
|
"loss": 0.2111, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 24.91, |
|
"learning_rate": 3.1617224880382774e-06, |
|
"loss": 0.1755, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 3.157894736842105e-06, |
|
"loss": 0.1962, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 25.09, |
|
"learning_rate": 3.154066985645933e-06, |
|
"loss": 0.1748, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 25.18, |
|
"learning_rate": 3.1502392344497607e-06, |
|
"loss": 0.1813, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 25.27, |
|
"learning_rate": 3.146411483253588e-06, |
|
"loss": 0.1882, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 25.36, |
|
"learning_rate": 3.142583732057416e-06, |
|
"loss": 0.1855, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 25.45, |
|
"learning_rate": 3.138755980861244e-06, |
|
"loss": 0.1776, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 25.45, |
|
"eval_accuracy_safe": 0.2869803894356614, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9966670276751107, |
|
"eval_iou_safe": 0.2586568730325289, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.97549630595009, |
|
"eval_loss": 0.16121132671833038, |
|
"eval_mean_accuracy": 0.6418237085553861, |
|
"eval_mean_iou": 0.6170765894913094, |
|
"eval_overall_accuracy": 0.9757040223078941, |
|
"eval_runtime": 10.9596, |
|
"eval_samples_per_second": 6.113, |
|
"eval_steps_per_second": 0.456, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 25.55, |
|
"learning_rate": 3.1349282296650717e-06, |
|
"loss": 0.1784, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 25.64, |
|
"learning_rate": 3.1311004784688995e-06, |
|
"loss": 0.1672, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 25.73, |
|
"learning_rate": 3.127272727272727e-06, |
|
"loss": 0.1759, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 25.82, |
|
"learning_rate": 3.1234449760765545e-06, |
|
"loss": 0.162, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 25.91, |
|
"learning_rate": 3.1196172248803823e-06, |
|
"loss": 0.1923, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 3.1157894736842105e-06, |
|
"loss": 0.2386, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 26.09, |
|
"learning_rate": 3.1119617224880382e-06, |
|
"loss": 0.1797, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 26.18, |
|
"learning_rate": 3.108133971291866e-06, |
|
"loss": 0.1789, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 26.27, |
|
"learning_rate": 3.1043062200956937e-06, |
|
"loss": 0.1767, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 26.36, |
|
"learning_rate": 3.1004784688995215e-06, |
|
"loss": 0.1585, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 26.36, |
|
"eval_accuracy_safe": 0.42542819804087106, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9944028241733601, |
|
"eval_iou_safe": 0.35934744914157324, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.977311106799397, |
|
"eval_loss": 0.1537064015865326, |
|
"eval_mean_accuracy": 0.7099155111071156, |
|
"eval_mean_iou": 0.6683292779704851, |
|
"eval_overall_accuracy": 0.9775962260232043, |
|
"eval_runtime": 11.3177, |
|
"eval_samples_per_second": 5.92, |
|
"eval_steps_per_second": 0.442, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 26.45, |
|
"learning_rate": 3.096650717703349e-06, |
|
"loss": 0.1764, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 26.55, |
|
"learning_rate": 3.0928229665071766e-06, |
|
"loss": 0.1974, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 26.64, |
|
"learning_rate": 3.0889952153110048e-06, |
|
"loss": 0.1666, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 26.73, |
|
"learning_rate": 3.0851674641148325e-06, |
|
"loss": 0.1899, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 26.82, |
|
"learning_rate": 3.0813397129186603e-06, |
|
"loss": 0.1647, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 26.91, |
|
"learning_rate": 3.077511961722488e-06, |
|
"loss": 0.16, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 3.0736842105263154e-06, |
|
"loss": 0.1782, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 27.09, |
|
"learning_rate": 3.069856459330143e-06, |
|
"loss": 0.1746, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 27.18, |
|
"learning_rate": 3.066028708133971e-06, |
|
"loss": 0.1644, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 27.27, |
|
"learning_rate": 3.062200956937799e-06, |
|
"loss": 0.1588, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 27.27, |
|
"eval_accuracy_safe": 0.2797734010277524, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.997013760053919, |
|
"eval_iou_safe": 0.25477705888341623, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.975626203425886, |
|
"eval_loss": 0.15269163250923157, |
|
"eval_mean_accuracy": 0.6383935805408356, |
|
"eval_mean_iou": 0.6152016311546511, |
|
"eval_overall_accuracy": 0.9758276298864564, |
|
"eval_runtime": 10.7571, |
|
"eval_samples_per_second": 6.228, |
|
"eval_steps_per_second": 0.465, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 27.36, |
|
"learning_rate": 3.058373205741627e-06, |
|
"loss": 0.1755, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 27.45, |
|
"learning_rate": 3.0545454545454546e-06, |
|
"loss": 0.1739, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 27.55, |
|
"learning_rate": 3.0507177033492823e-06, |
|
"loss": 0.164, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 27.64, |
|
"learning_rate": 3.0468899521531096e-06, |
|
"loss": 0.183, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 27.73, |
|
"learning_rate": 3.0430622009569374e-06, |
|
"loss": 0.1602, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 27.82, |
|
"learning_rate": 3.0392344497607656e-06, |
|
"loss": 0.1449, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 27.91, |
|
"learning_rate": 3.0354066985645933e-06, |
|
"loss": 0.1753, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 3.031578947368421e-06, |
|
"loss": 0.1904, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 28.09, |
|
"learning_rate": 3.027751196172249e-06, |
|
"loss": 0.1588, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 28.18, |
|
"learning_rate": 3.023923444976076e-06, |
|
"loss": 0.153, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 28.18, |
|
"eval_accuracy_safe": 0.4287859337473641, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.994641840706569, |
|
"eval_iou_safe": 0.36460202808904973, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9776442139822791, |
|
"eval_loss": 0.14515650272369385, |
|
"eval_mean_accuracy": 0.7117138872269666, |
|
"eval_mean_iou": 0.6711231210356644, |
|
"eval_overall_accuracy": 0.9779273645201726, |
|
"eval_runtime": 11.3084, |
|
"eval_samples_per_second": 5.925, |
|
"eval_steps_per_second": 0.442, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 28.27, |
|
"learning_rate": 3.020095693779904e-06, |
|
"loss": 0.1594, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 28.36, |
|
"learning_rate": 3.0162679425837317e-06, |
|
"loss": 0.1821, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 28.45, |
|
"learning_rate": 3.01244019138756e-06, |
|
"loss": 0.1644, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 28.55, |
|
"learning_rate": 3.0086124401913876e-06, |
|
"loss": 0.1711, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 28.64, |
|
"learning_rate": 3.0047846889952154e-06, |
|
"loss": 0.1571, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 28.73, |
|
"learning_rate": 3.000956937799043e-06, |
|
"loss": 0.1773, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 28.82, |
|
"learning_rate": 2.9971291866028705e-06, |
|
"loss": 0.1596, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 28.91, |
|
"learning_rate": 2.9933014354066982e-06, |
|
"loss": 0.1609, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 2.989473684210526e-06, |
|
"loss": 0.1647, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 29.09, |
|
"learning_rate": 2.985645933014354e-06, |
|
"loss": 0.1623, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 29.09, |
|
"eval_accuracy_safe": 0.4401370850536428, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9944793282379906, |
|
"eval_iou_safe": 0.3725626161484481, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9778165413193792, |
|
"eval_loss": 0.1442144215106964, |
|
"eval_mean_accuracy": 0.7173082066458167, |
|
"eval_mean_iou": 0.6751895787339137, |
|
"eval_overall_accuracy": 0.9781049472182545, |
|
"eval_runtime": 11.6309, |
|
"eval_samples_per_second": 5.761, |
|
"eval_steps_per_second": 0.43, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 29.18, |
|
"learning_rate": 2.981818181818182e-06, |
|
"loss": 0.1641, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 29.27, |
|
"learning_rate": 2.9779904306220097e-06, |
|
"loss": 0.1667, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 29.36, |
|
"learning_rate": 2.974162679425837e-06, |
|
"loss": 0.1811, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 29.45, |
|
"learning_rate": 2.9703349282296648e-06, |
|
"loss": 0.1639, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 29.55, |
|
"learning_rate": 2.9665071770334925e-06, |
|
"loss": 0.1707, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 29.64, |
|
"learning_rate": 2.9626794258373203e-06, |
|
"loss": 0.1458, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 29.73, |
|
"learning_rate": 2.9588516746411484e-06, |
|
"loss": 0.1455, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 29.82, |
|
"learning_rate": 2.955023923444976e-06, |
|
"loss": 0.1499, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 29.91, |
|
"learning_rate": 2.951196172248804e-06, |
|
"loss": 0.1571, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 2.9473684210526313e-06, |
|
"loss": 0.1603, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_safe": 0.4049618158758062, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9958304698088795, |
|
"eval_iou_safe": 0.3561710950360333, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9781153749897787, |
|
"eval_loss": 0.14068810641765594, |
|
"eval_mean_accuracy": 0.7003961428423429, |
|
"eval_mean_iou": 0.6671432350129061, |
|
"eval_overall_accuracy": 0.9783771571828358, |
|
"eval_runtime": 13.5235, |
|
"eval_samples_per_second": 4.954, |
|
"eval_steps_per_second": 0.37, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 30.09, |
|
"learning_rate": 2.943540669856459e-06, |
|
"loss": 0.1504, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 30.18, |
|
"learning_rate": 2.939712918660287e-06, |
|
"loss": 0.1434, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 30.27, |
|
"learning_rate": 2.9358851674641146e-06, |
|
"loss": 0.1525, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 30.36, |
|
"learning_rate": 2.9320574162679427e-06, |
|
"loss": 0.1589, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 30.45, |
|
"learning_rate": 2.9282296650717705e-06, |
|
"loss": 0.1517, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 30.55, |
|
"learning_rate": 2.924401913875598e-06, |
|
"loss": 0.1638, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 30.64, |
|
"learning_rate": 2.9205741626794256e-06, |
|
"loss": 0.1892, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 30.73, |
|
"learning_rate": 2.9167464114832533e-06, |
|
"loss": 0.1542, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 30.82, |
|
"learning_rate": 2.912918660287081e-06, |
|
"loss": 0.1422, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 30.91, |
|
"learning_rate": 2.909090909090909e-06, |
|
"loss": 0.1694, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 30.91, |
|
"eval_accuracy_safe": 0.4584928354169799, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9947577115099778, |
|
"eval_iou_safe": 0.39112826314300936, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9786278609893545, |
|
"eval_loss": 0.1343463510274887, |
|
"eval_mean_accuracy": 0.7266252734634788, |
|
"eval_mean_iou": 0.684878062066182, |
|
"eval_overall_accuracy": 0.9789173069284923, |
|
"eval_runtime": 11.7988, |
|
"eval_samples_per_second": 5.679, |
|
"eval_steps_per_second": 0.424, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 2.905263157894737e-06, |
|
"loss": 0.1586, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 31.09, |
|
"learning_rate": 2.9014354066985648e-06, |
|
"loss": 0.1398, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 31.18, |
|
"learning_rate": 2.897607655502392e-06, |
|
"loss": 0.1652, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 31.27, |
|
"learning_rate": 2.89377990430622e-06, |
|
"loss": 0.1538, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 31.36, |
|
"learning_rate": 2.8899521531100476e-06, |
|
"loss": 0.1564, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 31.45, |
|
"learning_rate": 2.8861244019138754e-06, |
|
"loss": 0.1537, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 31.55, |
|
"learning_rate": 2.882296650717703e-06, |
|
"loss": 0.1463, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 31.64, |
|
"learning_rate": 2.8784688995215313e-06, |
|
"loss": 0.1446, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 31.73, |
|
"learning_rate": 2.8746411483253586e-06, |
|
"loss": 0.1421, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 31.82, |
|
"learning_rate": 2.8708133971291864e-06, |
|
"loss": 0.1585, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 31.82, |
|
"eval_accuracy_safe": 0.38612418610568194, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9962063605620138, |
|
"eval_iou_safe": 0.34333223643618116, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9779338359482509, |
|
"eval_loss": 0.1352754831314087, |
|
"eval_mean_accuracy": 0.6911652733338479, |
|
"eval_mean_iou": 0.660633036192216, |
|
"eval_overall_accuracy": 0.9781855113470732, |
|
"eval_runtime": 10.374, |
|
"eval_samples_per_second": 6.458, |
|
"eval_steps_per_second": 0.482, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 31.91, |
|
"learning_rate": 2.866985645933014e-06, |
|
"loss": 0.1652, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 2.863157894736842e-06, |
|
"loss": 0.1985, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 32.09, |
|
"learning_rate": 2.8593301435406697e-06, |
|
"loss": 0.1471, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 32.18, |
|
"learning_rate": 2.855502392344498e-06, |
|
"loss": 0.1474, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 32.27, |
|
"learning_rate": 2.851674641148325e-06, |
|
"loss": 0.1454, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 32.36, |
|
"learning_rate": 2.847846889952153e-06, |
|
"loss": 0.1432, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 32.45, |
|
"learning_rate": 2.8440191387559807e-06, |
|
"loss": 0.1323, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 32.55, |
|
"learning_rate": 2.8401913875598084e-06, |
|
"loss": 0.1604, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 32.64, |
|
"learning_rate": 2.836363636363636e-06, |
|
"loss": 0.1515, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 32.73, |
|
"learning_rate": 2.832535885167464e-06, |
|
"loss": 0.1342, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 32.73, |
|
"eval_accuracy_safe": 0.4963030211911288, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9938814935611622, |
|
"eval_iou_safe": 0.4132349849379946, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9788741179805001, |
|
"eval_loss": 0.1338294893503189, |
|
"eval_mean_accuracy": 0.7450922573761455, |
|
"eval_mean_iou": 0.6960545514592473, |
|
"eval_overall_accuracy": 0.9791838233150653, |
|
"eval_runtime": 12.0645, |
|
"eval_samples_per_second": 5.553, |
|
"eval_steps_per_second": 0.414, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 32.82, |
|
"learning_rate": 2.828708133971292e-06, |
|
"loss": 0.1544, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 32.91, |
|
"learning_rate": 2.8248803827751195e-06, |
|
"loss": 0.1796, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 2.8210526315789472e-06, |
|
"loss": 0.15, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 33.09, |
|
"learning_rate": 2.817224880382775e-06, |
|
"loss": 0.1424, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 33.18, |
|
"learning_rate": 2.8133971291866027e-06, |
|
"loss": 0.1369, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 33.27, |
|
"learning_rate": 2.8095693779904305e-06, |
|
"loss": 0.1645, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 33.36, |
|
"learning_rate": 2.8057416267942582e-06, |
|
"loss": 0.1385, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 33.45, |
|
"learning_rate": 2.801913875598086e-06, |
|
"loss": 0.1455, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 33.55, |
|
"learning_rate": 2.7980861244019138e-06, |
|
"loss": 0.1577, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 33.64, |
|
"learning_rate": 2.7942583732057415e-06, |
|
"loss": 0.1358, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 33.64, |
|
"eval_accuracy_safe": 0.5048110839973632, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.993699327057575, |
|
"eval_iou_safe": 0.41823486576088, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9789443854555531, |
|
"eval_loss": 0.13420753180980682, |
|
"eval_mean_accuracy": 0.749255205527469, |
|
"eval_mean_iou": 0.6985896256082165, |
|
"eval_overall_accuracy": 0.9792583522511952, |
|
"eval_runtime": 10.8902, |
|
"eval_samples_per_second": 6.152, |
|
"eval_steps_per_second": 0.459, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 33.73, |
|
"learning_rate": 2.7904306220095693e-06, |
|
"loss": 0.1426, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 33.82, |
|
"learning_rate": 2.786602870813397e-06, |
|
"loss": 0.1332, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 33.91, |
|
"learning_rate": 2.7827751196172248e-06, |
|
"loss": 0.1415, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 2.7789473684210525e-06, |
|
"loss": 0.1376, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 34.09, |
|
"learning_rate": 2.7751196172248803e-06, |
|
"loss": 0.1378, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 34.18, |
|
"learning_rate": 2.771291866028708e-06, |
|
"loss": 0.1321, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 34.27, |
|
"learning_rate": 2.767464114832536e-06, |
|
"loss": 0.1382, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 34.36, |
|
"learning_rate": 2.7636363636363635e-06, |
|
"loss": 0.1418, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 34.45, |
|
"learning_rate": 2.7598086124401913e-06, |
|
"loss": 0.1481, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 34.55, |
|
"learning_rate": 2.755980861244019e-06, |
|
"loss": 0.1493, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 34.55, |
|
"eval_accuracy_safe": 0.48086938755054914, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9945674487173425, |
|
"eval_iou_safe": 0.40804123003145243, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9790967191500697, |
|
"eval_loss": 0.12969090044498444, |
|
"eval_mean_accuracy": 0.7377184181339458, |
|
"eval_mean_iou": 0.6935689745907611, |
|
"eval_overall_accuracy": 0.9793936316646746, |
|
"eval_runtime": 10.4894, |
|
"eval_samples_per_second": 6.387, |
|
"eval_steps_per_second": 0.477, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 34.64, |
|
"learning_rate": 2.7521531100478464e-06, |
|
"loss": 0.1437, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 34.73, |
|
"learning_rate": 2.7483253588516746e-06, |
|
"loss": 0.1472, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 34.82, |
|
"learning_rate": 2.7444976076555023e-06, |
|
"loss": 0.1412, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 34.91, |
|
"learning_rate": 2.74066985645933e-06, |
|
"loss": 0.1484, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 2.736842105263158e-06, |
|
"loss": 0.154, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 35.09, |
|
"learning_rate": 2.7330143540669856e-06, |
|
"loss": 0.1348, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 35.18, |
|
"learning_rate": 2.7291866028708133e-06, |
|
"loss": 0.139, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 35.27, |
|
"learning_rate": 2.7253588516746407e-06, |
|
"loss": 0.1368, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 35.36, |
|
"learning_rate": 2.721531100478469e-06, |
|
"loss": 0.1499, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 35.45, |
|
"learning_rate": 2.7177033492822966e-06, |
|
"loss": 0.1435, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 35.45, |
|
"eval_accuracy_safe": 0.5658305095200096, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9923169150369561, |
|
"eval_iou_safe": 0.4517890479393129, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.979374460845436, |
|
"eval_loss": 0.12713803350925446, |
|
"eval_mean_accuracy": 0.7790737122784828, |
|
"eval_mean_iou": 0.7155817543923745, |
|
"eval_overall_accuracy": 0.9797191904551947, |
|
"eval_runtime": 11.095, |
|
"eval_samples_per_second": 6.039, |
|
"eval_steps_per_second": 0.451, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 35.55, |
|
"learning_rate": 2.7138755980861244e-06, |
|
"loss": 0.1389, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 35.64, |
|
"learning_rate": 2.710047846889952e-06, |
|
"loss": 0.1338, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 35.73, |
|
"learning_rate": 2.70622009569378e-06, |
|
"loss": 0.1411, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 35.82, |
|
"learning_rate": 2.7023923444976072e-06, |
|
"loss": 0.1556, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 35.91, |
|
"learning_rate": 2.698564593301435e-06, |
|
"loss": 0.1377, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 2.694736842105263e-06, |
|
"loss": 0.1369, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 36.09, |
|
"learning_rate": 2.690909090909091e-06, |
|
"loss": 0.1292, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 36.18, |
|
"learning_rate": 2.6870813397129187e-06, |
|
"loss": 0.1583, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 36.27, |
|
"learning_rate": 2.6832535885167464e-06, |
|
"loss": 0.1229, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 36.36, |
|
"learning_rate": 2.679425837320574e-06, |
|
"loss": 0.1305, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 36.36, |
|
"eval_accuracy_safe": 0.4157327843763131, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.996766177881572, |
|
"eval_iou_safe": 0.3758054230365799, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9793497969358366, |
|
"eval_loss": 0.12249775975942612, |
|
"eval_mean_accuracy": 0.7062494811289426, |
|
"eval_mean_iou": 0.6775776099862082, |
|
"eval_overall_accuracy": 0.9796033830785039, |
|
"eval_runtime": 11.1918, |
|
"eval_samples_per_second": 5.987, |
|
"eval_steps_per_second": 0.447, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 36.45, |
|
"learning_rate": 2.6755980861244015e-06, |
|
"loss": 0.1488, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 36.55, |
|
"learning_rate": 2.6717703349282293e-06, |
|
"loss": 0.1464, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 36.64, |
|
"learning_rate": 2.6679425837320574e-06, |
|
"loss": 0.1271, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 36.73, |
|
"learning_rate": 2.664114832535885e-06, |
|
"loss": 0.1289, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 36.82, |
|
"learning_rate": 2.660287081339713e-06, |
|
"loss": 0.1291, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 36.91, |
|
"learning_rate": 2.6564593301435407e-06, |
|
"loss": 0.1388, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 2.652631578947368e-06, |
|
"loss": 0.1631, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 37.09, |
|
"learning_rate": 2.648803827751196e-06, |
|
"loss": 0.1404, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 37.18, |
|
"learning_rate": 2.6449760765550235e-06, |
|
"loss": 0.1367, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 37.27, |
|
"learning_rate": 2.6411483253588517e-06, |
|
"loss": 0.1496, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 37.27, |
|
"eval_accuracy_safe": 0.5385040921199225, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9933532400351403, |
|
"eval_iou_safe": 0.44198577745433837, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9795931225563403, |
|
"eval_loss": 0.12370359897613525, |
|
"eval_mean_accuracy": 0.7659286660775313, |
|
"eval_mean_iou": 0.7107894500053393, |
|
"eval_overall_accuracy": 0.9799177255203475, |
|
"eval_runtime": 10.3073, |
|
"eval_samples_per_second": 6.5, |
|
"eval_steps_per_second": 0.485, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 37.36, |
|
"learning_rate": 2.6373205741626795e-06, |
|
"loss": 0.13, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 37.45, |
|
"learning_rate": 2.6334928229665072e-06, |
|
"loss": 0.1364, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 37.55, |
|
"learning_rate": 2.629665071770335e-06, |
|
"loss": 0.127, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 37.64, |
|
"learning_rate": 2.6258373205741623e-06, |
|
"loss": 0.1319, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 37.73, |
|
"learning_rate": 2.62200956937799e-06, |
|
"loss": 0.1428, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 37.82, |
|
"learning_rate": 2.6181818181818183e-06, |
|
"loss": 0.1363, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 37.91, |
|
"learning_rate": 2.614354066985646e-06, |
|
"loss": 0.1275, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 2.6105263157894738e-06, |
|
"loss": 0.1578, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 38.09, |
|
"learning_rate": 2.6066985645933015e-06, |
|
"loss": 0.1307, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 38.18, |
|
"learning_rate": 2.602870813397129e-06, |
|
"loss": 0.1445, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 38.18, |
|
"eval_accuracy_safe": 0.5762718725062741, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9924268602954817, |
|
"eval_iou_safe": 0.4614568943562515, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9797902966849596, |
|
"eval_loss": 0.12068379670381546, |
|
"eval_mean_accuracy": 0.7843493664008778, |
|
"eval_mean_iou": 0.7206235955206055, |
|
"eval_overall_accuracy": 0.9801343092277869, |
|
"eval_runtime": 10.7505, |
|
"eval_samples_per_second": 6.232, |
|
"eval_steps_per_second": 0.465, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 38.27, |
|
"learning_rate": 2.5990430622009566e-06, |
|
"loss": 0.1382, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 38.36, |
|
"learning_rate": 2.5952153110047844e-06, |
|
"loss": 0.133, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 38.45, |
|
"learning_rate": 2.5913875598086125e-06, |
|
"loss": 0.136, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 38.55, |
|
"learning_rate": 2.5875598086124403e-06, |
|
"loss": 0.1289, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 38.64, |
|
"learning_rate": 2.583732057416268e-06, |
|
"loss": 0.1296, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 38.73, |
|
"learning_rate": 2.5799043062200954e-06, |
|
"loss": 0.1263, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 38.82, |
|
"learning_rate": 2.576076555023923e-06, |
|
"loss": 0.1309, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 38.91, |
|
"learning_rate": 2.572248803827751e-06, |
|
"loss": 0.1328, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 2.5684210526315787e-06, |
|
"loss": 0.1083, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 39.09, |
|
"learning_rate": 2.564593301435407e-06, |
|
"loss": 0.1307, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 39.09, |
|
"eval_accuracy_safe": 0.4852602727051939, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9956334014399426, |
|
"eval_iou_safe": 0.42437840298029433, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9802750640686686, |
|
"eval_loss": 0.11942077428102493, |
|
"eval_mean_accuracy": 0.7404468370725683, |
|
"eval_mean_iou": 0.7023267335244815, |
|
"eval_overall_accuracy": 0.9805577975600513, |
|
"eval_runtime": 11.0705, |
|
"eval_samples_per_second": 6.052, |
|
"eval_steps_per_second": 0.452, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 39.18, |
|
"learning_rate": 2.5607655502392346e-06, |
|
"loss": 0.1464, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 39.27, |
|
"learning_rate": 2.5569377990430623e-06, |
|
"loss": 0.131, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 39.36, |
|
"learning_rate": 2.5531100478468897e-06, |
|
"loss": 0.1256, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 39.45, |
|
"learning_rate": 2.5492822966507174e-06, |
|
"loss": 0.1231, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 39.55, |
|
"learning_rate": 2.545454545454545e-06, |
|
"loss": 0.1165, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 39.64, |
|
"learning_rate": 2.541626794258373e-06, |
|
"loss": 0.1234, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 39.73, |
|
"learning_rate": 2.537799043062201e-06, |
|
"loss": 0.1336, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 39.82, |
|
"learning_rate": 2.533971291866029e-06, |
|
"loss": 0.1271, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 39.91, |
|
"learning_rate": 2.530143540669856e-06, |
|
"loss": 0.1397, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 2.526315789473684e-06, |
|
"loss": 0.1379, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_safe": 0.5721855351367188, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9922185861931518, |
|
"eval_iou_safe": 0.4556878276420518, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9794644021123315, |
|
"eval_loss": 0.11739415675401688, |
|
"eval_mean_accuracy": 0.7822020606649354, |
|
"eval_mean_iou": 0.7175761148771916, |
|
"eval_overall_accuracy": 0.9798114833547108, |
|
"eval_runtime": 11.1185, |
|
"eval_samples_per_second": 6.026, |
|
"eval_steps_per_second": 0.45, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 40.09, |
|
"learning_rate": 2.5224880382775117e-06, |
|
"loss": 0.1205, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 40.18, |
|
"learning_rate": 2.5186602870813395e-06, |
|
"loss": 0.1271, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 40.27, |
|
"learning_rate": 2.5148325358851672e-06, |
|
"loss": 0.1292, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 40.36, |
|
"learning_rate": 2.5110047846889954e-06, |
|
"loss": 0.1333, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 40.45, |
|
"learning_rate": 2.507177033492823e-06, |
|
"loss": 0.1326, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 40.55, |
|
"learning_rate": 2.5033492822966505e-06, |
|
"loss": 0.1345, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 40.64, |
|
"learning_rate": 2.4995215311004783e-06, |
|
"loss": 0.13, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 40.73, |
|
"learning_rate": 2.495693779904306e-06, |
|
"loss": 0.151, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 40.82, |
|
"learning_rate": 2.4918660287081338e-06, |
|
"loss": 0.1345, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 40.91, |
|
"learning_rate": 2.4880382775119615e-06, |
|
"loss": 0.1202, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 40.91, |
|
"eval_accuracy_safe": 0.5398707021175708, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9942766276679765, |
|
"eval_iou_safe": 0.4544224441718747, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805439411158418, |
|
"eval_loss": 0.11430685967206955, |
|
"eval_mean_accuracy": 0.7670736648927736, |
|
"eval_mean_iou": 0.7174831926438583, |
|
"eval_overall_accuracy": 0.9808542052311684, |
|
"eval_runtime": 10.6429, |
|
"eval_samples_per_second": 6.295, |
|
"eval_steps_per_second": 0.47, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 2.4842105263157897e-06, |
|
"loss": 0.1246, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 41.09, |
|
"learning_rate": 2.480382775119617e-06, |
|
"loss": 0.1379, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 41.18, |
|
"learning_rate": 2.4765550239234448e-06, |
|
"loss": 0.1143, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 41.27, |
|
"learning_rate": 2.4727272727272725e-06, |
|
"loss": 0.1425, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 41.36, |
|
"learning_rate": 2.4688995215311003e-06, |
|
"loss": 0.1284, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 41.45, |
|
"learning_rate": 2.465071770334928e-06, |
|
"loss": 0.1436, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 41.55, |
|
"learning_rate": 2.461244019138756e-06, |
|
"loss": 0.1248, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 41.64, |
|
"learning_rate": 2.457416267942584e-06, |
|
"loss": 0.1195, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 41.73, |
|
"learning_rate": 2.4535885167464113e-06, |
|
"loss": 0.1224, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 41.82, |
|
"learning_rate": 2.449760765550239e-06, |
|
"loss": 0.1239, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 41.82, |
|
"eval_accuracy_safe": 0.5579739476717515, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9931808712146769, |
|
"eval_iou_safe": 0.45584717644594847, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9799958550830518, |
|
"eval_loss": 0.11501991748809814, |
|
"eval_mean_accuracy": 0.7755774094432142, |
|
"eval_mean_iou": 0.7179215157645001, |
|
"eval_overall_accuracy": 0.9803255565130888, |
|
"eval_runtime": 10.3848, |
|
"eval_samples_per_second": 6.452, |
|
"eval_steps_per_second": 0.481, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 41.91, |
|
"learning_rate": 2.445933014354067e-06, |
|
"loss": 0.1168, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 2.4421052631578946e-06, |
|
"loss": 0.1028, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 42.09, |
|
"learning_rate": 2.4382775119617223e-06, |
|
"loss": 0.1296, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 42.18, |
|
"learning_rate": 2.4344497607655505e-06, |
|
"loss": 0.113, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 42.27, |
|
"learning_rate": 2.430622009569378e-06, |
|
"loss": 0.1187, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 42.36, |
|
"learning_rate": 2.4267942583732056e-06, |
|
"loss": 0.1362, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 42.45, |
|
"learning_rate": 2.4229665071770334e-06, |
|
"loss": 0.1244, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 42.55, |
|
"learning_rate": 2.419138755980861e-06, |
|
"loss": 0.1272, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 42.64, |
|
"learning_rate": 2.415311004784689e-06, |
|
"loss": 0.1167, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 42.73, |
|
"learning_rate": 2.4114832535885166e-06, |
|
"loss": 0.1183, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 42.73, |
|
"eval_accuracy_safe": 0.47769476601863525, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9961165386885865, |
|
"eval_iou_safe": 0.42364293711912065, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805284404970969, |
|
"eval_loss": 0.1128791943192482, |
|
"eval_mean_accuracy": 0.7369056523536108, |
|
"eval_mean_iou": 0.7020856888081087, |
|
"eval_overall_accuracy": 0.9808031907722131, |
|
"eval_runtime": 10.8661, |
|
"eval_samples_per_second": 6.166, |
|
"eval_steps_per_second": 0.46, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 42.82, |
|
"learning_rate": 2.407655502392345e-06, |
|
"loss": 0.1203, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 42.91, |
|
"learning_rate": 2.403827751196172e-06, |
|
"loss": 0.1455, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 2.4e-06, |
|
"loss": 0.1139, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 43.09, |
|
"learning_rate": 2.3961722488038276e-06, |
|
"loss": 0.1254, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 43.18, |
|
"learning_rate": 2.3923444976076554e-06, |
|
"loss": 0.1316, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 43.27, |
|
"learning_rate": 2.388516746411483e-06, |
|
"loss": 0.1215, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 43.36, |
|
"learning_rate": 2.384688995215311e-06, |
|
"loss": 0.1256, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 43.45, |
|
"learning_rate": 2.3808612440191387e-06, |
|
"loss": 0.1339, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 43.55, |
|
"learning_rate": 2.3770334928229664e-06, |
|
"loss": 0.1328, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 43.64, |
|
"learning_rate": 2.373205741626794e-06, |
|
"loss": 0.1202, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 43.64, |
|
"eval_accuracy_safe": 0.593264867907217, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9927646163538233, |
|
"eval_iou_safe": 0.47932343707173647, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9806244962218924, |
|
"eval_loss": 0.11194637417793274, |
|
"eval_mean_accuracy": 0.7930147421305201, |
|
"eval_mean_iou": 0.7299739666468145, |
|
"eval_overall_accuracy": 0.9809640343509504, |
|
"eval_runtime": 10.421, |
|
"eval_samples_per_second": 6.429, |
|
"eval_steps_per_second": 0.48, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 43.73, |
|
"learning_rate": 2.369377990430622e-06, |
|
"loss": 0.1129, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 43.82, |
|
"learning_rate": 2.3655502392344497e-06, |
|
"loss": 0.1249, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 43.91, |
|
"learning_rate": 2.3617224880382774e-06, |
|
"loss": 0.1222, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 2.3578947368421048e-06, |
|
"loss": 0.1237, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 44.09, |
|
"learning_rate": 2.354066985645933e-06, |
|
"loss": 0.1188, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 44.18, |
|
"learning_rate": 2.3502392344497607e-06, |
|
"loss": 0.1145, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 44.27, |
|
"learning_rate": 2.3464114832535885e-06, |
|
"loss": 0.117, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 44.36, |
|
"learning_rate": 2.3425837320574162e-06, |
|
"loss": 0.1173, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 44.45, |
|
"learning_rate": 2.338755980861244e-06, |
|
"loss": 0.1351, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 44.55, |
|
"learning_rate": 2.3349282296650717e-06, |
|
"loss": 0.1276, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 44.55, |
|
"eval_accuracy_safe": 0.5424612858084588, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9942335647972413, |
|
"eval_iou_safe": 0.4560598940187007, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9805777244564327, |
|
"eval_loss": 0.11313802003860474, |
|
"eval_mean_accuracy": 0.76834742530285, |
|
"eval_mean_iou": 0.7183188092375666, |
|
"eval_overall_accuracy": 0.9808889360570195, |
|
"eval_runtime": 11.0555, |
|
"eval_samples_per_second": 6.06, |
|
"eval_steps_per_second": 0.452, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 44.64, |
|
"learning_rate": 2.331100478468899e-06, |
|
"loss": 0.1254, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 44.73, |
|
"learning_rate": 2.3272727272727272e-06, |
|
"loss": 0.1121, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 44.82, |
|
"learning_rate": 2.323444976076555e-06, |
|
"loss": 0.1126, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 44.91, |
|
"learning_rate": 2.3196172248803828e-06, |
|
"loss": 0.1346, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 2.3157894736842105e-06, |
|
"loss": 0.1102, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 45.09, |
|
"learning_rate": 2.3119617224880383e-06, |
|
"loss": 0.1278, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 45.18, |
|
"learning_rate": 2.3081339712918656e-06, |
|
"loss": 0.1173, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 45.27, |
|
"learning_rate": 2.3043062200956934e-06, |
|
"loss": 0.1239, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 45.36, |
|
"learning_rate": 2.3004784688995215e-06, |
|
"loss": 0.1201, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 45.45, |
|
"learning_rate": 2.2966507177033493e-06, |
|
"loss": 0.1172, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 45.45, |
|
"eval_accuracy_safe": 0.6271833955921526, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9898213219409551, |
|
"eval_iou_safe": 0.4700070490079388, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.978715258102556, |
|
"eval_loss": 0.11352450400590897, |
|
"eval_mean_accuracy": 0.8085023587665539, |
|
"eval_mean_iou": 0.7243611535552474, |
|
"eval_overall_accuracy": 0.9791095790578358, |
|
"eval_runtime": 10.616, |
|
"eval_samples_per_second": 6.311, |
|
"eval_steps_per_second": 0.471, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 45.55, |
|
"learning_rate": 2.292822966507177e-06, |
|
"loss": 0.1216, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 45.64, |
|
"learning_rate": 2.288995215311005e-06, |
|
"loss": 0.1183, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 45.73, |
|
"learning_rate": 2.2851674641148326e-06, |
|
"loss": 0.1205, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 45.82, |
|
"learning_rate": 2.28133971291866e-06, |
|
"loss": 0.152, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 45.91, |
|
"learning_rate": 2.2775119617224876e-06, |
|
"loss": 0.1116, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 2.273684210526316e-06, |
|
"loss": 0.143, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 46.09, |
|
"learning_rate": 2.2698564593301436e-06, |
|
"loss": 0.1235, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 46.18, |
|
"learning_rate": 2.2660287081339713e-06, |
|
"loss": 0.1128, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 46.27, |
|
"learning_rate": 2.262200956937799e-06, |
|
"loss": 0.1084, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 46.36, |
|
"learning_rate": 2.2583732057416264e-06, |
|
"loss": 0.1288, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 46.36, |
|
"eval_accuracy_safe": 0.42358163615406264, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9973644819084901, |
|
"eval_iou_safe": 0.3898272913695354, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9801677166712226, |
|
"eval_loss": 0.11046311259269714, |
|
"eval_mean_accuracy": 0.7104730590312764, |
|
"eval_mean_iou": 0.6849975040203791, |
|
"eval_overall_accuracy": 0.980415856660302, |
|
"eval_runtime": 10.6066, |
|
"eval_samples_per_second": 6.317, |
|
"eval_steps_per_second": 0.471, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 46.45, |
|
"learning_rate": 2.254545454545454e-06, |
|
"loss": 0.1269, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 46.55, |
|
"learning_rate": 2.250717703349282e-06, |
|
"loss": 0.1058, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 46.64, |
|
"learning_rate": 2.24688995215311e-06, |
|
"loss": 0.1251, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 46.73, |
|
"learning_rate": 2.243062200956938e-06, |
|
"loss": 0.1636, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 46.82, |
|
"learning_rate": 2.2392344497607656e-06, |
|
"loss": 0.1107, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 46.91, |
|
"learning_rate": 2.2354066985645934e-06, |
|
"loss": 0.118, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 2.2315789473684207e-06, |
|
"loss": 0.1174, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 47.09, |
|
"learning_rate": 2.2277511961722485e-06, |
|
"loss": 0.1267, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 47.18, |
|
"learning_rate": 2.2239234449760762e-06, |
|
"loss": 0.1232, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 47.27, |
|
"learning_rate": 2.2200956937799044e-06, |
|
"loss": 0.1185, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 47.27, |
|
"eval_accuracy_safe": 0.6035404643775467, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9914433958511564, |
|
"eval_iou_safe": 0.471103501402246, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.979622075379713, |
|
"eval_loss": 0.1129835844039917, |
|
"eval_mean_accuracy": 0.7974919301143515, |
|
"eval_mean_iou": 0.7253627883909795, |
|
"eval_overall_accuracy": 0.9799853652270872, |
|
"eval_runtime": 10.9811, |
|
"eval_samples_per_second": 6.101, |
|
"eval_steps_per_second": 0.455, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 47.36, |
|
"learning_rate": 2.216267942583732e-06, |
|
"loss": 0.123, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 47.45, |
|
"learning_rate": 2.21244019138756e-06, |
|
"loss": 0.1112, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 47.55, |
|
"learning_rate": 2.2086124401913872e-06, |
|
"loss": 0.1046, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 47.64, |
|
"learning_rate": 2.204784688995215e-06, |
|
"loss": 0.1181, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 47.73, |
|
"learning_rate": 2.2009569377990427e-06, |
|
"loss": 0.1084, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 47.82, |
|
"learning_rate": 2.197129186602871e-06, |
|
"loss": 0.1212, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 47.91, |
|
"learning_rate": 2.1933014354066987e-06, |
|
"loss": 0.1342, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 2.1894736842105264e-06, |
|
"loss": 0.1148, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 48.09, |
|
"learning_rate": 2.185645933014354e-06, |
|
"loss": 0.1158, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 48.18, |
|
"learning_rate": 2.1818181818181815e-06, |
|
"loss": 0.1045, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 48.18, |
|
"eval_accuracy_safe": 0.5750498263306618, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9930318525611789, |
|
"eval_iou_safe": 0.4679260257256816, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9803515847470109, |
|
"eval_loss": 0.11019009351730347, |
|
"eval_mean_accuracy": 0.7840408394459204, |
|
"eval_mean_iou": 0.7241388052363462, |
|
"eval_overall_accuracy": 0.9806853337074394, |
|
"eval_runtime": 10.6094, |
|
"eval_samples_per_second": 6.315, |
|
"eval_steps_per_second": 0.471, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 48.27, |
|
"learning_rate": 2.1779904306220093e-06, |
|
"loss": 0.119, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 48.36, |
|
"learning_rate": 2.174162679425837e-06, |
|
"loss": 0.1194, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 48.45, |
|
"learning_rate": 2.1703349282296652e-06, |
|
"loss": 0.1058, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 48.55, |
|
"learning_rate": 2.166507177033493e-06, |
|
"loss": 0.1141, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 48.64, |
|
"learning_rate": 2.1626794258373207e-06, |
|
"loss": 0.1087, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 48.73, |
|
"learning_rate": 2.158851674641148e-06, |
|
"loss": 0.124, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 48.82, |
|
"learning_rate": 2.155023923444976e-06, |
|
"loss": 0.1151, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 48.91, |
|
"learning_rate": 2.1511961722488036e-06, |
|
"loss": 0.1327, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 2.1473684210526313e-06, |
|
"loss": 0.1204, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 49.09, |
|
"learning_rate": 2.1435406698564595e-06, |
|
"loss": 0.1211, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 49.09, |
|
"eval_accuracy_safe": 0.5811735498321132, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9929194432146821, |
|
"eval_iou_safe": 0.4714920819591893, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9804210186295622, |
|
"eval_loss": 0.10691045969724655, |
|
"eval_mean_accuracy": 0.7870464965233976, |
|
"eval_mean_iou": 0.7259565502943758, |
|
"eval_overall_accuracy": 0.9807571297261253, |
|
"eval_runtime": 10.4844, |
|
"eval_samples_per_second": 6.39, |
|
"eval_steps_per_second": 0.477, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 49.18, |
|
"learning_rate": 2.1397129186602873e-06, |
|
"loss": 0.1222, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 49.27, |
|
"learning_rate": 2.135885167464115e-06, |
|
"loss": 0.1076, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 49.36, |
|
"learning_rate": 2.1320574162679423e-06, |
|
"loss": 0.1193, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 49.45, |
|
"learning_rate": 2.12822966507177e-06, |
|
"loss": 0.1208, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 49.55, |
|
"learning_rate": 2.124401913875598e-06, |
|
"loss": 0.1147, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 49.64, |
|
"learning_rate": 2.1205741626794256e-06, |
|
"loss": 0.1024, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 49.73, |
|
"learning_rate": 2.116746411483254e-06, |
|
"loss": 0.0985, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 49.82, |
|
"learning_rate": 2.1129186602870815e-06, |
|
"loss": 0.1168, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 49.91, |
|
"learning_rate": 2.109090909090909e-06, |
|
"loss": 0.1315, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 2.1052631578947366e-06, |
|
"loss": 0.1206, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_safe": 0.5221317573949213, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9953372415333057, |
|
"eval_iou_safe": 0.4527711197804365, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9810675158079116, |
|
"eval_loss": 0.10707142949104309, |
|
"eval_mean_accuracy": 0.7587344994641135, |
|
"eval_mean_iou": 0.7169193177941741, |
|
"eval_overall_accuracy": 0.9813595102794135, |
|
"eval_runtime": 10.5803, |
|
"eval_samples_per_second": 6.333, |
|
"eval_steps_per_second": 0.473, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 50.09, |
|
"learning_rate": 2.1014354066985644e-06, |
|
"loss": 0.1185, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 50.18, |
|
"learning_rate": 2.097607655502392e-06, |
|
"loss": 0.1185, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 50.27, |
|
"learning_rate": 2.09377990430622e-06, |
|
"loss": 0.1056, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 50.36, |
|
"learning_rate": 2.089952153110048e-06, |
|
"loss": 0.1211, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 50.45, |
|
"learning_rate": 2.0861244019138754e-06, |
|
"loss": 0.1243, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 50.55, |
|
"learning_rate": 2.082296650717703e-06, |
|
"loss": 0.1082, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 50.64, |
|
"learning_rate": 2.078468899521531e-06, |
|
"loss": 0.1183, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 50.73, |
|
"learning_rate": 2.0746411483253587e-06, |
|
"loss": 0.1222, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 50.82, |
|
"learning_rate": 2.0708133971291864e-06, |
|
"loss": 0.108, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 50.91, |
|
"learning_rate": 2.066985645933014e-06, |
|
"loss": 0.1193, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 50.91, |
|
"eval_accuracy_safe": 0.4955589993870494, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9961437609937925, |
|
"eval_iou_safe": 0.43983467087288786, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_iou_unsafe": 0.9810803435069183, |
|
"eval_loss": 0.10526557266712189, |
|
"eval_mean_accuracy": 0.745851380190421, |
|
"eval_mean_iou": 0.7104575071899031, |
|
"eval_overall_accuracy": 0.9813572897839902, |
|
"eval_runtime": 11.1855, |
|
"eval_samples_per_second": 5.99, |
|
"eval_steps_per_second": 0.447, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 2.0631578947368424e-06, |
|
"loss": 0.1199, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 51.09, |
|
"learning_rate": 2.0593301435406697e-06, |
|
"loss": 0.1011, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 51.18, |
|
"learning_rate": 2.0555023923444975e-06, |
|
"loss": 0.111, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 51.27, |
|
"learning_rate": 2.051674641148325e-06, |
|
"loss": 0.1125, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 51.36, |
|
"learning_rate": 2.047846889952153e-06, |
|
"loss": 0.1328, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 51.45, |
|
"learning_rate": 2.0440191387559807e-06, |
|
"loss": 0.1076, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 51.55, |
|
"learning_rate": 2.0401913875598085e-06, |
|
"loss": 0.1145, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 51.64, |
|
"learning_rate": 2.0363636363636362e-06, |
|
"loss": 0.1035, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 51.73, |
|
"learning_rate": 2.032535885167464e-06, |
|
"loss": 0.1105, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 51.82, |
|
"learning_rate": 2.0287081339712917e-06, |
|
"loss": 0.1116, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 51.82, |
|
"eval_accuracy_safe": 0.5256976650051465, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9951027424947108, |
|
"eval_iou_safe": 0.4528379892906064, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9809413209191069, |
|
"eval_loss": 0.1042569950222969, |
|
"eval_mean_accuracy": 0.7604002037499287, |
|
"eval_mean_iou": 0.7168896551048567, |
|
"eval_overall_accuracy": 0.9812372691595732, |
|
"eval_runtime": 10.8585, |
|
"eval_samples_per_second": 6.17, |
|
"eval_steps_per_second": 0.46, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 51.91, |
|
"learning_rate": 2.0248803827751195e-06, |
|
"loss": 0.1133, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 2.0210526315789473e-06, |
|
"loss": 0.1099, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 52.09, |
|
"learning_rate": 2.017224880382775e-06, |
|
"loss": 0.1218, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 52.18, |
|
"learning_rate": 2.013397129186603e-06, |
|
"loss": 0.1045, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 52.27, |
|
"learning_rate": 2.0095693779904305e-06, |
|
"loss": 0.1032, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 52.36, |
|
"learning_rate": 2.0057416267942583e-06, |
|
"loss": 0.1136, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 52.45, |
|
"learning_rate": 2.001913875598086e-06, |
|
"loss": 0.0962, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 52.55, |
|
"learning_rate": 1.9980861244019138e-06, |
|
"loss": 0.1201, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 52.64, |
|
"learning_rate": 1.9942583732057415e-06, |
|
"loss": 0.1166, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 52.73, |
|
"learning_rate": 1.9904306220095693e-06, |
|
"loss": 0.1218, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 52.73, |
|
"eval_accuracy_safe": 0.5936118210801038, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9921905425252888, |
|
"eval_iou_safe": 0.4724050601844111, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9800676658313282, |
|
"eval_loss": 0.10777109861373901, |
|
"eval_mean_accuracy": 0.7929011818026963, |
|
"eval_mean_iou": 0.7262363630078696, |
|
"eval_overall_accuracy": 0.9804171661832439, |
|
"eval_runtime": 10.4208, |
|
"eval_samples_per_second": 6.429, |
|
"eval_steps_per_second": 0.48, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 52.82, |
|
"learning_rate": 1.986602870813397e-06, |
|
"loss": 0.109, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 52.91, |
|
"learning_rate": 1.982775119617225e-06, |
|
"loss": 0.0965, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 1.9789473684210526e-06, |
|
"loss": 0.1157, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 53.09, |
|
"learning_rate": 1.9751196172248803e-06, |
|
"loss": 0.1081, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 53.18, |
|
"learning_rate": 1.971291866028708e-06, |
|
"loss": 0.1227, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 53.27, |
|
"learning_rate": 1.967464114832536e-06, |
|
"loss": 0.1076, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 53.36, |
|
"learning_rate": 1.9636363636363636e-06, |
|
"loss": 0.1168, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 53.45, |
|
"learning_rate": 1.9598086124401913e-06, |
|
"loss": 0.1099, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 53.55, |
|
"learning_rate": 1.955980861244019e-06, |
|
"loss": 0.0984, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 53.64, |
|
"learning_rate": 1.952153110047847e-06, |
|
"loss": 0.1284, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 53.64, |
|
"eval_accuracy_safe": 0.58715849206441, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9923766398358776, |
|
"eval_iou_safe": 0.46955410556755317, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9800613348526154, |
|
"eval_loss": 0.10542566329240799, |
|
"eval_mean_accuracy": 0.7897675659501437, |
|
"eval_mean_iou": 0.7248077202100842, |
|
"eval_overall_accuracy": 0.9804071454859492, |
|
"eval_runtime": 10.3478, |
|
"eval_samples_per_second": 6.475, |
|
"eval_steps_per_second": 0.483, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 53.73, |
|
"learning_rate": 1.9483253588516746e-06, |
|
"loss": 0.1142, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 53.82, |
|
"learning_rate": 1.9444976076555024e-06, |
|
"loss": 0.1066, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 53.91, |
|
"learning_rate": 1.94066985645933e-06, |
|
"loss": 0.0976, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 1.936842105263158e-06, |
|
"loss": 0.0958, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 54.09, |
|
"learning_rate": 1.9330143540669856e-06, |
|
"loss": 0.1366, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 54.18, |
|
"learning_rate": 1.9291866028708134e-06, |
|
"loss": 0.1249, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 54.27, |
|
"learning_rate": 1.925358851674641e-06, |
|
"loss": 0.1053, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 54.36, |
|
"learning_rate": 1.921531100478469e-06, |
|
"loss": 0.1064, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 54.45, |
|
"learning_rate": 1.9177033492822967e-06, |
|
"loss": 0.0949, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 54.55, |
|
"learning_rate": 1.9138755980861244e-06, |
|
"loss": 0.096, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 54.55, |
|
"eval_accuracy_safe": 0.5451308977220597, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9942086892424842, |
|
"eval_iou_safe": 0.45798961642974895, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9806317785722483, |
|
"eval_loss": 0.10281717777252197, |
|
"eval_mean_accuracy": 0.7696697934822719, |
|
"eval_mean_iou": 0.7193106975009986, |
|
"eval_overall_accuracy": 0.9809436513416803, |
|
"eval_runtime": 10.3139, |
|
"eval_samples_per_second": 6.496, |
|
"eval_steps_per_second": 0.485, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 54.64, |
|
"learning_rate": 1.910047846889952e-06, |
|
"loss": 0.1136, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 54.73, |
|
"learning_rate": 1.9062200956937797e-06, |
|
"loss": 0.1175, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 54.82, |
|
"learning_rate": 1.9023923444976075e-06, |
|
"loss": 0.1107, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 54.91, |
|
"learning_rate": 1.8985645933014354e-06, |
|
"loss": 0.096, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 1.894736842105263e-06, |
|
"loss": 0.1316, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 55.09, |
|
"learning_rate": 1.8909090909090907e-06, |
|
"loss": 0.1177, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 55.18, |
|
"learning_rate": 1.8870813397129187e-06, |
|
"loss": 0.1111, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 55.27, |
|
"learning_rate": 1.8832535885167464e-06, |
|
"loss": 0.1068, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 55.36, |
|
"learning_rate": 1.879425837320574e-06, |
|
"loss": 0.1087, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 55.45, |
|
"learning_rate": 1.8755980861244017e-06, |
|
"loss": 0.1091, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 55.45, |
|
"eval_accuracy_safe": 0.6013989922937846, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9916947328242215, |
|
"eval_iou_safe": 0.4724773003979623, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9798073122435136, |
|
"eval_loss": 0.10218217223882675, |
|
"eval_mean_accuracy": 0.7965468625590031, |
|
"eval_mean_iou": 0.726142306320738, |
|
"eval_overall_accuracy": 0.9801660224572936, |
|
"eval_runtime": 11.0365, |
|
"eval_samples_per_second": 6.071, |
|
"eval_steps_per_second": 0.453, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 55.55, |
|
"learning_rate": 1.8717703349282297e-06, |
|
"loss": 0.1026, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 55.64, |
|
"learning_rate": 1.8679425837320573e-06, |
|
"loss": 0.0904, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 55.73, |
|
"learning_rate": 1.864114832535885e-06, |
|
"loss": 0.1088, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 55.82, |
|
"learning_rate": 1.860287081339713e-06, |
|
"loss": 0.1116, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 55.91, |
|
"learning_rate": 1.8564593301435405e-06, |
|
"loss": 0.1021, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 1.8526315789473683e-06, |
|
"loss": 0.1323, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 56.09, |
|
"learning_rate": 1.8488038277511962e-06, |
|
"loss": 0.1097, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 56.18, |
|
"learning_rate": 1.8449760765550238e-06, |
|
"loss": 0.0945, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 56.27, |
|
"learning_rate": 1.8411483253588515e-06, |
|
"loss": 0.1039, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 56.36, |
|
"learning_rate": 1.8373205741626793e-06, |
|
"loss": 0.1068, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 56.36, |
|
"eval_accuracy_safe": 0.492559781959206, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9961584281840974, |
|
"eval_iou_safe": 0.4373597635384344, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.981006588415475, |
|
"eval_loss": 0.10153985768556595, |
|
"eval_mean_accuracy": 0.7443591050716517, |
|
"eval_mean_iou": 0.7091831759769547, |
|
"eval_overall_accuracy": 0.9812829316552005, |
|
"eval_runtime": 10.1863, |
|
"eval_samples_per_second": 6.577, |
|
"eval_steps_per_second": 0.491, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 56.45, |
|
"learning_rate": 1.8334928229665073e-06, |
|
"loss": 0.1031, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 56.55, |
|
"learning_rate": 1.8296650717703348e-06, |
|
"loss": 0.0976, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 56.64, |
|
"learning_rate": 1.8258373205741626e-06, |
|
"loss": 0.1113, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 56.73, |
|
"learning_rate": 1.8220095693779905e-06, |
|
"loss": 0.1008, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 56.82, |
|
"learning_rate": 1.818181818181818e-06, |
|
"loss": 0.102, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 56.91, |
|
"learning_rate": 1.8143540669856458e-06, |
|
"loss": 0.1101, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 1.8105263157894736e-06, |
|
"loss": 0.1363, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 57.09, |
|
"learning_rate": 1.8066985645933013e-06, |
|
"loss": 0.1012, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 57.18, |
|
"learning_rate": 1.802870813397129e-06, |
|
"loss": 0.1346, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 57.27, |
|
"learning_rate": 1.7990430622009569e-06, |
|
"loss": 0.106, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 57.27, |
|
"eval_accuracy_safe": 0.5713085146163661, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9936834278232846, |
|
"eval_iou_safe": 0.4731230854937291, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9808845669247197, |
|
"eval_loss": 0.10105204582214355, |
|
"eval_mean_accuracy": 0.7824959712198254, |
|
"eval_mean_iou": 0.7270038262092244, |
|
"eval_overall_accuracy": 0.9812071501319088, |
|
"eval_runtime": 10.4732, |
|
"eval_samples_per_second": 6.397, |
|
"eval_steps_per_second": 0.477, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 57.36, |
|
"learning_rate": 1.7952153110047846e-06, |
|
"loss": 0.1141, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 57.45, |
|
"learning_rate": 1.7913875598086124e-06, |
|
"loss": 0.105, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 57.55, |
|
"learning_rate": 1.7875598086124401e-06, |
|
"loss": 0.1156, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 57.64, |
|
"learning_rate": 1.7837320574162677e-06, |
|
"loss": 0.0997, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 57.73, |
|
"learning_rate": 1.7799043062200956e-06, |
|
"loss": 0.0994, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 57.82, |
|
"learning_rate": 1.7760765550239234e-06, |
|
"loss": 0.103, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 57.91, |
|
"learning_rate": 1.7722488038277511e-06, |
|
"loss": 0.1301, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 1.768421052631579e-06, |
|
"loss": 0.1008, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 58.09, |
|
"learning_rate": 1.7645933014354067e-06, |
|
"loss": 0.1128, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 58.18, |
|
"learning_rate": 1.7607655502392344e-06, |
|
"loss": 0.1009, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 58.18, |
|
"eval_accuracy_safe": 0.45120489126873065, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9968523622918036, |
|
"eval_iou_safe": 0.40891741330729336, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9804745786419761, |
|
"eval_loss": 0.10278800129890442, |
|
"eval_mean_accuracy": 0.7240286267802671, |
|
"eval_mean_iou": 0.6946959959746347, |
|
"eval_overall_accuracy": 0.9807348109003323, |
|
"eval_runtime": 10.4926, |
|
"eval_samples_per_second": 6.385, |
|
"eval_steps_per_second": 0.477, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 58.27, |
|
"learning_rate": 1.756937799043062e-06, |
|
"loss": 0.1043, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 58.36, |
|
"learning_rate": 1.75311004784689e-06, |
|
"loss": 0.1104, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 58.45, |
|
"learning_rate": 1.7492822966507177e-06, |
|
"loss": 0.0972, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 58.55, |
|
"learning_rate": 1.7454545454545452e-06, |
|
"loss": 0.1032, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 58.64, |
|
"learning_rate": 1.7416267942583732e-06, |
|
"loss": 0.1053, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 58.73, |
|
"learning_rate": 1.737799043062201e-06, |
|
"loss": 0.1061, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 58.82, |
|
"learning_rate": 1.7339712918660285e-06, |
|
"loss": 0.117, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 58.91, |
|
"learning_rate": 1.7301435406698565e-06, |
|
"loss": 0.1165, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 1.7263157894736842e-06, |
|
"loss": 0.0908, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 59.09, |
|
"learning_rate": 1.722488038277512e-06, |
|
"loss": 0.1018, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 59.09, |
|
"eval_accuracy_safe": 0.6052867953477434, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9918838809103937, |
|
"eval_iou_safe": 0.4778646863682016, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9801087838204473, |
|
"eval_loss": 0.1022411584854126, |
|
"eval_mean_accuracy": 0.7985853381290686, |
|
"eval_mean_iou": 0.7289867350943244, |
|
"eval_overall_accuracy": 0.9804644228807137, |
|
"eval_runtime": 10.6805, |
|
"eval_samples_per_second": 6.273, |
|
"eval_steps_per_second": 0.468, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 59.18, |
|
"learning_rate": 1.7186602870813395e-06, |
|
"loss": 0.0988, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 59.27, |
|
"learning_rate": 1.7148325358851675e-06, |
|
"loss": 0.0946, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 59.36, |
|
"learning_rate": 1.7110047846889952e-06, |
|
"loss": 0.1154, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 59.45, |
|
"learning_rate": 1.7071770334928228e-06, |
|
"loss": 0.1091, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 59.55, |
|
"learning_rate": 1.7033492822966507e-06, |
|
"loss": 0.1015, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 59.64, |
|
"learning_rate": 1.6995215311004785e-06, |
|
"loss": 0.1099, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 59.73, |
|
"learning_rate": 1.695693779904306e-06, |
|
"loss": 0.1035, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 59.82, |
|
"learning_rate": 1.6918660287081338e-06, |
|
"loss": 0.0961, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 59.91, |
|
"learning_rate": 1.6880382775119618e-06, |
|
"loss": 0.129, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 1.6842105263157893e-06, |
|
"loss": 0.1012, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_safe": 0.516669172439582, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9949175838843014, |
|
"eval_iou_safe": 0.4427407912710119, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9804931878913242, |
|
"eval_loss": 0.10163220763206482, |
|
"eval_mean_accuracy": 0.7557933781619417, |
|
"eval_mean_iou": 0.711616989581168, |
|
"eval_overall_accuracy": 0.980790892643715, |
|
"eval_runtime": 10.6583, |
|
"eval_samples_per_second": 6.286, |
|
"eval_steps_per_second": 0.469, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 60.09, |
|
"learning_rate": 1.680382775119617e-06, |
|
"loss": 0.0998, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 60.18, |
|
"learning_rate": 1.676555023923445e-06, |
|
"loss": 0.0925, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 60.27, |
|
"learning_rate": 1.6727272727272726e-06, |
|
"loss": 0.1051, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 60.36, |
|
"learning_rate": 1.6688995215311003e-06, |
|
"loss": 0.1022, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 60.45, |
|
"learning_rate": 1.665071770334928e-06, |
|
"loss": 0.0945, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 60.55, |
|
"learning_rate": 1.661244019138756e-06, |
|
"loss": 0.1073, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 60.64, |
|
"learning_rate": 1.6574162679425836e-06, |
|
"loss": 0.095, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 60.73, |
|
"learning_rate": 1.6535885167464114e-06, |
|
"loss": 0.1152, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 60.82, |
|
"learning_rate": 1.6497607655502393e-06, |
|
"loss": 0.1185, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 60.91, |
|
"learning_rate": 1.6459330143540669e-06, |
|
"loss": 0.1052, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 60.91, |
|
"eval_accuracy_safe": 0.5463510163800448, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9943183411572037, |
|
"eval_iou_safe": 0.46040818978469733, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9807758590757052, |
|
"eval_loss": 0.09994357079267502, |
|
"eval_mean_accuracy": 0.7703346787686243, |
|
"eval_mean_iou": 0.7205920244302013, |
|
"eval_overall_accuracy": 0.9810861046634504, |
|
"eval_runtime": 10.1258, |
|
"eval_samples_per_second": 6.617, |
|
"eval_steps_per_second": 0.494, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 1.6421052631578946e-06, |
|
"loss": 0.0986, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 61.09, |
|
"learning_rate": 1.6382775119617226e-06, |
|
"loss": 0.1114, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 61.18, |
|
"learning_rate": 1.6344497607655501e-06, |
|
"loss": 0.1073, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 61.27, |
|
"learning_rate": 1.6306220095693779e-06, |
|
"loss": 0.1028, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 61.36, |
|
"learning_rate": 1.6267942583732056e-06, |
|
"loss": 0.0855, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 61.45, |
|
"learning_rate": 1.6229665071770334e-06, |
|
"loss": 0.0954, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 61.55, |
|
"learning_rate": 1.6191387559808612e-06, |
|
"loss": 0.105, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 61.64, |
|
"learning_rate": 1.615311004784689e-06, |
|
"loss": 0.105, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 61.73, |
|
"learning_rate": 1.6114832535885169e-06, |
|
"loss": 0.0932, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 61.82, |
|
"learning_rate": 1.6076555023923444e-06, |
|
"loss": 0.1229, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 61.82, |
|
"eval_accuracy_safe": 0.5706261733763556, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.993868645102455, |
|
"eval_iou_safe": 0.4749514687715583, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9810472858831734, |
|
"eval_loss": 0.09930147975683212, |
|
"eval_mean_accuracy": 0.7822474092394053, |
|
"eval_mean_iou": 0.7279993773273659, |
|
"eval_overall_accuracy": 0.9813667411234841, |
|
"eval_runtime": 10.8251, |
|
"eval_samples_per_second": 6.189, |
|
"eval_steps_per_second": 0.462, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 61.91, |
|
"learning_rate": 1.6038277511961722e-06, |
|
"loss": 0.1183, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 1.6e-06, |
|
"loss": 0.1107, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 62.09, |
|
"learning_rate": 1.5961722488038277e-06, |
|
"loss": 0.1028, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 62.18, |
|
"learning_rate": 1.5923444976076554e-06, |
|
"loss": 0.097, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 62.27, |
|
"learning_rate": 1.5885167464114832e-06, |
|
"loss": 0.0915, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 62.36, |
|
"learning_rate": 1.584688995215311e-06, |
|
"loss": 0.0974, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 62.45, |
|
"learning_rate": 1.5808612440191387e-06, |
|
"loss": 0.0997, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 62.55, |
|
"learning_rate": 1.5770334928229665e-06, |
|
"loss": 0.1126, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 62.64, |
|
"learning_rate": 1.573205741626794e-06, |
|
"loss": 0.1003, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 62.73, |
|
"learning_rate": 1.569377990430622e-06, |
|
"loss": 0.0963, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 62.73, |
|
"eval_accuracy_safe": 0.5745833670648918, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9936493412730159, |
|
"eval_iou_safe": 0.475394228193057, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9809474391002709, |
|
"eval_loss": 0.09741559624671936, |
|
"eval_mean_accuracy": 0.7841163541689538, |
|
"eval_mean_iou": 0.7281708336466639, |
|
"eval_overall_accuracy": 0.9812708043340427, |
|
"eval_runtime": 10.6643, |
|
"eval_samples_per_second": 6.283, |
|
"eval_steps_per_second": 0.469, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 62.82, |
|
"learning_rate": 1.5655502392344497e-06, |
|
"loss": 0.1206, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 62.91, |
|
"learning_rate": 1.5617224880382773e-06, |
|
"loss": 0.1127, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 1.5578947368421052e-06, |
|
"loss": 0.0828, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 63.09, |
|
"learning_rate": 1.554066985645933e-06, |
|
"loss": 0.0967, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 63.18, |
|
"learning_rate": 1.5502392344497607e-06, |
|
"loss": 0.1018, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 63.27, |
|
"learning_rate": 1.5464114832535883e-06, |
|
"loss": 0.0982, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 63.36, |
|
"learning_rate": 1.5425837320574163e-06, |
|
"loss": 0.1014, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 63.45, |
|
"learning_rate": 1.538755980861244e-06, |
|
"loss": 0.0924, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 63.55, |
|
"learning_rate": 1.5349282296650716e-06, |
|
"loss": 0.0919, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 63.64, |
|
"learning_rate": 1.5311004784688995e-06, |
|
"loss": 0.1115, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 63.64, |
|
"eval_accuracy_safe": 0.5239243487881696, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9954837374300712, |
|
"eval_iou_safe": 0.45622972199516265, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9812646835919959, |
|
"eval_loss": 0.09740366041660309, |
|
"eval_mean_accuracy": 0.7597040431091204, |
|
"eval_mean_iou": 0.7187472027935793, |
|
"eval_overall_accuracy": 0.9815546291977612, |
|
"eval_runtime": 9.9953, |
|
"eval_samples_per_second": 6.703, |
|
"eval_steps_per_second": 0.5, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 63.73, |
|
"learning_rate": 1.5272727272727273e-06, |
|
"loss": 0.0956, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 63.82, |
|
"learning_rate": 1.5234449760765548e-06, |
|
"loss": 0.1093, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 63.91, |
|
"learning_rate": 1.5196172248803828e-06, |
|
"loss": 0.1099, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 1.5157894736842105e-06, |
|
"loss": 0.0971, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 64.09, |
|
"learning_rate": 1.511961722488038e-06, |
|
"loss": 0.1028, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 64.18, |
|
"learning_rate": 1.5081339712918658e-06, |
|
"loss": 0.0902, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 64.27, |
|
"learning_rate": 1.5043062200956938e-06, |
|
"loss": 0.1065, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 64.36, |
|
"learning_rate": 1.5004784688995216e-06, |
|
"loss": 0.0981, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 64.45, |
|
"learning_rate": 1.4966507177033491e-06, |
|
"loss": 0.0957, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 64.55, |
|
"learning_rate": 1.492822966507177e-06, |
|
"loss": 0.1025, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 64.55, |
|
"eval_accuracy_safe": 0.5844753875274189, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9934714575889979, |
|
"eval_iou_safe": 0.4812515871000508, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9810634398778569, |
|
"eval_loss": 0.09636449068784714, |
|
"eval_mean_accuracy": 0.7889734225582083, |
|
"eval_mean_iou": 0.7311575134889539, |
|
"eval_overall_accuracy": 0.981390369472219, |
|
"eval_runtime": 10.8756, |
|
"eval_samples_per_second": 6.161, |
|
"eval_steps_per_second": 0.46, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 64.64, |
|
"learning_rate": 1.4889952153110048e-06, |
|
"loss": 0.1116, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 64.73, |
|
"learning_rate": 1.4851674641148324e-06, |
|
"loss": 0.0946, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 64.82, |
|
"learning_rate": 1.4813397129186601e-06, |
|
"loss": 0.1029, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 64.91, |
|
"learning_rate": 1.477511961722488e-06, |
|
"loss": 0.1018, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 1.4736842105263156e-06, |
|
"loss": 0.0875, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 65.09, |
|
"learning_rate": 1.4698564593301434e-06, |
|
"loss": 0.114, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 65.18, |
|
"learning_rate": 1.4660287081339714e-06, |
|
"loss": 0.0997, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 65.27, |
|
"learning_rate": 1.462200956937799e-06, |
|
"loss": 0.1042, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 65.36, |
|
"learning_rate": 1.4583732057416267e-06, |
|
"loss": 0.1002, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 65.45, |
|
"learning_rate": 1.4545454545454544e-06, |
|
"loss": 0.0916, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 65.45, |
|
"eval_accuracy_safe": 0.5493309586316167, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9947474444767644, |
|
"eval_iou_safe": 0.4684850846003245, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9812869104271664, |
|
"eval_loss": 0.09620904177427292, |
|
"eval_mean_accuracy": 0.7720392015541906, |
|
"eval_mean_iou": 0.7248859975137455, |
|
"eval_overall_accuracy": 0.9815905556749942, |
|
"eval_runtime": 10.4119, |
|
"eval_samples_per_second": 6.435, |
|
"eval_steps_per_second": 0.48, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 65.55, |
|
"learning_rate": 1.4507177033492824e-06, |
|
"loss": 0.0908, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 65.64, |
|
"learning_rate": 1.44688995215311e-06, |
|
"loss": 0.1045, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 65.73, |
|
"learning_rate": 1.4430622009569377e-06, |
|
"loss": 0.0971, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 65.82, |
|
"learning_rate": 1.4392344497607657e-06, |
|
"loss": 0.0876, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 65.91, |
|
"learning_rate": 1.4354066985645932e-06, |
|
"loss": 0.1065, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 1.431578947368421e-06, |
|
"loss": 0.0998, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 66.09, |
|
"learning_rate": 1.427751196172249e-06, |
|
"loss": 0.0952, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 66.18, |
|
"learning_rate": 1.4239234449760765e-06, |
|
"loss": 0.1067, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 66.27, |
|
"learning_rate": 1.4200956937799042e-06, |
|
"loss": 0.0991, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 66.36, |
|
"learning_rate": 1.416267942583732e-06, |
|
"loss": 0.1055, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 66.36, |
|
"eval_accuracy_safe": 0.5273225623648329, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9953211662927315, |
|
"eval_iou_safe": 0.45706304548309923, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9812044738737931, |
|
"eval_loss": 0.09465456008911133, |
|
"eval_mean_accuracy": 0.7613218643287822, |
|
"eval_mean_iou": 0.7191337596784462, |
|
"eval_overall_accuracy": 0.9814972379314366, |
|
"eval_runtime": 10.9178, |
|
"eval_samples_per_second": 6.137, |
|
"eval_steps_per_second": 0.458, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 66.45, |
|
"learning_rate": 1.4124401913875597e-06, |
|
"loss": 0.0913, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 66.55, |
|
"learning_rate": 1.4086124401913875e-06, |
|
"loss": 0.0915, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 66.64, |
|
"learning_rate": 1.4047846889952152e-06, |
|
"loss": 0.094, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 66.73, |
|
"learning_rate": 1.400956937799043e-06, |
|
"loss": 0.0836, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 66.82, |
|
"learning_rate": 1.3971291866028708e-06, |
|
"loss": 0.095, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 66.91, |
|
"learning_rate": 1.3933014354066985e-06, |
|
"loss": 0.1146, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 1.3894736842105263e-06, |
|
"loss": 0.085, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 67.09, |
|
"learning_rate": 1.385645933014354e-06, |
|
"loss": 0.0946, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 67.18, |
|
"learning_rate": 1.3818181818181818e-06, |
|
"loss": 0.1043, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 67.27, |
|
"learning_rate": 1.3779904306220095e-06, |
|
"loss": 0.1081, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 67.27, |
|
"eval_accuracy_safe": 0.6093114521532299, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9919017748825657, |
|
"eval_iou_safe": 0.4812654624068449, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9802451202305071, |
|
"eval_loss": 0.09639958292245865, |
|
"eval_mean_accuracy": 0.8006066135178977, |
|
"eval_mean_iou": 0.730755291318676, |
|
"eval_overall_accuracy": 0.9806006702024546, |
|
"eval_runtime": 10.5244, |
|
"eval_samples_per_second": 6.366, |
|
"eval_steps_per_second": 0.475, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 67.36, |
|
"learning_rate": 1.3741626794258373e-06, |
|
"loss": 0.0912, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 67.45, |
|
"learning_rate": 1.370334928229665e-06, |
|
"loss": 0.0936, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 67.55, |
|
"learning_rate": 1.3665071770334928e-06, |
|
"loss": 0.095, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 67.64, |
|
"learning_rate": 1.3626794258373203e-06, |
|
"loss": 0.0994, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 67.73, |
|
"learning_rate": 1.3588516746411483e-06, |
|
"loss": 0.0947, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 67.82, |
|
"learning_rate": 1.355023923444976e-06, |
|
"loss": 0.1102, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 67.91, |
|
"learning_rate": 1.3511961722488036e-06, |
|
"loss": 0.0852, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 1.3473684210526316e-06, |
|
"loss": 0.1126, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 68.09, |
|
"learning_rate": 1.3435406698564593e-06, |
|
"loss": 0.0905, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 68.18, |
|
"learning_rate": 1.339712918660287e-06, |
|
"loss": 0.1039, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 68.18, |
|
"eval_accuracy_safe": 0.540481725205377, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_accuracy_unsafe": 0.9944637223475061, |
|
"eval_iou_safe": 0.45730282288511936, |
|
"eval_iou_unlabeled": null, |
|
"eval_iou_unsafe": 0.9807464396876859, |
|
"eval_loss": 0.09501567482948303, |
|
"eval_mean_accuracy": 0.7674727237764416, |
|
"eval_mean_iou": 0.7190246312864026, |
|
"eval_overall_accuracy": 0.9810538220761428, |
|
"eval_runtime": 10.2387, |
|
"eval_samples_per_second": 6.544, |
|
"eval_steps_per_second": 0.488, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 68.27, |
|
"learning_rate": 1.3358851674641146e-06, |
|
"loss": 0.0886, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 68.36, |
|
"learning_rate": 1.3320574162679426e-06, |
|
"loss": 0.0977, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 68.45, |
|
"learning_rate": 1.3282296650717704e-06, |
|
"loss": 0.0934, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 68.55, |
|
"learning_rate": 1.324401913875598e-06, |
|
"loss": 0.0906, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 68.64, |
|
"learning_rate": 1.3205741626794259e-06, |
|
"loss": 0.1029, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 68.73, |
|
"learning_rate": 1.3167464114832536e-06, |
|
"loss": 0.1015, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 68.82, |
|
"learning_rate": 1.3129186602870812e-06, |
|
"loss": 0.0967, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 68.91, |
|
"learning_rate": 1.3090909090909091e-06, |
|
"loss": 0.0941, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 1.3052631578947369e-06, |
|
"loss": 0.1303, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 69.09, |
|
"learning_rate": 1.3014354066985644e-06, |
|
"loss": 0.106, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 69.09, |
|
"eval_accuracy_safe": 0.5563972382527438, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9942656566096285, |
|
"eval_iou_safe": 0.46819118861133774, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9810197842105592, |
|
"eval_loss": 0.09392161667346954, |
|
"eval_mean_accuracy": 0.7753314474311861, |
|
"eval_mean_iou": 0.7246054864109485, |
|
"eval_overall_accuracy": 0.9813317256187325, |
|
"eval_runtime": 11.1215, |
|
"eval_samples_per_second": 6.024, |
|
"eval_steps_per_second": 0.45, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 69.18, |
|
"learning_rate": 1.2976076555023922e-06, |
|
"loss": 0.097, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 69.27, |
|
"learning_rate": 1.2937799043062201e-06, |
|
"loss": 0.0903, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 69.36, |
|
"learning_rate": 1.2899521531100477e-06, |
|
"loss": 0.1096, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 69.45, |
|
"learning_rate": 1.2861244019138754e-06, |
|
"loss": 0.116, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 69.55, |
|
"learning_rate": 1.2822966507177034e-06, |
|
"loss": 0.0892, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 69.64, |
|
"learning_rate": 1.2784688995215312e-06, |
|
"loss": 0.0923, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 69.73, |
|
"learning_rate": 1.2746411483253587e-06, |
|
"loss": 0.09, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 69.82, |
|
"learning_rate": 1.2708133971291865e-06, |
|
"loss": 0.0939, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 69.91, |
|
"learning_rate": 1.2669856459330144e-06, |
|
"loss": 0.0934, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 1.263157894736842e-06, |
|
"loss": 0.0912, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_safe": 0.5376868246460114, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9949495583591662, |
|
"eval_iou_safe": 0.46116623628253917, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9811432599765284, |
|
"eval_loss": 0.0935571938753128, |
|
"eval_mean_accuracy": 0.7663181915025887, |
|
"eval_mean_iou": 0.7211547481295337, |
|
"eval_overall_accuracy": 0.9814427503898963, |
|
"eval_runtime": 12.6589, |
|
"eval_samples_per_second": 5.293, |
|
"eval_steps_per_second": 0.395, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 70.09, |
|
"learning_rate": 1.2593301435406697e-06, |
|
"loss": 0.1019, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 70.18, |
|
"learning_rate": 1.2555023923444977e-06, |
|
"loss": 0.1146, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 70.27, |
|
"learning_rate": 1.2516746411483252e-06, |
|
"loss": 0.0883, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 70.36, |
|
"learning_rate": 1.247846889952153e-06, |
|
"loss": 0.0854, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 70.45, |
|
"learning_rate": 1.2440191387559808e-06, |
|
"loss": 0.0956, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 70.55, |
|
"learning_rate": 1.2401913875598085e-06, |
|
"loss": 0.101, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 70.64, |
|
"learning_rate": 1.2363636363636363e-06, |
|
"loss": 0.1062, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 70.73, |
|
"learning_rate": 1.232535885167464e-06, |
|
"loss": 0.0888, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 70.82, |
|
"learning_rate": 1.228708133971292e-06, |
|
"loss": 0.0945, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 70.91, |
|
"learning_rate": 1.2248803827751195e-06, |
|
"loss": 0.0951, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 70.91, |
|
"eval_accuracy_safe": 0.5600498841561906, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.994080145986652, |
|
"eval_iou_safe": 0.46886018347439506, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9809443508467186, |
|
"eval_loss": 0.09377636760473251, |
|
"eval_mean_accuracy": 0.7770650150714213, |
|
"eval_mean_iou": 0.7249022671605568, |
|
"eval_overall_accuracy": 0.9812595879853662, |
|
"eval_runtime": 10.9659, |
|
"eval_samples_per_second": 6.11, |
|
"eval_steps_per_second": 0.456, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 1.2210526315789473e-06, |
|
"loss": 0.1009, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 71.09, |
|
"learning_rate": 1.2172248803827753e-06, |
|
"loss": 0.089, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 71.18, |
|
"learning_rate": 1.2133971291866028e-06, |
|
"loss": 0.1066, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 71.27, |
|
"learning_rate": 1.2095693779904306e-06, |
|
"loss": 0.0972, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 71.36, |
|
"learning_rate": 1.2057416267942583e-06, |
|
"loss": 0.0874, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 71.45, |
|
"learning_rate": 1.201913875598086e-06, |
|
"loss": 0.0933, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 71.55, |
|
"learning_rate": 1.1980861244019138e-06, |
|
"loss": 0.0909, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 71.64, |
|
"learning_rate": 1.1942583732057416e-06, |
|
"loss": 0.1034, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 71.73, |
|
"learning_rate": 1.1904306220095693e-06, |
|
"loss": 0.1078, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 71.82, |
|
"learning_rate": 1.186602870813397e-06, |
|
"loss": 0.0998, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 71.82, |
|
"eval_accuracy_safe": 0.5573243742314024, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.994380119362768, |
|
"eval_iou_safe": 0.4704600775799229, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9811600412098591, |
|
"eval_loss": 0.09279777854681015, |
|
"eval_mean_accuracy": 0.7758522467970852, |
|
"eval_mean_iou": 0.7258100593948911, |
|
"eval_overall_accuracy": 0.9814701934358967, |
|
"eval_runtime": 10.7411, |
|
"eval_samples_per_second": 6.238, |
|
"eval_steps_per_second": 0.466, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 71.91, |
|
"learning_rate": 1.1827751196172248e-06, |
|
"loss": 0.0833, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 1.1789473684210524e-06, |
|
"loss": 0.1115, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 72.09, |
|
"learning_rate": 1.1751196172248804e-06, |
|
"loss": 0.105, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 72.18, |
|
"learning_rate": 1.1712918660287081e-06, |
|
"loss": 0.0938, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 72.27, |
|
"learning_rate": 1.1674641148325359e-06, |
|
"loss": 0.0878, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 72.36, |
|
"learning_rate": 1.1636363636363636e-06, |
|
"loss": 0.0952, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 72.45, |
|
"learning_rate": 1.1598086124401914e-06, |
|
"loss": 0.0866, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 72.55, |
|
"learning_rate": 1.1559808612440191e-06, |
|
"loss": 0.0936, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 72.64, |
|
"learning_rate": 1.1521531100478467e-06, |
|
"loss": 0.0865, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 72.73, |
|
"learning_rate": 1.1483253588516746e-06, |
|
"loss": 0.0889, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 72.73, |
|
"eval_accuracy_safe": 0.5398032390006207, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9949394086634752, |
|
"eval_iou_safe": 0.4628490775279766, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9811955809893045, |
|
"eval_loss": 0.09308235347270966, |
|
"eval_mean_accuracy": 0.7673713238320479, |
|
"eval_mean_iou": 0.7220223292586405, |
|
"eval_overall_accuracy": 0.9814954159864738, |
|
"eval_runtime": 10.6601, |
|
"eval_samples_per_second": 6.285, |
|
"eval_steps_per_second": 0.469, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 72.82, |
|
"learning_rate": 1.1444976076555024e-06, |
|
"loss": 0.1, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 72.91, |
|
"learning_rate": 1.14066985645933e-06, |
|
"loss": 0.091, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 1.136842105263158e-06, |
|
"loss": 0.0853, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 73.09, |
|
"learning_rate": 1.1330143540669857e-06, |
|
"loss": 0.0945, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 73.18, |
|
"learning_rate": 1.1291866028708132e-06, |
|
"loss": 0.1075, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 73.27, |
|
"learning_rate": 1.125358851674641e-06, |
|
"loss": 0.0914, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 73.36, |
|
"learning_rate": 1.121531100478469e-06, |
|
"loss": 0.0907, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 73.45, |
|
"learning_rate": 1.1177033492822967e-06, |
|
"loss": 0.1007, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 73.55, |
|
"learning_rate": 1.1138755980861242e-06, |
|
"loss": 0.0926, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 73.64, |
|
"learning_rate": 1.1100478468899522e-06, |
|
"loss": 0.0906, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 73.64, |
|
"eval_accuracy_safe": 0.5151445059965073, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9958092903860791, |
|
"eval_iou_safe": 0.4528016102142886, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9813270861723169, |
|
"eval_loss": 0.09284604340791702, |
|
"eval_mean_accuracy": 0.7554768981912932, |
|
"eval_mean_iou": 0.7170643481933028, |
|
"eval_overall_accuracy": 0.9816112233631646, |
|
"eval_runtime": 10.255, |
|
"eval_samples_per_second": 6.533, |
|
"eval_steps_per_second": 0.488, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 73.73, |
|
"learning_rate": 1.10622009569378e-06, |
|
"loss": 0.1001, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 73.82, |
|
"learning_rate": 1.1023923444976075e-06, |
|
"loss": 0.0837, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 73.91, |
|
"learning_rate": 1.0985645933014355e-06, |
|
"loss": 0.0877, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 1.0947368421052632e-06, |
|
"loss": 0.214, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 74.09, |
|
"learning_rate": 1.0909090909090908e-06, |
|
"loss": 0.0845, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 74.18, |
|
"learning_rate": 1.0870813397129185e-06, |
|
"loss": 0.0832, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 74.27, |
|
"learning_rate": 1.0832535885167465e-06, |
|
"loss": 0.1173, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 74.36, |
|
"learning_rate": 1.079425837320574e-06, |
|
"loss": 0.0883, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 74.45, |
|
"learning_rate": 1.0755980861244018e-06, |
|
"loss": 0.0978, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 74.55, |
|
"learning_rate": 1.0717703349282298e-06, |
|
"loss": 0.0911, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 74.55, |
|
"eval_accuracy_safe": 0.5682264139305554, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9938084509534436, |
|
"eval_iou_safe": 0.47217684067553367, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9809171438215548, |
|
"eval_loss": 0.09244522452354431, |
|
"eval_mean_accuracy": 0.7810174324419995, |
|
"eval_mean_iou": 0.7265469922485442, |
|
"eval_overall_accuracy": 0.9812374399669135, |
|
"eval_runtime": 10.4305, |
|
"eval_samples_per_second": 6.423, |
|
"eval_steps_per_second": 0.479, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 74.64, |
|
"learning_rate": 1.0679425837320575e-06, |
|
"loss": 0.086, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 74.73, |
|
"learning_rate": 1.064114832535885e-06, |
|
"loss": 0.0892, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 74.82, |
|
"learning_rate": 1.0602870813397128e-06, |
|
"loss": 0.1048, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 74.91, |
|
"learning_rate": 1.0564593301435408e-06, |
|
"loss": 0.0882, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 1.0526315789473683e-06, |
|
"loss": 0.182, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 75.09, |
|
"learning_rate": 1.048803827751196e-06, |
|
"loss": 0.0897, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 75.18, |
|
"learning_rate": 1.044976076555024e-06, |
|
"loss": 0.0971, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 75.27, |
|
"learning_rate": 1.0411483253588516e-06, |
|
"loss": 0.0932, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 75.36, |
|
"learning_rate": 1.0373205741626793e-06, |
|
"loss": 0.0946, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 75.45, |
|
"learning_rate": 1.033492822966507e-06, |
|
"loss": 0.0907, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 75.45, |
|
"eval_accuracy_safe": 0.48640714569334736, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9965232892101226, |
|
"eval_iou_safe": 0.43654313897125224, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.981184947472746, |
|
"eval_loss": 0.09287872165441513, |
|
"eval_mean_accuracy": 0.741465217451735, |
|
"eval_mean_iou": 0.7088640432219991, |
|
"eval_overall_accuracy": 0.9814552762615147, |
|
"eval_runtime": 10.9268, |
|
"eval_samples_per_second": 6.132, |
|
"eval_steps_per_second": 0.458, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 75.55, |
|
"learning_rate": 1.0296650717703349e-06, |
|
"loss": 0.0957, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 75.64, |
|
"learning_rate": 1.0258373205741626e-06, |
|
"loss": 0.0908, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 75.73, |
|
"learning_rate": 1.0220095693779904e-06, |
|
"loss": 0.113, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 75.82, |
|
"learning_rate": 1.0181818181818181e-06, |
|
"loss": 0.0981, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 75.91, |
|
"learning_rate": 1.0143540669856459e-06, |
|
"loss": 0.0899, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 1.0105263157894736e-06, |
|
"loss": 0.0954, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 76.09, |
|
"learning_rate": 1.0066985645933016e-06, |
|
"loss": 0.0948, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 76.18, |
|
"learning_rate": 1.0028708133971291e-06, |
|
"loss": 0.086, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 76.27, |
|
"learning_rate": 9.990430622009569e-07, |
|
"loss": 0.094, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 76.36, |
|
"learning_rate": 9.952153110047846e-07, |
|
"loss": 0.1117, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 76.36, |
|
"eval_accuracy_safe": 0.5239320588586782, |
|
"eval_accuracy_unlabeled": null,
|
"eval_accuracy_unsafe": 0.9955866424372505, |
|
"eval_iou_safe": 0.4575835778508384, |
|
"eval_iou_unlabeled": null,
|
"eval_iou_unsafe": 0.9813663457617646, |
|
"eval_loss": 0.09342558681964874, |
|
"eval_mean_accuracy": 0.7597593506479643, |
|
"eval_mean_iou": 0.7194749618063015, |
|
"eval_overall_accuracy": 0.9816547222991487, |
|
"eval_runtime": 10.5916, |
|
"eval_samples_per_second": 6.326, |
|
"eval_steps_per_second": 0.472, |
|
"step": 840 |
|
} |
|
], |
|
"max_steps": 1100, |
|
"num_train_epochs": 100, |
|
"total_flos": 9.392771801215402e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|