|
{ |
|
"best_metric": 0.7325581312179565, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b0_1/checkpoint-160", |
|
"epoch": 53.333333333333336, |
|
"global_step": 160, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 5e-07, |
|
"loss": 1.1127, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1e-06, |
|
"loss": 1.1148, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.5e-06, |
|
"loss": 1.109, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 2e-06, |
|
"loss": 1.1127, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 2.5e-06, |
|
"loss": 1.1118, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 3e-06, |
|
"loss": 1.1088, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 3.5e-06, |
|
"loss": 1.1122, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 4e-06, |
|
"loss": 1.1054, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.5e-06, |
|
"loss": 1.1061, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 5e-06, |
|
"loss": 1.1029, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"eval_accuracy_dropoff": 0.3332988719645278, |
|
"eval_accuracy_undropoff": 0.45771106945832574, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.04096214090185886, |
|
"eval_iou_undropoff": 0.4500892406005614, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.085178256034851, |
|
"eval_mean_accuracy": 0.39550497071142676, |
|
"eval_mean_iou": 0.16368379383414008, |
|
"eval_overall_accuracy": 0.4522023518880208, |
|
"eval_runtime": 1.7594, |
|
"eval_samples_per_second": 8.526, |
|
"eval_steps_per_second": 0.568, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 5.500000000000001e-06, |
|
"loss": 1.1056, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 6e-06, |
|
"loss": 1.1072, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 6.5000000000000004e-06, |
|
"loss": 1.1039, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 7e-06, |
|
"loss": 1.1023, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 7.5e-06, |
|
"loss": 1.0945, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 8e-06, |
|
"loss": 1.0956, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 8.5e-06, |
|
"loss": 1.0946, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 9e-06, |
|
"loss": 1.1014, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 8.973684210526317e-06, |
|
"loss": 1.0905, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 1.0856, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"eval_accuracy_dropoff": 0.5153065913111402, |
|
"eval_accuracy_undropoff": 0.5019142896372908, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.07605219279767873, |
|
"eval_iou_undropoff": 0.49721567971024616, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.076412320137024, |
|
"eval_mean_accuracy": 0.5086104404742156, |
|
"eval_mean_iou": 0.19108929083597495, |
|
"eval_overall_accuracy": 0.5025072733561198, |
|
"eval_runtime": 1.9083, |
|
"eval_samples_per_second": 7.861, |
|
"eval_steps_per_second": 0.524, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 8.921052631578947e-06, |
|
"loss": 1.0941, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 8.894736842105264e-06, |
|
"loss": 1.0807, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 8.86842105263158e-06, |
|
"loss": 1.0792, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 8.842105263157895e-06, |
|
"loss": 1.0701, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 8.33, |
|
"learning_rate": 8.81578947368421e-06, |
|
"loss": 1.0735, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"learning_rate": 8.789473684210527e-06, |
|
"loss": 1.0715, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 8.763157894736842e-06, |
|
"loss": 1.0614, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"learning_rate": 8.736842105263158e-06, |
|
"loss": 1.061, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 9.67, |
|
"learning_rate": 8.710526315789473e-06, |
|
"loss": 1.0587, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 8.68421052631579e-06, |
|
"loss": 1.0755, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.704510993176649, |
|
"eval_accuracy_undropoff": 0.5688494996876041, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.11037454693354545, |
|
"eval_iou_undropoff": 0.565151253990595, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0611040592193604, |
|
"eval_mean_accuracy": 0.6366802464321266, |
|
"eval_mean_iou": 0.22517526697471346, |
|
"eval_overall_accuracy": 0.5748563130696615, |
|
"eval_runtime": 1.6608, |
|
"eval_samples_per_second": 9.032, |
|
"eval_steps_per_second": 0.602, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 10.33, |
|
"learning_rate": 8.657894736842105e-06, |
|
"loss": 1.0575, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 10.67, |
|
"learning_rate": 8.63157894736842e-06, |
|
"loss": 1.0559, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 8.605263157894737e-06, |
|
"loss": 1.0358, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 11.33, |
|
"learning_rate": 8.578947368421053e-06, |
|
"loss": 1.0525, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 11.67, |
|
"learning_rate": 8.552631578947368e-06, |
|
"loss": 1.0447, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 8.526315789473683e-06, |
|
"loss": 1.0542, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 12.33, |
|
"learning_rate": 8.5e-06, |
|
"loss": 1.04, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 12.67, |
|
"learning_rate": 8.473684210526317e-06, |
|
"loss": 1.0351, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 8.447368421052632e-06, |
|
"loss": 1.0672, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"learning_rate": 8.421052631578947e-06, |
|
"loss": 1.0285, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"eval_accuracy_dropoff": 0.8494440232499368, |
|
"eval_accuracy_undropoff": 0.6478915140077891, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.14199210424243355, |
|
"eval_iou_undropoff": 0.6445086621047246, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.038216471672058, |
|
"eval_mean_accuracy": 0.748667768628863, |
|
"eval_mean_iou": 0.2621669221157194, |
|
"eval_overall_accuracy": 0.6568158467610677, |
|
"eval_runtime": 1.6441, |
|
"eval_samples_per_second": 9.123, |
|
"eval_steps_per_second": 0.608, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 13.67, |
|
"learning_rate": 8.394736842105263e-06, |
|
"loss": 1.0311, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 8.36842105263158e-06, |
|
"loss": 1.0374, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 14.33, |
|
"learning_rate": 8.342105263157895e-06, |
|
"loss": 1.0281, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 14.67, |
|
"learning_rate": 8.31578947368421e-06, |
|
"loss": 1.0238, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 8.289473684210526e-06, |
|
"loss": 0.9876, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 15.33, |
|
"learning_rate": 8.263157894736843e-06, |
|
"loss": 1.0185, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 15.67, |
|
"learning_rate": 8.236842105263158e-06, |
|
"loss": 1.0069, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 8.210526315789473e-06, |
|
"loss": 1.0178, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 16.33, |
|
"learning_rate": 8.18421052631579e-06, |
|
"loss": 1.014, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 16.67, |
|
"learning_rate": 8.157894736842105e-06, |
|
"loss": 0.9935, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 16.67, |
|
"eval_accuracy_dropoff": 0.8486341810830059, |
|
"eval_accuracy_undropoff": 0.7141391337852696, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.15795859939683365, |
|
"eval_iou_undropoff": 0.7099499580316209, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0151127576828003, |
|
"eval_mean_accuracy": 0.7813866574341377, |
|
"eval_mean_iou": 0.2893028524761515, |
|
"eval_overall_accuracy": 0.7200942993164062, |
|
"eval_runtime": 1.8541, |
|
"eval_samples_per_second": 8.09, |
|
"eval_steps_per_second": 0.539, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 8.131578947368421e-06, |
|
"loss": 0.9613, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 17.33, |
|
"learning_rate": 8.105263157894736e-06, |
|
"loss": 0.9968, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 17.67, |
|
"learning_rate": 8.078947368421053e-06, |
|
"loss": 0.9971, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 8.052631578947368e-06, |
|
"loss": 0.9627, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 18.33, |
|
"learning_rate": 8.026315789473685e-06, |
|
"loss": 0.9881, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 18.67, |
|
"learning_rate": 8e-06, |
|
"loss": 0.9927, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 7.973684210526316e-06, |
|
"loss": 1.0041, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 19.33, |
|
"learning_rate": 7.947368421052631e-06, |
|
"loss": 0.9613, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 19.67, |
|
"learning_rate": 7.921052631578948e-06, |
|
"loss": 0.987, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.9927, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.8124325131527558, |
|
"eval_accuracy_undropoff": 0.7801403493086312, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.17348479366555017, |
|
"eval_iou_undropoff": 0.774391632219126, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9833822846412659, |
|
"eval_mean_accuracy": 0.7962864312306934, |
|
"eval_mean_iou": 0.3159588086282254, |
|
"eval_overall_accuracy": 0.7815701802571614, |
|
"eval_runtime": 1.8711, |
|
"eval_samples_per_second": 8.017, |
|
"eval_steps_per_second": 0.534, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 20.33, |
|
"learning_rate": 7.86842105263158e-06, |
|
"loss": 0.9642, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 20.67, |
|
"learning_rate": 7.842105263157895e-06, |
|
"loss": 0.9628, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 7.815789473684211e-06, |
|
"loss": 1.0234, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 21.33, |
|
"learning_rate": 7.789473684210526e-06, |
|
"loss": 0.9644, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 21.67, |
|
"learning_rate": 7.763157894736843e-06, |
|
"loss": 0.9831, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 7.736842105263158e-06, |
|
"loss": 0.9766, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 22.33, |
|
"learning_rate": 7.710526315789474e-06, |
|
"loss": 0.9637, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 22.67, |
|
"learning_rate": 7.68421052631579e-06, |
|
"loss": 0.9429, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 7.657894736842106e-06, |
|
"loss": 1.0102, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 23.33, |
|
"learning_rate": 7.631578947368421e-06, |
|
"loss": 0.938, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 23.33, |
|
"eval_accuracy_dropoff": 0.8036276334229329, |
|
"eval_accuracy_undropoff": 0.8131103561100272, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.18604038910547227, |
|
"eval_iou_undropoff": 0.8064630295685331, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9585382342338562, |
|
"eval_mean_accuracy": 0.80836899476648, |
|
"eval_mean_iou": 0.3308344728913351, |
|
"eval_overall_accuracy": 0.8126904805501302, |
|
"eval_runtime": 1.7808, |
|
"eval_samples_per_second": 8.423, |
|
"eval_steps_per_second": 0.562, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 23.67, |
|
"learning_rate": 7.605263157894738e-06, |
|
"loss": 0.9383, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 7.578947368421053e-06, |
|
"loss": 0.9818, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 24.33, |
|
"learning_rate": 7.5526315789473686e-06, |
|
"loss": 0.9393, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 24.67, |
|
"learning_rate": 7.526315789473684e-06, |
|
"loss": 0.9402, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 7.5e-06, |
|
"loss": 1.0092, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 25.33, |
|
"learning_rate": 7.473684210526315e-06, |
|
"loss": 0.9253, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 25.67, |
|
"learning_rate": 7.447368421052632e-06, |
|
"loss": 0.928, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 7.421052631578948e-06, |
|
"loss": 1.0012, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 26.33, |
|
"learning_rate": 7.3947368421052635e-06, |
|
"loss": 0.9346, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 26.67, |
|
"learning_rate": 7.3684210526315784e-06, |
|
"loss": 0.9169, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 26.67, |
|
"eval_accuracy_dropoff": 0.7942541411078181, |
|
"eval_accuracy_undropoff": 0.8396427191534337, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2048256657878366, |
|
"eval_iou_undropoff": 0.8323542934650573, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9375818371772766, |
|
"eval_mean_accuracy": 0.8169484301306259, |
|
"eval_mean_iou": 0.34572665308429795, |
|
"eval_overall_accuracy": 0.8376330057779948, |
|
"eval_runtime": 1.7816, |
|
"eval_samples_per_second": 8.419, |
|
"eval_steps_per_second": 0.561, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 7.342105263157895e-06, |
|
"loss": 0.9599, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 27.33, |
|
"learning_rate": 7.315789473684211e-06, |
|
"loss": 0.9316, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 27.67, |
|
"learning_rate": 7.289473684210527e-06, |
|
"loss": 0.9335, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 7.263157894736842e-06, |
|
"loss": 0.8321, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 28.33, |
|
"learning_rate": 7.2368421052631575e-06, |
|
"loss": 0.9137, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 28.67, |
|
"learning_rate": 7.210526315789474e-06, |
|
"loss": 0.8788, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 7.18421052631579e-06, |
|
"loss": 0.9947, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 29.33, |
|
"learning_rate": 7.157894736842105e-06, |
|
"loss": 0.903, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 29.67, |
|
"learning_rate": 7.131578947368421e-06, |
|
"loss": 0.8931, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 7.1052631578947375e-06, |
|
"loss": 0.8814, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.7421428079123302, |
|
"eval_accuracy_undropoff": 0.8750230172440403, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2220341575149798, |
|
"eval_iou_undropoff": 0.8651189466137869, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9003341197967529, |
|
"eval_mean_accuracy": 0.8085829125781853, |
|
"eval_mean_iou": 0.36238436804292223, |
|
"eval_overall_accuracy": 0.8691393534342448, |
|
"eval_runtime": 1.8384, |
|
"eval_samples_per_second": 8.159, |
|
"eval_steps_per_second": 0.544, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 30.33, |
|
"learning_rate": 7.078947368421053e-06, |
|
"loss": 0.8804, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 30.67, |
|
"learning_rate": 7.052631578947368e-06, |
|
"loss": 0.9237, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 7.026315789473684e-06, |
|
"loss": 0.9738, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 31.33, |
|
"learning_rate": 7e-06, |
|
"loss": 0.8767, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 31.67, |
|
"learning_rate": 6.9736842105263166e-06, |
|
"loss": 0.8922, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 6.9473684210526315e-06, |
|
"loss": 0.895, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 32.33, |
|
"learning_rate": 6.921052631578947e-06, |
|
"loss": 0.8999, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 32.67, |
|
"learning_rate": 6.894736842105263e-06, |
|
"loss": 0.8614, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 6.86842105263158e-06, |
|
"loss": 0.9943, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"learning_rate": 6.842105263157895e-06, |
|
"loss": 0.8618, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"eval_accuracy_dropoff": 0.7550371034070806, |
|
"eval_accuracy_undropoff": 0.8817376662164335, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2286707289707921, |
|
"eval_iou_undropoff": 0.8719697906661579, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8893593549728394, |
|
"eval_mean_accuracy": 0.8183873848117571, |
|
"eval_mean_iou": 0.36688017321231664, |
|
"eval_overall_accuracy": 0.8761276245117188, |
|
"eval_runtime": 1.9949, |
|
"eval_samples_per_second": 7.519, |
|
"eval_steps_per_second": 0.501, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 33.67, |
|
"learning_rate": 6.815789473684211e-06, |
|
"loss": 0.875, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 6.7894736842105264e-06, |
|
"loss": 0.9501, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 34.33, |
|
"learning_rate": 6.763157894736842e-06, |
|
"loss": 0.8395, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 34.67, |
|
"learning_rate": 6.736842105263158e-06, |
|
"loss": 0.8817, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 6.710526315789474e-06, |
|
"loss": 0.8015, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 35.33, |
|
"learning_rate": 6.68421052631579e-06, |
|
"loss": 0.8708, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 35.67, |
|
"learning_rate": 6.6578947368421055e-06, |
|
"loss": 0.823, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 6.6315789473684205e-06, |
|
"loss": 0.9683, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 36.33, |
|
"learning_rate": 6.605263157894737e-06, |
|
"loss": 0.86, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 36.67, |
|
"learning_rate": 6.578947368421053e-06, |
|
"loss": 0.8388, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 36.67, |
|
"eval_accuracy_dropoff": 0.718686102878673, |
|
"eval_accuracy_undropoff": 0.9006134029012903, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.24306521418192836, |
|
"eval_iou_undropoff": 0.8892370991006613, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8618280291557312, |
|
"eval_mean_accuracy": 0.8096497528899816, |
|
"eval_mean_iou": 0.37743410442752984, |
|
"eval_overall_accuracy": 0.8925580342610677, |
|
"eval_runtime": 1.913, |
|
"eval_samples_per_second": 7.841, |
|
"eval_steps_per_second": 0.523, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 6.552631578947369e-06, |
|
"loss": 0.8596, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 37.33, |
|
"learning_rate": 6.526315789473684e-06, |
|
"loss": 0.8692, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 37.67, |
|
"learning_rate": 6.5000000000000004e-06, |
|
"loss": 0.8117, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 6.473684210526316e-06, |
|
"loss": 0.9507, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 38.33, |
|
"learning_rate": 6.447368421052632e-06, |
|
"loss": 0.8147, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 38.67, |
|
"learning_rate": 6.421052631578947e-06, |
|
"loss": 0.8928, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 6.394736842105263e-06, |
|
"loss": 0.826, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 39.33, |
|
"learning_rate": 6.3684210526315795e-06, |
|
"loss": 0.8085, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 39.67, |
|
"learning_rate": 6.342105263157895e-06, |
|
"loss": 0.8143, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 6.31578947368421e-06, |
|
"loss": 0.8878, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.6618248443494842, |
|
"eval_accuracy_undropoff": 0.9256646794669153, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2670577274286403, |
|
"eval_iou_undropoff": 0.9115604296316833, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8268659710884094, |
|
"eval_mean_accuracy": 0.7937447619081998, |
|
"eval_mean_iou": 0.39287271902010784, |
|
"eval_overall_accuracy": 0.9139823913574219, |
|
"eval_runtime": 1.8743, |
|
"eval_samples_per_second": 8.003, |
|
"eval_steps_per_second": 0.534, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 40.33, |
|
"learning_rate": 6.289473684210526e-06, |
|
"loss": 0.8174, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 40.67, |
|
"learning_rate": 6.263157894736843e-06, |
|
"loss": 0.8032, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 6.236842105263159e-06, |
|
"loss": 0.9556, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 41.33, |
|
"learning_rate": 6.210526315789474e-06, |
|
"loss": 0.795, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 41.67, |
|
"learning_rate": 6.184210526315789e-06, |
|
"loss": 0.8119, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 6.157894736842105e-06, |
|
"loss": 0.8237, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 42.33, |
|
"learning_rate": 6.131578947368422e-06, |
|
"loss": 0.7689, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 42.67, |
|
"learning_rate": 6.105263157894737e-06, |
|
"loss": 0.8232, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 6.078947368421053e-06, |
|
"loss": 0.9406, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"learning_rate": 6.0526315789473685e-06, |
|
"loss": 0.8066, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"eval_accuracy_dropoff": 0.6561502056195005, |
|
"eval_accuracy_undropoff": 0.9348247443090196, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.28393932540146094, |
|
"eval_iou_undropoff": 0.9202498341878032, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8073786497116089, |
|
"eval_mean_accuracy": 0.7954874749642601, |
|
"eval_mean_iou": 0.4013963865297547, |
|
"eval_overall_accuracy": 0.9224856058756511, |
|
"eval_runtime": 1.8279, |
|
"eval_samples_per_second": 8.206, |
|
"eval_steps_per_second": 0.547, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 43.67, |
|
"learning_rate": 6.026315789473685e-06, |
|
"loss": 0.756, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 6e-06, |
|
"loss": 0.9245, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 44.33, |
|
"learning_rate": 5.973684210526316e-06, |
|
"loss": 0.7635, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 44.67, |
|
"learning_rate": 5.947368421052632e-06, |
|
"loss": 0.8357, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 5.921052631578948e-06, |
|
"loss": 0.8333, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 45.33, |
|
"learning_rate": 5.894736842105263e-06, |
|
"loss": 0.799, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 45.67, |
|
"learning_rate": 5.868421052631579e-06, |
|
"loss": 0.7785, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 5.842105263157895e-06, |
|
"loss": 0.7472, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 46.33, |
|
"learning_rate": 5.815789473684211e-06, |
|
"loss": 0.7746, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 46.67, |
|
"learning_rate": 5.789473684210526e-06, |
|
"loss": 0.8084, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 46.67, |
|
"eval_accuracy_dropoff": 0.6487237806419004, |
|
"eval_accuracy_undropoff": 0.9376131570292269, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2843927433873425, |
|
"eval_iou_undropoff": 0.922643123497163, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7918700575828552, |
|
"eval_mean_accuracy": 0.7931684688355636, |
|
"eval_mean_iou": 0.40234528896150185, |
|
"eval_overall_accuracy": 0.9248217264811198, |
|
"eval_runtime": 1.8666, |
|
"eval_samples_per_second": 8.036, |
|
"eval_steps_per_second": 0.536, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 5.7631578947368425e-06, |
|
"loss": 0.9072, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 47.33, |
|
"learning_rate": 5.736842105263158e-06, |
|
"loss": 0.7663, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 47.67, |
|
"learning_rate": 5.710526315789474e-06, |
|
"loss": 0.7524, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 5.684210526315789e-06, |
|
"loss": 0.9309, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 48.33, |
|
"learning_rate": 5.657894736842105e-06, |
|
"loss": 0.7694, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 48.67, |
|
"learning_rate": 5.631578947368422e-06, |
|
"loss": 0.7437, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 5.6052631578947374e-06, |
|
"loss": 0.8711, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 49.33, |
|
"learning_rate": 5.578947368421052e-06, |
|
"loss": 0.7414, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 49.67, |
|
"learning_rate": 5.552631578947368e-06, |
|
"loss": 0.7699, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 5.526315789473685e-06, |
|
"loss": 0.7415, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.6249109747972523, |
|
"eval_accuracy_undropoff": 0.9451210361112619, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.29126956931445824, |
|
"eval_iou_undropoff": 0.9289919517149979, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7706870436668396, |
|
"eval_mean_accuracy": 0.7850160054542571, |
|
"eval_mean_iou": 0.4067538403431521, |
|
"eval_overall_accuracy": 0.9309427897135417, |
|
"eval_runtime": 1.868, |
|
"eval_samples_per_second": 8.03, |
|
"eval_steps_per_second": 0.535, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 50.33, |
|
"learning_rate": 5.500000000000001e-06, |
|
"loss": 0.7662, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 50.67, |
|
"learning_rate": 5.473684210526316e-06, |
|
"loss": 0.735, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 5.4473684210526315e-06, |
|
"loss": 0.7445, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 51.33, |
|
"learning_rate": 5.421052631578947e-06, |
|
"loss": 0.7691, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 51.67, |
|
"learning_rate": 5.394736842105264e-06, |
|
"loss": 0.7024, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 5.368421052631579e-06, |
|
"loss": 0.9282, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 52.33, |
|
"learning_rate": 5.342105263157895e-06, |
|
"loss": 0.7189, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 52.67, |
|
"learning_rate": 5.315789473684211e-06, |
|
"loss": 0.7815, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 5.289473684210527e-06, |
|
"loss": 0.7113, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.7508, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"eval_accuracy_dropoff": 0.5735348174696165, |
|
"eval_accuracy_undropoff": 0.9585285142408886, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.30633518727011133, |
|
"eval_iou_undropoff": 0.9399596899192442, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7325581312179565, |
|
"eval_mean_accuracy": 0.7660316658552526, |
|
"eval_mean_iou": 0.4154316257297852, |
|
"eval_overall_accuracy": 0.9414817810058593, |
|
"eval_runtime": 1.8819, |
|
"eval_samples_per_second": 7.971, |
|
"eval_steps_per_second": 0.531, |
|
"step": 160 |
|
} |
|
], |
|
"max_steps": 360, |
|
"num_train_epochs": 120, |
|
"total_flos": 3.186802778701824e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|