|
{ |
|
"best_metric": 0.3478124737739563, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGBD-b0_7/checkpoint-80", |
|
"epoch": 40.0, |
|
"global_step": 80, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 1.0264, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 1.0249, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 2e-05, |
|
"loss": 1.0219, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 1.0261, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 1.0043, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4e-05, |
|
"loss": 0.9882, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.9908, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 5.333333333333333e-05, |
|
"loss": 0.9709, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 6.000000000000001e-05, |
|
"loss": 0.961, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 6.666666666666667e-05, |
|
"loss": 0.9508, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.19369710314456753, |
|
"eval_accuracy_undropoff": 0.9010801242217483, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.06049444267748618, |
|
"eval_iou_undropoff": 0.8706257192603624, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.026282548904419, |
|
"eval_mean_accuracy": 0.5473886136831579, |
|
"eval_mean_iou": 0.31037338731261616, |
|
"eval_overall_accuracy": 0.8716676712036133, |
|
"eval_runtime": 2.7665, |
|
"eval_samples_per_second": 7.229, |
|
"eval_steps_per_second": 0.723, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 7.333333333333333e-05, |
|
"loss": 0.9292, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 8e-05, |
|
"loss": 0.9246, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 7.964912280701755e-05, |
|
"loss": 0.9097, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 7.929824561403509e-05, |
|
"loss": 0.8786, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 7.894736842105263e-05, |
|
"loss": 0.8697, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 7.859649122807017e-05, |
|
"loss": 0.8421, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 7.824561403508773e-05, |
|
"loss": 0.8223, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 7.789473684210527e-05, |
|
"loss": 0.8166, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 7.754385964912281e-05, |
|
"loss": 0.7987, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 7.719298245614036e-05, |
|
"loss": 0.7814, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.09518566939608707, |
|
"eval_accuracy_undropoff": 0.9726282293027602, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.05836274909643214, |
|
"eval_iou_undropoff": 0.9358911019066932, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.7567641139030457, |
|
"eval_mean_accuracy": 0.5339069493494236, |
|
"eval_mean_iou": 0.4971269255015627, |
|
"eval_overall_accuracy": 0.9361448287963867, |
|
"eval_runtime": 2.8406, |
|
"eval_samples_per_second": 7.041, |
|
"eval_steps_per_second": 0.704, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 7.68421052631579e-05, |
|
"loss": 0.7775, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 7.649122807017544e-05, |
|
"loss": 0.744, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 7.614035087719298e-05, |
|
"loss": 0.7385, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 7.578947368421052e-05, |
|
"loss": 0.7156, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 7.543859649122808e-05, |
|
"loss": 0.7073, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 7.508771929824562e-05, |
|
"loss": 0.6876, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 7.473684210526316e-05, |
|
"loss": 0.68, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 7.43859649122807e-05, |
|
"loss": 0.6647, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 7.403508771929825e-05, |
|
"loss": 0.656, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 7.368421052631579e-05, |
|
"loss": 0.642, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.10256657262781256, |
|
"eval_accuracy_undropoff": 0.9861238615411099, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.07771069890623208, |
|
"eval_iou_undropoff": 0.9491694411343236, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.590660810470581, |
|
"eval_mean_accuracy": 0.5443452170844613, |
|
"eval_mean_iou": 0.5134400700202778, |
|
"eval_overall_accuracy": 0.9493862152099609, |
|
"eval_runtime": 2.7723, |
|
"eval_samples_per_second": 7.214, |
|
"eval_steps_per_second": 0.721, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 7.333333333333333e-05, |
|
"loss": 0.6219, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 7.298245614035087e-05, |
|
"loss": 0.6276, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 7.263157894736843e-05, |
|
"loss": 0.5895, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 7.228070175438597e-05, |
|
"loss": 0.5987, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 7.192982456140351e-05, |
|
"loss": 0.578, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 7.157894736842105e-05, |
|
"loss": 0.5622, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 7.12280701754386e-05, |
|
"loss": 0.5411, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 7.087719298245614e-05, |
|
"loss": 0.5512, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 7.052631578947368e-05, |
|
"loss": 0.5536, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 7.017543859649124e-05, |
|
"loss": 0.5118, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.20064680382577582, |
|
"eval_accuracy_undropoff": 0.9839122686389837, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.146396810999508, |
|
"eval_iou_undropoff": 0.9509353469996961, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.48036208748817444, |
|
"eval_mean_accuracy": 0.5922795362323798, |
|
"eval_mean_iou": 0.3657773859997347, |
|
"eval_overall_accuracy": 0.9513446807861328, |
|
"eval_runtime": 3.0452, |
|
"eval_samples_per_second": 6.568, |
|
"eval_steps_per_second": 0.657, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 6.982456140350878e-05, |
|
"loss": 0.5335, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 6.947368421052632e-05, |
|
"loss": 0.5013, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 6.912280701754386e-05, |
|
"loss": 0.538, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 6.87719298245614e-05, |
|
"loss": 0.5046, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 6.842105263157895e-05, |
|
"loss": 0.4906, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 6.80701754385965e-05, |
|
"loss": 0.4797, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 6.771929824561404e-05, |
|
"loss": 0.492, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 6.736842105263159e-05, |
|
"loss": 0.4671, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 6.701754385964913e-05, |
|
"loss": 0.477, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 6.666666666666667e-05, |
|
"loss": 0.4581, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.19300442670703458, |
|
"eval_accuracy_undropoff": 0.9900139008156406, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.15784833444007998, |
|
"eval_iou_undropoff": 0.9565325903664265, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.44047123193740845, |
|
"eval_mean_accuracy": 0.5915091637613376, |
|
"eval_mean_iou": 0.37146030826883547, |
|
"eval_overall_accuracy": 0.9568748474121094, |
|
"eval_runtime": 2.862, |
|
"eval_samples_per_second": 6.988, |
|
"eval_steps_per_second": 0.699, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 6.631578947368421e-05, |
|
"loss": 0.451, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 6.596491228070175e-05, |
|
"loss": 0.4583, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 6.561403508771931e-05, |
|
"loss": 0.4437, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 6.526315789473685e-05, |
|
"loss": 0.4363, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 6.49122807017544e-05, |
|
"loss": 0.4311, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 6.456140350877194e-05, |
|
"loss": 0.4346, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 6.421052631578948e-05, |
|
"loss": 0.419, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 6.385964912280702e-05, |
|
"loss": 0.4368, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 6.350877192982457e-05, |
|
"loss": 0.4127, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 6.315789473684212e-05, |
|
"loss": 0.4213, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.23790453909493336, |
|
"eval_accuracy_undropoff": 0.9892081112304063, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.19095414478964925, |
|
"eval_iou_undropoff": 0.9575495596432145, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4145582318305969, |
|
"eval_mean_accuracy": 0.6135563251626699, |
|
"eval_mean_iou": 0.38283456814428796, |
|
"eval_overall_accuracy": 0.9579694747924805, |
|
"eval_runtime": 2.9309, |
|
"eval_samples_per_second": 6.824, |
|
"eval_steps_per_second": 0.682, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 6.280701754385966e-05, |
|
"loss": 0.4109, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 6.24561403508772e-05, |
|
"loss": 0.3886, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 6.210526315789474e-05, |
|
"loss": 0.4031, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 6.175438596491228e-05, |
|
"loss": 0.3899, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 6.140350877192984e-05, |
|
"loss": 0.3879, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 6.105263157894738e-05, |
|
"loss": 0.3708, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 6.070175438596492e-05, |
|
"loss": 0.382, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 6.035087719298246e-05, |
|
"loss": 0.3716, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 6.000000000000001e-05, |
|
"loss": 0.3808, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 5.964912280701755e-05, |
|
"loss": 0.3571, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.2473726461616092, |
|
"eval_accuracy_undropoff": 0.9886681983766793, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.19633228600450728, |
|
"eval_iou_undropoff": 0.9574076008382422, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.375016450881958, |
|
"eval_mean_accuracy": 0.6180204222691442, |
|
"eval_mean_iou": 0.3845799622809165, |
|
"eval_overall_accuracy": 0.9578456878662109, |
|
"eval_runtime": 2.8583, |
|
"eval_samples_per_second": 6.997, |
|
"eval_steps_per_second": 0.7, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 5.929824561403509e-05, |
|
"loss": 0.367, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 5.8947368421052634e-05, |
|
"loss": 0.3767, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 5.8596491228070176e-05, |
|
"loss": 0.3713, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 5.8245614035087725e-05, |
|
"loss": 0.3326, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 5.789473684210527e-05, |
|
"loss": 0.348, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 5.7543859649122816e-05, |
|
"loss": 0.3401, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 5.719298245614036e-05, |
|
"loss": 0.3436, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 5.68421052631579e-05, |
|
"loss": 0.3487, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 5.649122807017544e-05, |
|
"loss": 0.3398, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 5.6140350877192984e-05, |
|
"loss": 0.3205, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.2522076194408129, |
|
"eval_accuracy_undropoff": 0.9881893814485306, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.19823899733902547, |
|
"eval_iou_undropoff": 0.9571383411399603, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.3478124737739563, |
|
"eval_mean_accuracy": 0.6201985004446717, |
|
"eval_mean_iou": 0.5776886692394929, |
|
"eval_overall_accuracy": 0.9575878143310547, |
|
"eval_runtime": 2.8448, |
|
"eval_samples_per_second": 7.03, |
|
"eval_steps_per_second": 0.703, |
|
"step": 80 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 4.071916553895936e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|