|
{ |
|
"best_metric": 0.3562777638435364, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGBD-b0_5/checkpoint-80", |
|
"epoch": 40.0, |
|
"global_step": 80, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.9999999999999996e-06, |
|
"loss": 0.9932, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 9.999999999999999e-06, |
|
"loss": 0.9915, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 1.5e-05, |
|
"loss": 0.9957, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1.9999999999999998e-05, |
|
"loss": 0.9918, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.9772, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 3e-05, |
|
"loss": 0.9703, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 3.5000000000000004e-05, |
|
"loss": 0.9584, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.9999999999999996e-05, |
|
"loss": 0.9508, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 4.5e-05, |
|
"loss": 0.9357, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 5e-05, |
|
"loss": 0.9263, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.6617766462533544, |
|
"eval_accuracy_undropoff": 0.7675487100699817, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.10419987677799743, |
|
"eval_iou_undropoff": 0.7564535036864135, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0369741916656494, |
|
"eval_mean_accuracy": 0.7146626781616681, |
|
"eval_mean_iou": 0.2868844601548037, |
|
"eval_overall_accuracy": 0.7631507873535156, |
|
"eval_runtime": 2.7168, |
|
"eval_samples_per_second": 7.362, |
|
"eval_steps_per_second": 0.736, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 5.5e-05, |
|
"loss": 0.9136, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 6e-05, |
|
"loss": 0.9077, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 5.973684210526316e-05, |
|
"loss": 0.9036, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 5.9473684210526315e-05, |
|
"loss": 0.8739, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 5.921052631578947e-05, |
|
"loss": 0.8603, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 5.894736842105263e-05, |
|
"loss": 0.8455, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 5.868421052631579e-05, |
|
"loss": 0.8318, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 5.842105263157895e-05, |
|
"loss": 0.824, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 5.815789473684211e-05, |
|
"loss": 0.8011, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 5.789473684210527e-05, |
|
"loss": 0.8069, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.012454414092066332, |
|
"eval_accuracy_undropoff": 0.9999438793126609, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.01243832378125043, |
|
"eval_iou_undropoff": 0.9588635090688938, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.8622230291366577, |
|
"eval_mean_accuracy": 0.5061991467023637, |
|
"eval_mean_iou": 0.48565091642507213, |
|
"eval_overall_accuracy": 0.9588848114013672, |
|
"eval_runtime": 2.8264, |
|
"eval_samples_per_second": 7.076, |
|
"eval_steps_per_second": 0.708, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 5.7631578947368423e-05, |
|
"loss": 0.7927, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 5.736842105263158e-05, |
|
"loss": 0.7775, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 5.7105263157894736e-05, |
|
"loss": 0.7684, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 5.684210526315789e-05, |
|
"loss": 0.7542, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 5.6578947368421056e-05, |
|
"loss": 0.751, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 5.631578947368421e-05, |
|
"loss": 0.7325, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 5.605263157894737e-05, |
|
"loss": 0.7199, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 5.5789473684210526e-05, |
|
"loss": 0.7194, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 5.552631578947368e-05, |
|
"loss": 0.7123, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 5.5263157894736845e-05, |
|
"loss": 0.6851, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.016743503291359894, |
|
"eval_accuracy_undropoff": 0.9994596891272138, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.01653753794572063, |
|
"eval_iou_undropoff": 0.9585702479181101, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.649047315120697, |
|
"eval_mean_accuracy": 0.5081015962092869, |
|
"eval_mean_iou": 0.48755389293191537, |
|
"eval_overall_accuracy": 0.9585990905761719, |
|
"eval_runtime": 2.8111, |
|
"eval_samples_per_second": 7.115, |
|
"eval_steps_per_second": 0.711, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 5.5e-05, |
|
"loss": 0.6899, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 5.473684210526316e-05, |
|
"loss": 0.699, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 5.447368421052632e-05, |
|
"loss": 0.6628, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 5.421052631578948e-05, |
|
"loss": 0.6454, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 5.3947368421052635e-05, |
|
"loss": 0.6433, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 5.368421052631579e-05, |
|
"loss": 0.6197, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 5.342105263157895e-05, |
|
"loss": 0.5929, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 5.3157894736842104e-05, |
|
"loss": 0.6429, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 5.289473684210526e-05, |
|
"loss": 0.6129, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 5.263157894736842e-05, |
|
"loss": 0.5882, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.017670130048854333, |
|
"eval_accuracy_undropoff": 0.9993924239062187, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.017427892772310823, |
|
"eval_iou_undropoff": 0.9585426915068979, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.47389930486679077, |
|
"eval_mean_accuracy": 0.5085312769775365, |
|
"eval_mean_iou": 0.3253235280930696, |
|
"eval_overall_accuracy": 0.9585731506347657, |
|
"eval_runtime": 2.9413, |
|
"eval_samples_per_second": 6.8, |
|
"eval_steps_per_second": 0.68, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 5.236842105263158e-05, |
|
"loss": 0.6071, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 5.210526315789474e-05, |
|
"loss": 0.5821, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 5.18421052631579e-05, |
|
"loss": 0.6139, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 5.157894736842106e-05, |
|
"loss": 0.5813, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 5.131578947368421e-05, |
|
"loss": 0.5747, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 5.105263157894737e-05, |
|
"loss": 0.5474, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 5.0789473684210526e-05, |
|
"loss": 0.5487, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 5.052631578947368e-05, |
|
"loss": 0.5498, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 5.0263157894736846e-05, |
|
"loss": 0.5379, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 5e-05, |
|
"loss": 0.53, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.057308653868207986, |
|
"eval_accuracy_undropoff": 0.997473574022092, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.05417770704227795, |
|
"eval_iou_undropoff": 0.9582828192254225, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4153165817260742, |
|
"eval_mean_accuracy": 0.52739111394515, |
|
"eval_mean_iou": 0.33748684208923346, |
|
"eval_overall_accuracy": 0.9583822250366211, |
|
"eval_runtime": 2.8555, |
|
"eval_samples_per_second": 7.004, |
|
"eval_steps_per_second": 0.7, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 4.973684210526316e-05, |
|
"loss": 0.5316, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 4.9473684210526315e-05, |
|
"loss": 0.5182, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 4.921052631578947e-05, |
|
"loss": 0.5111, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 4.8947368421052635e-05, |
|
"loss": 0.5123, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.868421052631579e-05, |
|
"loss": 0.503, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 4.842105263157895e-05, |
|
"loss": 0.4843, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 4.815789473684211e-05, |
|
"loss": 0.511, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 4.789473684210527e-05, |
|
"loss": 0.5023, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 4.7631578947368424e-05, |
|
"loss": 0.504, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.5009, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.3230165829491502, |
|
"eval_accuracy_undropoff": 0.9745914583119812, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.20371640094081706, |
|
"eval_iou_undropoff": 0.946784743209426, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4274832606315613, |
|
"eval_mean_accuracy": 0.6488040206305657, |
|
"eval_mean_iou": 0.38350038138341436, |
|
"eval_overall_accuracy": 0.9474994659423828, |
|
"eval_runtime": 2.8386, |
|
"eval_samples_per_second": 7.046, |
|
"eval_steps_per_second": 0.705, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 4.710526315789474e-05, |
|
"loss": 0.4875, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 4.6842105263157894e-05, |
|
"loss": 0.4752, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 4.657894736842105e-05, |
|
"loss": 0.4921, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 4.631578947368421e-05, |
|
"loss": 0.4734, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 4.605263157894737e-05, |
|
"loss": 0.4495, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 4.5789473684210527e-05, |
|
"loss": 0.4543, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 4.552631578947369e-05, |
|
"loss": 0.4517, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 4.5263157894736846e-05, |
|
"loss": 0.4487, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 4.5e-05, |
|
"loss": 0.4513, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.4699, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.4157021950044726, |
|
"eval_accuracy_undropoff": 0.9812761884102821, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.29037932824486185, |
|
"eval_iou_undropoff": 0.9570171064769841, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.38186168670654297, |
|
"eval_mean_accuracy": 0.6984891917073773, |
|
"eval_mean_iou": 0.41579881157394866, |
|
"eval_overall_accuracy": 0.9577600479125976, |
|
"eval_runtime": 2.892, |
|
"eval_samples_per_second": 6.916, |
|
"eval_steps_per_second": 0.692, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 4.4473684210526316e-05, |
|
"loss": 0.4379, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 4.421052631578947e-05, |
|
"loss": 0.4571, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 4.394736842105263e-05, |
|
"loss": 0.4492, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 4.368421052631579e-05, |
|
"loss": 0.4127, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 4.342105263157895e-05, |
|
"loss": 0.4171, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 4.3157894736842105e-05, |
|
"loss": 0.4115, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 4.289473684210526e-05, |
|
"loss": 0.4204, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 4.2631578947368425e-05, |
|
"loss": 0.4289, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 4.236842105263158e-05, |
|
"loss": 0.4148, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.3946, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.3853758113718205, |
|
"eval_accuracy_undropoff": 0.9833942468335096, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.27869836814989535, |
|
"eval_iou_undropoff": 0.9578537547951392, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.3562777638435364, |
|
"eval_mean_accuracy": 0.6843850291026651, |
|
"eval_mean_iou": 0.6182760614725173, |
|
"eval_overall_accuracy": 0.9585290908813476, |
|
"eval_runtime": 2.8829, |
|
"eval_samples_per_second": 6.937, |
|
"eval_steps_per_second": 0.694, |
|
"step": 80 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 4.071916553895936e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|