|
{ |
|
"best_metric": 0.26064532995224, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGBD-b0_5/checkpoint-200", |
|
"epoch": 100.0, |
|
"global_step": 200, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.9999999999999996e-06, |
|
"loss": 0.9932, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 9.999999999999999e-06, |
|
"loss": 0.9915, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 1.5e-05, |
|
"loss": 0.9957, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1.9999999999999998e-05, |
|
"loss": 0.9918, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.9772, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 3e-05, |
|
"loss": 0.9703, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 3.5000000000000004e-05, |
|
"loss": 0.9584, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.9999999999999996e-05, |
|
"loss": 0.9508, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 4.5e-05, |
|
"loss": 0.9357, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 5e-05, |
|
"loss": 0.9263, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.6617766462533544, |
|
"eval_accuracy_undropoff": 0.7675487100699817, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.10419987677799743, |
|
"eval_iou_undropoff": 0.7564535036864135, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0369741916656494, |
|
"eval_mean_accuracy": 0.7146626781616681, |
|
"eval_mean_iou": 0.2868844601548037, |
|
"eval_overall_accuracy": 0.7631507873535156, |
|
"eval_runtime": 2.7168, |
|
"eval_samples_per_second": 7.362, |
|
"eval_steps_per_second": 0.736, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 5.5e-05, |
|
"loss": 0.9136, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 6e-05, |
|
"loss": 0.9077, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 5.973684210526316e-05, |
|
"loss": 0.9036, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 5.9473684210526315e-05, |
|
"loss": 0.8739, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 5.921052631578947e-05, |
|
"loss": 0.8603, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 5.894736842105263e-05, |
|
"loss": 0.8455, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 5.868421052631579e-05, |
|
"loss": 0.8318, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 5.842105263157895e-05, |
|
"loss": 0.824, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 5.815789473684211e-05, |
|
"loss": 0.8011, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 5.789473684210527e-05, |
|
"loss": 0.8069, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.012454414092066332, |
|
"eval_accuracy_undropoff": 0.9999438793126609, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.01243832378125043, |
|
"eval_iou_undropoff": 0.9588635090688938, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.8622230291366577, |
|
"eval_mean_accuracy": 0.5061991467023637, |
|
"eval_mean_iou": 0.48565091642507213, |
|
"eval_overall_accuracy": 0.9588848114013672, |
|
"eval_runtime": 2.8264, |
|
"eval_samples_per_second": 7.076, |
|
"eval_steps_per_second": 0.708, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 5.7631578947368423e-05, |
|
"loss": 0.7927, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 5.736842105263158e-05, |
|
"loss": 0.7775, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 5.7105263157894736e-05, |
|
"loss": 0.7684, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 5.684210526315789e-05, |
|
"loss": 0.7542, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 5.6578947368421056e-05, |
|
"loss": 0.751, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 5.631578947368421e-05, |
|
"loss": 0.7325, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 5.605263157894737e-05, |
|
"loss": 0.7199, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 5.5789473684210526e-05, |
|
"loss": 0.7194, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 5.552631578947368e-05, |
|
"loss": 0.7123, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 5.5263157894736845e-05, |
|
"loss": 0.6851, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.016743503291359894, |
|
"eval_accuracy_undropoff": 0.9994596891272138, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.01653753794572063, |
|
"eval_iou_undropoff": 0.9585702479181101, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.649047315120697, |
|
"eval_mean_accuracy": 0.5081015962092869, |
|
"eval_mean_iou": 0.48755389293191537, |
|
"eval_overall_accuracy": 0.9585990905761719, |
|
"eval_runtime": 2.8111, |
|
"eval_samples_per_second": 7.115, |
|
"eval_steps_per_second": 0.711, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 5.5e-05, |
|
"loss": 0.6899, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 5.473684210526316e-05, |
|
"loss": 0.699, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 5.447368421052632e-05, |
|
"loss": 0.6628, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 5.421052631578948e-05, |
|
"loss": 0.6454, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 5.3947368421052635e-05, |
|
"loss": 0.6433, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 5.368421052631579e-05, |
|
"loss": 0.6197, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 5.342105263157895e-05, |
|
"loss": 0.5929, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 5.3157894736842104e-05, |
|
"loss": 0.6429, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 5.289473684210526e-05, |
|
"loss": 0.6129, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 5.263157894736842e-05, |
|
"loss": 0.5882, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.017670130048854333, |
|
"eval_accuracy_undropoff": 0.9993924239062187, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.017427892772310823, |
|
"eval_iou_undropoff": 0.9585426915068979, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.47389930486679077, |
|
"eval_mean_accuracy": 0.5085312769775365, |
|
"eval_mean_iou": 0.3253235280930696, |
|
"eval_overall_accuracy": 0.9585731506347657, |
|
"eval_runtime": 2.9413, |
|
"eval_samples_per_second": 6.8, |
|
"eval_steps_per_second": 0.68, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 5.236842105263158e-05, |
|
"loss": 0.6071, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 5.210526315789474e-05, |
|
"loss": 0.5821, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 5.18421052631579e-05, |
|
"loss": 0.6139, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 5.157894736842106e-05, |
|
"loss": 0.5813, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 5.131578947368421e-05, |
|
"loss": 0.5747, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 5.105263157894737e-05, |
|
"loss": 0.5474, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 5.0789473684210526e-05, |
|
"loss": 0.5487, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 5.052631578947368e-05, |
|
"loss": 0.5498, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 5.0263157894736846e-05, |
|
"loss": 0.5379, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 5e-05, |
|
"loss": 0.53, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.057308653868207986, |
|
"eval_accuracy_undropoff": 0.997473574022092, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.05417770704227795, |
|
"eval_iou_undropoff": 0.9582828192254225, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4153165817260742, |
|
"eval_mean_accuracy": 0.52739111394515, |
|
"eval_mean_iou": 0.33748684208923346, |
|
"eval_overall_accuracy": 0.9583822250366211, |
|
"eval_runtime": 2.8555, |
|
"eval_samples_per_second": 7.004, |
|
"eval_steps_per_second": 0.7, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 4.973684210526316e-05, |
|
"loss": 0.5316, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 4.9473684210526315e-05, |
|
"loss": 0.5182, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 4.921052631578947e-05, |
|
"loss": 0.5111, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 4.8947368421052635e-05, |
|
"loss": 0.5123, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.868421052631579e-05, |
|
"loss": 0.503, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 4.842105263157895e-05, |
|
"loss": 0.4843, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 4.815789473684211e-05, |
|
"loss": 0.511, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 4.789473684210527e-05, |
|
"loss": 0.5023, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 4.7631578947368424e-05, |
|
"loss": 0.504, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.5009, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.3230165829491502, |
|
"eval_accuracy_undropoff": 0.9745914583119812, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.20371640094081706, |
|
"eval_iou_undropoff": 0.946784743209426, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4274832606315613, |
|
"eval_mean_accuracy": 0.6488040206305657, |
|
"eval_mean_iou": 0.38350038138341436, |
|
"eval_overall_accuracy": 0.9474994659423828, |
|
"eval_runtime": 2.8386, |
|
"eval_samples_per_second": 7.046, |
|
"eval_steps_per_second": 0.705, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 4.710526315789474e-05, |
|
"loss": 0.4875, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 4.6842105263157894e-05, |
|
"loss": 0.4752, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 4.657894736842105e-05, |
|
"loss": 0.4921, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 4.631578947368421e-05, |
|
"loss": 0.4734, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 4.605263157894737e-05, |
|
"loss": 0.4495, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 4.5789473684210527e-05, |
|
"loss": 0.4543, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 4.552631578947369e-05, |
|
"loss": 0.4517, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 4.5263157894736846e-05, |
|
"loss": 0.4487, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 4.5e-05, |
|
"loss": 0.4513, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.4699, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.4157021950044726, |
|
"eval_accuracy_undropoff": 0.9812761884102821, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.29037932824486185, |
|
"eval_iou_undropoff": 0.9570171064769841, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.38186168670654297, |
|
"eval_mean_accuracy": 0.6984891917073773, |
|
"eval_mean_iou": 0.41579881157394866, |
|
"eval_overall_accuracy": 0.9577600479125976, |
|
"eval_runtime": 2.892, |
|
"eval_samples_per_second": 6.916, |
|
"eval_steps_per_second": 0.692, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 4.4473684210526316e-05, |
|
"loss": 0.4379, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 4.421052631578947e-05, |
|
"loss": 0.4571, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 4.394736842105263e-05, |
|
"loss": 0.4492, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 4.368421052631579e-05, |
|
"loss": 0.4127, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 4.342105263157895e-05, |
|
"loss": 0.4171, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 4.3157894736842105e-05, |
|
"loss": 0.4115, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 4.289473684210526e-05, |
|
"loss": 0.4204, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 4.2631578947368425e-05, |
|
"loss": 0.4289, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 4.236842105263158e-05, |
|
"loss": 0.4148, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.3946, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.3853758113718205, |
|
"eval_accuracy_undropoff": 0.9833942468335096, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.27869836814989535, |
|
"eval_iou_undropoff": 0.9578537547951392, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3562777638435364, |
|
"eval_mean_accuracy": 0.6843850291026651, |
|
"eval_mean_iou": 0.6182760614725173, |
|
"eval_overall_accuracy": 0.9585290908813476, |
|
"eval_runtime": 2.8829, |
|
"eval_samples_per_second": 6.937, |
|
"eval_steps_per_second": 0.694, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 4.1842105263157894e-05, |
|
"loss": 0.4187, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 4.157894736842106e-05, |
|
"loss": 0.3983, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 4.1315789473684214e-05, |
|
"loss": 0.401, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 4.105263157894737e-05, |
|
"loss": 0.3948, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 4.078947368421053e-05, |
|
"loss": 0.3945, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 4.0526315789473684e-05, |
|
"loss": 0.3759, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 4.026315789473684e-05, |
|
"loss": 0.4164, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 3.9999999999999996e-05, |
|
"loss": 0.3946, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 3.973684210526315e-05, |
|
"loss": 0.4144, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 3.9473684210526316e-05, |
|
"loss": 0.3788, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_dropoff": 0.4196151287873575, |
|
"eval_accuracy_undropoff": 0.9826666679933969, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.29982333255761356, |
|
"eval_iou_undropoff": 0.9585319037171599, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3259146809577942, |
|
"eval_mean_accuracy": 0.7011408983903772, |
|
"eval_mean_iou": 0.6291776181373867, |
|
"eval_overall_accuracy": 0.9592554092407226, |
|
"eval_runtime": 2.8842, |
|
"eval_samples_per_second": 6.934, |
|
"eval_steps_per_second": 0.693, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 3.921052631578948e-05, |
|
"loss": 0.3577, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 3.8947368421052636e-05, |
|
"loss": 0.3748, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 3.868421052631579e-05, |
|
"loss": 0.3641, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 3.842105263157895e-05, |
|
"loss": 0.3444, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 3.8157894736842105e-05, |
|
"loss": 0.3776, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 3.789473684210526e-05, |
|
"loss": 0.3474, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 3.763157894736842e-05, |
|
"loss": 0.3546, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 3.736842105263158e-05, |
|
"loss": 0.3562, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 3.710526315789474e-05, |
|
"loss": 0.3387, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 3.6842105263157895e-05, |
|
"loss": 0.3412, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.4065873070483268, |
|
"eval_accuracy_undropoff": 0.9800554639558915, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2785358280905303, |
|
"eval_iou_undropoff": 0.9554580825207023, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3391803801059723, |
|
"eval_mean_accuracy": 0.6933213855021092, |
|
"eval_mean_iou": 0.6169969553056163, |
|
"eval_overall_accuracy": 0.9562110900878906, |
|
"eval_runtime": 2.9168, |
|
"eval_samples_per_second": 6.857, |
|
"eval_steps_per_second": 0.686, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 3.657894736842105e-05, |
|
"loss": 0.3485, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 3.6315789473684214e-05, |
|
"loss": 0.3646, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 3.605263157894737e-05, |
|
"loss": 0.3234, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 3.578947368421053e-05, |
|
"loss": 0.3435, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 3.5526315789473684e-05, |
|
"loss": 0.3357, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 3.526315789473685e-05, |
|
"loss": 0.3317, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 3.5000000000000004e-05, |
|
"loss": 0.3166, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 3.473684210526316e-05, |
|
"loss": 0.3606, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 3.447368421052632e-05, |
|
"loss": 0.3257, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 3.421052631578947e-05, |
|
"loss": 0.3326, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_dropoff": 0.3976513222780339, |
|
"eval_accuracy_undropoff": 0.9850716981582663, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.29584857751324195, |
|
"eval_iou_undropoff": 0.9599856017830206, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.32138317823410034, |
|
"eval_mean_accuracy": 0.6913615102181501, |
|
"eval_mean_iou": 0.6279170896481313, |
|
"eval_overall_accuracy": 0.9606472015380859, |
|
"eval_runtime": 2.8472, |
|
"eval_samples_per_second": 7.024, |
|
"eval_steps_per_second": 0.702, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 3.394736842105263e-05, |
|
"loss": 0.3169, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 3.3684210526315786e-05, |
|
"loss": 0.297, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 3.342105263157894e-05, |
|
"loss": 0.3307, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 3.3157894736842106e-05, |
|
"loss": 0.2873, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 3.289473684210527e-05, |
|
"loss": 0.3111, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 3.2631578947368426e-05, |
|
"loss": 0.3102, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 3.236842105263158e-05, |
|
"loss": 0.3094, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 3.210526315789474e-05, |
|
"loss": 0.3189, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 3.1842105263157895e-05, |
|
"loss": 0.3024, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 3.157894736842105e-05, |
|
"loss": 0.2954, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.3830684190004358, |
|
"eval_accuracy_undropoff": 0.9863793897770795, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2915370570142824, |
|
"eval_iou_undropoff": 0.9606677012885093, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3119274079799652, |
|
"eval_mean_accuracy": 0.6847239043887576, |
|
"eval_mean_iou": 0.6261023791513959, |
|
"eval_overall_accuracy": 0.9612941741943359, |
|
"eval_runtime": 2.8355, |
|
"eval_samples_per_second": 7.053, |
|
"eval_steps_per_second": 0.705, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 3.131578947368421e-05, |
|
"loss": 0.3056, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 3.105263157894737e-05, |
|
"loss": 0.3082, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 3.078947368421053e-05, |
|
"loss": 0.3086, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 3.0526315789473684e-05, |
|
"loss": 0.3119, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 3.0263157894736844e-05, |
|
"loss": 0.2938, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 3e-05, |
|
"loss": 0.2777, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 2.9736842105263157e-05, |
|
"loss": 0.3168, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 2.9473684210526314e-05, |
|
"loss": 0.2967, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 2.9210526315789474e-05, |
|
"loss": 0.2739, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 2.8947368421052634e-05, |
|
"loss": 0.3006, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_dropoff": 0.2512764054221427, |
|
"eval_accuracy_undropoff": 0.9933664153507991, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.21795024828113063, |
|
"eval_iou_undropoff": 0.9621150543850036, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.28525277972221375, |
|
"eval_mean_accuracy": 0.6223214103864709, |
|
"eval_mean_iou": 0.5900326513330671, |
|
"eval_overall_accuracy": 0.9625108718872071, |
|
"eval_runtime": 2.8225, |
|
"eval_samples_per_second": 7.086, |
|
"eval_steps_per_second": 0.709, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 2.868421052631579e-05, |
|
"loss": 0.2879, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 2.8421052631578946e-05, |
|
"loss": 0.2735, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 2.8157894736842106e-05, |
|
"loss": 0.2937, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 2.7894736842105263e-05, |
|
"loss": 0.2574, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 2.7631578947368423e-05, |
|
"loss": 0.2815, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 2.736842105263158e-05, |
|
"loss": 0.2787, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 2.710526315789474e-05, |
|
"loss": 0.2641, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 2.6842105263157896e-05, |
|
"loss": 0.276, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 2.6578947368421052e-05, |
|
"loss": 0.2836, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 2.631578947368421e-05, |
|
"loss": 0.2715, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.3938209591963118, |
|
"eval_accuracy_undropoff": 0.9866918347385064, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.30137221931638275, |
|
"eval_iou_undropoff": 0.9614087878893061, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.3021281659603119, |
|
"eval_mean_accuracy": 0.6902563969674091, |
|
"eval_mean_iou": 0.6313905036028444, |
|
"eval_overall_accuracy": 0.9620407104492188, |
|
"eval_runtime": 2.8685, |
|
"eval_samples_per_second": 6.972, |
|
"eval_steps_per_second": 0.697, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 2.605263157894737e-05, |
|
"loss": 0.2697, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 2.578947368421053e-05, |
|
"loss": 0.2747, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 2.5526315789473685e-05, |
|
"loss": 0.2667, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 2.526315789473684e-05, |
|
"loss": 0.273, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.2766, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 2.4736842105263158e-05, |
|
"loss": 0.2571, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 2.4473684210526318e-05, |
|
"loss": 0.2527, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 2.4210526315789474e-05, |
|
"loss": 0.2588, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 2.3947368421052634e-05, |
|
"loss": 0.2494, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 2.368421052631579e-05, |
|
"loss": 0.276, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_dropoff": 0.36899470171334203, |
|
"eval_accuracy_undropoff": 0.9876665834143468, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.28731395261618253, |
|
"eval_iou_undropoff": 0.961349679347425, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.29498302936553955, |
|
"eval_mean_accuracy": 0.6783306425638445, |
|
"eval_mean_iou": 0.6243318159818038, |
|
"eval_overall_accuracy": 0.9619426727294922, |
|
"eval_runtime": 2.9069, |
|
"eval_samples_per_second": 6.88, |
|
"eval_steps_per_second": 0.688, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 2.3421052631578947e-05, |
|
"loss": 0.2756, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 2.3157894736842103e-05, |
|
"loss": 0.2639, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 2.2894736842105263e-05, |
|
"loss": 0.2542, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 2.2631578947368423e-05, |
|
"loss": 0.2554, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 2.236842105263158e-05, |
|
"loss": 0.2735, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 2.2105263157894736e-05, |
|
"loss": 0.2459, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 2.1842105263157896e-05, |
|
"loss": 0.2606, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 2.1578947368421053e-05, |
|
"loss": 0.2469, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 2.1315789473684212e-05, |
|
"loss": 0.2516, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.2622, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.34258583912475055, |
|
"eval_accuracy_undropoff": 0.9876144429175991, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.26650156478048465, |
|
"eval_iou_undropoff": 0.9602281104555205, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.2842712998390198, |
|
"eval_mean_accuracy": 0.6651001410211749, |
|
"eval_mean_iou": 0.6133648376180025, |
|
"eval_overall_accuracy": 0.9607946395874023, |
|
"eval_runtime": 2.7502, |
|
"eval_samples_per_second": 7.272, |
|
"eval_steps_per_second": 0.727, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 2.078947368421053e-05, |
|
"loss": 0.2548, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 2.0526315789473685e-05, |
|
"loss": 0.2528, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 2.0263157894736842e-05, |
|
"loss": 0.2533, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 1.9999999999999998e-05, |
|
"loss": 0.2781, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 1.9736842105263158e-05, |
|
"loss": 0.2403, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 1.9473684210526318e-05, |
|
"loss": 0.2467, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 1.9210526315789474e-05, |
|
"loss": 0.2601, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 1.894736842105263e-05, |
|
"loss": 0.2633, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 1.868421052631579e-05, |
|
"loss": 0.2563, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.2395, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_dropoff": 0.30940159177962795, |
|
"eval_accuracy_undropoff": 0.9895426462496156, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.24930694679569163, |
|
"eval_iou_undropoff": 0.9607580584577288, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.27518779039382935, |
|
"eval_mean_accuracy": 0.6494721190146218, |
|
"eval_mean_iou": 0.6050325026267102, |
|
"eval_overall_accuracy": 0.9612628936767578, |
|
"eval_runtime": 2.8287, |
|
"eval_samples_per_second": 7.07, |
|
"eval_steps_per_second": 0.707, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 1.8157894736842107e-05, |
|
"loss": 0.2407, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.2391, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 1.7631578947368424e-05, |
|
"loss": 0.2464, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.2337, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 1.7105263157894737e-05, |
|
"loss": 0.2422, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 1.6842105263157893e-05, |
|
"loss": 0.2559, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 1.6578947368421053e-05, |
|
"loss": 0.2484, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.2366, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 1.605263157894737e-05, |
|
"loss": 0.2418, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.2597, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.3878712814514094, |
|
"eval_accuracy_undropoff": 0.9868936304014918, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.29787953623882785, |
|
"eval_iou_undropoff": 0.9613636284329451, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.28125619888305664, |
|
"eval_mean_accuracy": 0.6873824559264506, |
|
"eval_mean_iou": 0.6296215823358865, |
|
"eval_overall_accuracy": 0.9619867324829101, |
|
"eval_runtime": 2.9574, |
|
"eval_samples_per_second": 6.763, |
|
"eval_steps_per_second": 0.676, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 1.5526315789473686e-05, |
|
"loss": 0.2314, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 1.5263157894736842e-05, |
|
"loss": 0.2463, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 1.5e-05, |
|
"loss": 0.2307, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 1.4736842105263157e-05, |
|
"loss": 0.2388, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 1.4473684210526317e-05, |
|
"loss": 0.2345, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 1.4210526315789473e-05, |
|
"loss": 0.2501, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 1.3947368421052631e-05, |
|
"loss": 0.2277, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 1.368421052631579e-05, |
|
"loss": 0.2326, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 1.3421052631578948e-05, |
|
"loss": 0.2304, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 1.3157894736842104e-05, |
|
"loss": 0.2294, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_dropoff": 0.32591573201220214, |
|
"eval_accuracy_undropoff": 0.9890421372827438, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2601947578710672, |
|
"eval_iou_undropoff": 0.9609405332578476, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.27471286058425903, |
|
"eval_mean_accuracy": 0.657478934647473, |
|
"eval_mean_iou": 0.6105676455644574, |
|
"eval_overall_accuracy": 0.9614698410034179, |
|
"eval_runtime": 2.8706, |
|
"eval_samples_per_second": 6.967, |
|
"eval_steps_per_second": 0.697, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 1.2894736842105264e-05, |
|
"loss": 0.2365, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 1.263157894736842e-05, |
|
"loss": 0.2242, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 1.2368421052631579e-05, |
|
"loss": 0.2365, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.2329, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 1.1842105263157895e-05, |
|
"loss": 0.2342, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 1.1578947368421052e-05, |
|
"loss": 0.227, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 1.1315789473684212e-05, |
|
"loss": 0.2209, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 1.1052631578947368e-05, |
|
"loss": 0.2359, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 1.0789473684210526e-05, |
|
"loss": 0.2215, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.2303, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.3022500516066882, |
|
"eval_accuracy_undropoff": 0.9902427617746475, |
|
      "eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.24675310084486787, |
|
"eval_iou_undropoff": 0.9611482809222578, |
|
      "eval_iou_unlabeled": null,
|
"eval_loss": 0.26064532995224, |
|
"eval_mean_accuracy": 0.6462464066906679, |
|
"eval_mean_iou": 0.6039506908835628, |
|
"eval_overall_accuracy": 0.9616365432739258, |
|
"eval_runtime": 2.7891, |
|
"eval_samples_per_second": 7.171, |
|
"eval_steps_per_second": 0.717, |
|
"step": 200 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 1.017979138473984e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|