|
{ |
|
"best_metric": 0.500433087348938, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGBD-b0_1/checkpoint-200", |
|
"epoch": 100.0, |
|
"global_step": 200, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.6666666666666667e-06, |
|
"loss": 1.0746, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 1.0748, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 5e-06, |
|
"loss": 1.0693, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 1.0764, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 1.0684, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 1e-05, |
|
"loss": 1.0649, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 1.1666666666666668e-05, |
|
"loss": 1.0601, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 1.0556, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 1.5000000000000002e-05, |
|
"loss": 1.0471, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 1.0495, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.20262391339250901, |
|
"eval_accuracy_undropoff": 0.5118628585529819, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.047444479293322045, |
|
"eval_iou_undropoff": 0.5081086334840771, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0889850854873657, |
|
"eval_mean_accuracy": 0.3572433859727454, |
|
"eval_mean_iou": 0.18518437092579973, |
|
"eval_overall_accuracy": 0.4990049362182617, |
|
"eval_runtime": 2.7639, |
|
"eval_samples_per_second": 7.236, |
|
"eval_steps_per_second": 0.724, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 1.8333333333333333e-05, |
|
"loss": 1.0418, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 2e-05, |
|
"loss": 1.0375, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 1.9912280701754387e-05, |
|
"loss": 1.034, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 1.9824561403508773e-05, |
|
"loss": 1.0282, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 1.9736842105263158e-05, |
|
"loss": 1.0212, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 1.9649122807017544e-05, |
|
"loss": 1.0081, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 1.9561403508771933e-05, |
|
"loss": 1.0061, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 1.9473684210526318e-05, |
|
"loss": 1.0027, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 1.9385964912280704e-05, |
|
"loss": 0.9979, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 1.929824561403509e-05, |
|
"loss": 0.9941, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.8224913415445309, |
|
"eval_accuracy_undropoff": 0.8489730610750296, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.19307967569325465, |
|
"eval_iou_undropoff": 0.8424968366811092, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0478662252426147, |
|
"eval_mean_accuracy": 0.8357322013097802, |
|
"eval_mean_iou": 0.34519217079145464, |
|
"eval_overall_accuracy": 0.847871971130371, |
|
"eval_runtime": 2.9295, |
|
"eval_samples_per_second": 6.827, |
|
"eval_steps_per_second": 0.683, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 1.9210526315789474e-05, |
|
"loss": 0.9861, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 1.912280701754386e-05, |
|
"loss": 0.9817, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 1.9035087719298245e-05, |
|
"loss": 0.9781, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 1.894736842105263e-05, |
|
"loss": 0.9729, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 1.885964912280702e-05, |
|
"loss": 0.9669, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 1.8771929824561405e-05, |
|
"loss": 0.9634, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 1.868421052631579e-05, |
|
"loss": 0.9642, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 1.8596491228070176e-05, |
|
"loss": 0.9586, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 1.8508771929824562e-05, |
|
"loss": 0.9559, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.9448, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.7351131906695108, |
|
"eval_accuracy_undropoff": 0.9082392930385471, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.23901293122734799, |
|
"eval_iou_undropoff": 0.8979626128799195, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9838544130325317, |
|
"eval_mean_accuracy": 0.8216762418540289, |
|
"eval_mean_iou": 0.37899184803575586, |
|
"eval_overall_accuracy": 0.9010408401489258, |
|
"eval_runtime": 3.1655, |
|
"eval_samples_per_second": 6.318, |
|
"eval_steps_per_second": 0.632, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 1.8333333333333333e-05, |
|
"loss": 0.9406, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 1.824561403508772e-05, |
|
"loss": 0.9378, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 1.8157894736842107e-05, |
|
"loss": 0.935, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 1.8070175438596493e-05, |
|
"loss": 0.9249, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 1.7982456140350878e-05, |
|
"loss": 0.9162, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.9153, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 1.780701754385965e-05, |
|
"loss": 0.8983, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 1.7719298245614035e-05, |
|
"loss": 0.918, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 1.763157894736842e-05, |
|
"loss": 0.9107, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 1.754385964912281e-05, |
|
"loss": 0.8912, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.4863276680657813, |
|
"eval_accuracy_undropoff": 0.9437063733796893, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.23034404840794775, |
|
"eval_iou_undropoff": 0.9232710241069885, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9041398763656616, |
|
"eval_mean_accuracy": 0.7150170207227353, |
|
"eval_mean_iou": 0.3845383575049787, |
|
"eval_overall_accuracy": 0.9246889114379883, |
|
"eval_runtime": 2.9597, |
|
"eval_samples_per_second": 6.757, |
|
"eval_steps_per_second": 0.676, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 1.7456140350877195e-05, |
|
"loss": 0.8927, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.8849, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 1.7280701754385966e-05, |
|
"loss": 0.8909, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 1.719298245614035e-05, |
|
"loss": 0.8779, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 1.7105263157894737e-05, |
|
"loss": 0.8723, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 1.7017543859649125e-05, |
|
"loss": 0.8638, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 1.692982456140351e-05, |
|
"loss": 0.867, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 1.6842105263157896e-05, |
|
"loss": 0.8582, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 1.6754385964912282e-05, |
|
"loss": 0.8498, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.8458, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.38082983554668687, |
|
"eval_accuracy_undropoff": 0.9565176118458433, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.21875946972192284, |
|
"eval_iou_undropoff": 0.9316126713826287, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7996953129768372, |
|
"eval_mean_accuracy": 0.6686737236962651, |
|
"eval_mean_iou": 0.38345738036818383, |
|
"eval_overall_accuracy": 0.9325809478759766, |
|
"eval_runtime": 2.9373, |
|
"eval_samples_per_second": 6.809, |
|
"eval_steps_per_second": 0.681, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 1.6578947368421053e-05, |
|
"loss": 0.8489, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 1.649122807017544e-05, |
|
"loss": 0.8322, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 1.6403508771929827e-05, |
|
"loss": 0.8306, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.8211, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 1.62280701754386e-05, |
|
"loss": 0.8128, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 1.6140350877192984e-05, |
|
"loss": 0.8159, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 1.605263157894737e-05, |
|
"loss": 0.8077, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 1.5964912280701755e-05, |
|
"loss": 0.8115, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 1.5877192982456144e-05, |
|
"loss": 0.7824, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 1.578947368421053e-05, |
|
"loss": 0.8299, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.306841900043579, |
|
"eval_accuracy_undropoff": 0.9597373870247777, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.1934366306340696, |
|
"eval_iou_undropoff": 0.9317996330434056, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7386532425880432, |
|
"eval_mean_accuracy": 0.6332896435341784, |
|
"eval_mean_iou": 0.37507875455915846, |
|
"eval_overall_accuracy": 0.9325904846191406, |
|
"eval_runtime": 3.129, |
|
"eval_samples_per_second": 6.392, |
|
"eval_steps_per_second": 0.639, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 1.5701754385964915e-05, |
|
"loss": 0.7911, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 1.56140350877193e-05, |
|
"loss": 0.7694, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 1.5526315789473686e-05, |
|
"loss": 0.7954, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 1.543859649122807e-05, |
|
"loss": 0.7716, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 1.535087719298246e-05, |
|
"loss": 0.7495, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 1.5263157894736846e-05, |
|
"loss": 0.7472, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 1.517543859649123e-05, |
|
"loss": 0.7637, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 1.5087719298245615e-05, |
|
"loss": 0.7548, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 1.5000000000000002e-05, |
|
"loss": 0.7478, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 1.4912280701754388e-05, |
|
"loss": 0.7518, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.2960801853253515, |
|
"eval_accuracy_undropoff": 0.9683274343591943, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.1975453738560891, |
|
"eval_iou_undropoff": 0.9397214711056545, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6810304522514343, |
|
"eval_mean_accuracy": 0.6322038098422729, |
|
"eval_mean_iou": 0.3790889483205812, |
|
"eval_overall_accuracy": 0.9403759002685547, |
|
"eval_runtime": 2.9719, |
|
"eval_samples_per_second": 6.73, |
|
"eval_steps_per_second": 0.673, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 1.4824561403508773e-05, |
|
"loss": 0.7382, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 1.4736842105263159e-05, |
|
"loss": 0.7568, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 1.4649122807017544e-05, |
|
"loss": 0.7411, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 1.4561403508771931e-05, |
|
"loss": 0.6921, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 1.4473684210526317e-05, |
|
"loss": 0.7174, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 1.4385964912280704e-05, |
|
"loss": 0.7151, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 1.429824561403509e-05, |
|
"loss": 0.73, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.7098, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 1.412280701754386e-05, |
|
"loss": 0.6912, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 1.4035087719298246e-05, |
|
"loss": 0.6943, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.2411156219179339, |
|
"eval_accuracy_undropoff": 0.9726190748643999, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.1691189482591643, |
|
"eval_iou_undropoff": 0.941739253422002, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6321617364883423, |
|
"eval_mean_accuracy": 0.6068673483911668, |
|
"eval_mean_iou": 0.3702860672270554, |
|
"eval_overall_accuracy": 0.9422037124633789, |
|
"eval_runtime": 2.9083, |
|
"eval_samples_per_second": 6.877, |
|
"eval_steps_per_second": 0.688, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 1.3947368421052631e-05, |
|
"loss": 0.7307, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 1.385964912280702e-05, |
|
"loss": 0.6983, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 1.3771929824561406e-05, |
|
"loss": 0.6995, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.6687, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 1.3596491228070177e-05, |
|
"loss": 0.672, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 1.3508771929824562e-05, |
|
"loss": 0.6712, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 1.3421052631578948e-05, |
|
"loss": 0.7337, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 0.6606, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 1.3245614035087719e-05, |
|
"loss": 0.7239, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 1.3157894736842108e-05, |
|
"loss": 0.6617, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_dropoff": 0.27343287690084633, |
|
"eval_accuracy_undropoff": 0.974586483073742, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.1892465949138013, |
|
"eval_iou_undropoff": 0.9448538377714254, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6071122884750366, |
|
"eval_mean_accuracy": 0.6240096799872942, |
|
"eval_mean_iou": 0.3780334775617422, |
|
"eval_overall_accuracy": 0.9454330444335938, |
|
"eval_runtime": 3.1015, |
|
"eval_samples_per_second": 6.448, |
|
"eval_steps_per_second": 0.645, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 1.3070175438596493e-05, |
|
"loss": 0.6397, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 1.2982456140350879e-05, |
|
"loss": 0.68, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 1.2894736842105264e-05, |
|
"loss": 0.6642, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 1.280701754385965e-05, |
|
"loss": 0.6341, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 1.2719298245614035e-05, |
|
"loss": 0.6783, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 1.263157894736842e-05, |
|
"loss": 0.6371, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 1.2543859649122808e-05, |
|
"loss": 0.6431, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 1.2456140350877195e-05, |
|
"loss": 0.6393, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 1.236842105263158e-05, |
|
"loss": 0.6309, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 1.2280701754385966e-05, |
|
"loss": 0.634, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.24070735567329526, |
|
"eval_accuracy_undropoff": 0.9804538810340934, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.18019759818954179, |
|
"eval_iou_undropoff": 0.9494230040635037, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5931987166404724, |
|
"eval_mean_accuracy": 0.6105806183536944, |
|
"eval_mean_iou": 0.3765402007510152, |
|
"eval_overall_accuracy": 0.9496957778930664, |
|
"eval_runtime": 2.967, |
|
"eval_samples_per_second": 6.741, |
|
"eval_steps_per_second": 0.674, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 1.2192982456140352e-05, |
|
"loss": 0.6392, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.6654, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 1.2017543859649124e-05, |
|
"loss": 0.6101, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 1.192982456140351e-05, |
|
"loss": 0.6192, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 1.1842105263157895e-05, |
|
"loss": 0.6381, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 1.1754385964912282e-05, |
|
"loss": 0.6018, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 1.1666666666666668e-05, |
|
"loss": 0.6106, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.6486, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 1.1491228070175439e-05, |
|
"loss": 0.6192, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 1.1403508771929826e-05, |
|
"loss": 0.6157, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_dropoff": 0.32805798298126104, |
|
"eval_accuracy_undropoff": 0.9795322679026485, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.24251662501229285, |
|
"eval_iou_undropoff": 0.9520137977134819, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5828702449798584, |
|
"eval_mean_accuracy": 0.6537951254419547, |
|
"eval_mean_iou": 0.39817680757525825, |
|
"eval_overall_accuracy": 0.9524444580078125, |
|
"eval_runtime": 2.9135, |
|
"eval_samples_per_second": 6.865, |
|
"eval_steps_per_second": 0.686, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 1.1315789473684212e-05, |
|
"loss": 0.6085, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 1.1228070175438597e-05, |
|
"loss": 0.5901, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 1.1140350877192983e-05, |
|
"loss": 0.6228, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 1.105263157894737e-05, |
|
"loss": 0.5944, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 1.0964912280701755e-05, |
|
"loss": 0.6076, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 1.0877192982456142e-05, |
|
"loss": 0.5844, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 1.0789473684210528e-05, |
|
"loss": 0.5846, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 1.0701754385964913e-05, |
|
"loss": 0.6332, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 1.0614035087719299e-05, |
|
"loss": 0.5968, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.5814, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.3607972659923393, |
|
"eval_accuracy_undropoff": 0.9790086738303464, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.25863605423162545, |
|
"eval_iou_undropoff": 0.9527589478644293, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5707733035087585, |
|
"eval_mean_accuracy": 0.6699029699113428, |
|
"eval_mean_iou": 0.4037983340320182, |
|
"eval_overall_accuracy": 0.9533039093017578, |
|
"eval_runtime": 2.8254, |
|
"eval_samples_per_second": 7.079, |
|
"eval_steps_per_second": 0.708, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 1.043859649122807e-05, |
|
"loss": 0.6022, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 1.0350877192982459e-05, |
|
"loss": 0.602, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 1.0263157894736844e-05, |
|
"loss": 0.6057, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 1.017543859649123e-05, |
|
"loss": 0.6125, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 1.0087719298245615e-05, |
|
"loss": 0.5939, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 1e-05, |
|
"loss": 0.5734, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 9.912280701754386e-06, |
|
"loss": 0.6002, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 9.824561403508772e-06, |
|
"loss": 0.5922, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 9.736842105263159e-06, |
|
"loss": 0.5674, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 9.649122807017545e-06, |
|
"loss": 0.5988, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_dropoff": 0.30607582742723455, |
|
"eval_accuracy_undropoff": 0.985094584254167, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.23568786890804982, |
|
"eval_iou_undropoff": 0.9564286666576498, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.557483434677124, |
|
"eval_mean_accuracy": 0.6455852058407008, |
|
"eval_mean_iou": 0.39737217852189993, |
|
"eval_overall_accuracy": 0.9568614959716797, |
|
"eval_runtime": 2.9471, |
|
"eval_samples_per_second": 6.786, |
|
"eval_steps_per_second": 0.679, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 9.56140350877193e-06, |
|
"loss": 0.5816, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 9.473684210526315e-06, |
|
"loss": 0.5569, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 9.385964912280703e-06, |
|
"loss": 0.5778, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 9.298245614035088e-06, |
|
"loss": 0.5604, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 9.210526315789474e-06, |
|
"loss": 0.5596, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 9.12280701754386e-06, |
|
"loss": 0.5729, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 9.035087719298246e-06, |
|
"loss": 0.5524, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.5629, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 8.859649122807017e-06, |
|
"loss": 0.5945, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 8.771929824561405e-06, |
|
"loss": 0.5583, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.43456501295901284, |
|
"eval_accuracy_undropoff": 0.9803396495641193, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3102744661338923, |
|
"eval_iou_undropoff": 0.9569846893142245, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5529803037643433, |
|
"eval_mean_accuracy": 0.7074523312615661, |
|
"eval_mean_iou": 0.4224197184827056, |
|
"eval_overall_accuracy": 0.9576467514038086, |
|
"eval_runtime": 3.0786, |
|
"eval_samples_per_second": 6.496, |
|
"eval_steps_per_second": 0.65, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 8.68421052631579e-06, |
|
"loss": 0.5576, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 8.596491228070176e-06, |
|
"loss": 0.5665, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 8.508771929824563e-06, |
|
"loss": 0.5575, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.5657, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 0.5605, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 8.24561403508772e-06, |
|
"loss": 0.5545, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 8.157894736842106e-06, |
|
"loss": 0.5388, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 8.070175438596492e-06, |
|
"loss": 0.5571, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 7.982456140350877e-06, |
|
"loss": 0.5423, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 7.894736842105265e-06, |
|
"loss": 0.5596, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_dropoff": 0.3167045115713663, |
|
"eval_accuracy_undropoff": 0.9876606131284597, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2510472422620433, |
|
"eval_iou_undropoff": 0.9592753402725737, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5263925194740295, |
|
"eval_mean_accuracy": 0.652182562349913, |
|
"eval_mean_iou": 0.4034408608448723, |
|
"eval_overall_accuracy": 0.9597627639770507, |
|
"eval_runtime": 3.1614, |
|
"eval_samples_per_second": 6.326, |
|
"eval_steps_per_second": 0.633, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 7.80701754385965e-06, |
|
"loss": 0.5983, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 7.719298245614036e-06, |
|
"loss": 0.5767, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 7.631578947368423e-06, |
|
"loss": 0.5348, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 7.5438596491228074e-06, |
|
"loss": 0.545, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 7.456140350877194e-06, |
|
"loss": 0.575, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 7.368421052631579e-06, |
|
"loss": 0.55, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 7.280701754385966e-06, |
|
"loss": 0.5571, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 7.192982456140352e-06, |
|
"loss": 0.5449, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 7.1052631578947375e-06, |
|
"loss": 0.5626, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 7.017543859649123e-06, |
|
"loss": 0.5524, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.44289089199293563, |
|
"eval_accuracy_undropoff": 0.9789680758863138, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.306547326108975, |
|
"eval_iou_undropoff": 0.9559503571403586, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5391871333122253, |
|
"eval_mean_accuracy": 0.7109294839396247, |
|
"eval_mean_iou": 0.4208325610831112, |
|
"eval_overall_accuracy": 0.9566783905029297, |
|
"eval_runtime": 3.3173, |
|
"eval_samples_per_second": 6.029, |
|
"eval_steps_per_second": 0.603, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 6.92982456140351e-06, |
|
"loss": 0.5627, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.5352, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 6.754385964912281e-06, |
|
"loss": 0.552, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 0.5852, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 6.578947368421054e-06, |
|
"loss": 0.5346, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 6.491228070175439e-06, |
|
"loss": 0.5303, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 6.403508771929825e-06, |
|
"loss": 0.554, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 6.31578947368421e-06, |
|
"loss": 0.5546, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 6.2280701754385975e-06, |
|
"loss": 0.5501, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 6.140350877192983e-06, |
|
"loss": 0.5294, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_dropoff": 0.4002156012752586, |
|
"eval_accuracy_undropoff": 0.9824469614727501, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.29085253847974585, |
|
"eval_iou_undropoff": 0.9575751639058075, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5256578326225281, |
|
"eval_mean_accuracy": 0.6913312813740043, |
|
"eval_mean_iou": 0.41614256746185113, |
|
"eval_overall_accuracy": 0.9582382202148437, |
|
"eval_runtime": 3.019, |
|
"eval_samples_per_second": 6.625, |
|
"eval_steps_per_second": 0.662, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 6.0526315789473685e-06, |
|
"loss": 0.5355, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 5.964912280701755e-06, |
|
"loss": 0.5288, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 5.877192982456141e-06, |
|
"loss": 0.5364, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.52, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 5.701754385964913e-06, |
|
"loss": 0.5331, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 5.6140350877192985e-06, |
|
"loss": 0.5493, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 5.526315789473685e-06, |
|
"loss": 0.5326, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 5.438596491228071e-06, |
|
"loss": 0.5376, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 5.350877192982457e-06, |
|
"loss": 0.5333, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.5477, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.409546090506663, |
|
"eval_accuracy_undropoff": 0.982919609105482, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.30352346825682835, |
|
"eval_iou_undropoff": 0.9584407580344879, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5177620649337769, |
|
"eval_mean_accuracy": 0.6962328498060726, |
|
"eval_mean_iou": 0.4206547420971054, |
|
"eval_overall_accuracy": 0.9590791702270508, |
|
"eval_runtime": 2.9033, |
|
"eval_samples_per_second": 6.889, |
|
"eval_steps_per_second": 0.689, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 5.175438596491229e-06, |
|
"loss": 0.5243, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 5.087719298245615e-06, |
|
"loss": 0.5293, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5235, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 4.912280701754386e-06, |
|
"loss": 0.5221, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 4.824561403508772e-06, |
|
"loss": 0.5189, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 4.736842105263158e-06, |
|
"loss": 0.569, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 4.649122807017544e-06, |
|
"loss": 0.5146, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 4.56140350877193e-06, |
|
"loss": 0.5204, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 4.473684210526316e-06, |
|
"loss": 0.5211, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 4.385964912280702e-06, |
|
"loss": 0.528, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_dropoff": 0.40474322805568935, |
|
"eval_accuracy_undropoff": 0.9830812048434939, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2964804871000477, |
|
"eval_iou_undropoff": 0.9583850127920605, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5185015201568604, |
|
"eval_mean_accuracy": 0.6939122164495917, |
|
"eval_mean_iou": 0.4182884999640361, |
|
"eval_overall_accuracy": 0.9590343475341797, |
|
"eval_runtime": 3.0397, |
|
"eval_samples_per_second": 6.58, |
|
"eval_steps_per_second": 0.658, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 4.298245614035088e-06, |
|
"loss": 0.5249, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.5145, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 4.12280701754386e-06, |
|
"loss": 0.5263, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 4.035087719298246e-06, |
|
"loss": 0.5393, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 3.947368421052632e-06, |
|
"loss": 0.5815, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 3.859649122807018e-06, |
|
"loss": 0.5069, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 3.7719298245614037e-06, |
|
"loss": 0.5108, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 3.6842105263157896e-06, |
|
"loss": 0.5116, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 3.596491228070176e-06, |
|
"loss": 0.5038, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.5144, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.3716048533223239, |
|
"eval_accuracy_undropoff": 0.9859781865654637, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.2859290895293225, |
|
"eval_iou_undropoff": 0.9598726815570374, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.500433087348938, |
|
"eval_mean_accuracy": 0.6787915199438938, |
|
"eval_mean_iou": 0.4152672570287866, |
|
"eval_overall_accuracy": 0.9604330062866211, |
|
"eval_runtime": 2.886, |
|
"eval_samples_per_second": 6.93, |
|
"eval_steps_per_second": 0.693, |
|
"step": 200 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 1.017979138473984e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|