{
  "best_metric": 0.749171257019043,
  "best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/safety-utcustom-train-SF30-RGB-b0/checkpoint-240",
  "epoch": 120.0,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "learning_rate": 7.5e-07,
      "loss": 1.1677,
      "step": 1
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.5e-06,
      "loss": 1.1594,
      "step": 2
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.25e-06,
      "loss": 1.1676,
      "step": 3
    },
    {
      "epoch": 2.0,
      "learning_rate": 3e-06,
      "loss": 1.1627,
      "step": 4
    },
    {
      "epoch": 2.5,
      "learning_rate": 3.75e-06,
      "loss": 1.1604,
      "step": 5
    },
    {
      "epoch": 3.0,
      "learning_rate": 4.5e-06,
      "loss": 1.1658,
      "step": 6
    },
    {
      "epoch": 3.5,
      "learning_rate": 5.2500000000000006e-06,
      "loss": 1.1583,
      "step": 7
    },
    {
      "epoch": 4.0,
      "learning_rate": 6e-06,
      "loss": 1.1687,
      "step": 8
    },
    {
      "epoch": 4.5,
      "learning_rate": 6.75e-06,
      "loss": 1.1569,
      "step": 9
    },
    {
      "epoch": 5.0,
      "learning_rate": 7.5e-06,
      "loss": 1.1527,
      "step": 10
    },
    {
      "epoch": 5.0,
      "eval_accuracy_safe": 0.7703764811759412,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.14652629277186402,
      "eval_iou_safe": 0.030663997549793695,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.14642944151194123,
      "eval_loss": 1.1084816455841064,
      "eval_mean_accuracy": 0.4584513869739026,
      "eval_mean_iou": 0.059031146353911636,
      "eval_overall_accuracy": 0.16635894775390625,
      "eval_runtime": 1.4751,
      "eval_samples_per_second": 5.423,
      "eval_steps_per_second": 0.678,
      "step": 10
    },
    {
      "epoch": 5.5,
      "learning_rate": 8.25e-06,
      "loss": 1.1521,
      "step": 11
    },
    {
      "epoch": 6.0,
      "learning_rate": 9e-06,
      "loss": 1.1492,
      "step": 12
    },
    {
      "epoch": 6.5,
      "learning_rate": 8.960526315789474e-06,
      "loss": 1.1456,
      "step": 13
    },
    {
      "epoch": 7.0,
      "learning_rate": 8.921052631578947e-06,
      "loss": 1.1433,
      "step": 14
    },
    {
      "epoch": 7.5,
      "learning_rate": 8.881578947368421e-06,
      "loss": 1.1379,
      "step": 15
    },
    {
      "epoch": 8.0,
      "learning_rate": 8.842105263157895e-06,
      "loss": 1.1431,
      "step": 16
    },
    {
      "epoch": 8.5,
      "learning_rate": 8.80263157894737e-06,
      "loss": 1.1302,
      "step": 17
    },
    {
      "epoch": 9.0,
      "learning_rate": 8.763157894736842e-06,
      "loss": 1.1379,
      "step": 18
    },
    {
      "epoch": 9.5,
      "learning_rate": 8.723684210526316e-06,
      "loss": 1.1259,
      "step": 19
    },
    {
      "epoch": 10.0,
      "learning_rate": 8.68421052631579e-06,
      "loss": 1.1326,
      "step": 20
    },
    {
      "epoch": 10.0,
      "eval_accuracy_safe": 0.9694915254237289,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.24695220149698446,
      "eval_iou_safe": 0.04189444073553146,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.24695195825231817,
      "eval_loss": 1.1090760231018066,
      "eval_mean_accuracy": 0.6082218634603567,
      "eval_mean_iou": 0.09628213299594988,
      "eval_overall_accuracy": 0.26992225646972656,
      "eval_runtime": 1.192,
      "eval_samples_per_second": 6.711,
      "eval_steps_per_second": 0.839,
      "step": 20
    },
    {
      "epoch": 10.5,
      "learning_rate": 8.644736842105263e-06,
      "loss": 1.1296,
      "step": 21
    },
    {
      "epoch": 11.0,
      "learning_rate": 8.605263157894737e-06,
      "loss": 1.1125,
      "step": 22
    },
    {
      "epoch": 11.5,
      "learning_rate": 8.56578947368421e-06,
      "loss": 1.1224,
      "step": 23
    },
    {
      "epoch": 12.0,
      "learning_rate": 8.526315789473683e-06,
      "loss": 1.1105,
      "step": 24
    },
    {
      "epoch": 12.5,
      "learning_rate": 8.486842105263159e-06,
      "loss": 1.108,
      "step": 25
    },
    {
      "epoch": 13.0,
      "learning_rate": 8.447368421052632e-06,
      "loss": 1.1265,
      "step": 26
    },
    {
      "epoch": 13.5,
      "learning_rate": 8.407894736842106e-06,
      "loss": 1.1047,
      "step": 27
    },
    {
      "epoch": 14.0,
      "learning_rate": 8.36842105263158e-06,
      "loss": 1.115,
      "step": 28
    },
    {
      "epoch": 14.5,
      "learning_rate": 8.328947368421052e-06,
      "loss": 1.092,
      "step": 29
    },
    {
      "epoch": 15.0,
      "learning_rate": 8.289473684210526e-06,
      "loss": 1.0981,
      "step": 30
    },
    {
      "epoch": 15.0,
      "eval_accuracy_safe": 0.992200389980501,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.40550470282425555,
      "eval_iou_safe": 0.053544543556037626,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.40550470282425555,
      "eval_loss": 1.0979702472686768,
      "eval_mean_accuracy": 0.6988525464023783,
      "eval_mean_iou": 0.15301641546009773,
      "eval_overall_accuracy": 0.42415618896484375,
      "eval_runtime": 1.213,
      "eval_samples_per_second": 6.595,
      "eval_steps_per_second": 0.824,
      "step": 30
    },
    {
      "epoch": 15.5,
      "learning_rate": 8.25e-06,
      "loss": 1.0895,
      "step": 31
    },
    {
      "epoch": 16.0,
      "learning_rate": 8.210526315789473e-06,
      "loss": 1.0659,
      "step": 32
    },
    {
      "epoch": 16.5,
      "learning_rate": 8.171052631578947e-06,
      "loss": 1.0868,
      "step": 33
    },
    {
      "epoch": 17.0,
      "learning_rate": 8.131578947368421e-06,
      "loss": 1.079,
      "step": 34
    },
    {
      "epoch": 17.5,
      "learning_rate": 8.092105263157896e-06,
      "loss": 1.0884,
      "step": 35
    },
    {
      "epoch": 18.0,
      "learning_rate": 8.052631578947368e-06,
      "loss": 1.0499,
      "step": 36
    },
    {
      "epoch": 18.5,
      "learning_rate": 8.013157894736842e-06,
      "loss": 1.0726,
      "step": 37
    },
    {
      "epoch": 19.0,
      "learning_rate": 7.973684210526316e-06,
      "loss": 1.0718,
      "step": 38
    },
    {
      "epoch": 19.5,
      "learning_rate": 7.934210526315789e-06,
      "loss": 1.0653,
      "step": 39
    },
    {
      "epoch": 20.0,
      "learning_rate": 7.894736842105263e-06,
      "loss": 1.086,
      "step": 40
    },
    {
      "epoch": 20.0,
      "eval_accuracy_safe": 0.9926503674816259,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.5103054348671892,
      "eval_iou_safe": 0.06444250128047581,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.5103054348671892,
      "eval_loss": 1.0822405815124512,
      "eval_mean_accuracy": 0.7514779011744075,
      "eval_mean_iou": 0.19158264538255498,
      "eval_overall_accuracy": 0.525639533996582,
      "eval_runtime": 1.1977,
      "eval_samples_per_second": 6.679,
      "eval_steps_per_second": 0.835,
      "step": 40
    },
    {
      "epoch": 20.5,
      "learning_rate": 7.855263157894737e-06,
      "loss": 1.0638,
      "step": 41
    },
    {
      "epoch": 21.0,
      "learning_rate": 7.815789473684211e-06,
      "loss": 1.0491,
      "step": 42
    },
    {
      "epoch": 21.5,
      "learning_rate": 7.776315789473685e-06,
      "loss": 1.058,
      "step": 43
    },
    {
      "epoch": 22.0,
      "learning_rate": 7.736842105263158e-06,
      "loss": 1.0102,
      "step": 44
    },
    {
      "epoch": 22.5,
      "learning_rate": 7.697368421052632e-06,
      "loss": 1.0523,
      "step": 45
    },
    {
      "epoch": 23.0,
      "learning_rate": 7.657894736842106e-06,
      "loss": 1.0807,
      "step": 46
    },
    {
      "epoch": 23.5,
      "learning_rate": 7.618421052631579e-06,
      "loss": 1.0437,
      "step": 47
    },
    {
      "epoch": 24.0,
      "learning_rate": 7.578947368421053e-06,
      "loss": 1.0586,
      "step": 48
    },
    {
      "epoch": 24.5,
      "learning_rate": 7.539473684210527e-06,
      "loss": 1.036,
      "step": 49
    },
    {
      "epoch": 25.0,
      "learning_rate": 7.5e-06,
      "loss": 1.0466,
      "step": 50
    },
    {
      "epoch": 25.0,
      "eval_accuracy_safe": 0.9901604919754012,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.5916703521626885,
      "eval_iou_safe": 0.0760597426269381,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.5916525776602614,
      "eval_loss": 1.0541445016860962,
      "eval_mean_accuracy": 0.7909154220690449,
      "eval_mean_iou": 0.2225707734290665,
      "eval_overall_accuracy": 0.6043386459350586,
      "eval_runtime": 1.2115,
      "eval_samples_per_second": 6.603,
      "eval_steps_per_second": 0.825,
      "step": 50
    },
    {
      "epoch": 25.5,
      "learning_rate": 7.460526315789474e-06,
      "loss": 1.0143,
      "step": 51
    },
    {
      "epoch": 26.0,
      "learning_rate": 7.421052631578948e-06,
      "loss": 1.0743,
      "step": 52
    },
    {
      "epoch": 26.5,
      "learning_rate": 7.381578947368421e-06,
      "loss": 1.0126,
      "step": 53
    },
    {
      "epoch": 27.0,
      "learning_rate": 7.342105263157895e-06,
      "loss": 1.0943,
      "step": 54
    },
    {
      "epoch": 27.5,
      "learning_rate": 7.302631578947368e-06,
      "loss": 1.0216,
      "step": 55
    },
    {
      "epoch": 28.0,
      "learning_rate": 7.263157894736842e-06,
      "loss": 1.0104,
      "step": 56
    },
    {
      "epoch": 28.5,
      "learning_rate": 7.223684210526317e-06,
      "loss": 1.0259,
      "step": 57
    },
    {
      "epoch": 29.0,
      "learning_rate": 7.18421052631579e-06,
      "loss": 1.0378,
      "step": 58
    },
    {
      "epoch": 29.5,
      "learning_rate": 7.144736842105263e-06,
      "loss": 1.001,
      "step": 59
    },
    {
      "epoch": 30.0,
      "learning_rate": 7.1052631578947375e-06,
      "loss": 1.0533,
      "step": 60
    },
    {
      "epoch": 30.0,
      "eval_accuracy_safe": 0.9862906854657267,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.6471891895618873,
      "eval_iou_safe": 0.08607076662094097,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.6470951758858802,
      "eval_loss": 1.024947166442871,
      "eval_mean_accuracy": 0.816739937513807,
      "eval_mean_iou": 0.2443886475022737,
      "eval_overall_accuracy": 0.6579694747924805,
      "eval_runtime": 1.2639,
      "eval_samples_per_second": 6.33,
      "eval_steps_per_second": 0.791,
      "step": 60
    },
    {
      "epoch": 30.5,
      "learning_rate": 7.065789473684211e-06,
      "loss": 1.0201,
      "step": 61
    },
    {
      "epoch": 31.0,
      "learning_rate": 7.026315789473684e-06,
      "loss": 0.9958,
      "step": 62
    },
    {
      "epoch": 31.5,
      "learning_rate": 6.986842105263158e-06,
      "loss": 1.0084,
      "step": 63
    },
    {
      "epoch": 32.0,
      "learning_rate": 6.9473684210526315e-06,
      "loss": 1.0021,
      "step": 64
    },
    {
      "epoch": 32.5,
      "learning_rate": 6.907894736842106e-06,
      "loss": 0.9748,
      "step": 65
    },
    {
      "epoch": 33.0,
      "learning_rate": 6.86842105263158e-06,
      "loss": 1.0624,
      "step": 66
    },
    {
      "epoch": 33.5,
      "learning_rate": 6.828947368421053e-06,
      "loss": 0.9931,
      "step": 67
    },
    {
      "epoch": 34.0,
      "learning_rate": 6.7894736842105264e-06,
      "loss": 1.0179,
      "step": 68
    },
    {
      "epoch": 34.5,
      "learning_rate": 6.75e-06,
      "loss": 1.0053,
      "step": 69
    },
    {
      "epoch": 35.0,
      "learning_rate": 6.710526315789474e-06,
      "loss": 0.9779,
      "step": 70
    },
    {
      "epoch": 35.0,
      "eval_accuracy_safe": 0.9770661466926653,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.687357977071454,
      "eval_iou_safe": 0.09514260217010605,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.6870926801315042,
      "eval_loss": 1.0009846687316895,
      "eval_mean_accuracy": 0.8322120618820597,
      "eval_mean_iou": 0.26074509410053676,
      "eval_overall_accuracy": 0.6965680122375488,
      "eval_runtime": 1.3535,
      "eval_samples_per_second": 5.91,
      "eval_steps_per_second": 0.739,
      "step": 70
    },
    {
      "epoch": 35.5,
      "learning_rate": 6.671052631578947e-06,
      "loss": 1.0052,
      "step": 71
    },
    {
      "epoch": 36.0,
      "learning_rate": 6.6315789473684205e-06,
      "loss": 0.9521,
      "step": 72
    },
    {
      "epoch": 36.5,
      "learning_rate": 6.592105263157895e-06,
      "loss": 0.9952,
      "step": 73
    },
    {
      "epoch": 37.0,
      "learning_rate": 6.552631578947369e-06,
      "loss": 1.0184,
      "step": 74
    },
    {
      "epoch": 37.5,
      "learning_rate": 6.513157894736842e-06,
      "loss": 0.9709,
      "step": 75
    },
    {
      "epoch": 38.0,
      "learning_rate": 6.473684210526316e-06,
      "loss": 0.959,
      "step": 76
    },
    {
      "epoch": 38.5,
      "learning_rate": 6.4342105263157896e-06,
      "loss": 0.9497,
      "step": 77
    },
    {
      "epoch": 39.0,
      "learning_rate": 6.394736842105263e-06,
      "loss": 1.0239,
      "step": 78
    },
    {
      "epoch": 39.5,
      "learning_rate": 6.355263157894737e-06,
      "loss": 0.9677,
      "step": 79
    },
    {
      "epoch": 40.0,
      "learning_rate": 6.31578947368421e-06,
      "loss": 0.9161,
      "step": 80
    },
    {
      "epoch": 40.0,
      "eval_accuracy_safe": 0.9634618269086546,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.7338942182201074,
      "eval_iou_safe": 0.10909770285762813,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.7333943281692789,
      "eval_loss": 0.9695309400558472,
      "eval_mean_accuracy": 0.848678022564381,
      "eval_mean_iou": 0.28083067700896897,
      "eval_overall_accuracy": 0.7411923408508301,
      "eval_runtime": 1.2732,
      "eval_samples_per_second": 6.284,
      "eval_steps_per_second": 0.785,
      "step": 80
    },
    {
      "epoch": 40.5,
      "learning_rate": 6.276315789473684e-06,
      "loss": 0.9493,
      "step": 81
    },
    {
      "epoch": 41.0,
      "learning_rate": 6.236842105263159e-06,
      "loss": 0.9665,
      "step": 82
    },
    {
      "epoch": 41.5,
      "learning_rate": 6.197368421052632e-06,
      "loss": 0.9623,
      "step": 83
    },
    {
      "epoch": 42.0,
      "learning_rate": 6.157894736842105e-06,
      "loss": 0.9901,
      "step": 84
    },
    {
      "epoch": 42.5,
      "learning_rate": 6.118421052631579e-06,
      "loss": 0.9523,
      "step": 85
    },
    {
      "epoch": 43.0,
      "learning_rate": 6.078947368421053e-06,
      "loss": 0.9034,
      "step": 86
    },
    {
      "epoch": 43.5,
      "learning_rate": 6.039473684210526e-06,
      "loss": 0.9353,
      "step": 87
    },
    {
      "epoch": 44.0,
      "learning_rate": 6e-06,
      "loss": 1.0231,
      "step": 88
    },
    {
      "epoch": 44.5,
      "learning_rate": 5.9605263157894735e-06,
      "loss": 0.9537,
      "step": 89
    },
    {
      "epoch": 45.0,
      "learning_rate": 5.921052631578948e-06,
      "loss": 0.9843,
      "step": 90
    },
    {
      "epoch": 45.0,
      "eval_accuracy_safe": 0.9493925303734814,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.7768244190295703,
      "eval_iou_safe": 0.1253679077487413,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.7759008194701261,
      "eval_loss": 0.940314531326294,
      "eval_mean_accuracy": 0.8631084747015259,
      "eval_mean_iou": 0.3004229090729558,
      "eval_overall_accuracy": 0.7823104858398438,
      "eval_runtime": 1.3253,
      "eval_samples_per_second": 6.036,
      "eval_steps_per_second": 0.755,
      "step": 90
    },
    {
      "epoch": 45.5,
      "learning_rate": 5.881578947368422e-06,
      "loss": 0.9646,
      "step": 91
    },
    {
      "epoch": 46.0,
      "learning_rate": 5.842105263157895e-06,
      "loss": 0.9498,
      "step": 92
    },
    {
      "epoch": 46.5,
      "learning_rate": 5.802631578947368e-06,
      "loss": 0.9407,
      "step": 93
    },
    {
      "epoch": 47.0,
      "learning_rate": 5.7631578947368425e-06,
      "loss": 0.9684,
      "step": 94
    },
    {
      "epoch": 47.5,
      "learning_rate": 5.723684210526316e-06,
      "loss": 0.8967,
      "step": 95
    },
    {
      "epoch": 48.0,
      "learning_rate": 5.684210526315789e-06,
      "loss": 1.0137,
      "step": 96
    },
    {
      "epoch": 48.5,
      "learning_rate": 5.644736842105263e-06,
      "loss": 0.9542,
      "step": 97
    },
    {
      "epoch": 49.0,
      "learning_rate": 5.6052631578947374e-06,
      "loss": 0.8673,
      "step": 98
    },
    {
      "epoch": 49.5,
      "learning_rate": 5.565789473684211e-06,
      "loss": 0.9138,
      "step": 99
    },
    {
      "epoch": 50.0,
      "learning_rate": 5.526315789473685e-06,
      "loss": 0.9568,
      "step": 100
    },
    {
      "epoch": 50.0,
      "eval_accuracy_safe": 0.9190940452977351,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.8135083196994605,
      "eval_iou_safe": 0.14123580174066971,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.8116862567811934,
      "eval_loss": 0.9070925116539001,
      "eval_mean_accuracy": 0.8663011824985978,
      "eval_mean_iou": 0.3176406861739544,
      "eval_overall_accuracy": 0.8168649673461914,
      "eval_runtime": 1.2557,
      "eval_samples_per_second": 6.371,
      "eval_steps_per_second": 0.796,
      "step": 100
    },
    {
      "epoch": 50.5,
      "learning_rate": 5.486842105263158e-06,
      "loss": 0.9128,
      "step": 101
    },
    {
      "epoch": 51.0,
      "learning_rate": 5.4473684210526315e-06,
      "loss": 0.8691,
      "step": 102
    },
    {
      "epoch": 51.5,
      "learning_rate": 5.407894736842106e-06,
      "loss": 0.9109,
      "step": 103
    },
    {
      "epoch": 52.0,
      "learning_rate": 5.368421052631579e-06,
      "loss": 0.8425,
      "step": 104
    },
    {
      "epoch": 52.5,
      "learning_rate": 5.328947368421052e-06,
      "loss": 0.895,
      "step": 105
    },
    {
      "epoch": 53.0,
      "learning_rate": 5.289473684210527e-06,
      "loss": 0.9433,
      "step": 106
    },
    {
      "epoch": 53.5,
      "learning_rate": 5.2500000000000006e-06,
      "loss": 0.9005,
      "step": 107
    },
    {
      "epoch": 54.0,
      "learning_rate": 5.210526315789474e-06,
      "loss": 0.8744,
      "step": 108
    },
    {
      "epoch": 54.5,
      "learning_rate": 5.171052631578947e-06,
      "loss": 0.8999,
      "step": 109
    },
    {
      "epoch": 55.0,
      "learning_rate": 5.131578947368421e-06,
      "loss": 0.8443,
      "step": 110
    },
    {
      "epoch": 55.0,
      "eval_accuracy_safe": 0.8742462876856157,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.8570024260249537,
      "eval_iou_safe": 0.1671743332931789,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.8537115815119297,
      "eval_loss": 0.8626915812492371,
      "eval_mean_accuracy": 0.8656243568552847,
      "eval_mean_iou": 0.3402953049350362,
      "eval_overall_accuracy": 0.8575506210327148,
      "eval_runtime": 1.4008,
      "eval_samples_per_second": 5.711,
      "eval_steps_per_second": 0.714,
      "step": 110
    },
    {
      "epoch": 55.5,
      "learning_rate": 5.092105263157895e-06,
      "loss": 0.93,
      "step": 111
    },
    {
      "epoch": 56.0,
      "learning_rate": 5.052631578947368e-06,
      "loss": 0.8241,
      "step": 112
    },
    {
      "epoch": 56.5,
      "learning_rate": 5.013157894736842e-06,
      "loss": 0.919,
      "step": 113
    },
    {
      "epoch": 57.0,
      "learning_rate": 4.973684210526316e-06,
      "loss": 0.8839,
      "step": 114
    },
    {
      "epoch": 57.5,
      "learning_rate": 4.9342105263157895e-06,
      "loss": 0.8664,
      "step": 115
    },
    {
      "epoch": 58.0,
      "learning_rate": 4.894736842105264e-06,
      "loss": 0.8774,
      "step": 116
    },
    {
      "epoch": 58.5,
      "learning_rate": 4.855263157894737e-06,
      "loss": 0.8846,
      "step": 117
    },
    {
      "epoch": 59.0,
      "learning_rate": 4.81578947368421e-06,
      "loss": 0.931,
      "step": 118
    },
    {
      "epoch": 59.5,
      "learning_rate": 4.7763157894736844e-06,
      "loss": 0.8527,
      "step": 119
    },
    {
      "epoch": 60.0,
      "learning_rate": 4.736842105263158e-06,
      "loss": 0.8765,
      "step": 120
    },
    {
      "epoch": 60.0,
      "eval_accuracy_safe": 0.8591420428978551,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.8658727336661936,
      "eval_iou_safe": 0.17292191207636712,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.8619790415433206,
      "eval_loss": 0.8488205671310425,
      "eval_mean_accuracy": 0.8625073882820243,
      "eval_mean_iou": 0.34496698453989594,
      "eval_overall_accuracy": 0.8656587600708008,
      "eval_runtime": 1.4312,
      "eval_samples_per_second": 5.59,
      "eval_steps_per_second": 0.699,
      "step": 120
    },
    {
      "epoch": 60.5,
      "learning_rate": 4.697368421052631e-06,
      "loss": 0.8832,
      "step": 121
    },
    {
      "epoch": 61.0,
      "learning_rate": 4.657894736842106e-06,
      "loss": 0.9832,
      "step": 122
    },
    {
      "epoch": 61.5,
      "learning_rate": 4.618421052631579e-06,
      "loss": 0.8575,
      "step": 123
    },
    {
      "epoch": 62.0,
      "learning_rate": 4.578947368421053e-06,
      "loss": 0.8165,
      "step": 124
    },
    {
      "epoch": 62.5,
      "learning_rate": 4.539473684210527e-06,
      "loss": 0.8491,
      "step": 125
    },
    {
      "epoch": 63.0,
      "learning_rate": 4.5e-06,
      "loss": 0.9346,
      "step": 126
    },
    {
      "epoch": 63.5,
      "learning_rate": 4.460526315789473e-06,
      "loss": 0.8498,
      "step": 127
    },
    {
      "epoch": 64.0,
      "learning_rate": 4.4210526315789476e-06,
      "loss": 0.8806,
      "step": 128
    },
    {
      "epoch": 64.5,
      "learning_rate": 4.381578947368421e-06,
      "loss": 0.8288,
      "step": 129
    },
    {
      "epoch": 65.0,
      "learning_rate": 4.342105263157895e-06,
      "loss": 0.899,
      "step": 130
    },
    {
      "epoch": 65.0,
      "eval_accuracy_safe": 0.8547922603869806,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.8710409646576527,
      "eval_iou_safe": 0.17724703986962054,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.8669472125956462,
      "eval_loss": 0.842871367931366,
      "eval_mean_accuracy": 0.8629166125223167,
      "eval_mean_iou": 0.3480647508217556,
      "eval_overall_accuracy": 0.8705244064331055,
      "eval_runtime": 1.237,
      "eval_samples_per_second": 6.467,
      "eval_steps_per_second": 0.808,
      "step": 130
    },
    {
      "epoch": 65.5,
      "learning_rate": 4.302631578947368e-06,
      "loss": 0.8852,
      "step": 131
    },
    {
      "epoch": 66.0,
      "learning_rate": 4.263157894736842e-06,
      "loss": 0.8237,
      "step": 132
    },
    {
      "epoch": 66.5,
      "learning_rate": 4.223684210526316e-06,
      "loss": 0.8323,
      "step": 133
    },
    {
      "epoch": 67.0,
      "learning_rate": 4.18421052631579e-06,
      "loss": 0.8761,
      "step": 134
    },
    {
      "epoch": 67.5,
      "learning_rate": 4.144736842105263e-06,
      "loss": 0.8837,
      "step": 135
    },
    {
      "epoch": 68.0,
      "learning_rate": 4.1052631578947365e-06,
      "loss": 0.8374,
      "step": 136
    },
    {
      "epoch": 68.5,
      "learning_rate": 4.065789473684211e-06,
      "loss": 0.8146,
      "step": 137
    },
    {
      "epoch": 69.0,
      "learning_rate": 4.026315789473684e-06,
      "loss": 0.9205,
      "step": 138
    },
    {
      "epoch": 69.5,
      "learning_rate": 3.986842105263158e-06,
      "loss": 0.8839,
      "step": 139
    },
    {
      "epoch": 70.0,
      "learning_rate": 3.9473684210526315e-06,
      "loss": 0.7713,
      "step": 140
    },
    {
      "epoch": 70.0,
      "eval_accuracy_safe": 0.8025648717564122,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.8968998493953653,
      "eval_iou_safe": 0.19830627825958047,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.8911652674661912,
      "eval_loss": 0.8084838390350342,
      "eval_mean_accuracy": 0.8497323605758887,
      "eval_mean_iou": 0.3631571819085906,
      "eval_overall_accuracy": 0.8939008712768555,
      "eval_runtime": 1.2344,
      "eval_samples_per_second": 6.481,
      "eval_steps_per_second": 0.81,
      "step": 140
    },
    {
      "epoch": 70.5,
      "learning_rate": 3.907894736842106e-06,
      "loss": 0.7833,
      "step": 141
    },
    {
      "epoch": 71.0,
      "learning_rate": 3.868421052631579e-06,
      "loss": 0.9608,
      "step": 142
    },
    {
      "epoch": 71.5,
      "learning_rate": 3.828947368421053e-06,
      "loss": 0.8081,
      "step": 143
    },
    {
      "epoch": 72.0,
      "learning_rate": 3.7894736842105264e-06,
      "loss": 0.822,
      "step": 144
    },
    {
      "epoch": 72.5,
      "learning_rate": 3.75e-06,
      "loss": 0.8508,
      "step": 145
    },
    {
      "epoch": 73.0,
      "learning_rate": 3.710526315789474e-06,
      "loss": 0.8297,
      "step": 146
    },
    {
      "epoch": 73.5,
      "learning_rate": 3.6710526315789476e-06,
      "loss": 0.7963,
      "step": 147
    },
    {
      "epoch": 74.0,
      "learning_rate": 3.631578947368421e-06,
      "loss": 0.9092,
      "step": 148
    },
    {
      "epoch": 74.5,
      "learning_rate": 3.592105263157895e-06,
      "loss": 0.765,
      "step": 149
    },
    {
      "epoch": 75.0,
      "learning_rate": 3.5526315789473687e-06,
      "loss": 0.8505,
      "step": 150
    },
    {
      "epoch": 75.0,
      "eval_accuracy_safe": 0.7785660716964152,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9144912390260047,
      "eval_iou_safe": 0.2207962907822536,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.9079488517260416,
      "eval_loss": 0.7821168303489685,
      "eval_mean_accuracy": 0.8465286553612099,
      "eval_mean_iou": 0.3762483808360984,
      "eval_overall_accuracy": 0.9101700782775879,
      "eval_runtime": 1.2538,
      "eval_samples_per_second": 6.38,
      "eval_steps_per_second": 0.798,
      "step": 150
    },
    {
      "epoch": 75.5,
      "learning_rate": 3.513157894736842e-06,
      "loss": 0.8494,
      "step": 151
    },
    {
      "epoch": 76.0,
      "learning_rate": 3.4736842105263158e-06,
      "loss": 0.8188,
      "step": 152
    },
    {
      "epoch": 76.5,
      "learning_rate": 3.43421052631579e-06,
      "loss": 0.81,
      "step": 153
    },
    {
      "epoch": 77.0,
      "learning_rate": 3.3947368421052632e-06,
      "loss": 0.9673,
      "step": 154
    },
    {
      "epoch": 77.5,
      "learning_rate": 3.355263157894737e-06,
      "loss": 0.8161,
      "step": 155
    },
    {
      "epoch": 78.0,
      "learning_rate": 3.3157894736842103e-06,
      "loss": 0.8539,
      "step": 156
    },
    {
      "epoch": 78.5,
      "learning_rate": 3.2763157894736844e-06,
      "loss": 0.7757,
      "step": 157
    },
    {
      "epoch": 79.0,
      "learning_rate": 3.236842105263158e-06,
      "loss": 0.8716,
      "step": 158
    },
    {
      "epoch": 79.5,
      "learning_rate": 3.1973684210526314e-06,
      "loss": 0.8154,
      "step": 159
    },
    {
      "epoch": 80.0,
      "learning_rate": 3.157894736842105e-06,
      "loss": 0.7352,
      "step": 160
    },
    {
      "epoch": 80.0,
      "eval_accuracy_safe": 0.755677216139193,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9226257607799527,
      "eval_iou_safe": 0.23039002730053915,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.9153475450094228,
      "eval_loss": 0.7840872406959534,
      "eval_mean_accuracy": 0.8391514884595729,
      "eval_mean_iou": 0.3819125241033207,
      "eval_overall_accuracy": 0.9173183441162109,
      "eval_runtime": 1.4699,
      "eval_samples_per_second": 5.443,
      "eval_steps_per_second": 0.68,
      "step": 160
    },
    {
      "epoch": 80.5,
      "learning_rate": 3.1184210526315793e-06,
      "loss": 0.7745,
      "step": 161
    },
    {
      "epoch": 81.0,
      "learning_rate": 3.0789473684210526e-06,
      "loss": 0.885,
      "step": 162
    },
    {
      "epoch": 81.5,
      "learning_rate": 3.0394736842105263e-06,
      "loss": 0.827,
      "step": 163
    },
    {
      "epoch": 82.0,
      "learning_rate": 3e-06,
      "loss": 0.8288,
      "step": 164
    },
    {
      "epoch": 82.5,
      "learning_rate": 2.960526315789474e-06,
      "loss": 0.8158,
      "step": 165
    },
    {
      "epoch": 83.0,
      "learning_rate": 2.9210526315789475e-06,
      "loss": 0.8501,
      "step": 166
    },
    {
      "epoch": 83.5,
      "learning_rate": 2.8815789473684213e-06,
      "loss": 0.7772,
      "step": 167
    },
    {
      "epoch": 84.0,
      "learning_rate": 2.8421052631578946e-06,
      "loss": 0.8556,
      "step": 168
    },
    {
      "epoch": 84.5,
      "learning_rate": 2.8026315789473687e-06,
      "loss": 0.7853,
      "step": 169
    },
    {
      "epoch": 85.0,
      "learning_rate": 2.7631578947368424e-06,
      "loss": 0.7205,
      "step": 170
    },
    {
      "epoch": 85.0,
      "eval_accuracy_safe": 0.7412929353532324,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9387982754833581,
      "eval_iou_safe": 0.2613219967957361,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.930946709988782,
      "eval_loss": 0.7502496838569641,
      "eval_mean_accuracy": 0.8400456054182952,
      "eval_mean_iou": 0.39742290226150606,
      "eval_overall_accuracy": 0.9325194358825684,
      "eval_runtime": 1.338,
      "eval_samples_per_second": 5.979,
      "eval_steps_per_second": 0.747,
      "step": 170
    },
    {
      "epoch": 85.5,
      "learning_rate": 2.7236842105263157e-06,
      "loss": 0.818,
      "step": 171
    },
    {
      "epoch": 86.0,
      "learning_rate": 2.6842105263157895e-06,
      "loss": 0.8021,
      "step": 172
    },
    {
      "epoch": 86.5,
      "learning_rate": 2.6447368421052636e-06,
      "loss": 0.7861,
      "step": 173
    },
    {
      "epoch": 87.0,
      "learning_rate": 2.605263157894737e-06,
      "loss": 0.8586,
      "step": 174
    },
    {
      "epoch": 87.5,
      "learning_rate": 2.5657894736842107e-06,
      "loss": 0.8099,
      "step": 175
    },
    {
      "epoch": 88.0,
      "learning_rate": 2.526315789473684e-06,
      "loss": 0.923,
      "step": 176
    },
    {
      "epoch": 88.5,
      "learning_rate": 2.486842105263158e-06,
      "loss": 0.7752,
      "step": 177
    },
    {
      "epoch": 89.0,
      "learning_rate": 2.447368421052632e-06,
      "loss": 0.8386,
      "step": 178
    },
    {
      "epoch": 89.5,
      "learning_rate": 2.407894736842105e-06,
      "loss": 0.8237,
      "step": 179
    },
    {
      "epoch": 90.0,
      "learning_rate": 2.368421052631579e-06,
      "loss": 0.711,
      "step": 180
    },
    {
      "epoch": 90.0,
      "eval_accuracy_safe": 0.7483725813709314,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9372695744163209,
      "eval_iou_safe": 0.2590913575631059,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.9296319200611187,
      "eval_loss": 0.7416911125183105,
      "eval_mean_accuracy": 0.8428210778936261,
      "eval_mean_iou": 0.3962410925414082,
      "eval_overall_accuracy": 0.9312644004821777,
      "eval_runtime": 1.3549,
      "eval_samples_per_second": 5.905,
      "eval_steps_per_second": 0.738,
      "step": 180
    },
    {
      "epoch": 90.5,
      "learning_rate": 2.328947368421053e-06,
      "loss": 0.7583,
      "step": 181
    },
    {
      "epoch": 91.0,
      "learning_rate": 2.2894736842105263e-06,
      "loss": 0.761,
      "step": 182
    },
    {
      "epoch": 91.5,
      "learning_rate": 2.25e-06,
      "loss": 0.7724,
      "step": 183
    },
    {
      "epoch": 92.0,
      "learning_rate": 2.2105263157894738e-06,
      "loss": 0.8581,
      "step": 184
    },
    {
      "epoch": 92.5,
      "learning_rate": 2.1710526315789475e-06,
      "loss": 0.7619,
      "step": 185
    },
    {
      "epoch": 93.0,
      "learning_rate": 2.131578947368421e-06,
      "loss": 0.8492,
      "step": 186
    },
    {
      "epoch": 93.5,
      "learning_rate": 2.092105263157895e-06,
      "loss": 0.7315,
      "step": 187
    },
    {
      "epoch": 94.0,
      "learning_rate": 2.0526315789473683e-06,
      "loss": 0.8353,
      "step": 188
    },
    {
      "epoch": 94.5,
      "learning_rate": 2.013157894736842e-06,
      "loss": 0.7742,
      "step": 189
    },
    {
      "epoch": 95.0,
      "learning_rate": 1.9736842105263157e-06,
      "loss": 0.7855,
      "step": 190
    },
    {
      "epoch": 95.0,
      "eval_accuracy_safe": 0.7473226338683066,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.940405283080569,
      "eval_iou_safe": 0.26829938127009256,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.9327183843815211,
      "eval_loss": 0.728053867816925,
      "eval_mean_accuracy": 0.8438639584744378,
      "eval_mean_iou": 0.40033925521720454,
      "eval_overall_accuracy": 0.9342670440673828,
      "eval_runtime": 1.3581,
      "eval_samples_per_second": 5.89,
      "eval_steps_per_second": 0.736,
      "step": 190
    },
    {
      "epoch": 95.5,
      "learning_rate": 1.9342105263157895e-06,
      "loss": 0.7316,
      "step": 191
    },
    {
      "epoch": 96.0,
      "learning_rate": 1.8947368421052632e-06,
      "loss": 0.8882,
      "step": 192
    },
    {
      "epoch": 96.5,
      "learning_rate": 1.855263157894737e-06,
      "loss": 0.7755,
      "step": 193
    },
    {
      "epoch": 97.0,
      "learning_rate": 1.8157894736842104e-06,
      "loss": 0.8066,
      "step": 194
    },
    {
      "epoch": 97.5,
      "learning_rate": 1.7763157894736844e-06,
      "loss": 0.8156,
      "step": 195
    },
    {
      "epoch": 98.0,
      "learning_rate": 1.7368421052631579e-06,
      "loss": 0.6941,
      "step": 196
    },
    {
      "epoch": 98.5,
      "learning_rate": 1.6973684210526316e-06,
      "loss": 0.8103,
      "step": 197
    },
    {
      "epoch": 99.0,
      "learning_rate": 1.6578947368421051e-06,
      "loss": 0.8466,
      "step": 198
    },
    {
      "epoch": 99.5,
      "learning_rate": 1.618421052631579e-06,
      "loss": 0.813,
      "step": 199
    },
    {
      "epoch": 100.0,
      "learning_rate": 1.5789473684210526e-06,
      "loss": 0.7632,
      "step": 200
    },
    {
      "epoch": 100.0,
      "eval_accuracy_safe": 0.7544922753862306,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9292813233508103,
      "eval_iou_safe": 0.24304709997874027,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.9218938484516696,
      "eval_loss": 0.7494363188743591,
      "eval_mean_accuracy": 0.8418867993685204,
      "eval_mean_iou": 0.3883136494768033,
      "eval_overall_accuracy": 0.9237246513366699,
      "eval_runtime": 1.5595,
      "eval_samples_per_second": 5.13,
      "eval_steps_per_second": 0.641,
      "step": 200
    },
    {
      "epoch": 100.5,
      "learning_rate": 1.5394736842105263e-06,
      "loss": 0.7298,
      "step": 201
    },
    {
      "epoch": 101.0,
      "learning_rate": 1.5e-06,
      "loss": 0.8036,
      "step": 202
    },
    {
      "epoch": 101.5,
      "learning_rate": 1.4605263157894738e-06,
      "loss": 0.7798,
      "step": 203
    },
    {
      "epoch": 102.0,
      "learning_rate": 1.4210526315789473e-06,
      "loss": 0.7799,
      "step": 204
    },
    {
      "epoch": 102.5,
      "learning_rate": 1.3815789473684212e-06,
      "loss": 0.7405,
      "step": 205
    },
    {
      "epoch": 103.0,
      "learning_rate": 1.3421052631578947e-06,
      "loss": 0.8975,
      "step": 206
    },
    {
      "epoch": 103.5,
      "learning_rate": 1.3026315789473685e-06,
      "loss": 0.7924,
      "step": 207
    },
    {
      "epoch": 104.0,
      "learning_rate": 1.263157894736842e-06,
      "loss": 0.6742,
      "step": 208
    },
    {
      "epoch": 104.5,
      "learning_rate": 1.223684210526316e-06,
      "loss": 0.7551,
      "step": 209
    },
    {
      "epoch": 105.0,
      "learning_rate": 1.1842105263157894e-06,
      "loss": 0.8145,
      "step": 210
    },
    {
      "epoch": 105.0,
      "eval_accuracy_safe": 0.7550622468876557,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9274147714680554,
      "eval_iou_safe": 0.23866529491805066,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.920052473946246,
      "eval_loss": 0.7494801878929138,
      "eval_mean_accuracy": 0.8412385091778556,
      "eval_mean_iou": 0.3862392562880989,
      "eval_overall_accuracy": 0.9219355583190918,
      "eval_runtime": 1.2374,
      "eval_samples_per_second": 6.465,
      "eval_steps_per_second": 0.808,
      "step": 210
    },
    {
      "epoch": 105.5,
      "learning_rate": 1.1447368421052632e-06,
      "loss": 0.7416,
      "step": 211
    },
    {
      "epoch": 106.0,
      "learning_rate": 1.1052631578947369e-06,
      "loss": 0.7578,
      "step": 212
    },
    {
      "epoch": 106.5,
      "learning_rate": 1.0657894736842104e-06,
      "loss": 0.7681,
      "step": 213
    },
    {
      "epoch": 107.0,
      "learning_rate": 1.0263157894736841e-06,
      "loss": 0.8616,
      "step": 214
    },
    {
      "epoch": 107.5,
      "learning_rate": 9.868421052631579e-07,
      "loss": 0.7086,
      "step": 215
    },
    {
      "epoch": 108.0,
      "learning_rate": 9.473684210526316e-07,
      "loss": 0.7724,
      "step": 216
    },
    {
      "epoch": 108.5,
      "learning_rate": 9.078947368421052e-07,
      "loss": 0.8152,
      "step": 217
    },
    {
      "epoch": 109.0,
      "learning_rate": 8.684210526315789e-07,
      "loss": 0.7119,
      "step": 218
    },
    {
      "epoch": 109.5,
      "learning_rate": 8.289473684210526e-07,
      "loss": 0.7505,
      "step": 219
    },
    {
      "epoch": 110.0,
      "learning_rate": 7.894736842105263e-07,
      "loss": 0.8217,
      "step": 220
    },
    {
      "epoch": 110.0,
      "eval_accuracy_safe": 0.7502324883755812,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9340910187827324,
      "eval_iou_safe": 0.253310847425009,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.9265304088662178,
      "eval_loss": 0.7354978919029236,
      "eval_mean_accuracy": 0.8421617535791568,
      "eval_mean_iou": 0.39328041876374226,
      "eval_overall_accuracy": 0.928246021270752,
      "eval_runtime": 1.3172,
      "eval_samples_per_second": 6.074,
      "eval_steps_per_second": 0.759,
      "step": 220
    },
    {
      "epoch": 110.5,
      "learning_rate": 7.5e-07,
      "loss": 0.7648,
      "step": 221
    },
    {
      "epoch": 111.0,
      "learning_rate": 7.105263157894736e-07,
      "loss": 0.7398,
      "step": 222
    },
    {
      "epoch": 111.5,
      "learning_rate": 6.710526315789474e-07,
      "loss": 0.801,
      "step": 223
    },
    {
      "epoch": 112.0,
      "learning_rate": 6.31578947368421e-07,
      "loss": 0.9836,
      "step": 224
    },
    {
      "epoch": 112.5,
      "learning_rate": 5.921052631578947e-07,
      "loss": 0.7536,
      "step": 225
    },
    {
      "epoch": 113.0,
      "learning_rate": 5.526315789473684e-07,
      "loss": 0.8091,
      "step": 226
    },
    {
      "epoch": 113.5,
      "learning_rate": 5.131578947368421e-07,
      "loss": 0.7385,
      "step": 227
    },
    {
      "epoch": 114.0,
      "learning_rate": 4.736842105263158e-07,
      "loss": 0.7135,
      "step": 228
    },
    {
      "epoch": 114.5,
      "learning_rate": 4.3421052631578947e-07,
      "loss": 0.801,
      "step": 229
    },
    {
      "epoch": 115.0,
      "learning_rate": 3.9473684210526315e-07,
      "loss": 0.7784,
      "step": 230
    },
    {
      "epoch": 115.0,
      "eval_accuracy_safe": 0.7339883005849708,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9481330048727347,
      "eval_iou_safe": 0.2863604976417027,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.939964201456159,
      "eval_loss": 0.7257604002952576,
      "eval_mean_accuracy": 0.8410606527288527,
      "eval_mean_iou": 0.40877489969928726,
      "eval_overall_accuracy": 0.9413251876831055,
      "eval_runtime": 1.4329,
      "eval_samples_per_second": 5.583,
      "eval_steps_per_second": 0.698,
      "step": 230
    },
    {
      "epoch": 115.5,
      "learning_rate": 3.552631578947368e-07,
      "loss": 0.7701,
      "step": 231
    },
    {
      "epoch": 116.0,
      "learning_rate": 3.157894736842105e-07,
      "loss": 0.761,
      "step": 232
    },
    {
      "epoch": 116.5,
      "learning_rate": 2.763157894736842e-07,
      "loss": 0.7341,
      "step": 233
    },
    {
      "epoch": 117.0,
      "learning_rate": 2.368421052631579e-07,
      "loss": 0.6928,
      "step": 234
    },
    {
      "epoch": 117.5,
      "learning_rate": 1.9736842105263157e-07,
      "loss": 0.7853,
      "step": 235
    },
    {
      "epoch": 118.0,
      "learning_rate": 1.5789473684210525e-07,
      "loss": 0.7847,
      "step": 236
    },
    {
      "epoch": 118.5,
      "learning_rate": 1.1842105263157895e-07,
      "loss": 0.7022,
      "step": 237
    },
    {
      "epoch": 119.0,
      "learning_rate": 7.894736842105262e-08,
      "loss": 0.9351,
      "step": 238
    },
    {
      "epoch": 119.5,
      "learning_rate": 3.947368421052631e-08,
      "loss": 0.7541,
      "step": 239
    },
    {
      "epoch": 120.0,
      "learning_rate": 0.0,
      "loss": 0.8349,
      "step": 240
    },
    {
      "epoch": 120.0,
      "eval_accuracy_safe": 0.7575371231438428,
      "eval_accuracy_unlabeled": NaN,
      "eval_accuracy_unsafe": 0.9287252977371875,
      "eval_iou_safe": 0.24184511952191234,
      "eval_iou_unlabeled": 0.0,
      "eval_iou_unsafe": 0.9214223764093669,
      "eval_loss": 0.749171257019043,
      "eval_mean_accuracy": 0.8431312104405151,
      "eval_mean_iou": 0.38775583197709307,
      "eval_overall_accuracy": 0.9232831001281738,
      "eval_runtime": 1.3,
      "eval_samples_per_second": 6.154,
      "eval_steps_per_second": 0.769,
      "step": 240
    }
  ],
  "max_steps": 240,
  "num_train_epochs": 120,
  "total_flos": 4.2070003679232e+16,
  "trial_name": null,
  "trial_params": null
}