|
{ |
|
"best_metric": 0.34277641773223877, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGBD-b5_1/checkpoint-240", |
|
"epoch": 120.0, |
|
"global_step": 240, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 2.5e-07, |
|
"loss": 0.8297, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 5e-07, |
|
"loss": 0.8296, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 7.5e-07, |
|
"loss": 0.8276, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1e-06, |
|
"loss": 0.8294, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 1.25e-06, |
|
"loss": 0.8244, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 1.5e-06, |
|
"loss": 0.822, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 1.7500000000000002e-06, |
|
"loss": 0.8221, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 2e-06, |
|
"loss": 0.8133, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 2.25e-06, |
|
"loss": 0.8076, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 2.5e-06, |
|
"loss": 0.8047, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.5048739649991972, |
|
"eval_accuracy_undropoff": 0.7580744633956797, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.08117782339287308, |
|
"eval_iou_undropoff": 0.7421699512154445, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.986731231212616, |
|
"eval_mean_accuracy": 0.6314742141974385, |
|
"eval_mean_iou": 0.2744492582027725, |
|
"eval_overall_accuracy": 0.7475465774536133, |
|
"eval_runtime": 3.2198, |
|
"eval_samples_per_second": 6.212, |
|
"eval_steps_per_second": 0.621, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 2.75e-06, |
|
"loss": 0.7953, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 3e-06, |
|
"loss": 0.7953, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 2.986842105263158e-06, |
|
"loss": 0.7885, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 2.973684210526316e-06, |
|
"loss": 0.7867, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 2.960526315789474e-06, |
|
"loss": 0.7783, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 2.9473684210526313e-06, |
|
"loss": 0.7697, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 2.9342105263157896e-06, |
|
"loss": 0.768, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 2.9210526315789475e-06, |
|
"loss": 0.7614, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 2.9078947368421054e-06, |
|
"loss": 0.7604, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 2.8947368421052634e-06, |
|
"loss": 0.7528, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.2405926741439024, |
|
"eval_accuracy_undropoff": 0.9508346957194045, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.11781963585717334, |
|
"eval_iou_undropoff": 0.9205125917871181, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8525959253311157, |
|
"eval_mean_accuracy": 0.5957136849316534, |
|
"eval_mean_iou": 0.34611074254809715, |
|
"eval_overall_accuracy": 0.9213033676147461, |
|
"eval_runtime": 3.4854, |
|
"eval_samples_per_second": 5.738, |
|
"eval_steps_per_second": 0.574, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 2.8815789473684213e-06, |
|
"loss": 0.7504, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 2.8684210526315787e-06, |
|
"loss": 0.7408, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 2.8552631578947367e-06, |
|
"loss": 0.7363, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 2.8421052631578946e-06, |
|
"loss": 0.7381, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 2.828947368421053e-06, |
|
"loss": 0.73, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 2.815789473684211e-06, |
|
"loss": 0.7244, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 2.8026315789473687e-06, |
|
"loss": 0.7226, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 2.789473684210526e-06, |
|
"loss": 0.7193, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 2.776315789473684e-06, |
|
"loss": 0.7159, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 2.763157894736842e-06, |
|
"loss": 0.7087, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.124291841555999, |
|
"eval_accuracy_undropoff": 0.9823552180796178, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.08867354586184664, |
|
"eval_iou_undropoff": 0.9464005897482354, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7022603154182434, |
|
"eval_mean_accuracy": 0.5533235298178084, |
|
"eval_mean_iou": 0.34502471187002737, |
|
"eval_overall_accuracy": 0.9466775894165039, |
|
"eval_runtime": 3.2954, |
|
"eval_samples_per_second": 6.069, |
|
"eval_steps_per_second": 0.607, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 2.75e-06, |
|
"loss": 0.7036, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 2.736842105263158e-06, |
|
"loss": 0.6985, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 2.723684210526316e-06, |
|
"loss": 0.6966, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 2.7105263157894737e-06, |
|
"loss": 0.699, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 2.6973684210526316e-06, |
|
"loss": 0.6958, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 2.6842105263157895e-06, |
|
"loss": 0.6815, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 2.6710526315789474e-06, |
|
"loss": 0.678, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 2.6578947368421053e-06, |
|
"loss": 0.6791, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 2.644736842105263e-06, |
|
"loss": 0.6869, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.6601, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.09481410124085415, |
|
"eval_accuracy_undropoff": 0.9831825006940458, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.06841004322585342, |
|
"eval_iou_undropoff": 0.9460320542390953, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6250978708267212, |
|
"eval_mean_accuracy": 0.5389983009674499, |
|
"eval_mean_iou": 0.3381473658216496, |
|
"eval_overall_accuracy": 0.9462448120117187, |
|
"eval_runtime": 3.2662, |
|
"eval_samples_per_second": 6.123, |
|
"eval_steps_per_second": 0.612, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 2.618421052631579e-06, |
|
"loss": 0.6667, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 2.605263157894737e-06, |
|
"loss": 0.6542, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 2.592105263157895e-06, |
|
"loss": 0.6651, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 2.5789473684210527e-06, |
|
"loss": 0.6444, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 2.5657894736842107e-06, |
|
"loss": 0.6565, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 2.5526315789473686e-06, |
|
"loss": 0.641, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 2.5394736842105265e-06, |
|
"loss": 0.6338, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 2.526315789473684e-06, |
|
"loss": 0.6353, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 2.5131578947368423e-06, |
|
"loss": 0.6452, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 2.5e-06, |
|
"loss": 0.6274, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.047913942980343585, |
|
"eval_accuracy_undropoff": 0.9876305626894943, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.037385151169158415, |
|
"eval_iou_undropoff": 0.9484551647559883, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5828429460525513, |
|
"eval_mean_accuracy": 0.517772252834919, |
|
"eval_mean_iou": 0.3286134386417156, |
|
"eval_overall_accuracy": 0.9485578536987305, |
|
"eval_runtime": 3.3109, |
|
"eval_samples_per_second": 6.041, |
|
"eval_steps_per_second": 0.604, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 2.486842105263158e-06, |
|
"loss": 0.623, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 2.473684210526316e-06, |
|
"loss": 0.6199, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 2.460526315789474e-06, |
|
"loss": 0.6178, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 2.4473684210526314e-06, |
|
"loss": 0.6155, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 2.4342105263157893e-06, |
|
"loss": 0.6147, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 2.4210526315789472e-06, |
|
"loss": 0.6052, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 2.4078947368421056e-06, |
|
"loss": 0.6126, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 2.3947368421052635e-06, |
|
"loss": 0.604, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 2.3815789473684214e-06, |
|
"loss": 0.5995, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 2.368421052631579e-06, |
|
"loss": 0.5929, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.0359320167893759, |
|
"eval_accuracy_undropoff": 0.9884172473598898, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.028387943187872154, |
|
"eval_iou_undropoff": 0.9487370409472924, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5477972626686096, |
|
"eval_mean_accuracy": 0.5121746320746329, |
|
"eval_mean_iou": 0.32570832804505484, |
|
"eval_overall_accuracy": 0.9488136291503906, |
|
"eval_runtime": 3.8634, |
|
"eval_samples_per_second": 5.177, |
|
"eval_steps_per_second": 0.518, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 2.3552631578947368e-06, |
|
"loss": 0.6041, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 2.3421052631578947e-06, |
|
"loss": 0.5861, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 2.3289473684210526e-06, |
|
"loss": 0.5903, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 2.3157894736842105e-06, |
|
"loss": 0.5788, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 2.302631578947369e-06, |
|
"loss": 0.5867, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 2.2894736842105263e-06, |
|
"loss": 0.5818, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 2.2763157894736842e-06, |
|
"loss": 0.5798, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 2.263157894736842e-06, |
|
"loss": 0.5674, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 2.25e-06, |
|
"loss": 0.5858, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 2.236842105263158e-06, |
|
"loss": 0.5672, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.028280465148283217, |
|
"eval_accuracy_undropoff": 0.9893241337861464, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.022722665536369164, |
|
"eval_iou_undropoff": 0.9493050509718401, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5236681699752808, |
|
"eval_mean_accuracy": 0.5088022994672148, |
|
"eval_mean_iou": 0.32400923883606975, |
|
"eval_overall_accuracy": 0.9493646621704102, |
|
"eval_runtime": 3.6124, |
|
"eval_samples_per_second": 5.536, |
|
"eval_steps_per_second": 0.554, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 2.223684210526316e-06, |
|
"loss": 0.5659, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 2.2105263157894738e-06, |
|
"loss": 0.5597, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 2.1973684210526313e-06, |
|
"loss": 0.5761, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 2.1842105263157896e-06, |
|
"loss": 0.5468, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 2.1710526315789475e-06, |
|
"loss": 0.556, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 2.1578947368421054e-06, |
|
"loss": 0.5491, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 2.1447368421052633e-06, |
|
"loss": 0.5502, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 2.1315789473684212e-06, |
|
"loss": 0.5538, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 2.1184210526315787e-06, |
|
"loss": 0.5474, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 2.1052631578947366e-06, |
|
"loss": 0.5454, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.02117479758710062, |
|
"eval_accuracy_undropoff": 0.993276861062492, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.018333611356035874, |
|
"eval_iou_undropoff": 0.9528160474538893, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4965994358062744, |
|
"eval_mean_accuracy": 0.5072258293247963, |
|
"eval_mean_iou": 0.48557482940496255, |
|
"eval_overall_accuracy": 0.9528575897216797, |
|
"eval_runtime": 3.3294, |
|
"eval_samples_per_second": 6.007, |
|
"eval_steps_per_second": 0.601, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 2.0921052631578945e-06, |
|
"loss": 0.5516, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 2.078947368421053e-06, |
|
"loss": 0.5463, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 2.0657894736842108e-06, |
|
"loss": 0.5311, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 2.0526315789473687e-06, |
|
"loss": 0.5386, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 2.039473684210526e-06, |
|
"loss": 0.5433, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 2.026315789473684e-06, |
|
"loss": 0.525, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 2.013157894736842e-06, |
|
"loss": 0.5319, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 2e-06, |
|
"loss": 0.5293, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 1.986842105263158e-06, |
|
"loss": 0.5236, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 1.973684210526316e-06, |
|
"loss": 0.5261, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_dropoff": 0.01632147526319411, |
|
"eval_accuracy_undropoff": 0.9960132420940977, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.014949579831932774, |
|
"eval_iou_undropoff": 0.9552480263667703, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4699672758579254, |
|
"eval_mean_accuracy": 0.5061673586786459, |
|
"eval_mean_iou": 0.32339920206623435, |
|
"eval_overall_accuracy": 0.9552783966064453, |
|
"eval_runtime": 3.4999, |
|
"eval_samples_per_second": 5.714, |
|
"eval_steps_per_second": 0.571, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 1.9605263157894736e-06, |
|
"loss": 0.5276, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 1.9473684210526315e-06, |
|
"loss": 0.5214, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 1.9342105263157895e-06, |
|
"loss": 0.5094, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 1.9210526315789474e-06, |
|
"loss": 0.5191, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 1.9078947368421053e-06, |
|
"loss": 0.5145, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 1.8947368421052632e-06, |
|
"loss": 0.4972, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 1.8815789473684209e-06, |
|
"loss": 0.5082, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 1.8684210526315792e-06, |
|
"loss": 0.5169, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 1.855263157894737e-06, |
|
"loss": 0.5011, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 1.8421052631578948e-06, |
|
"loss": 0.5012, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.010743365673524623, |
|
"eval_accuracy_undropoff": 0.9973704870857741, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.01012940728694509, |
|
"eval_iou_undropoff": 0.9563277663476536, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4576098322868347, |
|
"eval_mean_accuracy": 0.5040569263796494, |
|
"eval_mean_iou": 0.48322858681729935, |
|
"eval_overall_accuracy": 0.9563472747802735, |
|
"eval_runtime": 3.5475, |
|
"eval_samples_per_second": 5.638, |
|
"eval_steps_per_second": 0.564, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 1.8289473684210527e-06, |
|
"loss": 0.4964, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 1.8157894736842106e-06, |
|
"loss": 0.5012, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 1.8026315789473683e-06, |
|
"loss": 0.4912, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 1.7894736842105262e-06, |
|
"loss": 0.4951, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 1.7763157894736842e-06, |
|
"loss": 0.4964, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 1.7631578947368423e-06, |
|
"loss": 0.5006, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 1.7500000000000002e-06, |
|
"loss": 0.4905, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 1.736842105263158e-06, |
|
"loss": 0.4773, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 1.7236842105263158e-06, |
|
"loss": 0.4871, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 1.7105263157894737e-06, |
|
"loss": 0.4875, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_dropoff": 0.00584417073786096, |
|
"eval_accuracy_undropoff": 0.9978051238983578, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.005562735783149364, |
|
"eval_iou_undropoff": 0.9565495765992331, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4430149495601654, |
|
"eval_mean_accuracy": 0.5018246473181094, |
|
"eval_mean_iou": 0.48105615619119124, |
|
"eval_overall_accuracy": 0.9565601348876953, |
|
"eval_runtime": 3.4324, |
|
"eval_samples_per_second": 5.827, |
|
"eval_steps_per_second": 0.583, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 1.6973684210526316e-06, |
|
"loss": 0.4664, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 1.6842105263157895e-06, |
|
"loss": 0.4813, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 1.6710526315789472e-06, |
|
"loss": 0.4865, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 1.6578947368421056e-06, |
|
"loss": 0.4743, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 1.6447368421052632e-06, |
|
"loss": 0.4866, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 1.6315789473684212e-06, |
|
"loss": 0.4688, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 1.618421052631579e-06, |
|
"loss": 0.4641, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 1.605263157894737e-06, |
|
"loss": 0.4732, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 1.5921052631578947e-06, |
|
"loss": 0.4722, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 1.5789473684210526e-06, |
|
"loss": 0.4622, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.003073464987729076, |
|
"eval_accuracy_undropoff": 0.9983402605233752, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.002960213842313385, |
|
"eval_iou_undropoff": 0.9569523159125636, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.43276911973953247, |
|
"eval_mean_accuracy": 0.5007068627555521, |
|
"eval_mean_iou": 0.4799562648774385, |
|
"eval_overall_accuracy": 0.9569578170776367, |
|
"eval_runtime": 3.4879, |
|
"eval_samples_per_second": 5.734, |
|
"eval_steps_per_second": 0.573, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 1.5657894736842105e-06, |
|
"loss": 0.4616, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 1.5526315789473686e-06, |
|
"loss": 0.4515, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 1.5394736842105265e-06, |
|
"loss": 0.4533, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 1.5263157894736844e-06, |
|
"loss": 0.4503, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 1.5131578947368421e-06, |
|
"loss": 0.464, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 1.5e-06, |
|
"loss": 0.4444, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 1.486842105263158e-06, |
|
"loss": 0.4391, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 1.4736842105263156e-06, |
|
"loss": 0.4576, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 1.4605263157894738e-06, |
|
"loss": 0.4381, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 1.4473684210526317e-06, |
|
"loss": 0.4394, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_dropoff": 0.0021422509690589234, |
|
"eval_accuracy_undropoff": 0.9986413619416166, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.0020771988506462892, |
|
"eval_iou_undropoff": 0.9572038677608956, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.41788777709007263, |
|
"eval_mean_accuracy": 0.5003918064553378, |
|
"eval_mean_iou": 0.47964053330577094, |
|
"eval_overall_accuracy": 0.9572076797485352, |
|
"eval_runtime": 3.4358, |
|
"eval_samples_per_second": 5.821, |
|
"eval_steps_per_second": 0.582, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 1.4342105263157894e-06, |
|
"loss": 0.4479, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 1.4210526315789473e-06, |
|
"loss": 0.4405, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 1.4078947368421054e-06, |
|
"loss": 0.465, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 1.394736842105263e-06, |
|
"loss": 0.4389, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 1.381578947368421e-06, |
|
"loss": 0.4295, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 1.368421052631579e-06, |
|
"loss": 0.4448, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 1.3552631578947368e-06, |
|
"loss": 0.4439, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 1.3421052631578947e-06, |
|
"loss": 0.4362, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 1.3289473684210526e-06, |
|
"loss": 0.4418, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 1.3157894736842106e-06, |
|
"loss": 0.4352, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.001646826762081699, |
|
"eval_accuracy_undropoff": 0.9987942012603274, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.0016022923045337285, |
|
"eval_iou_undropoff": 0.9573306430246059, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.40482425689697266, |
|
"eval_mean_accuracy": 0.5002205140112045, |
|
"eval_mean_iou": 0.4794664676645698, |
|
"eval_overall_accuracy": 0.9573335647583008, |
|
"eval_runtime": 3.4008, |
|
"eval_samples_per_second": 5.881, |
|
"eval_steps_per_second": 0.588, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 1.3026315789473685e-06, |
|
"loss": 0.4334, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 1.2894736842105264e-06, |
|
"loss": 0.4271, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 1.2763157894736843e-06, |
|
"loss": 0.4209, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 1.263157894736842e-06, |
|
"loss": 0.4405, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 1.25e-06, |
|
"loss": 0.425, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 1.236842105263158e-06, |
|
"loss": 0.428, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 1.2236842105263157e-06, |
|
"loss": 0.4184, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 1.2105263157894736e-06, |
|
"loss": 0.4144, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 1.1973684210526317e-06, |
|
"loss": 0.4098, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 1.1842105263157894e-06, |
|
"loss": 0.426, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_dropoff": 0.0014541617927016674, |
|
"eval_accuracy_undropoff": 0.9991657520520371, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.0014267261360925707, |
|
"eval_iou_undropoff": 0.957679097037079, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.3881166875362396, |
|
"eval_mean_accuracy": 0.5003099569223693, |
|
"eval_mean_iou": 0.47955291158658575, |
|
"eval_overall_accuracy": 0.9576816558837891, |
|
"eval_runtime": 3.5184, |
|
"eval_samples_per_second": 5.684, |
|
"eval_steps_per_second": 0.568, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 1.1710526315789473e-06, |
|
"loss": 0.4211, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 1.1578947368421053e-06, |
|
"loss": 0.4342, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 1.1447368421052632e-06, |
|
"loss": 0.4111, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 1.131578947368421e-06, |
|
"loss": 0.4334, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 1.118421052631579e-06, |
|
"loss": 0.4134, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 1.1052631578947369e-06, |
|
"loss": 0.4297, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 1.0921052631578948e-06, |
|
"loss": 0.4033, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 1.0789473684210527e-06, |
|
"loss": 0.4025, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 1.0657894736842106e-06, |
|
"loss": 0.4079, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 1.0526315789473683e-06, |
|
"loss": 0.4175, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.0014495745315259525, |
|
"eval_accuracy_undropoff": 0.9993751100771461, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.0014289913401315938, |
|
"eval_iou_undropoff": 0.9578795795339837, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.3793652355670929, |
|
"eval_mean_accuracy": 0.5004123423043361, |
|
"eval_mean_iou": 0.4796542854370577, |
|
"eval_overall_accuracy": 0.9578821182250976, |
|
"eval_runtime": 3.782, |
|
"eval_samples_per_second": 5.288, |
|
"eval_steps_per_second": 0.529, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 1.0394736842105264e-06, |
|
"loss": 0.4045, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 1.0263157894736843e-06, |
|
"loss": 0.4083, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 1.013157894736842e-06, |
|
"loss": 0.4018, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 1e-06, |
|
"loss": 0.4039, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 9.86842105263158e-07, |
|
"loss": 0.4005, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 9.736842105263158e-07, |
|
"loss": 0.4034, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 9.605263157894737e-07, |
|
"loss": 0.3999, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 9.473684210526316e-07, |
|
"loss": 0.3986, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 9.342105263157896e-07, |
|
"loss": 0.4241, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 9.210526315789474e-07, |
|
"loss": 0.4087, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_dropoff": 0.0012202114727402004, |
|
"eval_accuracy_undropoff": 0.9992073450437174, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.0011984897227253475, |
|
"eval_iou_undropoff": 0.957709646371066, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.3741573393344879, |
|
"eval_mean_accuracy": 0.5002137782582288, |
|
"eval_mean_iou": 0.3196360453645971, |
|
"eval_overall_accuracy": 0.9577117919921875, |
|
"eval_runtime": 3.5884, |
|
"eval_samples_per_second": 5.574, |
|
"eval_steps_per_second": 0.557, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 9.078947368421053e-07, |
|
"loss": 0.3876, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 8.947368421052631e-07, |
|
"loss": 0.4006, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 8.815789473684211e-07, |
|
"loss": 0.3881, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 8.68421052631579e-07, |
|
"loss": 0.3968, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 8.552631578947369e-07, |
|
"loss": 0.4054, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 8.421052631578948e-07, |
|
"loss": 0.3757, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 8.289473684210528e-07, |
|
"loss": 0.3861, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 8.157894736842106e-07, |
|
"loss": 0.393, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 8.026315789473685e-07, |
|
"loss": 0.394, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 7.894736842105263e-07, |
|
"loss": 0.3887, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.00025229936466432715, |
|
"eval_accuracy_undropoff": 0.9996370066180619, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.00025020585117755973, |
|
"eval_iou_undropoff": 0.9580829037780204, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.36448419094085693, |
|
"eval_mean_accuracy": 0.4999446529913631, |
|
"eval_mean_iou": 0.47916655481459897, |
|
"eval_overall_accuracy": 0.9580833435058593, |
|
"eval_runtime": 3.3953, |
|
"eval_samples_per_second": 5.891, |
|
"eval_steps_per_second": 0.589, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 7.763157894736843e-07, |
|
"loss": 0.3898, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 7.631578947368422e-07, |
|
"loss": 0.393, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 7.5e-07, |
|
"loss": 0.393, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 7.368421052631578e-07, |
|
"loss": 0.3792, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 7.236842105263158e-07, |
|
"loss": 0.4113, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 7.105263157894736e-07, |
|
"loss": 0.4003, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 6.973684210526316e-07, |
|
"loss": 0.3938, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 6.842105263157895e-07, |
|
"loss": 0.3876, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 6.710526315789474e-07, |
|
"loss": 0.3777, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 6.578947368421053e-07, |
|
"loss": 0.3799, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_dropoff": 0.00011009426821716094, |
|
"eval_accuracy_undropoff": 0.999701485705643, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.00010934189844871181, |
|
"eval_iou_undropoff": 0.9581390371965204, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.35395944118499756, |
|
"eval_mean_accuracy": 0.4999057899869301, |
|
"eval_mean_iou": 0.47912418954748454, |
|
"eval_overall_accuracy": 0.9581392288208008, |
|
"eval_runtime": 3.4245, |
|
"eval_samples_per_second": 5.84, |
|
"eval_steps_per_second": 0.584, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 6.447368421052632e-07, |
|
"loss": 0.386, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 6.31578947368421e-07, |
|
"loss": 0.385, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 6.18421052631579e-07, |
|
"loss": 0.3749, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 6.052631578947368e-07, |
|
"loss": 0.3891, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 5.921052631578947e-07, |
|
"loss": 0.3797, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 5.789473684210526e-07, |
|
"loss": 0.3742, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 5.657894736842105e-07, |
|
"loss": 0.387, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 5.526315789473684e-07, |
|
"loss": 0.3739, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 5.394736842105264e-07, |
|
"loss": 0.382, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 5.263157894736842e-07, |
|
"loss": 0.376, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 9.633248469001582e-05, |
|
"eval_accuracy_undropoff": 0.9997991993846626, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 9.588865956786178e-05, |
|
"eval_iou_undropoff": 0.9582321401357542, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.35109108686447144, |
|
"eval_mean_accuracy": 0.49994776593467627, |
|
"eval_mean_iou": 0.47916401439766104, |
|
"eval_overall_accuracy": 0.958232307434082, |
|
"eval_runtime": 3.4374, |
|
"eval_samples_per_second": 5.818, |
|
"eval_steps_per_second": 0.582, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.5, |
|
"learning_rate": 5.131578947368422e-07, |
|
"loss": 0.3714, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"learning_rate": 5e-07, |
|
"loss": 0.3908, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 101.5, |
|
"learning_rate": 4.868421052631579e-07, |
|
"loss": 0.3989, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"learning_rate": 4.736842105263158e-07, |
|
"loss": 0.3708, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 102.5, |
|
"learning_rate": 4.605263157894737e-07, |
|
"loss": 0.3732, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"learning_rate": 4.4736842105263156e-07, |
|
"loss": 0.3795, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 103.5, |
|
"learning_rate": 4.342105263157895e-07, |
|
"loss": 0.3691, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"learning_rate": 4.210526315789474e-07, |
|
"loss": 0.3826, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 104.5, |
|
"learning_rate": 4.078947368421053e-07, |
|
"loss": 0.3677, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"learning_rate": 3.9473684210526315e-07, |
|
"loss": 0.3677, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"eval_accuracy_dropoff": 8.715796233858575e-05, |
|
"eval_accuracy_undropoff": 0.9997800944698236, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 8.671839342765861e-05, |
|
"eval_iou_undropoff": 0.958213463984645, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3452190160751343, |
|
"eval_mean_accuracy": 0.4999336262160811, |
|
"eval_mean_iou": 0.47915009118903634, |
|
"eval_overall_accuracy": 0.9582136154174805, |
|
"eval_runtime": 3.5855, |
|
"eval_samples_per_second": 5.578, |
|
"eval_steps_per_second": 0.558, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 105.5, |
|
"learning_rate": 3.815789473684211e-07, |
|
"loss": 0.3857, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"learning_rate": 3.684210526315789e-07, |
|
"loss": 0.3699, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 106.5, |
|
"learning_rate": 3.552631578947368e-07, |
|
"loss": 0.3611, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 107.0, |
|
"learning_rate": 3.4210526315789473e-07, |
|
"loss": 0.3791, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 107.5, |
|
"learning_rate": 3.2894736842105264e-07, |
|
"loss": 0.3763, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 108.0, |
|
"learning_rate": 3.157894736842105e-07, |
|
"loss": 0.375, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 108.5, |
|
"learning_rate": 3.026315789473684e-07, |
|
"loss": 0.3747, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 109.0, |
|
"learning_rate": 2.894736842105263e-07, |
|
"loss": 0.3788, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 109.5, |
|
"learning_rate": 2.763157894736842e-07, |
|
"loss": 0.3776, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"learning_rate": 2.631578947368421e-07, |
|
"loss": 0.358, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"eval_accuracy_dropoff": 8.25707011628707e-05, |
|
"eval_accuracy_undropoff": 0.9998001944323104, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 8.21921561285668e-05, |
|
"eval_iou_undropoff": 0.9582325455066336, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.34373170137405396, |
|
"eval_mean_accuracy": 0.49994138256673665, |
|
"eval_mean_iou": 0.4791573688313811, |
|
"eval_overall_accuracy": 0.9582326889038086, |
|
"eval_runtime": 3.8945, |
|
"eval_samples_per_second": 5.135, |
|
"eval_steps_per_second": 0.514, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 110.5, |
|
"learning_rate": 2.5e-07, |
|
"loss": 0.3756, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 111.0, |
|
"learning_rate": 2.368421052631579e-07, |
|
"loss": 0.3731, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 111.5, |
|
"learning_rate": 2.2368421052631578e-07, |
|
"loss": 0.3725, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 112.0, |
|
"learning_rate": 2.105263157894737e-07, |
|
"loss": 0.391, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 112.5, |
|
"learning_rate": 1.9736842105263157e-07, |
|
"loss": 0.3856, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 113.0, |
|
"learning_rate": 1.8421052631578946e-07, |
|
"loss": 0.3578, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 113.5, |
|
"learning_rate": 1.7105263157894736e-07, |
|
"loss": 0.3859, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 114.0, |
|
"learning_rate": 1.5789473684210525e-07, |
|
"loss": 0.3626, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 114.5, |
|
"learning_rate": 1.4473684210526316e-07, |
|
"loss": 0.364, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"learning_rate": 1.3157894736842104e-07, |
|
"loss": 0.3997, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"eval_accuracy_dropoff": 7.339617881144063e-05, |
|
"eval_accuracy_undropoff": 0.9998565141291791, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 7.315422739991587e-05, |
|
"eval_iou_undropoff": 0.9582861580998477, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3434307873249054, |
|
"eval_mean_accuracy": 0.49996495515399525, |
|
"eval_mean_iou": 0.47917965616362385, |
|
"eval_overall_accuracy": 0.9582862854003906, |
|
"eval_runtime": 3.4028, |
|
"eval_samples_per_second": 5.877, |
|
"eval_steps_per_second": 0.588, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 115.5, |
|
"learning_rate": 1.1842105263157895e-07, |
|
"loss": 0.377, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 116.0, |
|
"learning_rate": 1.0526315789473685e-07, |
|
"loss": 0.3659, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 116.5, |
|
"learning_rate": 9.210526315789473e-08, |
|
"loss": 0.3619, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 117.0, |
|
"learning_rate": 7.894736842105262e-08, |
|
"loss": 0.3815, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 117.5, |
|
"learning_rate": 6.578947368421052e-08, |
|
"loss": 0.3681, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 118.0, |
|
"learning_rate": 5.263157894736842e-08, |
|
"loss": 0.3679, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 118.5, |
|
"learning_rate": 3.947368421052631e-08, |
|
"loss": 0.3645, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 119.0, |
|
"learning_rate": 2.631578947368421e-08, |
|
"loss": 0.3636, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 119.5, |
|
"learning_rate": 1.3157894736842106e-08, |
|
"loss": 0.3704, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"learning_rate": 0.0, |
|
"loss": 0.3769, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"eval_accuracy_dropoff": 7.339617881144063e-05, |
|
"eval_accuracy_undropoff": 0.9998698477676603, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 7.317664384470087e-05, |
|
"eval_iou_undropoff": 0.9582989373746869, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.34277641773223877, |
|
"eval_mean_accuracy": 0.49997162197323586, |
|
"eval_mean_iou": 0.47918605700926575, |
|
"eval_overall_accuracy": 0.9582990646362305, |
|
"eval_runtime": 3.6519, |
|
"eval_samples_per_second": 5.477, |
|
"eval_steps_per_second": 0.548, |
|
"step": 240 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 2.778548611716219e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|