glenn-jocher committed
Commit: 1f1917e
1 Parent(s): 24dd150

remove fast, add merge

Files changed:
- test.py +4 -3
- train.py +1 -2
- utils/utils.py +1 -6
test.py
CHANGED
@@ -19,7 +19,7 @@ def test(data,
          verbose=False,
          model=None,
          dataloader=None,
-         fast=False):
+         merge=False):
     # Initialize/load model and set device
     if model is None:
         training = False
@@ -65,7 +65,7 @@ def test(data,
         img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
         _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once

-
+        merge = opt.merge  # use Merge NMS
         path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
         dataset = LoadImagesAndLabels(path,
                                       imgsz,
@@ -109,7 +109,7 @@ def test(data,

         # Run NMS
         t = torch_utils.time_synchronized()
-        output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, fast=fast)
+        output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, merge=merge)
         t1 += torch_utils.time_synchronized() - t

         # Statistics per image
@@ -254,6 +254,7 @@ if __name__ == '__main__':
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
     parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--merge', action='store_true', help='use Merge NMS')
     parser.add_argument('--verbose', action='store_true', help='report mAP by class')
     opt = parser.parse_args()
     opt.img_size = check_img_size(opt.img_size)
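For context, the sketch below condenses the plumbing this commit adds to test.py: a --merge store-true flag is parsed into opt, read back as merge = opt.merge, and forwarded to NMS as merge=merge. Only the flag name, opt.merge and the merge keyword come from the diff above; the stub test() function and the rest of the scaffolding are illustrative stand-ins for the real script.

import argparse

def test(conf_thres=0.001, iou_thres=0.6, merge=False):
    # In the real test.py the flag ultimately reaches the NMS call:
    # output = non_max_suppression(inf_out, conf_thres=conf_thres,
    #                              iou_thres=iou_thres, merge=merge)
    return 'Merge NMS' if merge else 'standard NMS'

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--merge', action='store_true', help='use Merge NMS')
    opt = parser.parse_args()
    print(test(merge=opt.merge))  # running this stub with --merge prints "Merge NMS"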
train.py
CHANGED
@@ -305,8 +305,7 @@ def train(hyp):
                                      save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
                                      model=ema.ema,
                                      single_cls=opt.single_cls,
-                                     dataloader=testloader,
-                                     fast=epoch < epochs / 2)
+                                     dataloader=testloader)

            # Write
            with open(results_file, 'a') as f:
utils/utils.py
CHANGED
@@ -527,7 +527,7 @@ def build_targets(p, targets, model):
     return tcls, tbox, indices, anch


-def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, classes=None, agnostic=False):
+def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
     """Performs Non-Maximum Suppression (NMS) on inference results

     Returns:
@@ -544,12 +544,7 @@ def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, c
     max_det = 300  # maximum number of detections per image
     time_limit = 10.0  # seconds to quit after
     redundant = True  # require redundant detections
-    fast |= conf_thres > 0.001  # fast mode
     multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
-    if fast:
-        merge = False
-    else:
-        merge = True  # merge for best mAP (adds 0.5ms/img)

     t = time.time()
     output = [None] * prediction.shape[0]
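The net effect of the utils/utils.py change is that non_max_suppression no longer derives its behaviour from the removed fast heuristic; callers opt in to Merge NMS explicitly with merge=True. Merge NMS means each box kept by standard NMS is replaced by a confidence-weighted average of all candidate boxes that overlap it above the IoU threshold, trading a little speed for mAP (per the removed comment "merge for best mAP (adds 0.5ms/img)"). The snippet below is a minimal, self-contained sketch of that idea using torchvision's nms and box_iou helpers; it illustrates the technique only and is not this repo's exact implementation.

import torch
import torchvision

def merge_nms(boxes, scores, iou_thres=0.6):
    # boxes: (n, 4) in xyxy format, scores: (n,) confidences
    keep = torchvision.ops.nms(boxes, scores, iou_thres)              # indices surviving standard NMS
    iou = torchvision.ops.box_iou(boxes[keep], boxes) > iou_thres     # (k, n) mask of boxes overlapping each survivor
    weights = iou.float() * scores[None]                              # weight each overlapping box by its confidence
    merged = torch.mm(weights, boxes) / weights.sum(1, keepdim=True)  # confidence-weighted average per kept box
    return merged, scores[keep]

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])
print(merge_nms(boxes, scores))  # first kept box becomes a blend of the two overlapping candidates

Whether the small mAP gain is worth the extra ~0.5 ms per image is dataset-dependent, which is presumably why it is now an explicit --merge opt-in rather than being tied to the old fast switch.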