yonomitt, pre-commit-ci[bot] and glenn-jocher committed
Commit 8f35436
1 Parent(s): 8f875d9

Fix Detections class `tolist()` method (#5945)


* Fix tolist() to add the file for each Detection

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Fix PEP8 requirement for 2 spaces before an inline comment

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Cleanup

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>

Files changed (1)
  1. models/common.py +7 -5
models/common.py CHANGED
@@ -525,7 +525,7 @@ class AutoShape(nn.Module):
 
 class Detections:
     # YOLOv5 detections class for inference results
-    def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
+    def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None):
         super().__init__()
         d = pred[0].device  # device
         gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs]  # normalizations
@@ -533,6 +533,7 @@ class Detections:
         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
         self.names = names  # class names
         self.files = files  # image filenames
+        self.times = times  # profiling times
         self.xyxy = pred  # xyxy pixels
         self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
         self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
@@ -612,10 +613,11 @@ class Detections:
 
     def tolist(self):
         # return a list of Detections objects, i.e. 'for result in results.tolist():'
-        x = [Detections([self.imgs[i]], [self.pred[i]], names=self.names, shape=self.s) for i in range(self.n)]
-        for d in x:
-            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
-                setattr(d, k, getattr(d, k)[0])  # pop out of list
+        r = range(self.n)  # iterable
+        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
+        # for d in x:
+        #     for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+        #         setattr(d, k, getattr(d, k)[0])  # pop out of list
         return x
 
     def __len__(self):
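
For reference, a minimal usage sketch of the fixed method (not part of this commit, and assuming the standard torch.hub loading path for YOLOv5; the image paths are illustrative). Before this change, `tolist()` constructed each per-image `Detections` without the required `files` argument, so the loop below raised a TypeError; with the fix, every per-image result carries its own filename and the shared profiling times.

import torch

# Load a pretrained YOLOv5 model through torch.hub (assumed setup, outside this diff)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Batched inference on two images (illustrative paths)
results = model(['zidane.jpg', 'bus.jpg'])

# tolist() returns one Detections object per input image
for result in results.tolist():
    print(result.files)  # one-element filename list for this image
    print(result.times)  # profiling times copied from the parent Detections
    result.print()       # per-image summary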