sklum committed
Commit 3e2a0ca • 1 Parent(s): f9cf0c4

Fix modules

detection_metrics/__init__.py → __init__.py RENAMED
File without changes
detection_metrics/pycocotools/coco.py → coco.py RENAMED
@@ -54,6 +54,7 @@ __version__ = "2.0"
 import copy
 import itertools
 import json
+
 # from . import mask as maskUtils
 import os
 import sys
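
With coco.py now at the repository root it can be imported directly. A minimal sketch of the pycocotools-style interface this vendored copy is assumed to expose (the annotation file name is hypothetical):

from coco import COCO  # assumes the repo root is on sys.path

coco_gt = COCO("instances_val2017.json")    # hypothetical annotation file
img_ids = coco_gt.getImgIds()
ann_ids = coco_gt.getAnnIds(imgIds=img_ids[:1])
anns = coco_gt.loadAnns(ann_ids)
print(len(img_ids), len(anns))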
detection_metrics/coco_evaluate.py → coco_evaluate.py RENAMED
@@ -6,10 +6,14 @@ from typing import Dict, List, Union
 import numpy as np
 import torch
 
-from detection_metrics.pycocotools.coco import COCO
-from detection_metrics.pycocotools.cocoeval import COCOeval
-from detection_metrics.utils import (_TYPING_BOX, _TYPING_PREDICTIONS, convert_to_xywh,
-                                     create_common_coco_eval)
+from .coco import COCO
+from .cocoeval import COCOeval
+from .utils import (
+    _TYPING_BOX,
+    _TYPING_PREDICTIONS,
+    convert_to_xywh,
+    create_common_coco_eval,
+)
 
 _SUPPORTED_TYPES = ["bbox"]
 
@@ -64,7 +68,6 @@ class COCOEvaluator(object):
             eval_imgs = coco_eval.evaluate()
             self.eval_imgs[iou_type].append(eval_imgs)
 
-
     def synchronize_between_processes(self) -> None:
         """
         Synchronizes evaluation images between processes.
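
With the relative imports above, the evaluator is driven roughly as follows. This is a minimal sketch based only on the method names visible in this diff; the constructor signature, the prediction schema, and the idea that the files are importable as a package are assumptions.

from coco import COCO
from coco_evaluate import COCOEvaluator

coco_gt = COCO("annotations.json")            # hypothetical ground-truth file
evaluator = COCOEvaluator(coco_gt, ["bbox"])  # assumed constructor; "bbox" is the only supported type

# predictions keyed by image_id, mirroring how detection_metrics.py builds `res`
evaluator.update({42: {"boxes": [[10.0, 20.0, 30.0, 40.0]],
                       "scores": [0.9],
                       "labels": [1]}})

evaluator.synchronize_between_processes()
evaluator.accumulate()
evaluator.summarize()
stats = evaluator.get_results()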
detection_metrics/pycocotools/cocoeval.py → cocoeval.py RENAMED
File without changes
detection_metrics.py CHANGED
@@ -4,10 +4,9 @@ import datasets
 import torch
 import evaluate
 import json
-from tqdm import tqdm
-from detection_metrics.pycocotools.coco import COCO
-from detection_metrics.coco_evaluate import COCOEvaluator
-from detection_metrics.utils import _TYPING_PREDICTION, _TYPING_REFERENCE
+from .coco import COCO
+from .coco_evaluate import COCOEvaluator
+from .utils import _TYPING_PREDICTION, _TYPING_REFERENCE
 
 _DESCRIPTION = "This class evaluates object detection models using the COCO dataset \
     and its evaluation metrics."
@@ -37,6 +36,7 @@ Args:
     **kwargs: Additional keyword arguments forwarded to evaluate.Metrics.
 """
 
+
 class EvaluateObjectDetection(evaluate.Metric):
     """
     Class for evaluating object detection models.
@@ -73,8 +73,10 @@ class EvaluateObjectDetection(evaluate.Metric):
         self.coco_evaluator.coco_eval["bbox"].cocoGt.cats = cats
         self.coco_evaluator.coco_gt.cats = cats
         self.coco_evaluator.coco_gt.dataset["categories"] = list(cats.values())
-        self.coco_evaluator.coco_eval["bbox"].params.catIds = [c["id"] for c in cats.values()]
-
+        self.coco_evaluator.coco_eval["bbox"].params.catIds = [
+            c["id"] for c in cats.values()
+        ]
+
     def _info(self):
         """
         Returns the MetricInfo object with information about the module.
@@ -147,15 +149,15 @@ class EvaluateObjectDetection(evaluate.Metric):
         for prediction in predictions:
             ret.append({k: v for k, v in prediction.items() if k in required})
         return ret
-
+
     def _clear_references(self, references):
         required = [""]
         ret = []
         for ref in references:
             ret.append({k: v for k, v in ref.items() if k in required})
         return ret
-
-    def add(self, *, prediction = None, reference = None, **kwargs):
+
+    def add(self, *, prediction=None, reference=None, **kwargs):
         """
         Preprocesses the predictions and references and calls the parent class function.
 
@@ -167,13 +169,15 @@ class EvaluateObjectDetection(evaluate.Metric):
         if prediction is not None:
             prediction = self._clear_predictions(prediction)
             prediction = self._preprocess(prediction)
-
+
         res = {}  # {image_id} : prediction
         for output, target in zip(prediction, reference):
             res[target["image_id"][0]] = output
         self.coco_evaluator.update(res)
 
-        super(evaluate.Metric, self).add(prediction=prediction, references=reference, **kwargs)
+        super(evaluate.Metric, self).add(
+            prediction=prediction, references=reference, **kwargs
+        )
 
     def _compute(
         self,
@@ -192,12 +196,12 @@ class EvaluateObjectDetection(evaluate.Metric):
         """
         print("Synchronizing processes")
        self.coco_evaluator.synchronize_between_processes()
-
+
        print("Accumulating values")
        self.coco_evaluator.accumulate()
-
+
        print("Summarizing results")
        self.coco_evaluator.summarize()
-
+
        stats = self.coco_evaluator.get_results()
        return stats
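
After these import fixes the file loads as a standard HF evaluate module. A minimal usage sketch follows; the repository path and the prediction fields are assumptions (only the image_id key in the reference is clearly required by add() above):

import evaluate

metric = evaluate.load("./detection_metrics")   # hypothetical local path or hub id

# one prediction dict per image; keys here are an assumed detection schema
prediction = [{"boxes": [[10.0, 20.0, 30.0, 40.0]], "scores": [0.9], "labels": [1]}]
# add() keys each prediction by target["image_id"][0]
reference = [{"image_id": [42]}]

metric.add(prediction=prediction, reference=reference)
results = metric.compute()
print(results)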
detection_metrics/pycocotools/mask.py → mask.py RENAMED
@@ -1,6 +1,6 @@
-__author__ = 'tsungyi'
+__author__ = "tsungyi"
 
-from detection_metrics.pycocotools import _mask
+from pycocotools import _mask
 
 # Interface for manipulating masks stored in RLE format.
 #
@@ -73,22 +73,25 @@ from detection_metrics.pycocotools import _mask
 # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
 # Licensed under the Simplified BSD License [see coco/license.txt]
 
-iou = _mask.iou
-merge = _mask.merge
+iou = _mask.iou
+merge = _mask.merge
 frPyObjects = _mask.frPyObjects
 
+
 def encode(bimask):
     if len(bimask.shape) == 3:
         return _mask.encode(bimask)
     elif len(bimask.shape) == 2:
         h, w = bimask.shape
-        return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0]
+        return _mask.encode(bimask.reshape((h, w, 1), order="F"))[0]
+
 
 def decode(rleObjs):
     if type(rleObjs) == list:
         return _mask.decode(rleObjs)
     else:
-        return _mask.decode([rleObjs])[:,:,0]
+        return _mask.decode([rleObjs])[:, :, 0]
+
 
 def area(rleObjs):
     if type(rleObjs) == list:
@@ -96,8 +99,9 @@ def area(rleObjs):
     else:
         return _mask.area([rleObjs])[0]
 
+
 def toBbox(rleObjs):
     if type(rleObjs) == list:
         return _mask.toBbox(rleObjs)
     else:
-        return _mask.toBbox([rleObjs])[0]
+        return _mask.toBbox([rleObjs])[0]
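
A small round-trip sketch of the RLE helpers above; it assumes the compiled pycocotools._mask extension is installed (the new import pulls it from pycocotools) and that the renamed top-level mask.py is importable:

import numpy as np
import mask as mask_utils   # the renamed top-level mask.py

bimask = np.zeros((32, 32), dtype=np.uint8, order="F")
bimask[8:16, 8:24] = 1

rle = mask_utils.encode(bimask)     # 2-D input -> single RLE dict
print(mask_utils.area(rle))         # pixel count of the mask
print(mask_utils.toBbox(rle))       # [x, y, width, height]
decoded = mask_utils.decode(rle)    # back to a (32, 32) binary array
assert np.array_equal(decoded, bimask)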
detection_metrics/pycocotools/mask_utils.py → mask_utils.py RENAMED
File without changes
detection_metrics/utils.py → utils.py RENAMED
@@ -1,13 +1,12 @@
 import copy
 import pickle
 from typing import Dict, List, Tuple, Union
-from tqdm import tqdm
 import numpy as np
 import torch
 import torch.distributed as dist
 from datasets import Dataset
 
-from detection_metrics.pycocotools.cocoeval import COCOeval
+from .cocoeval import COCOeval
 
 # Typings
 _TYPING_BOX = Tuple[float, float, float, float]
@@ -19,6 +18,7 @@ _TYPING_PREDICTION = Dict[str, _TYPING_PRED_REF]
 _TYPING_REFERENCE = Dict[str, _TYPING_PRED_REF]
 _TYPING_PREDICTIONS = Dict[int, _TYPING_PREDICTION]
 
+
 def convert_to_xywh(boxes: torch.Tensor) -> torch.Tensor:
     """
     Convert bounding boxes from (xmin, ymin, xmax, ymax) format to (x, y, width, height) format.
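
For reference, the conversion described in convert_to_xywh's docstring amounts to the following; the body here is an illustrative sketch, not necessarily the repository's exact implementation:

import torch

def convert_to_xywh(boxes: torch.Tensor) -> torch.Tensor:
    # (xmin, ymin, xmax, ymax) -> (x, y, width, height)
    xmin, ymin, xmax, ymax = boxes.unbind(dim=-1)
    return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=-1)

boxes = torch.tensor([[10.0, 20.0, 30.0, 60.0]])
print(convert_to_xywh(boxes))   # tensor([[10., 20., 20., 40.]])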