gil.simas@sea.ai committed on
Commit 137b0e5
1 Parent(s): 930daa4

add only_tp feature

README.md CHANGED
@@ -1,5 +1,11 @@
 ---
 title: box-metrics
+tags:
+- evaluate
+- metric
+description: >-
+  Modified cocoevals.py which is wrapped into torchmetrics' mAP metric with
+  numpy instead of torch dependency.
 emoji: 🐠
 colorFrom: red
 colorTo: blue
__pycache__/box_metrics.cpython-39.pyc CHANGED
Binary files a/__pycache__/box_metrics.cpython-39.pyc and b/__pycache__/box_metrics.cpython-39.pyc differ
 
__pycache__/utils.cpython-39.pyc CHANGED
Binary files a/__pycache__/utils.cpython-39.pyc and b/__pycache__/utils.cpython-39.pyc differ
 
app.py ADDED
@@ -0,0 +1,6 @@
+import evaluate
+from evaluate.utils import launch_gradio_widget
+
+
+module = evaluate.load("box_metrics.py")
+launch_gradio_widget(module)
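
For context, app.py only wraps the metric script in evaluate's auto-generated Gradio widget. The same module can be loaded and used directly; a minimal sketch, assuming box_metrics.py sits in the working directory:

import evaluate

# evaluate.load accepts a local script path as well as a Hub repo id
metric = evaluate.load("box_metrics.py")
print(type(metric).__name__)  # the metric class defined in box_metrics.py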
box_metrics.py CHANGED
@@ -7,36 +7,6 @@ import torch
 from utils import bbox_iou, bbox_bep
 import datasets
 
-# _DESCRIPTION = """\
-# The box-metrics package provides a set of metrics to evaluate
-# the performance of object detection algorithms in ther of sizing and positioning
-# of the bounding boxes."""
-
-# _KWARGS_DESCRIPTION = """
-# Calculates how good are predictions given some references, using certain scores
-# Args:
-#     predictions: list of predictions to score. Each predictions
-#         should be a string with tokens separated by spaces.
-#     references: list of reference for each prediction. Each
-#         reference should be a string with tokens separated by spaces.
-#     max_iou (`float`, *optional*):
-#         If specified, this is the minimum Intersection over Union (IoU) threshold to consider a detection as a true positive.
-#         Default is 0.5.
-# """
-
-# _CITATION = """\
-# @InProceedings{huggingface:module,
-# title = {A great new module},
-# authors={huggingface, Inc.},
-# year={2020}
-# }\
-# @article{milan2016mot16,
-# title={Are object detection assessment criteria ready for maritime computer vision?},
-# author={Dilip K. Prasad1, Deepu Rajan and Chai Quek},
-# journal={arXiv:1809.04659v1},
-# year={2018}
-# }
-# """
 
 _CITATION = """\
 @InProceedings{huggingface:module,
@@ -71,12 +41,11 @@ Args:
         Default is 0.5.
 """
 
-# @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
-class BoxMetrics(evaluate.Metric):
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class box_metrics(evaluate.Metric):
 
-    def __init__(self, max_iou: float = 0.01, **kwargs):
-        # super().__init__(**kwargs)
-        self.max_iou = max_iou
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
         self.boxes = {}
         self.gt_field = "ground_truth_det"
 
@@ -109,6 +78,7 @@ class BoxMetrics(evaluate.Metric):
         self.add(payload)
 
     def add(self, payload: Payload):
+        """Convert a payload to the format of the tracking metrics library"""
         self.gt_field = payload.gt_field_name
         for sequence in payload.sequences:
             self.boxes[sequence] = {}
@@ -121,77 +91,148 @@ class BoxMetrics(evaluate.Metric):
                 preds = payload.sequences[sequence][model]
                 preds_tm = self.payload_preds_to_rm(preds, resolution)
                 self.boxes[sequence][model] = preds_tm
-
-    def compute(self):
+
+    def add_batch(self, predictions, references, sequence_name = "sequence"):
+        """Add a batch of predictions and references to the metric
+        Mainly for testing purposes
+        references: list of tm boxes as [n, 5] tensors
+            box format: label, x1, y1, x2, y2
+        predictions: dict of {model_name: list of tm boxes as [n, 6] tensors}
+            box format: x1, y1, x2, y2, conf, label
+        """
+        self.boxes[sequence_name] = {}
+        self.boxes[sequence_name][self.gt_field] = []
+        self.boxes[sequence_name][self.gt_field] = references
+        for model in predictions:
+            self.boxes[sequence_name][model] = predictions[model]
+
+
+    def compute(self,
+                iou_threshold: float = 0.01,
+                only_tp = True):
         """Compute the metric value"""
 
         output = {}
 
         for sequence in self.boxes:
-            ious = []
-            beps = []
-            bottom_x = []
-            bottom_y = []
-            widths = []
-            heights = []
+            ious = np.array([])
+            beps = np.array([])
+            e_bottom_x = np.array([])
+            e_bottom_y = np.array([])
+            e_widths = np.array([])
+            e_heights = np.array([])
+            e_n_widths = np.array([])
+            e_n_heights = np.array([])
+            e_n_bottom_x = np.array([])
+            e_n_bottom_y = np.array([])
+
             output[sequence] = {}
 
-            target = self.boxes[sequence][self.gt_field]
+            labels = self.boxes[sequence][self.gt_field]
             for model in self.boxes[sequence]:
-                preds = self.boxes[sequence][model]
+                detections = self.boxes[sequence][model]
 
-                for i in range(len(preds)):
+                for i in range(len(detections)):
 
-                    target_tm_bbs = target[i][:, 1:]
-                    pred_tm_bbs = preds[i][:, :4]
+                    frame_labels = labels[i]
+                    frame_detections = detections[i]
 
-                    if target_tm_bbs.shape[0] == 0 or pred_tm_bbs.shape[0] == 0:
-                        continue
+                    iou = self.box_iou(frame_labels[:, 1:], frame_detections[:, :4])
 
-                    for t_box in target_tm_bbs:
-                        iou = bbox_iou(t_box.unsqueeze(0), pred_tm_bbs, xywh=False)
-                        bep = bbox_bep(t_box.unsqueeze(0), pred_tm_bbs, xywh=False)
-                        matches = pred_tm_bbs[iou.squeeze(1) > self.max_iou]
+                    x = torch.where(iou > iou_threshold)
 
-                        bep = bep[iou>self.max_iou]
-                        iou = iou[iou>self.max_iou]
+                    if x[0].shape[0]:
 
-                        if torch.any(iou <= 0):
+                        matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
+
+                        if x[0].shape[0] > 1 and only_tp:
+                            matches = matches[matches[:, 2].argsort()[::-1]]
+                            matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
+                            matches = matches[matches[:, 2].argsort()[::-1]]
+                            matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
+
+                    else:
+                        matches = np.zeros((0, 3))
+
+                    labels_i, detections_i, ious_v = matches.transpose()
+                    labels_i = labels_i.astype(int)
+                    detections_i = detections_i.astype(int)
+
+                    for pair in zip(labels_i, detections_i, ious_v):
+                        iou = pair[2]
+                        t_box = frame_labels[pair[0]][1:]
+                        p_box = frame_detections[pair[1]][:4]
+
+                        bep = bbox_bep(t_box.unsqueeze(0), p_box.unsqueeze(0))
+                        if iou < 0:
                             raise ValueError("IoU should be greater than 0, please contact the code maintainer")
-                        if torch.any(bep <= 0):
+                        if bep < 0:
                             raise ValueError("BEP should be greater than 0, please contact the code maintainer")
-
-                        ious.extend(iou.tolist())
-                        beps.extend(bep.tolist())
-
-                        for match in matches:
-                            t_xc = (match[0].item()+match[2].item())/2
-                            p_xc = (t_box[0].item()+t_box[2].item())/2
-                            t_w = t_box[2].item()-t_box[0].item()
-                            p_w = match[2].item()-match[0].item()
-                            t_h = t_box[3].item()-t_box[1].item()
-                            p_h = match[3].item()-match[1].item()
-
-
-                            bottom_x.append(abs(t_xc-p_xc))
-                            widths.append(abs(t_w-p_w))
-                            bottom_y.append(abs(t_box[1].item()-match[1].item()))
-                            heights.append(abs(t_h-p_h))
 
+                        ious = np.append(ious, iou)
+                        beps = np.append(beps, bep)
+
+                        t_xc = (t_box[0].item()+t_box[2].item())/2
+                        p_xc = (p_box[0].item()+p_box[2].item())/2
+                        t_yc = t_box[3].item()
+                        p_yc = p_box[3].item()
+                        t_w = t_box[2].item()-t_box[0].item()
+                        p_w = p_box[2].item()-p_box[0].item()
+                        t_h = t_box[3].item()-t_box[1].item()
+                        p_h = p_box[3].item()-p_box[1].item()
+
+                        e_widths = np.append(e_widths, p_w-t_w)
+                        e_heights = np.append(e_heights, p_h-t_h)
+                        e_bottom_x = np.append(e_bottom_x, p_xc-t_xc)
+                        e_bottom_y = np.append(e_bottom_y, p_yc-t_yc)
+
+                        e_n_widths = np.append(e_n_widths, (p_w-t_w)/t_w)
+                        e_n_heights = np.append(e_n_heights, (p_h-t_h)/t_h)
+                        e_n_bottom_x = np.append(e_n_bottom_x, (p_xc-t_xc)/t_w)
+                        e_n_bottom_y = np.append(e_n_bottom_y, (p_yc-t_yc)/t_h)
+
                 output[sequence][model] = {
                     "iou_mean": np.mean(ious),
                     "bep_mean": np.mean(beps),
-                    "bottom_x_mean": np.mean(bottom_x),
-                    "bottom_y_mean": np.mean(bottom_y),
-                    "width_mean": np.mean(widths),
-                    "height_mean": np.mean(heights),
-                    "bottom_x_std": np.std(bottom_x),
-                    "bottom_y_std": np.std(bottom_y),
-                    "width_std": np.std(widths),
-                    "height_std": np.std(heights)
+                    "e_bottom_x_mean": np.mean(e_bottom_x),
+                    "e_bottom_y_mean": np.mean(e_bottom_y),
+                    "e_width_mean": np.mean(e_widths),
+                    "e_height_mean": np.mean(e_heights),
+                    "e_n_bottom_x_mean": np.mean(e_n_bottom_x),
+                    "e_n_bottom_y_mean": np.mean(e_n_bottom_y),
+                    "e_n_width_mean": np.mean(e_n_widths),
+                    "e_n_height_mean": np.mean(e_n_heights),
+                    "e_bottom_x_std": np.std(e_bottom_x),
+                    "e_bottom_y_std": np.std(e_bottom_y),
+                    "e_width_std": np.std(e_widths),
+                    "e_height_std": np.std(e_heights),
+                    "e_n_bottom_x_std": np.std(e_n_bottom_x),
+                    "e_n_bottom_y_std": np.std(e_n_bottom_y),
+                    "e_n_width_std": np.std(e_n_widths),
+                    "e_n_height_std": np.std(e_n_heights),
+                    "n_matches": len(e_n_heights),
                 }
+
         return output
 
+    @staticmethod
+    def summarize(result):
+        """Summarize the results by model instead of by sequence"""
+        summary = {}
+        for sequence in result:
+            for model in result[sequence]:
+                if model not in summary:
+                    summary[model] = {}
+                for metric in result[sequence][model]:
+                    if metric not in summary[model]:
+                        summary[model][metric] = []
+                    summary[model][metric].append(result[sequence][model][metric])
+        # average the results
+        for model in summary:
+            for metric in summary[model]:
+                summary[model][metric] = np.mean(summary[model][metric])
+        return summary
+
     @staticmethod
     def payload_labels_to_tm(labels, resolution):
         """Convert the labels of a payload sequence to the format of torch metrics"""
@@ -225,5 +266,26 @@ class BoxMetrics(evaluate.Metric):
 
         return preds_tm
 
+    @staticmethod
+    def box_iou(box1, box2, eps=1e-7):
+        # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
+        """
+        Return intersection-over-union (Jaccard index) of boxes.
+        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+        Arguments:
+            box1 (Tensor[N, 4])
+            box2 (Tensor[M, 4])
+        Returns:
+            iou (Tensor[N, M]): the NxM matrix containing the pairwise
+                IoU values for every element in boxes1 and boxes2
+        """
+
+        # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
+        (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
+        inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
+
+        # IoU = inter / (area1 + area2 - inter)
+        return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)
+
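The core of the new only_tp option is the greedy one-to-one matching on the IoU matrix, the same pattern used in YOLOv5's confusion-matrix code: candidate pairs above iou_threshold are sorted by IoU and deduplicated first per detection, then per label, so each box ends up in at most one match. A standalone sketch with toy values (not from the repo):

import numpy as np
import torch

# toy IoU matrix: rows = ground-truth labels, columns = detections
iou = torch.tensor([[0.90, 0.40, 0.00],
                    [0.85, 0.70, 0.05]])
iou_threshold = 0.01

x = torch.where(iou > iou_threshold)  # indices of all candidate pairs
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).numpy()

matches = matches[matches[:, 2].argsort()[::-1]]                   # best IoU first
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # keep one label per detection
matches = matches[matches[:, 2].argsort()[::-1]]                   # re-sort: np.unique reorders rows
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # keep one detection per label

print(matches)  # [[0. 0. 0.9] [1. 1. 0.7]] -- the 0.85, 0.40 and 0.05 candidates are discarded

Without only_tp, every pair above the threshold is kept, so a single ground-truth box can contribute several matches and the position/size error statistics get skewed by duplicate detections.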
 
compute.py DELETED
@@ -1,88 +0,0 @@
-import torch
-import numpy as np
-from utils import BoxMetrics, concat_labels, concat_preds
-import fiftyone as fo
-from seametrics.fo_utils.utils import fo_to_payload
-from const import INDEX_MAPPING, CLASS_MAPPING, INDEX_MAPPING_INV
-from tqdm import tqdm
-
-tags = ["WHALES"]
-cameras = ["thermal_narrow"]
-
-dataset_name = "SENTRY_VIDEOS_DATASET_QA"
-#dataset_name = "SENTRY_VIDEOS_DATASET_QA"
-model = "cerulean-level-17_11_2023_RL_SPLIT_ep147_CNN"
-det_gt_field = "ground_truth_det"
-
-cm = BoxMetrics(nc=10, conf=0, iou_thres=0)
-
-if dataset_name == "SAILING_DATASET_QA":
-    cameras = ["thermal_left"]
-    dataset_view = fo.load_dataset(dataset_name).match_tags(tags).select_group_slices(cameras).filter_labels(f"{model}", True, only_matches=False)
-    sequences = dataset_view.distinct("sequence")
-if dataset_name == "SENTRY_VIDEOS_DATASET_QA":
-    cameras = ["thermal_wide"]
-    dataset_view = fo.load_dataset(dataset_name).match_tags(tags).select_group_slices(cameras).filter_labels(f"frames.{model}", True, only_matches=False)
-    sequences = dataset_view.distinct("sequence")
-
-for sequence in tqdm(sequences):
-    payload = fo_to_payload(dataset = dataset_name,
-                            gt_field = det_gt_field,
-                            models = [model],
-                            tracking_mode = True,
-                            sequence_list = [sequence],
-                            excluded_classes = ["BIRD"],)
-
-    target = payload["sequences"][sequence][det_gt_field]
-    preds = payload["sequences"][sequence][model]
-    resolution = payload["sequences"][sequence]["resolution"]
-    target_tm = []
-    preds_tm = []
-    for frame in target:
-        target_tm_batch = []
-        for det in frame:
-            if CLASS_MAPPING[det["label"]] is not None:
-                label = INDEX_MAPPING[CLASS_MAPPING[det["label"]]]-1
-            else:
-                continue
-            box = det["bounding_box"]
-            x1, y1, x2, y2 = box[0], box[1], box[0]+box[2], box[1]+box[3]
-            x1, y1, x2, y2 = x1*resolution[1], y1*resolution[0], x2*resolution[1], y2*resolution[0]
-            target_tm_batch.append([label, x1, y1, x2, y2])
-        target_tm.append(torch.tensor(target_tm_batch) if len(target_tm_batch) > 0 else torch.empty((0, 5)))
-
-    for frame in preds:
-        pred_tm_batch = []
-        for det in frame:
-            label = INDEX_MAPPING[det["label"]]-1
-            box = det["bounding_box"]
-            x1, y1, x2, y2 = box[0], box[1], box[0]+box[2], box[1]+box[3]
-            x1, y1, x2, y2 = x1*resolution[1], y1*resolution[0], x2*resolution[1], y2*resolution[0]
-            conf = 1
-            pred_tm_batch.append([x1, y1, x2, y2, conf, label])
-
-        preds_tm.append(torch.tensor(pred_tm_batch) if len(pred_tm_batch) > 0 else torch.empty((0, 6)))
-
-    for i in range(len(target_tm)):
-        target_batch = target_tm[i]
-        pred_batch = preds_tm[i]
-        cm.process_batch(pred_batch, target_batch)
-
-print("SUMMARY: ")
-print("\nmodel: ", model)
-print("\nconfusion matrix: ")
-print(cm.matrix.astype(int))
-
-tp = cm.matrix[:-1, :-1].sum()
-fp = cm.matrix[:-1, -1].sum()
-fn = cm.matrix[-1, :-1].sum()
-print("\nTP: ", tp, "FP: ", fp, "FN: ", fn, "support: ", tp + fn)
-#Detection Rates:
-print("\nDetection Rates:")
-for i in range(10):
-    tp = cm.matrix[:-1, i].sum()
-    fn = cm.matrix[-1, i].sum()
-    if tp + fn == 0:
-        print(f"{INDEX_MAPPING_INV[i+1]}: NaN")
-    else:
-        print(f"{INDEX_MAPPING_INV[i+1]}: {tp/(tp+fn)}")
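The deleted script read detection counts straight off a confusion matrix whose last row and column hold the background class. For reference, a minimal sketch of that readout under the same layout assumption (numbers illustrative):

import numpy as np

# rows = predicted class (last row = background, i.e. missed ground truth),
# cols = ground-truth class (last col = background, i.e. false positive)
matrix = np.array([[8, 1, 0, 2],
                   [0, 5, 1, 1],
                   [1, 0, 6, 0],
                   [1, 2, 1, 0]])

tp = matrix[:-1, :-1].sum()  # predictions matched to some ground truth
fp = matrix[:-1, -1].sum()   # predictions matched to nothing
fn = matrix[-1, :-1].sum()   # ground truths no prediction matched
print("TP:", tp, "FP:", fp, "FN:", fn, "support:", tp + fn)  # TP: 22 FP: 3 FN: 4 support: 26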
test.py CHANGED
@@ -1,11 +1,12 @@
 import torch
 import numpy as np
 import fiftyone as fo
-from box_metrics import BoxMetrics
+# from box_metrics import BoxMetrics
 from seametrics.fo_utils.utils import fo_to_payload
 from tqdm import tqdm
+import evaluate
 
-tags = ["WHALES"]
+tags = []
 dataset_name = "SENTRY_VIDEOS_DATASET_QA"
 model = "cerulean-level-17_11_2023_RL_SPLIT_ep147_CNN"
 det_gt_field = "ground_truth_det"
@@ -14,13 +15,31 @@ dataset = fo.load_dataset(dataset_name)
 dataset_view = fo.load_dataset(dataset_name).match_tags(tags) if tags else fo.load_dataset(dataset_name)
 sequences = dataset_view.distinct("sequence")
 
-bbox_metric = BoxMetrics(max_iou=0.01)
-payload = fo_to_payload(dataset = dataset_name,
-                        gt_field = det_gt_field,
-                        models = [model],
-                        tracking_mode = True,
-                        sequence_list = sequences)
-print(payload)
-bbox_metric.add_payload(payload)
+# bbox_metric = BoxMetrics(max_iou=0.01)
+bbox_metric = evaluate.load("box_metrics.py")
+
+for sequence in tqdm(sequences[:2]):
+    try:
+        payload = fo_to_payload(dataset = dataset_name,
+                                gt_field = det_gt_field,
+                                models = [model],
+                                tracking_mode = False,
+                                sequence_list = [sequence],
+                                group_slices = ["thermal_wide"],
+                                excluded_classes = ["SUN_REFLECTIONS"],
+                                )
+        bbox_metric.add_payload(payload)
+    except Exception as e:
+        print(f"Error in {sequence}: {e}")
+
 result = bbox_metric.compute()
-print(result)
+summary = bbox_metric.summarize(result)
+
+
+for sequence in result:
+    print(f"Sequence: {sequence}")
+    for model in result[sequence]:
+        print(f"Model: {model}")
+        for metric in result[sequence][model]:
+            print(f"{metric}: {result[sequence][model][metric]}")
+        print("\n")
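For orientation, compute() returns a dict keyed sequence -> model -> metric, and summarize() averages each metric across sequences per model. Hypothetical shapes of the two objects printed above (names and values illustrative):

result = {
    "sequence_name": {
        "model_name": {"iou_mean": 0.62, "e_bottom_y_mean": -1.3, "n_matches": 130},
    },
}
summary = {"model_name": {"iou_mean": 0.62, "e_bottom_y_mean": -1.3, "n_matches": 130.0}}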
unit_test.py ADDED
@@ -0,0 +1,31 @@
+import torch
+import numpy as np
+import fiftyone as fo
+# from box_metrics import BoxMetrics
+from seametrics.fo_utils.utils import fo_to_payload
+from tqdm import tqdm
+import evaluate
+
+bbox_metric = evaluate.load("box_metrics.py")
+
+references = [torch.tensor([
+    [0,0,0,50,50],
+    [0,50,50,100,100],
+    [0,100,100,150,150]
+    ])]
+
+predictions = {"model": [torch.tensor([
+    [0,0,50,50,0,0],
+    [50,50,90,90,0,0],
+    [100,100,140,140,0,0],
+    [100,100,130,130,0,0]
+    ])]
+}
+
+bbox_metric.add_batch(predictions, references)
+print(bbox_metric.boxes)
+
+result = bbox_metric.compute()
+
+for metric in result["sequence"]["model"]:
+    print(metric, result["sequence"]["model"][metric])
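A quick hand check of the toy boxes above, assuming compute()'s defaults (iou_threshold=0.01, only_tp=True): the first ground-truth box pairs with the first detection at IoU 1.0, the second with the second at 1600/2500 = 0.64, the third with the third at 0.64, and the fourth detection (IoU 0.36 against the third box) is dropped by the greedy matcher. So the printout should show, among the other metrics:

# expected from the toy data above (hand-computed):
expected_n_matches = 3
expected_iou_mean = (1.0 + 0.64 + 0.64) / 3  # = 0.76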
utils.py CHANGED
@@ -2,7 +2,7 @@ import torch
 import numpy as np
 import math
 
-def bbox_bep(box1, box2, xywh=True, eps=1e-7, bep1 = True):
+def bbox_bep(box1, box2, xywh=False, eps=1e-7, bep1 = True):
     """
     Calculates bottom edge proximity between two boxes
 
@@ -85,89 +85,3 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7
     c_area = cw * ch + eps  # convex area
     return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf
     return iou  # IoU
-
-class BoxMetrics:
-    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
-    def __init__(self):
-        self.preds_tm = []
-        self.target_tm = []
-        self.bottom_x = []
-        self.bottom_y = []
-        self.widths = []
-        self.heights = []
-        self.ious = []
-        self.beps = []
-
-    def add_batch(self, preds, target):
-        """
-        Return intersection-over-union (Jaccard index) of boxes.
-        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
-        Arguments:
-            detections torch(Array[N, 6]), x1, y1, x2, y2, conf, class
-            labels torch(Array[M, 5]), class, x1, y1, x2, y2
-        Returns:
-            None, updates confusion matrix accordingly
-        """
-        self.preds_tm.extend(preds)
-        self.target_tm.extend(target)
-
-    def compute(self):
-        """
-        Computes bbox iou, bep and location/size statistics
-        """
-
-        for i in range(len(self.target_tm)):
-            target_batch_boxes = self.target_tm[i][:, 1:]
-            pred_batch_boxes = self.preds_tm[i][:, :4]
-
-            if pred_batch_boxes.shape[0] == 0:
-                continue
-
-            if target_batch_boxes.shape[0] == 0:
-                continue
-
-            for t_box in target_batch_boxes:
-                iou = bbox_iou(t_box.unsqueeze(0), pred_batch_boxes, xywh=False)
-                bep = bbox_bep(t_box.unsqueeze(0), pred_batch_boxes, xywh=False)
-
-                matches = pred_batch_boxes[iou.squeeze(1) > 0.1]
-
-                bep = bep[iou > 0]
-                iou = iou[iou > 0]
-                # if any iou value is 0 or less, raise error
-                if torch.any(iou <= 0):
-                    raise ValueError("IoU values must be greater than 0.")
-                # same for bep
-                if torch.any(bep <= 0):
-                    print(t_box.unsqueeze(0))
-                    print(pred_batch_boxes)
-                    print(bep)
-                    print(iou)
-                    raise ValueError("BEP values must be greater than 0.")
-
-                self.ious.extend(iou.tolist())
-                self.beps.extend(bep.tolist())
-
-                for match in matches:
-                    t_xc = (match[0].item()+match[2].item())/2
-                    p_xc = (t_box[0].item()+t_box[2].item())/2
-                    t_w = t_box[2].item()-t_box[0].item()
-                    p_w = match[2].item()-match[0].item()
-                    t_h = t_box[3].item()-t_box[1].item()
-                    p_h = match[3].item()-match[1].item()
-
-                    self.bottom_x.append(p_xc - t_xc)
-                    self.bottom_y.append(match[3].item()-t_box[3].item())
-                    self.widths.append(p_w-t_w)
-                    self.heights.append(p_h-t_h)
-
-        return {"iou_mean": np.mean(self.ious),
-                "bep_mean": np.mean(self.beps),
-                "bottom_x_std": np.std(self.bottom_x),
-                "bottom_y_std": np.std(self.bottom_y),
-                "widths_std": np.std(self.widths),
-                "heights_std": np.std(self.heights),
-                "bottom_x_mean": np.mean(self.bottom_x),
-                "bottom_y_mean": np.mean(self.bottom_y),
-                "widths_mean": np.mean(self.widths),
-                "heights_mean": np.mean(self.heights)}
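
The only functional change left in utils.py is bbox_bep's default flipping from xywh=True to xywh=False, so callers now get corner-format (x1, y1, x2, y2) boxes by default, matching how box_metrics.py now calls it without the keyword. A call-site illustration (toy boxes, assuming utils.py is importable):

import torch
from utils import bbox_bep

t_box = torch.tensor([[0.0, 0.0, 50.0, 50.0]])   # target, x1 y1 x2 y2
p_box = torch.tensor([[0.0, 10.0, 50.0, 50.0]])  # prediction

# before this commit, corner format had to be requested explicitly:
#   bep = bbox_bep(t_box, p_box, xywh=False)
# after it, corner format is the default:
bep = bbox_bep(t_box, p_box)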