abidlabs HF staff commited on
Commit
d02fec5
1 Parent(s): c58b9bb

Upload mean_iou.py

Browse files
Files changed (1) hide show
  1. mean_iou.py +314 -0
mean_iou.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Mean IoU (Intersection-over-Union) metric."""
15
+
16
+ from typing import Dict, Optional
17
+
18
+ import datasets
19
+ import numpy as np
20
+
21
+ import evaluate
22
+
23
+
24
# Human-readable summary of the metric; surfaced to users through
# `add_start_docstrings` and `MetricInfo.description` further down.
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
29
+
30
# Argument/return documentation for the metric, in the standard `evaluate`
# kwargs-description format (also contains the runnable doctest example).
_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.

Examples:

    >>> import numpy as np

    >>> mean_iou = evaluate.load("mean_iou")

    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])

    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])

    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])

    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]

    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}
"""
84
+
85
# BibTeX citation for MMSegmentation, whose evaluation code this metric mirrors.
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
94
+
95
+
96
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and Union.

    Args:
        pred_label (`ndarray`):
            Prediction segmentation map of shape (height, width).
        label (`ndarray`):
            Ground truth segmentation map of shape (height, width).
        num_labels (`int`):
            Number of categories.
        ignore_index (`int`):
            Index that will be ignored during evaluation.
        label_map (`dict`, *optional*):
            Mapping old labels to new labels. The parameter will work only when label is str.
        reduce_labels (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

    Returns:
        area_intersect (`ndarray`):
            The intersection of prediction and ground truth histogram on all classes.
        area_union (`ndarray`):
            The union of prediction and ground truth histogram on all classes.
        area_pred_label (`ndarray`):
            The prediction histogram on all classes.
        area_label (`ndarray`):
            The ground truth histogram on all classes.
    """
    # Convert to NumPy arrays *before* any relabeling. `np.array` copies, so
    # the remapping/reduction below never mutates the caller's inputs, and
    # plain nested lists are accepted as well as ndarrays.
    pred_label = np.array(pred_label)
    label = np.array(label)

    # Remap old label ids to new ids, if a mapping is provided.
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    if reduce_labels:
        # Shift all labels down by one; the background class (0) is mapped to
        # 255 so it is dropped by the ignore mask below (254 = 255 - 1).
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    # Keep only pixels whose ground-truth label is not the ignored index.
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = label[mask]

    # Pixels where the prediction agrees with the ground truth.
    intersect = pred_label[pred_label == label]

    # Per-class pixel counts via histograms with one bin per class over [0, num_labels).
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    # |A ∪ B| = |A| + |B| - |A ∩ B|, per class.
    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
158
+
159
+
160
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair.

    Args:
        results (`ndarray`):
            List of prediction segmentation maps, each of shape (height, width).
        gt_seg_maps (`ndarray`):
            List of ground truth segmentation maps, each of shape (height, width).
        num_labels (`int`):
            Number of categories.
        ignore_index (`int`):
            Index that will be ignored during evaluation.
        label_map (`dict`, *optional*):
            Mapping old labels to new labels. The parameter will work only when label is str.
        reduce_labels (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

    Returns:
        total_area_intersect (`ndarray`):
            The intersection of prediction and ground truth histogram on all classes.
        total_area_union (`ndarray`):
            The union of prediction and ground truth histogram on all classes.
        total_area_pred_label (`ndarray`):
            The prediction histogram on all classes.
        total_area_label (`ndarray`):
            The ground truth histogram on all classes.
    """
    # Running per-class totals, accumulated in float64 across all image pairs.
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    # Sum the per-image histograms; extra maps in the longer of the two
    # sequences are silently dropped by `zip`.
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
208
+
209
+
210
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate Mean Intersection and Union (mIoU).

    Args:
        results (`ndarray`):
            List of prediction segmentation maps, each of shape (height, width).
        gt_seg_maps (`ndarray`):
            List of ground truth segmentation maps, each of shape (height, width).
        num_labels (`int`):
            Number of categories.
        ignore_index (`int`):
            Index that will be ignored during evaluation.
        nan_to_num (`int`, *optional*):
            If specified, NaN values will be replaced by the number defined by the user.
        label_map (`dict`, *optional*):
            Mapping old labels to new labels. The parameter will work only when label is str.
        reduce_labels (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

    Returns:
        `Dict[str, float | ndarray]` comprising various elements:
        - *mean_iou* (`float`):
            Mean Intersection-over-Union (IoU averaged over all categories).
        - *mean_accuracy* (`float`):
            Mean accuracy (averaged over all categories).
        - *overall_accuracy* (`float`):
            Overall accuracy on all images.
        - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
            Per category accuracy.
        - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
            Per category IoU.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # Overall accuracy: fraction of all non-ignored pixels predicted correctly.
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    # Per-category IoU and accuracy. Classes absent from both prediction and
    # ground truth give 0/0 -> NaN, which `np.nanmean` below skips.
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics = {
        "mean_iou": np.nanmean(iou),
        "mean_accuracy": np.nanmean(acc),
        "overall_accuracy": all_acc,
        "per_category_iou": iou,
        "per_category_accuracy": acc,
    }

    if nan_to_num is not None:
        # Replace NaNs (categories with no support) with the user-supplied value.
        metrics = {
            metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()
        }

    return metrics
274
+
275
+
276
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(evaluate.Metric):
    """`evaluate.Metric` wrapper exposing the module-level `mean_iou` function."""

    def _info(self):
        # Metric metadata: description/citation strings plus the input schema
        # (each segmentation map arrives as a nested sequence of uint16 pixels).
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        # NOTE: annotation corrected from `bool` — this is a label index (e.g. 255).
        ignore_index: int,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        # Thin delegation to the module-level implementation; returns its dict
        # of scalar and per-category metrics unchanged.
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result