Spaces:
Runtime error
Runtime error
Update cocoevaluate.py
Browse files- cocoevaluate.py +41 -19
cocoevaluate.py
CHANGED
@@ -17,7 +17,7 @@ import evaluate
|
|
17 |
import datasets
|
18 |
import pyarrow as pa
|
19 |
|
20 |
-
from .coco_utils import CocoEvaluator, get_coco_api_from_dataset
|
21 |
|
22 |
# TODO: Add BibTeX citation
|
23 |
_CITATION = """\
|
@@ -72,9 +72,10 @@ def summarize_if_long_list(obj):
|
|
72 |
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
|
73 |
class COCOEvaluate(evaluate.Metric):
|
74 |
"""TODO: Short description of my evaluation module."""
|
75 |
-
def __init__(self,
|
76 |
super().__init__(**kwargs)
|
77 |
-
self.coco_dataset = CocoDetection(coco_path, feature_extractor, annotation_path)
|
|
|
78 |
base_ds = get_coco_api_from_dataset(self.coco_dataset)
|
79 |
self.coco_evaluator = CocoEvaluator(base_ds, iou_types)
|
80 |
|
@@ -92,22 +93,23 @@ class COCOEvaluate(evaluate.Metric):
|
|
92 |
'predictions': [
|
93 |
datasets.Features(
|
94 |
{
|
95 |
-
'scores': datasets.
|
96 |
-
'labels': datasets.
|
97 |
-
'boxes': datasets.
|
98 |
})
|
99 |
]
|
100 |
,
|
101 |
'references': [
|
102 |
datasets.Features(
|
103 |
{
|
104 |
-
'size': datasets.Value(
|
105 |
-
'image_id': datasets.Value(
|
106 |
-
'boxes': datasets.
|
107 |
-
'class_labels': datasets.Value(
|
108 |
-
'iscrowd': datasets.Value(
|
109 |
-
'orig_size': datasets.Value(
|
110 |
-
'area': datasets.Value(
|
|
|
111 |
}
|
112 |
)
|
113 |
],
|
@@ -125,16 +127,36 @@ class COCOEvaluate(evaluate.Metric):
|
|
125 |
# TODO: Download external resources if needed
|
126 |
pass
|
127 |
|
128 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
129 |
"""Returns the scores"""
|
130 |
for pred, ref in zip(predictions, references):
|
131 |
-
res = {
|
|
|
|
|
132 |
self.coco_evaluator.update(res)
|
133 |
self.coco_evaluator.synchronize_between_processes()
|
134 |
self.coco_evaluator.accumulate()
|
|
|
135 |
self.coco_evaluator.summarize()
|
136 |
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
}
|
|
|
17 |
import datasets
|
18 |
import pyarrow as pa
|
19 |
|
20 |
+
from .coco_utils import CocoEvaluator, get_coco_api_from_dataset
|
21 |
|
22 |
# TODO: Add BibTeX citation
|
23 |
_CITATION = """\
|
|
|
72 |
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
|
73 |
class COCOEvaluate(evaluate.Metric):
|
74 |
"""TODO: Short description of my evaluation module."""
|
75 |
+
def __init__(self, coco_dataset, iou_types=None, **kwargs):
    """Build the metric around a COCO-style detection dataset.

    Args:
        coco_dataset: a dataset in COCO detection format, accepted by
            ``get_coco_api_from_dataset`` (e.g. a ``CocoDetection`` instance).
        iou_types: IoU types evaluated by ``CocoEvaluator``. Defaults to
            ``['bbox']``.
        **kwargs: forwarded unchanged to ``evaluate.Metric.__init__``.
    """
    super().__init__(**kwargs)
    # `None` sentinel instead of a mutable default list: a literal
    # `iou_types=['bbox']` default would be shared across instances.
    if iou_types is None:
        iou_types = ["bbox"]
    self.coco_dataset = coco_dataset
    base_ds = get_coco_api_from_dataset(self.coco_dataset)
    self.coco_evaluator = CocoEvaluator(base_ds, iou_types)
|
81 |
|
|
|
93 |
'predictions': [
|
94 |
datasets.Features(
|
95 |
{
|
96 |
+
'scores': datasets.Sequence(datasets.Value("float")),
|
97 |
+
'labels': datasets.Sequence(datasets.Value("int64")),
|
98 |
+
'boxes': datasets.Sequence(datasets.Sequence(datasets.Value("float"))),
|
99 |
})
|
100 |
]
|
101 |
,
|
102 |
'references': [
|
103 |
datasets.Features(
|
104 |
{
|
105 |
+
'size': datasets.Sequence(datasets.Value("int64")),
|
106 |
+
'image_id': datasets.Sequence(datasets.Value("int64")),
|
107 |
+
'boxes': datasets.Sequence(datasets.Sequence(datasets.Value("float"))),
|
108 |
+
'class_labels': datasets.Sequence(datasets.Value("int64")),
|
109 |
+
'iscrowd': datasets.Sequence(datasets.Value("int64")),
|
110 |
+
'orig_size': datasets.Sequence(datasets.Value("int64")),
|
111 |
+
'area': datasets.Sequence(datasets.Value("int64")),
|
112 |
+
|
113 |
}
|
114 |
)
|
115 |
],
|
|
|
127 |
# TODO: Download external resources if needed
|
128 |
pass
|
129 |
|
130 |
+
def _preprocess(self, predictions):
|
131 |
+
"""Optional: preprocess the predictions and references before computing the scores"""
|
132 |
+
processed_predictions = []
|
133 |
+
for pred in predictions:
|
134 |
+
processed_pred = {}
|
135 |
+
for key in pred.keys():
|
136 |
+
processed_pred[key] = pred[key].detach().cpu().tolist()
|
137 |
+
processed_predictions.append(processed_pred)
|
138 |
+
return processed_predictions
|
139 |
+
|
140 |
+
def add(self, *, prediction=None, reference=None, **kwargs):
    """Preprocess one prediction/reference pair, then delegate to the parent.

    Tensors inside the dicts are converted to plain lists (see
    ``_preprocess``) so they match the declared ``datasets`` features
    before being buffered by ``evaluate.Metric.add``.

    Args:
        prediction: model output dicts for one batch, or ``None``.
        reference: ground-truth dicts for one batch, or ``None``.
        **kwargs: forwarded to the parent ``add``.
    """
    if prediction is not None:
        prediction = self._preprocess(prediction)
    if reference is not None:
        reference = self._preprocess(reference)
    # BUG FIX: the parent's keyword is `reference` (singular). Passing
    # `references=` fell through **kwargs, so the preprocessed reference
    # was never handed to the parent and the buffered pair was broken.
    super().add(prediction=prediction, reference=reference, **kwargs)
|
147 |
+
|
148 |
+
def _compute(self, predictions, references):
    """Feed every buffered batch to the COCO evaluator and return its stats.

    NOTE(review): reconstructed from a mangled diff view — the exact loop
    nesting of the original could not be read with certainty; confirm that
    ``update`` is meant to run once per batch while the synchronize /
    accumulate / summarize steps run once at the end.

    Args:
        predictions: batches of per-image output dicts.
        references: batches of per-image target dicts, aligned with
            ``predictions``.

    Returns:
        The accumulated statistics from ``CocoEvaluator.get_results()``.
    """
    for batch_preds, batch_refs in zip(predictions, references):
        # CocoEvaluator.update expects a mapping of image id -> output.
        # Assumes ref["image_id"] is a one-element sequence — TODO confirm.
        batch = {
            ref["image_id"][0]: out
            for ref, out in zip(batch_refs, batch_preds)
        }
        self.coco_evaluator.update(batch)
    self.coco_evaluator.synchronize_between_processes()
    self.coco_evaluator.accumulate()
    self.coco_evaluator.summarize()
    return self.coco_evaluator.get_results()
|
|