# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs evaluation using OpenImages groundtruth and predictions.
Uses Open Images Challenge 2018, 2019 metrics
Example usage:
python models/research/object_detection/metrics/oid_od_challenge_evaluation.py \
--input_annotations_boxes=/path/to/input/annotations-human-bbox.csv \
--input_annotations_labels=/path/to/input/annotations-label.csv \
--input_class_labelmap=/path/to/input/class_labelmap.pbtxt \
--input_predictions=/path/to/input/predictions.csv \
--output_metrics=/path/to/output/metric.csv \
--input_annotations_segm=[/path/to/input/annotations-human-mask.csv] \
If optional flag has_masks is True, Mask column is also expected in CSV.
CSVs with bounding box annotations, instance segmentations and image label
can be downloaded from the Open Images Challenge website:
https://storage.googleapis.com/openimages/web/challenge.html
The format of the input csv and the metrics itself are described on the
challenge website as well.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging

from absl import app
from absl import flags
import pandas as pd
from google.protobuf import text_format

from object_detection.metrics import io_utils
from object_detection.metrics import oid_challenge_evaluation_utils as utils
from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import object_detection_evaluation

flags.DEFINE_string('input_annotations_boxes', None,
                    'File with groundtruth boxes annotations.')
flags.DEFINE_string('input_annotations_labels', None,
                    'File with groundtruth labels annotations.')
flags.DEFINE_string(
    'input_predictions', None,
    """File with detection predictions; NOTE: no postprocessing is applied in the evaluation script."""
)
flags.DEFINE_string('input_class_labelmap', None,
                    'Open Images Challenge labelmap.')
flags.DEFINE_string('output_metrics', None, 'Output file with csv metrics.')
flags.DEFINE_string(
    'input_annotations_segm', None,
    'File with groundtruth instance segmentation annotations [OPTIONAL].')

FLAGS = flags.FLAGS
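
# Informational note on the expected input columns (an inference from the
# fields consumed by this script and by oid_challenge_evaluation_utils, not an
# authoritative schema; the official format is documented on the challenge
# website linked in the module docstring):
#   * groundtruth boxes CSV:  ImageID, LabelName and box coordinates
#     (XMin, XMax, YMin, YMax), among others.
#   * image-level labels CSV: ImageID, LabelName, Confidence.
#   * predictions CSV:        ImageID, LabelName, Score and box coordinates,
#     plus a Mask column when instance masks are evaluated.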


def _load_labelmap(labelmap_path):
  """Loads labelmap from the labelmap path.

  Args:
    labelmap_path: Path to the labelmap.

  Returns:
    A dictionary mapping class name to class numerical id.
    A list with dictionaries, one dictionary per category.
  """
  label_map = string_int_label_map_pb2.StringIntLabelMap()
  with open(labelmap_path, 'r') as fid:
    label_map_string = fid.read()
    text_format.Merge(label_map_string, label_map)
  labelmap_dict = {}
  categories = []
  for item in label_map.item:
    labelmap_dict[item.name] = item.id
    categories.append({'id': item.id, 'name': item.name})
  return labelmap_dict, categories
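
# For reference, each entry in the labelmap parsed above is a text-format
# StringIntLabelMapItem message. The values below are illustrative only; the
# official Challenge labelmap is distributed on the Open Images website:
#
#   item {
#     name: "/m/061hd_"
#     id: 1
#     display_name: "Infant bed"
#   }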


def main(unused_argv):
  flags.mark_flag_as_required('input_annotations_boxes')
  flags.mark_flag_as_required('input_annotations_labels')
  flags.mark_flag_as_required('input_predictions')
  flags.mark_flag_as_required('input_class_labelmap')
  flags.mark_flag_as_required('output_metrics')

  all_location_annotations = pd.read_csv(FLAGS.input_annotations_boxes)
  all_label_annotations = pd.read_csv(FLAGS.input_annotations_labels)
  all_label_annotations.rename(
      columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)

  is_instance_segmentation_eval = False
  if FLAGS.input_annotations_segm:
    is_instance_segmentation_eval = True
    all_segm_annotations = pd.read_csv(FLAGS.input_annotations_segm)
    # Note: this part is unstable as it requires the floating point numbers in
    # both CSVs to be exactly the same; it will be replaced by a more stable
    # solution: merge on LabelName and ImageID and filter down by IoU.
    all_location_annotations = utils.merge_boxes_and_masks(
        all_location_annotations, all_segm_annotations)
  all_annotations = pd.concat(
      [all_location_annotations, all_label_annotations])

  class_label_map, categories = _load_labelmap(FLAGS.input_class_labelmap)
  challenge_evaluator = (
      object_detection_evaluation.OpenImagesChallengeEvaluator(
          categories, evaluate_masks=is_instance_segmentation_eval))

  all_predictions = pd.read_csv(FLAGS.input_predictions)
  images_processed = 0
  for _, groundtruth in enumerate(all_annotations.groupby('ImageID')):
    logging.info('Processing image %d', images_processed)
    image_id, image_groundtruth = groundtruth
    groundtruth_dictionary = utils.build_groundtruth_dictionary(
        image_groundtruth, class_label_map)
    challenge_evaluator.add_single_ground_truth_image_info(
        image_id, groundtruth_dictionary)

    prediction_dictionary = utils.build_predictions_dictionary(
        all_predictions.loc[all_predictions['ImageID'] == image_id],
        class_label_map)
    challenge_evaluator.add_single_detected_image_info(image_id,
                                                       prediction_dictionary)
    images_processed += 1

  metrics = challenge_evaluator.evaluate()

  with open(FLAGS.output_metrics, 'w') as fid:
    io_utils.write_csv(fid, metrics)


if __name__ == '__main__':
  app.run(main)