Columns (string length ranges):
  code       1 to 5.19M
  package    1 to 81
  path       9 to 304
  filename   4 to 145
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.box_coder.keypoint_box_coder.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.box_coders import keypoint_box_coder from object_detection.core import box_list from object_detection.core import standard_fields as fields from object_detection.utils import test_case class KeypointBoxCoderTest(test_case.TestCase): def test_get_correct_relative_codes_after_encoding(self): boxes = np.array([[10., 10., 20., 15.], [0.2, 0.1, 0.5, 0.4]], np.float32) keypoints = np.array([[[15., 12.], [10., 15.]], [[0.5, 0.3], [0.2, 0.4]]], np.float32) num_keypoints = len(keypoints[0]) anchors = np.array([[15., 12., 30., 18.], [0.1, 0.0, 0.7, 0.9]], np.float32) expected_rel_codes = [ [-0.5, -0.416666, -0.405465, -0.182321, -0.5, -0.5, -0.833333, 0.], [-0.083333, -0.222222, -0.693147, -1.098612, 0.166667, -0.166667, -0.333333, -0.055556] ] def graph_fn(boxes, keypoints, anchors): boxes = box_list.BoxList(boxes) boxes.add_field(fields.BoxListFields.keypoints, keypoints) anchors = box_list.BoxList(anchors) coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints) rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, atol=1e-04) def test_get_correct_relative_codes_after_encoding_with_scaling(self): boxes = np.array([[10., 10., 20., 15.], [0.2, 0.1, 0.5, 0.4]], np.float32) keypoints = np.array([[[15., 12.], [10., 15.]], [[0.5, 0.3], [0.2, 0.4]]], np.float32) num_keypoints = len(keypoints[0]) anchors = np.array([[15., 12., 30., 18.], [0.1, 0.0, 0.7, 0.9]], np.float32) expected_rel_codes = [ [-1., -1.25, -1.62186, -0.911608, -1.0, -1.5, -1.666667, 0.], [-0.166667, -0.666667, -2.772588, -5.493062, 0.333333, -0.5, -0.666667, -0.166667] ] def graph_fn(boxes, keypoints, anchors): scale_factors = [2, 3, 4, 5] boxes = box_list.BoxList(boxes) boxes.add_field(fields.BoxListFields.keypoints, keypoints) anchors = box_list.BoxList(anchors) coder = keypoint_box_coder.KeypointBoxCoder( num_keypoints, scale_factors=scale_factors) rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, atol=1e-04) def test_get_correct_boxes_after_decoding(self): anchors = np.array([[15., 12., 30., 18.], [0.1, 0.0, 0.7, 0.9]], np.float32) rel_codes = np.array([ [-0.5, -0.416666, -0.405465, -0.182321, -0.5, -0.5, -0.833333, 0.], [-0.083333, -0.222222, -0.693147, -1.098612, 0.166667, -0.166667, -0.333333, -0.055556] ], np.float32) expected_boxes = [[10., 10., 20., 15.], [0.2, 0.1, 0.5, 0.4]] expected_keypoints = [[[15., 12.], [10., 15.]], [[0.5, 0.3], [0.2, 0.4]]] num_keypoints = len(expected_keypoints[0]) def graph_fn(rel_codes, anchors): anchors = 
box_list.BoxList(anchors) coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints) boxes = coder.decode(rel_codes, anchors) return boxes.get(), boxes.get_field(fields.BoxListFields.keypoints) boxes_out, keypoints_out = self.execute(graph_fn, [rel_codes, anchors]) self.assertAllClose(keypoints_out, expected_keypoints, rtol=1e-04, atol=1e-04) self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, atol=1e-04) def test_get_correct_boxes_after_decoding_with_scaling(self): anchors = np.array([[15., 12., 30., 18.], [0.1, 0.0, 0.7, 0.9]], np.float32) rel_codes = np.array([ [-1., -1.25, -1.62186, -0.911608, -1.0, -1.5, -1.666667, 0.], [-0.166667, -0.666667, -2.772588, -5.493062, 0.333333, -0.5, -0.666667, -0.166667] ], np.float32) expected_boxes = [[10., 10., 20., 15.], [0.2, 0.1, 0.5, 0.4]] expected_keypoints = [[[15., 12.], [10., 15.]], [[0.5, 0.3], [0.2, 0.4]]] num_keypoints = len(expected_keypoints[0]) def graph_fn(rel_codes, anchors): scale_factors = [2, 3, 4, 5] anchors = box_list.BoxList(anchors) coder = keypoint_box_coder.KeypointBoxCoder( num_keypoints, scale_factors=scale_factors) boxes = coder.decode(rel_codes, anchors) return boxes.get(), boxes.get_field(fields.BoxListFields.keypoints) boxes_out, keypoints_out = self.execute(graph_fn, [rel_codes, anchors]) self.assertAllClose(keypoints_out, expected_keypoints, rtol=1e-04, atol=1e-04) self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, atol=1e-04) def test_very_small_width_nan_after_encoding(self): boxes = np.array([[10., 10., 10.0000001, 20.]], np.float32) keypoints = np.array([[[10., 10.], [10.0000001, 20.]]], np.float32) anchors = np.array([[15., 12., 30., 18.]], np.float32) expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826, -0.833333, -0.833333, -0.833333, 0.833333]] def graph_fn(boxes, keypoints, anchors): boxes = box_list.BoxList(boxes) boxes.add_field(fields.BoxListFields.keypoints, keypoints) anchors = box_list.BoxList(anchors) coder = keypoint_box_coder.KeypointBoxCoder(2) rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, atol=1e-04) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/keypoint_box_coder_test.py
keypoint_box_coder_test.py
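The expected relative codes in test_get_correct_relative_codes_after_encoding above follow directly from the coding schema documented in keypoint_box_coder.py (the next file). A minimal NumPy sketch that reproduces only the first box/anchor row of that test; the variable names are illustrative and not part of the library:

import numpy as np

# First row from the test: box [ymin, xmin, ymax, xmax], its two keypoints, and the anchor.
box = np.array([10., 10., 20., 15.])
keypoints = np.array([[15., 12.], [10., 15.]])
anchor = np.array([15., 12., 30., 18.])

# Center/size form of the box and the anchor.
ycenter, xcenter = (box[0] + box[2]) / 2, (box[1] + box[3]) / 2                  # 15.0, 12.5
h, w = box[2] - box[0], box[3] - box[1]                                          # 10.0, 5.0
ycenter_a, xcenter_a = (anchor[0] + anchor[2]) / 2, (anchor[1] + anchor[3]) / 2  # 22.5, 15.0
ha, wa = anchor[2] - anchor[0], anchor[3] - anchor[1]                            # 15.0, 6.0

# [ty, tx, th, tw] followed by [tky, tkx] for each keypoint.
rel_code = [
    (ycenter - ycenter_a) / ha,  # -0.5
    (xcenter - xcenter_a) / wa,  # -0.416666
    np.log(h / ha),              # -0.405465
    np.log(w / wa),              # -0.182321
]
for ky, kx in keypoints:
    rel_code += [(ky - ycenter_a) / ha, (kx - xcenter_a) / wa]  # -0.5, -0.5, -0.833333, 0.

print(np.round(rel_code, 6))  # matches expected_rel_codes[0] in the test above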
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Keypoint box coder.

The keypoint box coder follows the coding schema described below (this is
similar to the FasterRcnnBoxCoder, except that it encodes keypoints in
addition to box coordinates):

  ty = (y - ya) / ha
  tx = (x - xa) / wa
  th = log(h / ha)
  tw = log(w / wa)
  tky0 = (ky0 - ya) / ha
  tkx0 = (kx0 - xa) / wa
  tky1 = (ky1 - ya) / ha
  tkx1 = (kx1 - xa) / wa
  ...

where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively. ky0, kx0, ky1, kx1, ... denote the
keypoints' coordinates, and tky0, tkx0, tky1, tkx1, ... denote the
anchor-encoded keypoint coordinates.
"""

import tensorflow.compat.v1 as tf

from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import standard_fields as fields

EPSILON = 1e-8


class KeypointBoxCoder(box_coder.BoxCoder):
  """Keypoint box coder."""

  def __init__(self, num_keypoints, scale_factors=None):
    """Constructor for KeypointBoxCoder.

    Args:
      num_keypoints: Number of keypoints to encode/decode.
      scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
        In addition to scaling ty and tx, the first 2 scalars are used to scale
        the y and x coordinates of the keypoints as well. If set to None, does
        not perform scaling.
    """
    self._num_keypoints = num_keypoints

    if scale_factors:
      assert len(scale_factors) == 4
      for scalar in scale_factors:
        assert scalar > 0
    self._scale_factors = scale_factors
    self._keypoint_scale_factors = None
    if scale_factors is not None:
      self._keypoint_scale_factors = tf.expand_dims(
          tf.tile([
              tf.cast(scale_factors[0], dtype=tf.float32),
              tf.cast(scale_factors[1], dtype=tf.float32)
          ], [num_keypoints]), 1)

  @property
  def code_size(self):
    return 4 + self._num_keypoints * 2

  def _encode(self, boxes, anchors):
    """Encode a box and keypoint collection with respect to anchor collection.

    Args:
      boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are
        tensors with the shape [N, 4], and keypoints are tensors with the shape
        [N, num_keypoints, 2].
      anchors: BoxList of anchors.

    Returns:
      a tensor representing N anchor-encoded boxes of the format
      [ty, tx, th, tw, tky0, tkx0, tky1, tkx1, ...] where tky0 and tkx0
      represent the y and x coordinates of the first keypoint, tky1 and tkx1
      represent the y and x coordinates of the second keypoint, and so on.
    """
    # Convert anchors to the center coordinate representation.
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
    keypoints = boxes.get_field(fields.BoxListFields.keypoints)
    keypoints = tf.transpose(tf.reshape(keypoints,
                                        [-1, self._num_keypoints * 2]))
    num_boxes = boxes.num_boxes()

    # Avoid NaN in division and log below.
    ha += EPSILON
    wa += EPSILON
    h += EPSILON
    w += EPSILON

    tx = (xcenter - xcenter_a) / wa
    ty = (ycenter - ycenter_a) / ha
    tw = tf.log(w / wa)
    th = tf.log(h / ha)

    tiled_anchor_centers = tf.tile(
        tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1])
    tiled_anchor_sizes = tf.tile(
        tf.stack([ha, wa]), [self._num_keypoints, 1])
    tkeypoints = (keypoints - tiled_anchor_centers) / tiled_anchor_sizes

    # Scales location targets as used in paper for joint training.
    if self._scale_factors:
      ty *= self._scale_factors[0]
      tx *= self._scale_factors[1]
      th *= self._scale_factors[2]
      tw *= self._scale_factors[3]
      tkeypoints *= tf.tile(self._keypoint_scale_factors, [1, num_boxes])

    tboxes = tf.stack([ty, tx, th, tw])
    return tf.transpose(tf.concat([tboxes, tkeypoints], 0))

  def _decode(self, rel_codes, anchors):
    """Decode relative codes to boxes and keypoints.

    Args:
      rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N
        anchor-encoded boxes and keypoints
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes and keypoints.
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()

    num_codes = tf.shape(rel_codes)[0]
    result = tf.unstack(tf.transpose(rel_codes))
    ty, tx, th, tw = result[:4]
    tkeypoints = result[4:]
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      th /= self._scale_factors[2]
      tw /= self._scale_factors[3]
      tkeypoints /= tf.tile(self._keypoint_scale_factors, [1, num_codes])

    w = tf.exp(tw) * wa
    h = tf.exp(th) * ha
    ycenter = ty * ha + ycenter_a
    xcenter = tx * wa + xcenter_a
    ymin = ycenter - h / 2.
    xmin = xcenter - w / 2.
    ymax = ycenter + h / 2.
    xmax = xcenter + w / 2.
    decoded_boxes_keypoints = box_list.BoxList(
        tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))

    tiled_anchor_centers = tf.tile(
        tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1])
    tiled_anchor_sizes = tf.tile(
        tf.stack([ha, wa]), [self._num_keypoints, 1])
    keypoints = tkeypoints * tiled_anchor_sizes + tiled_anchor_centers
    keypoints = tf.reshape(tf.transpose(keypoints),
                           [-1, self._num_keypoints, 2])

    decoded_boxes_keypoints.add_field(fields.BoxListFields.keypoints, keypoints)
    return decoded_boxes_keypoints
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/keypoint_box_coder.py
keypoint_box_coder.py
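A minimal round-trip sketch of KeypointBoxCoder, mirroring the usage pattern in keypoint_box_coder_test.py above (it assumes the object_detection package and its BoxList/standard_fields modules are importable, as in the tests):

import tensorflow.compat.v1 as tf

from object_detection.box_coders import keypoint_box_coder
from object_detection.core import box_list
from object_detection.core import standard_fields as fields

# One box with two keypoints, and one anchor (same values as in the test file).
boxes = box_list.BoxList(tf.constant([[10., 10., 20., 15.]]))
boxes.add_field(fields.BoxListFields.keypoints,
                tf.constant([[[15., 12.], [10., 15.]]]))
anchors = box_list.BoxList(tf.constant([[15., 12., 30., 18.]]))

coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints=2)
rel_codes = coder.encode(boxes, anchors)    # shape [1, 4 + 2 * 2], i.e. [1, coder.code_size]
decoded = coder.decode(rel_codes, anchors)  # BoxList holding the decoded boxes
decoded_boxes = decoded.get()
decoded_keypoints = decoded.get_field(fields.BoxListFields.keypoints)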
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.box_coder.mean_stddev_boxcoder."""
import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
from object_detection.utils import test_case


class MeanStddevBoxCoderTest(test_case.TestCase):

  def testGetCorrectRelativeCodesAfterEncoding(self):
    boxes = np.array([[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]], np.float32)
    anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]],
                       np.float32)
    expected_rel_codes = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]]

    def graph_fn(boxes, anchors):
      anchors = box_list.BoxList(anchors)
      boxes = box_list.BoxList(boxes)
      coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
      rel_codes = coder.encode(boxes, anchors)
      return rel_codes

    rel_codes_out = self.execute(graph_fn, [boxes, anchors])
    self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
                        atol=1e-04)

  def testGetCorrectBoxesAfterDecoding(self):
    rel_codes = np.array([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]],
                         np.float32)
    expected_box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
    anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]],
                       np.float32)

    def graph_fn(rel_codes, anchors):
      anchors = box_list.BoxList(anchors)
      coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
      decoded_boxes = coder.decode(rel_codes, anchors).get()
      return decoded_boxes

    decoded_boxes_out = self.execute(graph_fn, [rel_codes, anchors])
    self.assertAllClose(decoded_boxes_out, expected_box_corners, rtol=1e-04,
                        atol=1e-04)


if __name__ == '__main__':
  tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/mean_stddev_box_coder_test.py
mean_stddev_box_coder_test.py
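The expected codes in testGetCorrectRelativeCodesAfterEncoding are consistent with a per-corner (boxes - anchors) / stddev encoding; a short NumPy check of the second row (illustrative arithmetic only, not the MeanStddevBoxCoder implementation itself):

import numpy as np

box = np.array([0.0, 0.0, 0.5, 0.5])
anchor = np.array([0.5, 0.5, 1.0, 0.8])
stddev = 0.1

rel_code = (box - anchor) / stddev
print(rel_code)  # [-5. -5. -5. -3.], matching expected_rel_codes[1]; decoding reverses this.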
# Lint as: python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for context_rcnn_lib.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from absl.testing import parameterized import tensorflow.compat.v1 as tf from object_detection.meta_architectures import context_rcnn_lib from object_detection.utils import test_case from object_detection.utils import tf_version _NEGATIVE_PADDING_VALUE = -100000 @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ContextRcnnLibTest(parameterized.TestCase, test_case.TestCase, tf.test.TestCase): """Tests for the functions in context_rcnn_lib.""" def test_compute_valid_mask(self): num_elements = tf.constant(3, tf.int32) num_valid_elementss = tf.constant((1, 2), tf.int32) valid_mask = context_rcnn_lib.compute_valid_mask(num_valid_elementss, num_elements) expected_valid_mask = tf.constant([[1, 0, 0], [1, 1, 0]], tf.float32) self.assertAllEqual(valid_mask, expected_valid_mask) def test_filter_weight_value(self): weights = tf.ones((2, 3, 2), tf.float32) * 4 values = tf.ones((2, 2, 4), tf.float32) valid_mask = tf.constant([[True, True], [True, False]], tf.bool) filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( weights, values, valid_mask) expected_weights = tf.constant([[[4, 4], [4, 4], [4, 4]], [[4, _NEGATIVE_PADDING_VALUE], [4, _NEGATIVE_PADDING_VALUE], [4, _NEGATIVE_PADDING_VALUE]]]) expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [0, 0, 0, 0]]]) self.assertAllEqual(filtered_weights, expected_weights) self.assertAllEqual(filtered_values, expected_values) # Changes the valid_mask so the results will be different. valid_mask = tf.constant([[True, True], [False, False]], tf.bool) filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( weights, values, valid_mask) expected_weights = tf.constant( [[[4, 4], [4, 4], [4, 4]], [[_NEGATIVE_PADDING_VALUE, _NEGATIVE_PADDING_VALUE], [_NEGATIVE_PADDING_VALUE, _NEGATIVE_PADDING_VALUE], [_NEGATIVE_PADDING_VALUE, _NEGATIVE_PADDING_VALUE]]]) expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0]]]) self.assertAllEqual(filtered_weights, expected_weights) self.assertAllEqual(filtered_values, expected_values) @parameterized.parameters((2, True, True), (2, False, True), (10, True, False), (10, False, False)) def test_project_features(self, projection_dimension, is_training, normalize): features = tf.ones([2, 3, 4], tf.float32) projected_features = context_rcnn_lib.project_features( features, projection_dimension, is_training=is_training, normalize=normalize) # Makes sure the shape is correct. 
self.assertAllEqual(projected_features.shape, [2, 3, projection_dimension]) @parameterized.parameters( (2, 10, 1), (3, 10, 2), (4, 20, 3), (5, 20, 4), (7, 20, 5), ) def test_attention_block(self, bottleneck_dimension, output_dimension, attention_temperature): input_features = tf.ones([2, 3, 4], tf.float32) context_features = tf.ones([2, 2, 3], tf.float32) valid_mask = tf.constant([[True, True], [False, False]], tf.bool) box_valid_mask = tf.constant([[True, True, True], [False, False, False]], tf.bool) is_training = False output_features = context_rcnn_lib.attention_block( input_features, context_features, bottleneck_dimension, output_dimension, attention_temperature, keys_values_valid_mask=valid_mask, queries_valid_mask=box_valid_mask, is_training=is_training) # Makes sure the shape is correct. self.assertAllEqual(output_features.shape, [2, 3, output_dimension]) @parameterized.parameters(True, False) def test_compute_box_context_attention(self, is_training): box_features = tf.ones([2 * 3, 4, 4, 4], tf.float32) context_features = tf.ones([2, 5, 6], tf.float32) valid_context_size = tf.constant((2, 3), tf.int32) num_proposals = tf.constant((2, 3), tf.int32) bottleneck_dimension = 10 attention_temperature = 1 attention_features = context_rcnn_lib._compute_box_context_attention( box_features, num_proposals, context_features, valid_context_size, bottleneck_dimension, attention_temperature, is_training, max_num_proposals=3) # Makes sure the shape is correct. self.assertAllEqual(attention_features.shape, [2, 3, 1, 1, 4]) @parameterized.parameters(True, False) def test_compute_box_context_attention_with_self_attention(self, is_training): box_features = tf.ones([2 * 3, 4, 4, 4], tf.float32) context_features = tf.ones([2, 5, 6], tf.float32) valid_context_size = tf.constant((2, 3), tf.int32) num_proposals = tf.constant((2, 3), tf.int32) bottleneck_dimension = 10 attention_temperature = 1 attention_features = context_rcnn_lib._compute_box_context_attention( box_features, num_proposals, context_features, valid_context_size, bottleneck_dimension, attention_temperature, is_training, max_num_proposals=3, use_self_attention=True) # Makes sure the shape is correct. self.assertAllEqual(attention_features.shape, [2, 3, 1, 1, 4]) @parameterized.parameters(True, False) def test_compute_box_context_attention_with_layers_and_heads( self, is_training): box_features = tf.ones([2 * 3, 4, 4, 4], tf.float32) context_features = tf.ones([2, 5, 6], tf.float32) valid_context_size = tf.constant((2, 3), tf.int32) num_proposals = tf.constant((2, 3), tf.int32) bottleneck_dimension = 10 attention_temperature = 1 attention_features = context_rcnn_lib._compute_box_context_attention( box_features, num_proposals, context_features, valid_context_size, bottleneck_dimension, attention_temperature, is_training, max_num_proposals=3, num_attention_layers=3, num_attention_heads=3) # Makes sure the shape is correct. self.assertAllEqual(attention_features.shape, [2, 3, 1, 1, 4]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_lib_tf1_test.py
context_rcnn_lib_tf1_test.py
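The compute_valid_mask expectation in the test above (valid counts (1, 2) out of 3 elements giving [[1, 0, 0], [1, 1, 0]]) is the standard sequence-mask pattern. A standalone sketch that reproduces the same expected tensor; it is not the context_rcnn_lib implementation itself:

import tensorflow.compat.v1 as tf

num_elements = 3
num_valid_elements = tf.constant([1, 2], tf.int32)

# One row per batch entry; 1.0 marks a valid slot, 0.0 marks padding.
valid_mask = tf.cast(tf.sequence_mask(num_valid_elements, num_elements),
                     tf.float32)
# Expected value, as asserted in test_compute_valid_mask:
# [[1., 0., 0.],
#  [1., 1., 0.]]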
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the CenterNet Meta architecture code.""" from __future__ import division import functools import unittest from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from object_detection.builders import post_processing_builder from object_detection.core import keypoint_ops from object_detection.core import losses from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.core import target_assigner as cn_assigner from object_detection.meta_architectures import center_net_meta_arch as cnma from object_detection.models import center_net_resnet_feature_extractor from object_detection.protos import post_processing_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetMetaArchPredictionHeadTest( test_case.TestCase, parameterized.TestCase): """Test CenterNet meta architecture prediction head.""" @parameterized.parameters([True, False]) def test_prediction_head(self, use_depthwise): head = cnma.make_prediction_net(num_out_channels=7, use_depthwise=use_depthwise) output = head(np.zeros((4, 128, 128, 8))) self.assertEqual((4, 128, 128, 7), output.shape) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetMetaArchHelpersTest(test_case.TestCase, parameterized.TestCase): """Test for CenterNet meta architecture related functions.""" def test_row_col_channel_indices_from_flattened_indices(self): """Tests that the computation of row, col, channel indices is correct.""" r_grid, c_grid, ch_grid = (np.zeros((5, 4, 3), dtype=np.int), np.zeros((5, 4, 3), dtype=np.int), np.zeros((5, 4, 3), dtype=np.int)) r_grid[..., 0] = r_grid[..., 1] = r_grid[..., 2] = np.array( [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]] ) c_grid[..., 0] = c_grid[..., 1] = c_grid[..., 2] = np.array( [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]] ) for i in range(3): ch_grid[..., i] = i indices = np.arange(60) ri, ci, chi = cnma.row_col_channel_indices_from_flattened_indices( indices, 4, 3) np.testing.assert_array_equal(ri, r_grid.flatten()) np.testing.assert_array_equal(ci, c_grid.flatten()) np.testing.assert_array_equal(chi, ch_grid.flatten()) def test_row_col_indices_from_flattened_indices(self): """Tests that the computation of row, col indices is correct.""" r_grid = np.array([[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]) c_grid = np.array([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]) indices = np.arange(20) ri, ci, = cnma.row_col_indices_from_flattened_indices(indices, 4) np.testing.assert_array_equal(ri, r_grid.flatten()) np.testing.assert_array_equal(ci, c_grid.flatten()) def 
test_flattened_indices_from_row_col_indices(self): r = np.array( [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]] ) c = np.array( [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]] ) idx = cnma.flattened_indices_from_row_col_indices(r, c, 4) np.testing.assert_array_equal(np.arange(12), idx.flatten()) def test_get_valid_anchor_weights_in_flattened_image(self): """Tests that the anchor weights are valid upon flattening out.""" valid_weights = np.zeros((2, 5, 5), dtype=np.float) valid_weights[0, :3, :4] = 1.0 valid_weights[1, :2, :2] = 1.0 def graph_fn(): true_image_shapes = tf.constant([[3, 4], [2, 2]]) w = cnma.get_valid_anchor_weights_in_flattened_image( true_image_shapes, 5, 5) return w w = self.execute(graph_fn, []) np.testing.assert_allclose(w, valid_weights.reshape(2, -1)) self.assertEqual((2, 25), w.shape) def test_convert_strided_predictions_to_normalized_boxes(self): """Tests that boxes have correct coordinates in normalized input space.""" def graph_fn(): boxes = np.zeros((2, 3, 4), dtype=np.float32) boxes[0] = [[10, 20, 30, 40], [20, 30, 50, 100], [50, 60, 100, 180]] boxes[1] = [[-5, -5, 5, 5], [45, 60, 110, 120], [150, 150, 200, 250]] true_image_shapes = tf.constant([[100, 90, 3], [150, 150, 3]]) clipped_boxes = ( cnma.convert_strided_predictions_to_normalized_boxes( boxes, 2, true_image_shapes)) return clipped_boxes clipped_boxes = self.execute(graph_fn, []) expected_boxes = np.zeros((2, 3, 4), dtype=np.float32) expected_boxes[0] = [[0.2, 4./9, 0.6, 8./9], [0.4, 2./3, 1, 1], [1, 1, 1, 1]] expected_boxes[1] = [[0., 0, 1./15, 1./15], [3./5, 4./5, 1, 1], [1, 1, 1, 1]] np.testing.assert_allclose(expected_boxes, clipped_boxes) @parameterized.parameters( {'clip_to_window': True}, {'clip_to_window': False} ) def test_convert_strided_predictions_to_normalized_keypoints( self, clip_to_window): """Tests that keypoints have correct coordinates in normalized coords.""" keypoint_coords_np = np.array( [ # Example 0. [ [[-10., 8.], [60., 22.], [60., 120.]], [[20., 20.], [0., 0.], [0., 0.]], ], # Example 1. [ [[40., 50.], [20., 160.], [200., 150.]], [[10., 0.], [40., 10.], [0., 0.]], ], ], dtype=np.float32) keypoint_scores_np = np.array( [ # Example 0. [ [1.0, 0.9, 0.2], [0.7, 0.0, 0.0], ], # Example 1. [ [1.0, 1.0, 0.2], [0.7, 0.6, 0.0], ], ], dtype=np.float32) def graph_fn(): keypoint_coords = tf.constant(keypoint_coords_np, dtype=tf.float32) keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) true_image_shapes = tf.constant([[320, 400, 3], [640, 640, 3]]) stride = 4 keypoint_coords_out, keypoint_scores_out = ( cnma.convert_strided_predictions_to_normalized_keypoints( keypoint_coords, keypoint_scores, stride, true_image_shapes, clip_to_window)) return keypoint_coords_out, keypoint_scores_out keypoint_coords_out, keypoint_scores_out = self.execute(graph_fn, []) if clip_to_window: expected_keypoint_coords_np = np.array( [ # Example 0. [ [[0.0, 0.08], [0.75, 0.22], [0.75, 1.0]], [[0.25, 0.2], [0., 0.], [0.0, 0.0]], ], # Example 1. [ [[0.25, 0.3125], [0.125, 1.0], [1.0, 0.9375]], [[0.0625, 0.], [0.25, 0.0625], [0., 0.]], ], ], dtype=np.float32) expected_keypoint_scores_np = np.array( [ # Example 0. [ [0.0, 0.9, 0.0], [0.7, 0.0, 0.0], ], # Example 1. [ [1.0, 1.0, 0.0], [0.7, 0.6, 0.0], ], ], dtype=np.float32) else: expected_keypoint_coords_np = np.array( [ # Example 0. [ [[-0.125, 0.08], [0.75, 0.22], [0.75, 1.2]], [[0.25, 0.2], [0., 0.], [0., 0.]], ], # Example 1. 
[ [[0.25, 0.3125], [0.125, 1.0], [1.25, 0.9375]], [[0.0625, 0.], [0.25, 0.0625], [0., 0.]], ], ], dtype=np.float32) expected_keypoint_scores_np = np.array( [ # Example 0. [ [1.0, 0.9, 0.2], [0.7, 0.0, 0.0], ], # Example 1. [ [1.0, 1.0, 0.2], [0.7, 0.6, 0.0], ], ], dtype=np.float32) np.testing.assert_allclose(expected_keypoint_coords_np, keypoint_coords_out) np.testing.assert_allclose(expected_keypoint_scores_np, keypoint_scores_out) def test_convert_strided_predictions_to_instance_masks(self): def graph_fn(): boxes = tf.constant( [ [[0.5, 0.5, 1.0, 1.0], [0.0, 0.5, 0.5, 1.0], [0.0, 0.0, 0.0, 0.0]], ], tf.float32) classes = tf.constant( [ [0, 1, 0], ], tf.int32) masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32) masks_np[0, :, 2:, 0] = 1 # Class 0. masks_np[0, :, :3, 1] = 1 # Class 1. masks = tf.constant(masks_np) true_image_shapes = tf.constant([[6, 8, 3]]) instance_masks, _ = cnma.convert_strided_predictions_to_instance_masks( boxes, classes, masks, stride=2, mask_height=2, mask_width=2, true_image_shapes=true_image_shapes) return instance_masks instance_masks = self.execute_cpu(graph_fn, []) expected_instance_masks = np.array( [ [ # Mask 0 (class 0). [[1, 1], [1, 1]], # Mask 1 (class 1). [[1, 0], [1, 0]], # Mask 2 (class 0). [[0, 0], [0, 0]], ] ]) np.testing.assert_array_equal(expected_instance_masks, instance_masks) def test_convert_strided_predictions_raises_error_with_one_tensor(self): def graph_fn(): boxes = tf.constant( [ [[0.5, 0.5, 1.0, 1.0], [0.0, 0.5, 0.5, 1.0], [0.0, 0.0, 0.0, 0.0]], ], tf.float32) classes = tf.constant( [ [0, 1, 0], ], tf.int32) masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32) masks_np[0, :, 2:, 0] = 1 # Class 0. masks_np[0, :, :3, 1] = 1 # Class 1. masks = tf.constant(masks_np) true_image_shapes = tf.constant([[6, 8, 3]]) densepose_part_heatmap = tf.random.uniform( [1, 4, 4, 24]) instance_masks, _ = cnma.convert_strided_predictions_to_instance_masks( boxes, classes, masks, true_image_shapes, densepose_part_heatmap=densepose_part_heatmap, densepose_surface_coords=None) return instance_masks with self.assertRaises(ValueError): self.execute_cpu(graph_fn, []) def test_crop_and_threshold_masks(self): boxes_np = np.array( [[0., 0., 0.5, 0.5], [0.25, 0.25, 1.0, 1.0]], dtype=np.float32) classes_np = np.array([0, 2], dtype=np.int32) masks_np = np.zeros((4, 4, _NUM_CLASSES), dtype=np.float32) masks_np[0, 0, 0] = 0.8 masks_np[1, 1, 0] = 0.6 masks_np[3, 3, 2] = 0.7 part_heatmap_np = np.zeros((4, 4, _DENSEPOSE_NUM_PARTS), dtype=np.float32) part_heatmap_np[0, 0, 4] = 1 part_heatmap_np[0, 0, 2] = 0.6 # Lower scoring. part_heatmap_np[1, 1, 8] = 0.2 part_heatmap_np[3, 3, 4] = 0.5 surf_coords_np = np.zeros((4, 4, 2 * _DENSEPOSE_NUM_PARTS), dtype=np.float32) surf_coords_np[:, :, 8:10] = 0.2, 0.9 surf_coords_np[:, :, 16:18] = 0.3, 0.5 true_height, true_width = 10, 10 input_height, input_width = 10, 10 mask_height = 4 mask_width = 4 def graph_fn(): elems = [ tf.constant(boxes_np), tf.constant(classes_np), tf.constant(masks_np), tf.constant(part_heatmap_np), tf.constant(surf_coords_np), tf.constant(true_height, dtype=tf.int32), tf.constant(true_width, dtype=tf.int32) ] part_masks, surface_coords = cnma.crop_and_threshold_masks( elems, input_height, input_width, mask_height=mask_height, mask_width=mask_width, densepose_class_index=0) return part_masks, surface_coords part_masks, surface_coords = self.execute_cpu(graph_fn, []) expected_part_masks = np.zeros((2, 4, 4), dtype=np.uint8) expected_part_masks[0, 0, 0] = 5 # Recall classes are 1-indexed in output. 
expected_part_masks[0, 2, 2] = 9 # Recall classes are 1-indexed in output. expected_part_masks[1, 3, 3] = 1 # Standard instance segmentation mask. expected_surface_coords = np.zeros((2, 4, 4, 2), dtype=np.float32) expected_surface_coords[0, 0, 0, :] = 0.2, 0.9 expected_surface_coords[0, 2, 2, :] = 0.3, 0.5 np.testing.assert_allclose(expected_part_masks, part_masks) np.testing.assert_allclose(expected_surface_coords, surface_coords) def test_gather_surface_coords_for_parts(self): surface_coords_cropped_np = np.zeros((2, 5, 5, _DENSEPOSE_NUM_PARTS, 2), dtype=np.float32) surface_coords_cropped_np[0, 0, 0, 5] = 0.3, 0.4 surface_coords_cropped_np[0, 1, 0, 9] = 0.5, 0.6 highest_scoring_part_np = np.zeros((2, 5, 5), dtype=np.int32) highest_scoring_part_np[0, 0, 0] = 5 highest_scoring_part_np[0, 1, 0] = 9 def graph_fn(): surface_coords_cropped = tf.constant(surface_coords_cropped_np, tf.float32) highest_scoring_part = tf.constant(highest_scoring_part_np, tf.int32) surface_coords_gathered = cnma.gather_surface_coords_for_parts( surface_coords_cropped, highest_scoring_part) return surface_coords_gathered surface_coords_gathered = self.execute_cpu(graph_fn, []) np.testing.assert_allclose([0.3, 0.4], surface_coords_gathered[0, 0, 0]) np.testing.assert_allclose([0.5, 0.6], surface_coords_gathered[0, 1, 0]) def test_top_k_feature_map_locations(self): feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) feature_map_np[0, 2, 0, 1] = 1.0 feature_map_np[0, 2, 1, 1] = 0.9 # Get's filtered due to max pool. feature_map_np[0, 0, 1, 0] = 0.7 feature_map_np[0, 2, 2, 0] = 0.5 feature_map_np[0, 2, 2, 1] = -0.3 feature_map_np[1, 2, 1, 1] = 0.7 feature_map_np[1, 1, 0, 0] = 0.4 feature_map_np[1, 1, 2, 0] = 0.1 def graph_fn(): feature_map = tf.constant(feature_map_np) scores, y_inds, x_inds, channel_inds = ( cnma.top_k_feature_map_locations( feature_map, max_pool_kernel_size=3, k=3)) return scores, y_inds, x_inds, channel_inds scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) np.testing.assert_allclose([1.0, 0.7, 0.5], scores[0]) np.testing.assert_array_equal([2, 0, 2], y_inds[0]) np.testing.assert_array_equal([0, 1, 2], x_inds[0]) np.testing.assert_array_equal([1, 0, 0], channel_inds[0]) np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1]) np.testing.assert_array_equal([2, 1, 1], y_inds[1]) np.testing.assert_array_equal([1, 0, 2], x_inds[1]) np.testing.assert_array_equal([1, 0, 0], channel_inds[1]) def test_top_k_feature_map_locations_no_pooling(self): feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) feature_map_np[0, 2, 0, 1] = 1.0 feature_map_np[0, 2, 1, 1] = 0.9 feature_map_np[0, 0, 1, 0] = 0.7 feature_map_np[0, 2, 2, 0] = 0.5 feature_map_np[0, 2, 2, 1] = -0.3 feature_map_np[1, 2, 1, 1] = 0.7 feature_map_np[1, 1, 0, 0] = 0.4 feature_map_np[1, 1, 2, 0] = 0.1 def graph_fn(): feature_map = tf.constant(feature_map_np) scores, y_inds, x_inds, channel_inds = ( cnma.top_k_feature_map_locations( feature_map, max_pool_kernel_size=1, k=3)) return scores, y_inds, x_inds, channel_inds scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) np.testing.assert_allclose([1.0, 0.9, 0.7], scores[0]) np.testing.assert_array_equal([2, 2, 0], y_inds[0]) np.testing.assert_array_equal([0, 1, 1], x_inds[0]) np.testing.assert_array_equal([1, 1, 0], channel_inds[0]) np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1]) np.testing.assert_array_equal([2, 1, 1], y_inds[1]) np.testing.assert_array_equal([1, 0, 2], x_inds[1]) np.testing.assert_array_equal([1, 0, 0], channel_inds[1]) def 
test_top_k_feature_map_locations_per_channel(self): feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) feature_map_np[0, 2, 0, 0] = 1.0 # Selected. feature_map_np[0, 2, 1, 0] = 0.9 # Get's filtered due to max pool. feature_map_np[0, 0, 1, 0] = 0.7 # Selected. feature_map_np[0, 2, 2, 1] = 0.5 # Selected. feature_map_np[0, 0, 0, 1] = 0.3 # Selected. feature_map_np[1, 2, 1, 0] = 0.7 # Selected. feature_map_np[1, 1, 0, 0] = 0.4 # Get's filtered due to max pool. feature_map_np[1, 1, 2, 0] = 0.3 # Get's filtered due to max pool. feature_map_np[1, 1, 0, 1] = 0.8 # Selected. feature_map_np[1, 1, 2, 1] = 0.3 # Selected. def graph_fn(): feature_map = tf.constant(feature_map_np) scores, y_inds, x_inds, channel_inds = ( cnma.top_k_feature_map_locations( feature_map, max_pool_kernel_size=3, k=2, per_channel=True)) return scores, y_inds, x_inds, channel_inds scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) np.testing.assert_allclose([1.0, 0.7, 0.5, 0.3], scores[0]) np.testing.assert_array_equal([2, 0, 2, 0], y_inds[0]) np.testing.assert_array_equal([0, 1, 2, 0], x_inds[0]) np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[0]) np.testing.assert_allclose([0.7, 0.0, 0.8, 0.3], scores[1]) np.testing.assert_array_equal([2, 0, 1, 1], y_inds[1]) np.testing.assert_array_equal([1, 0, 0, 2], x_inds[1]) np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[1]) def test_top_k_feature_map_locations_k1(self): feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) feature_map_np[0, 2, 0, 0] = 1.0 # Selected. feature_map_np[0, 2, 1, 0] = 0.9 feature_map_np[0, 0, 1, 0] = 0.7 feature_map_np[0, 2, 2, 1] = 0.5 feature_map_np[0, 0, 0, 1] = 0.3 feature_map_np[1, 2, 1, 0] = 0.7 feature_map_np[1, 1, 0, 0] = 0.4 feature_map_np[1, 1, 2, 0] = 0.3 feature_map_np[1, 1, 0, 1] = 0.8 # Selected. feature_map_np[1, 1, 2, 1] = 0.3 def graph_fn(): feature_map = tf.constant(feature_map_np) scores, y_inds, x_inds, channel_inds = ( cnma.top_k_feature_map_locations( feature_map, max_pool_kernel_size=3, k=1, per_channel=False)) return scores, y_inds, x_inds, channel_inds scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) np.testing.assert_allclose([1.0], scores[0]) np.testing.assert_array_equal([2], y_inds[0]) np.testing.assert_array_equal([0], x_inds[0]) np.testing.assert_array_equal([0], channel_inds[0]) np.testing.assert_allclose([0.8], scores[1]) np.testing.assert_array_equal([1], y_inds[1]) np.testing.assert_array_equal([0], x_inds[1]) np.testing.assert_array_equal([1], channel_inds[1]) def test_top_k_feature_map_locations_k1_per_channel(self): feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) feature_map_np[0, 2, 0, 0] = 1.0 # Selected. feature_map_np[0, 2, 1, 0] = 0.9 feature_map_np[0, 0, 1, 0] = 0.7 feature_map_np[0, 2, 2, 1] = 0.5 # Selected. feature_map_np[0, 0, 0, 1] = 0.3 feature_map_np[1, 2, 1, 0] = 0.7 # Selected. feature_map_np[1, 1, 0, 0] = 0.4 feature_map_np[1, 1, 2, 0] = 0.3 feature_map_np[1, 1, 0, 1] = 0.8 # Selected. 
feature_map_np[1, 1, 2, 1] = 0.3 def graph_fn(): feature_map = tf.constant(feature_map_np) scores, y_inds, x_inds, channel_inds = ( cnma.top_k_feature_map_locations( feature_map, max_pool_kernel_size=3, k=1, per_channel=True)) return scores, y_inds, x_inds, channel_inds scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) np.testing.assert_allclose([1.0, 0.5], scores[0]) np.testing.assert_array_equal([2, 2], y_inds[0]) np.testing.assert_array_equal([0, 2], x_inds[0]) np.testing.assert_array_equal([0, 1], channel_inds[0]) np.testing.assert_allclose([0.7, 0.8], scores[1]) np.testing.assert_array_equal([2, 1], y_inds[1]) np.testing.assert_array_equal([1, 0], x_inds[1]) np.testing.assert_array_equal([0, 1], channel_inds[1]) def test_box_prediction(self): class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32) hw_pred = np.zeros((3, 128, 128, 2), dtype=np.float32) offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32) # Sample 1, 2 boxes class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0] hw_pred[0, 10, 20] = [40, 60] offset_pred[0, 10, 20] = [1, 2] class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45] hw_pred[0, 50, 60] = [50, 50] offset_pred[0, 50, 60] = [0, 0] # Sample 2, 2 boxes (at same location) class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0] hw_pred[1, 100, 100] = [10, 10] offset_pred[1, 100, 100] = [1, 3] # Sample 3, 3 boxes class_pred[2, 60, 90] = [0.0, 0.0, 0.0, 0.2, 0.8] hw_pred[2, 60, 90] = [40, 30] offset_pred[2, 60, 90] = [0, 0] class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0] hw_pred[2, 65, 95] = [20, 20] offset_pred[2, 65, 95] = [1, 2] class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0] hw_pred[2, 75, 85] = [21, 25] offset_pred[2, 75, 85] = [5, 2] def graph_fn(): class_pred_tensor = tf.constant(class_pred) hw_pred_tensor = tf.constant(hw_pred) offset_pred_tensor = tf.constant(offset_pred) _, y_indices, x_indices, _ = ( cnma.top_k_feature_map_locations( class_pred_tensor, max_pool_kernel_size=3, k=2)) boxes = cnma.prediction_tensors_to_boxes( y_indices, x_indices, hw_pred_tensor, offset_pred_tensor) return boxes boxes = self.execute(graph_fn, []) np.testing.assert_allclose( [[0, 0, 31, 52], [25, 35, 75, 85]], boxes[0]) np.testing.assert_allclose( [[96, 98, 106, 108], [96, 98, 106, 108]], boxes[1]) np.testing.assert_allclose( [[69.5, 74.5, 90.5, 99.5], [40, 75, 80, 105]], boxes[2]) def test_offset_prediction(self): class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32) offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32) # Sample 1, 2 boxes class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0] offset_pred[0, 10, 20] = [1, 2] class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45] offset_pred[0, 50, 60] = [0, 0] # Sample 2, 2 boxes (at same location) class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0] offset_pred[1, 100, 100] = [1, 3] # Sample 3, 3 boxes class_pred[2, 60, 90] = [0.0, 0.0, 0.0, 0.2, 0.8] offset_pred[2, 60, 90] = [0, 0] class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0] offset_pred[2, 65, 95] = [1, 2] class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0] offset_pred[2, 75, 85] = [5, 2] def graph_fn(): class_pred_tensor = tf.constant(class_pred) offset_pred_tensor = tf.constant(offset_pred) _, y_indices, x_indices, _ = ( cnma.top_k_feature_map_locations( class_pred_tensor, max_pool_kernel_size=3, k=2)) offsets = cnma.prediction_tensors_to_temporal_offsets( y_indices, x_indices, offset_pred_tensor) return offsets offsets = self.execute(graph_fn, []) np.testing.assert_allclose( [[1, 2], [0, 0]], offsets[0]) np.testing.assert_allclose( [[1, 3], [1, 3]], 
offsets[1]) np.testing.assert_allclose( [[5, 2], [0, 0]], offsets[2]) def test_keypoint_candidate_prediction(self): keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32) keypoint_heatmap_np[0, 0, 0, 0] = 1.0 keypoint_heatmap_np[0, 2, 1, 0] = 0.7 keypoint_heatmap_np[0, 1, 1, 0] = 0.6 keypoint_heatmap_np[0, 0, 2, 1] = 0.7 keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score. keypoint_heatmap_np[0, 2, 2, 1] = 0.2 keypoint_heatmap_np[1, 1, 0, 0] = 0.6 keypoint_heatmap_np[1, 2, 1, 0] = 0.5 keypoint_heatmap_np[1, 0, 0, 0] = 0.4 keypoint_heatmap_np[1, 0, 0, 1] = 1.0 keypoint_heatmap_np[1, 0, 1, 1] = 0.9 keypoint_heatmap_np[1, 2, 0, 1] = 0.8 keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 2), dtype=np.float32) keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25] keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5] keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0] keypoint_heatmap_offsets_np[0, 0, 2] = [1.0, 0.0] keypoint_heatmap_offsets_np[0, 2, 2] = [1.0, 1.0] keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5] keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0] keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, -0.5] keypoint_heatmap_offsets_np[1, 0, 1] = [0.5, -0.5] keypoint_heatmap_offsets_np[1, 2, 0] = [-1.0, -0.5] def graph_fn(): keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32) keypoint_heatmap_offsets = tf.constant( keypoint_heatmap_offsets_np, dtype=tf.float32) (keypoint_cands, keypoint_scores, num_keypoint_candidates, _) = ( cnma.prediction_tensors_to_keypoint_candidates( keypoint_heatmap, keypoint_heatmap_offsets, keypoint_score_threshold=0.5, max_pool_kernel_size=1, max_candidates=2)) return keypoint_cands, keypoint_scores, num_keypoint_candidates (keypoint_cands, keypoint_scores, num_keypoint_candidates) = self.execute(graph_fn, []) expected_keypoint_candidates = [ [ # Example 0. [[0.5, 0.25], [1.0, 2.0]], # Keypoint 1. [[1.75, 1.5], [1.0, 1.0]], # Keypoint 2. ], [ # Example 1. [[1.25, 0.5], [0.0, -0.5]], # Keypoint 1. [[2.5, 1.0], [0.5, 0.5]], # Keypoint 2. ], ] expected_keypoint_scores = [ [ # Example 0. [1.0, 0.7], # Keypoint 1. [0.7, 0.3], # Keypoint 2. ], [ # Example 1. [0.6, 1.0], # Keypoint 1. [0.5, 0.9], # Keypoint 2. ], ] expected_num_keypoint_candidates = [ [2, 1], [2, 2] ] np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands) np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores) np.testing.assert_array_equal(expected_num_keypoint_candidates, num_keypoint_candidates) def test_prediction_to_single_instance_keypoints(self): image_size = (9, 9) object_heatmap_np = np.zeros((1, image_size[0], image_size[1], 1), dtype=np.float32) # This should be picked. object_heatmap_np[0, 4, 4, 0] = 0.9 # This shouldn't be picked since it's farther away from the center. object_heatmap_np[0, 2, 2, 0] = 1.0 keypoint_heatmap_np = np.zeros((1, image_size[0], image_size[1], 4), dtype=np.float32) # Top-left corner should be picked. keypoint_heatmap_np[0, 1, 1, 0] = 0.9 keypoint_heatmap_np[0, 4, 4, 0] = 1.0 # Top-right corner should be picked. keypoint_heatmap_np[0, 1, 7, 1] = 0.9 keypoint_heatmap_np[0, 4, 4, 1] = 1.0 # Bottom-left corner should be picked. keypoint_heatmap_np[0, 7, 1, 2] = 0.9 keypoint_heatmap_np[0, 4, 4, 2] = 1.0 # Bottom-right corner should be picked. 
keypoint_heatmap_np[0, 7, 7, 3] = 0.9 keypoint_heatmap_np[0, 4, 4, 3] = 1.0 keypoint_offset_np = np.zeros((1, image_size[0], image_size[1], 8), dtype=np.float32) keypoint_offset_np[0, 1, 1] = [0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] keypoint_offset_np[0, 1, 7] = [0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0] keypoint_offset_np[0, 7, 1] = [0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 0.0, 0.0] keypoint_offset_np[0, 7, 7] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, -0.5] keypoint_regression_np = np.zeros((1, image_size[0], image_size[1], 8), dtype=np.float32) keypoint_regression_np[0, 4, 4] = [-3, -3, -3, 3, 3, -3, 3, 3] kp_params = get_fake_kp_params( candidate_ranking_mode='score_distance_ratio') def graph_fn(): object_heatmap = tf.constant(object_heatmap_np, dtype=tf.float32) keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32) keypoint_offset = tf.constant(keypoint_offset_np, dtype=tf.float32) keypoint_regression = tf.constant( keypoint_regression_np, dtype=tf.float32) (keypoint_cands, keypoint_scores, _) = ( cnma.prediction_to_single_instance_keypoints( object_heatmap, keypoint_heatmap, keypoint_offset, keypoint_regression, kp_params=kp_params)) return keypoint_cands, keypoint_scores (keypoint_cands, keypoint_scores) = self.execute(graph_fn, []) expected_keypoint_candidates = [[[ [1.5, 1.5], # top-left [1.5, 6.5], # top-right [6.5, 1.5], # bottom-left [6.5, 6.5], # bottom-right ]]] expected_keypoint_scores = [[[0.9, 0.9, 0.9, 0.9]]] np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands) np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores) def test_keypoint_candidate_prediction_per_keypoints(self): keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32) keypoint_heatmap_np[0, 0, 0, 0] = 1.0 keypoint_heatmap_np[0, 2, 1, 0] = 0.7 keypoint_heatmap_np[0, 1, 1, 0] = 0.6 keypoint_heatmap_np[0, 0, 2, 1] = 0.7 keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score. keypoint_heatmap_np[0, 2, 2, 1] = 0.2 keypoint_heatmap_np[1, 1, 0, 0] = 0.6 keypoint_heatmap_np[1, 2, 1, 0] = 0.5 keypoint_heatmap_np[1, 0, 0, 0] = 0.4 keypoint_heatmap_np[1, 0, 0, 1] = 1.0 keypoint_heatmap_np[1, 0, 1, 1] = 0.9 keypoint_heatmap_np[1, 2, 0, 1] = 0.8 # Note that the keypoint offsets are now per keypoint (as opposed to # keypoint agnostic, in the test test_keypoint_candidate_prediction). 
keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 4), dtype=np.float32) keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25, 0.0, 0.0] keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5, 0.0, 0.0] keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0, 0.0, 0.0] keypoint_heatmap_offsets_np[0, 0, 2] = [0.0, 0.0, 1.0, 0.0] keypoint_heatmap_offsets_np[0, 2, 2] = [0.0, 0.0, 1.0, 1.0] keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5, 0.0, 0.0] keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0, 0.0, 0.0] keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, 0.0, 0.0, -0.5] keypoint_heatmap_offsets_np[1, 0, 1] = [0.0, 0.0, 0.5, -0.5] keypoint_heatmap_offsets_np[1, 2, 0] = [0.0, 0.0, -1.0, -0.5] def graph_fn(): keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32) keypoint_heatmap_offsets = tf.constant( keypoint_heatmap_offsets_np, dtype=tf.float32) (keypoint_cands, keypoint_scores, num_keypoint_candidates, _) = ( cnma.prediction_tensors_to_keypoint_candidates( keypoint_heatmap, keypoint_heatmap_offsets, keypoint_score_threshold=0.5, max_pool_kernel_size=1, max_candidates=2)) return keypoint_cands, keypoint_scores, num_keypoint_candidates (keypoint_cands, keypoint_scores, num_keypoint_candidates) = self.execute(graph_fn, []) expected_keypoint_candidates = [ [ # Example 0. [[0.5, 0.25], [1.0, 2.0]], # Candidate 1 of keypoint 1, 2. [[1.75, 1.5], [1.0, 1.0]], # Candidate 2 of keypoint 1, 2. ], [ # Example 1. [[1.25, 0.5], [0.0, -0.5]], # Candidate 1 of keypoint 1, 2. [[2.5, 1.0], [0.5, 0.5]], # Candidate 2 of keypoint 1, 2. ], ] expected_keypoint_scores = [ [ # Example 0. [1.0, 0.7], # Candidate 1 scores of keypoint 1, 2. [0.7, 0.3], # Candidate 2 scores of keypoint 1, 2. ], [ # Example 1. [0.6, 1.0], # Candidate 1 scores of keypoint 1, 2. [0.5, 0.9], # Candidate 2 scores of keypoint 1, 2. ], ] expected_num_keypoint_candidates = [ [2, 1], [2, 2] ] np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands) np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores) np.testing.assert_array_equal(expected_num_keypoint_candidates, num_keypoint_candidates) @parameterized.parameters({'per_keypoint_depth': True}, {'per_keypoint_depth': False}) def test_keypoint_candidate_prediction_depth(self, per_keypoint_depth): keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32) keypoint_heatmap_np[0, 0, 0, 0] = 1.0 keypoint_heatmap_np[0, 2, 1, 0] = 0.7 keypoint_heatmap_np[0, 1, 1, 0] = 0.6 keypoint_heatmap_np[0, 0, 2, 1] = 0.7 keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score. 
keypoint_heatmap_np[0, 2, 2, 1] = 0.2 keypoint_heatmap_np[1, 1, 0, 0] = 0.6 keypoint_heatmap_np[1, 2, 1, 0] = 0.5 keypoint_heatmap_np[1, 0, 0, 0] = 0.4 keypoint_heatmap_np[1, 0, 0, 1] = 1.0 keypoint_heatmap_np[1, 0, 1, 1] = 0.9 keypoint_heatmap_np[1, 2, 0, 1] = 0.8 if per_keypoint_depth: keypoint_depths_np = np.zeros((2, 3, 3, 2), dtype=np.float32) keypoint_depths_np[0, 0, 0, 0] = -1.5 keypoint_depths_np[0, 2, 1, 0] = -1.0 keypoint_depths_np[0, 0, 2, 1] = 1.5 else: keypoint_depths_np = np.zeros((2, 3, 3, 1), dtype=np.float32) keypoint_depths_np[0, 0, 0, 0] = -1.5 keypoint_depths_np[0, 2, 1, 0] = -1.0 keypoint_depths_np[0, 0, 2, 0] = 1.5 keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 2), dtype=np.float32) keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25] keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5] keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0] keypoint_heatmap_offsets_np[0, 0, 2] = [1.0, 0.0] keypoint_heatmap_offsets_np[0, 2, 2] = [1.0, 1.0] keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5] keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0] keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, -0.5] keypoint_heatmap_offsets_np[1, 0, 1] = [0.5, -0.5] keypoint_heatmap_offsets_np[1, 2, 0] = [-1.0, -0.5] def graph_fn(): keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32) keypoint_heatmap_offsets = tf.constant( keypoint_heatmap_offsets_np, dtype=tf.float32) keypoint_depths = tf.constant(keypoint_depths_np, dtype=tf.float32) (keypoint_cands, keypoint_scores, num_keypoint_candidates, keypoint_depths) = ( cnma.prediction_tensors_to_keypoint_candidates( keypoint_heatmap, keypoint_heatmap_offsets, keypoint_score_threshold=0.5, max_pool_kernel_size=1, max_candidates=2, keypoint_depths=keypoint_depths)) return (keypoint_cands, keypoint_scores, num_keypoint_candidates, keypoint_depths) (_, keypoint_scores, _, keypoint_depths) = self.execute(graph_fn, []) expected_keypoint_scores = [ [ # Example 0. [1.0, 0.7], # Keypoint 1. [0.7, 0.3], # Keypoint 2. ], [ # Example 1. [0.6, 1.0], # Keypoint 1. [0.5, 0.9], # Keypoint 2. 
], ] expected_keypoint_depths = [ [ [-1.5, 1.5], [-1.0, 0.0], ], [ [0., 0.], [0., 0.], ], ] np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores) np.testing.assert_allclose(expected_keypoint_depths, keypoint_depths) def test_regressed_keypoints_at_object_centers(self): batch_size = 2 num_keypoints = 5 num_instances = 6 regressed_keypoint_feature_map_np = np.random.randn( batch_size, 10, 10, 2 * num_keypoints).astype(np.float32) y_indices = np.random.choice(10, (batch_size, num_instances)) x_indices = np.random.choice(10, (batch_size, num_instances)) offsets = np.stack([y_indices, x_indices], axis=2).astype(np.float32) def graph_fn(): regressed_keypoint_feature_map = tf.constant( regressed_keypoint_feature_map_np, dtype=tf.float32) gathered_regressed_keypoints = ( cnma.regressed_keypoints_at_object_centers( regressed_keypoint_feature_map, tf.constant(y_indices, dtype=tf.int32), tf.constant(x_indices, dtype=tf.int32))) return gathered_regressed_keypoints gathered_regressed_keypoints = self.execute(graph_fn, []) expected_gathered_keypoints_0 = regressed_keypoint_feature_map_np[ 0, y_indices[0], x_indices[0], :] expected_gathered_keypoints_1 = regressed_keypoint_feature_map_np[ 1, y_indices[1], x_indices[1], :] expected_gathered_keypoints = np.stack([ expected_gathered_keypoints_0, expected_gathered_keypoints_1], axis=0) expected_gathered_keypoints = np.reshape( expected_gathered_keypoints, [batch_size, num_instances, num_keypoints, 2]) expected_gathered_keypoints += np.expand_dims(offsets, axis=2) expected_gathered_keypoints = np.reshape( expected_gathered_keypoints, [batch_size, num_instances, -1]) np.testing.assert_allclose(expected_gathered_keypoints, gathered_regressed_keypoints) @parameterized.parameters( {'candidate_ranking_mode': 'min_distance'}, {'candidate_ranking_mode': 'score_distance_ratio'}, ) def test_refine_keypoints(self, candidate_ranking_mode): regressed_keypoints_np = np.array( [ # Example 0. [ [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0. [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1. ], # Example 1. [ [[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]], # Instance 0. [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. ], ], dtype=np.float32) keypoint_candidates_np = np.array( [ # Example 0. [ [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0. [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1. [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], # Candidate 2. ], # Example 1. [ [[6.0, 1.5], [0.1, 0.4], [0.0, 0.0]], # Candidate 0. [[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]], # Candidate 1. [[0.0, 0.0], [0.1, 0.3], [0.0, 0.0]], # Candidate 2. ] ], dtype=np.float32) keypoint_scores_np = np.array( [ # Example 0. [ [0.8, 0.9, 1.0], # Candidate 0. [0.6, 0.1, 0.9], # Candidate 1. [0.0, 0.0, 0.0], # Candidate 1. ], # Example 1. [ [0.7, 0.3, 0.0], # Candidate 0. [0.6, 0.1, 0.0], # Candidate 1. [0.0, 0.28, 0.0], # Candidate 1. ] ], dtype=np.float32) num_keypoints_candidates_np = np.array( [ # Example 0. [2, 2, 2], # Example 1. [2, 3, 0], ], dtype=np.int32) unmatched_keypoint_score = 0.1 def graph_fn(): regressed_keypoints = tf.constant( regressed_keypoints_np, dtype=tf.float32) keypoint_candidates = tf.constant( keypoint_candidates_np, dtype=tf.float32) keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) num_keypoint_candidates = tf.constant(num_keypoints_candidates_np, dtype=tf.int32) # The behavior of bboxes=None is different now. We provide the bboxes # explicitly by using the regressed keypoints to create the same # behavior. 
regressed_keypoints_flattened = tf.reshape( regressed_keypoints, [-1, 3, 2]) bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes( regressed_keypoints_flattened) (refined_keypoints, refined_scores, _) = cnma.refine_keypoints( regressed_keypoints, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=bboxes_flattened, unmatched_keypoint_score=unmatched_keypoint_score, box_scale=1.2, candidate_search_scale=0.3, candidate_ranking_mode=candidate_ranking_mode) return refined_keypoints, refined_scores refined_keypoints, refined_scores = self.execute(graph_fn, []) if candidate_ranking_mode == 'min_distance': expected_refined_keypoints = np.array( [ # Example 0. [ [[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]], # Instance 0. [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. ], # Example 1. [ [[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]], # Instance 0. [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. ], ], dtype=np.float32) expected_refined_scores = np.array( [ # Example 0. [ [0.8, 0.9, unmatched_keypoint_score], # Instance 0. [unmatched_keypoint_score, # Instance 1. unmatched_keypoint_score, 1.0], ], # Example 1. [ [0.7, 0.1, unmatched_keypoint_score], # Instance 0. [unmatched_keypoint_score, # Instance 1. 0.1, unmatched_keypoint_score], ], ], dtype=np.float32) else: expected_refined_keypoints = np.array( [ # Example 0. [ [[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]], # Instance 0. [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. ], # Example 1. [ [[6.0, 1.5], [0.1, 0.3], [0.1, 0.1]], # Instance 0. [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. ], ], dtype=np.float32) expected_refined_scores = np.array( [ # Example 0. [ [0.8, 0.9, unmatched_keypoint_score], # Instance 0. [unmatched_keypoint_score, # Instance 1. unmatched_keypoint_score, 1.0], ], # Example 1. [ [0.7, 0.28, unmatched_keypoint_score], # Instance 0. [unmatched_keypoint_score, # Instance 1. 0.1, unmatched_keypoint_score], ], ], dtype=np.float32) np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints) np.testing.assert_allclose(expected_refined_scores, refined_scores) def test_refine_keypoints_without_bbox(self): regressed_keypoints_np = np.array( [ # Example 0. [ [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0. [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1. ], ], dtype=np.float32) keypoint_candidates_np = np.array( [ # Example 0. [ [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0. [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1. [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], # Candidate 2. ], ], dtype=np.float32) keypoint_scores_np = np.array( [ # Example 0. [ [0.8, 0.9, 1.0], # Candidate 0. [0.6, 0.1, 0.9], # Candidate 1. [0.0, 0.0, 0.0], # Candidate 1. ], ], dtype=np.float32) num_keypoints_candidates_np = np.array( [ # Example 0. 
[2, 2, 2], ], dtype=np.int32) unmatched_keypoint_score = 0.1 def graph_fn(): regressed_keypoints = tf.constant( regressed_keypoints_np, dtype=tf.float32) keypoint_candidates = tf.constant( keypoint_candidates_np, dtype=tf.float32) keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) num_keypoint_candidates = tf.constant(num_keypoints_candidates_np, dtype=tf.int32) (refined_keypoints, refined_scores, _) = cnma.refine_keypoints( regressed_keypoints, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=None, unmatched_keypoint_score=unmatched_keypoint_score, box_scale=1.2, candidate_search_scale=0.3, candidate_ranking_mode='min_distance') return refined_keypoints, refined_scores refined_keypoints, refined_scores = self.execute(graph_fn, []) # The expected refined keypoints pick the ones that are closest to the # regressed keypoint locations without filtering out the candidates which # are outside of the bounding box. expected_refined_keypoints = np.array( [ # Example 0. [ [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Instance 0. [[1.0, 8.0], [0.0, 0.0], [4.0, 7.0]], # Instance 1. ], ], dtype=np.float32) expected_refined_scores = np.array( [ # Example 0. [ [0.8, 0.9, 1.0], # Instance 0. [0.6, 0.1, 1.0], # Instance 1. ], ], dtype=np.float32) np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints) np.testing.assert_allclose(expected_refined_scores, refined_scores) @parameterized.parameters({'predict_depth': True}, {'predict_depth': False}) def test_refine_keypoints_with_bboxes(self, predict_depth): regressed_keypoints_np = np.array( [ # Example 0. [ [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0. [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1. ], # Example 1. [ [[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]], # Instance 0. [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. ], ], dtype=np.float32) keypoint_candidates_np = np.array( [ # Example 0. [ [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0. [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1. ], # Example 1. [ [[6.0, 1.5], [5.0, 5.0], [0.0, 0.0]], # Candidate 0. [[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]], # Candidate 1. ] ], dtype=np.float32) keypoint_scores_np = np.array( [ # Example 0. [ [0.8, 0.9, 1.0], # Candidate 0. [0.6, 0.1, 0.9], # Candidate 1. ], # Example 1. [ [0.7, 0.4, 0.0], # Candidate 0. [0.6, 0.1, 0.0], # Candidate 1. ] ], dtype=np.float32) keypoint_depths_np = np.array( [ # Example 0. [ [-0.8, -0.9, -1.0], # Candidate 0. [-0.6, -0.1, -0.9], # Candidate 1. ], # Example 1. [ [-0.7, -0.4, -0.0], # Candidate 0. [-0.6, -0.1, -0.0], # Candidate 1. ] ], dtype=np.float32) num_keypoints_candidates_np = np.array( [ # Example 0. [2, 2, 2], # Example 1. [2, 2, 0], ], dtype=np.int32) bboxes_np = np.array( [ # Example 0. [ [2.0, 2.0, 14.0, 10.0], # Instance 0. [0.0, 3.0, 5.0, 7.0], # Instance 1. ], # Example 1. [ [0.0, 0.0, 6.0, 2.0], # Instance 0. [5.0, 1.4, 9.0, 5.0], # Instance 1. 
], ], dtype=np.float32) unmatched_keypoint_score = 0.1 def graph_fn(): regressed_keypoints = tf.constant( regressed_keypoints_np, dtype=tf.float32) keypoint_candidates = tf.constant( keypoint_candidates_np, dtype=tf.float32) keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) if predict_depth: keypoint_depths = tf.constant(keypoint_depths_np, dtype=tf.float32) else: keypoint_depths = None num_keypoint_candidates = tf.constant(num_keypoints_candidates_np, dtype=tf.int32) bboxes = tf.constant(bboxes_np, dtype=tf.float32) (refined_keypoints, refined_scores, refined_depths) = cnma.refine_keypoints( regressed_keypoints, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=bboxes, unmatched_keypoint_score=unmatched_keypoint_score, box_scale=1.0, candidate_search_scale=0.3, keypoint_depth_candidates=keypoint_depths) if predict_depth: return refined_keypoints, refined_scores, refined_depths else: return refined_keypoints, refined_scores expected_refined_keypoints = np.array( [ # Example 0. [ [[2.0, 2.5], [6.0, 10.0], [14.0, 7.0]], # Instance 0. [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. ], # Example 1. [ [[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]], # Instance 0. [[6.0, 1.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. ], ], dtype=np.float32) expected_refined_scores = np.array( [ # Example 0. [ [0.8, unmatched_keypoint_score, # Instance 0. unmatched_keypoint_score], [unmatched_keypoint_score, # Instance 1. unmatched_keypoint_score, 1.0], ], # Example 1. [ [0.7, 0.1, unmatched_keypoint_score], # Instance 0. [0.7, 0.4, unmatched_keypoint_score], # Instance 1. ], ], dtype=np.float32) if predict_depth: refined_keypoints, refined_scores, refined_depths = self.execute( graph_fn, []) expected_refined_depths = np.array([[[-0.8, 0.0, 0.0], [0.0, 0.0, -1.0]], [[-0.7, -0.1, 0.0], [-0.7, -0.4, 0.0]]]) np.testing.assert_allclose(expected_refined_depths, refined_depths) else: refined_keypoints, refined_scores = self.execute(graph_fn, []) np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints) np.testing.assert_allclose(expected_refined_scores, refined_scores) def test_pad_to_full_keypoint_dim(self): batch_size = 4 num_instances = 8 num_keypoints = 2 keypoint_inds = [1, 3] num_total_keypoints = 5 kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2) kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints) def graph_fn(): kpt_coords = tf.constant(kpt_coords_np) kpt_scores = tf.constant(kpt_scores_np) kpt_coords_padded, kpt_scores_padded = ( cnma._pad_to_full_keypoint_dim( kpt_coords, kpt_scores, keypoint_inds, num_total_keypoints)) return kpt_coords_padded, kpt_scores_padded kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, []) self.assertAllEqual([batch_size, num_instances, num_total_keypoints, 2], kpt_coords_padded.shape) self.assertAllEqual([batch_size, num_instances, num_total_keypoints], kpt_scores_padded.shape) for i, kpt_ind in enumerate(keypoint_inds): np.testing.assert_allclose(kpt_coords_np[:, :, i, :], kpt_coords_padded[:, :, kpt_ind, :]) np.testing.assert_allclose(kpt_scores_np[:, :, i], kpt_scores_padded[:, :, kpt_ind]) def test_pad_to_full_instance_dim(self): batch_size = 4 max_instances = 8 num_keypoints = 6 num_instances = 2 instance_inds = [1, 3] kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2) kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints) def graph_fn(): kpt_coords = tf.constant(kpt_coords_np) kpt_scores = tf.constant(kpt_scores_np) 
      kpt_coords_padded, kpt_scores_padded = (
          cnma._pad_to_full_instance_dim(
              kpt_coords, kpt_scores, instance_inds, max_instances))
      return kpt_coords_padded, kpt_scores_padded

    kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, [])

    self.assertAllEqual([batch_size, max_instances, num_keypoints, 2],
                        kpt_coords_padded.shape)
    self.assertAllEqual([batch_size, max_instances, num_keypoints],
                        kpt_scores_padded.shape)

    for i, inst_ind in enumerate(instance_inds):
      np.testing.assert_allclose(kpt_coords_np[:, i, :, :],
                                 kpt_coords_padded[:, inst_ind, :, :])
      np.testing.assert_allclose(kpt_scores_np[:, i, :],
                                 kpt_scores_padded[:, inst_ind, :])

  def test_predicted_embeddings_at_object_centers(self):
    batch_size = 2
    embedding_size = 5
    num_instances = 6
    predicted_embedding_feature_map_np = np.random.randn(
        batch_size, 10, 10, embedding_size).astype(np.float32)
    y_indices = np.random.choice(10, (batch_size, num_instances))
    x_indices = np.random.choice(10, (batch_size, num_instances))

    def graph_fn():
      predicted_embedding_feature_map = tf.constant(
          predicted_embedding_feature_map_np, dtype=tf.float32)
      gathered_predicted_embeddings = (
          cnma.predicted_embeddings_at_object_centers(
              predicted_embedding_feature_map,
              tf.constant(y_indices, dtype=tf.int32),
              tf.constant(x_indices, dtype=tf.int32)))
      return gathered_predicted_embeddings

    gathered_predicted_embeddings = self.execute(graph_fn, [])

    expected_gathered_embeddings_0 = predicted_embedding_feature_map_np[
        0, y_indices[0], x_indices[0], :]
    expected_gathered_embeddings_1 = predicted_embedding_feature_map_np[
        1, y_indices[1], x_indices[1], :]
    expected_gathered_embeddings = np.stack([
        expected_gathered_embeddings_0,
        expected_gathered_embeddings_1], axis=0)
    expected_gathered_embeddings = np.reshape(
        expected_gathered_embeddings,
        [batch_size, num_instances, embedding_size])
    np.testing.assert_allclose(expected_gathered_embeddings,
                               gathered_predicted_embeddings)


# Common parameters for setting up testing examples across tests.
_NUM_CLASSES = 10 _KEYPOINT_INDICES = [0, 1, 2, 3] _NUM_KEYPOINTS = len(_KEYPOINT_INDICES) _DENSEPOSE_NUM_PARTS = 24 _TASK_NAME = 'human_pose' _NUM_TRACK_IDS = 3 _REID_EMBED_SIZE = 2 _NUM_FC_LAYERS = 1 def get_fake_center_params(max_box_predictions=5): """Returns the fake object center parameter namedtuple.""" return cnma.ObjectCenterParams( classification_loss=losses.WeightedSigmoidClassificationLoss(), object_center_loss_weight=1.0, min_box_overlap_iou=1.0, max_box_predictions=max_box_predictions, use_labeled_classes=False, center_head_num_filters=[128], center_head_kernel_sizes=[5]) def get_fake_od_params(): """Returns the fake object detection parameter namedtuple.""" return cnma.ObjectDetectionParams( localization_loss=losses.L1LocalizationLoss(), offset_loss_weight=1.0, scale_loss_weight=0.1) def get_fake_kp_params(num_candidates_per_keypoint=100, per_keypoint_offset=False, predict_depth=False, per_keypoint_depth=False, peak_radius=0, candidate_ranking_mode='min_distance'): """Returns the fake keypoint estimation parameter namedtuple.""" return cnma.KeypointEstimationParams( task_name=_TASK_NAME, class_id=1, keypoint_indices=_KEYPOINT_INDICES, keypoint_std_dev=[0.00001] * len(_KEYPOINT_INDICES), classification_loss=losses.WeightedSigmoidClassificationLoss(), localization_loss=losses.L1LocalizationLoss(), unmatched_keypoint_score=0.1, keypoint_candidate_score_threshold=0.1, num_candidates_per_keypoint=num_candidates_per_keypoint, per_keypoint_offset=per_keypoint_offset, predict_depth=predict_depth, per_keypoint_depth=per_keypoint_depth, offset_peak_radius=peak_radius, candidate_ranking_mode=candidate_ranking_mode) def get_fake_mask_params(): """Returns the fake mask estimation parameter namedtuple.""" return cnma.MaskParams( classification_loss=losses.WeightedSoftmaxClassificationLoss(), task_loss_weight=1.0, mask_height=4, mask_width=4, mask_head_num_filters=[96], mask_head_kernel_sizes=[3]) def get_fake_densepose_params(): """Returns the fake DensePose estimation parameter namedtuple.""" return cnma.DensePoseParams( class_id=1, classification_loss=losses.WeightedSoftmaxClassificationLoss(), localization_loss=losses.L1LocalizationLoss(), part_loss_weight=1.0, coordinate_loss_weight=1.0, num_parts=_DENSEPOSE_NUM_PARTS, task_loss_weight=1.0, upsample_to_input_res=True, upsample_method='nearest') def get_fake_track_params(): """Returns the fake object tracking parameter namedtuple.""" return cnma.TrackParams( num_track_ids=_NUM_TRACK_IDS, reid_embed_size=_REID_EMBED_SIZE, num_fc_layers=_NUM_FC_LAYERS, classification_loss=losses.WeightedSoftmaxClassificationLoss(), task_loss_weight=1.0) def get_fake_temporal_offset_params(): """Returns the fake temporal offset parameter namedtuple.""" return cnma.TemporalOffsetParams( localization_loss=losses.WeightedSmoothL1LocalizationLoss(), task_loss_weight=1.0) def build_center_net_meta_arch(build_resnet=False, num_classes=_NUM_CLASSES, max_box_predictions=5, apply_non_max_suppression=False, detection_only=False, per_keypoint_offset=False, predict_depth=False, per_keypoint_depth=False, peak_radius=0, keypoint_only=False, candidate_ranking_mode='min_distance'): """Builds the CenterNet meta architecture.""" if build_resnet: feature_extractor = ( center_net_resnet_feature_extractor.CenterNetResnetFeatureExtractor( 'resnet_v2_101')) else: feature_extractor = DummyFeatureExtractor( channel_means=(1.0, 2.0, 3.0), channel_stds=(10., 20., 30.), bgr_ordering=False, num_feature_outputs=2, stride=4) image_resizer_fn = functools.partial( 
preprocessor.resize_to_range, min_dimension=128, max_dimension=128, pad_to_max_dimesnion=True) non_max_suppression_fn = None if apply_non_max_suppression: post_processing_proto = post_processing_pb2.PostProcessing() post_processing_proto.batch_non_max_suppression.iou_threshold = 1.0 post_processing_proto.batch_non_max_suppression.score_threshold = 0.6 (post_processing_proto.batch_non_max_suppression.max_total_detections ) = max_box_predictions (post_processing_proto.batch_non_max_suppression.max_detections_per_class ) = max_box_predictions (post_processing_proto.batch_non_max_suppression.change_coordinate_frame ) = False non_max_suppression_fn, _ = post_processing_builder.build( post_processing_proto) if keypoint_only: num_candidates_per_keypoint = 100 if max_box_predictions > 1 else 1 return cnma.CenterNetMetaArch( is_training=True, add_summaries=False, num_classes=num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=get_fake_center_params(max_box_predictions), keypoint_params_dict={ _TASK_NAME: get_fake_kp_params(num_candidates_per_keypoint, per_keypoint_offset, predict_depth, per_keypoint_depth, peak_radius, candidate_ranking_mode) }, non_max_suppression_fn=non_max_suppression_fn) elif detection_only: return cnma.CenterNetMetaArch( is_training=True, add_summaries=False, num_classes=num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=get_fake_center_params(max_box_predictions), object_detection_params=get_fake_od_params(), non_max_suppression_fn=non_max_suppression_fn) elif num_classes == 1: num_candidates_per_keypoint = 100 if max_box_predictions > 1 else 1 return cnma.CenterNetMetaArch( is_training=True, add_summaries=False, num_classes=num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=get_fake_center_params(max_box_predictions), object_detection_params=get_fake_od_params(), keypoint_params_dict={ _TASK_NAME: get_fake_kp_params(num_candidates_per_keypoint, per_keypoint_offset, predict_depth, per_keypoint_depth, peak_radius, candidate_ranking_mode) }, non_max_suppression_fn=non_max_suppression_fn) else: return cnma.CenterNetMetaArch( is_training=True, add_summaries=False, num_classes=num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=get_fake_center_params(), object_detection_params=get_fake_od_params(), keypoint_params_dict={_TASK_NAME: get_fake_kp_params( candidate_ranking_mode=candidate_ranking_mode)}, mask_params=get_fake_mask_params(), densepose_params=get_fake_densepose_params(), track_params=get_fake_track_params(), temporal_offset_params=get_fake_temporal_offset_params(), non_max_suppression_fn=non_max_suppression_fn) def _logit(p): return np.log( (p + np.finfo(np.float32).eps) / (1 - p + np.finfo(np.float32).eps)) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetMetaArchLibTest(test_case.TestCase): """Test for CenterNet meta architecture related functions.""" def test_get_keypoint_name(self): self.assertEqual('human_pose/keypoint_offset', cnma.get_keypoint_name('human_pose', 'keypoint_offset')) def test_get_num_instances_from_weights(self): weight1 = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32) weight2 = tf.constant([0.5, 0.9, 0.0], dtype=tf.float32) weight3 = tf.constant([0.0, 0.0, 1.0], dtype=tf.float32) def graph_fn_1(): # Total of three elements with non-zero values. 
num_instances = cnma.get_num_instances_from_weights( [weight1, weight2, weight3]) return num_instances num_instances = self.execute(graph_fn_1, []) self.assertAlmostEqual(3, num_instances) # No non-zero value in the weights. Return minimum value: 1. def graph_fn_2(): # Total of three elements with non-zero values. num_instances = cnma.get_num_instances_from_weights([weight1, weight1]) return num_instances num_instances = self.execute(graph_fn_2, []) self.assertAlmostEqual(1, num_instances) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetMetaArchTest(test_case.TestCase, parameterized.TestCase): """Tests for the CenterNet meta architecture.""" def test_construct_prediction_heads(self): model = build_center_net_meta_arch() fake_feature_map = np.zeros((4, 128, 128, 8)) # Check the dictionary contains expected keys and corresponding heads with # correct dimensions. # "object center" head: output = model._prediction_head_dict[cnma.OBJECT_CENTER][-1]( fake_feature_map) self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape) # "object scale" (height/width) head: output = model._prediction_head_dict[cnma.BOX_SCALE][-1](fake_feature_map) self.assertEqual((4, 128, 128, 2), output.shape) # "object offset" head: output = model._prediction_head_dict[cnma.BOX_OFFSET][-1](fake_feature_map) self.assertEqual((4, 128, 128, 2), output.shape) # "keypoint offset" head: output = model._prediction_head_dict[ cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET)][-1]( fake_feature_map) self.assertEqual((4, 128, 128, 2), output.shape) # "keypoint heatmap" head: output = model._prediction_head_dict[cnma.get_keypoint_name( _TASK_NAME, cnma.KEYPOINT_HEATMAP)][-1]( fake_feature_map) self.assertEqual((4, 128, 128, _NUM_KEYPOINTS), output.shape) # "keypoint regression" head: output = model._prediction_head_dict[cnma.get_keypoint_name( _TASK_NAME, cnma.KEYPOINT_REGRESSION)][-1]( fake_feature_map) self.assertEqual((4, 128, 128, 2 * _NUM_KEYPOINTS), output.shape) # "mask" head: output = model._prediction_head_dict[cnma.SEGMENTATION_HEATMAP][-1]( fake_feature_map) self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape) # "densepose parts" head: output = model._prediction_head_dict[cnma.DENSEPOSE_HEATMAP][-1]( fake_feature_map) self.assertEqual((4, 128, 128, _DENSEPOSE_NUM_PARTS), output.shape) # "densepose surface coordinates" head: output = model._prediction_head_dict[cnma.DENSEPOSE_REGRESSION][-1]( fake_feature_map) self.assertEqual((4, 128, 128, 2 * _DENSEPOSE_NUM_PARTS), output.shape) # "track embedding" head: output = model._prediction_head_dict[cnma.TRACK_REID][-1]( fake_feature_map) self.assertEqual((4, 128, 128, _REID_EMBED_SIZE), output.shape) # "temporal offset" head: output = model._prediction_head_dict[cnma.TEMPORAL_OFFSET][-1]( fake_feature_map) self.assertEqual((4, 128, 128, 2), output.shape) def test_initialize_target_assigners(self): model = build_center_net_meta_arch() assigner_dict = model._initialize_target_assigners( stride=2, min_box_overlap_iou=0.7) # Check whether the correponding target assigner class is initialized. 
# object center target assigner: self.assertIsInstance(assigner_dict[cnma.OBJECT_CENTER], cn_assigner.CenterNetCenterHeatmapTargetAssigner) # object detection target assigner: self.assertIsInstance(assigner_dict[cnma.DETECTION_TASK], cn_assigner.CenterNetBoxTargetAssigner) # keypoint estimation target assigner: self.assertIsInstance(assigner_dict[_TASK_NAME], cn_assigner.CenterNetKeypointTargetAssigner) # mask estimation target assigner: self.assertIsInstance(assigner_dict[cnma.SEGMENTATION_TASK], cn_assigner.CenterNetMaskTargetAssigner) # DensePose estimation target assigner: self.assertIsInstance(assigner_dict[cnma.DENSEPOSE_TASK], cn_assigner.CenterNetDensePoseTargetAssigner) # Track estimation target assigner: self.assertIsInstance(assigner_dict[cnma.TRACK_TASK], cn_assigner.CenterNetTrackTargetAssigner) # Temporal Offset target assigner: self.assertIsInstance(assigner_dict[cnma.TEMPORALOFFSET_TASK], cn_assigner.CenterNetTemporalOffsetTargetAssigner) def test_predict(self): """Test the predict function.""" model = build_center_net_meta_arch() def graph_fn(): prediction_dict = model.predict(tf.zeros([2, 128, 128, 3]), None) return prediction_dict prediction_dict = self.execute(graph_fn, []) self.assertEqual(prediction_dict['preprocessed_inputs'].shape, (2, 128, 128, 3)) self.assertEqual(prediction_dict[cnma.OBJECT_CENTER][0].shape, (2, 32, 32, _NUM_CLASSES)) self.assertEqual(prediction_dict[cnma.BOX_SCALE][0].shape, (2, 32, 32, 2)) self.assertEqual(prediction_dict[cnma.BOX_OFFSET][0].shape, (2, 32, 32, 2)) self.assertEqual(prediction_dict[cnma.SEGMENTATION_HEATMAP][0].shape, (2, 32, 32, _NUM_CLASSES)) self.assertEqual(prediction_dict[cnma.DENSEPOSE_HEATMAP][0].shape, (2, 32, 32, _DENSEPOSE_NUM_PARTS)) self.assertEqual(prediction_dict[cnma.DENSEPOSE_REGRESSION][0].shape, (2, 32, 32, 2 * _DENSEPOSE_NUM_PARTS)) self.assertEqual(prediction_dict[cnma.TRACK_REID][0].shape, (2, 32, 32, _REID_EMBED_SIZE)) self.assertEqual(prediction_dict[cnma.TEMPORAL_OFFSET][0].shape, (2, 32, 32, 2)) def test_loss(self): """Test the loss function.""" groundtruth_dict = get_fake_groundtruth_dict(16, 32, 4) model = build_center_net_meta_arch() model.provide_groundtruth( groundtruth_boxes_list=groundtruth_dict[fields.BoxListFields.boxes], groundtruth_weights_list=groundtruth_dict[fields.BoxListFields.weights], groundtruth_classes_list=groundtruth_dict[fields.BoxListFields.classes], groundtruth_keypoints_list=groundtruth_dict[ fields.BoxListFields.keypoints], groundtruth_masks_list=groundtruth_dict[ fields.BoxListFields.masks], groundtruth_dp_num_points_list=groundtruth_dict[ fields.BoxListFields.densepose_num_points], groundtruth_dp_part_ids_list=groundtruth_dict[ fields.BoxListFields.densepose_part_ids], groundtruth_dp_surface_coords_list=groundtruth_dict[ fields.BoxListFields.densepose_surface_coords], groundtruth_track_ids_list=groundtruth_dict[ fields.BoxListFields.track_ids], groundtruth_track_match_flags_list=groundtruth_dict[ fields.BoxListFields.track_match_flags], groundtruth_temporal_offsets_list=groundtruth_dict[ fields.BoxListFields.temporal_offsets]) kernel_initializer = tf.constant_initializer( [[1, 1, 0], [-1000000, -1000000, 1000000]]) model.track_reid_classification_net = tf.keras.layers.Dense( _NUM_TRACK_IDS, kernel_initializer=kernel_initializer, input_shape=(_REID_EMBED_SIZE,)) prediction_dict = get_fake_prediction_dict( input_height=16, input_width=32, stride=4) def graph_fn(): loss_dict = model.loss(prediction_dict, tf.constant([[16, 24, 3], [16, 24, 3]])) return loss_dict loss_dict = 
self.execute(graph_fn, []) # The prediction and groundtruth are curated to produce very low loss. self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER)]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE)]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET)]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP))]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET))]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.get_keypoint_name( _TASK_NAME, cnma.KEYPOINT_REGRESSION))]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.SEGMENTATION_HEATMAP)]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.DENSEPOSE_HEATMAP)]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.DENSEPOSE_REGRESSION)]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.TRACK_REID)]) self.assertGreater( 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.TEMPORAL_OFFSET)]) @parameterized.parameters( {'target_class_id': 1}, {'target_class_id': 2}, ) def test_postprocess(self, target_class_id): """Test the postprocess function.""" model = build_center_net_meta_arch() max_detection = model._center_params.max_box_predictions num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices) unmatched_keypoint_score = ( model._kp_params_dict[_TASK_NAME].unmatched_keypoint_score) class_center = np.zeros((1, 32, 32, 10), dtype=np.float32) height_width = np.zeros((1, 32, 32, 2), dtype=np.float32) offset = np.zeros((1, 32, 32, 2), dtype=np.float32) keypoint_heatmaps = np.ones( (1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.001) keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2) class_probs = np.ones(10) * _logit(0.25) class_probs[target_class_id] = _logit(0.75) class_center[0, 16, 16] = class_probs height_width[0, 16, 16] = [5, 10] offset[0, 16, 16] = [.25, .5] keypoint_regression[0, 16, 16] = [ -1., -1., -1., 1., 1., -1., 1., 1.] keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9) keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9) keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9) keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score. 
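    # Illustrative sketch (added, not part of the original test expectations):
    # with stride 4, the 128x128 input maps to a 32x32 heatmap, so - assuming
    # boxes are decoded as center +/- size / 2 in heatmap coordinates and then
    # scaled by the stride - the peak at (16, 16) with offset (.25, .5) and
    # height/width (5, 10) should yield the box asserted further below:
    #   ymin = (16 + .25 - 5 / 2) * 4 = 55,  ymax = (16 + .25 + 5 / 2) * 4 = 75
    #   xmin = (16 + .5 - 10 / 2) * 4 = 46,  xmax = (16 + .5 + 10 / 2) * 4 = 86
    # normalized by the input size of 128. The first three keypoints should
    # snap to the 0.9-score heatmap peaks at (14, 14), (14, 18) and (18, 14);
    # the fourth peak's score (0.05) is below the candidate score threshold
    # (0.1), so that keypoint falls back to its regressed location
    # (16, 16) + (1, 1) = (17, 17) and receives the unmatched keypoint score.
    _cy, _cx, _h, _w = 16 + .25, 16 + .5, 5., 10.
    assert np.allclose(
        np.array([_cy - _h / 2, _cx - _w / 2, _cy + _h / 2, _cx + _w / 2]) * 4,
        [55., 46., 75., 86.])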
segmentation_heatmap = np.zeros((1, 32, 32, 10), dtype=np.float32) segmentation_heatmap[:, 14:18, 14:18, target_class_id] = 1.0 segmentation_heatmap = _logit(segmentation_heatmap) dp_part_ind = 4 dp_part_heatmap = np.zeros((1, 32, 32, _DENSEPOSE_NUM_PARTS), dtype=np.float32) dp_part_heatmap[0, 14:18, 14:18, dp_part_ind] = 1.0 dp_part_heatmap = _logit(dp_part_heatmap) dp_surf_coords = np.random.randn(1, 32, 32, 2 * _DENSEPOSE_NUM_PARTS) embedding_size = 100 track_reid_embedding = np.zeros((1, 32, 32, embedding_size), dtype=np.float32) track_reid_embedding[0, 16, 16, :] = np.ones(embedding_size) temporal_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) temporal_offsets[..., 1] = 1 class_center = tf.constant(class_center) height_width = tf.constant(height_width) offset = tf.constant(offset) keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32) keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32) keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32) segmentation_heatmap = tf.constant(segmentation_heatmap, dtype=tf.float32) dp_part_heatmap = tf.constant(dp_part_heatmap, dtype=tf.float32) dp_surf_coords = tf.constant(dp_surf_coords, dtype=tf.float32) track_reid_embedding = tf.constant(track_reid_embedding, dtype=tf.float32) temporal_offsets = tf.constant(temporal_offsets, dtype=tf.float32) prediction_dict = { cnma.OBJECT_CENTER: [class_center], cnma.BOX_SCALE: [height_width], cnma.BOX_OFFSET: [offset], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [keypoint_heatmaps], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [keypoint_offsets], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [keypoint_regression], cnma.SEGMENTATION_HEATMAP: [segmentation_heatmap], cnma.DENSEPOSE_HEATMAP: [dp_part_heatmap], cnma.DENSEPOSE_REGRESSION: [dp_surf_coords], cnma.TRACK_REID: [track_reid_embedding], cnma.TEMPORAL_OFFSET: [temporal_offsets], } def graph_fn(): detections = model.postprocess(prediction_dict, tf.constant([[128, 128, 3]])) return detections detections = self.execute_cpu(graph_fn, []) self.assertAllClose(detections['detection_boxes'][0, 0], np.array([55, 46, 75, 86]) / 128.0) self.assertAllClose(detections['detection_scores'][0], [.75, .5, .5, .5, .5]) expected_multiclass_scores = [.25] * 10 expected_multiclass_scores[target_class_id] = .75 self.assertAllClose(expected_multiclass_scores, detections['detection_multiclass_scores'][0][0]) # The output embedding extracted at the object center will be a 3-D array of # shape [batch, num_boxes, embedding_size]. The valid predicted embedding # will be the first embedding in the first batch. It is a 1-D array of # shape [embedding_size] with values all ones. All the values of the # embedding will then be divided by the square root of 'embedding_size' # after the L2 normalization. 
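    # Added numeric illustration (not part of the original test): an all-ones
    # embedding of length `embedding_size` has L2 norm sqrt(embedding_size),
    # so after L2 normalization every entry becomes 1 / sqrt(embedding_size),
    # which is exactly what the assertion below checks.
    _ones = np.ones(embedding_size)
    assert np.allclose(_ones / np.linalg.norm(_ones),
                       np.ones(embedding_size) / embedding_size**0.5)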
self.assertAllClose(detections['detection_embeddings'][0, 0], np.ones(embedding_size) / embedding_size**0.5) self.assertEqual(detections['detection_classes'][0, 0], target_class_id) self.assertEqual(detections['num_detections'], [5]) self.assertAllEqual([1, max_detection, num_keypoints, 2], detections['detection_keypoints'].shape) self.assertAllEqual([1, max_detection, num_keypoints], detections['detection_keypoint_scores'].shape) self.assertAllEqual([1, max_detection, 4, 4], detections['detection_masks'].shape) self.assertAllEqual([1, max_detection, embedding_size], detections['detection_embeddings'].shape) self.assertAllEqual([1, max_detection, 2], detections['detection_temporal_offsets'].shape) # Masks should be empty for everything but the first detection. self.assertAllEqual( detections['detection_masks'][0, 1:, :, :], np.zeros_like(detections['detection_masks'][0, 1:, :, :])) self.assertAllEqual( detections['detection_surface_coords'][0, 1:, :, :], np.zeros_like(detections['detection_surface_coords'][0, 1:, :, :])) if target_class_id == 1: expected_kpts_for_obj_0 = np.array( [[14., 14.], [14., 18.], [18., 14.], [17., 17.]]) / 32. expected_kpt_scores_for_obj_0 = np.array( [0.9, 0.9, 0.9, unmatched_keypoint_score]) np.testing.assert_allclose(detections['detection_keypoints'][0][0], expected_kpts_for_obj_0, rtol=1e-6) np.testing.assert_allclose(detections['detection_keypoint_scores'][0][0], expected_kpt_scores_for_obj_0, rtol=1e-6) # First detection has DensePose parts. self.assertSameElements( np.unique(detections['detection_masks'][0, 0, :, :]), set([0, dp_part_ind + 1])) self.assertGreater(np.sum(np.abs(detections['detection_surface_coords'])), 0.0) else: # All keypoint outputs should be zeros. np.testing.assert_allclose( detections['detection_keypoints'][0][0], np.zeros([num_keypoints, 2], np.float), rtol=1e-6) np.testing.assert_allclose( detections['detection_keypoint_scores'][0][0], np.zeros([num_keypoints], np.float), rtol=1e-6) # Binary segmentation mask. self.assertSameElements( np.unique(detections['detection_masks'][0, 0, :, :]), set([0, 1])) # No DensePose surface coordinates. np.testing.assert_allclose( detections['detection_surface_coords'][0, 0, :, :], np.zeros_like(detections['detection_surface_coords'][0, 0, :, :])) def test_postprocess_kpts_no_od(self): """Test the postprocess function.""" target_class_id = 1 model = build_center_net_meta_arch(keypoint_only=True) max_detection = model._center_params.max_box_predictions num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices) class_center = np.zeros((1, 32, 32, 10), dtype=np.float32) keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32) keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2) class_probs = np.ones(10) * _logit(0.25) class_probs[target_class_id] = _logit(0.75) class_center[0, 16, 16] = class_probs keypoint_regression[0, 16, 16] = [ -1., -1., -1., 1., 1., -1., 1., 1.] keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9) keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9) keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9) keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score. 
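    # Added note (illustrative): `_logit` is the inverse of the sigmoid (up to
    # a small epsilon), so heatmap cells filled with _logit(p) are expected to
    # come back out of the sigmoid applied during postprocessing as
    # probability ~p; the expected scores of 0.75 and 0.9 in these tests rely
    # on exactly that round trip.
    assert abs(1. / (1. + np.exp(-_logit(0.9))) - 0.9) < 1e-4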
class_center = tf.constant(class_center) keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32) keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32) keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32) prediction_dict = { cnma.OBJECT_CENTER: [class_center], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [keypoint_heatmaps], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [keypoint_offsets], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [keypoint_regression], } # def graph_fn(): detections = model.postprocess(prediction_dict, tf.constant([[128, 128, 3]])) # return detections # detections = self.execute_cpu(graph_fn, []) self.assertAllClose(detections['detection_scores'][0], [.75, .5, .5, .5, .5]) expected_multiclass_scores = [.25] * 10 expected_multiclass_scores[target_class_id] = .75 self.assertAllClose(expected_multiclass_scores, detections['detection_multiclass_scores'][0][0]) self.assertEqual(detections['detection_classes'][0, 0], target_class_id) self.assertEqual(detections['num_detections'], [5]) self.assertAllEqual([1, max_detection, num_keypoints, 2], detections['detection_keypoints'].shape) self.assertAllEqual([1, max_detection, num_keypoints], detections['detection_keypoint_scores'].shape) def test_non_max_suppression(self): """Tests application of NMS on CenterNet detections.""" target_class_id = 1 model = build_center_net_meta_arch(apply_non_max_suppression=True, detection_only=True) class_center = np.zeros((1, 32, 32, 10), dtype=np.float32) height_width = np.zeros((1, 32, 32, 2), dtype=np.float32) offset = np.zeros((1, 32, 32, 2), dtype=np.float32) class_probs = np.ones(10) * _logit(0.25) class_probs[target_class_id] = _logit(0.75) class_center[0, 16, 16] = class_probs height_width[0, 16, 16] = [5, 10] offset[0, 16, 16] = [.25, .5] class_center = tf.constant(class_center) height_width = tf.constant(height_width) offset = tf.constant(offset) prediction_dict = { cnma.OBJECT_CENTER: [class_center], cnma.BOX_SCALE: [height_width], cnma.BOX_OFFSET: [offset], } def graph_fn(): detections = model.postprocess(prediction_dict, tf.constant([[128, 128, 3]])) return detections detections = self.execute_cpu(graph_fn, []) num_detections = int(detections['num_detections']) self.assertEqual(num_detections, 1) self.assertAllClose(detections['detection_boxes'][0, 0], np.array([55, 46, 75, 86]) / 128.0) self.assertAllClose(detections['detection_scores'][0][:num_detections], [.75]) expected_multiclass_scores = [.25] * 10 expected_multiclass_scores[target_class_id] = .75 self.assertAllClose(expected_multiclass_scores, detections['detection_multiclass_scores'][0][0]) def test_postprocess_single_class(self): """Test the postprocess function.""" model = build_center_net_meta_arch(num_classes=1) max_detection = model._center_params.max_box_predictions num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices) class_center = np.zeros((1, 32, 32, 1), dtype=np.float32) height_width = np.zeros((1, 32, 32, 2), dtype=np.float32) offset = np.zeros((1, 32, 32, 2), dtype=np.float32) keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32) keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2) class_probs = np.zeros(1) class_probs[0] = _logit(0.75) class_center[0, 16, 16] = class_probs height_width[0, 16, 16] = [5, 10] offset[0, 16, 16] = [.25, .5] keypoint_regression[0, 16, 16] = [ -1., -1., -1., 1., 1., -1., 1., 1.] 
keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9) keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9) keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9) keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score. class_center = tf.constant(class_center) height_width = tf.constant(height_width) offset = tf.constant(offset) keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32) keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32) keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32) prediction_dict = { cnma.OBJECT_CENTER: [class_center], cnma.BOX_SCALE: [height_width], cnma.BOX_OFFSET: [offset], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [keypoint_heatmaps], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [keypoint_offsets], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [keypoint_regression], } def graph_fn(): detections = model.postprocess(prediction_dict, tf.constant([[128, 128, 3]])) return detections detections = self.execute_cpu(graph_fn, []) self.assertAllClose(detections['detection_boxes'][0, 0], np.array([55, 46, 75, 86]) / 128.0) self.assertAllClose(detections['detection_scores'][0], [.75, .5, .5, .5, .5]) self.assertEqual(detections['detection_classes'][0, 0], 0) self.assertEqual(detections['num_detections'], [5]) self.assertAllEqual([1, max_detection, num_keypoints, 2], detections['detection_keypoints'].shape) self.assertAllEqual([1, max_detection, num_keypoints], detections['detection_keypoint_scores'].shape) def test_postprocess_single_instance(self): """Test the postprocess single instance function.""" model = build_center_net_meta_arch( num_classes=1, candidate_ranking_mode='score_distance_ratio') num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices) class_center = np.zeros((1, 32, 32, 1), dtype=np.float32) keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32) keypoint_offsets = np.zeros( (1, 32, 32, num_keypoints * 2), dtype=np.float32) keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2) class_probs = np.zeros(1) class_probs[0] = _logit(0.75) class_center[0, 16, 16] = class_probs keypoint_regression[0, 16, 16] = [ -1., -1., -1., 1., 1., -1., 1., 1.] keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9) keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9) keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9) keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score. 
class_center = tf.constant(class_center) keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32) keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32) keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32) prediction_dict = { cnma.OBJECT_CENTER: [class_center], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [keypoint_heatmaps], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [keypoint_offsets], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [keypoint_regression], } def graph_fn(): detections = model.postprocess_single_instance_keypoints( prediction_dict, tf.constant([[128, 128, 3]])) return detections detections = self.execute_cpu(graph_fn, []) self.assertAllEqual([1, 1, num_keypoints, 2], detections['detection_keypoints'].shape) self.assertAllEqual([1, 1, num_keypoints], detections['detection_keypoint_scores'].shape) @parameterized.parameters( {'per_keypoint_depth': False}, {'per_keypoint_depth': True}, ) def test_postprocess_single_class_depth(self, per_keypoint_depth): """Test the postprocess function.""" model = build_center_net_meta_arch( num_classes=1, per_keypoint_offset=per_keypoint_depth, predict_depth=True, per_keypoint_depth=per_keypoint_depth) num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices) class_center = np.zeros((1, 32, 32, 1), dtype=np.float32) height_width = np.zeros((1, 32, 32, 2), dtype=np.float32) offset = np.zeros((1, 32, 32, 2), dtype=np.float32) keypoint_heatmaps = np.ones( (1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.001) keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2) class_probs = np.zeros(1) class_probs[0] = _logit(0.75) class_center[0, 16, 16] = class_probs height_width[0, 16, 16] = [5, 10] offset[0, 16, 16] = [.25, .5] keypoint_regression[0, 16, 16] = [-1., -1., -1., 1., 1., -1., 1., 1.] keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9) keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9) keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9) keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score. 
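    # Added note (illustrative, based on the expectations asserted below): with
    # per_keypoint_depth=True the depth map constructed next has one channel
    # per keypoint, and each keypoint appears to read its own channel at its
    # heatmap location; with per_keypoint_depth=False a single shared channel
    # is read at every keypoint location. Either way the three matched
    # keypoints should report depths -1.0, -1.1 and -1.2, while the unmatched
    # fourth keypoint (the low-score 0.05 peak) gets a depth of 0.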
if per_keypoint_depth: keypoint_depth = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32) keypoint_depth[0, 14, 14, 0] = -1.0 keypoint_depth[0, 14, 18, 1] = -1.1 keypoint_depth[0, 18, 14, 2] = -1.2 keypoint_depth[0, 18, 18, 3] = -1.3 else: keypoint_depth = np.zeros((1, 32, 32, 1), dtype=np.float32) keypoint_depth[0, 14, 14, 0] = -1.0 keypoint_depth[0, 14, 18, 0] = -1.1 keypoint_depth[0, 18, 14, 0] = -1.2 keypoint_depth[0, 18, 18, 0] = -1.3 class_center = tf.constant(class_center) height_width = tf.constant(height_width) offset = tf.constant(offset) keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32) keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32) keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32) keypoint_depth = tf.constant(keypoint_depth, dtype=tf.float32) prediction_dict = { cnma.OBJECT_CENTER: [class_center], cnma.BOX_SCALE: [height_width], cnma.BOX_OFFSET: [offset], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [keypoint_heatmaps], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [keypoint_offsets], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [keypoint_regression], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_DEPTH): [keypoint_depth] } def graph_fn(): detections = model.postprocess(prediction_dict, tf.constant([[128, 128, 3]])) return detections detections = self.execute_cpu(graph_fn, []) self.assertAllClose(detections['detection_keypoint_depths'][0, 0], np.array([-1.0, -1.1, -1.2, 0.0])) self.assertAllClose(detections['detection_keypoint_scores'][0, 0], np.array([0.9, 0.9, 0.9, 0.1])) def test_get_instance_indices(self): classes = tf.constant([[0, 1, 2, 0], [2, 1, 2, 2]], dtype=tf.int32) num_detections = tf.constant([1, 3], dtype=tf.int32) batch_index = 1 class_id = 2 model = build_center_net_meta_arch() valid_indices = model._get_instance_indices( classes, num_detections, batch_index, class_id) self.assertAllEqual(valid_indices.numpy(), [0, 2]) def test_rescore_instances(self): feature_extractor = DummyFeatureExtractor( channel_means=(1.0, 2.0, 3.0), channel_stds=(10., 20., 30.), bgr_ordering=False, num_feature_outputs=2, stride=4) image_resizer_fn = functools.partial( preprocessor.resize_to_range, min_dimension=128, max_dimension=128, pad_to_max_dimesnion=True) kp_params_1 = cnma.KeypointEstimationParams( task_name='kpt_task_1', class_id=0, keypoint_indices=[0, 1, 2], keypoint_std_dev=[0.00001] * 3, classification_loss=losses.WeightedSigmoidClassificationLoss(), localization_loss=losses.L1LocalizationLoss(), keypoint_candidate_score_threshold=0.1, rescore_instances=True) # Note rescoring for class_id = 0. 
kp_params_2 = cnma.KeypointEstimationParams( task_name='kpt_task_2', class_id=1, keypoint_indices=[3, 4], keypoint_std_dev=[0.00001] * 2, classification_loss=losses.WeightedSigmoidClassificationLoss(), localization_loss=losses.L1LocalizationLoss(), keypoint_candidate_score_threshold=0.1, rescore_instances=False) model = cnma.CenterNetMetaArch( is_training=True, add_summaries=False, num_classes=2, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=get_fake_center_params(), object_detection_params=get_fake_od_params(), keypoint_params_dict={ 'kpt_task_1': kp_params_1, 'kpt_task_2': kp_params_2, }) def graph_fn(): classes = tf.constant([[1, 0]], dtype=tf.int32) scores = tf.constant([[0.5, 0.75]], dtype=tf.float32) keypoint_scores = tf.constant( [ [[0.1, 0.0, 0.3, 0.4, 0.5], [0.1, 0.2, 0.3, 0.4, 0.5]], ]) new_scores = model._rescore_instances(classes, scores, keypoint_scores) return new_scores new_scores = self.execute_cpu(graph_fn, []) expected_scores = np.array( [[0.5, 0.75 * (0.1 + 0.3)/2]] ) self.assertAllClose(expected_scores, new_scores) def get_fake_prediction_dict(input_height, input_width, stride, per_keypoint_depth=False): """Prepares the fake prediction dictionary.""" output_height = input_height // stride output_width = input_width // stride object_center = np.zeros((2, output_height, output_width, _NUM_CLASSES), dtype=np.float32) # Box center: # y: floor((0.54 + 0.56) / 2 * 4) = 2, # x: floor((0.54 + 0.56) / 2 * 8) = 4 object_center[0, 2, 4, 1] = 1.0 object_center = _logit(object_center) # Box size: # height: (0.56 - 0.54) * 4 = 0.08 # width: (0.56 - 0.54) * 8 = 0.16 object_scale = np.zeros((2, output_height, output_width, 2), dtype=np.float32) object_scale[0, 2, 4] = 0.08, 0.16 # Box center offset coordinate (0.55, 0.55): # y-offset: 0.55 * 4 - 2 = 0.2 # x-offset: 0.55 * 8 - 4 = 0.4 object_offset = np.zeros((2, output_height, output_width, 2), dtype=np.float32) object_offset[0, 2, 4] = 0.2, 0.4 keypoint_heatmap = np.zeros((2, output_height, output_width, _NUM_KEYPOINTS), dtype=np.float32) keypoint_heatmap[0, 2, 4, 1] = 1.0 keypoint_heatmap[0, 2, 4, 3] = 1.0 keypoint_heatmap = _logit(keypoint_heatmap) keypoint_offset = np.zeros((2, output_height, output_width, 2), dtype=np.float32) keypoint_offset[0, 2, 4] = 0.2, 0.4 keypoint_depth = np.zeros((2, output_height, output_width, _NUM_KEYPOINTS if per_keypoint_depth else 1), dtype=np.float32) keypoint_depth[0, 2, 4] = 3.0 keypoint_regression = np.zeros( (2, output_height, output_width, 2 * _NUM_KEYPOINTS), dtype=np.float32) keypoint_regression[0, 2, 4] = 0.0, 0.0, 0.2, 0.4, 0.0, 0.0, 0.2, 0.4 mask_heatmap = np.zeros((2, output_height, output_width, _NUM_CLASSES), dtype=np.float32) mask_heatmap[0, 2, 4, 1] = 1.0 mask_heatmap = _logit(mask_heatmap) densepose_heatmap = np.zeros((2, output_height, output_width, _DENSEPOSE_NUM_PARTS), dtype=np.float32) densepose_heatmap[0, 2, 4, 5] = 1.0 densepose_heatmap = _logit(densepose_heatmap) densepose_regression = np.zeros((2, output_height, output_width, 2 * _DENSEPOSE_NUM_PARTS), dtype=np.float32) # The surface coordinate indices for part index 5 are: # (5 * 2, 5 * 2 + 1), or (10, 11). 
densepose_regression[0, 2, 4, 10:12] = 0.4, 0.7 track_reid_embedding = np.zeros((2, output_height, output_width, _REID_EMBED_SIZE), dtype=np.float32) track_reid_embedding[0, 2, 4, :] = np.arange(_REID_EMBED_SIZE) temporal_offsets = np.zeros((2, output_height, output_width, 2), dtype=np.float32) temporal_offsets[0, 2, 4, :] = 5 prediction_dict = { 'preprocessed_inputs': tf.zeros((2, input_height, input_width, 3)), cnma.OBJECT_CENTER: [ tf.constant(object_center), tf.constant(object_center) ], cnma.BOX_SCALE: [tf.constant(object_scale), tf.constant(object_scale)], cnma.BOX_OFFSET: [tf.constant(object_offset), tf.constant(object_offset)], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [ tf.constant(keypoint_heatmap), tf.constant(keypoint_heatmap) ], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [ tf.constant(keypoint_offset), tf.constant(keypoint_offset) ], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [ tf.constant(keypoint_regression), tf.constant(keypoint_regression) ], cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_DEPTH): [ tf.constant(keypoint_depth), tf.constant(keypoint_depth) ], cnma.SEGMENTATION_HEATMAP: [ tf.constant(mask_heatmap), tf.constant(mask_heatmap) ], cnma.DENSEPOSE_HEATMAP: [ tf.constant(densepose_heatmap), tf.constant(densepose_heatmap), ], cnma.DENSEPOSE_REGRESSION: [ tf.constant(densepose_regression), tf.constant(densepose_regression), ], cnma.TRACK_REID: [ tf.constant(track_reid_embedding), tf.constant(track_reid_embedding), ], cnma.TEMPORAL_OFFSET: [ tf.constant(temporal_offsets), tf.constant(temporal_offsets), ], } return prediction_dict def get_fake_groundtruth_dict(input_height, input_width, stride, has_depth=False): """Prepares the fake groundtruth dictionary.""" # A small box with center at (0.55, 0.55). 
boxes = [ tf.constant([[0.54, 0.54, 0.56, 0.56]]), tf.constant([[0.0, 0.0, 0.5, 0.5]]), ] classes = [ tf.one_hot([1], depth=_NUM_CLASSES), tf.one_hot([0], depth=_NUM_CLASSES), ] weights = [ tf.constant([1.]), tf.constant([0.]), ] keypoints = [ tf.tile( tf.expand_dims( tf.constant([[float('nan'), 0.55, float('nan'), 0.55, 0.55, 0.0]]), axis=2), multiples=[1, 1, 2]), tf.tile( tf.expand_dims( tf.constant([[float('nan'), 0.55, float('nan'), 0.55, 0.55, 0.0]]), axis=2), multiples=[1, 1, 2]), ] if has_depth: keypoint_depths = [ tf.constant([[float('nan'), 3.0, float('nan'), 3.0, 0.55, 0.0]]), tf.constant([[float('nan'), 0.55, float('nan'), 0.55, 0.55, 0.0]]) ] keypoint_depth_weights = [ tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0]]), tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0]]) ] else: keypoint_depths = [ tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]), tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) ] keypoint_depth_weights = [ tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]), tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) ] labeled_classes = [ tf.one_hot([1], depth=_NUM_CLASSES) + tf.one_hot([2], depth=_NUM_CLASSES), tf.one_hot([0], depth=_NUM_CLASSES) + tf.one_hot([1], depth=_NUM_CLASSES), ] mask = np.zeros((1, input_height, input_width), dtype=np.float32) mask[0, 8:8+stride, 16:16+stride] = 1 masks = [ tf.constant(mask), tf.zeros_like(mask), ] densepose_num_points = [ tf.constant([1], dtype=tf.int32), tf.constant([0], dtype=tf.int32), ] densepose_part_ids = [ tf.constant([[5, 0, 0]], dtype=tf.int32), tf.constant([[0, 0, 0]], dtype=tf.int32), ] densepose_surface_coords_np = np.zeros((1, 3, 4), dtype=np.float32) densepose_surface_coords_np[0, 0, :] = 0.55, 0.55, 0.4, 0.7 densepose_surface_coords = [ tf.constant(densepose_surface_coords_np), tf.zeros_like(densepose_surface_coords_np) ] track_ids = [ tf.constant([2], dtype=tf.int32), tf.constant([1], dtype=tf.int32), ] temporal_offsets = [ tf.constant([[5.0, 5.0]], dtype=tf.float32), tf.constant([[2.0, 3.0]], dtype=tf.float32), ] track_match_flags = [ tf.constant([1.0], dtype=tf.float32), tf.constant([1.0], dtype=tf.float32), ] groundtruth_dict = { fields.BoxListFields.boxes: boxes, fields.BoxListFields.weights: weights, fields.BoxListFields.classes: classes, fields.BoxListFields.keypoints: keypoints, fields.BoxListFields.keypoint_depths: keypoint_depths, fields.BoxListFields.keypoint_depth_weights: keypoint_depth_weights, fields.BoxListFields.masks: masks, fields.BoxListFields.densepose_num_points: densepose_num_points, fields.BoxListFields.densepose_part_ids: densepose_part_ids, fields.BoxListFields.densepose_surface_coords: densepose_surface_coords, fields.BoxListFields.track_ids: track_ids, fields.BoxListFields.temporal_offsets: temporal_offsets, fields.BoxListFields.track_match_flags: track_match_flags, fields.InputDataFields.groundtruth_labeled_classes: labeled_classes, } return groundtruth_dict @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetMetaComputeLossTest(test_case.TestCase, parameterized.TestCase): """Test for CenterNet loss compuation related functions.""" def setUp(self): self.model = build_center_net_meta_arch() self.classification_loss_fn = self.model._center_params.classification_loss self.localization_loss_fn = self.model._od_params.localization_loss self.true_image_shapes = tf.constant([[16, 24, 3], [16, 24, 3]]) self.input_height = 16 self.input_width = 32 self.stride = 4 self.per_pixel_weights = self.get_per_pixel_weights(self.true_image_shapes, self.input_height, self.input_width, self.stride) 
self.prediction_dict = get_fake_prediction_dict(self.input_height, self.input_width, self.stride) self.model._groundtruth_lists = get_fake_groundtruth_dict( self.input_height, self.input_width, self.stride) super(CenterNetMetaComputeLossTest, self).setUp() def get_per_pixel_weights(self, true_image_shapes, input_height, input_width, stride): output_height, output_width = (input_height // stride, input_width // stride) # TODO(vighneshb) Explore whether using floor here is safe. output_true_image_shapes = tf.ceil(tf.to_float(true_image_shapes) / stride) per_pixel_weights = cnma.get_valid_anchor_weights_in_flattened_image( output_true_image_shapes, output_height, output_width) per_pixel_weights = tf.expand_dims(per_pixel_weights, 2) return per_pixel_weights def test_compute_object_center_loss(self): def graph_fn(): loss = self.model._compute_object_center_loss( object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER], input_height=self.input_height, input_width=self.input_width, per_pixel_weights=self.per_pixel_weights) return loss loss = self.execute(graph_fn, []) # The prediction and groundtruth are curated to produce very low loss. self.assertGreater(0.01, loss) default_value = self.model._center_params.use_labeled_classes self.model._center_params = ( self.model._center_params._replace(use_labeled_classes=True)) loss = self.model._compute_object_center_loss( object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER], input_height=self.input_height, input_width=self.input_width, per_pixel_weights=self.per_pixel_weights) self.model._center_params = ( self.model._center_params._replace(use_labeled_classes=default_value)) # The prediction and groundtruth are curated to produce very low loss. self.assertGreater(0.01, loss) def test_compute_box_scale_and_offset_loss(self): def graph_fn(): scale_loss, offset_loss = self.model._compute_box_scale_and_offset_loss( scale_predictions=self.prediction_dict[cnma.BOX_SCALE], offset_predictions=self.prediction_dict[cnma.BOX_OFFSET], input_height=self.input_height, input_width=self.input_width) return scale_loss, offset_loss scale_loss, offset_loss = self.execute(graph_fn, []) # The prediction and groundtruth are curated to produce very low loss. self.assertGreater(0.01, scale_loss) self.assertGreater(0.01, offset_loss) def test_compute_kp_heatmap_loss(self): def graph_fn(): loss = self.model._compute_kp_heatmap_loss( input_height=self.input_height, input_width=self.input_width, task_name=_TASK_NAME, heatmap_predictions=self.prediction_dict[cnma.get_keypoint_name( _TASK_NAME, cnma.KEYPOINT_HEATMAP)], classification_loss_fn=self.classification_loss_fn, per_pixel_weights=self.per_pixel_weights) return loss loss = self.execute(graph_fn, []) # The prediction and groundtruth are curated to produce very low loss. self.assertGreater(0.01, loss) def test_compute_kp_offset_loss(self): def graph_fn(): loss = self.model._compute_kp_offset_loss( input_height=self.input_height, input_width=self.input_width, task_name=_TASK_NAME, offset_predictions=self.prediction_dict[cnma.get_keypoint_name( _TASK_NAME, cnma.KEYPOINT_OFFSET)], localization_loss_fn=self.localization_loss_fn) return loss loss = self.execute(graph_fn, []) # The prediction and groundtruth are curated to produce very low loss. 
self.assertGreater(0.01, loss) def test_compute_kp_regression_loss(self): def graph_fn(): loss = self.model._compute_kp_regression_loss( input_height=self.input_height, input_width=self.input_width, task_name=_TASK_NAME, regression_predictions=self.prediction_dict[cnma.get_keypoint_name( _TASK_NAME, cnma.KEYPOINT_REGRESSION,)], localization_loss_fn=self.localization_loss_fn) return loss loss = self.execute(graph_fn, []) # The prediction and groundtruth are curated to produce very low loss. self.assertGreater(0.01, loss) @parameterized.parameters( {'per_keypoint_depth': False}, {'per_keypoint_depth': True}, ) def test_compute_kp_depth_loss(self, per_keypoint_depth): prediction_dict = get_fake_prediction_dict( self.input_height, self.input_width, self.stride, per_keypoint_depth=per_keypoint_depth) model = build_center_net_meta_arch( num_classes=1, per_keypoint_offset=per_keypoint_depth, predict_depth=True, per_keypoint_depth=per_keypoint_depth, peak_radius=1 if per_keypoint_depth else 0) model._groundtruth_lists = get_fake_groundtruth_dict( self.input_height, self.input_width, self.stride, has_depth=True) def graph_fn(): loss = model._compute_kp_depth_loss( input_height=self.input_height, input_width=self.input_width, task_name=_TASK_NAME, depth_predictions=prediction_dict[cnma.get_keypoint_name( _TASK_NAME, cnma.KEYPOINT_DEPTH)], localization_loss_fn=self.localization_loss_fn) return loss loss = self.execute(graph_fn, []) if per_keypoint_depth: # The loss is computed on a disk with radius 1 but only the center pixel # has the accurate prediction. The final loss is (4 * |3-0|) / 5 = 2.4 self.assertAlmostEqual(2.4, loss, delta=1e-4) else: # The prediction and groundtruth are curated to produce very low loss. self.assertGreater(0.01, loss) def test_compute_track_embedding_loss(self): default_fc = self.model.track_reid_classification_net # Initialize the kernel to extreme values so that the classification score # is close to (0, 0, 1) after the softmax layer. kernel_initializer = tf.constant_initializer( [[1, 1, 0], [-1000000, -1000000, 1000000]]) self.model.track_reid_classification_net = tf.keras.layers.Dense( _NUM_TRACK_IDS, kernel_initializer=kernel_initializer, input_shape=(_REID_EMBED_SIZE,)) loss = self.model._compute_track_embedding_loss( input_height=self.input_height, input_width=self.input_width, object_reid_predictions=self.prediction_dict[cnma.TRACK_REID]) self.model.track_reid_classification_net = default_fc # The prediction and groundtruth are curated to produce very low loss. 
self.assertGreater(0.01, loss) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetMetaArchRestoreTest(test_case.TestCase): def test_restore_map_resnet(self): """Test restore map for a resnet backbone.""" model = build_center_net_meta_arch(build_resnet=True) restore_from_objects_map = model.restore_from_objects('classification') self.assertIsInstance(restore_from_objects_map['feature_extractor'], tf.keras.Model) def test_retore_map_detection(self): """Test that detection checkpoints can be restored.""" model = build_center_net_meta_arch(build_resnet=True) restore_from_objects_map = model.restore_from_objects('detection') self.assertIsInstance(restore_from_objects_map['model']._feature_extractor, tf.keras.Model) class DummyFeatureExtractor(cnma.CenterNetFeatureExtractor): def __init__(self, channel_means, channel_stds, bgr_ordering, num_feature_outputs, stride): self._num_feature_outputs = num_feature_outputs self._stride = stride super(DummyFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def predict(self): pass def loss(self): pass def postprocess(self): pass def call(self, inputs): batch_size, input_height, input_width, _ = inputs.shape fake_output = tf.ones([ batch_size, input_height // self._stride, input_width // self._stride, 64 ], dtype=tf.float32) return [fake_output] * self._num_feature_outputs @property def out_stride(self): return self._stride @property def num_feature_outputs(self): return self._num_feature_outputs @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetFeatureExtractorTest(test_case.TestCase): """Test the base feature extractor class.""" def test_preprocess(self): feature_extractor = DummyFeatureExtractor( channel_means=(1.0, 2.0, 3.0), channel_stds=(10., 20., 30.), bgr_ordering=False, num_feature_outputs=2, stride=4) img = np.zeros((2, 32, 32, 3)) img[:, :, :] = 11, 22, 33 def graph_fn(): output = feature_extractor.preprocess(img) return output output = self.execute(graph_fn, []) self.assertAlmostEqual(output.sum(), 2 * 32 * 32 * 3) def test_bgr_ordering(self): feature_extractor = DummyFeatureExtractor( channel_means=(0.0, 0.0, 0.0), channel_stds=(1., 1., 1.), bgr_ordering=True, num_feature_outputs=2, stride=4) img = np.zeros((2, 32, 32, 3), dtype=np.float32) img[:, :, :] = 1, 2, 3 def graph_fn(): output = feature_extractor.preprocess(img) return output output = self.execute(graph_fn, []) self.assertAllClose(output[..., 2], 1 * np.ones((2, 32, 32))) self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32))) self.assertAllClose(output[..., 0], 3 * np.ones((2, 32, 32))) def test_default_ordering(self): feature_extractor = DummyFeatureExtractor( channel_means=(0.0, 0.0, 0.0), channel_stds=(1., 1., 1.), bgr_ordering=False, num_feature_outputs=2, stride=4) img = np.zeros((2, 32, 32, 3), dtype=np.float32) img[:, :, :] = 1, 2, 3 def graph_fn(): output = feature_extractor.preprocess(img) return output output = self.execute(graph_fn, []) self.assertAllClose(output[..., 0], 1 * np.ones((2, 32, 32))) self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32))) self.assertAllClose(output[..., 2], 3 * np.ones((2, 32, 32))) class Dummy1dFeatureExtractor(cnma.CenterNetFeatureExtractor): """Returns a static tensor.""" def __init__(self, tensor, out_stride=1, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Intializes the feature extractor. Args: tensor: The tensor to return as the processed feature. 
      out_stride: The out_stride to return if asked.
      channel_means: Ignored, but provided for API compatibility.
      channel_stds: Ignored, but provided for API compatibility.
      bgr_ordering: Ignored, but provided for API compatibility.
    """
    super().__init__(
        channel_means=channel_means, channel_stds=channel_stds,
        bgr_ordering=bgr_ordering)
    self._tensor = tensor
    self._out_stride = out_stride

  def call(self, inputs):
    return [self._tensor]

  @property
  def out_stride(self):
    """The stride in the output image of the network."""
    return self._out_stride

  @property
  def num_feature_outputs(self):
    """The number of feature outputs returned by the feature extractor."""
    return 1

  @property
  def supported_sub_model_types(self):
    return ['detection']

  def get_sub_model(self, sub_model_type):
    if sub_model_type == 'detection':
      return self._network
    else:
      raise ValueError(
          'Sub model type "{}" not supported.'.format(sub_model_type))


@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArch1dTest(test_case.TestCase, parameterized.TestCase):

  @parameterized.parameters([1, 2])
  def test_outputs_with_correct_shape(self, stride):
    # The 1D case reuses code from the 2D cases. These tests only check that
    # the output shapes are correct, and rely on other tests for correctness.
    batch_size = 2
    height = 1
    width = 32
    channels = 16
    unstrided_inputs = np.random.randn(
        batch_size, height, width, channels)
    fixed_output_features = np.random.randn(
        batch_size, height, width // stride, channels)
    max_boxes = 10
    num_classes = 3
    feature_extractor = Dummy1dFeatureExtractor(fixed_output_features, stride)
    arch = cnma.CenterNetMetaArch(
        is_training=True,
        add_summaries=True,
        num_classes=num_classes,
        feature_extractor=feature_extractor,
        image_resizer_fn=None,
        object_center_params=cnma.ObjectCenterParams(
            classification_loss=losses.PenaltyReducedLogisticFocalLoss(),
            object_center_loss_weight=1.0,
            max_box_predictions=max_boxes,
        ),
        object_detection_params=cnma.ObjectDetectionParams(
            localization_loss=losses.L1LocalizationLoss(),
            scale_loss_weight=1.0,
            offset_loss_weight=1.0,
        ),
        keypoint_params_dict=None,
        mask_params=None,
        densepose_params=None,
        track_params=None,
        temporal_offset_params=None,
        use_depthwise=False,
        compute_heatmap_sparse=False,
        non_max_suppression_fn=None,
        unit_height_conv=True)
    arch.provide_groundtruth(
        groundtruth_boxes_list=[
            tf.constant([[0, 0.5, 1.0, 0.75], [0, 0.1, 1.0, 0.25]],
                        tf.float32),
            tf.constant([[0, 0, 1.0, 1.0], [0, 0, 0.0, 0.0]], tf.float32)
        ],
        groundtruth_classes_list=[
            tf.constant([[0, 0, 1], [0, 1, 0]], tf.float32),
            tf.constant([[1, 0, 0], [0, 0, 0]], tf.float32)
        ],
        groundtruth_weights_list=[
            tf.constant([1.0, 1.0]), tf.constant([1.0, 0.0])]
    )
    predictions = arch.predict(None, None)  # input is hardcoded above.
predictions['preprocessed_inputs'] = tf.constant(unstrided_inputs) true_shapes = tf.constant([[1, 32, 16], [1, 24, 16]], tf.int32) postprocess_output = arch.postprocess(predictions, true_shapes) losses_output = arch.loss(predictions, true_shapes) self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER), losses_output) self.assertEqual((), losses_output['%s/%s' % ( cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER)].shape) self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE), losses_output) self.assertEqual((), losses_output['%s/%s' % ( cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE)].shape) self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET), losses_output) self.assertEqual((), losses_output['%s/%s' % ( cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET)].shape) self.assertIn('detection_scores', postprocess_output) self.assertEqual(postprocess_output['detection_scores'].shape, (batch_size, max_boxes)) self.assertIn('detection_multiclass_scores', postprocess_output) self.assertEqual(postprocess_output['detection_multiclass_scores'].shape, (batch_size, max_boxes, num_classes)) self.assertIn('detection_classes', postprocess_output) self.assertEqual(postprocess_output['detection_classes'].shape, (batch_size, max_boxes)) self.assertIn('num_detections', postprocess_output) self.assertEqual(postprocess_output['num_detections'].shape, (batch_size,)) self.assertIn('detection_boxes', postprocess_output) self.assertEqual(postprocess_output['detection_boxes'].shape, (batch_size, max_boxes, 4)) self.assertIn('detection_boxes_strided', postprocess_output) self.assertEqual(postprocess_output['detection_boxes_strided'].shape, (batch_size, max_boxes, 4)) self.assertIn(cnma.OBJECT_CENTER, predictions) self.assertEqual(predictions[cnma.OBJECT_CENTER][0].shape, (batch_size, height, width // stride, num_classes)) self.assertIn(cnma.BOX_SCALE, predictions) self.assertEqual(predictions[cnma.BOX_SCALE][0].shape, (batch_size, height, width // stride, 2)) self.assertIn(cnma.BOX_OFFSET, predictions) self.assertEqual(predictions[cnma.BOX_OFFSET][0].shape, (batch_size, height, width // stride, 2)) self.assertIn('preprocessed_inputs', predictions) if __name__ == '__main__': tf.enable_v2_behavior() tf.test.main()
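# Illustration only (not a test case): a minimal sketch of the channel
# normalization arithmetic behind test_preprocess above. The base feature
# extractor computes (inputs - channel_means) / channel_stds, so with means
# (1, 2, 3), stds (10, 20, 30) and constant pixels (11, 22, 33) every
# normalized value is exactly 1 and the sum over a [2, 32, 32, 3] batch is
# 2 * 32 * 32 * 3, which is the value the test asserts.


def _channel_normalization_sketch():
  means = np.array([1.0, 2.0, 3.0])
  stds = np.array([10.0, 20.0, 30.0])
  img = np.zeros((2, 32, 32, 3))
  img[:, :, :] = 11, 22, 33
  normalized = (img - means) / stds
  assert normalized.sum() == 2 * 32 * 32 * 3
  return normalized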
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py
center_net_meta_arch_tf2_test.py
# Lint as: python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for context_rcnn_lib.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from absl.testing import parameterized import tensorflow.compat.v1 as tf from object_detection.meta_architectures import context_rcnn_lib_tf2 as context_rcnn_lib from object_detection.utils import test_case from object_detection.utils import tf_version _NEGATIVE_PADDING_VALUE = -100000 @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ContextRcnnLibTest(parameterized.TestCase, test_case.TestCase): """Tests for the functions in context_rcnn_lib.""" def test_compute_valid_mask(self): num_elements = tf.constant(3, tf.int32) num_valid_elementss = tf.constant((1, 2), tf.int32) valid_mask = context_rcnn_lib.compute_valid_mask(num_valid_elementss, num_elements) expected_valid_mask = tf.constant([[1, 0, 0], [1, 1, 0]], tf.float32) self.assertAllEqual(valid_mask, expected_valid_mask) def test_filter_weight_value(self): weights = tf.ones((2, 3, 2), tf.float32) * 4 values = tf.ones((2, 2, 4), tf.float32) valid_mask = tf.constant([[True, True], [True, False]], tf.bool) filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( weights, values, valid_mask) expected_weights = tf.constant([[[4, 4], [4, 4], [4, 4]], [[4, _NEGATIVE_PADDING_VALUE + 4], [4, _NEGATIVE_PADDING_VALUE + 4], [4, _NEGATIVE_PADDING_VALUE + 4]]]) expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [0, 0, 0, 0]]]) self.assertAllEqual(filtered_weights, expected_weights) self.assertAllEqual(filtered_values, expected_values) # Changes the valid_mask so the results will be different. valid_mask = tf.constant([[True, True], [False, False]], tf.bool) filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( weights, values, valid_mask) expected_weights = tf.constant( [[[4, 4], [4, 4], [4, 4]], [[_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4], [_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4], [_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4]]]) expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0]]]) self.assertAllEqual(filtered_weights, expected_weights) self.assertAllEqual(filtered_values, expected_values) @parameterized.parameters((2, True, True), (2, False, True), (10, True, False), (10, False, False)) def test_project_features(self, projection_dimension, is_training, normalize): features = tf.ones([2, 3, 4], tf.float32) projected_features = context_rcnn_lib.project_features( features, projection_dimension, is_training, context_rcnn_lib.ContextProjection(projection_dimension), normalize=normalize) # Makes sure the shape is correct. 
self.assertAllEqual(projected_features.shape, [2, 3, projection_dimension]) @parameterized.parameters( (2, 10, 1), (3, 10, 2), (4, None, 3), (5, 20, 4), (7, None, 5), ) def test_attention_block(self, bottleneck_dimension, output_dimension, attention_temperature): input_features = tf.ones([2 * 8, 3, 3, 3], tf.float32) context_features = tf.ones([2, 20, 10], tf.float32) num_proposals = tf.convert_to_tensor([6, 3]) attention_block = context_rcnn_lib.AttentionBlock( bottleneck_dimension, attention_temperature, output_dimension=output_dimension, is_training=False, max_num_proposals=8) valid_context_size = tf.random_uniform((2,), minval=0, maxval=10, dtype=tf.int32) output_features = attention_block(input_features, context_features, valid_context_size, num_proposals) # Makes sure the shape is correct. self.assertAllEqual(output_features.shape, [2, 8, 1, 1, (output_dimension or 3)]) if __name__ == '__main__': tf.test.main()
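# Illustration only (not a test case): why filter_weight_value offsets invalid
# attention weights by _NEGATIVE_PADDING_VALUE instead of zeroing them.
# exp(-100000) underflows to 0, so padded context slots receive (numerically)
# zero probability after the softmax, whereas a raw weight of 0 would still
# contribute exp(0) = 1 to the softmax normalization.


def _negative_padding_softmax_sketch():
  # The last context slot is padding; its weight carries the large negative
  # offset, mirroring the expected values in test_filter_weight_value above.
  weights = tf.constant([4., 4., 4. + _NEGATIVE_PADDING_VALUE])
  return tf.nn.softmax(weights)  # approximately [0.5, 0.5, 0.0]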
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_lib_tf2_test.py
context_rcnn_lib_tf2_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.meta_architectures.rfcn_meta_arch.""" import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib from object_detection.meta_architectures import rfcn_meta_arch class RFCNMetaArchTest( faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase): def _get_second_stage_box_predictor_text_proto( self, share_box_across_classes=False): del share_box_across_classes box_predictor_text_proto = """ rfcn_box_predictor { conv_hyperparams { op: CONV activation: NONE regularizer { l2_regularizer { weight: 0.0005 } } initializer { variance_scaling_initializer { factor: 1.0 uniform: true mode: FAN_AVG } } } } """ return box_predictor_text_proto def _get_model(self, box_predictor, **common_kwargs): return rfcn_meta_arch.RFCNMetaArch( second_stage_rfcn_box_predictor=box_predictor, **common_kwargs) def _get_box_classifier_features_shape(self, image_size, batch_size, max_num_proposals, initial_crop_size, maxpool_stride, num_features): return (batch_size, image_size, image_size, num_features) if __name__ == '__main__': tf.test.main()
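# Illustration only (not part of the test): a hedged sketch of how a box
# predictor text proto like the one returned by
# _get_second_stage_box_predictor_text_proto above is typically parsed into a
# config message. Assumes the generated object_detection/protos module
# box_predictor_pb2, which the wider package provides.


def _parse_box_predictor_text_proto_sketch(box_predictor_text_proto):
  from google.protobuf import text_format
  from object_detection.protos import box_predictor_pb2

  box_predictor_proto = box_predictor_pb2.BoxPredictor()
  text_format.Merge(box_predictor_text_proto, box_predictor_proto)
  return box_predictor_proto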
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/rfcn_meta_arch_test.py
rfcn_meta_arch_test.py
# Lint as: python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library functions for ContextRCNN.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf import tf_slim as slim # The negative value used in padding the invalid weights. _NEGATIVE_PADDING_VALUE = -100000 def filter_weight_value(weights, values, valid_mask): """Filters weights and values based on valid_mask. _NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to avoid their contribution in softmax. 0 will be set for the invalid elements in the values. Args: weights: A float Tensor of shape [batch_size, input_size, context_size]. values: A float Tensor of shape [batch_size, context_size, projected_dimension]. valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means valid and False means invalid. Returns: weights: A float Tensor of shape [batch_size, input_size, context_size]. values: A float Tensor of shape [batch_size, context_size, projected_dimension]. Raises: ValueError: If shape of doesn't match. """ w_batch_size, _, w_context_size = weights.shape v_batch_size, v_context_size, _ = values.shape m_batch_size, m_context_size = valid_mask.shape if w_batch_size != v_batch_size or v_batch_size != m_batch_size: raise ValueError("Please make sure the first dimension of the input" " tensors are the same.") if w_context_size != v_context_size: raise ValueError("Please make sure the third dimension of weights matches" " the second dimension of values.") if w_context_size != m_context_size: raise ValueError("Please make sure the third dimension of the weights" " matches the second dimension of the valid_mask.") valid_mask = valid_mask[..., tf.newaxis] # Force the invalid weights to be very negative so it won't contribute to # the softmax. very_negative_mask = tf.ones( weights.shape, dtype=weights.dtype) * _NEGATIVE_PADDING_VALUE valid_weight_mask = tf.tile(tf.transpose(valid_mask, perm=[0, 2, 1]), [1, weights.shape[1], 1]) weights = tf.where(valid_weight_mask, x=weights, y=very_negative_mask) # Force the invalid values to be 0. values *= tf.cast(valid_mask, values.dtype) return weights, values def compute_valid_mask(num_valid_elements, num_elements): """Computes mask of valid entries within padded context feature. Args: num_valid_elements: A int32 Tensor of shape [batch_size]. num_elements: An int32 Tensor. Returns: A boolean Tensor of the shape [batch_size, num_elements]. True means valid and False means invalid. 
""" batch_size = num_valid_elements.shape[0] element_idxs = tf.range(num_elements, dtype=tf.int32) batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1]) num_valid_elements = num_valid_elements[..., tf.newaxis] valid_mask = tf.less(batch_element_idxs, num_valid_elements) return valid_mask def project_features(features, projection_dimension, is_training, normalize): """Projects features to another feature space. Args: features: A float Tensor of shape [batch_size, features_size, num_features]. projection_dimension: A int32 Tensor. is_training: A boolean Tensor (affecting batch normalization). normalize: A boolean Tensor. If true, the output features will be l2 normalized on the last dimension. Returns: A float Tensor of shape [batch, features_size, projection_dimension]. """ # TODO(guanhangwu) Figure out a better way of specifying the batch norm # params. batch_norm_params = { "is_training": is_training, "decay": 0.97, "epsilon": 0.001, "center": True, "scale": True } batch_size, _, num_features = features.shape features = tf.reshape(features, [-1, num_features]) projected_features = slim.fully_connected( features, num_outputs=projection_dimension, activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params) projected_features = tf.reshape(projected_features, [batch_size, -1, projection_dimension]) if normalize: projected_features = tf.math.l2_normalize(projected_features, axis=-1) return projected_features def attention_block(input_features, context_features, bottleneck_dimension, output_dimension, attention_temperature, keys_values_valid_mask, queries_valid_mask, is_training, block_name="AttentionBlock"): """Generic attention block. Args: input_features: A float Tensor of shape [batch_size, input_size, num_input_features]. context_features: A float Tensor of shape [batch_size, context_size, num_context_features]. bottleneck_dimension: A int32 Tensor representing the bottleneck dimension for intermediate projections. output_dimension: A int32 Tensor representing the last dimension of the output feature. attention_temperature: A float Tensor. It controls the temperature of the softmax for weights calculation. The formula for calculation as follows: weights = exp(weights / temperature) / sum(exp(weights / temperature)) keys_values_valid_mask: A boolean Tensor of shape [batch_size, context_size]. queries_valid_mask: A boolean Tensor of shape [batch_size, max_num_proposals]. is_training: A boolean Tensor (affecting batch normalization). block_name: A string to specify names for different attention blocks Returns: A float Tensor of shape [batch_size, input_size, output_dimension]. 
""" with tf.variable_scope(block_name): queries = project_features( input_features, bottleneck_dimension, is_training, normalize=True) keys = project_features( context_features, bottleneck_dimension, is_training, normalize=True) values = project_features( context_features, bottleneck_dimension, is_training, normalize=True) # masking out any keys which are padding keys *= tf.cast(keys_values_valid_mask[..., tf.newaxis], keys.dtype) queries *= tf.cast(queries_valid_mask[..., tf.newaxis], queries.dtype) weights = tf.matmul(queries, keys, transpose_b=True) weights, values = filter_weight_value(weights, values, keys_values_valid_mask) weights = tf.identity(tf.nn.softmax(weights / attention_temperature), name=block_name+"AttentionWeights") features = tf.matmul(weights, values) output_features = project_features( features, output_dimension, is_training, normalize=False) return output_features def _compute_box_context_attention(box_features, num_proposals, context_features, valid_context_size, bottleneck_dimension, attention_temperature, is_training, max_num_proposals, use_self_attention=False, use_long_term_attention=True, self_attention_in_sequence=False, num_attention_heads=1, num_attention_layers=1): """Computes the attention feature from the context given a batch of box. Args: box_features: A float Tensor of shape [batch_size * max_num_proposals, height, width, channels]. It is pooled features from first stage proposals. num_proposals: The number of valid box proposals. context_features: A float Tensor of shape [batch_size, context_size, num_context_features]. valid_context_size: A int32 Tensor of shape [batch_size]. bottleneck_dimension: A int32 Tensor representing the bottleneck dimension for intermediate projections. attention_temperature: A float Tensor. It controls the temperature of the softmax for weights calculation. The formula for calculation as follows: weights = exp(weights / temperature) / sum(exp(weights / temperature)) is_training: A boolean Tensor (affecting batch normalization). max_num_proposals: The number of box proposals for each image. use_self_attention: Whether to use an attention block across the first stage predicted box features for the input image. use_long_term_attention: Whether to use an attention block into the context features. self_attention_in_sequence: Whether self-attention and long term attention should be in sequence or parallel. num_attention_heads: Number of heads for multi-headed attention. num_attention_layers: Number of heads for multi-layered attention. Returns: A float Tensor of shape [batch_size, max_num_proposals, 1, 1, channels]. """ _, context_size, _ = context_features.shape context_valid_mask = compute_valid_mask(valid_context_size, context_size) total_proposals, height, width, channels = box_features.shape batch_size = total_proposals // max_num_proposals box_features = tf.reshape( box_features, [batch_size, max_num_proposals, height, width, channels]) # Average pools over height and width dimension so that the shape of # box_features becomes [batch_size, max_num_proposals, channels]. 
box_features = tf.reduce_mean(box_features, [2, 3]) box_valid_mask = compute_valid_mask( num_proposals, box_features.shape[1]) if use_self_attention: self_attention_box_features = attention_block( box_features, box_features, bottleneck_dimension, channels.value, attention_temperature, keys_values_valid_mask=box_valid_mask, queries_valid_mask=box_valid_mask, is_training=is_training, block_name="SelfAttentionBlock") if use_long_term_attention: if use_self_attention and self_attention_in_sequence: input_features = tf.add(self_attention_box_features, box_features) input_features = tf.divide(input_features, 2) else: input_features = box_features original_input_features = input_features for jdx in range(num_attention_layers): layer_features = tf.zeros_like(input_features) for idx in range(num_attention_heads): block_name = "AttentionBlock" + str(idx) + "_AttentionLayer" +str(jdx) attention_features = attention_block( input_features, context_features, bottleneck_dimension, channels.value, attention_temperature, keys_values_valid_mask=context_valid_mask, queries_valid_mask=box_valid_mask, is_training=is_training, block_name=block_name) layer_features = tf.add(layer_features, attention_features) layer_features = tf.divide(layer_features, num_attention_heads) input_features = tf.add(input_features, layer_features) output_features = tf.add(input_features, original_input_features) if not self_attention_in_sequence and use_self_attention: output_features = tf.add(self_attention_box_features, output_features) elif use_self_attention: output_features = self_attention_box_features else: output_features = tf.zeros(self_attention_box_features.shape) # Expands the dimension back to match with the original feature map. output_features = output_features[:, :, tf.newaxis, tf.newaxis, :] return output_features
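# Illustration only: a small numpy sketch of the core computation inside
# attention_block above, with the learned projections and batch norm omitted.
# Queries and keys are l2-normalized, weights are softmax(Q K^T / temperature),
# and padded context slots are forced to _NEGATIVE_PADDING_VALUE (as in
# filter_weight_value) so they receive ~zero attention after the softmax. The
# temperature default below is just an example value for the sketch.


def _attention_block_numpy_sketch(attention_temperature=0.01):
  import numpy as np  # Local import; this sketch is not used by the library.

  def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

  queries = np.random.randn(3, 8)  # [input_size, bottleneck_dimension]
  keys = np.random.randn(5, 8)     # [context_size, bottleneck_dimension]
  values = np.random.randn(5, 8)   # [context_size, bottleneck_dimension]
  valid = np.array([1, 1, 1, 0, 0], dtype=bool)  # Last two slots are padding.

  queries /= np.linalg.norm(queries, axis=-1, keepdims=True)
  keys /= np.linalg.norm(keys, axis=-1, keepdims=True)

  weights = queries.dot(keys.T)
  weights = np.where(valid[np.newaxis, :], weights, _NEGATIVE_PADDING_VALUE)
  weights = softmax(weights / attention_temperature)
  # Zero out padded values and attend: [input_size, bottleneck_dimension].
  return weights.dot(values * valid[:, np.newaxis])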
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_lib.py
context_rcnn_lib.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The CenterNet meta architecture as described in the "Objects as Points" paper [1]. [1]: https://arxiv.org/abs/1904.07850 """ import abc import collections import functools import tensorflow.compat.v1 as tf import tensorflow.compat.v2 as tf2 from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import keypoint_ops from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.core import target_assigner as cn_assigner from object_detection.utils import shape_utils from object_detection.utils import target_assigner_utils as ta_utils from object_detection.utils import tf_version # Number of channels needed to predict size and offsets. NUM_OFFSET_CHANNELS = 2 NUM_SIZE_CHANNELS = 2 # Error range for detecting peaks. PEAK_EPSILON = 1e-6 class CenterNetFeatureExtractor(tf.keras.Model): """Base class for feature extractors for the CenterNet meta architecture. Child classes are expected to override the _output_model property which will return 1 or more tensors predicted by the feature extractor. """ __metaclass__ = abc.ABCMeta def __init__(self, name=None, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Initializes a CenterNet feature extractor. Args: name: str, the name used for the underlying keras model. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. If None or empty, we use 0s. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. If None or empty, we use 1s. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. """ super(CenterNetFeatureExtractor, self).__init__(name=name) if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test channel_means = [0., 0., 0.] if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test channel_stds = [1., 1., 1.] self._channel_means = channel_means self._channel_stds = channel_stds self._bgr_ordering = bgr_ordering def preprocess(self, inputs): """Converts a batch of unscaled images to a scale suitable for the model. This method normalizes the image using the given `channel_means` and `channels_stds` values at initialization time while optionally flipping the channel order if `bgr_ordering` is set. 
Args: inputs: a [batch, height, width, channels] float32 tensor Returns: outputs: a [batch, height, width, channels] float32 tensor """ if self._bgr_ordering: red, green, blue = tf.unstack(inputs, axis=3) inputs = tf.stack([blue, green, red], axis=3) channel_means = tf.reshape(tf.constant(self._channel_means), [1, 1, 1, -1]) channel_stds = tf.reshape(tf.constant(self._channel_stds), [1, 1, 1, -1]) return (inputs - channel_means)/channel_stds @property @abc.abstractmethod def out_stride(self): """The stride in the output image of the network.""" pass @property @abc.abstractmethod def num_feature_outputs(self): """Ther number of feature outputs returned by the feature extractor.""" pass @property def classification_backbone(self): raise NotImplementedError( 'Classification backbone not supported for {}'.format(type(self))) def make_prediction_net(num_out_channels, kernel_sizes=(3), num_filters=(256), bias_fill=None, use_depthwise=False, name=None, unit_height_conv=True): """Creates a network to predict the given number of output channels. This function is intended to make the prediction heads for the CenterNet meta architecture. Args: num_out_channels: Number of output channels. kernel_sizes: A list representing the sizes of the conv kernel in the intermediate layer. Note that the length of the list indicates the number of intermediate conv layers and it must be the same as the length of the num_filters. num_filters: A list representing the number of filters in the intermediate conv layer. Note that the length of the list indicates the number of intermediate conv layers. bias_fill: If not None, is used to initialize the bias in the final conv layer. use_depthwise: If true, use SeparableConv2D to construct the Sequential layers instead of Conv2D. name: Optional name for the prediction net. unit_height_conv: If True, Conv2Ds have asymmetric kernels with height=1. Returns: net: A keras module which when called on an input tensor of size [batch_size, height, width, num_in_channels] returns an output of size [batch_size, height, width, num_out_channels] """ if isinstance(kernel_sizes, int) and isinstance(num_filters, int): kernel_sizes = [kernel_sizes] num_filters = [num_filters] assert len(kernel_sizes) == len(num_filters) if use_depthwise: conv_fn = tf.keras.layers.SeparableConv2D else: conv_fn = tf.keras.layers.Conv2D # We name the convolution operations explicitly because Keras, by default, # uses different names during training and evaluation. By setting the names # here, we avoid unexpected pipeline breakage in TF1. 
out_conv = tf.keras.layers.Conv2D( num_out_channels, kernel_size=1, name='conv1' if tf_version.is_tf1() else None) if bias_fill is not None: out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill) layers = [] for idx, (kernel_size, num_filter) in enumerate(zip(kernel_sizes, num_filters)): layers.append( conv_fn( num_filter, kernel_size=[1, kernel_size] if unit_height_conv else kernel_size, padding='same', name='conv2_%d' % idx if tf_version.is_tf1() else None)) layers.append(tf.keras.layers.ReLU()) layers.append(out_conv) net = tf.keras.Sequential(layers, name=name) return net def _to_float32(x): return tf.cast(x, tf.float32) def _get_shape(tensor, num_dims): assert len(tensor.shape.as_list()) == num_dims return shape_utils.combined_static_and_dynamic_shape(tensor) def _flatten_spatial_dimensions(batch_images): batch_size, height, width, channels = _get_shape(batch_images, 4) return tf.reshape(batch_images, [batch_size, height * width, channels]) def _multi_range(limit, value_repetitions=1, range_repetitions=1, dtype=tf.int32): """Creates a sequence with optional value duplication and range repetition. As an example (see the Args section for more details), _multi_range(limit=2, value_repetitions=3, range_repetitions=4) returns: [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1] Args: limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive. value_repetitions: Integer. The number of times a value in the sequence is repeated. With value_repetitions=3, the result is [0, 0, 0, 1, 1, 1, ..]. range_repetitions: Integer. The number of times the range is repeated. With range_repetitions=3, the result is [0, 1, 2, .., 0, 1, 2, ..]. dtype: The type of the elements of the resulting tensor. Returns: A 1-D tensor of type `dtype` and size [`limit` * `value_repetitions` * `range_repetitions`] that contains the specified range with given repetitions. """ return tf.reshape( tf.tile( tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1), multiples=[range_repetitions, value_repetitions]), [-1]) def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100, per_channel=False): """Returns the top k scores and their locations in a feature map. Given a feature map, the top k values (based on activation) are returned. If `per_channel` is True, the top k values **per channel** are returned. Note that when k equals to 1, ths function uses reduce_max and argmax instead of top_k to make the logics more efficient. The `max_pool_kernel_size` argument allows for selecting local peaks in a region. This filtering is done per channel, so nothing prevents two values at the same location to be returned. Args: feature_map: [batch, height, width, channels] float32 feature map. max_pool_kernel_size: integer, the max pool kernel size to use to pull off peak score locations in a neighborhood (independently for each channel). For example, to make sure no two neighboring values (in the same channel) are returned, set max_pool_kernel_size=3. If None or 1, will not apply max pooling. k: The number of highest scoring locations to return. per_channel: If True, will return the top k scores and locations per feature map channel. If False, the top k across the entire feature map (height x width x channels) are returned. Returns: Tuple of scores: A [batch, N] float32 tensor with scores from the feature map in descending order. If per_channel is False, N = k. Otherwise, N = k * channels, and the first k elements correspond to channel 0, the second k correspond to channel 1, etc. 
y_indices: A [batch, N] int tensor with y indices of the top k feature map locations. If per_channel is False, N = k. Otherwise, N = k * channels. x_indices: A [batch, N] int tensor with x indices of the top k feature map locations. If per_channel is False, N = k. Otherwise, N = k * channels. channel_indices: A [batch, N] int tensor with channel indices of the top k feature map locations. If per_channel is False, N = k. Otherwise, N = k * channels. """ if not max_pool_kernel_size or max_pool_kernel_size == 1: feature_map_peaks = feature_map else: feature_map_max_pool = tf.nn.max_pool( feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME') feature_map_peak_mask = tf.math.abs( feature_map - feature_map_max_pool) < PEAK_EPSILON # Zero out everything that is not a peak. feature_map_peaks = ( feature_map * _to_float32(feature_map_peak_mask)) batch_size, _, width, num_channels = _get_shape(feature_map, 4) if per_channel: if k == 1: feature_map_flattened = tf.reshape( feature_map_peaks, [batch_size, -1, num_channels]) scores = tf.math.reduce_max(feature_map_flattened, axis=1) peak_flat_indices = tf.math.argmax( feature_map_flattened, axis=1, output_type=tf.dtypes.int32) peak_flat_indices = tf.expand_dims(peak_flat_indices, axis=-1) else: # Perform top k over batch and channels. feature_map_peaks_transposed = tf.transpose(feature_map_peaks, perm=[0, 3, 1, 2]) feature_map_peaks_transposed = tf.reshape( feature_map_peaks_transposed, [batch_size, num_channels, -1]) scores, peak_flat_indices = tf.math.top_k( feature_map_peaks_transposed, k=k) # Convert the indices such that they represent the location in the full # (flattened) feature map of size [batch, height * width * channels]. channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis] peak_flat_indices = num_channels * peak_flat_indices + channel_idx scores = tf.reshape(scores, [batch_size, -1]) peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1]) else: if k == 1: feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1]) scores = tf.math.reduce_max(feature_map_peaks_flat, axis=1, keepdims=True) peak_flat_indices = tf.expand_dims(tf.math.argmax( feature_map_peaks_flat, axis=1, output_type=tf.dtypes.int32), axis=-1) else: feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1]) scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k) # Get x, y and channel indices corresponding to the top indices in the flat # array. y_indices, x_indices, channel_indices = ( row_col_channel_indices_from_flattened_indices( peak_flat_indices, width, num_channels)) return scores, y_indices, x_indices, channel_indices def prediction_tensors_to_boxes(y_indices, x_indices, height_width_predictions, offset_predictions): """Converts CenterNet class-center, offset and size predictions to boxes. Args: y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to object center locations (expressed in output coordinate frame). x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to object center locations (expressed in output coordinate frame). height_width_predictions: A float tensor of shape [batch_size, height, width, 2] representing the height and width of a box centered at each pixel. offset_predictions: A float tensor of shape [batch_size, height, width, 2] representing the y and x offsets of a box centered at each pixel. This helps reduce the error from downsampling. 
Returns: detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the the raw bounding box coordinates of boxes. """ batch_size, num_boxes = _get_shape(y_indices, 2) _, height, width, _ = _get_shape(height_width_predictions, 4) height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_boxes), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) new_height_width = tf.gather_nd(height_width_predictions, combined_indices) new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, 2]) new_offsets = tf.gather_nd(offset_predictions, combined_indices) offsets = tf.reshape(new_offsets, [batch_size, num_boxes, 2]) y_indices = _to_float32(y_indices) x_indices = _to_float32(x_indices) height_width = tf.maximum(new_height_width, 0) heights, widths = tf.unstack(height_width, axis=2) y_offsets, x_offsets = tf.unstack(offsets, axis=2) ymin = y_indices + y_offsets - heights / 2.0 xmin = x_indices + x_offsets - widths / 2.0 ymax = y_indices + y_offsets + heights / 2.0 xmax = x_indices + x_offsets + widths / 2.0 ymin = tf.clip_by_value(ymin, 0., height) xmin = tf.clip_by_value(xmin, 0., width) ymax = tf.clip_by_value(ymax, 0., height) xmax = tf.clip_by_value(xmax, 0., width) boxes = tf.stack([ymin, xmin, ymax, xmax], axis=2) return boxes def prediction_tensors_to_temporal_offsets( y_indices, x_indices, offset_predictions): """Converts CenterNet temporal offset map predictions to batched format. This function is similar to the box offset conversion function, as both temporal offsets and box offsets are size-2 vectors. Args: y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to object center locations (expressed in output coordinate frame). x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to object center locations (expressed in output coordinate frame). offset_predictions: A float tensor of shape [batch_size, height, width, 2] representing the y and x offsets of a box's center across adjacent frames. Returns: offsets: A tensor of shape [batch_size, num_boxes, 2] holding the the object temporal offsets of (y, x) dimensions. """ batch_size, num_boxes = _get_shape(y_indices, 2) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_boxes), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) new_offsets = tf.gather_nd(offset_predictions, combined_indices) offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1]) return offsets def prediction_tensors_to_keypoint_candidates(keypoint_heatmap_predictions, keypoint_heatmap_offsets, keypoint_score_threshold=0.1, max_pool_kernel_size=1, max_candidates=20, keypoint_depths=None): """Convert keypoint heatmap predictions and offsets to keypoint candidates. Args: keypoint_heatmap_predictions: A float tensor of shape [batch_size, height, width, num_keypoints] representing the per-keypoint heatmaps. keypoint_heatmap_offsets: A float tensor of shape [batch_size, height, width, 2] (or [batch_size, height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True) representing the per-keypoint offsets. 
keypoint_score_threshold: float, the threshold for considering a keypoint a candidate. max_pool_kernel_size: integer, the max pool kernel size to use to pull off peak score locations in a neighborhood. For example, to make sure no two neighboring values for the same keypoint are returned, set max_pool_kernel_size=3. If None or 1, will not apply any local filtering. max_candidates: integer, maximum number of keypoint candidates per keypoint type. keypoint_depths: (optional) A float tensor of shape [batch_size, height, width, 1] (or [batch_size, height, width, num_keypoints] if 'per_keypoint_depth' is set True) representing the per-keypoint depths. Returns: keypoint_candidates: A tensor of shape [batch_size, max_candidates, num_keypoints, 2] holding the location of keypoint candidates in [y, x] format (expressed in absolute coordinates in the output coordinate frame). keypoint_scores: A float tensor of shape [batch_size, max_candidates, num_keypoints] with the scores for each keypoint candidate. The scores come directly from the heatmap predictions. num_keypoint_candidates: An integer tensor of shape [batch_size, num_keypoints] with the number of candidates for each keypoint type, as it's possible to filter some candidates due to the score threshold. depth_candidates: A tensor of shape [batch_size, max_candidates, num_keypoints] representing the estimated depth of each keypoint candidate. Return None if the input keypoint_depths is None. """ batch_size, _, _, num_keypoints = _get_shape(keypoint_heatmap_predictions, 4) # Get x, y and channel indices corresponding to the top indices in the # keypoint heatmap predictions. # Note that the top k candidates are produced for **each keypoint type**. # Might be worth eventually trying top k in the feature map, independent of # the keypoint type. keypoint_scores, y_indices, x_indices, channel_indices = ( top_k_feature_map_locations(keypoint_heatmap_predictions, max_pool_kernel_size=max_pool_kernel_size, k=max_candidates, per_channel=True)) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. _, num_indices = _get_shape(y_indices, 2) combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_indices), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets, combined_indices) selected_offsets = tf.reshape(selected_offsets_flat, [batch_size, num_indices, -1]) y_indices = _to_float32(y_indices) x_indices = _to_float32(x_indices) _, _, num_channels = _get_shape(selected_offsets, 3) if num_channels > 2: # Offsets are per keypoint and the last dimension of selected_offsets # contains all those offsets, so reshape the offsets to make sure that the # last dimension contains (y_offset, x_offset) for a single keypoint. reshaped_offsets = tf.reshape(selected_offsets, [batch_size, num_indices, -1, 2]) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. In this # case, channel_indices indicates which keypoint to use the offset from. 
channel_combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_indices), _multi_range(num_indices, range_repetitions=batch_size), tf.reshape(channel_indices, [-1]) ], axis=1) offsets = tf.gather_nd(reshaped_offsets, channel_combined_indices) offsets = tf.reshape(offsets, [batch_size, num_indices, -1]) else: offsets = selected_offsets y_offsets, x_offsets = tf.unstack(offsets, axis=2) keypoint_candidates = tf.stack([y_indices + y_offsets, x_indices + x_offsets], axis=2) keypoint_candidates = tf.reshape( keypoint_candidates, [batch_size, num_keypoints, max_candidates, 2]) keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3]) keypoint_scores = tf.reshape( keypoint_scores, [batch_size, num_keypoints, max_candidates]) keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1]) num_candidates = tf.reduce_sum( tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1) depth_candidates = None if keypoint_depths is not None: selected_depth_flat = tf.gather_nd(keypoint_depths, combined_indices) selected_depth = tf.reshape(selected_depth_flat, [batch_size, num_indices, -1]) _, _, num_depth_channels = _get_shape(selected_depth, 3) if num_depth_channels > 1: combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_indices), _multi_range(num_indices, range_repetitions=batch_size), tf.reshape(channel_indices, [-1]) ], axis=1) depth = tf.gather_nd(selected_depth, combined_indices) depth = tf.reshape(depth, [batch_size, num_indices, -1]) else: depth = selected_depth depth_candidates = tf.reshape(depth, [batch_size, num_keypoints, max_candidates]) depth_candidates = tf.transpose(depth_candidates, [0, 2, 1]) return keypoint_candidates, keypoint_scores, num_candidates, depth_candidates def argmax_feature_map_locations(feature_map): """Returns the peak locations in the feature map.""" batch_size, _, width, num_channels = _get_shape(feature_map, 4) feature_map_flattened = tf.reshape( feature_map, [batch_size, -1, num_channels]) peak_flat_indices = tf.math.argmax( feature_map_flattened, axis=1, output_type=tf.dtypes.int32) # Get x and y indices corresponding to the top indices in the flat array. y_indices, x_indices = ( row_col_indices_from_flattened_indices(peak_flat_indices, width)) channel_indices = tf.tile( tf.range(num_channels)[tf.newaxis, :], [batch_size, 1]) return y_indices, x_indices, channel_indices def prediction_tensors_to_single_instance_kpts( keypoint_heatmap_predictions, keypoint_heatmap_offsets, keypoint_score_heatmap=None): """Convert keypoint heatmap predictions and offsets to keypoint candidates. Args: keypoint_heatmap_predictions: A float tensor of shape [batch_size, height, width, num_keypoints] representing the per-keypoint heatmaps which is used for finding the best keypoint candidate locations. keypoint_heatmap_offsets: A float tensor of shape [batch_size, height, width, 2] (or [batch_size, height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True) representing the per-keypoint offsets. keypoint_score_heatmap: (optional) A float tensor of shape [batch_size, height, width, num_keypoints] representing the heatmap which is used for reporting the confidence scores. If not provided, then the values in the keypoint_heatmap_predictions will be used. Returns: keypoint_candidates: A tensor of shape [batch_size, max_candidates, num_keypoints, 2] holding the location of keypoint candidates in [y, x] format (expressed in absolute coordinates in the output coordinate frame). 
keypoint_scores: A float tensor of shape [batch_size, max_candidates, num_keypoints] with the scores for each keypoint candidate. The scores come directly from the heatmap predictions. num_keypoint_candidates: An integer tensor of shape [batch_size, num_keypoints] with the number of candidates for each keypoint type, as it's possible to filter some candidates due to the score threshold. """ batch_size, height, width, num_keypoints = _get_shape( keypoint_heatmap_predictions, 4) # Get x, y and channel indices corresponding to the top indices in the # keypoint heatmap predictions. y_indices, x_indices, channel_indices = argmax_feature_map_locations( keypoint_heatmap_predictions) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. _, num_keypoints = _get_shape(y_indices, 2) combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_keypoints), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]), tf.reshape(channel_indices, [-1]) ], axis=1) # Reshape the offsets predictions to shape: # [batch_size, height, width, num_keypoints, 2] keypoint_heatmap_offsets = tf.reshape( keypoint_heatmap_offsets, [batch_size, height, width, num_keypoints, -1]) # shape: [num_keypoints, 2] selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets, combined_indices) y_offsets, x_offsets = tf.unstack(selected_offsets_flat, axis=1) keypoint_candidates = tf.stack([ tf.cast(y_indices, dtype=tf.float32) + tf.expand_dims(y_offsets, axis=0), tf.cast(x_indices, dtype=tf.float32) + tf.expand_dims(x_offsets, axis=0) ], axis=2) keypoint_candidates = tf.expand_dims(keypoint_candidates, axis=0) if keypoint_score_heatmap is None: keypoint_scores = tf.gather_nd( keypoint_heatmap_predictions, combined_indices) else: keypoint_scores = tf.gather_nd(keypoint_score_heatmap, combined_indices) keypoint_scores = tf.expand_dims( tf.expand_dims(keypoint_scores, axis=0), axis=0) return keypoint_candidates, keypoint_scores def _score_to_distance_map(y_grid, x_grid, heatmap, points_y, points_x, score_distance_offset): """Rescores heatmap using the distance information. Rescore the heatmap scores using the formula: score / (d + score_distance_offset), where the d is the distance from each pixel location to the target point location. Args: y_grid: A float tensor with shape [height, width] representing the y-coordinate of each pixel grid. x_grid: A float tensor with shape [height, width] representing the x-coordinate of each pixel grid. heatmap: A float tensor with shape [1, height, width, channel] representing the heatmap to be rescored. points_y: A float tensor with shape [channel] representing the y coordinates of the target points for each channel. points_x: A float tensor with shape [channel] representing the x coordinates of the target points for each channel. score_distance_offset: A constant used in the above formula. Returns: A float tensor with shape [1, height, width, channel] representing the rescored heatmap. """ y_diff = y_grid[:, :, tf.newaxis] - points_y x_diff = x_grid[:, :, tf.newaxis] - points_x distance = tf.math.sqrt(y_diff**2 + x_diff**2) return tf.math.divide(heatmap, distance + score_distance_offset) def prediction_to_single_instance_keypoints( object_heatmap, keypoint_heatmap, keypoint_offset, keypoint_regression, kp_params, keypoint_depths=None): """Postprocess function to predict single instance keypoints. 
This is a simplified postprocessing function based on the assumption that there is only one instance in the image. If there are multiple instances in the image, the model prefers to predict the one that is closest to the image center. Here is a high-level description of what this function does: 1) Object heatmap re-weighted by the distance between each pixel to the image center is used to determine the instance center. 2) Regressed keypoint locations are retrieved from the instance center. The Gaussian kernel is applied to the regressed keypoint locations to re-weight the keypoint heatmap. This is to select the keypoints that are associated with the center instance without using top_k op. 3) The keypoint locations are computed by the re-weighted keypoint heatmap and the keypoint offset. Args: object_heatmap: A float tensor of shape [1, height, width, 1] representing the heapmap of the class. keypoint_heatmap: A float tensor of shape [1, height, width, num_keypoints] representing the per-keypoint heatmaps. keypoint_offset: A float tensor of shape [1, height, width, 2] (or [1, height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True) representing the per-keypoint offsets. keypoint_regression: A float tensor of shape [1, height, width, 2 * num_keypoints] representing the joint regression prediction. kp_params: A `KeypointEstimationParams` object with parameters for a single keypoint class. keypoint_depths: (optional) A float tensor of shape [batch_size, height, width, 1] (or [batch_size, height, width, num_keypoints] if 'per_keypoint_depth' is set True) representing the per-keypoint depths. Returns: A tuple of two tensors: keypoint_candidates: A float tensor with shape [1, 1, num_keypoints, 2] representing the yx-coordinates of the keypoints in the output feature map space. keypoint_scores: A float tensor with shape [1, 1, num_keypoints] representing the keypoint prediction scores. Raises: ValueError: if the input keypoint_std_dev doesn't have valid number of elements (1 or num_keypoints). """ # TODO(yuhuic): add the keypoint depth prediction logics in the browser # postprocessing back. del keypoint_depths num_keypoints = len(kp_params.keypoint_std_dev) batch_size, height, width, _ = _get_shape(keypoint_heatmap, 4) # Create the image center location. image_center_y = tf.convert_to_tensor([0.5 * height], dtype=tf.float32) image_center_x = tf.convert_to_tensor([0.5 * width], dtype=tf.float32) (y_grid, x_grid) = ta_utils.image_shape_to_grids(height, width) # Rescore the object heatmap by the distnace to the image center. object_heatmap = _score_to_distance_map( y_grid, x_grid, object_heatmap, image_center_y, image_center_x, kp_params.score_distance_offset) # Pick the highest score and location of the weighted object heatmap. y_indices, x_indices, _ = argmax_feature_map_locations(object_heatmap) _, num_indices = _get_shape(y_indices, 2) combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_indices), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) # Select the regression vectors from the object center. 
selected_regression_flat = tf.gather_nd(keypoint_regression, combined_indices) # shape: [num_keypoints, 2] regression_offsets = tf.reshape(selected_regression_flat, [num_keypoints, -1]) (y_reg, x_reg) = tf.unstack(regression_offsets, axis=1) y_regressed = tf.cast(y_indices, dtype=tf.float32) + y_reg x_regressed = tf.cast(x_indices, dtype=tf.float32) + x_reg if kp_params.candidate_ranking_mode == 'score_distance_ratio': reweighted_keypoint_heatmap = _score_to_distance_map( y_grid, x_grid, keypoint_heatmap, y_regressed, x_regressed, kp_params.score_distance_offset) else: raise ValueError('Unsupported candidate_ranking_mode: %s' % kp_params.candidate_ranking_mode) # Get the keypoint locations/scores: # keypoint_candidates: [1, 1, num_keypoints, 2] # keypoint_scores: [1, 1, num_keypoints] # depth_candidates: [1, 1, num_keypoints] (keypoint_candidates, keypoint_scores ) = prediction_tensors_to_single_instance_kpts( reweighted_keypoint_heatmap, keypoint_offset, keypoint_score_heatmap=keypoint_heatmap) return keypoint_candidates, keypoint_scores, None def regressed_keypoints_at_object_centers(regressed_keypoint_predictions, y_indices, x_indices): """Returns the regressed keypoints at specified object centers. The original keypoint predictions are regressed relative to each feature map location. The returned keypoints are expressed in absolute coordinates in the output frame (i.e. the center offsets are added to each individual regressed set of keypoints). Args: regressed_keypoint_predictions: A float tensor of shape [batch_size, height, width, 2 * num_keypoints] holding regressed keypoints. The last dimension has keypoint coordinates ordered as follows: [y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints. y_indices: A [batch, num_instances] int tensor holding y indices for object centers. These indices correspond to locations in the output feature map. x_indices: A [batch, num_instances] int tensor holding x indices for object centers. These indices correspond to locations in the output feature map. Returns: A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where regressed keypoints are gathered at the provided locations, and converted to absolute coordinates in the output coordinate frame. """ batch_size, num_instances = _get_shape(y_indices, 2) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. 
combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_instances), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) relative_regressed_keypoints = tf.gather_nd(regressed_keypoint_predictions, combined_indices) relative_regressed_keypoints = tf.reshape( relative_regressed_keypoints, [batch_size, num_instances, -1, 2]) relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack( relative_regressed_keypoints, axis=3) y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1)) x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1)) absolute_regressed_keypoints = tf.stack( [y_indices + relative_regressed_keypoints_y, x_indices + relative_regressed_keypoints_x], axis=3) return tf.reshape(absolute_regressed_keypoints, [batch_size, num_instances, -1]) def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=None, unmatched_keypoint_score=0.1, box_scale=1.2, candidate_search_scale=0.3, candidate_ranking_mode='min_distance', score_distance_offset=1e-6, keypoint_depth_candidates=None, keypoint_score_threshold=0.1): """Refines regressed keypoints by snapping to the nearest candidate keypoints. The initial regressed keypoints represent a full set of keypoints regressed from the centers of the objects. The keypoint candidates are estimated independently from heatmaps, and are not associated with any object instances. This function refines the regressed keypoints by "snapping" to the nearest/highest score/highest score-distance ratio (depending on the candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose"). If no candidates are nearby, the regressed keypoint remains unchanged. In order to snap a regressed keypoint to a candidate keypoint, the following must be satisfied: - the candidate keypoint must be of the same type as the regressed keypoint - the candidate keypoint must not lie outside the predicted boxes (or the boxes which encloses the regressed keypoints for the instance if `bboxes` is not provided). Note that the box is scaled by `regressed_box_scale` in height and width, to provide some margin around the keypoints - the distance to the closest candidate keypoint cannot exceed candidate_search_scale * max(height, width), where height and width refer to the bounding box for the instance. Note that the same candidate keypoint is allowed to snap to regressed keypoints in difference instances. Args: regressed_keypoints: A float tensor of shape [batch_size, num_instances, num_keypoints, 2] with the initial regressed keypoints. keypoint_candidates: A tensor of shape [batch_size, max_candidates, num_keypoints, 2] holding the location of keypoint candidates in [y, x] format (expressed in absolute coordinates in the output coordinate frame). keypoint_scores: A float tensor of shape [batch_size, max_candidates, num_keypoints] indicating the scores for keypoint candidates. num_keypoint_candidates: An integer tensor of shape [batch_size, num_keypoints] indicating the number of valid candidates for each keypoint type, as there may be padding (dim 1) of `keypoint_candidates` and `keypoint_scores`. bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted bounding boxes for each instance, expressed in the output coordinate frame. If not provided, boxes will be computed from regressed keypoints. unmatched_keypoint_score: float, the default score to use for regressed keypoints that are not successfully snapped to a nearby candidate. 
    box_scale: float, the multiplier to expand the bounding boxes (either the
      provided boxes or those which tightly cover the regressed keypoints) for
      an instance. This scale is typically larger than 1.0 when not providing
      `bboxes`.
    candidate_search_scale: float, the scale parameter that multiplies the
      largest dimension of a bounding box. The resulting distance becomes a
      search radius for candidates in the vicinity of each regressed keypoint.
    candidate_ranking_mode: A string as one of ['min_distance',
      'score_distance_ratio'] indicating how to select the candidate. If an
      invalid value is provided, a ValueError will be raised.
    score_distance_offset: The distance offset to apply in the denominator when
      candidate_ranking_mode is 'score_distance_ratio'. The metric to maximize
      in this scenario is score / (distance + score_distance_offset). Larger
      values of score_distance_offset make the keypoint score gain more
      relative importance.
    keypoint_depth_candidates: (optional) A float tensor of shape [batch_size,
      max_candidates, num_keypoints] indicating the depths for keypoint
      candidates.
    keypoint_score_threshold: float, The heatmap score threshold for a keypoint
      to become a valid candidate.

  Returns:
    A tuple with:
    refined_keypoints: A float tensor of shape
      [batch_size, num_instances, num_keypoints, 2] with the final, refined
      keypoints.
    refined_scores: A float tensor of shape
      [batch_size, num_instances, num_keypoints] with scores associated with
      all instances and keypoints in `refined_keypoints`.

  Raises:
    ValueError: if the provided candidate_ranking_mode is not one of
      ['min_distance', 'score_distance_ratio']
  """
  batch_size, num_instances, num_keypoints, _ = (
      shape_utils.combined_static_and_dynamic_shape(regressed_keypoints))
  max_candidates = keypoint_candidates.shape[1]
  # Mark all invalid (i.e. padded) keypoint candidates so that they can be
  # excluded from the ranking below.
  range_tiled = tf.tile(
      tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]),
      [batch_size, 1, num_keypoints])
  num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1),
                                 [1, max_candidates, 1])
  invalid_candidates = range_tiled >= num_candidates_tiled

  # Pairwise squared distances between regressed keypoints and candidate
  # keypoints (for a single keypoint type).
  # Shape [batch_size, num_instances, 1, num_keypoints, 2].
  regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints, axis=2)
  # Shape [batch_size, 1, max_candidates, num_keypoints, 2].
  keypoint_candidates_expanded = tf.expand_dims(keypoint_candidates, axis=1)
  # Use explicit tensor shape broadcasting (since the tensor dimensions are
  # expanded to 5D) to make it tf.lite compatible.
  regressed_keypoint_expanded = tf.tile(
      regressed_keypoint_expanded, multiples=[1, 1, max_candidates, 1, 1])
  keypoint_candidates_expanded = tf.tile(
      keypoint_candidates_expanded, multiples=[1, num_instances, 1, 1, 1])
  # Replace tf.math.squared_difference with the "-" operator and tf.multiply
  # ops, since the TF Lite converter doesn't support squared_difference with an
  # undetermined dimension.
  diff = regressed_keypoint_expanded - keypoint_candidates_expanded
  sqrd_distances = tf.math.reduce_sum(tf.multiply(diff, diff), axis=-1)
  distances = tf.math.sqrt(sqrd_distances)

  # Replace the invalid candidates with a large constant (10^5) to make sure
  # the following reduce_min/argmin behaves properly.
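  # Note (added for clarity): with a distance of 10^5, padded candidates
  # effectively lose both rankings (their 'min_distance' is never the smallest
  # and their 'score_distance_ratio' is close to zero), so they are not
  # selected as long as at least one valid candidate exists.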
max_dist = 1e5 distances = tf.where( tf.tile( tf.expand_dims(invalid_candidates, axis=1), multiples=[1, num_instances, 1, 1]), tf.ones_like(distances) * max_dist, distances ) # Determine the candidates that have the minimum distance to the regressed # keypoints. Shape [batch_size, num_instances, num_keypoints]. min_distances = tf.math.reduce_min(distances, axis=2) if candidate_ranking_mode == 'min_distance': nearby_candidate_inds = tf.math.argmin(distances, axis=2) elif candidate_ranking_mode == 'score_distance_ratio': # tiled_keypoint_scores: # Shape [batch_size, num_instances, max_candidates, num_keypoints]. tiled_keypoint_scores = tf.tile( tf.expand_dims(keypoint_scores, axis=1), multiples=[1, num_instances, 1, 1]) ranking_scores = tiled_keypoint_scores / (distances + score_distance_offset) nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2) else: raise ValueError('Not recognized candidate_ranking_mode: %s' % candidate_ranking_mode) # Gather the coordinates and scores corresponding to the closest candidates. # Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and # [batch_size, num_instances, num_keypoints], respectively. (nearby_candidate_coords, nearby_candidate_scores, nearby_candidate_depths) = ( _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, nearby_candidate_inds, keypoint_depth_candidates)) if bboxes is None: # Filter out the chosen candidate with score lower than unmatched # keypoint score. mask = tf.cast(nearby_candidate_scores < keypoint_score_threshold, tf.int32) else: bboxes_flattened = tf.reshape(bboxes, [-1, 4]) # Scale the bounding boxes. # Shape [batch_size, num_instances, 4]. boxlist = box_list.BoxList(bboxes_flattened) boxlist_scaled = box_list_ops.scale_height_width( boxlist, box_scale, box_scale) bboxes_scaled = boxlist_scaled.get() bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4]) # Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint. # Shape [batch_size, num_instances, num_keypoints]. bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1]) ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3) # Produce a mask that indicates whether the original regressed keypoint # should be used instead of a candidate keypoint. # Shape [batch_size, num_instances, num_keypoints]. search_radius = ( tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale) mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) + tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) + tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) + tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) + # Filter out the chosen candidate with score lower than unmatched # keypoint score. tf.cast(nearby_candidate_scores < keypoint_score_threshold, tf.int32) + tf.cast(min_distances > search_radius, tf.int32)) mask = mask > 0 # Create refined keypoints where candidate keypoints replace original # regressed keypoints if they are in the vicinity of the regressed keypoints. # Shape [batch_size, num_instances, num_keypoints, 2]. refined_keypoints = tf.where( tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]), regressed_keypoints, nearby_candidate_coords) # Update keypoints scores. In the case where we use the original regressed # keypoints, we use a default score of `unmatched_keypoint_score`. # Shape [batch_size, num_instances, num_keypoints]. 
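  # Note (added for clarity): `mask` is True wherever the candidate was
  # rejected (outside the scaled box, farther than the search radius, or below
  # the score threshold), so those entries kept the regressed keypoint above
  # and receive the constant `unmatched_keypoint_score` in the update below.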
refined_scores = tf.where( mask, unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores), nearby_candidate_scores) refined_depths = None if nearby_candidate_depths is not None: refined_depths = tf.where(mask, tf.zeros_like(nearby_candidate_depths), nearby_candidate_depths) return refined_keypoints, refined_scores, refined_depths def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds, num_total_keypoints): """Scatter keypoint elements into tensors with full keypoints dimension. Args: keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32 tensor. keypoint_scores: a [batch_size, num_instances, num_keypoints] float32 tensor. keypoint_inds: a list of integers that indicate the keypoint indices for this specific keypoint class. These indices are used to scatter into tensors that have a `num_total_keypoints` dimension. num_total_keypoints: The total number of keypoints that this model predicts. Returns: A tuple with keypoint_coords_padded: a [batch_size, num_instances, num_total_keypoints,2] float32 tensor. keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints] float32 tensor. """ batch_size, num_instances, _, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3]) kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1]) kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1) kpt_coords_scattered = tf.scatter_nd( indices=kpt_inds_tensor, updates=kpt_coords_transposed, shape=[num_total_keypoints, batch_size, num_instances, 2]) kpt_scores_scattered = tf.scatter_nd( indices=kpt_inds_tensor, updates=kpt_scores_transposed, shape=[num_total_keypoints, batch_size, num_instances]) keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3]) keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0]) return keypoint_coords_padded, keypoint_scores_padded def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds, max_instances): """Scatter keypoint elements into tensors with full instance dimension. Args: keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32 tensor. keypoint_scores: a [batch_size, num_instances, num_keypoints] float32 tensor. instance_inds: a list of integers that indicate the instance indices for these keypoints. These indices are used to scatter into tensors that have a `max_instances` dimension. max_instances: The maximum number of instances detected by the model. Returns: A tuple with keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2] float32 tensor. keypoint_scores_padded: a [batch_size, max_instances, num_keypoints] float32 tensor. 
""" batch_size, _, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3]) kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2]) instance_inds = tf.expand_dims(instance_inds, axis=-1) kpt_coords_scattered = tf.scatter_nd( indices=instance_inds, updates=kpt_coords_transposed, shape=[max_instances, batch_size, num_keypoints, 2]) kpt_scores_scattered = tf.scatter_nd( indices=instance_inds, updates=kpt_scores_transposed, shape=[max_instances, batch_size, num_keypoints]) keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3]) keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2]) return keypoint_coords_padded, keypoint_scores_padded def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, indices, keypoint_depth_candidates=None): """Gathers keypoint candidate coordinates and scores at indices. Args: keypoint_candidates: a float tensor of shape [batch_size, max_candidates, num_keypoints, 2] with candidate coordinates. keypoint_scores: a float tensor of shape [batch_size, max_candidates, num_keypoints] with keypoint scores. indices: an integer tensor of shape [batch_size, num_indices, num_keypoints] with indices. keypoint_depth_candidates: (optional) a float tensor of shape [batch_size, max_candidates, num_keypoints] with keypoint depths. Returns: A tuple with gathered_keypoint_candidates: a float tensor of shape [batch_size, num_indices, num_keypoints, 2] with gathered coordinates. gathered_keypoint_scores: a float tensor of shape [batch_size, num_indices, num_keypoints]. gathered_keypoint_depths: a float tensor of shape [batch_size, num_indices, num_keypoints]. Return None if the input keypoint_depth_candidates is None. """ batch_size, num_indices, num_keypoints = _get_shape(indices, 3) # Transpose tensors so that all batch dimensions are up front. keypoint_candidates_transposed = tf.transpose(keypoint_candidates, [0, 2, 1, 3]) keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1]) nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1]) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. 
combined_indices = tf.stack([ _multi_range( batch_size, value_repetitions=num_keypoints * num_indices, dtype=tf.int64), _multi_range( num_keypoints, value_repetitions=num_indices, range_repetitions=batch_size, dtype=tf.int64), tf.reshape(nearby_candidate_inds_transposed, [-1]) ], axis=1) nearby_candidate_coords_transposed = tf.gather_nd( keypoint_candidates_transposed, combined_indices) nearby_candidate_coords_transposed = tf.reshape( nearby_candidate_coords_transposed, [batch_size, num_keypoints, num_indices, -1]) nearby_candidate_scores_transposed = tf.gather_nd(keypoint_scores_transposed, combined_indices) nearby_candidate_scores_transposed = tf.reshape( nearby_candidate_scores_transposed, [batch_size, num_keypoints, num_indices]) gathered_keypoint_candidates = tf.transpose( nearby_candidate_coords_transposed, [0, 2, 1, 3]) gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed, [0, 2, 1]) gathered_keypoint_depths = None if keypoint_depth_candidates is not None: keypoint_depths_transposed = tf.transpose(keypoint_depth_candidates, [0, 2, 1]) nearby_candidate_depths_transposed = tf.gather_nd( keypoint_depths_transposed, combined_indices) nearby_candidate_depths_transposed = tf.reshape( nearby_candidate_depths_transposed, [batch_size, num_keypoints, num_indices]) gathered_keypoint_depths = tf.transpose(nearby_candidate_depths_transposed, [0, 2, 1]) return (gathered_keypoint_candidates, gathered_keypoint_scores, gathered_keypoint_depths) def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols): """Get the index in a flattened array given row and column indices.""" return (row_indices * num_cols) + col_indices def row_col_channel_indices_from_flattened_indices(indices, num_cols, num_channels): """Computes row, column and channel indices from flattened indices. Args: indices: An integer tensor of any shape holding the indices in the flattened space. num_cols: Number of columns in the image (width). num_channels: Number of channels in the image. Returns: row_indices: The row indices corresponding to each of the input indices. Same shape as indices. col_indices: The column indices corresponding to each of the input indices. Same shape as indices. channel_indices. The channel indices corresponding to each of the input indices. """ # Be careful with this function when running a model in float16 precision # (e.g. TF.js with WebGL) because the array indices may not be represented # accurately if they are too large, resulting in incorrect channel indices. # See: # https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_integer_values # # Avoid using mod operator to make the ops more easy to be compatible with # different environments, e.g. WASM. row_indices = (indices // num_channels) // num_cols col_indices = (indices // num_channels) - row_indices * num_cols channel_indices_temp = indices // num_channels channel_indices = indices - channel_indices_temp * num_channels return row_indices, col_indices, channel_indices def row_col_indices_from_flattened_indices(indices, num_cols): """Computes row and column indices from flattened indices. Args: indices: An integer tensor of any shape holding the indices in the flattened space. num_cols: Number of columns in the image (width). Returns: row_indices: The row indices corresponding to each of the input indices. Same shape as indices. col_indices: The column indices corresponding to each of the input indices. Same shape as indices. 
""" # Avoid using mod operator to make the ops more easy to be compatible with # different environments, e.g. WASM. row_indices = indices // num_cols col_indices = indices - row_indices * num_cols return row_indices, col_indices def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height, width): """Computes valid anchor weights for an image assuming pixels will be flattened. This function is useful when we only want to penalize valid areas in the image in the case when padding is used. The function assumes that the loss function will be applied after flattening the spatial dimensions and returns anchor weights accordingly. Args: true_image_shapes: An integer tensor of shape [batch_size, 3] representing the true image shape (without padding) for each sample in the batch. height: height of the prediction from the network. width: width of the prediction from the network. Returns: valid_anchor_weights: a float tensor of shape [batch_size, height * width] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. """ indices = tf.reshape(tf.range(height * width), [1, -1]) batch_size = tf.shape(true_image_shapes)[0] batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices( batch_indices, width, 1) max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1] max_x = _to_float32(tf.expand_dims(max_x, 1)) max_y = _to_float32(tf.expand_dims(max_y, 1)) x_coords = _to_float32(x_coords) y_coords = _to_float32(y_coords) valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y) return _to_float32(valid_mask) def convert_strided_predictions_to_normalized_boxes(boxes, stride, true_image_shapes): """Converts predictions in the output space to normalized boxes. Boxes falling outside the valid image boundary are clipped to be on the boundary. Args: boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw coordinates of boxes in the model's output space. stride: The stride in the output space. true_image_shapes: A tensor of shape [batch_size, 3] representing the true shape of the input not considering padding. Returns: boxes: A tensor of shape [batch_size, num_boxes, 4] representing the coordinates of the normalized boxes. """ # Note: We use tf ops instead of functions in box_list_ops to make this # function compatible with dynamic batch size. boxes = boxes * stride true_image_shapes = tf.tile(true_image_shapes[:, tf.newaxis, :2], [1, 1, 2]) boxes = boxes / tf.cast(true_image_shapes, tf.float32) boxes = tf.clip_by_value(boxes, 0.0, 1.0) return boxes def convert_strided_predictions_to_normalized_keypoints( keypoint_coords, keypoint_scores, stride, true_image_shapes, clip_out_of_frame_keypoints=False): """Converts predictions in the output space to normalized keypoints. If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside the valid image boundary are normalized but not clipped; If clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the valid image boundary are clipped to the closest image boundary and the scores will be set to 0.0. Args: keypoint_coords: A tensor of shape [batch_size, num_instances, num_keypoints, 2] holding the raw coordinates of keypoints in the model's output space. keypoint_scores: A tensor of shape [batch_size, num_instances, num_keypoints] holding the keypoint scores. stride: The stride in the output space. 
true_image_shapes: A tensor of shape [batch_size, 3] representing the true shape of the input not considering padding. clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside the image boundary should be clipped. If True, keypoint coords will be clipped to image boundary. If False, keypoints are normalized but not filtered based on their location. Returns: keypoint_coords_normalized: A tensor of shape [batch_size, num_instances, num_keypoints, 2] representing the coordinates of the normalized keypoints. keypoint_scores: A tensor of shape [batch_size, num_instances, num_keypoints] representing the updated keypoint scores. """ # Flatten keypoints and scores. batch_size, _, _, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) # Scale and normalize keypoints. true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) yscale = float(stride) / tf.cast(true_heights, tf.float32) xscale = float(stride) / tf.cast(true_widths, tf.float32) yx_scale = tf.stack([yscale, xscale], axis=1) keypoint_coords_normalized = keypoint_coords * tf.reshape( yx_scale, [batch_size, 1, 1, 2]) if clip_out_of_frame_keypoints: # Determine the keypoints that are in the true image regions. valid_indices = tf.logical_and( tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0, keypoint_coords_normalized[:, :, :, 0] <= 1.0), tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0, keypoint_coords_normalized[:, :, :, 1] <= 1.0)) batch_window = tf.tile( tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32), multiples=[batch_size, 1]) def clip_to_window(inputs): keypoints, window = inputs return keypoint_ops.clip_to_window(keypoints, window) # Specify the TensorSpec explicitly in the tf.map_fn to make it tf.lite # compatible. kpts_dims = _get_shape(keypoint_coords_normalized, 4) output_spec = tf.TensorSpec( shape=[kpts_dims[1], kpts_dims[2], kpts_dims[3]], dtype=tf.float32) keypoint_coords_normalized = tf.map_fn( clip_to_window, (keypoint_coords_normalized, batch_window), dtype=tf.float32, back_prop=False, fn_output_signature=output_spec) keypoint_scores = tf.where(valid_indices, keypoint_scores, tf.zeros_like(keypoint_scores)) return keypoint_coords_normalized, keypoint_scores def convert_strided_predictions_to_instance_masks( boxes, classes, masks, true_image_shapes, densepose_part_heatmap=None, densepose_surface_coords=None, stride=4, mask_height=256, mask_width=256, score_threshold=0.5, densepose_class_index=-1): """Converts predicted full-image masks into instance masks. For each predicted detection box: * Crop and resize the predicted mask (and optionally DensePose coordinates) based on the detected bounding box coordinates and class prediction. Uses bilinear resampling. * Binarize the mask using the provided score threshold. Args: boxes: A tensor of shape [batch, max_detections, 4] holding the predicted boxes, in normalized coordinates (relative to the true image dimensions). classes: An integer tensor of shape [batch, max_detections] containing the detected class for each box (0-indexed). masks: A [batch, output_height, output_width, num_classes] float32 tensor with class probabilities. true_image_shapes: A tensor of shape [batch, 3] representing the true shape of the inputs not considering padding. densepose_part_heatmap: (Optional) A [batch, output_height, output_width, num_parts] float32 tensor with part scores (i.e. logits). 
densepose_surface_coords: (Optional) A [batch, output_height, output_width, 2 * num_parts] float32 tensor with predicted part coordinates (in vu-format). stride: The stride in the output space. mask_height: The desired resized height for instance masks. mask_width: The desired resized width for instance masks. score_threshold: The threshold at which to convert predicted mask into foreground pixels. densepose_class_index: The class index (0-indexed) corresponding to the class which has DensePose labels (e.g. person class). Returns: A tuple of masks and surface_coords. instance_masks: A [batch_size, max_detections, mask_height, mask_width] uint8 tensor with predicted foreground mask for each instance. If DensePose tensors are provided, then each pixel value in the mask encodes the 1-indexed part. surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2] float32 tensor with (v, u) coordinates. Note that v, u coordinates are only defined on instance masks, and the coordinates at each location of the foreground mask correspond to coordinates on a local part coordinate system (the specific part can be inferred from the `instance_masks` output. If DensePose feature maps are not passed to this function, this output will be None. Raises: ValueError: If one but not both of `densepose_part_heatmap` and `densepose_surface_coords` is provided. """ batch_size, output_height, output_width, _ = ( shape_utils.combined_static_and_dynamic_shape(masks)) input_height = stride * output_height input_width = stride * output_width true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) # If necessary, create dummy DensePose tensors to simplify the map function. densepose_present = True if ((densepose_part_heatmap is not None) ^ (densepose_surface_coords is not None)): raise ValueError('To use DensePose, both `densepose_part_heatmap` and ' '`densepose_surface_coords` must be provided') if densepose_part_heatmap is None and densepose_surface_coords is None: densepose_present = False densepose_part_heatmap = tf.zeros( (batch_size, output_height, output_width, 1), dtype=tf.float32) densepose_surface_coords = tf.zeros( (batch_size, output_height, output_width, 2), dtype=tf.float32) crop_and_threshold_fn = functools.partial( crop_and_threshold_masks, input_height=input_height, input_width=input_width, mask_height=mask_height, mask_width=mask_width, score_threshold=score_threshold, densepose_class_index=densepose_class_index) instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn( crop_and_threshold_fn, elems=[boxes, classes, masks, densepose_part_heatmap, densepose_surface_coords, true_heights, true_widths], dtype=[tf.uint8, tf.float32], back_prop=False) surface_coords = surface_coords if densepose_present else None return instance_masks, surface_coords def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256, mask_width=256, score_threshold=0.5, densepose_class_index=-1): """Crops and thresholds masks based on detection boxes. Args: elems: A tuple of boxes - float32 tensor of shape [max_detections, 4] classes - int32 tensor of shape [max_detections] (0-indexed) masks - float32 tensor of shape [output_height, output_width, num_classes] part_heatmap - float32 tensor of shape [output_height, output_width, num_parts] surf_coords - float32 tensor of shape [output_height, output_width, 2 * num_parts] true_height - scalar int tensor true_width - scalar int tensor input_height: Input height to network. input_width: Input width to network. 
mask_height: Height for resizing mask crops. mask_width: Width for resizing mask crops. score_threshold: The threshold at which to convert predicted mask into foreground pixels. densepose_class_index: scalar int tensor with the class index (0-indexed) for DensePose. Returns: A tuple of all_instances: A [max_detections, mask_height, mask_width] uint8 tensor with a predicted foreground mask for each instance. Background is encoded as 0, and foreground is encoded as a positive integer. Specific part indices are encoded as 1-indexed parts (for classes that have part information). surface_coords: A [max_detections, mask_height, mask_width, 2] float32 tensor with (v, u) coordinates. for each part. """ (boxes, classes, masks, part_heatmap, surf_coords, true_height, true_width) = elems # Boxes are in normalized coordinates relative to true image shapes. Convert # coordinates to be normalized relative to input image shapes (since masks # may still have padding). boxlist = box_list.BoxList(boxes) y_scale = true_height / input_height x_scale = true_width / input_width boxlist = box_list_ops.scale(boxlist, y_scale, x_scale) boxes = boxlist.get() # Convert masks from [output_height, output_width, num_classes] to # [num_classes, output_height, output_width, 1]. num_classes = tf.shape(masks)[-1] masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis] # Tile part and surface coordinate masks for all classes. part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :], multiples=[num_classes, 1, 1, 1]) surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :], multiples=[num_classes, 1, 1, 1]) feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d], axis=-1) # The following tensor has shape # [max_detections, mask_height, mask_width, 1 + 3 * num_parts]. cropped_masks = tf2.image.crop_and_resize( feature_maps_concat, boxes=boxes, box_indices=classes, crop_size=[mask_height, mask_width], method='bilinear') # Split the cropped masks back into instance masks, part masks, and surface # coordinates. num_parts = tf.shape(part_heatmap)[-1] instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split( cropped_masks, [1, num_parts, 2 * num_parts], axis=-1) # Threshold the instance masks. Resulting tensor has shape # [max_detections, mask_height, mask_width, 1]. instance_masks_int = tf.cast( tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32) # Produce a binary mask that is 1.0 only: # - in the foreground region for an instance # - in detections corresponding to the DensePose class det_with_parts = tf.equal(classes, densepose_class_index) det_with_parts = tf.cast( tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32) instance_masks_with_parts = tf.math.multiply(instance_masks_int, det_with_parts) # Similarly, produce a binary mask that holds the foreground masks only for # instances without parts (i.e. non-DensePose classes). det_without_parts = 1 - det_with_parts instance_masks_without_parts = tf.math.multiply(instance_masks_int, det_without_parts) # Assemble a tensor that has standard instance segmentation masks for # non-DensePose classes (with values in [0, 1]), and part segmentation masks # for DensePose classes (with vaues in [0, 1, ..., num_parts]). 
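  # Illustrative example (not part of the original code): for a DensePose-class
  # foreground pixel whose highest scoring part has 0-indexed id 3, the
  # assembled value below is 1 * (3 + 1) = 4, while foreground pixels of other
  # classes stay at 1 and background stays at 0.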
part_mask_int_zero_indexed = tf.math.argmax( part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis] part_mask_int_one_indexed = part_mask_int_zero_indexed + 1 all_instances = (instance_masks_without_parts + instance_masks_with_parts * part_mask_int_one_indexed) # Gather the surface coordinates for the parts. surface_coords_cropped = tf.reshape( surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2]) surface_coords = gather_surface_coords_for_parts(surface_coords_cropped, part_mask_int_zero_indexed) surface_coords = ( surface_coords * tf.cast(instance_masks_with_parts, tf.float32)) return [tf.squeeze(all_instances, axis=3), surface_coords] def gather_surface_coords_for_parts(surface_coords_cropped, highest_scoring_part): """Gathers the (v, u) coordinates for the highest scoring DensePose parts. Args: surface_coords_cropped: A [max_detections, height, width, num_parts, 2] float32 tensor with (v, u) surface coordinates. highest_scoring_part: A [max_detections, height, width] integer tensor with the highest scoring part (0-indexed) indices for each location. Returns: A [max_detections, height, width, 2] float32 tensor with the (v, u) coordinates selected from the highest scoring parts. """ max_detections, height, width, num_parts, _ = ( shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped)) flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2]) flattened_part_ids = tf.reshape(highest_scoring_part, [-1]) # Produce lookup indices that represent the locations of the highest scoring # parts in the `flattened_surface_coords` tensor. flattened_lookup_indices = ( num_parts * tf.range(max_detections * height * width) + flattened_part_ids) vu_coords_flattened = tf.gather(flattened_surface_coords, flattened_lookup_indices, axis=0) return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2]) def predicted_embeddings_at_object_centers(embedding_predictions, y_indices, x_indices): """Returns the predicted embeddings at specified object centers. Args: embedding_predictions: A float tensor of shape [batch_size, height, width, reid_embed_size] holding predicted embeddings. y_indices: A [batch, num_instances] int tensor holding y indices for object centers. These indices correspond to locations in the output feature map. x_indices: A [batch, num_instances] int tensor holding x indices for object centers. These indices correspond to locations in the output feature map. Returns: A float tensor of shape [batch_size, num_objects, reid_embed_size] where predicted embeddings are gathered at the provided locations. """ batch_size, _, width, _ = _get_shape(embedding_predictions, 4) flattened_indices = flattened_indices_from_row_col_indices( y_indices, x_indices, width) _, num_instances = _get_shape(flattened_indices, 2) embeddings_flat = _flatten_spatial_dimensions(embedding_predictions) embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1) embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1]) return embeddings class ObjectDetectionParams( collections.namedtuple('ObjectDetectionParams', [ 'localization_loss', 'scale_loss_weight', 'offset_loss_weight', 'task_loss_weight', 'scale_head_num_filters', 'scale_head_kernel_sizes', 'offset_head_num_filters', 'offset_head_kernel_sizes' ])): """Namedtuple to host object detection related parameters. This is a wrapper class over the fields that are either the hyper-parameters or the loss functions needed for the object detection task. 
  The class is immutable after construction. Please see the __new__ function
  for detailed information for each field.
  """

  __slots__ = ()

  def __new__(cls,
              localization_loss,
              scale_loss_weight,
              offset_loss_weight,
              task_loss_weight=1.0,
              scale_head_num_filters=(256),
              scale_head_kernel_sizes=(3),
              offset_head_num_filters=(256),
              offset_head_kernel_sizes=(3)):
    """Constructor with default values for ObjectDetectionParams.

    Args:
      localization_loss: an object_detection.core.losses.Loss object to compute
        the loss for the center offset and height/width predictions in
        CenterNet.
      scale_loss_weight: float, The weight for localizing box size. Note that
        the scale loss is dependent on the input image size, since we penalize
        the raw height and width. This constant may need to be adjusted
        depending on the input size.
      offset_loss_weight: float, The weight for localizing center offsets.
      task_loss_weight: float, the weight of the object detection loss.
      scale_head_num_filters: filter numbers of the convolutional layers used
        by the object detection box scale prediction head.
      scale_head_kernel_sizes: kernel size of the convolutional layers used by
        the object detection box scale prediction head.
      offset_head_num_filters: filter numbers of the convolutional layers used
        by the object detection box offset prediction head.
      offset_head_kernel_sizes: kernel size of the convolutional layers used by
        the object detection box offset prediction head.

    Returns:
      An initialized ObjectDetectionParams namedtuple.
    """
    return super(ObjectDetectionParams,
                 cls).__new__(cls, localization_loss, scale_loss_weight,
                              offset_loss_weight, task_loss_weight,
                              scale_head_num_filters, scale_head_kernel_sizes,
                              offset_head_num_filters,
                              offset_head_kernel_sizes)


class KeypointEstimationParams(
    collections.namedtuple('KeypointEstimationParams', [
        'task_name', 'class_id', 'keypoint_indices', 'classification_loss',
        'localization_loss', 'keypoint_labels', 'keypoint_std_dev',
        'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',
        'keypoint_regression_loss_weight',
        'keypoint_candidate_score_threshold', 'heatmap_bias_init',
        'num_candidates_per_keypoint', 'task_loss_weight',
        'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',
        'candidate_search_scale', 'candidate_ranking_mode',
        'offset_peak_radius', 'per_keypoint_offset', 'predict_depth',
        'per_keypoint_depth', 'keypoint_depth_loss_weight',
        'score_distance_offset', 'clip_out_of_frame_keypoints',
        'rescore_instances', 'heatmap_head_num_filters',
        'heatmap_head_kernel_sizes', 'offset_head_num_filters',
        'offset_head_kernel_sizes', 'regress_head_num_filters',
        'regress_head_kernel_sizes'
    ])):
  """Namedtuple to host keypoint estimation related parameters.

  This is a wrapper class over the fields that are either the hyper-parameters
  or the loss functions needed for the keypoint estimation task.

  The class is immutable after construction. Please see the __new__ function
  for detailed information for each field.
  """

  __slots__ = ()

  def __new__(cls,
              task_name,
              class_id,
              keypoint_indices,
              classification_loss,
              localization_loss,
              keypoint_labels=None,
              keypoint_std_dev=None,
              keypoint_heatmap_loss_weight=1.0,
              keypoint_offset_loss_weight=1.0,
              keypoint_regression_loss_weight=1.0,
              keypoint_candidate_score_threshold=0.1,
              heatmap_bias_init=-2.19,
              num_candidates_per_keypoint=100,
              task_loss_weight=1.0,
              peak_max_pool_kernel_size=3,
              unmatched_keypoint_score=0.1,
              box_scale=1.2,
              candidate_search_scale=0.3,
              candidate_ranking_mode='min_distance',
              offset_peak_radius=0,
              per_keypoint_offset=False,
              predict_depth=False,
              per_keypoint_depth=False,
              keypoint_depth_loss_weight=1.0,
              score_distance_offset=1e-6,
              clip_out_of_frame_keypoints=False,
              rescore_instances=False,
              heatmap_head_num_filters=(256),
              heatmap_head_kernel_sizes=(3),
              offset_head_num_filters=(256),
              offset_head_kernel_sizes=(3),
              regress_head_num_filters=(256),
              regress_head_kernel_sizes=(3)):
    """Constructor with default values for KeypointEstimationParams.

    Args:
      task_name: string, the name of the task this namedtuple corresponds to.
        Note that it should be a unique identifier of the task.
      class_id: int, the ID of the class that contains the target keypoints to
        be considered in this task. For example, if the task is human pose
        estimation, the class id should correspond to the "human" class. Note
        that the ID is 0-based, meaning that class 0 corresponds to the first
        non-background object class.
      keypoint_indices: A list of integers representing the indices of the
        keypoints to be considered in this task. This is used to retrieve the
        subset of the keypoints from gt_keypoints that should be considered in
        this task.
      classification_loss: an object_detection.core.losses.Loss object to
        compute the loss for the class predictions in CenterNet.
      localization_loss: an object_detection.core.losses.Loss object to compute
        the loss for the center offset and height/width predictions in
        CenterNet.
      keypoint_labels: A list of strings representing the label text of each
        keypoint, e.g. "nose", "left_shoulder". Note that the length of this
        list should be equal to keypoint_indices.
      keypoint_std_dev: A list of floats representing the standard deviation of
        the Gaussian kernel used to generate the keypoint heatmap. This
        provides the flexibility of using a different Gaussian kernel size for
        each keypoint class.
      keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.
      keypoint_offset_loss_weight: float, The weight for the keypoint offsets
        loss.
      keypoint_regression_loss_weight: float, The weight for keypoint
        regression loss. Note that the loss is dependent on the input image
        size, since we penalize the raw height and width. This constant may
        need to be adjusted depending on the input size.
      keypoint_candidate_score_threshold: float, The heatmap score threshold
        for a keypoint to become a valid candidate.
      heatmap_bias_init: float, the initial value of bias in the convolutional
        kernel of the class prediction head. If set to None, the bias is
        initialized with zeros.
      num_candidates_per_keypoint: The maximum number of candidates to retrieve
        for each keypoint.
      task_loss_weight: float, the weight of the keypoint estimation loss.
      peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
        score locations in a neighborhood (independently for each keypoint
        type).
      unmatched_keypoint_score: The default score to use for regressed
        keypoints that are not successfully snapped to a nearby candidate.
      box_scale: The multiplier to expand the bounding boxes (either the
        provided boxes or those which tightly cover the regressed keypoints).
      candidate_search_scale: The scale parameter that multiplies the largest
        dimension of a bounding box. The resulting distance becomes a search
        radius for candidates in the vicinity of each regressed keypoint.
      candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio']
        indicating how to select the keypoint candidate.
      offset_peak_radius: The radius (in the unit of output pixel) around
        groundtruth heatmap peak to assign the offset targets. If set to 0,
        then the offset target will only be assigned to the heatmap peak (same
        behavior as the original paper).
      per_keypoint_offset: A bool indicating whether to assign offsets for each
        keypoint channel separately. If set to False, the output offset target
        has the shape [batch_size, out_height, out_width, 2] (same behavior as
        the original paper). If set to True, the output offset target has the
        shape [batch_size, out_height, out_width, 2 * num_keypoints]
        (recommended when the offset_peak_radius is not zero).
      predict_depth: A bool indicating whether to predict the depth of each
        keypoint.
      per_keypoint_depth: A bool indicating whether the model predicts the
        depth of each keypoint in independent channels. Similar to
        per_keypoint_offset but for the keypoint depth.
      keypoint_depth_loss_weight: The weight of the keypoint depth loss.
      score_distance_offset: The distance offset to apply in the denominator
        when candidate_ranking_mode is 'score_distance_ratio'. The metric to
        maximize in this scenario is
        score / (distance + score_distance_offset). Larger values of
        score_distance_offset make the keypoint score gain more relative
        importance.
      clip_out_of_frame_keypoints: Whether keypoints outside the image frame
        should be clipped back to the image boundary. If True, the keypoints
        that are clipped have scores set to 0.0.
      rescore_instances: Whether to rescore instances based on a combination of
        detection score and keypoint scores.
      heatmap_head_num_filters: filter numbers of the convolutional layers used
        by the keypoint heatmap prediction head.
      heatmap_head_kernel_sizes: kernel size of the convolutional layers used
        by the keypoint heatmap prediction head.
      offset_head_num_filters: filter numbers of the convolutional layers used
        by the keypoint offset prediction head.
      offset_head_kernel_sizes: kernel size of the convolutional layers used by
        the keypoint offset prediction head.
      regress_head_num_filters: filter numbers of the convolutional layers used
        by the keypoint regression prediction head.
      regress_head_kernel_sizes: kernel size of the convolutional layers used
        by the keypoint regression prediction head.

    Returns:
      An initialized KeypointEstimationParams namedtuple.
    """
    return super(KeypointEstimationParams, cls).__new__(
        cls, task_name, class_id, keypoint_indices, classification_loss,
        localization_loss, keypoint_labels, keypoint_std_dev,
        keypoint_heatmap_loss_weight, keypoint_offset_loss_weight,
        keypoint_regression_loss_weight, keypoint_candidate_score_threshold,
        heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight,
        peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale,
        candidate_search_scale, candidate_ranking_mode, offset_peak_radius,
        per_keypoint_offset, predict_depth, per_keypoint_depth,
        keypoint_depth_loss_weight, score_distance_offset,
        clip_out_of_frame_keypoints, rescore_instances,
        heatmap_head_num_filters, heatmap_head_kernel_sizes,
        offset_head_num_filters, offset_head_kernel_sizes,
        regress_head_num_filters, regress_head_kernel_sizes)


class ObjectCenterParams(
    collections.namedtuple('ObjectCenterParams', [
        'classification_loss', 'object_center_loss_weight',
        'heatmap_bias_init', 'min_box_overlap_iou', 'max_box_predictions',
        'use_labeled_classes', 'keypoint_weights_for_center',
        'center_head_num_filters', 'center_head_kernel_sizes'
    ])):
  """Namedtuple to store object center prediction related parameters."""

  __slots__ = ()

  def __new__(cls,
              classification_loss,
              object_center_loss_weight,
              heatmap_bias_init=-2.19,
              min_box_overlap_iou=0.7,
              max_box_predictions=100,
              use_labeled_classes=False,
              keypoint_weights_for_center=None,
              center_head_num_filters=(256),
              center_head_kernel_sizes=(3)):
    """Constructor with default values for ObjectCenterParams.

    Args:
      classification_loss: an object_detection.core.losses.Loss object to
        compute the loss for the class predictions in CenterNet.
      object_center_loss_weight: float, The weight for the object center loss.
      heatmap_bias_init: float, the initial value of bias in the convolutional
        kernel of the object center prediction head. If set to None, the bias
        is initialized with zeros.
      min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used
        for computing the class specific center heatmaps.
      max_box_predictions: int, the maximum number of boxes to predict.
      use_labeled_classes: boolean, whether to compute the loss only for
        labeled classes.
      keypoint_weights_for_center: (optional) The keypoint weights used for
        calculating the location of the object center. If provided, the number
        of weights needs to be the same as the number of keypoints. The object
        center is calculated as the weighted mean of the keypoint locations. If
        not provided, the object center is determined by the center of the
        bounding box (default behavior).
      center_head_num_filters: filter numbers of the convolutional layers used
        by the object center prediction head.
      center_head_kernel_sizes: kernel size of the convolutional layers used by
        the object center prediction head.

    Returns:
      An initialized ObjectCenterParams namedtuple.
""" return super(ObjectCenterParams, cls).__new__(cls, classification_loss, object_center_loss_weight, heatmap_bias_init, min_box_overlap_iou, max_box_predictions, use_labeled_classes, keypoint_weights_for_center, center_head_num_filters, center_head_kernel_sizes) class MaskParams( collections.namedtuple('MaskParams', [ 'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width', 'score_threshold', 'heatmap_bias_init', 'mask_head_num_filters', 'mask_head_kernel_sizes' ])): """Namedtuple to store mask prediction related parameters.""" __slots__ = () def __new__(cls, classification_loss, task_loss_weight=1.0, mask_height=256, mask_width=256, score_threshold=0.5, heatmap_bias_init=-2.19, mask_head_num_filters=(256), mask_head_kernel_sizes=(3)): """Constructor with default values for MaskParams. Args: classification_loss: an object_detection.core.losses.Loss object to compute the loss for the semantic segmentation predictions in CenterNet. task_loss_weight: float, The loss weight for the segmentation task. mask_height: The height of the resized instance segmentation mask. mask_width: The width of the resized instance segmentation mask. score_threshold: The threshold at which to convert predicted mask probabilities (after passing through sigmoid) into foreground pixels. heatmap_bias_init: float, the initial value of bias in the convolutional kernel of the semantic segmentation prediction head. If set to None, the bias is initialized with zeros. mask_head_num_filters: filter numbers of the convolutional layers used by the mask prediction head. mask_head_kernel_sizes: kernel size of the convolutional layers used by the mask prediction head. Returns: An initialized MaskParams namedtuple. """ return super(MaskParams, cls).__new__(cls, classification_loss, task_loss_weight, mask_height, mask_width, score_threshold, heatmap_bias_init, mask_head_num_filters, mask_head_kernel_sizes) class DensePoseParams( collections.namedtuple('DensePoseParams', [ 'class_id', 'classification_loss', 'localization_loss', 'part_loss_weight', 'coordinate_loss_weight', 'num_parts', 'task_loss_weight', 'upsample_to_input_res', 'upsample_method', 'heatmap_bias_init' ])): """Namedtuple to store DensePose prediction related parameters.""" __slots__ = () def __new__(cls, class_id, classification_loss, localization_loss, part_loss_weight=1.0, coordinate_loss_weight=1.0, num_parts=24, task_loss_weight=1.0, upsample_to_input_res=True, upsample_method='bilinear', heatmap_bias_init=-2.19): """Constructor with default values for DensePoseParams. Args: class_id: the ID of the class that contains the DensePose groundtruth. This should typically correspond to the "person" class. Note that the ID is 0-based, meaning that class 0 corresponds to the first non-background object class. classification_loss: an object_detection.core.losses.Loss object to compute the loss for the body part predictions in CenterNet. localization_loss: an object_detection.core.losses.Loss object to compute the loss for the surface coordinate regression in CenterNet. part_loss_weight: The loss weight to apply to part prediction. coordinate_loss_weight: The loss weight to apply to surface coordinate prediction. num_parts: The number of DensePose parts to predict. task_loss_weight: float, the loss weight for the DensePose task. upsample_to_input_res: Whether to upsample the DensePose feature maps to the input resolution before applying loss. Note that the prediction outputs are still at the standard CenterNet output stride. 
upsample_method: Method for upsampling DensePose feature maps. Options are either 'bilinear' or 'nearest'). This takes no effect when `upsample_to_input_res` is False. heatmap_bias_init: float, the initial value of bias in the convolutional kernel of the part prediction head. If set to None, the bias is initialized with zeros. Returns: An initialized DensePoseParams namedtuple. """ return super(DensePoseParams, cls).__new__(cls, class_id, classification_loss, localization_loss, part_loss_weight, coordinate_loss_weight, num_parts, task_loss_weight, upsample_to_input_res, upsample_method, heatmap_bias_init) class TrackParams( collections.namedtuple('TrackParams', [ 'num_track_ids', 'reid_embed_size', 'num_fc_layers', 'classification_loss', 'task_loss_weight' ])): """Namedtuple to store tracking prediction related parameters.""" __slots__ = () def __new__(cls, num_track_ids, reid_embed_size, num_fc_layers, classification_loss, task_loss_weight=1.0): """Constructor with default values for TrackParams. Args: num_track_ids: int. The maximum track ID in the dataset. Used for ReID embedding classification task. reid_embed_size: int. The embedding size for ReID task. num_fc_layers: int. The number of (fully-connected, batch-norm, relu) layers for track ID classification head. classification_loss: an object_detection.core.losses.Loss object to compute the loss for the ReID embedding in CenterNet. task_loss_weight: float, the loss weight for the tracking task. Returns: An initialized TrackParams namedtuple. """ return super(TrackParams, cls).__new__(cls, num_track_ids, reid_embed_size, num_fc_layers, classification_loss, task_loss_weight) class TemporalOffsetParams( collections.namedtuple('TemporalOffsetParams', [ 'localization_loss', 'task_loss_weight' ])): """Namedtuple to store temporal offset related parameters.""" __slots__ = () def __new__(cls, localization_loss, task_loss_weight=1.0): """Constructor with default values for TrackParams. Args: localization_loss: an object_detection.core.losses.Loss object to compute the loss for the temporal offset in CenterNet. task_loss_weight: float, the loss weight for the temporal offset task. Returns: An initialized TemporalOffsetParams namedtuple. """ return super(TemporalOffsetParams, cls).__new__(cls, localization_loss, task_loss_weight) # The following constants are used to generate the keys of the # (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch # class. DETECTION_TASK = 'detection_task' OBJECT_CENTER = 'object_center' BOX_SCALE = 'box/scale' BOX_OFFSET = 'box/offset' KEYPOINT_REGRESSION = 'keypoint/regression' KEYPOINT_HEATMAP = 'keypoint/heatmap' KEYPOINT_OFFSET = 'keypoint/offset' KEYPOINT_DEPTH = 'keypoint/depth' SEGMENTATION_TASK = 'segmentation_task' SEGMENTATION_HEATMAP = 'segmentation/heatmap' DENSEPOSE_TASK = 'densepose_task' DENSEPOSE_HEATMAP = 'densepose/heatmap' DENSEPOSE_REGRESSION = 'densepose/regression' LOSS_KEY_PREFIX = 'Loss' TRACK_TASK = 'track_task' TRACK_REID = 'track/reid' TEMPORALOFFSET_TASK = 'temporal_offset_task' TEMPORAL_OFFSET = 'track/offset' def get_keypoint_name(task_name, head_name): return '%s/%s' % (task_name, head_name) def get_num_instances_from_weights(groundtruth_weights_list): """Computes the number of instances/boxes from the weights in a batch. Args: groundtruth_weights_list: A list of float tensors with shape [max_num_instances] representing whether there is an actual instance in the image (with non-zero value) or is padded to match the max_num_instances (with value 0.0). 
The list represents the batch dimension. Returns: A scalar integer tensor incidating how many instances/boxes are in the images in the batch. Note that this function is usually used to normalize the loss so the minimum return value is 1 to avoid weird behavior. """ num_instances = tf.reduce_sum( [tf.math.count_nonzero(w) for w in groundtruth_weights_list]) num_instances = tf.maximum(num_instances, 1) return num_instances class CenterNetMetaArch(model.DetectionModel): """The CenterNet meta architecture [1]. [1]: https://arxiv.org/abs/1904.07850 """ def __init__(self, is_training, add_summaries, num_classes, feature_extractor, image_resizer_fn, object_center_params, object_detection_params=None, keypoint_params_dict=None, mask_params=None, densepose_params=None, track_params=None, temporal_offset_params=None, use_depthwise=False, compute_heatmap_sparse=False, non_max_suppression_fn=None, unit_height_conv=False): """Initializes a CenterNet model. Args: is_training: Set to True if this model is being built for training. add_summaries: Whether to add tf summaries in the model. num_classes: int, The number of classes that the model should predict. feature_extractor: A CenterNetFeatureExtractor to use to extract features from an image. image_resizer_fn: a callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions and a 1-D tensor of shape [3] indicating shape of true image within the resized image tensor as the resized image tensor could be padded. See builders/image_resizer_builder.py. object_center_params: An ObjectCenterParams namedtuple. This object holds the hyper-parameters for object center prediction. This is required by either object detection or keypoint estimation tasks. object_detection_params: An ObjectDetectionParams namedtuple. This object holds the hyper-parameters necessary for object detection. Please see the class definition for more details. keypoint_params_dict: A dictionary that maps from task name to the corresponding KeypointEstimationParams namedtuple. This object holds the hyper-parameters necessary for multiple keypoint estimations. Please see the class definition for more details. mask_params: A MaskParams namedtuple. This object holds the hyper-parameters for segmentation. Please see the class definition for more details. densepose_params: A DensePoseParams namedtuple. This object holds the hyper-parameters for DensePose prediction. Please see the class definition for more details. Note that if this is provided, it is expected that `mask_params` is also provided. track_params: A TrackParams namedtuple. This object holds the hyper-parameters for tracking. Please see the class definition for more details. temporal_offset_params: A TemporalOffsetParams namedtuple. This object holds the hyper-parameters for offset prediction based tracking. use_depthwise: If true, all task heads will be constructed using separable_conv. Otherwise, standard convoltuions will be used. compute_heatmap_sparse: bool, whether or not to use the sparse version of the Op that computes the center heatmaps. The sparse version scales better with number of channels in the heatmap, but in some cases is known to cause an OOM error. See b/170989061. non_max_suppression_fn: Optional Non Max Suppression function to apply. unit_height_conv: If True, Conv2Ds in prediction heads have asymmetric kernels with height=1. 
""" assert object_detection_params or keypoint_params_dict # Shorten the name for convenience and better formatting. self._is_training = is_training # The Objects as Points paper attaches loss functions to multiple # (`num_feature_outputs`) feature maps in the the backbone. E.g. # for the hourglass backbone, `num_feature_outputs` is 2. self._num_classes = num_classes self._feature_extractor = feature_extractor self._num_feature_outputs = feature_extractor.num_feature_outputs self._stride = self._feature_extractor.out_stride self._image_resizer_fn = image_resizer_fn self._center_params = object_center_params self._od_params = object_detection_params self._kp_params_dict = keypoint_params_dict self._mask_params = mask_params if densepose_params is not None and mask_params is None: raise ValueError('To run DensePose prediction, `mask_params` must also ' 'be supplied.') self._densepose_params = densepose_params self._track_params = track_params self._temporal_offset_params = temporal_offset_params self._use_depthwise = use_depthwise self._compute_heatmap_sparse = compute_heatmap_sparse # subclasses may not implement the unit_height_conv arg, so only provide it # as a kwarg if it is True. kwargs = {'unit_height_conv': unit_height_conv} if unit_height_conv else {} # Construct the prediction head nets. self._prediction_head_dict = self._construct_prediction_heads( num_classes, self._num_feature_outputs, class_prediction_bias_init=self._center_params.heatmap_bias_init, **kwargs) # Initialize the target assigners. self._target_assigner_dict = self._initialize_target_assigners( stride=self._stride, min_box_overlap_iou=self._center_params.min_box_overlap_iou) # Will be used in VOD single_frame_meta_arch for tensor reshape. self._batched_prediction_tensor_names = [] self._non_max_suppression_fn = non_max_suppression_fn super(CenterNetMetaArch, self).__init__(num_classes) @property def batched_prediction_tensor_names(self): if not self._batched_prediction_tensor_names: raise RuntimeError('Must call predict() method to get batched prediction ' 'tensor names.') return self._batched_prediction_tensor_names def _make_prediction_net_list(self, num_feature_outputs, num_out_channels, kernel_sizes=(3), num_filters=(256), bias_fill=None, name=None, unit_height_conv=False): prediction_net_list = [] for i in range(num_feature_outputs): prediction_net_list.append( make_prediction_net( num_out_channels, kernel_sizes=kernel_sizes, num_filters=num_filters, bias_fill=bias_fill, use_depthwise=self._use_depthwise, name='{}_{}'.format(name, i) if name else name, unit_height_conv=unit_height_conv)) return prediction_net_list def _construct_prediction_heads(self, num_classes, num_feature_outputs, class_prediction_bias_init, unit_height_conv=False): """Constructs the prediction heads based on the specific parameters. Args: num_classes: An integer indicating how many classes in total to predict. num_feature_outputs: An integer indicating how many feature outputs to use for calculating the loss. The Objects as Points paper attaches loss functions to multiple (`num_feature_outputs`) feature maps in the the backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2. class_prediction_bias_init: float, the initial value of bias in the convolutional kernel of the class prediction head. If set to None, the bias is initialized with zeros. unit_height_conv: If True, Conv2Ds have asymmetric kernels with height=1. Returns: A dictionary of keras modules generated by calling make_prediction_net function. 
It will also create and set a private member of the class when learning the tracking task. """ prediction_heads = {} prediction_heads[OBJECT_CENTER] = self._make_prediction_net_list( num_feature_outputs, num_classes, kernel_sizes=self._center_params.center_head_kernel_sizes, num_filters=self._center_params.center_head_num_filters, bias_fill=class_prediction_bias_init, name='center', unit_height_conv=unit_height_conv) if self._od_params is not None: prediction_heads[BOX_SCALE] = self._make_prediction_net_list( num_feature_outputs, NUM_SIZE_CHANNELS, kernel_sizes=self._od_params.scale_head_kernel_sizes, num_filters=self._od_params.scale_head_num_filters, name='box_scale', unit_height_conv=unit_height_conv) prediction_heads[BOX_OFFSET] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS, kernel_sizes=self._od_params.offset_head_kernel_sizes, num_filters=self._od_params.offset_head_num_filters, name='box_offset', unit_height_conv=unit_height_conv) if self._kp_params_dict is not None: for task_name, kp_params in self._kp_params_dict.items(): num_keypoints = len(kp_params.keypoint_indices) prediction_heads[get_keypoint_name( task_name, KEYPOINT_HEATMAP)] = self._make_prediction_net_list( num_feature_outputs, num_keypoints, kernel_sizes=kp_params.heatmap_head_kernel_sizes, num_filters=kp_params.heatmap_head_num_filters, bias_fill=kp_params.heatmap_bias_init, name='kpt_heatmap', unit_height_conv=unit_height_conv) prediction_heads[get_keypoint_name( task_name, KEYPOINT_REGRESSION)] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS * num_keypoints, kernel_sizes=kp_params.regress_head_kernel_sizes, num_filters=kp_params.regress_head_num_filters, name='kpt_regress', unit_height_conv=unit_height_conv) if kp_params.per_keypoint_offset: prediction_heads[get_keypoint_name( task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS * num_keypoints, kernel_sizes=kp_params.offset_head_kernel_sizes, num_filters=kp_params.offset_head_num_filters, name='kpt_offset', unit_height_conv=unit_height_conv) else: prediction_heads[get_keypoint_name( task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS, kernel_sizes=kp_params.offset_head_kernel_sizes, num_filters=kp_params.offset_head_num_filters, name='kpt_offset', unit_height_conv=unit_height_conv) if kp_params.predict_depth: num_depth_channel = ( num_keypoints if kp_params.per_keypoint_depth else 1) prediction_heads[get_keypoint_name( task_name, KEYPOINT_DEPTH)] = self._make_prediction_net_list( num_feature_outputs, num_depth_channel, name='kpt_depth', unit_height_conv=unit_height_conv) if self._mask_params is not None: prediction_heads[SEGMENTATION_HEATMAP] = self._make_prediction_net_list( num_feature_outputs, num_classes, kernel_sizes=self._mask_params.mask_head_kernel_sizes, num_filters=self._mask_params.mask_head_num_filters, bias_fill=self._mask_params.heatmap_bias_init, name='seg_heatmap', unit_height_conv=unit_height_conv) if self._densepose_params is not None: prediction_heads[DENSEPOSE_HEATMAP] = self._make_prediction_net_list( num_feature_outputs, self._densepose_params.num_parts, bias_fill=self._densepose_params.heatmap_bias_init, name='dense_pose_heatmap', unit_height_conv=unit_height_conv) prediction_heads[DENSEPOSE_REGRESSION] = self._make_prediction_net_list( num_feature_outputs, 2 * self._densepose_params.num_parts, name='dense_pose_regress', unit_height_conv=unit_height_conv) if self._track_params is not 
None: prediction_heads[TRACK_REID] = self._make_prediction_net_list( num_feature_outputs, self._track_params.reid_embed_size, name='track_reid', unit_height_conv=unit_height_conv) # Creates a classification network to train object embeddings by learning # a projection from embedding space to object track ID space. self.track_reid_classification_net = tf.keras.Sequential() for _ in range(self._track_params.num_fc_layers - 1): self.track_reid_classification_net.add( tf.keras.layers.Dense(self._track_params.reid_embed_size, input_shape=( self._track_params.reid_embed_size,))) self.track_reid_classification_net.add( tf.keras.layers.BatchNormalization()) self.track_reid_classification_net.add(tf.keras.layers.ReLU()) self.track_reid_classification_net.add( tf.keras.layers.Dense(self._track_params.num_track_ids, input_shape=( self._track_params.reid_embed_size,))) if self._temporal_offset_params is not None: prediction_heads[TEMPORAL_OFFSET] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS, name='temporal_offset', unit_height_conv=unit_height_conv) return prediction_heads def _initialize_target_assigners(self, stride, min_box_overlap_iou): """Initializes the target assigners and puts them in a dictionary. Args: stride: An integer indicating the output stride of the feature map with respect to the input image. min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes need to have with groundtruth boxes to not be penalized. This is used for computing the class specific center heatmaps. Returns: A dictionary of initialized target assigners for each task. """ target_assigners = {} keypoint_weights_for_center = ( self._center_params.keypoint_weights_for_center) if not keypoint_weights_for_center: target_assigners[OBJECT_CENTER] = ( cn_assigner.CenterNetCenterHeatmapTargetAssigner( stride, min_box_overlap_iou, self._compute_heatmap_sparse)) self._center_from_keypoints = False else: # Determining the object center location by keypoint location is only # supported when there is exactly one keypoint prediction task and no # object detection task is specified. assert len(self._kp_params_dict) == 1 and self._od_params is None kp_params = next(iter(self._kp_params_dict.values())) # The number of keypoint_weights_for_center needs to be the same as the # number of keypoints.
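# For example (illustrative only): a COCO-style person task with 17 keypoint
# indices would require keypoint_weights_for_center to hold 17 float weights,
# one per keypoint, used by the assigner when deriving object centers from
# keypoints.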
assert len(keypoint_weights_for_center) == len(kp_params.keypoint_indices) target_assigners[OBJECT_CENTER] = ( cn_assigner.CenterNetCenterHeatmapTargetAssigner( stride, min_box_overlap_iou, self._compute_heatmap_sparse, keypoint_class_id=kp_params.class_id, keypoint_indices=kp_params.keypoint_indices, keypoint_weights_for_center=keypoint_weights_for_center)) self._center_from_keypoints = True if self._od_params is not None: target_assigners[DETECTION_TASK] = ( cn_assigner.CenterNetBoxTargetAssigner(stride)) if self._kp_params_dict is not None: for task_name, kp_params in self._kp_params_dict.items(): target_assigners[task_name] = ( cn_assigner.CenterNetKeypointTargetAssigner( stride=stride, class_id=kp_params.class_id, keypoint_indices=kp_params.keypoint_indices, keypoint_std_dev=kp_params.keypoint_std_dev, peak_radius=kp_params.offset_peak_radius, per_keypoint_offset=kp_params.per_keypoint_offset, compute_heatmap_sparse=self._compute_heatmap_sparse, per_keypoint_depth=kp_params.per_keypoint_depth)) if self._mask_params is not None: target_assigners[SEGMENTATION_TASK] = ( cn_assigner.CenterNetMaskTargetAssigner(stride)) if self._densepose_params is not None: dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride target_assigners[DENSEPOSE_TASK] = ( cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride)) if self._track_params is not None: target_assigners[TRACK_TASK] = ( cn_assigner.CenterNetTrackTargetAssigner( stride, self._track_params.num_track_ids)) if self._temporal_offset_params is not None: target_assigners[TEMPORALOFFSET_TASK] = ( cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride)) return target_assigners def _compute_object_center_loss(self, input_height, input_width, object_center_predictions, per_pixel_weights): """Computes the object center loss. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. object_center_predictions: A list of float tensors of shape [batch_size, out_height, out_width, num_classes] representing the object center feature maps. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: A float scalar tensor representing the object center loss per instance. """ gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) if self._center_params.use_labeled_classes: gt_labeled_classes_list = self.groundtruth_lists( fields.InputDataFields.groundtruth_labeled_classes) batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0) batch_labeled_classes_shape = tf.shape(batch_labeled_classes) batch_labeled_classes = tf.reshape( batch_labeled_classes, [batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]]) per_pixel_weights = per_pixel_weights * batch_labeled_classes # Convert the groundtruth to targets. 
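# Depending on the configuration, the center heatmap targets are rendered
# either from groundtruth boxes or from groundtruth keypoints (see
# _initialize_target_assigners). The resulting target tensor has shape
# [batch_size, out_height, out_width, num_classes]; e.g. with a 512x512 input
# and stride 4 (illustrative values) the spatial size would be 128x128.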
assigner = self._target_assigner_dict[OBJECT_CENTER] if self._center_from_keypoints: gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) heatmap_targets = assigner.assign_center_targets_from_keypoints( height=input_height, width=input_width, gt_classes_list=gt_classes_list, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list) else: gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) heatmap_targets = assigner.assign_center_targets_from_boxes( height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list) flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets) num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) loss = 0.0 object_center_loss = self._center_params.classification_loss # Loop through each feature output head. for pred in object_center_predictions: pred = _flatten_spatial_dimensions(pred) loss += object_center_loss( pred, flattened_heatmap_targets, weights=per_pixel_weights) loss_per_instance = tf.reduce_sum(loss) / ( float(len(object_center_predictions)) * num_boxes) return loss_per_instance def _compute_object_detection_losses(self, input_height, input_width, prediction_dict, per_pixel_weights): """Computes the weighted object detection losses. This wrapper function calls the function which computes the losses for object detection task and applies corresponding weights to the losses. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. prediction_dict: A dictionary holding predicted tensors output by "predict" function. See "predict" function for more detailed description. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: A dictionary of scalar float tensors representing the weighted losses for object detection task: BOX_SCALE: the weighted scale (height/width) loss. BOX_OFFSET: the weighted object offset loss. """ od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss( scale_predictions=prediction_dict[BOX_SCALE], offset_predictions=prediction_dict[BOX_OFFSET], input_height=input_height, input_width=input_width) loss_dict = {} loss_dict[BOX_SCALE] = ( self._od_params.scale_loss_weight * od_scale_loss) loss_dict[BOX_OFFSET] = ( self._od_params.offset_loss_weight * od_offset_loss) return loss_dict def _compute_box_scale_and_offset_loss(self, input_height, input_width, scale_predictions, offset_predictions): """Computes the scale loss of the object detection task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. scale_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2] representing the prediction heads of the model for object scale (i.e height and width). offset_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2] representing the prediction heads of the model for object offset. Returns: A tuple of two losses: scale_loss: A float scalar tensor representing the object height/width loss normalized by total number of boxes. offset_loss: A float scalar tensor representing the object offset loss normalized by total number of boxes """ # TODO(vighneshb) Explore a size invariant version of scale loss. 
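# Sketch of the target semantics (following the Objects as Points paper; not
# a restatement of the assigner implementation): for a groundtruth box whose
# center falls at (y, x) in the stride-downsampled feature map, the scale
# target is the box (height, width) in feature map units and the offset
# target is the sub-pixel residual (y - floor(y), x - floor(x)); both are
# regressed only at the center locations, which is what batch_indices selects
# below.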
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) num_predictions = float(len(scale_predictions)) assigner = self._target_assigner_dict[DETECTION_TASK] (batch_indices, batch_height_width_targets, batch_offset_targets, batch_weights) = assigner.assign_size_and_offset_targets( height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_weights_list=gt_weights_list) batch_weights = tf.expand_dims(batch_weights, -1) scale_loss = 0 offset_loss = 0 localization_loss_fn = self._od_params.localization_loss for scale_pred, offset_pred in zip(scale_predictions, offset_predictions): # Compute the scale loss. scale_pred = cn_assigner.get_batch_predictions_from_indices( scale_pred, batch_indices) scale_loss += localization_loss_fn( scale_pred, batch_height_width_targets, weights=batch_weights) # Compute the offset loss. offset_pred = cn_assigner.get_batch_predictions_from_indices( offset_pred, batch_indices) offset_loss += localization_loss_fn( offset_pred, batch_offset_targets, weights=batch_weights) scale_loss = tf.reduce_sum(scale_loss) / ( num_predictions * num_boxes) offset_loss = tf.reduce_sum(offset_loss) / ( num_predictions * num_boxes) return scale_loss, offset_loss def _compute_keypoint_estimation_losses(self, task_name, input_height, input_width, prediction_dict, per_pixel_weights): """Computes the weighted keypoint losses.""" kp_params = self._kp_params_dict[task_name] heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP) offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET) regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION) depth_key = get_keypoint_name(task_name, KEYPOINT_DEPTH) heatmap_loss = self._compute_kp_heatmap_loss( input_height=input_height, input_width=input_width, task_name=task_name, heatmap_predictions=prediction_dict[heatmap_key], classification_loss_fn=kp_params.classification_loss, per_pixel_weights=per_pixel_weights) offset_loss = self._compute_kp_offset_loss( input_height=input_height, input_width=input_width, task_name=task_name, offset_predictions=prediction_dict[offset_key], localization_loss_fn=kp_params.localization_loss) reg_loss = self._compute_kp_regression_loss( input_height=input_height, input_width=input_width, task_name=task_name, regression_predictions=prediction_dict[regression_key], localization_loss_fn=kp_params.localization_loss) loss_dict = {} loss_dict[heatmap_key] = ( kp_params.keypoint_heatmap_loss_weight * heatmap_loss) loss_dict[offset_key] = ( kp_params.keypoint_offset_loss_weight * offset_loss) loss_dict[regression_key] = ( kp_params.keypoint_regression_loss_weight * reg_loss) if kp_params.predict_depth: depth_loss = self._compute_kp_depth_loss( input_height=input_height, input_width=input_width, task_name=task_name, depth_predictions=prediction_dict[depth_key], localization_loss_fn=kp_params.localization_loss) loss_dict[depth_key] = kp_params.keypoint_depth_loss_weight * depth_loss return loss_dict def _compute_kp_heatmap_loss(self, input_height, input_width, task_name, heatmap_predictions, classification_loss_fn, per_pixel_weights): """Computes the heatmap loss of the keypoint estimation task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. task_name: A string representing the name of the keypoint task. 
heatmap_predictions: A list of float tensors of shape [batch_size, out_height, out_width, num_keypoints] representing the prediction heads of the model for keypoint heatmap. classification_loss_fn: An object_detection.core.losses.Loss object to compute the loss for the class predictions in CenterNet. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: loss: A float scalar tensor representing the object keypoint heatmap loss normalized by number of instances. """ gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) assigner = self._target_assigner_dict[task_name] (keypoint_heatmap, num_instances_per_kp_type, valid_mask_batch) = assigner.assign_keypoint_heatmap_targets( height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list) flattened_valid_mask = _flatten_spatial_dimensions(valid_mask_batch) flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap) # Sum over the number of instances per keypoint type to get the total # number of keypoints. Note that this is used to normalize the loss and we # keep the minimum value at 1 to avoid generating weird loss values when # no keypoint is in the image batch. num_instances = tf.maximum( tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32), 1.0) loss = 0.0 # Loop through each feature output head. for pred in heatmap_predictions: pred = _flatten_spatial_dimensions(pred) unweighted_loss = classification_loss_fn( pred, flattened_heatmap_targets, weights=tf.ones_like(per_pixel_weights)) # Apply the weights after the loss function to have full control over it. loss += unweighted_loss * per_pixel_weights * flattened_valid_mask loss = tf.reduce_sum(loss) / ( float(len(heatmap_predictions)) * num_instances) return loss def _compute_kp_offset_loss(self, input_height, input_width, task_name, offset_predictions, localization_loss_fn): """Computes the offset loss of the keypoint estimation task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. task_name: A string representing the name of the keypoint task. offset_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2] representing the prediction heads of the model for keypoint offset. localization_loss_fn: An object_detection.core.losses.Loss object to compute the loss for the keypoint offset predictions in CenterNet. Returns: loss: A float scalar tensor representing the keypoint offset loss normalized by number of total keypoints. """ gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) assigner = self._target_assigner_dict[task_name] (batch_indices, batch_offsets, batch_weights) = assigner.assign_keypoints_offset_targets( height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list) # Keypoint offset loss.
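# When the offset head predicts per-keypoint offsets (i.e. channels is
# 2 * num_keypoints rather than 2, see _construct_prediction_heads), the
# prediction is reshaped to [batch_size, out_height, out_width, num_keypoints,
# 2] below so that batch_indices can gather a (dy, dx) pair per keypoint type.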
loss = 0.0 for prediction in offset_predictions: batch_size, out_height, out_width, channels = _get_shape(prediction, 4) if channels > 2: prediction = tf.reshape( prediction, shape=[batch_size, out_height, out_width, -1, 2]) prediction = cn_assigner.get_batch_predictions_from_indices( prediction, batch_indices) # The dimensions passed are not as per the doc string but the loss # still computes the correct value. unweighted_loss = localization_loss_fn( prediction, batch_offsets, weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) # Apply the weights after the loss function to have full control over it. loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1) loss = tf.reduce_sum(loss) / ( float(len(offset_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)) return loss def _compute_kp_regression_loss(self, input_height, input_width, task_name, regression_predictions, localization_loss_fn): """Computes the keypoint regression loss of the keypoint estimation task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. task_name: A string representing the name of the keypoint task. regression_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2 * num_keypoints] representing the prediction heads of the model for keypoint regression offset. localization_loss_fn: An object_detection.core.losses.Loss object to compute the loss for the keypoint regression offset predictions in CenterNet. Returns: loss: A float scalar tensor representing the keypoint regression offset loss normalized by number of total keypoints. """ gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) # keypoint regression offset loss. assigner = self._target_assigner_dict[task_name] (batch_indices, batch_regression_offsets, batch_weights) = assigner.assign_joint_regression_targets( height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list, gt_boxes_list=gt_boxes_list) loss = 0.0 for prediction in regression_predictions: batch_size, out_height, out_width, _ = _get_shape(prediction, 4) reshaped_prediction = tf.reshape( prediction, shape=[batch_size, out_height, out_width, -1, 2]) reg_prediction = cn_assigner.get_batch_predictions_from_indices( reshaped_prediction, batch_indices) unweighted_loss = localization_loss_fn( reg_prediction, batch_regression_offsets, weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) # Apply the weights after the loss function to have full control over it. loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1) loss = tf.reduce_sum(loss) / ( float(len(regression_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)) return loss def _compute_kp_depth_loss(self, input_height, input_width, task_name, depth_predictions, localization_loss_fn): """Computes the loss of the keypoint depth estimation. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. task_name: A string representing the name of the keypoint task. 
depth_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 1 (or num_keypoints)] representing the prediction heads of the model for keypoint depth. localization_loss_fn: An object_detection.core.losses.Loss object to compute the loss for the keypoint offset predictions in CenterNet. Returns: loss: A float scalar tensor representing the keypoint depth loss normalized by number of total keypoints. """ kp_params = self._kp_params_dict[task_name] gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) gt_keypoint_depths_list = self.groundtruth_lists( fields.BoxListFields.keypoint_depths) gt_keypoint_depth_weights_list = self.groundtruth_lists( fields.BoxListFields.keypoint_depth_weights) assigner = self._target_assigner_dict[task_name] (batch_indices, batch_depths, batch_weights) = assigner.assign_keypoints_depth_targets( height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list, gt_keypoint_depths_list=gt_keypoint_depths_list, gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list) # Keypoint offset loss. loss = 0.0 for prediction in depth_predictions: if kp_params.per_keypoint_depth: prediction = tf.expand_dims(prediction, axis=-1) selected_depths = cn_assigner.get_batch_predictions_from_indices( prediction, batch_indices) # The dimensions passed are not as per the doc string but the loss # still computes the correct value. unweighted_loss = localization_loss_fn( selected_depths, batch_depths, weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) # Apply the weights after the loss function to have full control over it. loss += batch_weights * tf.squeeze(unweighted_loss, axis=1) loss = tf.reduce_sum(loss) / ( float(len(depth_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)) return loss def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights): """Computes all the losses associated with segmentation. Args: prediction_dict: The dictionary returned from the predict() method. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: A dictionary with segmentation losses. """ segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP] mask_loss = self._compute_mask_loss( segmentation_heatmap, per_pixel_weights) losses = { SEGMENTATION_HEATMAP: mask_loss } return losses def _compute_mask_loss(self, segmentation_predictions, per_pixel_weights): """Computes the mask loss. Args: segmentation_predictions: A list of float32 tensors of shape [batch_size, out_height, out_width, num_classes]. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: A float scalar tensor representing the mask loss. """ gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks) gt_mask_weights_list = None if self.groundtruth_has_field(fields.BoxListFields.mask_weights): gt_mask_weights_list = self.groundtruth_lists( fields.BoxListFields.mask_weights) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) # Convert the groundtruth to targets. 
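# The segmentation assigner rasterizes the groundtruth instance masks into
# per-class heatmap targets at the output stride; the returned weight map is
# folded into per_pixel_weights below so that pixels belonging to instances
# with zero mask weight do not contribute to the loss.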
assigner = self._target_assigner_dict[SEGMENTATION_TASK] heatmap_targets, heatmap_weight = assigner.assign_segmentation_targets( gt_masks_list=gt_masks_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list, gt_mask_weights_list=gt_mask_weights_list) flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets) flattened_heatmap_mask = _flatten_spatial_dimensions( heatmap_weight[:, :, :, tf.newaxis]) per_pixel_weights *= flattened_heatmap_mask loss = 0.0 mask_loss_fn = self._mask_params.classification_loss total_pixels_in_loss = tf.math.maximum( tf.reduce_sum(per_pixel_weights), 1) # Loop through each feature output head. for pred in segmentation_predictions: pred = _flatten_spatial_dimensions(pred) loss += mask_loss_fn( pred, flattened_heatmap_targets, weights=per_pixel_weights) # TODO(ronnyvotel): Consider other ways to normalize loss. total_loss = tf.reduce_sum(loss) / ( float(len(segmentation_predictions)) * total_pixels_in_loss) return total_loss def _compute_densepose_losses(self, input_height, input_width, prediction_dict): """Computes the weighted DensePose losses. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. prediction_dict: A dictionary holding predicted tensors output by the "predict" function. See the "predict" function for more detailed description. Returns: A dictionary of scalar float tensors representing the weighted losses for the DensePose task: DENSEPOSE_HEATMAP: the weighted part segmentation loss. DENSEPOSE_REGRESSION: the weighted part surface coordinate loss. """ dp_heatmap_loss, dp_regression_loss = ( self._compute_densepose_part_and_coordinate_losses( input_height=input_height, input_width=input_width, part_predictions=prediction_dict[DENSEPOSE_HEATMAP], surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION])) loss_dict = {} loss_dict[DENSEPOSE_HEATMAP] = ( self._densepose_params.part_loss_weight * dp_heatmap_loss) loss_dict[DENSEPOSE_REGRESSION] = ( self._densepose_params.coordinate_loss_weight * dp_regression_loss) return loss_dict def _compute_densepose_part_and_coordinate_losses( self, input_height, input_width, part_predictions, surface_coord_predictions): """Computes the individual losses for the DensePose task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. part_predictions: A list of float tensors of shape [batch_size, out_height, out_width, num_parts]. surface_coord_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2 * num_parts]. Returns: A tuple with two scalar loss tensors: part_prediction_loss and surface_coord_loss. 
""" gt_dp_num_points_list = self.groundtruth_lists( fields.BoxListFields.densepose_num_points) gt_dp_part_ids_list = self.groundtruth_lists( fields.BoxListFields.densepose_part_ids) gt_dp_surface_coords_list = self.groundtruth_lists( fields.BoxListFields.densepose_surface_coords) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) assigner = self._target_assigner_dict[DENSEPOSE_TASK] batch_indices, batch_part_ids, batch_surface_coords, batch_weights = ( assigner.assign_part_and_coordinate_targets( height=input_height, width=input_width, gt_dp_num_points_list=gt_dp_num_points_list, gt_dp_part_ids_list=gt_dp_part_ids_list, gt_dp_surface_coords_list=gt_dp_surface_coords_list, gt_weights_list=gt_weights_list)) part_prediction_loss = 0 surface_coord_loss = 0 classification_loss_fn = self._densepose_params.classification_loss localization_loss_fn = self._densepose_params.localization_loss num_predictions = float(len(part_predictions)) num_valid_points = tf.math.count_nonzero(batch_weights) num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32) for part_pred, surface_coord_pred in zip(part_predictions, surface_coord_predictions): # Potentially upsample the feature maps, so that better quality (i.e. # higher res) groundtruth can be applied. if self._densepose_params.upsample_to_input_res: part_pred = tf.keras.layers.UpSampling2D( self._stride, interpolation=self._densepose_params.upsample_method)( part_pred) surface_coord_pred = tf.keras.layers.UpSampling2D( self._stride, interpolation=self._densepose_params.upsample_method)( surface_coord_pred) # Compute the part prediction loss. part_pred = cn_assigner.get_batch_predictions_from_indices( part_pred, batch_indices[:, 0:3]) part_prediction_loss += classification_loss_fn( part_pred[:, tf.newaxis, :], batch_part_ids[:, tf.newaxis, :], weights=batch_weights[:, tf.newaxis, tf.newaxis]) # Compute the surface coordinate loss. batch_size, out_height, out_width, _ = _get_shape( surface_coord_pred, 4) surface_coord_pred = tf.reshape( surface_coord_pred, [batch_size, out_height, out_width, -1, 2]) surface_coord_pred = cn_assigner.get_batch_predictions_from_indices( surface_coord_pred, batch_indices) surface_coord_loss += localization_loss_fn( surface_coord_pred, batch_surface_coords, weights=batch_weights[:, tf.newaxis]) part_prediction_loss = tf.reduce_sum(part_prediction_loss) / ( num_predictions * num_valid_points) surface_coord_loss = tf.reduce_sum(surface_coord_loss) / ( num_predictions * num_valid_points) return part_prediction_loss, surface_coord_loss def _compute_track_losses(self, input_height, input_width, prediction_dict): """Computes all the losses associated with tracking. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. prediction_dict: The dictionary returned from the predict() method. Returns: A dictionary with tracking losses. """ object_reid_predictions = prediction_dict[TRACK_REID] embedding_loss = self._compute_track_embedding_loss( input_height=input_height, input_width=input_width, object_reid_predictions=object_reid_predictions) losses = { TRACK_REID: embedding_loss } return losses def _compute_track_embedding_loss(self, input_height, input_width, object_reid_predictions): """Computes the object ReID loss. The embedding is trained as a classification task where the target is the ID of each track among all tracks in the whole dataset. 
Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. object_reid_predictions: A list of float tensors of shape [batch_size, out_height, out_width, reid_embed_size] representing the object embedding feature maps. Returns: A float scalar tensor representing the object ReID loss per instance. """ gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids) gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) # Convert the groundtruth to targets. assigner = self._target_assigner_dict[TRACK_TASK] batch_indices, batch_weights, track_targets = assigner.assign_track_targets( height=input_height, width=input_width, gt_track_ids_list=gt_track_ids_list, gt_boxes_list=gt_boxes_list, gt_weights_list=gt_weights_list) batch_weights = tf.expand_dims(batch_weights, -1) loss = 0.0 object_reid_loss = self._track_params.classification_loss # Loop through each feature output head. for pred in object_reid_predictions: embedding_pred = cn_assigner.get_batch_predictions_from_indices( pred, batch_indices) reid_classification = self.track_reid_classification_net(embedding_pred) loss += object_reid_loss( reid_classification, track_targets, weights=batch_weights) loss_per_instance = tf.reduce_sum(loss) / ( float(len(object_reid_predictions)) * num_boxes) return loss_per_instance def _compute_temporal_offset_loss(self, input_height, input_width, prediction_dict): """Computes the temporal offset loss for tracking. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. prediction_dict: The dictionary returned from the predict() method. Returns: A dictionary with track/temporal_offset losses. """ gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_offsets_list = self.groundtruth_lists( fields.BoxListFields.temporal_offsets) gt_match_list = self.groundtruth_lists( fields.BoxListFields.track_match_flags) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) num_boxes = tf.cast( get_num_instances_from_weights(gt_weights_list), tf.float32) offset_predictions = prediction_dict[TEMPORAL_OFFSET] num_predictions = float(len(offset_predictions)) assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK] (batch_indices, batch_offset_targets, batch_weights) = assigner.assign_temporal_offset_targets( height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_offsets_list=gt_offsets_list, gt_match_list=gt_match_list, gt_weights_list=gt_weights_list) batch_weights = tf.expand_dims(batch_weights, -1) offset_loss_fn = self._temporal_offset_params.localization_loss loss_dict = {} offset_loss = 0 for offset_pred in offset_predictions: offset_pred = cn_assigner.get_batch_predictions_from_indices( offset_pred, batch_indices) offset_loss += offset_loss_fn(offset_pred[:, None], batch_offset_targets[:, None], weights=batch_weights) offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes) loss_dict[TEMPORAL_OFFSET] = offset_loss return loss_dict def _should_clip_keypoints(self): """Returns a boolean indicating whether keypoint clipping should occur. If there is only one keypoint task, clipping is controlled by the field `clip_out_of_frame_keypoints`. 
If there are multiple keypoint tasks, clipping logic is defined based on unanimous agreement of keypoint parameters. If there is any ambiguity, clip_out_of_frame_keypoints is set to False (default). """ kp_params_iterator = iter(self._kp_params_dict.values()) if len(self._kp_params_dict) == 1: kp_params = next(kp_params_iterator) return kp_params.clip_out_of_frame_keypoints # Multi-task setting. kp_params = next(kp_params_iterator) should_clip = kp_params.clip_out_of_frame_keypoints for kp_params in kp_params_iterator: if kp_params.clip_out_of_frame_keypoints != should_clip: return False return should_clip def _rescore_instances(self, classes, scores, keypoint_scores): """Rescores instances based on detection and keypoint scores. Args: classes: A [batch, max_detections] int32 tensor with detection classes. scores: A [batch, max_detections] float32 tensor with detection scores. keypoint_scores: A [batch, max_detections, total_num_keypoints] float32 tensor with keypoint scores. Returns: A [batch, max_detections] float32 tensor with possibly altered detection scores. """ batch, max_detections, total_num_keypoints = ( shape_utils.combined_static_and_dynamic_shape(keypoint_scores)) classes_tiled = tf.tile(classes[:, :, tf.newaxis], multiples=[1, 1, total_num_keypoints]) # TODO(yuhuic): Investigate whether this function will create subgraphs in # tflite that will cause the model to run slower at inference. for kp_params in self._kp_params_dict.values(): if not kp_params.rescore_instances: continue class_id = kp_params.class_id keypoint_indices = kp_params.keypoint_indices kpt_mask = tf.reduce_sum( tf.one_hot(keypoint_indices, depth=total_num_keypoints), axis=0) kpt_mask_tiled = tf.tile(kpt_mask[tf.newaxis, tf.newaxis, :], multiples=[batch, max_detections, 1]) class_and_keypoint_mask = tf.math.logical_and( classes_tiled == class_id, kpt_mask_tiled == 1.0) class_and_keypoint_mask_float = tf.cast(class_and_keypoint_mask, dtype=tf.float32) visible_keypoints = tf.math.greater(keypoint_scores, 0.0) num_visible_keypoints = tf.reduce_sum( class_and_keypoint_mask_float * tf.cast(visible_keypoints, tf.float32), axis=-1) num_visible_keypoints = tf.math.maximum(num_visible_keypoints, 1.0) scores_for_class = (1./num_visible_keypoints) * ( tf.reduce_sum(class_and_keypoint_mask_float * scores[:, :, tf.newaxis] * keypoint_scores, axis=-1)) scores = tf.where(classes == class_id, scores_for_class, scores) return scores def preprocess(self, inputs): outputs = shape_utils.resize_images_and_return_shapes( inputs, self._image_resizer_fn) resized_inputs, true_image_shapes = outputs return (self._feature_extractor.preprocess(resized_inputs), true_image_shapes) def predict(self, preprocessed_inputs, _): """Predicts CenterNet prediction tensors given an input batch. Feature extractors are free to produce predictions from multiple feature maps and therefore we return a dictionary mapping strings to lists. E.g. the hourglass backbone produces two feature maps. Args: preprocessed_inputs: a [batch, height, width, channels] float32 tensor representing a batch of images. Returns: prediction_dict: a dictionary holding predicted tensors with 'preprocessed_inputs' - The input image after being resized and preprocessed by the feature extractor. 'object_center' - A list of size num_feature_outputs containing float tensors of size [batch_size, output_height, output_width, num_classes] representing the predicted object center heatmap logits. 
'box/scale' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2] representing the predicted box height and width at each output location. This field exists only when object detection task is specified. 'box/offset' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2] representing the predicted y and x offsets at each output location. '$TASK_NAME/keypoint_heatmap' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, num_keypoints] representing the predicted keypoint heatmap logits. '$TASK_NAME/keypoint_offset' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2] representing the predicted keypoint offsets at each output location. '$TASK_NAME/keypoint_regression' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2 * num_keypoints] representing the predicted keypoint regression at each output location. 'segmentation/heatmap' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, num_classes] representing the mask logits. 'densepose/heatmap' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, num_parts] representing the mask logits for each part. 'densepose/regression' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2 * num_parts] representing the DensePose surface coordinate predictions. Note the $TASK_NAME is provided by the KeypointEstimation namedtuple used to differentiate between different keypoint tasks. """ features_list = self._feature_extractor(preprocessed_inputs) predictions = {} for head_name, heads in self._prediction_head_dict.items(): predictions[head_name] = [ head(feature) for (feature, head) in zip(features_list, heads) ] predictions['preprocessed_inputs'] = preprocessed_inputs self._batched_prediction_tensor_names = predictions.keys() return predictions def loss(self, prediction_dict, true_image_shapes, scope=None): """Computes scalar loss tensors with respect to provided groundtruth. This function implements the various CenterNet losses. Args: prediction_dict: a dictionary holding predicted tensors returned by "predict" function. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. scope: Optional scope name. Returns: A dictionary mapping the keys [ 'Loss/object_center', 'Loss/box/scale', (optional) 'Loss/box/offset', (optional) 'Loss/$TASK_NAME/keypoint/heatmap', (optional) 'Loss/$TASK_NAME/keypoint/offset', (optional) 'Loss/$TASK_NAME/keypoint/regression', (optional) 'Loss/segmentation/heatmap', (optional) 'Loss/densepose/heatmap', (optional) 'Loss/densepose/regression', (optional) 'Loss/track/reid'] (optional) 'Loss/track/offset'] (optional) scalar tensors corresponding to the losses for different tasks. Note the $TASK_NAME is provided by the KeypointEstimation namedtuple used to differentiate between different keypoint tasks. 
""" _, input_height, input_width, _ = _get_shape( prediction_dict['preprocessed_inputs'], 4) output_height, output_width = (tf.maximum(input_height // self._stride, 1), tf.maximum(input_width // self._stride, 1)) # TODO(vighneshb) Explore whether using floor here is safe. output_true_image_shapes = tf.ceil( tf.to_float(true_image_shapes) / self._stride) valid_anchor_weights = get_valid_anchor_weights_in_flattened_image( output_true_image_shapes, output_height, output_width) valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2) object_center_loss = self._compute_object_center_loss( object_center_predictions=prediction_dict[OBJECT_CENTER], input_height=input_height, input_width=input_width, per_pixel_weights=valid_anchor_weights) losses = { OBJECT_CENTER: self._center_params.object_center_loss_weight * object_center_loss } if self._od_params is not None: od_losses = self._compute_object_detection_losses( input_height=input_height, input_width=input_width, prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights) for key in od_losses: od_losses[key] = od_losses[key] * self._od_params.task_loss_weight losses.update(od_losses) if self._kp_params_dict is not None: for task_name, params in self._kp_params_dict.items(): kp_losses = self._compute_keypoint_estimation_losses( task_name=task_name, input_height=input_height, input_width=input_width, prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights) for key in kp_losses: kp_losses[key] = kp_losses[key] * params.task_loss_weight losses.update(kp_losses) if self._mask_params is not None: seg_losses = self._compute_segmentation_losses( prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights) for key in seg_losses: seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight losses.update(seg_losses) if self._densepose_params is not None: densepose_losses = self._compute_densepose_losses( input_height=input_height, input_width=input_width, prediction_dict=prediction_dict) for key in densepose_losses: densepose_losses[key] = ( densepose_losses[key] * self._densepose_params.task_loss_weight) losses.update(densepose_losses) if self._track_params is not None: track_losses = self._compute_track_losses( input_height=input_height, input_width=input_width, prediction_dict=prediction_dict) for key in track_losses: track_losses[key] = ( track_losses[key] * self._track_params.task_loss_weight) losses.update(track_losses) if self._temporal_offset_params is not None: offset_losses = self._compute_temporal_offset_loss( input_height=input_height, input_width=input_width, prediction_dict=prediction_dict) for key in offset_losses: offset_losses[key] = ( offset_losses[key] * self._temporal_offset_params.task_loss_weight) losses.update(offset_losses) # Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the # losses will be grouped together in Tensorboard. return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val) for key, val in losses.items()]) def postprocess(self, prediction_dict, true_image_shapes, **params): """Produces boxes given a prediction dict returned by predict(). Although predict returns a list of tensors, only the last tensor in each list is used for making box predictions. Args: prediction_dict: a dictionary holding predicted tensors from "predict" function. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. 
**params: Currently ignored. Returns: detections: a dictionary containing the following fields detection_boxes - A tensor of shape [batch, max_detections, 4] holding the predicted boxes. detection_boxes_strided: A tensor of shape [batch_size, num_detections, 4] holding the predicted boxes in absolute coordinates of the feature extractor's final layer output. detection_scores: A tensor of shape [batch, max_detections] holding the predicted score for each box. detection_multiclass_scores: A tensor of shape [batch, max_detection, num_classes] holding multiclass score for each box. detection_classes: An integer tensor of shape [batch, max_detections] containing the detected class for each box. num_detections: An integer tensor of shape [batch] containing the number of detected boxes for each sample in the batch. detection_keypoints: (Optional) A float tensor of shape [batch, max_detections, num_keypoints, 2] with normalized keypoints. Any invalid keypoints have their coordinates and scores set to 0.0. detection_keypoint_scores: (Optional) A float tensor of shape [batch, max_detection, num_keypoints] with scores for each keypoint. detection_masks: (Optional) A uint8 tensor of shape [batch, max_detections, mask_height, mask_width] with masks for each detection. Background is specified with 0, and foreground is specified with positive integers (1 for standard instance segmentation mask, and 1-indexed parts for DensePose task). detection_surface_coords: (Optional) A float32 tensor of shape [batch, max_detection, mask_height, mask_width, 2] with DensePose surface coordinates, in (v, u) format. detection_embeddings: (Optional) A float tensor of shape [batch, max_detections, reid_embed_size] containing object embeddings. """ object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1]) # Get x, y and channel indices corresponding to the top indices in the class # center predictions. detection_scores, y_indices, x_indices, channel_indices = ( top_k_feature_map_locations( object_center_prob, max_pool_kernel_size=3, k=self._center_params.max_box_predictions)) multiclass_scores = tf.gather_nd( object_center_prob, tf.stack([y_indices, x_indices], -1), batch_dims=1) num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1) postprocess_dict = { fields.DetectionResultFields.detection_scores: detection_scores, fields.DetectionResultFields.detection_multiclass_scores: multiclass_scores, fields.DetectionResultFields.detection_classes: channel_indices, fields.DetectionResultFields.num_detections: num_detections, } boxes_strided = None if self._od_params: boxes_strided = ( prediction_tensors_to_boxes(y_indices, x_indices, prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1])) boxes = convert_strided_predictions_to_normalized_boxes( boxes_strided, self._stride, true_image_shapes) postprocess_dict.update({ fields.DetectionResultFields.detection_boxes: boxes, 'detection_boxes_strided': boxes_strided }) if self._kp_params_dict: # If the model is trained to predict only one class of object and its # keypoint, we fall back to a simpler postprocessing function which uses # the ops that are supported by tf.lite on GPU. 
clip_keypoints = self._should_clip_keypoints() if len(self._kp_params_dict) == 1 and self._num_classes == 1: (keypoints, keypoint_scores, keypoint_depths) = self._postprocess_keypoints_single_class( prediction_dict, channel_indices, y_indices, x_indices, boxes_strided, num_detections) keypoints, keypoint_scores = ( convert_strided_predictions_to_normalized_keypoints( keypoints, keypoint_scores, self._stride, true_image_shapes, clip_out_of_frame_keypoints=clip_keypoints)) if keypoint_depths is not None: postprocess_dict.update({ fields.DetectionResultFields.detection_keypoint_depths: keypoint_depths }) else: # Multi-class keypoint estimation task does not support depth # estimation. assert all([ not kp_dict.predict_depth for kp_dict in self._kp_params_dict.values() ]) keypoints, keypoint_scores = self._postprocess_keypoints_multi_class( prediction_dict, channel_indices, y_indices, x_indices, None, num_detections) keypoints, keypoint_scores = ( convert_strided_predictions_to_normalized_keypoints( keypoints, keypoint_scores, self._stride, true_image_shapes, clip_out_of_frame_keypoints=clip_keypoints)) # Update instance scores based on keypoints. scores = self._rescore_instances( channel_indices, detection_scores, keypoint_scores) postprocess_dict.update({ fields.DetectionResultFields.detection_scores: scores, fields.DetectionResultFields.detection_keypoints: keypoints, fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores }) if self._od_params is None: # Still output the box prediction by enclosing the keypoints for # evaluation purposes. boxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes( keypoints, keypoints_axis=2) postprocess_dict.update({ fields.DetectionResultFields.detection_boxes: boxes, }) if self._mask_params: masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1]) densepose_part_heatmap, densepose_surface_coords = None, None densepose_class_index = 0 if self._densepose_params: densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1] densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1] densepose_class_index = self._densepose_params.class_id instance_masks, surface_coords = ( convert_strided_predictions_to_instance_masks( boxes, channel_indices, masks, true_image_shapes, densepose_part_heatmap, densepose_surface_coords, stride=self._stride, mask_height=self._mask_params.mask_height, mask_width=self._mask_params.mask_width, score_threshold=self._mask_params.score_threshold, densepose_class_index=densepose_class_index)) postprocess_dict[ fields.DetectionResultFields.detection_masks] = instance_masks if self._densepose_params: postprocess_dict[ fields.DetectionResultFields.detection_surface_coords] = ( surface_coords) if self._track_params: embeddings = self._postprocess_embeddings(prediction_dict, y_indices, x_indices) postprocess_dict.update({ fields.DetectionResultFields.detection_embeddings: embeddings }) if self._temporal_offset_params: offsets = prediction_tensors_to_temporal_offsets( y_indices, x_indices, prediction_dict[TEMPORAL_OFFSET][-1]) postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets if self._non_max_suppression_fn: boxes = tf.expand_dims( postprocess_dict.pop(fields.DetectionResultFields.detection_boxes), axis=-2) multiclass_scores = postprocess_dict[ fields.DetectionResultFields.detection_multiclass_scores] num_valid_boxes = postprocess_dict.pop( fields.DetectionResultFields.num_detections) # Remove scores and classes as NMS will compute these from multiclass # scores.
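# Note: boxes are expanded to [batch, max_detections, 1, 4] above because the
# usual batched NMS convention in the OD API expects a per-class box
# dimension; CenterNet produces a single box per detection, so that dimension
# is 1 here. (The exact expected signature depends on the configured
# non_max_suppression_fn.)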
postprocess_dict.pop(fields.DetectionResultFields.detection_scores) postprocess_dict.pop(fields.DetectionResultFields.detection_classes) (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) = self._non_max_suppression_fn( boxes, multiclass_scores, additional_fields=postprocess_dict, num_valid_boxes=num_valid_boxes) postprocess_dict = nmsed_additional_fields postprocess_dict[ fields.DetectionResultFields.detection_boxes] = nmsed_boxes postprocess_dict[ fields.DetectionResultFields.detection_scores] = nmsed_scores postprocess_dict[ fields.DetectionResultFields.detection_classes] = nmsed_classes postprocess_dict[ fields.DetectionResultFields.num_detections] = num_detections postprocess_dict.update(nmsed_additional_fields) return postprocess_dict def postprocess_single_instance_keypoints( self, prediction_dict, true_image_shapes): """Postprocess for predicting single instance keypoints. This postprocess function is a special case of predicting the keypoint of a single instance in the image (original CenterNet postprocess supports multi-instance prediction). Due to the simplification assumption, this postprocessing function achieves much faster inference time. Here is a short list of the modifications made in this function: 1) Assume the model predicts only single class keypoint. 2) Assume there is only one instance in the image. If multiple instances appear in the image, the model tends to predict the one that is closer to the image center (the other ones are considered as background and are rejected by the model). 3) Avoid using top_k ops in the postprocessing logics since it is slower than using argmax. 4) The predictions other than the keypoints are ignored, e.g. boxes. 5) The input batch size is assumed to be 1. Args: prediction_dict: a dictionary holding predicted tensors from "predict" function. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: detections: a dictionary containing the following fields detection_keypoints: A float tensor of shape [1, 1, num_keypoints, 2] with normalized keypoints. Any invalid keypoints have their coordinates and scores set to 0.0. detection_keypoint_scores: A float tensor of shape [1, 1, num_keypoints] with scores for each keypoint. """ # The number of keypoint task is expected to be 1. 
assert len(self._kp_params_dict) == 1 task_name, kp_params = next(iter(self._kp_params_dict.items())) keypoint_heatmap = tf.nn.sigmoid(prediction_dict[get_keypoint_name( task_name, KEYPOINT_HEATMAP)][-1]) keypoint_offset = prediction_dict[get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1] keypoint_regression = prediction_dict[get_keypoint_name( task_name, KEYPOINT_REGRESSION)][-1] object_heatmap = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1]) keypoint_depths = None if kp_params.predict_depth: keypoint_depths = prediction_dict[get_keypoint_name( task_name, KEYPOINT_DEPTH)][-1] keypoints, keypoint_scores, keypoint_depths = ( prediction_to_single_instance_keypoints( object_heatmap=object_heatmap, keypoint_heatmap=keypoint_heatmap, keypoint_offset=keypoint_offset, keypoint_regression=keypoint_regression, kp_params=kp_params, keypoint_depths=keypoint_depths)) keypoints, keypoint_scores = ( convert_strided_predictions_to_normalized_keypoints( keypoints, keypoint_scores, self._stride, true_image_shapes, clip_out_of_frame_keypoints=False)) postprocess_dict = { fields.DetectionResultFields.detection_keypoints: keypoints, fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores } if kp_params.predict_depth: postprocess_dict.update({ fields.DetectionResultFields.detection_keypoint_depths: keypoint_depths }) return postprocess_dict def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices): """Performs postprocessing on embedding predictions. Args: prediction_dict: a dictionary holding predicted tensors, returned from the predict() method. This dictionary should contain embedding prediction feature maps for tracking task. y_indices: A [batch_size, max_detections] int tensor with y indices for all object centers. x_indices: A [batch_size, max_detections] int tensor with x indices for all object centers. Returns: embeddings: A [batch_size, max_detection, reid_embed_size] float32 tensor with L2 normalized embeddings extracted from detection box centers. """ embedding_predictions = prediction_dict[TRACK_REID][-1] embeddings = predicted_embeddings_at_object_centers( embedding_predictions, y_indices, x_indices) embeddings, _ = tf.linalg.normalize(embeddings, axis=-1) return embeddings def _scatter_keypoints_to_batch(self, num_ind, kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example, max_detections, total_num_keypoints): """Helper function to convert scattered keypoints into batch.""" def left_fn(kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example): # Scatter into tensor where instances align with original detection # instances. New shape of keypoint coordinates and scores are # [1, max_detections, num_total_keypoints, 2] and # [1, max_detections, num_total_keypoints], respectively. return _pad_to_full_instance_dim( kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example, self._center_params.max_box_predictions) def right_fn(): kpt_coords_for_example_all_det = tf.zeros( [1, max_detections, total_num_keypoints, 2], dtype=tf.float32) kpt_scores_for_example_all_det = tf.zeros( [1, max_detections, total_num_keypoints], dtype=tf.float32) return (kpt_coords_for_example_all_det, kpt_scores_for_example_all_det) left_fn = functools.partial(left_fn, kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example) # Use dimension values instead of tf.size for tf.lite compatibility. 
return tf.cond(num_ind[0] > 0, left_fn, right_fn) def _postprocess_keypoints_multi_class(self, prediction_dict, classes, y_indices, x_indices, boxes, num_detections): """Performs postprocessing on keypoint predictions. This is the most general keypoint postprocessing function which supports multiple keypoint tasks (e.g. human and dog keypoints) and multiple object detection classes. Note that it is the most expensive postprocessing logics and is currently not tf.lite/tf.js compatible. See _postprocess_keypoints_single_class if you plan to export the model in more portable format. Args: prediction_dict: a dictionary holding predicted tensors, returned from the predict() method. This dictionary should contain keypoint prediction feature maps for each keypoint task. classes: A [batch_size, max_detections] int tensor with class indices for all detected objects. y_indices: A [batch_size, max_detections] int tensor with y indices for all object centers. x_indices: A [batch_size, max_detections] int tensor with x indices for all object centers. boxes: A [batch_size, max_detections, 4] float32 tensor with bounding boxes in (un-normalized) output space. num_detections: A [batch_size] int tensor with the number of valid detections for each image. Returns: A tuple of keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32 tensor with keypoints in the output (strided) coordinate frame. keypoint_scores: a [batch_size, max_detections, num_total_keypoints] float32 tensor with keypoint scores. """ total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict in self._kp_params_dict.values()) batch_size, max_detections = _get_shape(classes, 2) kpt_coords_for_example_list = [] kpt_scores_for_example_list = [] for ex_ind in range(batch_size): kpt_coords_for_class_list = [] kpt_scores_for_class_list = [] instance_inds_for_class_list = [] for task_name, kp_params in self._kp_params_dict.items(): keypoint_heatmap = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1] keypoint_offsets = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1] keypoint_regression = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1] instance_inds = self._get_instance_indices( classes, num_detections, ex_ind, kp_params.class_id) num_ind = _get_shape(instance_inds, 1) def true_fn(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params): """Logics to execute when instance_inds is not an empty set.""" # Gather the feature map locations corresponding to the object class. y_indices_for_kpt_class = tf.gather(y_indices, instance_inds, axis=1) x_indices_for_kpt_class = tf.gather(x_indices, instance_inds, axis=1) if boxes is None: boxes_for_kpt_class = None else: boxes_for_kpt_class = tf.gather(boxes, instance_inds, axis=1) # Postprocess keypoints and scores for class and single image. Shapes # are [1, num_instances_i, num_keypoints_i, 2] and # [1, num_instances_i, num_keypoints_i], respectively. Note that # num_instances_i and num_keypoints_i refers to the number of # instances and keypoints for class i, respectively. 
(kpt_coords_for_class, kpt_scores_for_class, _) = ( self._postprocess_keypoints_for_class_and_image( keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices_for_kpt_class, x_indices_for_kpt_class, boxes_for_kpt_class, ex_ind, kp_params)) # Expand keypoint dimension (with padding) so that coordinates and # scores have shape [1, num_instances_i, num_total_keypoints, 2] and # [1, num_instances_i, num_total_keypoints], respectively. kpts_coords_for_class_padded, kpt_scores_for_class_padded = ( _pad_to_full_keypoint_dim(kpt_coords_for_class, kpt_scores_for_class, kp_params.keypoint_indices, total_num_keypoints)) return kpts_coords_for_class_padded, kpt_scores_for_class_padded def false_fn(): """Logics to execute when the instance_inds is an empty set.""" return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32), tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32)) true_fn = functools.partial( true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params) # Use dimension values instead of tf.size for tf.lite compatibility. results = tf.cond(num_ind[0] > 0, true_fn, false_fn) kpt_coords_for_class_list.append(results[0]) kpt_scores_for_class_list.append(results[1]) instance_inds_for_class_list.append(instance_inds) # Concatenate all keypoints across all classes (single example). kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1) kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1) instance_inds_for_example = tf.concat(instance_inds_for_class_list, axis=0) (kpt_coords_for_example_all_det, kpt_scores_for_example_all_det) = self._scatter_keypoints_to_batch( num_ind, kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example, max_detections, total_num_keypoints) kpt_coords_for_example_list.append(kpt_coords_for_example_all_det) kpt_scores_for_example_list.append(kpt_scores_for_example_all_det) # Concatenate all keypoints and scores from all examples in the batch. # Shapes are [batch_size, max_detections, num_total_keypoints, 2] and # [batch_size, max_detections, num_total_keypoints], respectively. keypoints = tf.concat(kpt_coords_for_example_list, axis=0) keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0) return keypoints, keypoint_scores def _postprocess_keypoints_single_class(self, prediction_dict, classes, y_indices, x_indices, boxes, num_detections): """Performs postprocessing on keypoint predictions (single class only). This function handles the special case of keypoint task that the model predicts only one class of the bounding box/keypoint (e.g. person). By the assumption, the function uses only tf.lite supported ops and should run faster. Args: prediction_dict: a dictionary holding predicted tensors, returned from the predict() method. This dictionary should contain keypoint prediction feature maps for each keypoint task. classes: A [batch_size, max_detections] int tensor with class indices for all detected objects. y_indices: A [batch_size, max_detections] int tensor with y indices for all object centers. x_indices: A [batch_size, max_detections] int tensor with x indices for all object centers. boxes: A [batch_size, max_detections, 4] float32 tensor with bounding boxes in (un-normalized) output space. num_detections: A [batch_size] int tensor with the number of valid detections for each image. 
Returns: A tuple of keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32 tensor with keypoints in the output (strided) coordinate frame. keypoint_scores: a [batch_size, max_detections, num_total_keypoints] float32 tensor with keypoint scores. """ # This function only works when there is only one keypoint task and the # number of classes equal to one. For more general use cases, please use # _postprocess_keypoints instead. assert len(self._kp_params_dict) == 1 and self._num_classes == 1 task_name, kp_params = next(iter(self._kp_params_dict.items())) keypoint_heatmap = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1] keypoint_offsets = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1] keypoint_regression = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1] keypoint_depth_predictions = None if kp_params.predict_depth: keypoint_depth_predictions = prediction_dict[get_keypoint_name( task_name, KEYPOINT_DEPTH)][-1] batch_size, _ = _get_shape(classes, 2) kpt_coords_for_example_list = [] kpt_scores_for_example_list = [] kpt_depths_for_example_list = [] for ex_ind in range(batch_size): # Postprocess keypoints and scores for class and single image. Shapes # are [1, max_detections, num_keypoints, 2] and # [1, max_detections, num_keypoints], respectively. (kpt_coords_for_class, kpt_scores_for_class, kpt_depths_for_class) = ( self._postprocess_keypoints_for_class_and_image( keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, ex_ind, kp_params, keypoint_depth_predictions=keypoint_depth_predictions)) kpt_coords_for_example_list.append(kpt_coords_for_class) kpt_scores_for_example_list.append(kpt_scores_for_class) kpt_depths_for_example_list.append(kpt_depths_for_class) # Concatenate all keypoints and scores from all examples in the batch. # Shapes are [batch_size, max_detections, num_keypoints, 2] and # [batch_size, max_detections, num_keypoints], respectively. keypoints = tf.concat(kpt_coords_for_example_list, axis=0) keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0) keypoint_depths = None if kp_params.predict_depth: keypoint_depths = tf.concat(kpt_depths_for_example_list, axis=0) return keypoints, keypoint_scores, keypoint_depths def _get_instance_indices(self, classes, num_detections, batch_index, class_id): """Gets the instance indices that match the target class ID. Args: classes: A [batch_size, max_detections] int tensor with class indices for all detected objects. num_detections: A [batch_size] int tensor with the number of valid detections for each image. batch_index: An integer specifying the index for an example in the batch. class_id: Class id Returns: instance_inds: A [num_instances] int32 tensor where each element indicates the instance location within the `classes` tensor. This is useful to associate the refined keypoints with the original detections (i.e. boxes) """ classes = classes[batch_index:batch_index+1, ...] _, max_detections = shape_utils.combined_static_and_dynamic_shape( classes) # Get the detection indices corresponding to the target class. # Call tf.math.equal with matched tensor shape to make it tf.lite # compatible. valid_detections_with_kpt_class = tf.math.logical_and( tf.range(max_detections) < num_detections[batch_index], tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id))) instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0] # Cast the indices tensor to int32 for tf.lite compatibility. 
return tf.cast(instance_inds, tf.int32) def _postprocess_keypoints_for_class_and_image( self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, batch_index, kp_params, keypoint_depth_predictions=None): """Postprocess keypoints for a single image and class. Args: keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32 tensor with keypoint heatmaps. keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with local offsets to keypoint centers. keypoint_regression: A [batch_size, height, width, 2 * num_keypoints] float32 tensor with regressed offsets to all keypoints. classes: A [batch_size, max_detections] int tensor with class indices for all detected objects. y_indices: A [batch_size, max_detections] int tensor with y indices for all object centers. x_indices: A [batch_size, max_detections] int tensor with x indices for all object centers. boxes: A [batch_size, max_detections, 4] float32 tensor with detected boxes in the output (strided) frame. batch_index: An integer specifying the index for an example in the batch. kp_params: A `KeypointEstimationParams` object with parameters for a single keypoint class. keypoint_depth_predictions: (optional) A [batch_size, height, width, 1] float32 tensor representing the keypoint depth prediction. Returns: A tuple of refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor with refined keypoints for a single class in a single image, expressed in the output (strided) coordinate frame. Note that `num_instances` is a dynamic dimension, and corresponds to the number of valid detections for the specific class. refined_scores: A [1, num_instances, num_keypoints] float32 tensor with keypoint scores. refined_depths: A [1, num_instances, num_keypoints] float32 tensor with keypoint depths. Return None if the input keypoint_depth_predictions is None. """ num_keypoints = len(kp_params.keypoint_indices) keypoint_heatmap = tf.nn.sigmoid( keypoint_heatmap[batch_index:batch_index+1, ...]) keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...] keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...] keypoint_depths = None if keypoint_depth_predictions is not None: keypoint_depths = keypoint_depth_predictions[batch_index:batch_index + 1, ...] y_indices = y_indices[batch_index:batch_index+1, ...] x_indices = x_indices[batch_index:batch_index+1, ...] if boxes is None: boxes_slice = None else: boxes_slice = boxes[batch_index:batch_index+1, ...] # Gather the regressed keypoints. Final tensor has shape # [1, num_instances, num_keypoints, 2]. regressed_keypoints_for_objects = regressed_keypoints_at_object_centers( keypoint_regression, y_indices, x_indices) regressed_keypoints_for_objects = tf.reshape( regressed_keypoints_for_objects, [1, -1, num_keypoints, 2]) # Get the candidate keypoints and scores. # The shape of keypoint_candidates and keypoint_scores is: # [1, num_candidates_per_keypoint, num_keypoints, 2] and # [1, num_candidates_per_keypoint, num_keypoints], respectively. 
(keypoint_candidates, keypoint_scores, num_keypoint_candidates, keypoint_depth_candidates) = ( prediction_tensors_to_keypoint_candidates( keypoint_heatmap, keypoint_offsets, keypoint_score_threshold=( kp_params.keypoint_candidate_score_threshold), max_pool_kernel_size=kp_params.peak_max_pool_kernel_size, max_candidates=kp_params.num_candidates_per_keypoint, keypoint_depths=keypoint_depths)) # Get the refined keypoints and scores, of shape # [1, num_instances, num_keypoints, 2] and # [1, num_instances, num_keypoints], respectively. (refined_keypoints, refined_scores, refined_depths) = refine_keypoints( regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=boxes_slice, unmatched_keypoint_score=kp_params.unmatched_keypoint_score, box_scale=kp_params.box_scale, candidate_search_scale=kp_params.candidate_search_scale, candidate_ranking_mode=kp_params.candidate_ranking_mode, score_distance_offset=kp_params.score_distance_offset, keypoint_depth_candidates=keypoint_depth_candidates, keypoint_score_threshold=( kp_params.keypoint_candidate_score_threshold)) return refined_keypoints, refined_scores, refined_depths def regularization_losses(self): return [] def restore_map(self, fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=False): raise RuntimeError('CenterNetMetaArch not supported under TF1.x.') def restore_from_objects(self, fine_tune_checkpoint_type='detection'): """Returns a map of Trackable objects to load from a foreign checkpoint. Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module or Checkpoint). This enables the model to initialize based on weights from another task. For example, the feature extractor variables from a classification model can be used to bootstrap training of an object detector. When loading from an object detection model, the checkpoint model should have the same parameters as this detection model with exception of the num_classes parameter. Note that this function is intended to be used to restore Keras-based models when running Tensorflow 2, whereas restore_map (not implemented in CenterNet) is intended to be used to restore Slim-based models when running Tensorflow 1.x. TODO(jonathanhuang): Make this function consistent with other meta-architectures. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`, `fine_tune`. Default 'detection'. 'detection': used when loading models pre-trained on other detection tasks. With this checkpoint type the weights of the feature extractor are expected under the attribute 'feature_extractor'. 'classification': used when loading models pre-trained on an image classification task. Note that only the encoder section of the network is loaded and not the upsampling layers. With this checkpoint type, the weights of only the encoder section are expected under the attribute 'feature_extractor'. 'fine_tune': used when loading the entire CenterNet feature extractor pre-trained on other tasks. The checkpoints saved during CenterNet model training can be directly loaded using this type. With this checkpoint type, the weights of the feature extractor are expected under the attribute 'model._feature_extractor'. For more details, see the tensorflow section on Loading mechanics. 
https://www.tensorflow.org/guide/checkpoint#loading_mechanics Returns: A dict mapping keys to Trackable objects (tf.Module or Checkpoint). """ if fine_tune_checkpoint_type == 'detection': feature_extractor_model = tf.train.Checkpoint( _feature_extractor=self._feature_extractor) return {'model': feature_extractor_model} elif fine_tune_checkpoint_type == 'classification': return { 'feature_extractor': self._feature_extractor.classification_backbone } elif fine_tune_checkpoint_type == 'full': return {'model': self} elif fine_tune_checkpoint_type == 'fine_tune': raise ValueError(('"fine_tune" is no longer supported for CenterNet. ' 'Please set fine_tune_checkpoint_type to "detection"' ' which has the same functionality. If you are using' ' the ExtremeNet checkpoint, download the new version' ' from the model zoo.')) else: raise ValueError('Unknown fine tune checkpoint type {}'.format( fine_tune_checkpoint_type)) def updates(self): if tf_version.is_tf2(): raise RuntimeError('This model is intended to be used with model_lib_v2 ' 'which does not support updates()') else: update_ops = [] slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Copy the slim ops to avoid modifying the collection if slim_update_ops: update_ops.extend(slim_update_ops) return update_ops
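# Illustrative sketch (not part of the original module): one plausible way to
# wire the `restore_from_objects` map above into a `tf.train.Checkpoint` when
# fine-tuning from a detection checkpoint. The `checkpoint_path` argument is a
# placeholder assumption.
def _example_restore_feature_extractor(model, checkpoint_path):
  """Restores only the feature extractor weights ('detection' type)."""
  restore_map = model.restore_from_objects(
      fine_tune_checkpoint_type='detection')
  ckpt = tf.train.Checkpoint(**restore_map)
  # expect_partial() silences warnings about checkpoint variables (e.g. box
  # heads or optimizer slots) that this model does not restore.
  ckpt.restore(checkpoint_path).expect_partial()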
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/center_net_meta_arch.py
center_net_meta_arch.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper functions for SSD models meta architecture tests.""" import functools import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import post_processing_builder from object_detection.core import anchor_generator from object_detection.core import balanced_positive_negative_sampler as sampler from object_detection.core import box_list from object_detection.core import losses from object_detection.core import post_processing from object_detection.core import region_similarity_calculator as sim_calc from object_detection.core import target_assigner from object_detection.meta_architectures import ssd_meta_arch from object_detection.protos import calibration_pb2 from object_detection.protos import model_pb2 from object_detection.utils import ops from object_detection.utils import test_case from object_detection.utils import test_utils from object_detection.utils import tf_version # pylint: disable=g-import-not-at-top try: import tf_slim as slim except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top keras = tf.keras.layers class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """Fake ssd feature extracture for ssd meta arch tests.""" def __init__(self): super(FakeSSDFeatureExtractor, self).__init__( is_training=True, depth_multiplier=0, min_depth=0, pad_to_multiple=1, conv_hyperparams_fn=None) def preprocess(self, resized_inputs): return tf.identity(resized_inputs) def extract_features(self, preprocessed_inputs): with tf.variable_scope('mock_model'): features = slim.conv2d( inputs=preprocessed_inputs, num_outputs=32, kernel_size=1, scope='layer1') return [features] class FakeSSDKerasFeatureExtractor(ssd_meta_arch.SSDKerasFeatureExtractor): """Fake keras based ssd feature extracture for ssd meta arch tests.""" def __init__(self): with tf.name_scope('mock_model'): super(FakeSSDKerasFeatureExtractor, self).__init__( is_training=True, depth_multiplier=0, min_depth=0, pad_to_multiple=1, conv_hyperparams=None, freeze_batchnorm=False, inplace_batchnorm_update=False, ) self._conv = keras.Conv2D(filters=32, kernel_size=1, name='layer1') def preprocess(self, resized_inputs): return tf.identity(resized_inputs) def _extract_features(self, preprocessed_inputs, **kwargs): with tf.name_scope('mock_model'): return [self._conv(preprocessed_inputs)] class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator): """A simple 2x2 anchor grid on the unit square used for test only.""" def name_scope(self): return 'MockAnchorGenerator' def num_anchors_per_location(self): return [1] def _generate(self, feature_map_shape_list, im_height, im_width): return [ box_list.BoxList( tf.constant( [ [0, 0, .5, .5], [0, .5, .5, 1], [.5, 0, 1, .5], [1., 1., 1.5, 1.5] # Anchor that is outside clip_window. 
], tf.float32)) ] def num_anchors(self): return 4 class SSDMetaArchTestBase(test_case.TestCase): """Base class to test SSD based meta architectures.""" def _create_model( self, model_fn=ssd_meta_arch.SSDMetaArch, apply_hard_mining=True, normalize_loc_loss_by_codesize=False, add_background_class=True, random_example_sampling=False, expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE, min_num_negative_samples=1, desired_negative_sampling_ratio=3, predict_mask=False, use_static_shapes=False, nms_max_size_per_class=5, calibration_mapping_value=None, return_raw_detections_during_predict=False): is_training = False num_classes = 1 mock_anchor_generator = MockAnchorGenerator2x2() use_keras = tf_version.is_tf2() if use_keras: mock_box_predictor = test_utils.MockKerasBoxPredictor( is_training, num_classes, add_background_class=add_background_class) else: mock_box_predictor = test_utils.MockBoxPredictor( is_training, num_classes, add_background_class=add_background_class) mock_box_coder = test_utils.MockBoxCoder() if use_keras: fake_feature_extractor = FakeSSDKerasFeatureExtractor() else: fake_feature_extractor = FakeSSDFeatureExtractor() mock_matcher = test_utils.MockMatcher() region_similarity_calculator = sim_calc.IouSimilarity() encode_background_as_zeros = False def image_resizer_fn(image): return [tf.identity(image), tf.shape(image)] classification_loss = losses.WeightedSigmoidClassificationLoss() localization_loss = losses.WeightedSmoothL1LocalizationLoss() non_max_suppression_fn = functools.partial( post_processing.batch_multiclass_non_max_suppression, score_thresh=-20.0, iou_thresh=1.0, max_size_per_class=nms_max_size_per_class, max_total_size=nms_max_size_per_class, use_static_shapes=use_static_shapes) score_conversion_fn = tf.identity calibration_config = calibration_pb2.CalibrationConfig() if calibration_mapping_value: calibration_text_proto = """ function_approximation { x_y_pairs { x_y_pair { x: 0.0 y: %f } x_y_pair { x: 1.0 y: %f }}}""" % (calibration_mapping_value, calibration_mapping_value) text_format.Merge(calibration_text_proto, calibration_config) score_conversion_fn = ( post_processing_builder._build_calibrated_score_converter( # pylint: disable=protected-access tf.identity, calibration_config)) classification_loss_weight = 1.0 localization_loss_weight = 1.0 negative_class_weight = 1.0 normalize_loss_by_num_matches = False hard_example_miner = None if apply_hard_mining: # This hard example miner is expected to be a no-op. 
hard_example_miner = losses.HardExampleMiner( num_hard_examples=None, iou_threshold=1.0) random_example_sampler = None if random_example_sampling: random_example_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=0.5) target_assigner_instance = target_assigner.TargetAssigner( region_similarity_calculator, mock_matcher, mock_box_coder, negative_class_weight=negative_class_weight) model_config = model_pb2.DetectionModel() if expected_loss_weights == model_config.ssd.loss.NONE: expected_loss_weights_fn = None else: raise ValueError('Not a valid value for expected_loss_weights.') code_size = 4 kwargs = {} if predict_mask: kwargs.update({ 'mask_prediction_fn': test_utils.MockMaskHead(num_classes=1).predict, }) model = model_fn( is_training=is_training, anchor_generator=mock_anchor_generator, box_predictor=mock_box_predictor, box_coder=mock_box_coder, feature_extractor=fake_feature_extractor, encode_background_as_zeros=encode_background_as_zeros, image_resizer_fn=image_resizer_fn, non_max_suppression_fn=non_max_suppression_fn, score_conversion_fn=score_conversion_fn, classification_loss=classification_loss, localization_loss=localization_loss, classification_loss_weight=classification_loss_weight, localization_loss_weight=localization_loss_weight, normalize_loss_by_num_matches=normalize_loss_by_num_matches, hard_example_miner=hard_example_miner, target_assigner_instance=target_assigner_instance, add_summaries=False, normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, freeze_batchnorm=False, inplace_batchnorm_update=False, add_background_class=add_background_class, random_example_sampler=random_example_sampler, expected_loss_weights_fn=expected_loss_weights_fn, return_raw_detections_during_predict=( return_raw_detections_during_predict), **kwargs) return model, num_classes, mock_anchor_generator.num_anchors(), code_size def _get_value_for_matching_key(self, dictionary, suffix): for key in dictionary.keys(): if key.endswith(suffix): return dictionary[key] raise ValueError('key not found {}'.format(suffix))
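# Illustrative sketch (not part of the original module): how a concrete test
# built on SSDMetaArchTestBase might exercise the fake SSD model returned by
# _create_model. The 4x4 input shape is an arbitrary assumption; the identity
# image_resizer_fn above leaves it unchanged.
def _example_fake_ssd_forward_pass(test_base):
  """Builds the fake SSD model and runs a single forward pass."""
  model, _, num_anchors, _ = test_base._create_model(apply_hard_mining=False)
  images = tf.zeros([1, 4, 4, 3], dtype=tf.float32)
  preprocessed_images, true_image_shapes = model.preprocess(images)
  prediction_dict = model.predict(preprocessed_images, true_image_shapes)
  # box_encodings has shape [batch_size, num_anchors, box_code_size].
  return prediction_dict['box_encodings'], num_anchors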
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/ssd_meta_arch_test_lib.py
ssd_meta_arch_test_lib.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Faster R-CNN meta-architecture definition. General tensorflow implementation of Faster R-CNN detection models. See Faster R-CNN: Ren, Shaoqing, et al. "Faster R-CNN: Towards real-time object detection with region proposal networks." Advances in neural information processing systems. 2015. We allow for three modes: number_of_stages={1, 2, 3}. In case of 1 stage, all of the user facing methods (e.g., predict, postprocess, loss) can be used as if the model consisted only of the RPN, returning class agnostic proposals (these can be thought of as approximate detections with no associated class information). In case of 2 stages, proposals are computed, then passed through a second stage "box classifier" to yield (multi-class) detections. Finally, in case of 3 stages which is only used during eval, proposals are computed, then passed through a second stage "box classifier" that will compute refined boxes and classes, and then features are pooled from the refined and non-maximum suppressed boxes and are passed through the box classifier again. If number of stages is 3 during training it will be reduced to two automatically. Implementations of Faster R-CNN models must define a new FasterRCNNFeatureExtractor and override three methods: `preprocess`, `_extract_proposal_features` (the first stage of the model), and `_extract_box_classifier_features` (the second stage of the model). Optionally, the `restore_fn` method can be overridden. See tests for an example. A few important notes: + Batching conventions: We support batched inference and training where all images within a batch have the same resolution. Batch sizes are determined dynamically via the shape of the input tensors (rather than being specified directly as, e.g., a model constructor). A complication is that due to non-max suppression, we are not guaranteed to get the same number of proposals from the first stage RPN (region proposal network) for each image (though in practice, we should often get the same number of proposals). For this reason we pad to a max number of proposals per image within a batch. This `self.max_num_proposals` property is set to the `first_stage_max_proposals` parameter at inference time and the `second_stage_batch_size` at training time since we subsample the batch to be sent through the box classifier during training. For the second stage of the pipeline, we arrange the proposals for all images within the batch along a single batch dimension. For example, the input to _extract_box_classifier_features is a tensor of shape `[total_num_proposals, crop_height, crop_width, depth]` where total_num_proposals is batch_size * self.max_num_proposals. (And note that per the above comment, a subset of these entries correspond to zero paddings.) 
+ Coordinate representations: Following the API (see model.DetectionModel definition), our outputs after postprocessing operations are always normalized boxes however, internally, we sometimes convert to absolute --- e.g. for loss computation. In particular, anchors and proposal_boxes are both represented as absolute coordinates. Images are resized in the `preprocess` method. The Faster R-CNN meta architecture has two post-processing methods `_postprocess_rpn` which is applied after first stage and `_postprocess_box_classifier` which is applied after second stage. There are three different ways post-processing can happen depending on number_of_stages configured in the meta architecture: 1. When number_of_stages is 1: `_postprocess_rpn` is run as part of the `postprocess` method where true_image_shapes is used to clip proposals, perform non-max suppression and normalize them. 2. When number of stages is 2: `_postprocess_rpn` is run as part of the `_predict_second_stage` method where `resized_image_shapes` is used to clip proposals, perform non-max suppression and normalize them. In this case `postprocess` method skips `_postprocess_rpn` and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip detections, perform non-max suppression and normalize them. 3. When number of stages is 3: `_postprocess_rpn` is run as part of the `_predict_second_stage` using `resized_image_shapes` to clip proposals, perform non-max suppression and normalize them. Subsequently, `_postprocess_box_classifier` is run as part of `_predict_third_stage` using `true_image_shapes` to clip detections, peform non-max suppression and normalize them. In this case, the `postprocess` method skips both `_postprocess_rpn` and `_postprocess_box_classifier`. """ from __future__ import print_function import abc import functools import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import box_predictor from object_detection.core import losses from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.core import target_assigner from object_detection.utils import ops from object_detection.utils import shape_utils from object_detection.utils import variables_helper _UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__' class FasterRCNNFeatureExtractor(object): """Faster R-CNN Feature Extractor definition.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. first_stage_features_stride: Output stride of extracted RPN feature map. batch_norm_trainable: Whether to update batch norm parameters during training or not. When training with a relative large batch size (e.g. 8), it could be desirable to enable batch norm update. reuse_weights: Whether to reuse variables. Default is None. weight_decay: float weight decay for feature extractor (default: 0.0). 
""" self._is_training = is_training self._first_stage_features_stride = first_stage_features_stride self._train_batch_norm = (batch_norm_trainable and is_training) self._reuse_weights = tf.AUTO_REUSE if reuse_weights else None self._weight_decay = weight_decay @abc.abstractmethod def preprocess(self, resized_inputs): """Feature-extractor specific preprocessing (minus image resizing).""" pass def extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. This function is responsible for extracting feature maps from preprocessed images. These features are used by the region proposal network (RPN) to predict proposals. Args: preprocessed_inputs: A [batch, height, width, channels] float tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] activations: A dictionary mapping activation tensor names to tensors. """ with tf.variable_scope(scope, values=[preprocessed_inputs]): return self._extract_proposal_features(preprocessed_inputs, scope) @abc.abstractmethod def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features, to be overridden.""" pass def extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ with tf.variable_scope( scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE): return self._extract_box_classifier_features(proposal_feature_maps, scope) @abc.abstractmethod def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features, to be overridden.""" pass def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): for scope_name in [first_stage_feature_extractor_scope, second_stage_feature_extractor_scope]: if variable.op.name.startswith(scope_name): var_name = variable.op.name.replace(scope_name + '/', '') variables_to_restore[var_name] = variable return variables_to_restore class FasterRCNNKerasFeatureExtractor(object): """Keras-based Faster R-CNN Feature Extractor definition.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. first_stage_features_stride: Output stride of extracted RPN feature map. batch_norm_trainable: Whether to update batch norm parameters during training or not. When training with a relative large batch size (e.g. 8), it could be desirable to enable batch norm update. 
weight_decay: float weight decay for feature extractor (default: 0.0). """ self._is_training = is_training self._first_stage_features_stride = first_stage_features_stride self._train_batch_norm = (batch_norm_trainable and is_training) self._weight_decay = weight_decay @abc.abstractmethod def preprocess(self, resized_inputs): """Feature-extractor specific preprocessing (minus image resizing).""" pass @abc.abstractmethod def get_proposal_feature_extractor_model(self, name): """Get model that extracts first stage RPN features, to be overridden.""" pass @abc.abstractmethod def get_box_classifier_feature_extractor_model(self, name): """Get model that extracts second stage box classifier features.""" pass class FasterRCNNMetaArch(model.DetectionModel): """Faster R-CNN Meta-architecture definition.""" def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, initial_crop_size, maxpool_kernel_size, maxpool_stride, second_stage_target_assigner, second_stage_mask_rcnn_box_predictor, second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, second_stage_mask_prediction_loss_weight=1.0, hard_example_miner=None, parallel_iterations=16, add_summaries=True, clip_anchors_to_image=False, use_static_shapes=False, resize_masks=True, freeze_batchnorm=False, return_raw_detections_during_predict=False, output_final_box_features=False, output_final_box_rpn_features=False): """FasterRCNNMetaArch Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. num_classes: Number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). image_resizer_fn: A callable for image resizing. This callable takes a rank-3 image tensor of shape [height, width, channels] (corresponding to a single image), an optional rank-3 instance mask tensor of shape [num_masks, height, width] and returns a resized rank-3 image tensor, a resized mask tensor if one was provided in the input. In addition this callable must also return a 1-D tensor of the form [height, width, channels] containing the size of the true image, as the image resizer can perform zero padding. See protos/image_resizer.proto. feature_extractor: A FasterRCNNFeatureExtractor object. number_of_stages: An integer values taking values in {1, 2, 3}. If 1, the function will construct only the Region Proposal Network (RPN) part of the model. If 2, the function will perform box refinement and other auxiliary predictions all in the second stage. If 3, it will extract features from refined boxes and perform the auxiliary predictions on the non-maximum suppressed refined boxes. 
If is_training is true and the value of number_of_stages is 3, it is reduced to 2 since all the model heads are trained in parallel in second stage during training. first_stage_anchor_generator: An anchor_generator.AnchorGenerator object (note that currently we only support grid_anchor_generator.GridAnchorGenerator objects) first_stage_target_assigner: Target assigner to use for first stage of Faster R-CNN (RPN). first_stage_atrous_rate: A single integer indicating the atrous rate for the single convolution op which is applied to the `rpn_features_to_crop` tensor to obtain a tensor to be used for box prediction. Some feature extractors optionally allow for producing feature maps computed at denser resolutions. The atrous rate is used to compensate for the denser feature maps by using an effectively larger receptive field. (This should typically be set to 1). first_stage_box_predictor_arg_scope_fn: Either a Keras layer hyperparams object or a function to construct tf-slim arg_scope for conv2d, separable_conv2d and fully_connected ops. Used for the RPN box predictor. If it is a keras hyperparams object the RPN box predictor will be a Keras model. If it is a function to construct an arg scope it will be a tf-slim box predictor. first_stage_box_predictor_kernel_size: Kernel size to use for the convolution op just prior to RPN box predictions. first_stage_box_predictor_depth: Output depth for the convolution op just prior to RPN box predictions. first_stage_minibatch_size: The "batch size" to use for computing the objectness and location loss of the region proposal network. This "batch size" refers to the number of anchors selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. first_stage_sampler: Sampler to use for first stage loss (RPN loss). first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window`(with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`. This is used to perform non max suppression on the boxes predicted by the Region Proposal Network (RPN). See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. first_stage_max_proposals: Maximum number of boxes to retain after performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN). first_stage_localization_loss_weight: A float first_stage_objectness_loss_weight: A float crop_and_resize_fn: A differentiable resampler to use for cropping RPN proposal features. initial_crop_size: A single integer indicating the output size (width and height are set to be the same) of the initial bilinear interpolation based cropping during ROI pooling. maxpool_kernel_size: A single integer indicating the kernel size of the max pool op on the cropped feature map during ROI pooling. maxpool_stride: A single integer indicating the stride of the max pool op on the cropped feature map during ROI pooling. second_stage_target_assigner: Target assigner to use for second stage of Faster R-CNN. If the model is configured with multiple prediction heads, this target assigner is used to generate targets for all heads (with the correct `unmatched_class_label`). second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for the second stage. 
second_stage_batch_size: The batch size used for computing the classification and refined location loss of the box classifier. This "batch size" refers to the number of proposals selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. second_stage_sampler: Sampler to use for second stage loss (box classifier loss). second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores`, optional `clip_window` and optional (kwarg) `mask` inputs (with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`, and (optionally) `detection_masks`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. second_stage_score_conversion_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. second_stage_localization_loss_weight: A float indicating the scale factor for second stage localization loss. second_stage_classification_loss_weight: A float indicating the scale factor for second stage classification loss. second_stage_classification_loss: Classification loss used by the second stage classifier. Either losses.WeightedSigmoidClassificationLoss or losses.WeightedSoftmaxClassificationLoss. second_stage_mask_prediction_loss_weight: A float indicating the scale factor for second stage mask prediction loss. This is applicable only if second stage box predictor is configured to predict masks. hard_example_miner: A losses.HardExampleMiner object (can be None). parallel_iterations: (Optional) The number of iterations allowed to run in parallel for calls to tf.map_fn. add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph. clip_anchors_to_image: Normally, anchors generated for a given image size are pruned during training if they lie outside the image window. This option clips the anchors to be within the image instead of pruning. use_static_shapes: If True, uses implementation of ops with static shape guarantees. resize_masks: Indicates whether the masks presend in the groundtruth should be resized in the model with `image_resizer_fn` freeze_batchnorm: Whether to freeze batch norm parameters in the first stage box predictor during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. return_raw_detections_during_predict: Whether to return raw detection boxes in the predict() method. These are decoded boxes that have not been through postprocessing (i.e. NMS). Default False. output_final_box_features: Whether to output final box features. If true, it crops the rpn feature map and passes it through box_classifier then returns in the output dict as `detection_features`. output_final_box_rpn_features: Whether to output rpn box features. If true, it crops the rpn feature map and returns in the output dict as `detection_features`. Raises: ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at training time. ValueError: If first_stage_anchor_generator is not of type grid_anchor_generator.GridAnchorGenerator. """ # TODO(rathodv): add_summaries is currently unused. Respect that directive # in the future. 
super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes) self._is_training = is_training self._image_resizer_fn = image_resizer_fn self._resize_masks = resize_masks self._feature_extractor = feature_extractor if isinstance(feature_extractor, FasterRCNNKerasFeatureExtractor): # We delay building the feature extractor until it is used, # to avoid creating the variables when a model is built just for data # preprocessing. (This prevents a subtle bug where variable names are # mismatched across workers, causing only one worker to be able to train) self._feature_extractor_for_proposal_features = ( _UNINITIALIZED_FEATURE_EXTRACTOR) self._feature_extractor_for_box_classifier_features = ( _UNINITIALIZED_FEATURE_EXTRACTOR) else: self._feature_extractor_for_proposal_features = None self._feature_extractor_for_box_classifier_features = None self._number_of_stages = number_of_stages self._proposal_target_assigner = first_stage_target_assigner self._detector_target_assigner = second_stage_target_assigner # Both proposal and detector target assigners use the same box coder self._box_coder = self._proposal_target_assigner.box_coder # (First stage) Region proposal network parameters self._first_stage_anchor_generator = first_stage_anchor_generator self._first_stage_atrous_rate = first_stage_atrous_rate self._first_stage_box_predictor_depth = first_stage_box_predictor_depth self._first_stage_box_predictor_kernel_size = ( first_stage_box_predictor_kernel_size) self._first_stage_minibatch_size = first_stage_minibatch_size self._first_stage_sampler = first_stage_sampler if isinstance(first_stage_box_predictor_arg_scope_fn, hyperparams_builder.KerasLayerHyperparams): num_anchors_per_location = ( self._first_stage_anchor_generator.num_anchors_per_location()) conv_hyperparams = ( first_stage_box_predictor_arg_scope_fn) self._first_stage_box_predictor_first_conv = ( tf.keras.Sequential([ tf.keras.layers.Conv2D( self._first_stage_box_predictor_depth, kernel_size=[self._first_stage_box_predictor_kernel_size, self._first_stage_box_predictor_kernel_size], dilation_rate=self._first_stage_atrous_rate, padding='SAME', name='RPNConv', **conv_hyperparams.params()), conv_hyperparams.build_batch_norm( (self._is_training and not freeze_batchnorm), name='RPNBatchNorm'), tf.keras.layers.Lambda( tf.nn.relu6, name='RPNActivation') ], name='FirstStageRPNFeatures')) self._first_stage_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=self._is_training, num_classes=1, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=False, num_predictions_per_location_list=num_anchors_per_location, use_dropout=False, dropout_keep_prob=1.0, box_code_size=self._box_coder.code_size, kernel_size=1, num_layers_before_predictor=0, min_depth=0, max_depth=0, name=self.first_stage_box_predictor_scope)) else: self._first_stage_box_predictor_arg_scope_fn = ( first_stage_box_predictor_arg_scope_fn) def rpn_box_predictor_feature_extractor(single_rpn_features_to_crop): with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()): return slim.conv2d( single_rpn_features_to_crop, self._first_stage_box_predictor_depth, kernel_size=[ self._first_stage_box_predictor_kernel_size, self._first_stage_box_predictor_kernel_size ], rate=self._first_stage_atrous_rate, activation_fn=tf.nn.relu6, scope='Conv', reuse=tf.AUTO_REUSE) self._first_stage_box_predictor_first_conv = ( rpn_box_predictor_feature_extractor) self._first_stage_box_predictor = ( 
box_predictor_builder.build_convolutional_box_predictor( is_training=self._is_training, num_classes=1, conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn, use_dropout=False, dropout_keep_prob=1.0, box_code_size=self._box_coder.code_size, kernel_size=1, num_layers_before_predictor=0, min_depth=0, max_depth=0)) self._first_stage_nms_fn = first_stage_non_max_suppression_fn self._first_stage_max_proposals = first_stage_max_proposals self._use_static_shapes = use_static_shapes self._first_stage_localization_loss = ( losses.WeightedSmoothL1LocalizationLoss()) self._first_stage_objectness_loss = ( losses.WeightedSoftmaxClassificationLoss()) self._first_stage_loc_loss_weight = first_stage_localization_loss_weight self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight # Per-region cropping parameters self._crop_and_resize_fn = crop_and_resize_fn self._initial_crop_size = initial_crop_size self._maxpool_kernel_size = maxpool_kernel_size self._maxpool_stride = maxpool_stride # If max pooling is to be used, build the layer if maxpool_kernel_size: self._maxpool_layer = tf.keras.layers.MaxPooling2D( [self._maxpool_kernel_size, self._maxpool_kernel_size], strides=self._maxpool_stride, name='MaxPool2D') self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor self._second_stage_batch_size = second_stage_batch_size self._second_stage_sampler = second_stage_sampler self._second_stage_nms_fn = second_stage_non_max_suppression_fn self._second_stage_score_conversion_fn = second_stage_score_conversion_fn self._second_stage_localization_loss = ( losses.WeightedSmoothL1LocalizationLoss()) self._second_stage_classification_loss = second_stage_classification_loss self._second_stage_mask_loss = ( losses.WeightedSigmoidClassificationLoss()) self._second_stage_loc_loss_weight = second_stage_localization_loss_weight self._second_stage_cls_loss_weight = second_stage_classification_loss_weight self._second_stage_mask_loss_weight = ( second_stage_mask_prediction_loss_weight) self._hard_example_miner = hard_example_miner self._parallel_iterations = parallel_iterations self.clip_anchors_to_image = clip_anchors_to_image if self._number_of_stages <= 0 or self._number_of_stages > 3: raise ValueError('Number of stages should be a value in {1, 2, 3}.') self._batched_prediction_tensor_names = [] self._return_raw_detections_during_predict = ( return_raw_detections_during_predict) self._output_final_box_features = output_final_box_features self._output_final_box_rpn_features = output_final_box_rpn_features @property def first_stage_feature_extractor_scope(self): return 'FirstStageFeatureExtractor' @property def second_stage_feature_extractor_scope(self): return 'SecondStageFeatureExtractor' @property def first_stage_box_predictor_scope(self): return 'FirstStageBoxPredictor' @property def second_stage_box_predictor_scope(self): return 'SecondStageBoxPredictor' @property def max_num_proposals(self): """Max number of proposals (to pad to) for each image in the input batch. At training time, this is set to be the `second_stage_batch_size` if hard example miner is not configured, else it is set to `first_stage_max_proposals`. At inference time, this is always set to `first_stage_max_proposals`. Returns: A positive integer. 
""" if self._is_training and not self._hard_example_miner: return self._second_stage_batch_size return self._first_stage_max_proposals @property def anchors(self): if not self._anchors: raise RuntimeError('anchors have not been constructed yet!') if not isinstance(self._anchors, box_list.BoxList): raise RuntimeError('anchors should be a BoxList object, but is not.') return self._anchors @property def batched_prediction_tensor_names(self): if not self._batched_prediction_tensor_names: raise RuntimeError('Must call predict() method to get batched prediction ' 'tensor names.') return self._batched_prediction_tensor_names @property def feature_extractor(self): return self._feature_extractor def preprocess(self, inputs): """Feature-extractor specific preprocessing. See base class. For Faster R-CNN, we perform image resizing in the base class --- each class subclassing FasterRCNNMetaArch is responsible for any additional preprocessing (e.g., scaling pixel values to be in [-1, 1]). Args: inputs: a [batch, height_in, width_in, channels] float tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, height_out, width_out, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Raises: ValueError: if inputs tensor does not have type tf.float32 """ with tf.name_scope('Preprocessor'): (resized_inputs, true_image_shapes) = shape_utils.resize_images_and_return_shapes( inputs, self._image_resizer_fn) return (self._feature_extractor.preprocess(resized_inputs), true_image_shapes) def _compute_clip_window(self, image_shapes): """Computes clip window for non max suppression based on image shapes. This function assumes that the clip window's left top corner is at (0, 0). Args: image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing shapes of images in the batch. Each row represents [height, width, channels] of an image. Returns: A 2-D float32 tensor of shape [batch_size, 4] containing the clip window for each image in the form [ymin, xmin, ymax, xmax]. """ clip_heights = image_shapes[:, 0] clip_widths = image_shapes[:, 1] clip_window = tf.cast( tf.stack([ tf.zeros_like(clip_heights), tf.zeros_like(clip_heights), clip_heights, clip_widths ], axis=1), dtype=tf.float32) return clip_window def _proposal_postprocess(self, rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape, true_image_shapes): """Wraps over FasterRCNNMetaArch._postprocess_rpn().""" image_shape_2d = self._image_batch_shape_2d(image_shape) proposal_boxes_normalized, _, _, num_proposals, _, _ = \ self._postprocess_rpn( rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape_2d, true_image_shapes) return proposal_boxes_normalized, num_proposals def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): """Predicts unpostprocessed tensors from input tensor. This function takes an input batch of images and runs it through the forward pass of the network to yield "raw" un-postprocessed predictions. If `number_of_stages` is 1, this function only returns first stage RPN predictions (un-postprocessed). Otherwise it returns both first stage RPN predictions as well as second stage box classifier predictions. Other remarks: + Anchor pruning vs. 
clipping: following the recommendation of the Faster R-CNN paper, we prune anchors that venture outside the image window at training time and clip anchors to the image window at inference time. + Proposal padding: as described at the top of the file, proposals are padded to self._max_num_proposals and flattened so that proposals from all images within the input batch are arranged along the same batch dimension. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) rpn_box_predictor_features: A list of 4-D float32 tensor with shape [batch_size, height_i, width_j, depth] to be used for predicting proposal boxes and corresponding objectness scores. 2) rpn_features_to_crop: A list of 4-D float32 tensor with shape [batch_size, height, width, depth] representing image features to crop using the proposal boxes predicted by the RPN. 3) image_shape: a 1-D tensor of shape [4] representing the input image shape. 4) rpn_box_encodings: 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. 5) rpn_objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN (in absolute coordinates). Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. 7) feature_maps: A single element list containing a 4-D float32 tensor with shape batch_size, height, width, depth] representing the RPN features to crop. (and if number_of_stages > 1): 8) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 9) class_predictions_with_background: a 3-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 10) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 11) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 12) mask_predictions: (optional) a 4-D tensor with shape [total_num_padded_proposals, num_classes, mask_height, mask_width] containing instance mask predictions. 
13) raw_detection_boxes: (optional) a [batch_size, self.max_num_proposals, num_classes, 4] float32 tensor with detections prior to NMS in normalized coordinates. 14) raw_detection_feature_map_indices: (optional) a [batch_size, self.max_num_proposals, num_classes] int32 tensor with indices indicating which feature map each raw detection box was produced from. The indices correspond to the elements in the 'feature_maps' field. Raises: ValueError: If `predict` is called before `preprocess`. """ prediction_dict = self._predict_first_stage(preprocessed_inputs) if self._number_of_stages >= 2: prediction_dict.update( self._predict_second_stage( prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['rpn_features_to_crop'], prediction_dict['anchors'], prediction_dict['image_shape'], true_image_shapes, **side_inputs)) if self._number_of_stages == 3: prediction_dict = self._predict_third_stage(prediction_dict, true_image_shapes) self._batched_prediction_tensor_names = [ x for x in prediction_dict if x not in ('image_shape', 'anchors') ] return prediction_dict def _predict_first_stage(self, preprocessed_inputs): """First stage of prediction. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) rpn_box_predictor_features: A list of 4-D float32/bfloat16 tensor with shape [batch_size, height_i, width_j, depth] to be used for predicting proposal boxes and corresponding objectness scores. 2) rpn_features_to_crop: A list of 4-D float32/bfloat16 tensor with shape [batch_size, height, width, depth] representing image features to crop using the proposal boxes predicted by the RPN. 3) image_shape: a 1-D tensor of shape [4] representing the input image shape. 4) rpn_box_encodings: 3-D float32 tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. 5) rpn_objectness_predictions_with_background: 3-D float32 tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN (in absolute coordinates). Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. 7) feature_maps: A single element list containing a 4-D float32 tensor with shape batch_size, height, width, depth] representing the RPN features to crop. """ (rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist, image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs) (rpn_box_encodings, rpn_objectness_predictions_with_background ) = self._predict_rpn_proposals(rpn_box_predictor_features) # The Faster R-CNN paper recommends pruning anchors that venture outside # the image window at training time and clipping at inference time. 
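    # Illustrative note (added sketch, not original code): with a 300x400
    # input image, `clip_window` below is [0, 0, 300, 400]. An anchor such as
    # [-10., -10., 50., 50.] that extends past the image boundary is then
    # either
    #   * removed entirely, together with its box encoding and objectness
    #     prediction (the default training path below), or
    #   * clipped to [0., 0., 50., 50.] (the inference path, and the training
    #     path when `clip_anchors_to_image` is set),
    # which mirrors the recommendation described in the comment above.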
    clip_window = tf.cast(tf.stack([0, 0, image_shape[1], image_shape[2]]),
                          dtype=tf.float32)
    if self._is_training:
      if self.clip_anchors_to_image:
        anchors_boxlist = box_list_ops.clip_to_window(
            anchors_boxlist, clip_window, filter_nonoverlapping=False)
      else:
        (rpn_box_encodings, rpn_objectness_predictions_with_background,
         anchors_boxlist) = self._remove_invalid_anchors_and_predictions(
             rpn_box_encodings, rpn_objectness_predictions_with_background,
             anchors_boxlist, clip_window)
    else:
      anchors_boxlist = box_list_ops.clip_to_window(
          anchors_boxlist, clip_window,
          filter_nonoverlapping=not self._use_static_shapes)

    self._anchors = anchors_boxlist
    prediction_dict = {
        'rpn_box_predictor_features':
            rpn_box_predictor_features,
        'rpn_features_to_crop':
            rpn_features_to_crop,
        'image_shape':
            image_shape,
        'rpn_box_encodings':
            tf.cast(rpn_box_encodings, dtype=tf.float32),
        'rpn_objectness_predictions_with_background':
            tf.cast(rpn_objectness_predictions_with_background,
                    dtype=tf.float32),
        'anchors':
            anchors_boxlist.data['boxes'],
        fields.PredictionFields.feature_maps:
            rpn_features_to_crop
    }
    return prediction_dict

  def _image_batch_shape_2d(self, image_batch_shape_1d):
    """Takes a 1-D image batch shape tensor and converts it to a 2-D tensor.

    Example:
      If the 1-D image batch shape tensor is [2, 300, 300, 3], the
      corresponding 2-D image batch tensor would be
      [[300, 300, 3], [300, 300, 3]].

    Args:
      image_batch_shape_1d: 1-D tensor of the form [batch_size, height,
        width, channels].

    Returns:
      image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] where each row
        is of the form [height, width, channels].
    """
    return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0),
                   [image_batch_shape_1d[0], 1])

  def _predict_second_stage(self, rpn_box_encodings,
                            rpn_objectness_predictions_with_background,
                            rpn_features_to_crop, anchors, image_shape,
                            true_image_shapes, **side_inputs):
    """Predicts the output tensors from second stage of Faster R-CNN.

    Args:
      rpn_box_encodings: 3-D float tensor of shape
        [batch_size, num_valid_anchors, self._box_coder.code_size] containing
        predicted boxes.
      rpn_objectness_predictions_with_background: 3-D float tensor of shape
        [batch_size, num_valid_anchors, 2] containing class predictions
        (logits) for each of the anchors. Note that this tensor *includes*
        background class predictions (at class index 0).
      rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensors with
        shape [batch_size, height_i, width_i, depth] representing image
        features to crop using the proposal boxes predicted by the RPN.
      anchors: 2-D float tensor of shape
        [num_anchors, self._box_coder.code_size].
      image_shape: A 1-D int32 tensor of size [4] containing the image shape.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes of true
        images in the resized images, as resized images can be padded with
        zeros.
      **side_inputs: additional tensors that are required by the network.

    Returns:
      prediction_dict: a dictionary holding "raw" prediction tensors:
        1) refined_box_encodings: a 3-D float32 tensor with shape
          [total_num_proposals, num_classes, self._box_coder.code_size]
          representing predicted (final) refined box encodings, where
          total_num_proposals=batch_size*self._max_num_proposals. If using a
          shared box across classes the shape will instead be
          [total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D float32 tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 5) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in normalized coordinates. Can be used to override the boxes proposed by the RPN, thus enabling one to extract features and get box classification and prediction for externally selected areas of the image. 6) box_classifier_features: a 4-D float32/bfloat16 tensor representing the features for each proposal. If self._return_raw_detections_during_predict is True, the dictionary will also contain: 7) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, num_classes, 4] in normalized coordinates. 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals, num_classes]. """ proposal_boxes_normalized, num_proposals = self._proposal_postprocess( rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape, true_image_shapes) prediction_dict = self._box_prediction(rpn_features_to_crop, proposal_boxes_normalized, image_shape, true_image_shapes, **side_inputs) prediction_dict['num_proposals'] = num_proposals return prediction_dict def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized, image_shape, true_image_shapes, **side_inputs): """Predicts the output tensors from second stage of Faster R-CNN. Args: rpn_features_to_crop: A list 4-D float32 or bfloat16 tensor with shape [batch_size, height_i, width_i, depth] representing image features to crop using the proposal boxes predicted by the RPN. proposal_boxes_normalized: A float tensor with shape [batch_size, max_num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented as normalized coordinates. image_shape: A 1D int32 tensors of size [4] containing the image shape. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D float32 tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 
2) class_predictions_with_background: a 3-D float32 tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 4) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in normalized coordinates. Can be used to override the boxes proposed by the RPN, thus enabling one to extract features and get box classification and prediction for externally selected areas of the image. 5) box_classifier_features: a 4-D float32/bfloat16 tensor representing the features for each proposal. If self._return_raw_detections_during_predict is True, the dictionary will also contain: 6) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, num_classes, 4] in normalized coordinates. 7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals, num_classes]. 8) final_anchors: a 3-D float tensor of shape [batch_size, self.max_num_proposals, 4] containing the reference anchors for raw detection boxes in normalized coordinates. """ flattened_proposal_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, proposal_boxes_normalized, image_shape, **side_inputs)) box_classifier_features = self._extract_box_classifier_features( flattened_proposal_feature_maps, **side_inputs) if self._mask_rcnn_box_predictor.is_keras_model: box_predictions = self._mask_rcnn_box_predictor( [box_classifier_features], prediction_stage=2) else: box_predictions = self._mask_rcnn_box_predictor.predict( [box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, prediction_stage=2) refined_box_encodings = tf.squeeze( box_predictions[box_predictor.BOX_ENCODINGS], axis=1, name='all_refined_box_encodings') class_predictions_with_background = tf.squeeze( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1, name='all_class_predictions_with_background') absolute_proposal_boxes = ops.normalized_to_image_coordinates( proposal_boxes_normalized, image_shape, self._parallel_iterations) prediction_dict = { 'refined_box_encodings': tf.cast(refined_box_encodings, dtype=tf.float32), 'class_predictions_with_background': tf.cast(class_predictions_with_background, dtype=tf.float32), 'proposal_boxes': absolute_proposal_boxes, 'box_classifier_features': box_classifier_features, 'proposal_boxes_normalized': proposal_boxes_normalized, 'final_anchors': proposal_boxes_normalized } if self._return_raw_detections_during_predict: prediction_dict.update(self._raw_detections_and_feature_map_inds( refined_box_encodings, absolute_proposal_boxes, true_image_shapes)) return prediction_dict def _raw_detections_and_feature_map_inds( self, refined_box_encodings, absolute_proposal_boxes, true_image_shapes): """Returns raw detections and feat map inds from where they originated. Args: refined_box_encodings: [total_num_proposals, num_classes, self._box_coder.code_size] float32 tensor. absolute_proposal_boxes: [batch_size, self.max_num_proposals, 4] float32 tensor representing decoded proposal bounding boxes in absolute coordinates. 
true_image_shapes: [batch, 3] int32 tensor where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: A dictionary with raw detection boxes, and the feature map indices from which they originated. """ box_encodings_batch = tf.reshape( refined_box_encodings, [-1, self.max_num_proposals, refined_box_encodings.shape[1], self._box_coder.code_size]) raw_detection_boxes_absolute = self._batch_decode_boxes( box_encodings_batch, absolute_proposal_boxes) raw_detection_boxes_normalized = shape_utils.static_or_dynamic_map_fn( self._normalize_and_clip_boxes, elems=[raw_detection_boxes_absolute, true_image_shapes], dtype=tf.float32) detection_feature_map_indices = tf.zeros_like( raw_detection_boxes_normalized[:, :, :, 0], dtype=tf.int32) return { fields.PredictionFields.raw_detection_boxes: raw_detection_boxes_normalized, fields.PredictionFields.raw_detection_feature_map_indices: detection_feature_map_indices } def _extract_box_classifier_features(self, flattened_feature_maps): if self._feature_extractor_for_box_classifier_features == ( _UNINITIALIZED_FEATURE_EXTRACTOR): self._feature_extractor_for_box_classifier_features = ( self._feature_extractor.get_box_classifier_feature_extractor_model( name=self.second_stage_feature_extractor_scope)) if self._feature_extractor_for_box_classifier_features: box_classifier_features = ( self._feature_extractor_for_box_classifier_features( flattened_feature_maps)) else: box_classifier_features = ( self._feature_extractor.extract_box_classifier_features( flattened_feature_maps, scope=self.second_stage_feature_extractor_scope)) return box_classifier_features def _predict_third_stage(self, prediction_dict, image_shapes): """Predicts non-box, non-class outputs using refined detections. For training, masks as predicted directly on the box_classifier_features, which are region-features from the initial anchor boxes. For inference, this happens after calling the post-processing stage, such that masks are only calculated for the top scored boxes. Args: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 2) class_predictions_with_background: a 3-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 5) box_classifier_features: a 4-D float32 tensor representing the features for each proposal. 6) image_shape: a 1-D tensor of shape [4] representing the input image shape. 
image_shapes: A 2-D int32 tensors of shape [batch_size, 3] containing shapes of images in the batch. Returns: prediction_dict: a dictionary that in addition to the input predictions does hold the following predictions as well: 1) mask_predictions: a 4-D tensor with shape [batch_size, max_detection, mask_height, mask_width] containing instance mask predictions. """ if self._is_training: curr_box_classifier_features = prediction_dict['box_classifier_features'] detection_classes = prediction_dict['class_predictions_with_background'] if self._mask_rcnn_box_predictor.is_keras_model: mask_predictions = self._mask_rcnn_box_predictor( [curr_box_classifier_features], prediction_stage=3) else: mask_predictions = self._mask_rcnn_box_predictor.predict( [curr_box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, prediction_stage=3) prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[ box_predictor.MASK_PREDICTIONS], axis=1) else: detections_dict = self._postprocess_box_classifier( prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['proposal_boxes'], prediction_dict['num_proposals'], image_shapes) prediction_dict.update(detections_dict) detection_boxes = detections_dict[ fields.DetectionResultFields.detection_boxes] detection_classes = detections_dict[ fields.DetectionResultFields.detection_classes] rpn_features_to_crop = prediction_dict['rpn_features_to_crop'] image_shape = prediction_dict['image_shape'] batch_size = tf.shape(detection_boxes)[0] max_detection = tf.shape(detection_boxes)[1] flattened_detected_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, detection_boxes, image_shape)) curr_box_classifier_features = self._extract_box_classifier_features( flattened_detected_feature_maps) if self._mask_rcnn_box_predictor.is_keras_model: mask_predictions = self._mask_rcnn_box_predictor( [curr_box_classifier_features], prediction_stage=3) else: mask_predictions = self._mask_rcnn_box_predictor.predict( [curr_box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, prediction_stage=3) detection_masks = tf.squeeze(mask_predictions[ box_predictor.MASK_PREDICTIONS], axis=1) _, num_classes, mask_height, mask_width = ( detection_masks.get_shape().as_list()) _, max_detection = detection_classes.get_shape().as_list() prediction_dict['mask_predictions'] = tf.reshape( detection_masks, [-1, num_classes, mask_height, mask_width]) if num_classes > 1: detection_masks = self._gather_instance_masks( detection_masks, detection_classes) detection_masks = tf.cast(detection_masks, tf.float32) prediction_dict[fields.DetectionResultFields.detection_masks] = ( tf.reshape(tf.sigmoid(detection_masks), [batch_size, max_detection, mask_height, mask_width])) return prediction_dict def _gather_instance_masks(self, instance_masks, classes): """Gathers the masks that correspond to classes. Args: instance_masks: A 4-D float32 tensor with shape [K, num_classes, mask_height, mask_width]. classes: A 2-D int32 tensor with shape [batch_size, max_detection]. Returns: masks: a 3-D float32 tensor with shape [K, mask_height, mask_width]. 
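    Illustrative example (added sketch, not part of the original docstring):
    with K=2, num_classes=3 and classes=[[2, 0]], the flattened gather indices
    computed below are [0*3 + 2, 1*3 + 0] = [2, 3], i.e. the class-2 mask of
    the first box and the class-0 mask of the second box are returned.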
""" _, num_classes, height, width = instance_masks.get_shape().as_list() k = tf.shape(instance_masks)[0] instance_masks = tf.reshape(instance_masks, [-1, height, width]) classes = tf.cast(tf.reshape(classes, [-1]), dtype=tf.int32) gather_idx = tf.range(k) * num_classes + classes return tf.gather(instance_masks, gather_idx) def _extract_rpn_feature_maps(self, preprocessed_inputs): """Extracts RPN features. This function extracts two feature maps: a feature map to be directly fed to a box predictor (to predict location and objectness scores for proposals) and a feature map from which to crop regions which will then be sent to the second stage box classifier. Args: preprocessed_inputs: a [batch, height, width, channels] image tensor. Returns: rpn_box_predictor_features: A list of 4-D float32 tensor with shape [batch, height_i, width_j, depth] to be used for predicting proposal boxes and corresponding objectness scores. rpn_features_to_crop: A list of 4-D float32 tensor with shape [batch, height, width, depth] representing image features to crop using the proposals boxes. anchors: A list of BoxList representing anchors (for the RPN) in absolute coordinates. image_shape: A 1-D tensor representing the input image shape. """ image_shape = tf.shape(preprocessed_inputs) rpn_features_to_crop, self.endpoints = self._extract_proposal_features( preprocessed_inputs) # Decide if rpn_features_to_crop is a list. If not make it a list if not isinstance(rpn_features_to_crop, list): rpn_features_to_crop = [rpn_features_to_crop] feature_map_shapes = [] rpn_box_predictor_features = [] for single_rpn_features_to_crop in rpn_features_to_crop: single_shape = tf.shape(single_rpn_features_to_crop) feature_map_shapes.append((single_shape[1], single_shape[2])) single_rpn_box_predictor_features = ( self._first_stage_box_predictor_first_conv( single_rpn_features_to_crop)) rpn_box_predictor_features.append(single_rpn_box_predictor_features) anchors = box_list_ops.concatenate( self._first_stage_anchor_generator.generate(feature_map_shapes)) return (rpn_box_predictor_features, rpn_features_to_crop, anchors, image_shape) def _extract_proposal_features(self, preprocessed_inputs): if self._feature_extractor_for_proposal_features == ( _UNINITIALIZED_FEATURE_EXTRACTOR): self._feature_extractor_for_proposal_features = ( self._feature_extractor.get_proposal_feature_extractor_model( name=self.first_stage_feature_extractor_scope)) if self._feature_extractor_for_proposal_features: proposal_features = ( self._feature_extractor_for_proposal_features(preprocessed_inputs), {}) else: proposal_features = ( self._feature_extractor.extract_proposal_features( preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)) return proposal_features def _predict_rpn_proposals(self, rpn_box_predictor_features): """Adds box predictors to RPN feature map to predict proposals. Note resulting tensors will not have been postprocessed. Args: rpn_box_predictor_features: A list of 4-D float32 tensor with shape [batch, height_i, width_j, depth] to be used for predicting proposal boxes and corresponding objectness scores. Returns: box_encodings: 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 
    Raises:
      RuntimeError: if the anchor generator generates anchors corresponding to
        multiple feature maps. We currently assume that a single feature map
        is generated for the RPN.
    """
    num_anchors_per_location = (
        self._first_stage_anchor_generator.num_anchors_per_location())
    if self._first_stage_box_predictor.is_keras_model:
      box_predictions = self._first_stage_box_predictor(
          rpn_box_predictor_features)
    else:
      box_predictions = self._first_stage_box_predictor.predict(
          rpn_box_predictor_features,
          num_anchors_per_location,
          scope=self.first_stage_box_predictor_scope)

    box_encodings = tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions_with_background = tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    return (tf.squeeze(box_encodings, axis=2),
            objectness_predictions_with_background)

  def _remove_invalid_anchors_and_predictions(
      self,
      box_encodings,
      objectness_predictions_with_background,
      anchors_boxlist,
      clip_window):
    """Removes anchors that (partially) fall outside an image.

    Also removes associated box encodings and objectness predictions.

    Args:
      box_encodings: 3-D float tensor of shape
        [batch_size, num_anchors, self._box_coder.code_size] containing
        predicted boxes.
      objectness_predictions_with_background: 3-D float tensor of shape
        [batch_size, num_anchors, 2] containing class predictions (logits) for
        each of the anchors. Note that this tensor *includes* background class
        predictions (at class index 0).
      anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN)
        in absolute coordinates.
      clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax]
        extent of the window to clip/prune to.

    Returns:
      box_encodings: 3-D float tensor of shape
        [batch_size, num_valid_anchors, self._box_coder.code_size] containing
        predicted boxes, where num_valid_anchors <= num_anchors.
      objectness_predictions_with_background: 3-D float tensor of shape
        [batch_size, num_valid_anchors, 2] containing class predictions
        (logits) for each of the anchors, where
        num_valid_anchors <= num_anchors. Note that this tensor *includes*
        background class predictions (at class index 0).
      anchors: A BoxList representing num_valid_anchors anchors (for the RPN)
        in absolute coordinates.
    """
    pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(
        anchors_boxlist, clip_window)

    def _batch_gather_kept_indices(predictions_tensor):
      return shape_utils.static_or_dynamic_map_fn(
          functools.partial(tf.gather, indices=keep_indices),
          elems=predictions_tensor,
          dtype=tf.float32,
          parallel_iterations=self._parallel_iterations,
          back_prop=True)

    return (_batch_gather_kept_indices(box_encodings),
            _batch_gather_kept_indices(objectness_predictions_with_background),
            pruned_anchors_boxlist)

  def _flatten_first_two_dimensions(self, inputs):
    """Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.

    Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape
    [A * B, ..., depth].

    Args:
      inputs: A float tensor with shape [A, B, ..., depth].  Note that the
        first two and last dimensions must be statically defined.

    Returns:
      A float tensor with shape [A * B, ..., depth] (where the first and last
        dimensions are statically defined).
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)
    flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +
                               combined_shape[2:])
    return tf.reshape(inputs, flattened_shape)

  def postprocess(self, prediction_dict, true_image_shapes):
    """Convert prediction tensors to final detections.
This function converts raw predictions tensors to final detection results. See base class for output format conventions. Note also that by default, scores are to be interpreted as logits, but if a score_converter is used, then scores are remapped (and may thus have a different interpretation). If number_of_stages=1, the returned results represent proposals from the first stage RPN and are padded to have self.max_num_proposals for each image; otherwise, the results can be interpreted as multiclass detections from the full two-stage model and are padded to self._max_detections. Args: prediction_dict: a dictionary holding prediction tensors (see the documentation for the predict method. If number_of_stages=1, we expect prediction_dict to contain `rpn_box_encodings`, `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, and `anchors` fields. Otherwise we expect prediction_dict to additionally contain `refined_box_encodings`, `class_predictions_with_background`, `num_proposals`, `proposal_boxes` and, optionally, `mask_predictions` fields. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: detections: a dictionary containing the following fields detection_boxes: [batch, max_detection, 4] detection_scores: [batch, max_detections] detection_multiclass_scores: [batch, max_detections, 2] detection_anchor_indices: [batch, max_detections] detection_classes: [batch, max_detections] (this entry is only created if rpn_mode=False) num_detections: [batch] raw_detection_boxes: [batch, total_detections, 4] raw_detection_scores: [batch, total_detections, num_classes + 1] Raises: ValueError: If `predict` is called before `preprocess`. ValueError: If `_output_final_box_features` is true but rpn_features_to_crop is not in the prediction_dict. """ with tf.name_scope('FirstStagePostprocessor'): if self._number_of_stages == 1: image_shapes = self._image_batch_shape_2d( prediction_dict['image_shape']) (proposal_boxes, proposal_scores, proposal_multiclass_scores, num_proposals, raw_proposal_boxes, raw_proposal_scores) = self._postprocess_rpn( prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors'], image_shapes, true_image_shapes) return { fields.DetectionResultFields.detection_boxes: proposal_boxes, fields.DetectionResultFields.detection_scores: proposal_scores, fields.DetectionResultFields.detection_multiclass_scores: proposal_multiclass_scores, fields.DetectionResultFields.num_detections: tf.cast(num_proposals, dtype=tf.float32), fields.DetectionResultFields.raw_detection_boxes: raw_proposal_boxes, fields.DetectionResultFields.raw_detection_scores: raw_proposal_scores } # TODO(jrru): Remove mask_predictions from _post_process_box_classifier. 
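    # Roadmap of the remaining branches (explanatory comment added here, a
    # sketch of the control flow rather than original documentation):
    #   * number_of_stages == 1: handled above -- RPN proposals are returned
    #     directly as detections.
    #   * number_of_stages == 2, or == 3 while training: the second-stage box
    #     classifier outputs are converted to detections below.
    #   * number_of_stages == 3 at inference: detections were already produced
    #     inside _predict_third_stage, so they are only copied through below.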
if (self._number_of_stages == 2 or (self._number_of_stages == 3 and self._is_training)): with tf.name_scope('SecondStagePostprocessor'): mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS) detections_dict = self._postprocess_box_classifier( prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['proposal_boxes'], prediction_dict['num_proposals'], true_image_shapes, mask_predictions=mask_predictions) if self._output_final_box_features: if 'rpn_features_to_crop' not in prediction_dict: raise ValueError( 'Please make sure rpn_features_to_crop is in the prediction_dict.' ) detections_dict[ 'detection_features'] = ( self._add_detection_box_boxclassifier_features_output_node( detections_dict[ fields.DetectionResultFields.detection_boxes], prediction_dict['rpn_features_to_crop'], prediction_dict['image_shape'])) if self._output_final_box_rpn_features: if 'rpn_features_to_crop' not in prediction_dict: raise ValueError( 'Please make sure rpn_features_to_crop is in the prediction_dict.' ) detections_dict['cropped_rpn_box_features'] = ( self._add_detection_box_rpn_features_output_node( detections_dict[fields.DetectionResultFields.detection_boxes], prediction_dict['rpn_features_to_crop'], prediction_dict['image_shape'])) return detections_dict if self._number_of_stages == 3: # Post processing is already performed in 3rd stage. We need to transfer # postprocessed tensors from `prediction_dict` to `detections_dict`. # Remove any items from the prediction dictionary if they are not pure # Tensors. non_tensor_predictions = [ k for k, v in prediction_dict.items() if not isinstance(v, tf.Tensor)] for k in non_tensor_predictions: tf.logging.info('Removing {0} from prediction_dict'.format(k)) prediction_dict.pop(k) return prediction_dict def _add_detection_box_boxclassifier_features_output_node( self, detection_boxes, rpn_features_to_crop, image_shape): """Add detection features to outputs. This function extracts box features for each box in rpn_features_to_crop. It returns the extracted box features, reshaped to [batch size, max_detections, height, width, depth], and average pools the extracted features across the spatial dimensions and adds a graph node to the pooled features named 'pooled_detection_features' Args: detection_boxes: a 3-D float32 tensor of shape [batch_size, max_detections, 4] which represents the bounding boxes. rpn_features_to_crop: A list of 4-D float32 tensor with shape [batch, height, width, depth] representing image features to crop using the proposals boxes. image_shape: a 1-D tensor of shape [4] representing the image shape. 
Returns: detection_features: a 4-D float32 tensor of shape [batch size, max_detections, height, width, depth] representing cropped image features """ with tf.name_scope('SecondStageDetectionFeaturesExtract'): flattened_detected_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, detection_boxes, image_shape)) detection_features_unpooled = self._extract_box_classifier_features( flattened_detected_feature_maps) batch_size = tf.shape(detection_boxes)[0] max_detections = tf.shape(detection_boxes)[1] detection_features_pool = tf.reduce_mean( detection_features_unpooled, axis=[1, 2]) reshaped_detection_features_pool = tf.reshape( detection_features_pool, [batch_size, max_detections, tf.shape(detection_features_pool)[-1]]) reshaped_detection_features_pool = tf.identity( reshaped_detection_features_pool, 'pooled_detection_features') # TODO(sbeery) add node to extract rpn features here!! reshaped_detection_features = tf.reshape( detection_features_unpooled, [batch_size, max_detections, tf.shape(detection_features_unpooled)[1], tf.shape(detection_features_unpooled)[2], tf.shape(detection_features_unpooled)[3]]) return reshaped_detection_features def _add_detection_box_rpn_features_output_node(self, detection_boxes, rpn_features_to_crop, image_shape): """Add detection features to outputs. This function extracts box features for each box in rpn_features_to_crop. It returns the extracted box features, reshaped to [batch size, max_detections, height, width, depth] Args: detection_boxes: a 3-D float32 tensor of shape [batch_size, max_detections, 4] which represents the bounding boxes. rpn_features_to_crop: A list of 4-D float32 tensor with shape [batch, height, width, depth] representing image features to crop using the proposals boxes. image_shape: a 1-D tensor of shape [4] representing the image shape. Returns: detection_features: a 4-D float32 tensor of shape [batch size, max_detections, height, width, depth] representing cropped image features """ with tf.name_scope('FirstStageDetectionFeaturesExtract'): flattened_detected_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, detection_boxes, image_shape)) batch_size = tf.shape(detection_boxes)[0] max_detections = tf.shape(detection_boxes)[1] reshaped_detection_features = tf.reshape( flattened_detected_feature_maps, [batch_size, max_detections, tf.shape(flattened_detected_feature_maps)[1], tf.shape(flattened_detected_feature_maps)[2], tf.shape(flattened_detected_feature_maps)[3]]) return reshaped_detection_features def _postprocess_rpn(self, rpn_box_encodings_batch, rpn_objectness_predictions_with_background_batch, anchors, image_shapes, true_image_shapes): """Converts first stage prediction tensors from the RPN to proposals. This function decodes the raw RPN predictions, runs non-max suppression on the result. Note that the behavior of this function is slightly modified during training --- specifically, we stop the gradient from passing through the proposal boxes and we only return a balanced sampled subset of proposals with size `second_stage_batch_size`. Args: rpn_box_encodings_batch: A 3-D float32 tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted proposal box encodings. rpn_objectness_predictions_with_background_batch: A 3-D float tensor of shape [batch_size, num_anchors, 2] containing objectness predictions (logits) for each of the anchors with 0 corresponding to background and 1 corresponding to object. 
anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN. Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of images in the batch. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: proposal_boxes: A float tensor with shape [batch_size, max_num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented as normalized coordinates. proposal_scores: A float tensor with shape [batch_size, max_num_proposals] representing the (potentially zero padded) proposal objectness scores for all images in the batch. proposal_multiclass_scores: A float tensor with shape [batch_size, max_num_proposals, 2] representing the (potentially zero padded) proposal multiclass scores for all images in the batch. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. raw_detection_boxes: [batch, total_detections, 4] tensor with decoded proposal boxes before Non-Max Suppression. raw_detection_scores: [batch, total_detections, num_classes_with_background] tensor of multi-class scores for raw proposal boxes. """ rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2) rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape( rpn_box_encodings_batch) tiled_anchor_boxes = tf.tile( tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1]) proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch, tiled_anchor_boxes) raw_proposal_boxes = tf.squeeze(proposal_boxes, axis=2) rpn_objectness_softmax = tf.nn.softmax( rpn_objectness_predictions_with_background_batch) rpn_objectness_softmax_without_background = rpn_objectness_softmax[:, :, 1] clip_window = self._compute_clip_window(true_image_shapes) additional_fields = {'multiclass_scores': rpn_objectness_softmax} (proposal_boxes, proposal_scores, _, _, nmsed_additional_fields, num_proposals) = self._first_stage_nms_fn( tf.expand_dims(raw_proposal_boxes, axis=2), tf.expand_dims(rpn_objectness_softmax_without_background, axis=2), additional_fields=additional_fields, clip_window=clip_window) if self._is_training: proposal_boxes = tf.stop_gradient(proposal_boxes) if not self._hard_example_miner: (groundtruth_boxlists, groundtruth_classes_with_background_list, _, groundtruth_weights_list ) = self._format_groundtruth_data(image_shapes) (proposal_boxes, proposal_scores, num_proposals) = self._sample_box_classifier_batch( proposal_boxes, proposal_scores, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list) # normalize proposal boxes def normalize_boxes(args): proposal_boxes_per_image = args[0] image_shape = args[1] normalized_boxes_per_image = box_list_ops.to_normalized_coordinates( box_list.BoxList(proposal_boxes_per_image), image_shape[0], image_shape[1], check_range=False).get() return normalized_boxes_per_image normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn( normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32) raw_normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn( normalize_boxes, elems=[raw_proposal_boxes, image_shapes], dtype=tf.float32) 
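    # Worked example (added comment, not original code): for an absolute
    # proposal [10., 20., 110., 220.] on a 300x400 image, `normalize_boxes`
    # above yields roughly [10/300, 20/400, 110/300, 220/400]
    # = [0.033, 0.05, 0.367, 0.55] in [ymin, xmin, ymax, xmax] order.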
proposal_multiclass_scores = ( nmsed_additional_fields.get('multiclass_scores') if nmsed_additional_fields else None) return (normalized_proposal_boxes, proposal_scores, proposal_multiclass_scores, num_proposals, raw_normalized_proposal_boxes, rpn_objectness_softmax) def _sample_box_classifier_batch( self, proposal_boxes, proposal_scores, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list): """Samples a minibatch for second stage. Args: proposal_boxes: A float tensor with shape [batch_size, num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented in absolute coordinates. proposal_scores: A float tensor with shape [batch_size, num_proposals] representing the (potentially zero padded) proposal objectness scores for all images in the batch. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: A list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the class targets with the 0th index assumed to map to the background class. groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes] indicating the weight associated with the groundtruth boxes. Returns: proposal_boxes: A float tensor with shape [batch_size, second_stage_batch_size, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented in absolute coordinates. proposal_scores: A float tensor with shape [batch_size, second_stage_batch_size] representing the (potentially zero padded) proposal objectness scores for all images in the batch. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. 
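    As an illustration (added note; the batch size is an assumed value): with
    `second_stage_batch_size=64`, each image's proposals are subsampled and,
    if necessary, zero padded to 64 boxes, so the returned proposal_boxes
    tensor has shape [batch_size, 64, 4].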
""" single_image_proposal_box_sample = [] single_image_proposal_score_sample = [] single_image_num_proposals_sample = [] for (single_image_proposal_boxes, single_image_proposal_scores, single_image_num_proposals, single_image_groundtruth_boxlist, single_image_groundtruth_classes_with_background, single_image_groundtruth_weights) in zip( tf.unstack(proposal_boxes), tf.unstack(proposal_scores), tf.unstack(num_proposals), groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list): single_image_boxlist = box_list.BoxList(single_image_proposal_boxes) single_image_boxlist.add_field(fields.BoxListFields.scores, single_image_proposal_scores) sampled_boxlist = self._sample_box_classifier_minibatch_single_image( single_image_boxlist, single_image_num_proposals, single_image_groundtruth_boxlist, single_image_groundtruth_classes_with_background, single_image_groundtruth_weights) sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list( sampled_boxlist, num_boxes=self._second_stage_batch_size) single_image_num_proposals_sample.append(tf.minimum( sampled_boxlist.num_boxes(), self._second_stage_batch_size)) bb = sampled_padded_boxlist.get() single_image_proposal_box_sample.append(bb) single_image_proposal_score_sample.append( sampled_padded_boxlist.get_field(fields.BoxListFields.scores)) return (tf.stack(single_image_proposal_box_sample), tf.stack(single_image_proposal_score_sample), tf.stack(single_image_num_proposals_sample)) def _format_groundtruth_data(self, image_shapes): """Helper function for preparing groundtruth data for target assignment. In order to be consistent with the model.DetectionModel interface, groundtruth boxes are specified in normalized coordinates and classes are specified as label indices with no assumed background category. To prepare for target assignment, we: 1) convert boxes to absolute coordinates, 2) add a background class at class index 0 3) groundtruth instance masks, if available, are resized to match image_shape. Args: image_shapes: a 2-D int32 tensor of shape [batch_size, 3] containing shapes of input image in the batch. Returns: groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: A list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the class targets with the 0th index assumed to map to the background class. groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of shape [num_boxes, image_height, image_width] containing instance masks. This is set to None if no masks exist in the provided groundtruth. """ # pylint: disable=g-complex-comprehension groundtruth_boxlists = [ box_list_ops.to_absolute_coordinates( box_list.BoxList(boxes), image_shapes[i, 0], image_shapes[i, 1]) for i, boxes in enumerate( self.groundtruth_lists(fields.BoxListFields.boxes)) ] groundtruth_classes_with_background_list = [] for one_hot_encoding in self.groundtruth_lists( fields.BoxListFields.classes): groundtruth_classes_with_background_list.append( tf.cast( tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'), dtype=tf.float32)) groundtruth_masks_list = self._groundtruth_lists.get( fields.BoxListFields.masks) # TODO(rathodv): Remove mask resizing once the legacy pipeline is deleted. if groundtruth_masks_list is not None and self._resize_masks: resized_masks_list = [] for mask in groundtruth_masks_list: _, resized_mask, _ = self._image_resizer_fn( # Reuse the given `image_resizer_fn` to resize groundtruth masks. 
# `mask` tensor for an image is of the shape [num_masks, # image_height, image_width]. Below we create a dummy image of the # the shape [image_height, image_width, 1] to use with # `image_resizer_fn`. image=tf.zeros(tf.stack([tf.shape(mask)[1], tf.shape(mask)[2], 1])), masks=mask) resized_masks_list.append(resized_mask) groundtruth_masks_list = resized_masks_list # Masks could be set to bfloat16 in the input pipeline for performance # reasons. Convert masks back to floating point space here since the rest of # this module assumes groundtruth to be of float32 type. float_groundtruth_masks_list = [] if groundtruth_masks_list: for mask in groundtruth_masks_list: float_groundtruth_masks_list.append(tf.cast(mask, tf.float32)) groundtruth_masks_list = float_groundtruth_masks_list if self.groundtruth_has_field(fields.BoxListFields.weights): groundtruth_weights_list = self.groundtruth_lists( fields.BoxListFields.weights) else: # Set weights for all batch elements equally to 1.0 groundtruth_weights_list = [] for groundtruth_classes in groundtruth_classes_with_background_list: num_gt = tf.shape(groundtruth_classes)[0] groundtruth_weights = tf.ones(num_gt) groundtruth_weights_list.append(groundtruth_weights) return (groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_masks_list, groundtruth_weights_list) def _sample_box_classifier_minibatch_single_image( self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist, groundtruth_classes_with_background, groundtruth_weights): """Samples a mini-batch of proposals to be sent to the box classifier. Helper function for self._postprocess_rpn. Args: proposal_boxlist: A BoxList containing K proposal boxes in absolute coordinates. num_valid_proposals: Number of valid proposals in the proposal boxlist. groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in absolute coordinates. groundtruth_classes_with_background: A tensor with shape `[N, self.num_classes + 1]` representing groundtruth classes. The classes are assumed to be k-hot encoded, and include background as the zero-th class. groundtruth_weights: Weights attached to the groundtruth_boxes. Returns: a BoxList contained sampled proposals. """ (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign( proposal_boxlist, groundtruth_boxlist, groundtruth_classes_with_background, unmatched_class_label=tf.constant( [1] + self._num_classes * [0], dtype=tf.float32), groundtruth_weights=groundtruth_weights) # Selects all boxes as candidates if none of them is selected according # to cls_weights. This could happen as boxes within certain IOU ranges # are ignored. If triggered, the selected boxes will still be ignored # during loss computation. cls_weights = tf.reduce_mean(cls_weights, axis=-1) positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0) valid_indicator = tf.logical_and( tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals, cls_weights > 0 ) selected_positions = self._second_stage_sampler.subsample( valid_indicator, self._second_stage_batch_size, positive_indicator) return box_list_ops.boolean_mask( proposal_boxlist, selected_positions, use_static_shapes=self._use_static_shapes, indicator_sum=(self._second_stage_batch_size if self._use_static_shapes else None)) def _compute_second_stage_input_feature_maps(self, features_to_crop, proposal_boxes_normalized, image_shape, **side_inputs): """Crops to a set of proposals from the feature map for a batch of images. Helper function for self._postprocess_rpn. 
This function calls `tf.image.crop_and_resize` to create the feature map to be passed to the second stage box classifier for each proposal. Args: features_to_crop: A float32 tensor with shape [batch_size, height, width, depth] proposal_boxes_normalized: A float32 tensor with shape [batch_size, num_proposals, box_code_size] containing proposal boxes in normalized coordinates. image_shape: A 1D int32 tensors of size [4] containing the image shape. **side_inputs: additional tensors that are required by the network. Returns: A float32 tensor with shape [K, new_height, new_width, depth]. """ num_levels = len(features_to_crop) box_levels = None if num_levels != 1: # If there are multiple levels to select, get the box levels # unit_scale_index: num_levels-2 is chosen based on section 4.2 of # https://arxiv.org/pdf/1612.03144.pdf and works best for Resnet based # feature extractor. box_levels = ops.fpn_feature_levels( num_levels, num_levels - 2, tf.sqrt(tf.cast(image_shape[1] * image_shape[2], tf.float32)) / 224.0, proposal_boxes_normalized) cropped_regions = self._flatten_first_two_dimensions( self._crop_and_resize_fn( features_to_crop, proposal_boxes_normalized, box_levels, [self._initial_crop_size, self._initial_crop_size])) return self._maxpool_layer(cropped_regions) def _postprocess_box_classifier(self, refined_box_encodings, class_predictions_with_background, proposal_boxes, num_proposals, image_shapes, mask_predictions=None): """Converts predictions from the second stage box classifier to detections. Args: refined_box_encodings: a 3-D float tensor with shape [total_num_padded_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings. If using a shared box across classes the shape will instead be [total_num_padded_proposals, 1, 4] class_predictions_with_background: a 2-D tensor float with shape [total_num_padded_proposals, num_classes + 1] containing class predictions (logits) for each of the proposals. Note that this tensor *includes* background class predictions (at class index 0). proposal_boxes: a 3-D float tensor with shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. num_proposals: a 1-D int32 tensor of shape [batch] representing the number of proposals predicted for each image in the batch. image_shapes: a 2-D int32 tensor containing shapes of input image in the batch. mask_predictions: (optional) a 4-D float tensor with shape [total_num_padded_proposals, num_classes, mask_height, mask_width] containing instance mask prediction logits. Returns: A dictionary containing: `detection_boxes`: [batch, max_detection, 4] in normalized co-ordinates. `detection_scores`: [batch, max_detections] `detection_multiclass_scores`: [batch, max_detections, num_classes_with_background] tensor with class score distribution for post-processed detection boxes including background class if any. `detection_anchor_indices`: [batch, max_detections] with anchor indices. `detection_classes`: [batch, max_detections] `num_detections`: [batch] `detection_masks`: (optional) [batch, max_detections, mask_height, mask_width]. Note that a pixel-wise sigmoid score converter is applied to the detection masks. `raw_detection_boxes`: [batch, total_detections, 4] tensor with decoded detection boxes in normalized coordinates, before Non-Max Suppression. The value total_detections is the number of second stage anchors (i.e. the total number of boxes before NMS). 
`raw_detection_scores`: [batch, total_detections, num_classes_with_background] tensor of multi-class scores for raw detection boxes. The value total_detections is the number of second stage anchors (i.e. the total number of boxes before NMS). """ refined_box_encodings_batch = tf.reshape( refined_box_encodings, [-1, self.max_num_proposals, refined_box_encodings.shape[1], self._box_coder.code_size]) class_predictions_with_background_batch = tf.reshape( class_predictions_with_background, [-1, self.max_num_proposals, self.num_classes + 1] ) refined_decoded_boxes_batch = self._batch_decode_boxes( refined_box_encodings_batch, proposal_boxes) class_predictions_with_background_batch_normalized = ( self._second_stage_score_conversion_fn( class_predictions_with_background_batch)) class_predictions_batch = tf.reshape( tf.slice(class_predictions_with_background_batch_normalized, [0, 0, 1], [-1, -1, -1]), [-1, self.max_num_proposals, self.num_classes]) clip_window = self._compute_clip_window(image_shapes) mask_predictions_batch = None if mask_predictions is not None: mask_height = shape_utils.get_dim_as_int(mask_predictions.shape[2]) mask_width = shape_utils.get_dim_as_int(mask_predictions.shape[3]) mask_predictions = tf.sigmoid(mask_predictions) mask_predictions_batch = tf.reshape( mask_predictions, [-1, self.max_num_proposals, self.num_classes, mask_height, mask_width]) batch_size = shape_utils.combined_static_and_dynamic_shape( refined_box_encodings_batch)[0] batch_anchor_indices = tf.tile( tf.expand_dims(tf.range(self.max_num_proposals), 0), multiples=[batch_size, 1]) additional_fields = { 'multiclass_scores': class_predictions_with_background_batch_normalized, 'anchor_indices': tf.cast(batch_anchor_indices, tf.float32) } (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections) = self._second_stage_nms_fn( refined_decoded_boxes_batch, class_predictions_batch, clip_window=clip_window, change_coordinate_frame=True, num_valid_boxes=num_proposals, additional_fields=additional_fields, masks=mask_predictions_batch) if refined_decoded_boxes_batch.shape[2] > 1: class_ids = tf.expand_dims( tf.argmax(class_predictions_with_background_batch[:, :, 1:], axis=2, output_type=tf.int32), axis=-1) raw_detection_boxes = tf.squeeze( tf.batch_gather(refined_decoded_boxes_batch, class_ids), axis=2) else: raw_detection_boxes = tf.squeeze(refined_decoded_boxes_batch, axis=2) raw_normalized_detection_boxes = shape_utils.static_or_dynamic_map_fn( self._normalize_and_clip_boxes, elems=[raw_detection_boxes, image_shapes], dtype=tf.float32) detections = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.detection_multiclass_scores: nmsed_additional_fields['multiclass_scores'], fields.DetectionResultFields.detection_anchor_indices: tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32), fields.DetectionResultFields.num_detections: tf.cast(num_detections, dtype=tf.float32), fields.DetectionResultFields.raw_detection_boxes: raw_normalized_detection_boxes, fields.DetectionResultFields.raw_detection_scores: class_predictions_with_background_batch_normalized } if nmsed_masks is not None: detections[fields.DetectionResultFields.detection_masks] = nmsed_masks return detections def _batch_decode_boxes(self, box_encodings, anchor_boxes): """Decodes box encodings with respect to the anchor boxes. 
Args: box_encodings: a 4-D tensor with shape [batch_size, num_anchors, num_classes, self._box_coder.code_size] representing box encodings. anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size] representing decoded bounding boxes. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. Returns: decoded_boxes: a [batch_size, num_anchors, num_classes, self._box_coder.code_size] float tensor representing bounding box predictions (for each image in batch, proposal and class). If using a shared box across classes the shape will instead be [batch_size, num_anchors, 1, self._box_coder.code_size]. """ combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) num_classes = combined_shape[2] tiled_anchor_boxes = tf.tile( tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1]) tiled_anchors_boxlist = box_list.BoxList( tf.reshape(tiled_anchor_boxes, [-1, 4])) decoded_boxes = self._box_coder.decode( tf.reshape(box_encodings, [-1, self._box_coder.code_size]), tiled_anchors_boxlist) return tf.reshape(decoded_boxes.get(), tf.stack([combined_shape[0], combined_shape[1], num_classes, 4])) def _normalize_and_clip_boxes(self, boxes_and_image_shape): """Normalize and clip boxes.""" boxes_per_image = boxes_and_image_shape[0] image_shape = boxes_and_image_shape[1] boxes_contains_classes_dim = boxes_per_image.shape.ndims == 3 if boxes_contains_classes_dim: boxes_per_image = shape_utils.flatten_first_n_dimensions( boxes_per_image, 2) normalized_boxes_per_image = box_list_ops.to_normalized_coordinates( box_list.BoxList(boxes_per_image), image_shape[0], image_shape[1], check_range=False).get() normalized_boxes_per_image = box_list_ops.clip_to_window( box_list.BoxList(normalized_boxes_per_image), tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32), filter_nonoverlapping=False).get() if boxes_contains_classes_dim: max_num_proposals, num_classes, _ = ( shape_utils.combined_static_and_dynamic_shape( boxes_and_image_shape[0])) normalized_boxes_per_image = shape_utils.expand_first_dimension( normalized_boxes_per_image, [max_num_proposals, num_classes]) return normalized_boxes_per_image def loss(self, prediction_dict, true_image_shapes, scope=None): """Compute scalar loss tensors given prediction tensors. If number_of_stages=1, only RPN related losses are computed (i.e., `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all losses are computed. Args: prediction_dict: a dictionary holding prediction tensors (see the documentation for the predict method. If number_of_stages=1, we expect prediction_dict to contain `rpn_box_encodings`, `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, `image_shape`, and `anchors` fields. Otherwise we expect prediction_dict to additionally contain `refined_box_encodings`, `class_predictions_with_background`, `num_proposals`, and `proposal_boxes` fields. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. scope: Optional scope name. Returns: a dictionary mapping loss keys (`first_stage_localization_loss`, `first_stage_objectness_loss`, 'second_stage_localization_loss', 'second_stage_classification_loss') to scalar tensors representing corresponding loss values. 
""" with tf.name_scope(scope, 'Loss', prediction_dict.values()): (groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_masks_list, groundtruth_weights_list ) = self._format_groundtruth_data( self._image_batch_shape_2d(prediction_dict['image_shape'])) loss_dict = self._loss_rpn( prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors'], groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list) if self._number_of_stages > 1: loss_dict.update( self._loss_box_classifier( prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['proposal_boxes'], prediction_dict['num_proposals'], groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list, prediction_dict['image_shape'], prediction_dict.get('mask_predictions'), groundtruth_masks_list, prediction_dict.get( fields.DetectionResultFields.detection_boxes), prediction_dict.get( fields.DetectionResultFields.num_detections))) return loss_dict def _loss_rpn(self, rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list): """Computes scalar RPN loss tensors. Uses self._proposal_target_assigner to obtain regression and classification targets for the first stage RPN, samples a "minibatch" of anchors to participate in the loss computation, and returns the RPN losses. Args: rpn_box_encodings: A 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted proposal box encodings. rpn_objectness_predictions_with_background: A 2-D float tensor of shape [batch_size, num_anchors, 2] containing objectness predictions (logits) for each of the anchors with 0 corresponding to background and 1 corresponding to object. anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN. Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. groundtruth_boxlists: A list of BoxLists containing coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: A list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the class targets with the 0th index assumed to map to the background class. groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. Returns: a dictionary mapping loss keys (`first_stage_localization_loss`, `first_stage_objectness_loss`) to scalar tensors representing corresponding loss values. 
""" with tf.name_scope('RPNLoss'): (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, _) = target_assigner.batch_assign_targets( target_assigner=self._proposal_target_assigner, anchors_batch=box_list.BoxList(anchors), gt_box_batch=groundtruth_boxlists, gt_class_targets_batch=(len(groundtruth_boxlists) * [None]), gt_weights_batch=groundtruth_weights_list) batch_cls_weights = tf.reduce_mean(batch_cls_weights, axis=2) batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2) def _minibatch_subsample_fn(inputs): cls_targets, cls_weights = inputs return self._first_stage_sampler.subsample( tf.cast(cls_weights, tf.bool), self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool)) batch_sampled_indices = tf.cast(shape_utils.static_or_dynamic_map_fn( _minibatch_subsample_fn, [batch_cls_targets, batch_cls_weights], dtype=tf.bool, parallel_iterations=self._parallel_iterations, back_prop=True), dtype=tf.float32) # Normalize by number of examples in sampled minibatch normalizer = tf.maximum( tf.reduce_sum(batch_sampled_indices, axis=1), 1.0) batch_one_hot_targets = tf.one_hot( tf.cast(batch_cls_targets, dtype=tf.int32), depth=2) sampled_reg_indices = tf.multiply(batch_sampled_indices, batch_reg_weights) losses_mask = None if self.groundtruth_has_field(fields.InputDataFields.is_annotated): losses_mask = tf.stack(self.groundtruth_lists( fields.InputDataFields.is_annotated)) localization_losses = self._first_stage_localization_loss( rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices, losses_mask=losses_mask) objectness_losses = self._first_stage_objectness_loss( rpn_objectness_predictions_with_background, batch_one_hot_targets, weights=tf.expand_dims(batch_sampled_indices, axis=-1), losses_mask=losses_mask) localization_loss = tf.reduce_mean( tf.reduce_sum(localization_losses, axis=1) / normalizer) objectness_loss = tf.reduce_mean( tf.reduce_sum(objectness_losses, axis=1) / normalizer) localization_loss = tf.multiply(self._first_stage_loc_loss_weight, localization_loss, name='localization_loss') objectness_loss = tf.multiply(self._first_stage_obj_loss_weight, objectness_loss, name='objectness_loss') loss_dict = {'Loss/RPNLoss/localization_loss': localization_loss, 'Loss/RPNLoss/objectness_loss': objectness_loss} return loss_dict def _loss_box_classifier(self, refined_box_encodings, class_predictions_with_background, proposal_boxes, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list, image_shape, prediction_masks=None, groundtruth_masks_list=None, detection_boxes=None, num_detections=None): """Computes scalar box classifier loss tensors. Uses self._detector_target_assigner to obtain regression and classification targets for the second stage box classifier, optionally performs hard mining, and returns losses. All losses are computed independently for each image and then averaged across the batch. Please note that for boxes and masks with multiple labels, the box regression and mask prediction losses are only computed for one label. This function assumes that the proposal boxes in the "padded" regions are actually zero (and thus should not be matched to). Args: refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, box_coder.code_size] representing predicted (final) refined box encodings. If using a shared box across classes this will instead have shape [total_num_proposals, 1, box_coder.code_size]. 
class_predictions_with_background: a 2-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). proposal_boxes: [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. groundtruth_boxlists: a list of BoxLists containing coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: a list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the class targets with the 0th index assumed to map to the background class. groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. image_shape: a 1-D tensor of shape [4] representing the image shape. prediction_masks: an optional 4-D tensor with shape [total_num_proposals, num_classes, mask_height, mask_width] containing the instance masks for each box. groundtruth_masks_list: an optional list of 3-D tensors of shape [num_boxes, image_height, image_width] containing the instance masks for each of the boxes. detection_boxes: 3-D float tensor of shape [batch, max_total_detections, 4] containing post-processed detection boxes in normalized co-ordinates. num_detections: 1-D int32 tensor of shape [batch] containing number of valid detections in `detection_boxes`. Returns: a dictionary mapping loss keys ('second_stage_localization_loss', 'second_stage_classification_loss') to scalar tensors representing corresponding loss values. Raises: ValueError: if `predict_instance_masks` in second_stage_mask_rcnn_box_predictor is True and `groundtruth_masks_list` is not provided. 
""" with tf.name_scope('BoxClassifierLoss'): paddings_indicator = self._padded_batched_proposals_indicator( num_proposals, proposal_boxes.shape[1]) proposal_boxlists = [ box_list.BoxList(proposal_boxes_single_image) for proposal_boxes_single_image in tf.unstack(proposal_boxes)] batch_size = len(proposal_boxlists) num_proposals_or_one = tf.cast(tf.expand_dims( tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1), dtype=tf.float32) normalizer = tf.tile(num_proposals_or_one, [1, self.max_num_proposals]) * batch_size (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets, batch_reg_weights, _) = target_assigner.batch_assign_targets( target_assigner=self._detector_target_assigner, anchors_batch=proposal_boxlists, gt_box_batch=groundtruth_boxlists, gt_class_targets_batch=groundtruth_classes_with_background_list, unmatched_class_label=tf.constant( [1] + self._num_classes * [0], dtype=tf.float32), gt_weights_batch=groundtruth_weights_list) if self.groundtruth_has_field( fields.InputDataFields.groundtruth_labeled_classes): gt_labeled_classes = self.groundtruth_lists( fields.InputDataFields.groundtruth_labeled_classes) gt_labeled_classes = tf.pad( gt_labeled_classes, [[0, 0], [1, 0]], mode='CONSTANT', constant_values=1) batch_cls_weights *= tf.expand_dims(gt_labeled_classes, 1) class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, self.max_num_proposals, -1]) flat_cls_targets_with_background = tf.reshape( batch_cls_targets_with_background, [batch_size * self.max_num_proposals, -1]) one_hot_flat_cls_targets_with_background = tf.argmax( flat_cls_targets_with_background, axis=1) one_hot_flat_cls_targets_with_background = tf.one_hot( one_hot_flat_cls_targets_with_background, flat_cls_targets_with_background.get_shape()[1]) # If using a shared box across classes use directly if refined_box_encodings.shape[1] == 1: reshaped_refined_box_encodings = tf.reshape( refined_box_encodings, [batch_size, self.max_num_proposals, self._box_coder.code_size]) # For anchors with multiple labels, picks refined_location_encodings # for just one class to avoid over-counting for regression loss and # (optionally) mask loss. 
else: reshaped_refined_box_encodings = ( self._get_refined_encodings_for_postitive_class( refined_box_encodings, one_hot_flat_cls_targets_with_background, batch_size)) losses_mask = None if self.groundtruth_has_field(fields.InputDataFields.is_annotated): losses_mask = tf.stack(self.groundtruth_lists( fields.InputDataFields.is_annotated)) second_stage_loc_losses = self._second_stage_localization_loss( reshaped_refined_box_encodings, batch_reg_targets, weights=batch_reg_weights, losses_mask=losses_mask) / normalizer second_stage_cls_losses = ops.reduce_sum_trailing_dimensions( self._second_stage_classification_loss( class_predictions_with_background, batch_cls_targets_with_background, weights=batch_cls_weights, losses_mask=losses_mask), ndims=2) / normalizer second_stage_loc_loss = tf.reduce_sum( second_stage_loc_losses * tf.cast(paddings_indicator, dtype=tf.float32)) second_stage_cls_loss = tf.reduce_sum( second_stage_cls_losses * tf.cast(paddings_indicator, dtype=tf.float32)) if self._hard_example_miner: (second_stage_loc_loss, second_stage_cls_loss ) = self._unpad_proposals_and_apply_hard_mining( proposal_boxlists, second_stage_loc_losses, second_stage_cls_losses, num_proposals) localization_loss = tf.multiply(self._second_stage_loc_loss_weight, second_stage_loc_loss, name='localization_loss') classification_loss = tf.multiply(self._second_stage_cls_loss_weight, second_stage_cls_loss, name='classification_loss') loss_dict = {'Loss/BoxClassifierLoss/localization_loss': localization_loss, 'Loss/BoxClassifierLoss/classification_loss': classification_loss} second_stage_mask_loss = None if prediction_masks is not None: if groundtruth_masks_list is None: raise ValueError('Groundtruth instance masks not provided. ' 'Please configure input reader.') if not self._is_training: (proposal_boxes, proposal_boxlists, paddings_indicator, one_hot_flat_cls_targets_with_background ) = self._get_mask_proposal_boxes_and_classes( detection_boxes, num_detections, image_shape, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list) unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32) (batch_mask_targets, _, _, batch_mask_target_weights, _) = target_assigner.batch_assign_targets( target_assigner=self._detector_target_assigner, anchors_batch=proposal_boxlists, gt_box_batch=groundtruth_boxlists, gt_class_targets_batch=groundtruth_masks_list, unmatched_class_label=unmatched_mask_label, gt_weights_batch=groundtruth_weights_list) # Pad the prediction_masks with to add zeros for background class to be # consistent with class predictions. if prediction_masks.get_shape().as_list()[1] == 1: # Class agnostic masks or masks for one-class prediction. Logic for # both cases is the same since background predictions are ignored # through the batch_mask_target_weights. 
prediction_masks_masked_by_class_targets = prediction_masks else: prediction_masks_with_background = tf.pad( prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]]) prediction_masks_masked_by_class_targets = tf.boolean_mask( prediction_masks_with_background, tf.greater(one_hot_flat_cls_targets_with_background, 0)) mask_height = shape_utils.get_dim_as_int(prediction_masks.shape[2]) mask_width = shape_utils.get_dim_as_int(prediction_masks.shape[3]) reshaped_prediction_masks = tf.reshape( prediction_masks_masked_by_class_targets, [batch_size, -1, mask_height * mask_width]) batch_mask_targets_shape = tf.shape(batch_mask_targets) flat_gt_masks = tf.reshape(batch_mask_targets, [-1, batch_mask_targets_shape[2], batch_mask_targets_shape[3]]) # Use normalized proposals to crop mask targets from image masks. flat_normalized_proposals = box_list_ops.to_normalized_coordinates( box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])), image_shape[1], image_shape[2], check_range=False).get() flat_cropped_gt_mask = self._crop_and_resize_fn( [tf.expand_dims(flat_gt_masks, -1)], tf.expand_dims(flat_normalized_proposals, axis=1), None, [mask_height, mask_width]) # Without stopping gradients into cropped groundtruth masks the # performance with 100-padded groundtruth masks when batch size > 1 is # about 4% worse. # TODO(rathodv): Investigate this since we don't expect any variables # upstream of flat_cropped_gt_mask. flat_cropped_gt_mask = tf.stop_gradient(flat_cropped_gt_mask) batch_cropped_gt_mask = tf.reshape( flat_cropped_gt_mask, [batch_size, -1, mask_height * mask_width]) mask_losses_weights = ( batch_mask_target_weights * tf.cast(paddings_indicator, dtype=tf.float32)) mask_losses = self._second_stage_mask_loss( reshaped_prediction_masks, batch_cropped_gt_mask, weights=tf.expand_dims(mask_losses_weights, axis=-1), losses_mask=losses_mask) total_mask_loss = tf.reduce_sum(mask_losses) normalizer = tf.maximum( tf.reduce_sum(mask_losses_weights * mask_height * mask_width), 1.0) second_stage_mask_loss = total_mask_loss / normalizer if second_stage_mask_loss is not None: mask_loss = tf.multiply(self._second_stage_mask_loss_weight, second_stage_mask_loss, name='mask_loss') loss_dict['Loss/BoxClassifierLoss/mask_loss'] = mask_loss return loss_dict def _get_mask_proposal_boxes_and_classes( self, detection_boxes, num_detections, image_shape, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list): """Returns proposal boxes and class targets to compute evaluation mask loss. During evaluation, detection boxes are used to extract features for mask prediction. Therefore, to compute mask loss during evaluation detection boxes must be used to compute correct class and mask targets. This function returns boxes and classes in the correct format for computing mask targets during evaluation. Args: detection_boxes: A 3-D float tensor of shape [batch, max_detection_boxes, 4] containing detection boxes in normalized co-ordinates. num_detections: A 1-D float tensor of shape [batch] containing number of valid boxes in `detection_boxes`. image_shape: A 1-D tensor of shape [4] containing image tensor shape. groundtruth_boxlists: A list of groundtruth boxlists. groundtruth_classes_with_background_list: A list of groundtruth classes. groundtruth_weights_list: A list of groundtruth weights. Return: mask_proposal_boxes: detection boxes to use for mask proposals in absolute co-ordinates. mask_proposal_boxlists: `mask_proposal_boxes` in a list of BoxLists in absolute co-ordinates. 
mask_proposal_paddings_indicator: a tensor indicating valid boxes. mask_proposal_one_hot_flat_cls_targets_with_background: Class targets computed using detection boxes. """ batch, max_num_detections, _ = detection_boxes.shape.as_list() proposal_boxes = tf.reshape(box_list_ops.to_absolute_coordinates( box_list.BoxList(tf.reshape(detection_boxes, [-1, 4])), image_shape[1], image_shape[2]).get(), [batch, max_num_detections, 4]) proposal_boxlists = [ box_list.BoxList(detection_boxes_single_image) for detection_boxes_single_image in tf.unstack(proposal_boxes) ] paddings_indicator = self._padded_batched_proposals_indicator( tf.cast(num_detections, dtype=tf.int32), detection_boxes.shape[1]) (batch_cls_targets_with_background, _, _, _, _) = target_assigner.batch_assign_targets( target_assigner=self._detector_target_assigner, anchors_batch=proposal_boxlists, gt_box_batch=groundtruth_boxlists, gt_class_targets_batch=groundtruth_classes_with_background_list, unmatched_class_label=tf.constant( [1] + self._num_classes * [0], dtype=tf.float32), gt_weights_batch=groundtruth_weights_list) flat_cls_targets_with_background = tf.reshape( batch_cls_targets_with_background, [-1, self._num_classes + 1]) one_hot_flat_cls_targets_with_background = tf.argmax( flat_cls_targets_with_background, axis=1) one_hot_flat_cls_targets_with_background = tf.one_hot( one_hot_flat_cls_targets_with_background, flat_cls_targets_with_background.get_shape()[1]) return (proposal_boxes, proposal_boxlists, paddings_indicator, one_hot_flat_cls_targets_with_background) def _get_refined_encodings_for_postitive_class( self, refined_box_encodings, flat_cls_targets_with_background, batch_size): # We only predict refined location encodings for the non background # classes, but we now pad it to make it compatible with the class # predictions refined_box_encodings_with_background = tf.pad(refined_box_encodings, [[0, 0], [1, 0], [0, 0]]) refined_box_encodings_masked_by_class_targets = ( box_list_ops.boolean_mask( box_list.BoxList( tf.reshape(refined_box_encodings_with_background, [-1, self._box_coder.code_size])), tf.reshape(tf.greater(flat_cls_targets_with_background, 0), [-1]), use_static_shapes=self._use_static_shapes, indicator_sum=batch_size * self.max_num_proposals if self._use_static_shapes else None).get()) return tf.reshape( refined_box_encodings_masked_by_class_targets, [ batch_size, self.max_num_proposals, self._box_coder.code_size ]) def _padded_batched_proposals_indicator(self, num_proposals, max_num_proposals): """Creates indicator matrix of non-pad elements of padded batch proposals. Args: num_proposals: Tensor of type tf.int32 with shape [batch_size]. max_num_proposals: Maximum number of proposals per image (integer). Returns: A Tensor of type tf.bool with shape [batch_size, max_num_proposals]. """ batch_size = tf.size(num_proposals) tiled_num_proposals = tf.tile( tf.expand_dims(num_proposals, 1), [1, max_num_proposals]) tiled_proposal_index = tf.tile( tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1]) return tf.greater(tiled_num_proposals, tiled_proposal_index) def _unpad_proposals_and_apply_hard_mining(self, proposal_boxlists, second_stage_loc_losses, second_stage_cls_losses, num_proposals): """Unpads proposals and applies hard mining. Args: proposal_boxlists: A list of `batch_size` BoxLists each representing `self.max_num_proposals` representing decoded proposal bounding boxes for each image. second_stage_loc_losses: A Tensor of type `float32`. 
A tensor of shape `[batch_size, self.max_num_proposals]` representing per-anchor second stage localization loss values. second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape `[batch_size, self.max_num_proposals]` representing per-anchor second stage classification loss values. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. Returns: second_stage_loc_loss: A scalar float32 tensor representing the second stage localization loss. second_stage_cls_loss: A scalar float32 tensor representing the second stage classification loss. """ for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss, single_image_num_proposals) in zip( proposal_boxlists, tf.unstack(second_stage_loc_losses), tf.unstack(second_stage_cls_losses), tf.unstack(num_proposals)): proposal_boxlist = box_list.BoxList( tf.slice(proposal_boxlist.get(), [0, 0], [single_image_num_proposals, -1])) single_image_loc_loss = tf.slice(single_image_loc_loss, [0], [single_image_num_proposals]) single_image_cls_loss = tf.slice(single_image_cls_loss, [0], [single_image_num_proposals]) return self._hard_example_miner( location_losses=tf.expand_dims(single_image_loc_loss, 0), cls_losses=tf.expand_dims(single_image_cls_loss, 0), decoded_boxlist_list=[proposal_boxlist]) def regularization_losses(self): """Returns a list of regularization losses for this model. Returns a list of regularization losses for this model that the estimator needs to use during training/optimization. Returns: A list of regularization loss tensors. """ all_losses = [] slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) # Copy the slim losses to avoid modifying the collection if slim_losses: all_losses.extend(slim_losses) # TODO(kaftan): Possibly raise an error if the feature extractors are # uninitialized in Keras. if self._feature_extractor_for_proposal_features: if (self._feature_extractor_for_proposal_features != _UNINITIALIZED_FEATURE_EXTRACTOR): all_losses.extend(self._feature_extractor_for_proposal_features.losses) if isinstance(self._first_stage_box_predictor_first_conv, tf.keras.Model): all_losses.extend( self._first_stage_box_predictor_first_conv.losses) if self._first_stage_box_predictor.is_keras_model: all_losses.extend(self._first_stage_box_predictor.losses) if self._feature_extractor_for_box_classifier_features: if (self._feature_extractor_for_box_classifier_features != _UNINITIALIZED_FEATURE_EXTRACTOR): all_losses.extend( self._feature_extractor_for_box_classifier_features.losses) if self._mask_rcnn_box_predictor: if self._mask_rcnn_box_predictor.is_keras_model: all_losses.extend(self._mask_rcnn_box_predictor.losses) return all_losses def restore_map(self, fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=False): """Returns a map of variables to load from a foreign checkpoint. See parent class for details. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. load_all_detection_checkpoint_vars: whether to load all variables (when `fine_tune_checkpoint_type` is `detection`). If False, only variables within the feature extractor scopes are included. Default False. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. 
Raises: ValueError: if fine_tune_checkpoint_type is neither `classification` nor `detection`. """ if fine_tune_checkpoint_type not in ['detection', 'classification']: raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( fine_tune_checkpoint_type)) if fine_tune_checkpoint_type == 'classification': return self._feature_extractor.restore_from_classification_checkpoint_fn( self.first_stage_feature_extractor_scope, self.second_stage_feature_extractor_scope) variables_to_restore = variables_helper.get_global_variables_safely() variables_to_restore.append(tf.train.get_or_create_global_step()) # Only load feature extractor variables to be consistent with loading from # a classification checkpoint. include_patterns = None if not load_all_detection_checkpoint_vars: include_patterns = [ self.first_stage_feature_extractor_scope, self.second_stage_feature_extractor_scope ] feature_extractor_variables = slim.filter_variables( variables_to_restore, include_patterns=include_patterns) return {var.op.name: var for var in feature_extractor_variables} def restore_from_objects(self, fine_tune_checkpoint_type='detection'): """Returns a map of Trackable objects to load from a foreign checkpoint. Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module or Checkpoint). This enables the model to initialize based on weights from another task. For example, the feature extractor variables from a classification model can be used to bootstrap training of an object detector. When loading from an object detection model, the checkpoint model should have the same parameters as this detection model with exception of the num_classes parameter. Note that this function is intended to be used to restore Keras-based models when running Tensorflow 2, whereas restore_map (above) is intended to be used to restore Slim-based models when running Tensorflow 1.x. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. Returns: A dict mapping keys to Trackable objects (tf.Module or Checkpoint). """ if fine_tune_checkpoint_type == 'classification': return { 'feature_extractor': self._feature_extractor.classification_backbone } elif fine_tune_checkpoint_type == 'detection': fake_model = tf.train.Checkpoint( _feature_extractor_for_box_classifier_features= self._feature_extractor_for_box_classifier_features, _feature_extractor_for_proposal_features= self._feature_extractor_for_proposal_features) return {'model': fake_model} elif fine_tune_checkpoint_type == 'full': return {'model': self} else: raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( fine_tune_checkpoint_type)) def updates(self): """Returns a list of update operators for this model. Returns a list of update operators for this model that must be executed at each training step. The estimator's train op needs to have a control dependency on these updates. Returns: A list of update operators. """ update_ops = [] slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Copy the slim ops to avoid modifying the collection if slim_update_ops: update_ops.extend(slim_update_ops) # Passing None to get_updates_for grabs updates that should always be # executed and don't depend on any model inputs in the graph. # (E.g. if there was some count that should be incremented every time a # model is run). 
# # Passing inputs grabs updates that are transitively computed from the # model inputs being passed in. # (E.g. a batchnorm update depends on the observed inputs) if self._feature_extractor_for_proposal_features: if (self._feature_extractor_for_proposal_features != _UNINITIALIZED_FEATURE_EXTRACTOR): update_ops.extend( self._feature_extractor_for_proposal_features.get_updates_for(None)) update_ops.extend( self._feature_extractor_for_proposal_features.get_updates_for( self._feature_extractor_for_proposal_features.inputs)) if isinstance(self._first_stage_box_predictor_first_conv, tf.keras.Model): update_ops.extend( self._first_stage_box_predictor_first_conv.get_updates_for( None)) update_ops.extend( self._first_stage_box_predictor_first_conv.get_updates_for( self._first_stage_box_predictor_first_conv.inputs)) if self._first_stage_box_predictor.is_keras_model: update_ops.extend( self._first_stage_box_predictor.get_updates_for(None)) update_ops.extend( self._first_stage_box_predictor.get_updates_for( self._first_stage_box_predictor.inputs)) if self._feature_extractor_for_box_classifier_features: if (self._feature_extractor_for_box_classifier_features != _UNINITIALIZED_FEATURE_EXTRACTOR): update_ops.extend( self._feature_extractor_for_box_classifier_features.get_updates_for( None)) update_ops.extend( self._feature_extractor_for_box_classifier_features.get_updates_for( self._feature_extractor_for_box_classifier_features.inputs)) if self._mask_rcnn_box_predictor: if self._mask_rcnn_box_predictor.is_keras_model: update_ops.extend( self._mask_rcnn_box_predictor.get_updates_for(None)) update_ops.extend( self._mask_rcnn_box_predictor.get_updates_for( self._mask_rcnn_box_predictor.inputs)) return update_ops
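# --- Illustrative usage sketch (not part of the original file) ---
# restore_from_objects above returns a dictionary of TF2 Trackable objects.
# The sketch below shows one way such a dictionary can be consumed: wrap the
# Trackables in a tf.train.Checkpoint and restore fine-tune weights.
# `detection_model` and `checkpoint_path` are hypothetical inputs, not names
# defined in this file.
def _example_restore_from_detection_checkpoint(detection_model, checkpoint_path):
  """Sketch: restores fine-tune weights via restore_from_objects."""
  import tensorflow as tf2  # local import so the sketch is self-contained
  restore_objects = detection_model.restore_from_objects(
      fine_tune_checkpoint_type='detection')
  ckpt = tf2.train.Checkpoint(**restore_objects)
  # expect_partial() silences warnings about checkpoint variables that are
  # intentionally left unrestored (e.g. optimizer slots).
  ckpt.restore(checkpoint_path).expect_partial()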
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/faster_rcnn_meta_arch.py
faster_rcnn_meta_arch.py
"""Deep Mask heads above CenterNet (DeepMAC) architecture. TODO(vighneshb) Add link to paper when done. """ import collections import numpy as np import tensorflow as tf from object_detection.builders import losses_builder from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import losses from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.meta_architectures import center_net_meta_arch from object_detection.models.keras_models import hourglass_network from object_detection.models.keras_models import resnet_v1 from object_detection.protos import losses_pb2 from object_detection.protos import preprocessor_pb2 from object_detection.utils import shape_utils from object_detection.utils import spatial_transform_ops INSTANCE_EMBEDDING = 'INSTANCE_EMBEDDING' PIXEL_EMBEDDING = 'PIXEL_EMBEDDING' DEEP_MASK_ESTIMATION = 'deep_mask_estimation' LOSS_KEY_PREFIX = center_net_meta_arch.LOSS_KEY_PREFIX class DeepMACParams( collections.namedtuple('DeepMACParams', [ 'classification_loss', 'dim', 'task_loss_weight', 'pixel_embedding_dim', 'allowed_masked_classes_ids', 'mask_size', 'mask_num_subsamples', 'use_xy', 'network_type', 'use_instance_embedding', 'num_init_channels', 'predict_full_resolution_masks', 'postprocess_crop_size', 'max_roi_jitter_ratio', 'roi_jitter_mode' ])): """Class holding the DeepMAC network configutration.""" __slots__ = () def __new__(cls, classification_loss, dim, task_loss_weight, pixel_embedding_dim, allowed_masked_classes_ids, mask_size, mask_num_subsamples, use_xy, network_type, use_instance_embedding, num_init_channels, predict_full_resolution_masks, postprocess_crop_size, max_roi_jitter_ratio, roi_jitter_mode): return super(DeepMACParams, cls).__new__(cls, classification_loss, dim, task_loss_weight, pixel_embedding_dim, allowed_masked_classes_ids, mask_size, mask_num_subsamples, use_xy, network_type, use_instance_embedding, num_init_channels, predict_full_resolution_masks, postprocess_crop_size, max_roi_jitter_ratio, roi_jitter_mode) def subsample_instances(classes, weights, boxes, masks, num_subsamples): """Randomly subsamples instances to the desired number. Args: classes: [num_instances, num_classes] float tensor of one-hot encoded classes. weights: [num_instances] float tensor of weights of each instance. boxes: [num_instances, 4] tensor of box coordinates. masks: [num_instances, height, width] tensor of per-instance masks. num_subsamples: int, the desired number of samples. Returns: classes: [num_subsamples, num_classes] float tensor of classes. weights: [num_subsamples] float tensor of weights. boxes: [num_subsamples, 4] float tensor of box coordinates. masks: [num_subsamples, height, width] float tensor of per-instance masks. 
""" if num_subsamples <= -1: return classes, weights, boxes, masks num_instances = tf.reduce_sum(tf.cast(weights > 0.5, tf.int32)) if num_instances <= num_subsamples: return (classes[:num_subsamples], weights[:num_subsamples], boxes[:num_subsamples], masks[:num_subsamples]) else: random_index = tf.random.uniform([num_subsamples], 0, num_instances, dtype=tf.int32) return (tf.gather(classes, random_index), tf.gather(weights, random_index), tf.gather(boxes, random_index), tf.gather(masks, random_index)) def _get_deepmac_network_by_type(name, num_init_channels, mask_size=None): """Get DeepMAC network model given a string type.""" if name.startswith('hourglass'): if name == 'hourglass10': return hourglass_network.hourglass_10(num_init_channels, initial_downsample=False) elif name == 'hourglass20': return hourglass_network.hourglass_20(num_init_channels, initial_downsample=False) elif name == 'hourglass32': return hourglass_network.hourglass_32(num_init_channels, initial_downsample=False) elif name == 'hourglass52': return hourglass_network.hourglass_52(num_init_channels, initial_downsample=False) elif name == 'hourglass100': return hourglass_network.hourglass_100(num_init_channels, initial_downsample=False) elif name == 'hourglass20_uniform_size': return hourglass_network.hourglass_20_uniform_size(num_init_channels) elif name == 'hourglass20_no_shortcut': return hourglass_network.hourglass_20_no_shortcut(num_init_channels) elif name == 'fully_connected': if not mask_size: raise ValueError('Mask size must be set.') return FullyConnectedMaskHead(num_init_channels, mask_size) elif name.startswith('resnet'): return ResNetMaskNetwork(name, num_init_channels) raise ValueError('Unknown network type {}'.format(name)) def crop_masks_within_boxes(masks, boxes, output_size): """Crops masks to lie tightly within the boxes. Args: masks: A [num_instances, height, width] float tensor of masks. boxes: A [num_instances, 4] sized tensor of normalized bounding boxes. output_size: The height and width of the output masks. Returns: masks: A [num_instances, output_size, output_size] tensor of masks which are cropped to be tightly within the gives boxes and resized. """ masks = spatial_transform_ops.matmul_crop_and_resize( masks[:, :, :, tf.newaxis], boxes[:, tf.newaxis, :], [output_size, output_size]) return masks[:, 0, :, :, 0] def resize_instance_masks(masks, shape): height, width = shape masks_ex = masks[:, :, :, tf.newaxis] masks_ex = tf.image.resize(masks_ex, (height, width), method=tf.image.ResizeMethod.BILINEAR) masks = masks_ex[:, :, :, 0] return masks def filter_masked_classes(masked_class_ids, classes, weights, masks): """Filter out masks whose class IDs are not present in masked_class_ids. Args: masked_class_ids: A list of class IDs allowed to have masks. These class IDs are 1-indexed. classes: A [num_instances, num_classes] float tensor containing the one-hot encoded classes. weights: A [num_instances] float tensor containing the weights of each sample. masks: A [num_instances, height, width] tensor containing the mask per instance. Returns: classes_filtered: A [num_instances, num_classes] float tensor containing the one-hot encoded classes with classes not in masked_class_ids zeroed out. weights_filtered: A [num_instances] float tensor containing the weights of each sample with instances whose classes aren't in masked_class_ids zeroed out. masks_filtered: A [num_instances, height, width] tensor containing the mask per instance with masks not belonging to masked_class_ids zeroed out. 
""" if len(masked_class_ids) == 0: # pylint:disable=g-explicit-length-test return classes, weights, masks if tf.shape(classes)[0] == 0: return classes, weights, masks masked_class_ids = tf.constant(np.array(masked_class_ids, dtype=np.int32)) label_id_offset = 1 masked_class_ids -= label_id_offset class_ids = tf.argmax(classes, axis=1, output_type=tf.int32) matched_classes = tf.equal( class_ids[:, tf.newaxis], masked_class_ids[tf.newaxis, :] ) matched_classes = tf.reduce_any(matched_classes, axis=1) matched_classes = tf.cast(matched_classes, tf.float32) return ( classes * matched_classes[:, tf.newaxis], weights * matched_classes, masks * matched_classes[:, tf.newaxis, tf.newaxis] ) class ResNetMaskNetwork(tf.keras.layers.Layer): """A small wrapper around ResNet blocks to predict masks.""" def __init__(self, resnet_type, num_init_channels): """Creates the ResNet mask network. Args: resnet_type: A string of the for resnetN where N where N is in [4, 8, 12, 16, 20] num_init_channels: Number of filters in the ResNet block. """ super(ResNetMaskNetwork, self).__init__() nc = num_init_channels if resnet_type == 'resnet4': channel_dims = [nc * 2] blocks = [2] elif resnet_type == 'resnet8': channel_dims = [nc * 2] blocks = [4] elif resnet_type == 'resnet12': channel_dims = [nc * 2] blocks = [6] elif resnet_type == 'resnet16': channel_dims = [nc * 2] blocks = [8] # Defined such that the channels are roughly similar to the hourglass20. elif resnet_type == 'resnet20': channel_dims = [nc * 2, nc * 3] blocks = [8, 2] else: raise ValueError('Unknown resnet type "{}"'.format(resnet_type)) self.input_layer = tf.keras.layers.Conv2D(nc, 1, 1) # Last channel has to be defined so that batch norm can initialize properly. model_input = tf.keras.layers.Input([None, None, nc]) output = model_input for i, (num_blocks, channels) in enumerate(zip(blocks, channel_dims)): output = resnet_v1.stack_basic(output, filters=channels, blocks=num_blocks, stride1=1, name='resnet_mask_block_%d' % i) self.model = tf.keras.Model(inputs=model_input, outputs=output) def __call__(self, inputs): return self.model(self.input_layer(inputs)) class FullyConnectedMaskHead(tf.keras.layers.Layer): """A 2 layer fully connected mask head.""" def __init__(self, num_init_channels, mask_size): super(FullyConnectedMaskHead, self).__init__() self.fc1 = tf.keras.layers.Dense(units=1024, activation='relu') self.fc2 = tf.keras.layers.Dense(units=mask_size*mask_size) self.mask_size = mask_size self.num_input_channels = num_init_channels self.input_layer = tf.keras.layers.Conv2D(num_init_channels, 1, 1) model_input = tf.keras.layers.Input( [mask_size * mask_size * num_init_channels,]) output = self.fc2(self.fc1(model_input)) self.model = tf.keras.Model(inputs=model_input, outputs=output) def __call__(self, inputs): inputs = self.input_layer(inputs) inputs_shape = tf.shape(inputs) num_instances = inputs_shape[0] height = inputs_shape[1] width = inputs_shape[2] dims = inputs_shape[3] flattened_inputs = tf.reshape(inputs, [num_instances, height * width * dims]) flattened_masks = self.model(flattened_inputs) return tf.reshape(flattened_masks, [num_instances, self.mask_size, self.mask_size, 1]) class MaskHeadNetwork(tf.keras.layers.Layer): """Mask head class for DeepMAC.""" def __init__(self, network_type, num_init_channels=64, use_instance_embedding=True, mask_size=None): """Initializes the network. Args: network_type: A string denoting the kind of network we want to use internally. num_init_channels: int, the number of channels in the first block. 
The number of channels in the following blocks depend on the network type used. use_instance_embedding: bool, if set, we concatenate the instance embedding to the input while predicting the mask. mask_size: int, size of the output mask. Required only with `fully_connected` mask type. """ super(MaskHeadNetwork, self).__init__() self._net = _get_deepmac_network_by_type( network_type, num_init_channels, mask_size) self._use_instance_embedding = use_instance_embedding self.project_out = tf.keras.layers.Conv2D( filters=1, kernel_size=1, activation=None) def __call__(self, instance_embedding, pixel_embedding, training): """Returns mask logits given object center and spatial embeddings. Args: instance_embedding: A [num_instances, embedding_size] float tensor representing the center emedding vector of each instance. pixel_embedding: A [num_instances, height, width, pixel_embedding_size] float tensor representing the per-pixel spatial embedding for each instance. training: boolean flag indicating training or testing mode. Returns: mask: A [num_instances, height, width] float tensor containing the mask logits for each instance. """ height = tf.shape(pixel_embedding)[1] width = tf.shape(pixel_embedding)[2] instance_embedding = instance_embedding[:, tf.newaxis, tf.newaxis, :] instance_embedding = tf.tile(instance_embedding, [1, height, width, 1]) if self._use_instance_embedding: inputs = tf.concat([pixel_embedding, instance_embedding], axis=3) else: inputs = pixel_embedding out = self._net(inputs) if isinstance(out, list): out = out[-1] if out.shape[-1] > 1: out = self.project_out(out) return tf.squeeze(out, axis=-1) def deepmac_proto_to_params(deepmac_config): """Convert proto to named tuple.""" loss = losses_pb2.Loss() # Add dummy localization loss to avoid the loss_builder throwing error. loss.localization_loss.weighted_l2.CopyFrom( losses_pb2.WeightedL2LocalizationLoss()) loss.classification_loss.CopyFrom(deepmac_config.classification_loss) classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) jitter_mode = preprocessor_pb2.RandomJitterBoxes.JitterMode.Name( deepmac_config.jitter_mode).lower() return DeepMACParams( dim=deepmac_config.dim, classification_loss=classification_loss, task_loss_weight=deepmac_config.task_loss_weight, pixel_embedding_dim=deepmac_config.pixel_embedding_dim, allowed_masked_classes_ids=deepmac_config.allowed_masked_classes_ids, mask_size=deepmac_config.mask_size, mask_num_subsamples=deepmac_config.mask_num_subsamples, use_xy=deepmac_config.use_xy, network_type=deepmac_config.network_type, use_instance_embedding=deepmac_config.use_instance_embedding, num_init_channels=deepmac_config.num_init_channels, predict_full_resolution_masks= deepmac_config.predict_full_resolution_masks, postprocess_crop_size=deepmac_config.postprocess_crop_size, max_roi_jitter_ratio=deepmac_config.max_roi_jitter_ratio, roi_jitter_mode=jitter_mode ) class DeepMACMetaArch(center_net_meta_arch.CenterNetMetaArch): """The experimental CenterNet DeepMAC[1] model. 
[1]: https://arxiv.org/abs/2104.00613 """ def __init__(self, is_training, add_summaries, num_classes, feature_extractor, image_resizer_fn, object_center_params, object_detection_params, deepmac_params, compute_heatmap_sparse=False): """Constructs the super class with object center & detection params only.""" self._deepmac_params = deepmac_params super(DeepMACMetaArch, self).__init__( is_training=is_training, add_summaries=add_summaries, num_classes=num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=object_center_params, object_detection_params=object_detection_params, compute_heatmap_sparse=compute_heatmap_sparse) def _construct_prediction_heads(self, num_classes, num_feature_outputs, class_prediction_bias_init): super_instance = super(DeepMACMetaArch, self) prediction_heads = super_instance._construct_prediction_heads( # pylint:disable=protected-access num_classes, num_feature_outputs, class_prediction_bias_init) if self._deepmac_params is not None: prediction_heads[INSTANCE_EMBEDDING] = [ center_net_meta_arch.make_prediction_net(self._deepmac_params.dim) for _ in range(num_feature_outputs) ] prediction_heads[PIXEL_EMBEDDING] = [ center_net_meta_arch.make_prediction_net( self._deepmac_params.pixel_embedding_dim) for _ in range(num_feature_outputs) ] self._mask_net = MaskHeadNetwork( network_type=self._deepmac_params.network_type, use_instance_embedding=self._deepmac_params.use_instance_embedding, num_init_channels=self._deepmac_params.num_init_channels) return prediction_heads def _get_mask_head_input(self, boxes, pixel_embedding): """Get the input to the mask network, given bounding boxes. Args: boxes: A [num_instances, 4] float tensor containing bounding boxes in normalized coordinates. pixel_embedding: A [height, width, embedding_size] float tensor containing spatial pixel embeddings. Returns: embedding: A [num_instances, mask_height, mask_width, embedding_size + 2] float tensor containing the inputs to the mask network. For each bounding box, we concatenate the normalized box coordinates to the cropped pixel embeddings. If predict_full_resolution_masks is set, mask_height and mask_width are the same as height and width of pixel_embedding. If not, mask_height and mask_width are the same as mask_size. """ num_instances = tf.shape(boxes)[0] mask_size = self._deepmac_params.mask_size if self._deepmac_params.predict_full_resolution_masks: num_instances = tf.shape(boxes)[0] pixel_embedding = pixel_embedding[tf.newaxis, :, :, :] pixel_embeddings_processed = tf.tile(pixel_embedding, [num_instances, 1, 1, 1]) else: # TODO(vighneshb) Explore multilevel_roi_align and align_corners=False. pixel_embeddings_cropped = spatial_transform_ops.matmul_crop_and_resize( pixel_embedding[tf.newaxis], boxes[tf.newaxis], [mask_size, mask_size]) pixel_embeddings_processed = pixel_embeddings_cropped[0] mask_shape = tf.shape(pixel_embeddings_processed) mask_height, mask_width = mask_shape[1], mask_shape[2] y_grid, x_grid = tf.meshgrid(tf.linspace(-1.0, 1.0, mask_height), tf.linspace(-1.0, 1.0, mask_width), indexing='ij') coords = tf.stack([y_grid, x_grid], axis=2) coords = coords[tf.newaxis, :, :, :] coords = tf.tile(coords, [num_instances, 1, 1, 1]) if self._deepmac_params.use_xy: return tf.concat([coords, pixel_embeddings_processed], axis=3) else: return pixel_embeddings_processed def _get_instance_embeddings(self, boxes, instance_embedding): """Return the instance embeddings from bounding box centers. 
Args: boxes: A [num_instances, 4] float tensor holding bounding boxes. The coordinates are in normalized input space. instance_embedding: A [height, width, embedding_size] float tensor containing the instance embeddings. Returns: instance_embeddings: A [num_instances, embedding_size] shaped float tensor containing the center embedding for each instance. """ blist = box_list.BoxList(boxes) output_height = tf.shape(instance_embedding)[0] output_width = tf.shape(instance_embedding)[1] blist_output = box_list_ops.to_absolute_coordinates( blist, output_height, output_width, check_range=False) (y_center_output, x_center_output, _, _) = blist_output.get_center_coordinates_and_sizes() center_coords_output = tf.stack([y_center_output, x_center_output], axis=1) center_coords_output_int = tf.cast(center_coords_output, tf.int32) center_latents = tf.gather_nd(instance_embedding, center_coords_output_int) return center_latents def _get_groundtruth_mask_output(self, boxes, masks): """Get the expected mask output for each box. Args: boxes: A [num_instances, 4] float tensor containing bounding boxes in normalized coordinates. masks: A [num_instances, height, width] float tensor containing binary ground truth masks. Returns: masks: If predict_full_resolution_masks is set, masks are not resized and the size of this tensor is [num_instances, input_height, input_width]. Otherwise, returns a tensor of size [num_instances, mask_size, mask_size]. """ mask_size = self._deepmac_params.mask_size if self._deepmac_params.predict_full_resolution_masks: return masks else: cropped_masks = spatial_transform_ops.matmul_crop_and_resize( masks[:, :, :, tf.newaxis], boxes[:, tf.newaxis, :], [mask_size, mask_size]) cropped_masks = tf.stop_gradient(cropped_masks) cropped_masks = tf.squeeze(cropped_masks, axis=[1, 4]) # TODO(vighneshb) should we discretize masks? return cropped_masks def _resize_logits_like_gt(self, logits, gt): height, width = tf.shape(gt)[1], tf.shape(gt)[2] return resize_instance_masks(logits, (height, width)) def _compute_per_instance_mask_loss( self, boxes, masks, instance_embedding, pixel_embedding): """Returns the mask loss per instance. Args: boxes: A [num_instances, 4] float tensor holding bounding boxes. The coordinates are in normalized input space. masks: A [num_instances, input_height, input_width] float tensor containing the instance masks. instance_embedding: A [output_height, output_width, embedding_size] float tensor containing the instance embeddings. pixel_embedding: optional [output_height, output_width, pixel_embedding_size] float tensor containing the per-pixel embeddings. Returns: mask_loss: A [num_instances] shaped float tensor containing the mask loss for each instance. 
""" num_instances = tf.shape(boxes)[0] if tf.keras.backend.learning_phase(): boxes = preprocessor.random_jitter_boxes( boxes, self._deepmac_params.max_roi_jitter_ratio, jitter_mode=self._deepmac_params.roi_jitter_mode) mask_input = self._get_mask_head_input( boxes, pixel_embedding) instance_embeddings = self._get_instance_embeddings( boxes, instance_embedding) mask_logits = self._mask_net( instance_embeddings, mask_input, training=tf.keras.backend.learning_phase()) mask_gt = self._get_groundtruth_mask_output(boxes, masks) mask_logits = self._resize_logits_like_gt(mask_logits, mask_gt) mask_logits = tf.reshape(mask_logits, [num_instances, -1, 1]) mask_gt = tf.reshape(mask_gt, [num_instances, -1, 1]) loss = self._deepmac_params.classification_loss( prediction_tensor=mask_logits, target_tensor=mask_gt, weights=tf.ones_like(mask_logits)) # TODO(vighneshb) Make this configurable via config. if isinstance(self._deepmac_params.classification_loss, losses.WeightedDiceClassificationLoss): return tf.reduce_sum(loss, axis=1) else: return tf.reduce_mean(loss, axis=[1, 2]) def _compute_instance_masks_loss(self, prediction_dict): """Computes the mask loss. Args: prediction_dict: dict from predict() method containing INSTANCE_EMBEDDING and PIXEL_EMBEDDING prediction. Both of these are lists of tensors, each of size [batch_size, height, width, embedding_size]. Returns: loss: float, the mask loss as a scalar. """ gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) allowed_masked_classes_ids = ( self._deepmac_params.allowed_masked_classes_ids) total_loss = 0.0 # Iterate over multiple preidctions by backbone (for hourglass length=2) for instance_pred, pixel_pred in zip( prediction_dict[INSTANCE_EMBEDDING], prediction_dict[PIXEL_EMBEDDING]): # Iterate over samples in batch # TODO(vighneshb) find out how autograph is handling this. Converting # to a single op may give speed improvements for i, (boxes, weights, classes, masks) in enumerate( zip(gt_boxes_list, gt_weights_list, gt_classes_list, gt_masks_list)): _, weights, masks = filter_masked_classes(allowed_masked_classes_ids, classes, weights, masks) num_subsample = self._deepmac_params.mask_num_subsamples _, weights, boxes, masks = subsample_instances( classes, weights, boxes, masks, num_subsample) per_instance_loss = self._compute_per_instance_mask_loss( boxes, masks, instance_pred[i], pixel_pred[i]) per_instance_loss *= weights num_instances = tf.maximum(tf.reduce_sum(weights), 1.0) total_loss += tf.reduce_sum(per_instance_loss) / num_instances batch_size = len(gt_boxes_list) num_predictions = len(prediction_dict[INSTANCE_EMBEDDING]) return total_loss / float(batch_size * num_predictions) def loss(self, prediction_dict, true_image_shapes, scope=None): losses_dict = super(DeepMACMetaArch, self).loss( prediction_dict, true_image_shapes, scope) if self._deepmac_params is not None: mask_loss = self._compute_instance_masks_loss( prediction_dict=prediction_dict) key = LOSS_KEY_PREFIX + '/' + DEEP_MASK_ESTIMATION losses_dict[key] = ( self._deepmac_params.task_loss_weight * mask_loss ) return losses_dict def postprocess(self, prediction_dict, true_image_shapes, **params): """Produces boxes given a prediction dict returned by predict(). Args: prediction_dict: a dictionary holding predicted tensors from "predict" function. 
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **params: Currently ignored. Returns: detections: a dictionary containing the following fields detection_masks: (Optional) A uint8 tensor of shape [batch, max_detections, mask_height, mask_width] with masks for each detection. Background is specified with 0, and foreground is specified with positive integers (1 for standard instance segmentation mask, and 1-indexed parts for DensePose task). And all other fields returned by the super class method. """ postprocess_dict = super(DeepMACMetaArch, self).postprocess( prediction_dict, true_image_shapes, **params) boxes_strided = postprocess_dict['detection_boxes_strided'] if self._deepmac_params is not None: masks = self._postprocess_masks( boxes_strided, prediction_dict[INSTANCE_EMBEDDING][-1], prediction_dict[PIXEL_EMBEDDING][-1]) postprocess_dict[fields.DetectionResultFields.detection_masks] = masks return postprocess_dict def _postprocess_masks(self, boxes_output_stride, instance_embedding, pixel_embedding): """Postprocess masks with the deep mask network. Args: boxes_output_stride: A [batch_size, num_instances, 4] float tensor containing the batch of boxes in the absolute output space of the feature extractor. instance_embedding: A [batch_size, output_height, output_width, embedding_size] float tensor containing instance embeddings. pixel_embedding: A [batch_size, output_height, output_width, pixel_embedding_size] float tensor containing the per-pixel embedding. Returns: masks: A float tensor of size [batch_size, num_instances, mask_size, mask_size] containing binary per-box instance masks. """ def process(elems): boxes, instance_embedding, pixel_embedding = elems return self._postprocess_sample(boxes, instance_embedding, pixel_embedding) max_instances = self._center_params.max_box_predictions return tf.map_fn(process, [boxes_output_stride, instance_embedding, pixel_embedding], dtype=tf.float32, parallel_iterations=max_instances) def _postprocess_sample(self, boxes_output_stride, instance_embedding, pixel_embedding): """Post process masks for a single sample. Args: boxes_output_stride: A [num_instances, 4] float tensor containing bounding boxes in the absolute output space. instance_embedding: A [output_height, output_width, embedding_size] float tensor containing instance embeddings. pixel_embedding: A [batch_size, output_height, output_width, pixel_embedding_size] float tensor containing the per-pixel embedding. Returns: masks: A float tensor of size [num_instances, mask_height, mask_width] containing binary per-box instance masks. If predict_full_resolution_masks is set, the masks will be resized to postprocess_crop_size. Otherwise, mask_height=mask_width=mask_size """ height, width = (tf.shape(instance_embedding)[0], tf.shape(instance_embedding)[1]) height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32) blist = box_list.BoxList(boxes_output_stride) blist = box_list_ops.to_normalized_coordinates( blist, height, width, check_range=False) boxes = blist.get() mask_input = self._get_mask_head_input(boxes, pixel_embedding) instance_embeddings = self._get_instance_embeddings( boxes, instance_embedding) mask_logits = self._mask_net( instance_embeddings, mask_input, training=tf.keras.backend.learning_phase()) # TODO(vighneshb) Explore sweeping mask thresholds. 
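# When predict_full_resolution_masks is set, the logits below are upsampled by
# the feature extractor output stride, cropped to their boxes and resized to
# postprocess_crop_size before the sigmoid converts them to probabilities.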
if self._deepmac_params.predict_full_resolution_masks: height, width = tf.shape(mask_logits)[1], tf.shape(mask_logits)[2] height *= self._stride width *= self._stride mask_logits = resize_instance_masks(mask_logits, (height, width)) mask_logits = crop_masks_within_boxes( mask_logits, boxes, self._deepmac_params.postprocess_crop_size) masks_prob = tf.nn.sigmoid(mask_logits) return masks_prob def _transform_boxes_to_feature_coordinates(self, provided_boxes, true_image_shapes, resized_image_shape, instance_embedding): """Transforms normalized boxes to feature map coordinates. Args: provided_boxes: A [batch, num_instances, 4] float tensor containing normalized bounding boxes. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. resized_image_shape: A 4D int32 tensor containing shapes of the preprocessed inputs (N, H, W, C). instance_embedding: A [batch, output_height, output_width, embedding_size] float tensor containing instance embeddings. Returns: A float tensor of size [batch, num_instances, 4] containing boxes whose coordinates have been transformed to the absolute output space of the feature extractor. """ # Input boxes must be normalized. shape_utils.assert_box_normalized(provided_boxes) # Transform the provided boxes to the absolute output space of the feature # extractor. height, width = (tf.shape(instance_embedding)[1], tf.shape(instance_embedding)[2]) resized_image_height = resized_image_shape[1] resized_image_width = resized_image_shape[2] def transform_boxes(elems): boxes_per_image, true_image_shape = elems blist = box_list.BoxList(boxes_per_image) # First transform boxes from image space to resized image space since # there may be padding in the resized images. blist = box_list_ops.scale(blist, true_image_shape[0] / resized_image_height, true_image_shape[1] / resized_image_width) # Then transform boxes from resized image space (normalized) to the # feature map space (absolute). blist = box_list_ops.to_absolute_coordinates( blist, height, width, check_range=False) return blist.get() return tf.map_fn( transform_boxes, [provided_boxes, true_image_shapes], dtype=tf.float32) def predict_masks_from_boxes(self, prediction_dict, true_image_shapes, provided_boxes, **params): """Produces masks for the provided boxes. Args: prediction_dict: a dictionary holding predicted tensors from "predict" function. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. provided_boxes: float tensor of shape [batch, num_boxes, 4] containing box coordinates (normalized) from which we will produce masks. **params: Currently ignored. Returns: detections: a dictionary containing the following fields detection_masks: (Optional) A uint8 tensor of shape [batch, max_detections, mask_height, mask_width] with masks for each detection. Background is specified with 0, and foreground is specified with positive integers (1 for standard instance segmentation mask, and 1-indexed parts for DensePose task). And all other fields returned by the super class method.
""" postprocess_dict = super(DeepMACMetaArch, self).postprocess(prediction_dict, true_image_shapes, **params) instance_embedding = prediction_dict[INSTANCE_EMBEDDING][-1] resized_image_shapes = shape_utils.combined_static_and_dynamic_shape( prediction_dict['preprocessed_inputs']) boxes_strided = self._transform_boxes_to_feature_coordinates( provided_boxes, true_image_shapes, resized_image_shapes, instance_embedding) if self._deepmac_params is not None: masks = self._postprocess_masks( boxes_strided, instance_embedding, prediction_dict[PIXEL_EMBEDDING][-1]) postprocess_dict[fields.DetectionResultFields.detection_masks] = masks return postprocess_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/deepmac_meta_arch.py
deepmac_meta_arch.py
"""Tests for google3.third_party.tensorflow_models.object_detection.meta_architectures.deepmac_meta_arch.""" import functools import unittest from absl.testing import parameterized import numpy as np import tensorflow as tf from object_detection.core import losses from object_detection.core import preprocessor from object_detection.meta_architectures import center_net_meta_arch from object_detection.meta_architectures import deepmac_meta_arch from object_detection.utils import tf_version class DummyFeatureExtractor(center_net_meta_arch.CenterNetFeatureExtractor): def __init__(self, channel_means, channel_stds, bgr_ordering, num_feature_outputs, stride): self._num_feature_outputs = num_feature_outputs self._stride = stride super(DummyFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def predict(self): pass def loss(self): pass def postprocess(self): pass def call(self, inputs): batch_size, input_height, input_width, _ = inputs.shape fake_output = tf.ones([ batch_size, input_height // self._stride, input_width // self._stride, 64 ], dtype=tf.float32) return [fake_output] * self._num_feature_outputs @property def out_stride(self): return self._stride @property def num_feature_outputs(self): return self._num_feature_outputs class MockMaskNet(tf.keras.layers.Layer): def __call__(self, instance_embedding, pixel_embedding, training): return tf.zeros_like(pixel_embedding[:, :, :, 0]) + 0.9 def build_meta_arch(predict_full_resolution_masks=False, use_dice_loss=False): """Builds the DeepMAC meta architecture.""" feature_extractor = DummyFeatureExtractor( channel_means=(1.0, 2.0, 3.0), channel_stds=(10., 20., 30.), bgr_ordering=False, num_feature_outputs=2, stride=4) image_resizer_fn = functools.partial( preprocessor.resize_to_range, min_dimension=128, max_dimension=128, pad_to_max_dimesnion=True) object_center_params = center_net_meta_arch.ObjectCenterParams( classification_loss=losses.WeightedSigmoidClassificationLoss(), object_center_loss_weight=1.0, min_box_overlap_iou=1.0, max_box_predictions=5, use_labeled_classes=False) if use_dice_loss: classification_loss = losses.WeightedDiceClassificationLoss(False) else: classification_loss = losses.WeightedSigmoidClassificationLoss() deepmac_params = deepmac_meta_arch.DeepMACParams( classification_loss=classification_loss, dim=8, task_loss_weight=1.0, pixel_embedding_dim=2, allowed_masked_classes_ids=[], mask_size=16, mask_num_subsamples=-1, use_xy=True, network_type='hourglass10', use_instance_embedding=True, num_init_channels=8, predict_full_resolution_masks=predict_full_resolution_masks, postprocess_crop_size=128, max_roi_jitter_ratio=0.0, roi_jitter_mode='random' ) object_detection_params = center_net_meta_arch.ObjectDetectionParams( localization_loss=losses.L1LocalizationLoss(), offset_loss_weight=1.0, scale_loss_weight=0.1 ) return deepmac_meta_arch.DeepMACMetaArch( is_training=True, add_summaries=False, num_classes=6, feature_extractor=feature_extractor, object_center_params=object_center_params, deepmac_params=deepmac_params, object_detection_params=object_detection_params, image_resizer_fn=image_resizer_fn) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class DeepMACUtilsTest(tf.test.TestCase): def test_subsample_trivial(self): """Test subsampling masks.""" boxes = np.arange(4).reshape(4, 1) * np.ones((4, 4)) masks = np.arange(4).reshape(4, 1, 1) * np.ones((4, 32, 32)) weights = np.ones(4) classes = tf.one_hot(tf.range(4), depth=4) result = 
deepmac_meta_arch.subsample_instances( classes, weights, boxes, masks, 4) self.assertAllClose(result[0], classes) self.assertAllClose(result[1], weights) self.assertAllClose(result[2], boxes) self.assertAllClose(result[3], masks) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class DeepMACMetaArchTest(tf.test.TestCase): def setUp(self): # pylint:disable=g-missing-super-call self.model = build_meta_arch() def test_mask_network(self): net = deepmac_meta_arch.MaskHeadNetwork('hourglass10', 8) out = net(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 16)), training=True) self.assertEqual(out.shape, (2, 32, 32)) def test_mask_network_hourglass20(self): net = deepmac_meta_arch.MaskHeadNetwork('hourglass20', 8) out = net(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 16)), training=True) self.assertEqual(out.shape, (2, 32, 32)) def test_mask_network_resnet(self): net = deepmac_meta_arch.MaskHeadNetwork('resnet4') out = net(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 16)), training=True) self.assertEqual(out.shape, (2, 32, 32)) def test_mask_network_resnet_tf_function(self): net = deepmac_meta_arch.MaskHeadNetwork('resnet8') call_func = tf.function(net.__call__) out = call_func(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 16)), training=True) self.assertEqual(out.shape, (2, 32, 32)) def test_get_mask_head_input(self): boxes = tf.constant([[0., 0., 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]], dtype=tf.float32) pixel_embedding = np.zeros((32, 32, 4), dtype=np.float32) pixel_embedding[:16, :16] = 1.0 pixel_embedding[16:, 16:] = 2.0 pixel_embedding = tf.constant(pixel_embedding) mask_inputs = self.model._get_mask_head_input(boxes, pixel_embedding) self.assertEqual(mask_inputs.shape, (2, 16, 16, 6)) y_grid, x_grid = tf.meshgrid(np.linspace(-1.0, 1.0, 16), np.linspace(-1.0, 1.0, 16), indexing='ij') for i in range(2): mask_input = mask_inputs[i] self.assertAllClose(y_grid, mask_input[:, :, 0]) self.assertAllClose(x_grid, mask_input[:, :, 1]) pixel_embedding = mask_input[:, :, 2:] self.assertAllClose(np.zeros((16, 16, 4)) + i + 1, pixel_embedding) def test_get_mask_head_input_no_crop_resize(self): model = build_meta_arch(predict_full_resolution_masks=True) boxes = tf.constant([[0., 0., 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], dtype=tf.float32) pixel_embedding_np = np.random.randn(32, 32, 4).astype(np.float32) pixel_embedding = tf.constant(pixel_embedding_np) mask_inputs = model._get_mask_head_input(boxes, pixel_embedding) self.assertEqual(mask_inputs.shape, (2, 32, 32, 6)) y_grid, x_grid = tf.meshgrid(np.linspace(-1.0, 1.0, 32), np.linspace(-1.0, 1.0, 32), indexing='ij') for i in range(2): mask_input = mask_inputs[i] self.assertAllClose(y_grid, mask_input[:, :, 0]) self.assertAllClose(x_grid, mask_input[:, :, 1]) pixel_embedding = mask_input[:, :, 2:] self.assertAllClose(pixel_embedding_np, pixel_embedding) def test_get_instance_embeddings(self): embeddings = np.zeros((32, 32, 2)) embeddings[8, 8] = 1.0 embeddings[24, 16] = 2.0 embeddings = tf.constant(embeddings) boxes = tf.constant([[0., 0., 0.5, 0.5], [0.5, 0.0, 1.0, 1.0]]) center_embeddings = self.model._get_instance_embeddings(boxes, embeddings) self.assertAllClose(center_embeddings, [[1.0, 1.0], [2.0, 2.0]]) def test_get_groundtruth_mask_output(self): boxes = tf.constant([[0., 0., 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]], dtype=tf.float32) masks = np.zeros((2, 32, 32), dtype=np.float32) masks[0, :16, :16] = 0.5 masks[1, 16:, 16:] = 0.1 masks = self.model._get_groundtruth_mask_output(boxes, masks) self.assertEqual(masks.shape, (2, 16, 16)) self.assertAllClose(masks[0], 
np.zeros((16, 16)) + 0.5) self.assertAllClose(masks[1], np.zeros((16, 16)) + 0.1) def test_get_groundtruth_mask_output_crop_resize(self): model = build_meta_arch(predict_full_resolution_masks=True) boxes = tf.constant([[0., 0., 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], dtype=tf.float32) masks = tf.ones((2, 32, 32)) masks = model._get_groundtruth_mask_output(boxes, masks) self.assertAllClose(masks, np.ones((2, 32, 32))) def test_per_instance_loss(self): model = build_meta_arch() model._mask_net = MockMaskNet() boxes = tf.constant([[0.0, 0.0, 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]]) masks = np.zeros((2, 32, 32), dtype=np.float32) masks[0, :16, :16] = 1.0 masks[1, 16:, 16:] = 1.0 masks = tf.constant(masks) loss = model._compute_per_instance_mask_loss( boxes, masks, tf.zeros((32, 32, 2)), tf.zeros((32, 32, 2))) self.assertAllClose( loss, np.zeros(2) - tf.math.log(tf.nn.sigmoid(0.9))) def test_per_instance_loss_no_crop_resize(self): model = build_meta_arch(predict_full_resolution_masks=True) model._mask_net = MockMaskNet() boxes = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]) masks = np.ones((2, 128, 128), dtype=np.float32) masks = tf.constant(masks) loss = model._compute_per_instance_mask_loss( boxes, masks, tf.zeros((32, 32, 2)), tf.zeros((32, 32, 2))) self.assertAllClose( loss, np.zeros(2) - tf.math.log(tf.nn.sigmoid(0.9))) def test_per_instance_loss_no_crop_resize_dice(self): model = build_meta_arch(predict_full_resolution_masks=True, use_dice_loss=True) model._mask_net = MockMaskNet() boxes = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]) masks = np.ones((2, 128, 128), dtype=np.float32) masks = tf.constant(masks) loss = model._compute_per_instance_mask_loss( boxes, masks, tf.zeros((32, 32, 2)), tf.zeros((32, 32, 2))) pred = tf.nn.sigmoid(0.9) expected = (1.0 - ((2.0 * pred) / (1.0 + pred))) self.assertAllClose(loss, [expected, expected], rtol=1e-3) def test_empty_masks(self): boxes = tf.zeros([0, 4]) masks = tf.zeros([0, 128, 128]) loss = self.model._compute_per_instance_mask_loss( boxes, masks, tf.zeros((32, 32, 2)), tf.zeros((32, 32, 2))) self.assertEqual(loss.shape, (0,)) def test_postprocess(self): model = build_meta_arch() model._mask_net = MockMaskNet() boxes = np.zeros((2, 3, 4), dtype=np.float32) boxes[:, :, [0, 2]] = 0.0 boxes[:, :, [1, 3]] = 8.0 boxes = tf.constant(boxes) masks = model._postprocess_masks( boxes, tf.zeros((2, 32, 32, 2)), tf.zeros((2, 32, 32, 2))) prob = tf.nn.sigmoid(0.9).numpy() self.assertAllClose(masks, prob * np.ones((2, 3, 16, 16))) def test_postprocess_no_crop_resize_shape(self): model = build_meta_arch(predict_full_resolution_masks=True) model._mask_net = MockMaskNet() boxes = np.zeros((2, 3, 4), dtype=np.float32) boxes[:, :, [0, 2]] = 0.0 boxes[:, :, [1, 3]] = 8.0 boxes = tf.constant(boxes) masks = model._postprocess_masks( boxes, tf.zeros((2, 32, 32, 2)), tf.zeros((2, 32, 32, 2))) prob = tf.nn.sigmoid(0.9).numpy() self.assertAllClose(masks, prob * np.ones((2, 3, 128, 128))) def test_crop_masks_within_boxes(self): masks = np.zeros((2, 32, 32)) masks[0, :16, :16] = 1.0 masks[1, 16:, 16:] = 1.0 boxes = tf.constant([[0.0, 0.0, 15.0 / 32, 15.0 / 32], [0.5, 0.5, 1.0, 1]]) masks = deepmac_meta_arch.crop_masks_within_boxes( masks, boxes, 128) masks = (masks.numpy() > 0.0).astype(np.float32) self.assertAlmostEqual(masks.sum(), 2 * 128 * 128) def test_transform_boxes_to_feature_coordinates(self): batch_size = 2 model = build_meta_arch() model._mask_net = MockMaskNet() boxes = np.zeros((batch_size, 3, 4), dtype=np.float32) boxes[:, :, [0, 2]] = 0.1 
boxes[:, :, [1, 3]] = 0.5 boxes = tf.constant(boxes) true_image_shapes = tf.constant([ [64, 32, 3], # Image 1 is padded during resizing. [64, 64, 3], # Image 2 is not padded. ]) resized_image_height = 64 resized_image_width = 64 resized_image_shape = [ batch_size, resized_image_height, resized_image_width, 3 ] feature_map_height = 32 feature_map_width = 32 instance_embedding = tf.zeros( (batch_size, feature_map_height, feature_map_width, 2)) expected_boxes = np.array([ [ # Image 1 # 0.1 * (64 / resized_image_height) * feature_map_height -> 3.2 # 0.5 * (32 / resized_image_width) * feature_map_width -> 8.0 [3.2, 8., 3.2, 8.], [3.2, 8., 3.2, 8.], [3.2, 8., 3.2, 8.], ], [ # Image 2 # 0.1 * (64 / resized_image_height) * feature_map_height -> 3.2 # 0.5 * (64 / resized_image_width) * feature_map_width -> 16 [3.2, 16., 3.2, 16.], [3.2, 16., 3.2, 16.], [3.2, 16., 3.2, 16.], ], ]) box_strided = model._transform_boxes_to_feature_coordinates( boxes, true_image_shapes, resized_image_shape, instance_embedding) self.assertAllClose(box_strided, expected_boxes) def test_fc_tf_function(self): net = deepmac_meta_arch.MaskHeadNetwork('fully_connected', 8, mask_size=32) call_func = tf.function(net.__call__) out = call_func(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 8)), training=True) self.assertEqual(out.shape, (2, 32, 32)) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class FullyConnectedMaskHeadTest(tf.test.TestCase): def test_fc_mask_head(self): head = deepmac_meta_arch.FullyConnectedMaskHead(512, 16) inputs = tf.random.uniform([100, 16, 16, 512]) output = head(inputs) self.assertAllEqual([100, 16, 16, 1], output.numpy().shape) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ResNetMaskHeadTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters(['resnet4', 'resnet8', 'resnet20']) def test_pass(self, name): net = deepmac_meta_arch.ResNetMaskNetwork(name, 8) out = net(tf.zeros((3, 32, 32, 16))) self.assertEqual(out.shape[:3], (3, 32, 32)) if __name__ == '__main__': tf.test.main()
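# --- Illustrative sketch (editor's addition, not part of the original test file) ---
# A hedged summary of the calling convention the MaskHeadNetwork tests above
# exercise: one embedding vector per instance plus a per-instance spatial feature
# crop in, one mask-logit map per instance out. The shapes mirror the tests and
# are illustrative only.
#
#   net = deepmac_meta_arch.MaskHeadNetwork('hourglass10', 8)
#   instance_embeddings = tf.zeros((2, 4))             # one vector per instance
#   pixel_embedding_crops = tf.zeros((2, 32, 32, 16))  # per-instance spatial input
#   mask_logits = net(instance_embeddings, pixel_embedding_crops, training=True)
#   # mask_logits has shape (2, 32, 32): one logit map per instance.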
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/deepmac_meta_arch_test.py
deepmac_meta_arch_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Context R-CNN meta-architecture definition. This adds the ability to use attention into contextual features within the Faster R-CNN object detection framework to improve object detection performance. See https://arxiv.org/abs/1912.03538 for more information. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import tensorflow.compat.v1 as tf from object_detection.core import box_predictor from object_detection.core import standard_fields as fields from object_detection.meta_architectures import context_rcnn_lib from object_detection.meta_architectures import context_rcnn_lib_tf2 from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.protos import faster_rcnn_pb2 from object_detection.utils import ops from object_detection.utils import tf_version _UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__' class ContextRCNNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch): """Context R-CNN Meta-architecture definition.""" def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, initial_crop_size, maxpool_kernel_size, maxpool_stride, second_stage_target_assigner, second_stage_mask_rcnn_box_predictor, second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, second_stage_mask_prediction_loss_weight=1.0, hard_example_miner=None, parallel_iterations=16, add_summaries=True, clip_anchors_to_image=False, use_static_shapes=False, resize_masks=True, freeze_batchnorm=False, return_raw_detections_during_predict=False, output_final_box_features=False, output_final_box_rpn_features=False, attention_bottleneck_dimension=None, attention_temperature=None, use_self_attention=False, use_long_term_attention=True, self_attention_in_sequence=False, num_attention_heads=1, num_attention_layers=1, attention_position=( faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER) ): """ContextRCNNMetaArch Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. num_classes: Number of classes. 
Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). image_resizer_fn: A callable for image resizing. This callable takes a rank-3 image tensor of shape [height, width, channels] (corresponding to a single image), an optional rank-3 instance mask tensor of shape [num_masks, height, width] and returns a resized rank-3 image tensor, a resized mask tensor if one was provided in the input. In addition this callable must also return a 1-D tensor of the form [height, width, channels] containing the size of the true image, as the image resizer can perform zero padding. See protos/image_resizer.proto. feature_extractor: A FasterRCNNFeatureExtractor object. number_of_stages: An integer values taking values in {1, 2, 3}. If 1, the function will construct only the Region Proposal Network (RPN) part of the model. If 2, the function will perform box refinement and other auxiliary predictions all in the second stage. If 3, it will extract features from refined boxes and perform the auxiliary predictions on the non-maximum suppressed refined boxes. If is_training is true and the value of number_of_stages is 3, it is reduced to 2 since all the model heads are trained in parallel in second stage during training. first_stage_anchor_generator: An anchor_generator.AnchorGenerator object (note that currently we only support grid_anchor_generator.GridAnchorGenerator objects) first_stage_target_assigner: Target assigner to use for first stage of Faster R-CNN (RPN). first_stage_atrous_rate: A single integer indicating the atrous rate for the single convolution op which is applied to the `rpn_features_to_crop` tensor to obtain a tensor to be used for box prediction. Some feature extractors optionally allow for producing feature maps computed at denser resolutions. The atrous rate is used to compensate for the denser feature maps by using an effectively larger receptive field. (This should typically be set to 1). first_stage_box_predictor_arg_scope_fn: Either a Keras layer hyperparams object or a function to construct tf-slim arg_scope for conv2d, separable_conv2d and fully_connected ops. Used for the RPN box predictor. If it is a keras hyperparams object the RPN box predictor will be a Keras model. If it is a function to construct an arg scope it will be a tf-slim box predictor. first_stage_box_predictor_kernel_size: Kernel size to use for the convolution op just prior to RPN box predictions. first_stage_box_predictor_depth: Output depth for the convolution op just prior to RPN box predictions. first_stage_minibatch_size: The "batch size" to use for computing the objectness and location loss of the region proposal network. This "batch size" refers to the number of anchors selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. first_stage_sampler: Sampler to use for first stage loss (RPN loss). first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window`(with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`. This is used to perform non max suppression on the boxes predicted by the Region Proposal Network (RPN). 
See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. first_stage_max_proposals: Maximum number of boxes to retain after performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN). first_stage_localization_loss_weight: A float first_stage_objectness_loss_weight: A float crop_and_resize_fn: A differentiable resampler to use for cropping RPN proposal features. initial_crop_size: A single integer indicating the output size (width and height are set to be the same) of the initial bilinear interpolation based cropping during ROI pooling. maxpool_kernel_size: A single integer indicating the kernel size of the max pool op on the cropped feature map during ROI pooling. maxpool_stride: A single integer indicating the stride of the max pool op on the cropped feature map during ROI pooling. second_stage_target_assigner: Target assigner to use for second stage of Faster R-CNN. If the model is configured with multiple prediction heads, this target assigner is used to generate targets for all heads (with the correct `unmatched_class_label`). second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for the second stage. second_stage_batch_size: The batch size used for computing the classification and refined location loss of the box classifier. This "batch size" refers to the number of proposals selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. second_stage_sampler: Sampler to use for second stage loss (box classifier loss). second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores`, optional `clip_window` and optional (kwarg) `mask` inputs (with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`, and (optionally) `detection_masks`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. second_stage_score_conversion_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. second_stage_localization_loss_weight: A float indicating the scale factor for second stage localization loss. second_stage_classification_loss_weight: A float indicating the scale factor for second stage classification loss. second_stage_classification_loss: Classification loss used by the second stage classifier. Either losses.WeightedSigmoidClassificationLoss or losses.WeightedSoftmaxClassificationLoss. second_stage_mask_prediction_loss_weight: A float indicating the scale factor for second stage mask prediction loss. This is applicable only if second stage box predictor is configured to predict masks. hard_example_miner: A losses.HardExampleMiner object (can be None). parallel_iterations: (Optional) The number of iterations allowed to run in parallel for calls to tf.map_fn. add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph. clip_anchors_to_image: Normally, anchors generated for a given image size are pruned during training if they lie outside the image window. This option clips the anchors to be within the image instead of pruning. use_static_shapes: If True, uses implementation of ops with static shape guarantees. 
resize_masks: Indicates whether the masks present in the groundtruth should be resized in the model with `image_resizer_fn` freeze_batchnorm: Whether to freeze batch norm parameters in the first stage box predictor during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. return_raw_detections_during_predict: Whether to return raw detection boxes in the predict() method. These are decoded boxes that have not been through postprocessing (i.e. NMS). Default False. output_final_box_features: Whether to output final box features. If true, it crops the feature map based on the final box prediction and returns it in the output dict as detection_features. output_final_box_rpn_features: Whether to output rpn box features. If true, it crops the rpn feature map based on the final box prediction and returns it in the output dict as detection_features. attention_bottleneck_dimension: A single integer. The bottleneck feature dimension of the attention block. attention_temperature: A single float. The attention temperature. use_self_attention: Whether to use self-attention within the box features in the current frame. use_long_term_attention: Whether to use attention into the context features. self_attention_in_sequence: Whether self attention and long term attention are in sequence or parallel. num_attention_heads: The number of attention heads to use. num_attention_layers: The number of attention layers to use. attention_position: Whether attention should occur post rpn or post box classifier. Options are specified in the faster rcnn proto, default is post box classifier. Raises: ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at training time. ValueError: If first_stage_anchor_generator is not of type grid_anchor_generator.GridAnchorGenerator.
""" super(ContextRCNNMetaArch, self).__init__( is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, initial_crop_size, maxpool_kernel_size, maxpool_stride, second_stage_target_assigner, second_stage_mask_rcnn_box_predictor, second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, second_stage_mask_prediction_loss_weight=( second_stage_mask_prediction_loss_weight), hard_example_miner=hard_example_miner, parallel_iterations=parallel_iterations, add_summaries=add_summaries, clip_anchors_to_image=clip_anchors_to_image, use_static_shapes=use_static_shapes, resize_masks=resize_masks, freeze_batchnorm=freeze_batchnorm, return_raw_detections_during_predict=( return_raw_detections_during_predict), output_final_box_features=output_final_box_features, output_final_box_rpn_features=output_final_box_rpn_features) self._attention_position = attention_position if tf_version.is_tf1(): self._context_feature_extract_fn = functools.partial( context_rcnn_lib._compute_box_context_attention, bottleneck_dimension=attention_bottleneck_dimension, attention_temperature=attention_temperature, is_training=is_training, max_num_proposals=self.max_num_proposals, use_self_attention=use_self_attention, use_long_term_attention=use_long_term_attention, self_attention_in_sequence=self_attention_in_sequence, num_attention_heads=num_attention_heads, num_attention_layers=num_attention_layers) else: if use_self_attention: raise NotImplementedError if self_attention_in_sequence: raise NotImplementedError if not use_long_term_attention: raise NotImplementedError if num_attention_heads > 1: raise NotImplementedError if num_attention_layers > 1: raise NotImplementedError self._context_feature_extract_fn = context_rcnn_lib_tf2.AttentionBlock( bottleneck_dimension=attention_bottleneck_dimension, attention_temperature=attention_temperature, is_training=is_training, max_num_proposals=self.max_num_proposals) @staticmethod def get_side_inputs(features): """Overrides the get_side_inputs function in the base class. This function returns context_features and valid_context_size, which will be used in the _compute_second_stage_input_feature_maps function. Args: features: A dictionary of tensors. Returns: A dictionary of tensors contains context_features and valid_context_size. Raises: ValueError: If context_features or valid_context_size is not in the features. 
""" if (fields.InputDataFields.context_features not in features or fields.InputDataFields.valid_context_size not in features): raise ValueError( 'Please make sure context_features and valid_context_size are in the ' 'features') return { fields.InputDataFields.context_features: features[fields.InputDataFields.context_features], fields.InputDataFields.valid_context_size: features[fields.InputDataFields.valid_context_size] } def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors, image_shape, true_image_shapes, **side_inputs): """Predicts the output tensors from second stage of Faster R-CNN. Args: rpn_box_encodings: 3-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes. rpn_objectness_predictions_with_background: 2-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensor with shape [batch_size, height_i, width_i, depth] representing image features to crop using the proposal boxes predicted by the RPN. anchors: 2-D float tensor of shape [num_anchors, self._box_coder.code_size]. image_shape: A 1D int32 tensors of size [4] containing the image shape. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D float32 tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 2) class_predictions_with_background: a 3-D float32 tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 5) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in normalized coordinates. Can be used to override the boxes proposed by the RPN, thus enabling one to extract features and get box classification and prediction for externally selected areas of the image. 6) box_classifier_features: a 4-D float32/bfloat16 tensor representing the features for each proposal. 
If self._return_raw_detections_during_predict is True, the dictionary will also contain: 7) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, num_classes, 4] in normalized coordinates. 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals, num_classes]. """ proposal_boxes_normalized, num_proposals = self._proposal_postprocess( rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape, true_image_shapes) prediction_dict = self._box_prediction(rpn_features_to_crop, proposal_boxes_normalized, image_shape, true_image_shapes, num_proposals, **side_inputs) prediction_dict['num_proposals'] = num_proposals return prediction_dict def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized, image_shape, true_image_shapes, num_proposals, **side_inputs): """Predicts the output tensors from second stage of Faster R-CNN. Args: rpn_features_to_crop: A list 4-D float32 or bfloat16 tensor with shape [batch_size, height_i, width_i, depth] representing image features to crop using the proposal boxes predicted by the RPN. proposal_boxes_normalized: A float tensor with shape [batch_size, max_num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented as normalized coordinates. image_shape: A 1D int32 tensors of size [4] containing the image shape. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. num_proposals: The number of valid box proposals. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D float32 tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 2) class_predictions_with_background: a 3-D float32 tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 4) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in normalized coordinates. Can be used to override the boxes proposed by the RPN, thus enabling one to extract features and get box classification and prediction for externally selected areas of the image. 5) box_classifier_features: a 4-D float32/bfloat16 tensor representing the features for each proposal. If self._return_raw_detections_during_predict is True, the dictionary will also contain: 6) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, num_classes, 4] in normalized coordinates. 7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals, num_classes]. 
8) final_anchors: a 3-D float tensor of shape [batch_size, self.max_num_proposals, 4] containing the reference anchors for raw detection boxes in normalized coordinates. """ flattened_proposal_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, proposal_boxes_normalized, image_shape, num_proposals, **side_inputs)) box_classifier_features = self._extract_box_classifier_features( flattened_proposal_feature_maps, num_proposals, **side_inputs) if self._mask_rcnn_box_predictor.is_keras_model: box_predictions = self._mask_rcnn_box_predictor( [box_classifier_features], prediction_stage=2) else: box_predictions = self._mask_rcnn_box_predictor.predict( [box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, prediction_stage=2) refined_box_encodings = tf.squeeze( box_predictions[box_predictor.BOX_ENCODINGS], axis=1, name='all_refined_box_encodings') class_predictions_with_background = tf.squeeze( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1, name='all_class_predictions_with_background') absolute_proposal_boxes = ops.normalized_to_image_coordinates( proposal_boxes_normalized, image_shape, self._parallel_iterations) prediction_dict = { 'refined_box_encodings': tf.cast(refined_box_encodings, dtype=tf.float32), 'class_predictions_with_background': tf.cast(class_predictions_with_background, dtype=tf.float32), 'proposal_boxes': absolute_proposal_boxes, 'box_classifier_features': box_classifier_features, 'proposal_boxes_normalized': proposal_boxes_normalized, 'final_anchors': proposal_boxes_normalized } if self._return_raw_detections_during_predict: prediction_dict.update(self._raw_detections_and_feature_map_inds( refined_box_encodings, absolute_proposal_boxes, true_image_shapes)) return prediction_dict def _compute_second_stage_input_feature_maps(self, features_to_crop, proposal_boxes_normalized, image_shape, num_proposals, context_features, valid_context_size): """Crops to a set of proposals from the feature map for a batch of images. This function overrides the one in the FasterRCNNMetaArch. Aside from cropping and resizing the feature maps, which is done in the parent class, it adds context attention features to the box features. Args: features_to_crop: A float32 Tensor with shape [batch_size, height, width, depth] proposal_boxes_normalized: A float32 Tensor with shape [batch_size, num_proposals, box_code_size] containing proposal boxes in normalized coordinates. image_shape: A 1D int32 tensors of size [4] containing the image shape. num_proposals: The number of valid box proposals. context_features: A float Tensor of shape [batch_size, context_size, num_context_features]. valid_context_size: A int32 Tensor of shape [batch_size]. Returns: A float32 Tensor with shape [K, new_height, new_width, depth]. """ del image_shape box_features = self._crop_and_resize_fn( features_to_crop, proposal_boxes_normalized, None, [self._initial_crop_size, self._initial_crop_size]) flattened_box_features = self._flatten_first_two_dimensions(box_features) flattened_box_features = self._maxpool_layer(flattened_box_features) if self._attention_position == ( faster_rcnn_pb2.AttentionPosition.POST_RPN): attention_features = self._context_feature_extract_fn( box_features=flattened_box_features, num_proposals=num_proposals, context_features=context_features, valid_context_size=valid_context_size) # Adds box features with attention features. 
flattened_box_features += self._flatten_first_two_dimensions( attention_features) return flattened_box_features def _extract_box_classifier_features( self, flattened_box_features, num_proposals, context_features, valid_context_size, attention_position=( faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER)): if self._feature_extractor_for_box_classifier_features == ( _UNINITIALIZED_FEATURE_EXTRACTOR): self._feature_extractor_for_box_classifier_features = ( self._feature_extractor.get_box_classifier_feature_extractor_model( name=self.second_stage_feature_extractor_scope)) if self._feature_extractor_for_box_classifier_features: box_classifier_features = ( self._feature_extractor_for_box_classifier_features( flattened_box_features)) else: box_classifier_features = ( self._feature_extractor.extract_box_classifier_features( flattened_box_features, scope=self.second_stage_feature_extractor_scope)) if self._attention_position == ( faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER): attention_features = self._context_feature_extract_fn( box_features=box_classifier_features, num_proposals=num_proposals, context_features=context_features, valid_context_size=valid_context_size) # Adds box features with attention features. box_classifier_features += self._flatten_first_two_dimensions( attention_features) return box_classifier_features
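# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A hedged example of how the side inputs produced by get_side_inputs() might be
# threaded through predict()/postprocess(). `model` is assumed to be an
# already-built ContextRCNNMetaArch; the image size and context feature
# dimensions below are placeholders chosen for illustration, not values
# prescribed by this module.
#
#   features = {
#       fields.InputDataFields.image: tf.zeros([1, 640, 640, 3]),
#       fields.InputDataFields.context_features: tf.zeros([1, 8, 256]),
#       fields.InputDataFields.valid_context_size: tf.constant([8]),
#   }
#   side_inputs = ContextRCNNMetaArch.get_side_inputs(features)
#   preprocessed, true_image_shapes = model.preprocess(
#       features[fields.InputDataFields.image])
#   prediction_dict = model.predict(
#       preprocessed, true_image_shapes, **side_inputs)
#   detections = model.postprocess(prediction_dict, true_image_shapes)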
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_meta_arch.py
context_rcnn_meta_arch.py
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.meta_architectures.faster_rcnn_meta_arch.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from six.moves import range import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib from object_detection.utils import test_utils class FasterRCNNMetaArchTest( faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase, parameterized.TestCase): def test_postprocess_second_stage_only_inference_mode_with_masks(self): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6) batch_size = 2 total_num_padded_proposals = batch_size * model.max_num_proposals def graph_fn(): proposal_boxes = tf.constant( [[[1, 1, 2, 3], [0, 0, 1, 1], [.5, .5, .6, .6], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], [[2, 3, 6, 8], [1, 2, 5, 3], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) num_proposals = tf.constant([3, 2], dtype=tf.int32) refined_box_encodings = tf.zeros( [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) class_predictions_with_background = tf.ones( [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) mask_height = 2 mask_width = 2 mask_predictions = 30. 
* tf.ones( [total_num_padded_proposals, model.num_classes, mask_height, mask_width], dtype=tf.float32) _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) detections = model.postprocess({ 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': proposal_boxes, 'image_shape': image_shape, 'mask_predictions': mask_predictions }, true_image_shapes) return (detections['detection_boxes'], detections['detection_scores'], detections['detection_classes'], detections['num_detections'], detections['detection_masks']) (detection_boxes, detection_scores, detection_classes, num_detections, detection_masks) = self.execute_cpu(graph_fn, [], graph=g) exp_detection_masks = np.array([[[[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]]], [[[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[0, 0], [0, 0]]]]) self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) self.assertAllClose(detection_scores, [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) self.assertAllClose(detection_classes, [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) self.assertAllClose(num_detections, [5, 4]) self.assertAllClose(detection_masks, exp_detection_masks) self.assertTrue(np.amax(detection_masks <= 1.0)) self.assertTrue(np.amin(detection_masks >= 0.0)) def test_postprocess_second_stage_only_inference_mode_with_calibration(self): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6, calibration_mapping_value=0.5) batch_size = 2 total_num_padded_proposals = batch_size * model.max_num_proposals def graph_fn(): proposal_boxes = tf.constant( [[[1, 1, 2, 3], [0, 0, 1, 1], [.5, .5, .6, .6], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], [[2, 3, 6, 8], [1, 2, 5, 3], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) num_proposals = tf.constant([3, 2], dtype=tf.int32) refined_box_encodings = tf.zeros( [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) class_predictions_with_background = tf.ones( [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) mask_height = 2 mask_width = 2 mask_predictions = 30. * tf.ones( [total_num_padded_proposals, model.num_classes, mask_height, mask_width], dtype=tf.float32) _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) detections = model.postprocess({ 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': proposal_boxes, 'image_shape': image_shape, 'mask_predictions': mask_predictions }, true_image_shapes) return (detections['detection_boxes'], detections['detection_scores'], detections['detection_classes'], detections['num_detections'], detections['detection_masks']) (detection_boxes, detection_scores, detection_classes, num_detections, detection_masks) = self.execute_cpu(graph_fn, [], graph=g) exp_detection_masks = np.array([[[[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]]], [[[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[0, 0], [0, 0]]]]) self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) # All scores map to 0.5, except for the final one, which is pruned. 
self.assertAllClose(detection_scores, [[0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.0]]) self.assertAllClose(detection_classes, [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) self.assertAllClose(num_detections, [5, 4]) self.assertAllClose(detection_masks, exp_detection_masks) self.assertTrue(np.amax(detection_masks <= 1.0)) self.assertTrue(np.amin(detection_masks >= 0.0)) def test_postprocess_second_stage_only_inference_mode_with_shared_boxes(self): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6) batch_size = 2 total_num_padded_proposals = batch_size * model.max_num_proposals def graph_fn(): proposal_boxes = tf.constant( [[[1, 1, 2, 3], [0, 0, 1, 1], [.5, .5, .6, .6], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], [[2, 3, 6, 8], [1, 2, 5, 3], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) num_proposals = tf.constant([3, 2], dtype=tf.int32) # This has 1 box instead of one for each class. refined_box_encodings = tf.zeros( [total_num_padded_proposals, 1, 4], dtype=tf.float32) class_predictions_with_background = tf.ones( [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) detections = model.postprocess({ 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': proposal_boxes, 'image_shape': image_shape, }, true_image_shapes) return (detections['detection_boxes'], detections['detection_scores'], detections['detection_classes'], detections['num_detections']) (detection_boxes, detection_scores, detection_classes, num_detections) = self.execute_cpu(graph_fn, [], graph=g) self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) self.assertAllClose(detection_scores, [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) self.assertAllClose(detection_classes, [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) self.assertAllClose(num_detections, [5, 4]) @parameterized.parameters( {'masks_are_class_agnostic': False}, {'masks_are_class_agnostic': True}, ) def test_predict_correct_shapes_in_inference_mode_three_stages_with_masks( self, masks_are_class_agnostic): batch_size = 2 image_size = 10 with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=3, second_stage_batch_size=2, predict_masks=True, masks_are_class_agnostic=masks_are_class_agnostic) def graph_fn(): shape = [tf.random_uniform([], minval=batch_size, maxval=batch_size + 1, dtype=tf.int32), tf.random_uniform([], minval=image_size, maxval=image_size + 1, dtype=tf.int32), tf.random_uniform([], minval=image_size, maxval=image_size + 1, dtype=tf.int32), 3] image = tf.zeros(shape) _, true_image_shapes = model.preprocess(image) detections = model.predict(image, true_image_shapes) return (detections['detection_boxes'], detections['detection_classes'], detections['detection_scores'], detections['num_detections'], detections['detection_masks'], detections['mask_predictions']) (detection_boxes, detection_scores, detection_classes, num_detections, detection_masks, mask_predictions) = self.execute_cpu(graph_fn, [], graph=g) self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) self.assertAllEqual(detection_masks.shape, [2, 5, 14, 14]) self.assertAllEqual(detection_classes.shape, [2, 5]) self.assertAllEqual(detection_scores.shape, [2, 5]) self.assertAllEqual(num_detections.shape, [2]) num_classes = 1 if 
masks_are_class_agnostic else 2 self.assertAllEqual(mask_predictions.shape, [10, num_classes, 14, 14]) def test_raw_detection_boxes_and_anchor_indices_correct(self): batch_size = 2 image_size = 10 with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=2, share_box_across_classes=True, return_raw_detections_during_predict=True) def graph_fn(): shape = [tf.random_uniform([], minval=batch_size, maxval=batch_size + 1, dtype=tf.int32), tf.random_uniform([], minval=image_size, maxval=image_size + 1, dtype=tf.int32), tf.random_uniform([], minval=image_size, maxval=image_size + 1, dtype=tf.int32), 3] image = tf.zeros(shape) _, true_image_shapes = model.preprocess(image) predict_tensor_dict = model.predict(image, true_image_shapes) detections = model.postprocess(predict_tensor_dict, true_image_shapes) return (detections['detection_boxes'], detections['num_detections'], detections['detection_anchor_indices'], detections['raw_detection_boxes'], predict_tensor_dict['raw_detection_boxes']) (detection_boxes, num_detections, detection_anchor_indices, raw_detection_boxes, predict_raw_detection_boxes) = self.execute_cpu(graph_fn, [], graph=g) # Verify that the raw detections from predict and postprocess are the # same. self.assertAllClose( np.squeeze(predict_raw_detection_boxes), raw_detection_boxes) # Verify that the raw detection boxes at detection anchor indices are the # same as the postprocessed detections. for i in range(batch_size): num_detections_per_image = int(num_detections[i]) detection_boxes_per_image = detection_boxes[i][ :num_detections_per_image] detection_anchor_indices_per_image = detection_anchor_indices[i][ :num_detections_per_image] raw_detections_per_image = np.squeeze(raw_detection_boxes[i]) raw_detections_at_anchor_indices = raw_detections_per_image[ detection_anchor_indices_per_image] self.assertAllClose(detection_boxes_per_image, raw_detections_at_anchor_indices) @parameterized.parameters( {'masks_are_class_agnostic': False}, {'masks_are_class_agnostic': True}, ) def test_predict_gives_correct_shapes_in_train_mode_both_stages_with_masks( self, masks_are_class_agnostic): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=True, number_of_stages=3, second_stage_batch_size=7, predict_masks=True, masks_are_class_agnostic=masks_are_class_agnostic) batch_size = 2 image_size = 10 max_num_proposals = 7 def graph_fn(): image_shape = (batch_size, image_size, image_size, 3) preprocessed_inputs = tf.zeros(image_shape, dtype=tf.float32) groundtruth_boxes_list = [ tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32) ] groundtruth_classes_list = [ tf.constant([[1, 0], [0, 1]], dtype=tf.float32), tf.constant([[1, 0], [1, 0]], dtype=tf.float32) ] groundtruth_weights_list = [ tf.constant([1, 1], dtype=tf.float32), tf.constant([1, 1], dtype=tf.float32)] _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) model.provide_groundtruth( groundtruth_boxes_list, groundtruth_classes_list, groundtruth_weights_list=groundtruth_weights_list) result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes) return result_tensor_dict['mask_predictions'] mask_shape_1 = 1 if masks_are_class_agnostic else model._num_classes mask_out = self.execute_cpu(graph_fn, [], graph=g) self.assertAllEqual(mask_out.shape, (2 * max_num_proposals, mask_shape_1, 14, 14)) def test_postprocess_third_stage_only_inference_mode(self): 
batch_size = 2 initial_crop_size = 3 maxpool_stride = 1 height = initial_crop_size // maxpool_stride width = initial_crop_size // maxpool_stride depth = 3 with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=3, second_stage_batch_size=6, predict_masks=True) total_num_padded_proposals = batch_size * model.max_num_proposals def graph_fn(images_shape, num_proposals, proposal_boxes, refined_box_encodings, class_predictions_with_background): _, true_image_shapes = model.preprocess( tf.zeros(images_shape)) detections = model.postprocess({ 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': proposal_boxes, 'image_shape': images_shape, 'detection_boxes': tf.zeros([2, 5, 4]), 'detection_masks': tf.zeros([2, 5, 14, 14]), 'detection_scores': tf.zeros([2, 5]), 'detection_classes': tf.zeros([2, 5]), 'num_detections': tf.zeros([2]), 'detection_features': tf.zeros([2, 5, width, height, depth]) }, true_image_shapes) return (detections['detection_boxes'], detections['detection_masks'], detections['detection_scores'], detections['detection_classes'], detections['num_detections'], detections['detection_features']) images_shape = np.array((2, 36, 48, 3), dtype=np.int32) proposal_boxes = np.array( [[[1, 1, 2, 3], [0, 0, 1, 1], [.5, .5, .6, .6], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], [[2, 3, 6, 8], [1, 2, 5, 3], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]]) num_proposals = np.array([3, 2], dtype=np.int32) refined_box_encodings = np.zeros( [total_num_padded_proposals, model.num_classes, 4]) class_predictions_with_background = np.ones( [total_num_padded_proposals, model.num_classes+1]) (detection_boxes, detection_masks, detection_scores, detection_classes, num_detections, detection_features) = self.execute_cpu(graph_fn, [images_shape, num_proposals, proposal_boxes, refined_box_encodings, class_predictions_with_background], graph=g) self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) self.assertAllEqual(detection_masks.shape, [2, 5, 14, 14]) self.assertAllClose(detection_scores.shape, [2, 5]) self.assertAllClose(detection_classes.shape, [2, 5]) self.assertAllClose(num_detections.shape, [2]) self.assertTrue(np.amax(detection_masks <= 1.0)) self.assertTrue(np.amin(detection_masks >= 0.0)) self.assertAllEqual(detection_features.shape, [2, 5, width, height, depth]) self.assertGreaterEqual(np.amax(detection_features), 0) def _get_box_classifier_features_shape(self, image_size, batch_size, max_num_proposals, initial_crop_size, maxpool_stride, num_features): return (batch_size * max_num_proposals, initial_crop_size // maxpool_stride, initial_crop_size // maxpool_stride, num_features) def test_output_final_box_features(self): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6, output_final_box_features=True) batch_size = 2 total_num_padded_proposals = batch_size * model.max_num_proposals def graph_fn(): proposal_boxes = tf.constant([[[1, 1, 2, 3], [0, 0, 1, 1], [.5, .5, .6, .6], 4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0]], [[2, 3, 6, 8], [1, 2, 5, 3], 4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0]]], dtype=tf.float32) num_proposals = tf.constant([3, 2], dtype=tf.int32) refined_box_encodings = tf.zeros( [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) class_predictions_with_background = tf.ones( [total_num_padded_proposals, model.num_classes + 1], 
dtype=tf.float32) image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) mask_height = 2 mask_width = 2 mask_predictions = 30. * tf.ones([ total_num_padded_proposals, model.num_classes, mask_height, mask_width ], dtype=tf.float32) _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) rpn_features_to_crop = tf.ones((batch_size, mask_height, mask_width, 3), tf.float32) detections = model.postprocess( { 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': proposal_boxes, 'image_shape': image_shape, 'mask_predictions': mask_predictions, 'rpn_features_to_crop': [rpn_features_to_crop] }, true_image_shapes) self.assertIn('detection_features', detections) return (detections['detection_boxes'], detections['detection_scores'], detections['detection_classes'], detections['num_detections'], detections['detection_masks']) (detection_boxes, detection_scores, detection_classes, num_detections, detection_masks) = self.execute_cpu(graph_fn, [], graph=g) exp_detection_masks = np.array([[[[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]]], [[[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[0, 0], [0, 0]]]]) self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) self.assertAllClose(detection_scores, [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) self.assertAllClose(detection_classes, [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) self.assertAllClose(num_detections, [5, 4]) self.assertAllClose(detection_masks, exp_detection_masks) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py
faster_rcnn_meta_arch_test.py
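# Illustrative sketch, not part of the package: the postprocess tests above rely
# on one piece of shape arithmetic, namely that the cropped detection_features
# and the box classifier features take their spatial size from
# initial_crop_size // maxpool_stride. The helper below restates that arithmetic
# so the expected shapes in the assertions are easy to reproduce; the concrete
# numbers in the usage comment are simply the values used in the test
# (batch_size=2, initial_crop_size=3, maxpool_stride=1, depth=3, five retained
# detections, first_stage_max_proposals=8).
def expected_feature_shapes(batch_size, max_num_proposals, initial_crop_size,
                            maxpool_stride, depth, max_detections):
  """Returns (box_classifier_features_shape, detection_features_shape)."""
  crop_size = initial_crop_size // maxpool_stride
  box_classifier_features_shape = (batch_size * max_num_proposals,
                                   crop_size, crop_size, depth)
  detection_features_shape = (batch_size, max_detections,
                              crop_size, crop_size, depth)
  return box_classifier_features_shape, detection_features_shape

# With the test values above, detection_features is expected to have shape
# (2, 5, 3, 3, 3), matching the assertAllEqual on detection_features.shape.
assert expected_feature_shapes(2, 8, 3, 1, 3, 5)[1] == (2, 5, 3, 3, 3)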
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """R-FCN meta-architecture definition. R-FCN: Dai, Jifeng, et al. "R-FCN: Object Detection via Region-based Fully Convolutional Networks." arXiv preprint arXiv:1605.06409 (2016). The R-FCN meta architecture is similar to Faster R-CNN and only differs in the second stage. Hence this class inherits FasterRCNNMetaArch and overrides only the `_predict_second_stage` method. Similar to Faster R-CNN we allow for two modes: number_of_stages=1 and number_of_stages=2. In the former setting, all of the user facing methods (e.g., predict, postprocess, loss) can be used as if the model consisted only of the RPN, returning class agnostic proposals (these can be thought of as approximate detections with no associated class information). In the latter setting, proposals are computed, then passed through a second stage "box classifier" to yield (multi-class) detections. Implementations of R-FCN models must define a new FasterRCNNFeatureExtractor and override three methods: `preprocess`, `_extract_proposal_features` (the first stage of the model), and `_extract_box_classifier_features` (the second stage of the model). Optionally, the `restore_fn` method can be overridden. See tests for an example. See notes in the documentation of Faster R-CNN meta-architecture as they all apply here. """ import tensorflow.compat.v1 as tf from object_detection.core import box_predictor from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import ops class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch): """R-FCN Meta-architecture definition.""" def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, second_stage_target_assigner, second_stage_rfcn_box_predictor, second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, hard_example_miner, parallel_iterations=16, add_summaries=True, clip_anchors_to_image=False, use_static_shapes=False, resize_masks=False, freeze_batchnorm=False, return_raw_detections_during_predict=False, output_final_box_features=False, output_final_box_rpn_features=False): """RFCNMetaArch Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. num_classes: Number of classes. 
Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). image_resizer_fn: A callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. See builders/image_resizer_builder.py. feature_extractor: A FasterRCNNFeatureExtractor object. number_of_stages: Valid values are {1, 2}. If 1 will only construct the Region Proposal Network (RPN) part of the model. first_stage_anchor_generator: An anchor_generator.AnchorGenerator object (note that currently we only support grid_anchor_generator.GridAnchorGenerator objects) first_stage_target_assigner: Target assigner to use for first stage of R-FCN (RPN). first_stage_atrous_rate: A single integer indicating the atrous rate for the single convolution op which is applied to the `rpn_features_to_crop` tensor to obtain a tensor to be used for box prediction. Some feature extractors optionally allow for producing feature maps computed at denser resolutions. The atrous rate is used to compensate for the denser feature maps by using an effectively larger receptive field. (This should typically be set to 1). first_stage_box_predictor_arg_scope_fn: Either a Keras layer hyperparams object or a function to construct tf-slim arg_scope for conv2d, separable_conv2d and fully_connected ops. Used for the RPN box predictor. If it is a keras hyperparams object the RPN box predictor will be a Keras model. If it is a function to construct an arg scope it will be a tf-slim box predictor. first_stage_box_predictor_kernel_size: Kernel size to use for the convolution op just prior to RPN box predictions. first_stage_box_predictor_depth: Output depth for the convolution op just prior to RPN box predictions. first_stage_minibatch_size: The "batch size" to use for computing the objectness and location loss of the region proposal network. This "batch size" refers to the number of anchors selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. first_stage_sampler: The sampler for the boxes used to calculate the RPN loss after the first stage. first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window`(with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`. This is used to perform non max suppression on the boxes predicted by the Region Proposal Network (RPN). See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. first_stage_max_proposals: Maximum number of boxes to retain after performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN). first_stage_localization_loss_weight: A float first_stage_objectness_loss_weight: A float crop_and_resize_fn: A differentiable resampler to use for cropping RPN proposal features. second_stage_target_assigner: Target assigner to use for second stage of R-FCN. If the model is configured with multiple prediction heads, this target assigner is used to generate targets for all heads (with the correct `unmatched_class_label`). 
second_stage_rfcn_box_predictor: RFCN box predictor to use for second stage. second_stage_batch_size: The batch size used for computing the classification and refined location loss of the box classifier. This "batch size" refers to the number of proposals selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. second_stage_sampler: The sampler for the boxes used for second stage box classifier. second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores`, optional `clip_window` and optional (kwarg) `mask` inputs (with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`, and (optionally) `detection_masks`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. second_stage_score_conversion_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. second_stage_localization_loss_weight: A float second_stage_classification_loss_weight: A float second_stage_classification_loss: A string indicating which loss function to use, supports 'softmax' and 'sigmoid'. hard_example_miner: A losses.HardExampleMiner object (can be None). parallel_iterations: (Optional) The number of iterations allowed to run in parallel for calls to tf.map_fn. add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph. clip_anchors_to_image: The anchors generated are clip to the window size without filtering the nonoverlapping anchors. This generates a static number of anchors. This argument is unused. use_static_shapes: If True, uses implementation of ops with static shape guarantees. resize_masks: Indicates whether the masks presend in the groundtruth should be resized in the model with `image_resizer_fn` freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. return_raw_detections_during_predict: Whether to return raw detection boxes in the predict() method. These are decoded boxes that have not been through postprocessing (i.e. NMS). Default False. output_final_box_features: Whether to output final box features. If true, it crops the feature map based on the final box prediction and returns it in the dict as detection_features. output_final_box_rpn_features: Whether to output rpn box features. If true, it crops the rpn feature map based on the final box prediction and returns it in the dict as detection_features. Raises: ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` ValueError: If first_stage_anchor_generator is not of type grid_anchor_generator.GridAnchorGenerator. """ # TODO(rathodv): add_summaries and crop_and_resize_fn is currently # unused. Respect that directive in the future. 
super(RFCNMetaArch, self).__init__( is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, None, # initial_crop_size is not used in R-FCN None, # maxpool_kernel_size is not use in R-FCN None, # maxpool_stride is not use in R-FCN second_stage_target_assigner, None, # fully_connected_box_predictor is not used in R-FCN. second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, 1.0, # second stage mask prediction loss weight isn't used in R-FCN. hard_example_miner, parallel_iterations, add_summaries, clip_anchors_to_image, use_static_shapes, resize_masks, freeze_batchnorm=freeze_batchnorm, return_raw_detections_during_predict=( return_raw_detections_during_predict), output_final_box_features=output_final_box_features, output_final_box_rpn_features=output_final_box_rpn_features) self._rfcn_box_predictor = second_stage_rfcn_box_predictor def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features, anchors, image_shape, true_image_shapes): """Predicts the output tensors from 2nd stage of R-FCN. Args: rpn_box_encodings: 3-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes. rpn_objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). rpn_features: A list of single 4-D float32 tensor with shape [batch_size, height, width, depth] representing image features from the RPN. anchors: 2-D float tensor of shape [num_anchors, self._box_coder.code_size]. image_shape: A 1D int32 tensors of size [4] containing the image shape. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, 4] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals 2) class_predictions_with_background: a 2-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 
4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes (in absolute coordinates). 5) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes (in normalized coordinates). Can be used to override the boxes proposed by the RPN, thus enabling one to extract box classification and prediction for externally selected areas of the image. 6) box_classifier_features: a 4-D float32 tensor, of shape [batch_size, feature_map_height, feature_map_width, depth], representing the box classifier features. """ image_shape_2d = tf.tile(tf.expand_dims(image_shape[1:], 0), [image_shape[0], 1]) (proposal_boxes_normalized, _, _, num_proposals, _, _) = self._postprocess_rpn(rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape_2d, true_image_shapes) rpn_features = rpn_features[0] box_classifier_features = ( self._extract_box_classifier_features(rpn_features)) if self._rfcn_box_predictor.is_keras_model: box_predictions = self._rfcn_box_predictor( [box_classifier_features], proposal_boxes=proposal_boxes_normalized) else: box_predictions = self._rfcn_box_predictor.predict( [box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, proposal_boxes=proposal_boxes_normalized) refined_box_encodings = tf.squeeze( tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], axis=1), axis=1) class_predictions_with_background = tf.squeeze( tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1), axis=1) absolute_proposal_boxes = ops.normalized_to_image_coordinates( proposal_boxes_normalized, image_shape, parallel_iterations=self._parallel_iterations) prediction_dict = { 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': absolute_proposal_boxes, 'box_classifier_features': box_classifier_features, 'proposal_boxes_normalized': proposal_boxes_normalized, 'final_anchors': absolute_proposal_boxes } if self._return_raw_detections_during_predict: prediction_dict.update(self._raw_detections_and_feature_map_inds( refined_box_encodings, absolute_proposal_boxes)) return prediction_dict def regularization_losses(self): """Returns a list of regularization losses for this model. Returns a list of regularization losses for this model that the estimator needs to use during training/optimization. Returns: A list of regularization loss tensors. """ reg_losses = super(RFCNMetaArch, self).regularization_losses() if self._rfcn_box_predictor.is_keras_model: reg_losses.extend(self._rfcn_box_predictor.losses) return reg_losses def updates(self): """Returns a list of update operators for this model. Returns a list of update operators for this model that must be executed at each training step. The estimator's train op needs to have a control dependency on these updates. Returns: A list of update operators. """ update_ops = super(RFCNMetaArch, self).updates() if self._rfcn_box_predictor.is_keras_model: update_ops.extend( self._rfcn_box_predictor.get_updates_for(None)) update_ops.extend( self._rfcn_box_predictor.get_updates_for( self._rfcn_box_predictor.inputs)) return update_ops
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/rfcn_meta_arch.py
rfcn_meta_arch.py
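# Illustrative sketch, not part of the package: as the rfcn_meta_arch module
# docstring notes, a concrete R-FCN model only needs a FasterRCNNFeatureExtractor
# with preprocess, _extract_proposal_features and _extract_box_classifier_features
# overridden. The class below mirrors the fake extractors used in the tests in
# this package; the single slim.conv2d layers are stand-ins, not a real backbone.
import tensorflow.compat.v1 as tf
import tf_slim as slim

from object_detection.meta_architectures import faster_rcnn_meta_arch


class MinimalRFCNFeatureExtractor(
    faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
  """Toy feature extractor for illustration only."""

  def __init__(self, is_training=False):
    super(MinimalRFCNFeatureExtractor, self).__init__(
        is_training=is_training,
        first_stage_features_stride=32,
        reuse_weights=None,
        weight_decay=0.0)

  def preprocess(self, resized_inputs):
    # Real extractors normalize here (e.g. mean subtraction); identity keeps the
    # sketch minimal.
    return tf.identity(resized_inputs)

  def _extract_proposal_features(self, preprocessed_inputs, scope):
    # First stage: returns a feature map plus a (here empty) activations dict.
    with tf.variable_scope('first_stage'):
      features = slim.conv2d(
          preprocessed_inputs, num_outputs=64, kernel_size=3, scope='conv1')
    return features, {}

  def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    # Second stage: features fed to the position-sensitive RFCN box predictor.
    with tf.variable_scope('second_stage'):
      return slim.conv2d(
          proposal_feature_maps, num_outputs=64, kernel_size=1, scope='conv2')

# An R-FCN model is then assembled by passing an instance of this extractor,
# together with a second_stage_rfcn_box_predictor and the remaining constructor
# arguments documented above, to RFCNMetaArch.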
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.meta_architectures.ssd_meta_arch.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.meta_architectures import ssd_meta_arch_test_lib from object_detection.protos import model_pb2 from object_detection.utils import test_utils # pylint: disable=g-import-not-at-top try: import tf_slim as slim except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top keras = tf.keras.layers class SsdMetaArchTest(ssd_meta_arch_test_lib.SSDMetaArchTestBase, parameterized.TestCase): def _create_model( self, apply_hard_mining=True, normalize_loc_loss_by_codesize=False, add_background_class=True, random_example_sampling=False, expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE, min_num_negative_samples=1, desired_negative_sampling_ratio=3, predict_mask=False, use_static_shapes=False, nms_max_size_per_class=5, calibration_mapping_value=None, return_raw_detections_during_predict=False): return super(SsdMetaArchTest, self)._create_model( model_fn=ssd_meta_arch.SSDMetaArch, apply_hard_mining=apply_hard_mining, normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, add_background_class=add_background_class, random_example_sampling=random_example_sampling, expected_loss_weights=expected_loss_weights, min_num_negative_samples=min_num_negative_samples, desired_negative_sampling_ratio=desired_negative_sampling_ratio, predict_mask=predict_mask, use_static_shapes=use_static_shapes, nms_max_size_per_class=nms_max_size_per_class, calibration_mapping_value=calibration_mapping_value, return_raw_detections_during_predict=( return_raw_detections_during_predict)) def test_preprocess_preserves_shapes_with_dynamic_input_image(self): width = tf.random.uniform([], minval=5, maxval=10, dtype=tf.int32) batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) shape = tf.stack([batch, 5, width, 3]) image = tf.random.uniform(shape) model, _, _, _ = self._create_model() preprocessed_inputs, _ = model.preprocess(image) self.assertTrue( preprocessed_inputs.shape.is_compatible_with([None, 5, None, 3])) def test_preprocess_preserves_shape_with_static_input_image(self): image = tf.random.uniform([2, 3, 3, 3]) model, _, _, _ = self._create_model() preprocessed_inputs, _ = model.preprocess(image) self.assertTrue(preprocessed_inputs.shape.is_compatible_with([2, 3, 3, 3])) def test_predict_result_shapes_on_image_with_dynamic_shape(self): with test_utils.GraphContextOrNone() as g: model, num_classes, num_anchors, code_size = self._create_model() def graph_fn(): size = 
tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) shape = tf.stack([batch, size, size, 3]) image = tf.random.uniform(shape) prediction_dict = model.predict(image, true_image_shapes=None) self.assertIn('box_encodings', prediction_dict) self.assertIn('class_predictions_with_background', prediction_dict) self.assertIn('feature_maps', prediction_dict) self.assertIn('anchors', prediction_dict) self.assertIn('final_anchors', prediction_dict) return (prediction_dict['box_encodings'], prediction_dict['final_anchors'], prediction_dict['class_predictions_with_background'], tf.constant(num_anchors), batch) (box_encodings_out, final_anchors, class_predictions_with_background, num_anchors, batch_size) = self.execute_cpu(graph_fn, [], graph=g) self.assertAllEqual(box_encodings_out.shape, (batch_size, num_anchors, code_size)) self.assertAllEqual(final_anchors.shape, (batch_size, num_anchors, code_size)) self.assertAllEqual( class_predictions_with_background.shape, (batch_size, num_anchors, num_classes + 1)) def test_predict_result_shapes_on_image_with_static_shape(self): with test_utils.GraphContextOrNone() as g: model, num_classes, num_anchors, code_size = self._create_model() def graph_fn(input_image): predictions = model.predict(input_image, true_image_shapes=None) return (predictions['box_encodings'], predictions['class_predictions_with_background'], predictions['final_anchors']) batch_size = 3 image_size = 2 channels = 3 input_image = np.random.rand(batch_size, image_size, image_size, channels).astype(np.float32) expected_box_encodings_shape = (batch_size, num_anchors, code_size) expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1) final_anchors_shape = (batch_size, num_anchors, 4) (box_encodings, class_predictions, final_anchors) = self.execute( graph_fn, [input_image], graph=g) self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape) self.assertAllEqual(class_predictions.shape, expected_class_predictions_shape) self.assertAllEqual(final_anchors.shape, final_anchors_shape) def test_predict_with_raw_output_fields(self): with test_utils.GraphContextOrNone() as g: model, num_classes, num_anchors, code_size = self._create_model( return_raw_detections_during_predict=True) def graph_fn(input_image): predictions = model.predict(input_image, true_image_shapes=None) return (predictions['box_encodings'], predictions['class_predictions_with_background'], predictions['final_anchors'], predictions['raw_detection_boxes'], predictions['raw_detection_feature_map_indices']) batch_size = 3 image_size = 2 channels = 3 input_image = np.random.rand(batch_size, image_size, image_size, channels).astype(np.float32) expected_box_encodings_shape = (batch_size, num_anchors, code_size) expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1) final_anchors_shape = (batch_size, num_anchors, 4) expected_raw_detection_boxes_shape = (batch_size, num_anchors, 4) (box_encodings, class_predictions, final_anchors, raw_detection_boxes, raw_detection_feature_map_indices) = self.execute( graph_fn, [input_image], graph=g) self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape) self.assertAllEqual(class_predictions.shape, expected_class_predictions_shape) self.assertAllEqual(final_anchors.shape, final_anchors_shape) self.assertAllEqual(raw_detection_boxes.shape, expected_raw_detection_boxes_shape) self.assertAllEqual(raw_detection_feature_map_indices, np.zeros((batch_size, num_anchors))) def 
test_raw_detection_boxes_agree_predict_postprocess(self): with test_utils.GraphContextOrNone() as g: model, _, _, _ = self._create_model( return_raw_detections_during_predict=True) def graph_fn(): size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) shape = tf.stack([batch, size, size, 3]) image = tf.random.uniform(shape) preprocessed_inputs, true_image_shapes = model.preprocess( image) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) raw_detection_boxes_predict = prediction_dict['raw_detection_boxes'] detections = model.postprocess(prediction_dict, true_image_shapes) raw_detection_boxes_postprocess = detections['raw_detection_boxes'] return raw_detection_boxes_predict, raw_detection_boxes_postprocess (raw_detection_boxes_predict_out, raw_detection_boxes_postprocess_out) = self.execute_cpu(graph_fn, [], graph=g) self.assertAllEqual(raw_detection_boxes_predict_out, raw_detection_boxes_postprocess_out) def test_postprocess_results_are_correct(self): with test_utils.GraphContextOrNone() as g: model, _, _, _ = self._create_model() def graph_fn(): size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) shape = tf.stack([batch, size, size, 3]) image = tf.random.uniform(shape) preprocessed_inputs, true_image_shapes = model.preprocess( image) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) detections = model.postprocess(prediction_dict, true_image_shapes) return [ batch, detections['detection_boxes'], detections['detection_scores'], detections['detection_classes'], detections['detection_multiclass_scores'], detections['num_detections'], detections['raw_detection_boxes'], detections['raw_detection_scores'], detections['detection_anchor_indices'] ] expected_boxes = [ [ [0, 0, .5, .5], [0, .5, .5, 1], [.5, 0, 1, .5], [0, 0, 0, 0], # pruned prediction [0, 0, 0, 0] ], # padding [ [0, 0, .5, .5], [0, .5, .5, 1], [.5, 0, 1, .5], [0, 0, 0, 0], # pruned prediction [0, 0, 0, 0] ] ] # padding expected_scores = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] expected_multiclass_scores = [[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]], [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]] expected_classes = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] expected_num_detections = np.array([3, 3]) expected_raw_detection_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], [0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]], [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], [0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]]] expected_raw_detection_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]], [[0, 0], [0, 0], [0, 0], [0, 0]]] expected_detection_anchor_indices = [[0, 1, 2], [0, 1, 2]] (batch, detection_boxes, detection_scores, detection_classes, detection_multiclass_scores, num_detections, raw_detection_boxes, raw_detection_scores, detection_anchor_indices) = self.execute_cpu( graph_fn, [], graph=g) for image_idx in range(batch): self.assertTrue( test_utils.first_rows_close_as_set( detection_boxes[image_idx].tolist(), expected_boxes[image_idx])) self.assertSameElements(detection_anchor_indices[image_idx], expected_detection_anchor_indices[image_idx]) self.assertAllClose(detection_scores, expected_scores) self.assertAllClose(detection_classes, expected_classes) self.assertAllClose(detection_multiclass_scores, expected_multiclass_scores) self.assertAllClose(num_detections, expected_num_detections) self.assertAllEqual(raw_detection_boxes, expected_raw_detection_boxes) 
self.assertAllEqual(raw_detection_scores, expected_raw_detection_scores) def test_postprocess_results_are_correct_static(self): with test_utils.GraphContextOrNone() as g: model, _, _, _ = self._create_model(use_static_shapes=True, nms_max_size_per_class=4) def graph_fn(input_image): preprocessed_inputs, true_image_shapes = model.preprocess(input_image) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) detections = model.postprocess(prediction_dict, true_image_shapes) return (detections['detection_boxes'], detections['detection_scores'], detections['detection_classes'], detections['num_detections'], detections['detection_multiclass_scores']) expected_boxes = [ [ [0, 0, .5, .5], [0, .5, .5, 1], [.5, 0, 1, .5], [0, 0, 0, 0] ], # padding [ [0, 0, .5, .5], [0, .5, .5, 1], [.5, 0, 1, .5], [0, 0, 0, 0] ] ] # padding expected_scores = [[0, 0, 0, 0], [0, 0, 0, 0]] expected_multiclass_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]], [[0, 0], [0, 0], [0, 0], [0, 0]]] expected_classes = [[0, 0, 0, 0], [0, 0, 0, 0]] expected_num_detections = np.array([3, 3]) batch_size = 2 image_size = 2 channels = 3 input_image = np.random.rand(batch_size, image_size, image_size, channels).astype(np.float32) (detection_boxes, detection_scores, detection_classes, num_detections, detection_multiclass_scores) = self.execute(graph_fn, [input_image], graph=g) for image_idx in range(batch_size): self.assertTrue(test_utils.first_rows_close_as_set( detection_boxes[image_idx][ 0:expected_num_detections[image_idx]].tolist(), expected_boxes[image_idx][0:expected_num_detections[image_idx]])) self.assertAllClose( detection_scores[image_idx][0:expected_num_detections[image_idx]], expected_scores[image_idx][0:expected_num_detections[image_idx]]) self.assertAllClose( detection_multiclass_scores[image_idx] [0:expected_num_detections[image_idx]], expected_multiclass_scores[image_idx] [0:expected_num_detections[image_idx]]) self.assertAllClose( detection_classes[image_idx][0:expected_num_detections[image_idx]], expected_classes[image_idx][0:expected_num_detections[image_idx]]) self.assertAllClose(num_detections, expected_num_detections) def test_postprocess_results_are_correct_with_calibration(self): with test_utils.GraphContextOrNone() as g: model, _, _, _ = self._create_model(calibration_mapping_value=0.5) def graph_fn(): size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) shape = tf.stack([batch, size, size, 3]) image = tf.random.uniform(shape) preprocessed_inputs, true_image_shapes = model.preprocess( image) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) detections = model.postprocess(prediction_dict, true_image_shapes) return detections['detection_scores'], detections['raw_detection_scores'] # Calibration mapping value below is set to map all scores to 0.5, except # for the last two detections in each batch (see expected number of # detections below. 
expected_scores = [[0.5, 0.5, 0.5, 0., 0.], [0.5, 0.5, 0.5, 0., 0.]] expected_raw_detection_scores = [ [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]] ] detection_scores, raw_detection_scores = self.execute_cpu(graph_fn, [], graph=g) self.assertAllClose(detection_scores, expected_scores) self.assertAllEqual(raw_detection_scores, expected_raw_detection_scores) def test_loss_results_are_correct(self): with test_utils.GraphContextOrNone() as g: model, num_classes, num_anchors, _ = self._create_model( apply_hard_mining=False) def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2): groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list) prediction_dict = model.predict(preprocessed_tensor, true_image_shapes=None) loss_dict = model.loss(prediction_dict, true_image_shapes=None) return (self._get_value_for_matching_key(loss_dict, 'Loss/localization_loss'), self._get_value_for_matching_key(loss_dict, 'Loss/classification_loss')) batch_size = 2 preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_classes1 = np.array([[1]], dtype=np.float32) groundtruth_classes2 = np.array([[1]], dtype=np.float32) (localization_loss, classification_loss) = self.execute( graph_fn, [ preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2 ], graph=g) expected_localization_loss = 0.0 expected_classification_loss = (batch_size * num_anchors * (num_classes+1) * np.log(2.0)) self.assertAllClose(localization_loss, expected_localization_loss) self.assertAllClose(classification_loss, expected_classification_loss) def test_loss_results_are_correct_with_normalize_by_codesize_true(self): with test_utils.GraphContextOrNone() as g: model, _, _, _ = self._create_model( apply_hard_mining=False, normalize_loc_loss_by_codesize=True) def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2): groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list) prediction_dict = model.predict(preprocessed_tensor, true_image_shapes=None) loss_dict = model.loss(prediction_dict, true_image_shapes=None) return (self._get_value_for_matching_key(loss_dict, 'Loss/localization_loss'),) batch_size = 2 preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) groundtruth_boxes1 = np.array([[0, 0, 1, 1]], dtype=np.float32) groundtruth_boxes2 = np.array([[0, 0, 1, 1]], dtype=np.float32) groundtruth_classes1 = np.array([[1]], dtype=np.float32) groundtruth_classes2 = np.array([[1]], dtype=np.float32) expected_localization_loss = 0.5 / 4 localization_loss = self.execute(graph_fn, [preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2], graph=g) self.assertAllClose(localization_loss, expected_localization_loss) def test_loss_results_are_correct_with_hard_example_mining(self): with test_utils.GraphContextOrNone() as g: model, num_classes, num_anchors, _ = self._create_model() def graph_fn(preprocessed_tensor, 
groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2): groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list) prediction_dict = model.predict(preprocessed_tensor, true_image_shapes=None) loss_dict = model.loss(prediction_dict, true_image_shapes=None) return (self._get_value_for_matching_key(loss_dict, 'Loss/localization_loss'), self._get_value_for_matching_key(loss_dict, 'Loss/classification_loss')) batch_size = 2 preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_classes1 = np.array([[1]], dtype=np.float32) groundtruth_classes2 = np.array([[1]], dtype=np.float32) expected_localization_loss = 0.0 expected_classification_loss = (batch_size * num_anchors * (num_classes+1) * np.log(2.0)) (localization_loss, classification_loss) = self.execute_cpu( graph_fn, [ preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2 ], graph=g) self.assertAllClose(localization_loss, expected_localization_loss) self.assertAllClose(classification_loss, expected_classification_loss) def test_loss_results_are_correct_without_add_background_class(self): with test_utils.GraphContextOrNone() as g: model, num_classes, num_anchors, _ = self._create_model( apply_hard_mining=False, add_background_class=False) def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2): groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list) prediction_dict = model.predict( preprocessed_tensor, true_image_shapes=None) loss_dict = model.loss(prediction_dict, true_image_shapes=None) return (loss_dict['Loss/localization_loss'], loss_dict['Loss/classification_loss']) batch_size = 2 preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_classes1 = np.array([[1]], dtype=np.float32) groundtruth_classes2 = np.array([[1]], dtype=np.float32) expected_localization_loss = 0.0 expected_classification_loss = ( batch_size * num_anchors * num_classes * np.log(2.0)) (localization_loss, classification_loss) = self.execute( graph_fn, [ preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2 ], graph=g) self.assertAllClose(localization_loss, expected_localization_loss) self.assertAllClose(classification_loss, expected_classification_loss) def test_loss_results_are_correct_with_losses_mask(self): with test_utils.GraphContextOrNone() as g: model, num_classes, num_anchors, _ = self._create_model( apply_hard_mining=False) def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, groundtruth_boxes3, groundtruth_classes1, groundtruth_classes2, groundtruth_classes3): groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2, groundtruth_boxes3] groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2, groundtruth_classes3] is_annotated_list = [tf.constant(True), tf.constant(True), tf.constant(False)] 
model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list, is_annotated_list=is_annotated_list) prediction_dict = model.predict(preprocessed_tensor, true_image_shapes=None) loss_dict = model.loss(prediction_dict, true_image_shapes=None) return (self._get_value_for_matching_key(loss_dict, 'Loss/localization_loss'), self._get_value_for_matching_key(loss_dict, 'Loss/classification_loss')) batch_size = 3 preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_boxes3 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_classes1 = np.array([[1]], dtype=np.float32) groundtruth_classes2 = np.array([[1]], dtype=np.float32) groundtruth_classes3 = np.array([[1]], dtype=np.float32) expected_localization_loss = 0.0 # Note that we are subtracting 1 from batch_size, since the final image is # not annotated. expected_classification_loss = ((batch_size - 1) * num_anchors * (num_classes+1) * np.log(2.0)) (localization_loss, classification_loss) = self.execute(graph_fn, [preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, groundtruth_boxes3, groundtruth_classes1, groundtruth_classes2, groundtruth_classes3], graph=g) self.assertAllClose(localization_loss, expected_localization_loss) self.assertAllClose(classification_loss, expected_classification_loss) def test_restore_map_for_detection_ckpt(self): # TODO(rathodv): Support TF2.X if self.is_tf2(): return model, _, _, _ = self._create_model() model.predict(tf.constant(np.array([[[[0, 0], [1, 1]], [[1, 0], [0, 1]]]], dtype=np.float32)), true_image_shapes=None) init_op = tf.global_variables_initializer() saver = tf.train.Saver() save_path = self.get_temp_dir() with self.session() as sess: sess.run(init_op) saved_model_path = saver.save(sess, save_path) var_map = model.restore_map( fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=False) self.assertIsInstance(var_map, dict) saver = tf.train.Saver(var_map) saver.restore(sess, saved_model_path) for var in sess.run(tf.report_uninitialized_variables()): self.assertNotIn('FeatureExtractor', var) def test_restore_map_for_classification_ckpt(self): # TODO(rathodv): Support TF2.X if self.is_tf2(): return # Define mock tensorflow classification graph and save variables. test_graph_classification = tf.Graph() with test_graph_classification.as_default(): image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3]) with tf.variable_scope('mock_model'): net = slim.conv2d(image, num_outputs=32, kernel_size=1, scope='layer1') slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2') init_op = tf.global_variables_initializer() saver = tf.train.Saver() save_path = self.get_temp_dir() with self.session(graph=test_graph_classification) as sess: sess.run(init_op) saved_model_path = saver.save(sess, save_path) # Create tensorflow detection graph and load variables from # classification checkpoint. 
test_graph_detection = tf.Graph() with test_graph_detection.as_default(): model, _, _, _ = self._create_model() inputs_shape = [2, 2, 2, 3] inputs = tf.cast(tf.random_uniform( inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) preprocessed_inputs, true_image_shapes = model.preprocess(inputs) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) model.postprocess(prediction_dict, true_image_shapes) another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable var_map = model.restore_map(fine_tune_checkpoint_type='classification') self.assertNotIn('another_variable', var_map) self.assertIsInstance(var_map, dict) saver = tf.train.Saver(var_map) with self.session(graph=test_graph_detection) as sess: saver.restore(sess, saved_model_path) for var in sess.run(tf.report_uninitialized_variables()): self.assertNotIn(six.ensure_binary('FeatureExtractor'), var) def test_load_all_det_checkpoint_vars(self): if self.is_tf2(): return test_graph_detection = tf.Graph() with test_graph_detection.as_default(): model, _, _, _ = self._create_model() inputs_shape = [2, 2, 2, 3] inputs = tf.cast( tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) preprocessed_inputs, true_image_shapes = model.preprocess(inputs) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) model.postprocess(prediction_dict, true_image_shapes) another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable var_map = model.restore_map( fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=True) self.assertIsInstance(var_map, dict) self.assertIn('another_variable', var_map) def test_load_checkpoint_vars_tf2(self): if not self.is_tf2(): self.skipTest('Not running TF2 checkpoint test with TF1.') model, _, _, _ = self._create_model() inputs_shape = [2, 2, 2, 3] inputs = tf.cast( tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) model(inputs) detection_var_names = sorted([ var.name for var in model.restore_from_objects('detection')[ 'model']._feature_extractor.weights ]) expected_detection_names = [ 'ssd_meta_arch/fake_ssd_keras_feature_extractor/mock_model/layer1/bias:0', 'ssd_meta_arch/fake_ssd_keras_feature_extractor/mock_model/layer1/kernel:0' ] self.assertEqual(detection_var_names, expected_detection_names) full_var_names = sorted([ var.name for var in model.restore_from_objects('full')['model'].weights ]) exepcted_full_names = ['box_predictor_var:0'] + expected_detection_names self.assertEqual(exepcted_full_names, full_var_names) # TODO(vighneshb) Add similar test for classification checkpoint type. # TODO(vighneshb) Test loading a checkpoint from disk to verify that # checkpoints are loaded correctly. 
def test_loss_results_are_correct_with_random_example_sampling(self): with test_utils.GraphContextOrNone() as g: model, num_classes, _, _ = self._create_model( random_example_sampling=True) def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2): groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list) prediction_dict = model.predict( preprocessed_tensor, true_image_shapes=None) loss_dict = model.loss(prediction_dict, true_image_shapes=None) return (self._get_value_for_matching_key(loss_dict, 'Loss/localization_loss'), self._get_value_for_matching_key(loss_dict, 'Loss/classification_loss')) batch_size = 2 preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) groundtruth_classes1 = np.array([[1]], dtype=np.float32) groundtruth_classes2 = np.array([[1]], dtype=np.float32) expected_localization_loss = 0.0 # Among 4 anchors (1 positive, 3 negative) in this test, only 2 anchors are # selected (1 positive, 1 negative) since random sampler will adjust number # of negative examples to make sure positive example fraction in the batch # is 0.5. expected_classification_loss = ( batch_size * 2 * (num_classes + 1) * np.log(2.0)) (localization_loss, classification_loss) = self.execute_cpu( graph_fn, [ preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, groundtruth_classes1, groundtruth_classes2 ], graph=g) self.assertAllClose(localization_loss, expected_localization_loss) self.assertAllClose(classification_loss, expected_classification_loss) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/ssd_meta_arch_test.py
ssd_meta_arch_test.py
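# Illustrative sketch, not part of the package: most of the SSD tests above
# exercise the same three-step flow: preprocess to resize/normalize the batch
# and recover true image shapes, predict to produce raw box encodings and class
# logits, and postprocess to turn those into final detections. `model` below
# stands for any constructed SSDMetaArch instance (the tests obtain one from
# their _create_model helper); building the model itself is elided here.
import numpy as np
import tensorflow.compat.v1 as tf


def detect(model, images):
  """Runs preprocess -> predict -> postprocess, as in the tests above."""
  preprocessed, true_image_shapes = model.preprocess(tf.constant(images))
  prediction_dict = model.predict(preprocessed, true_image_shapes)
  detections = model.postprocess(prediction_dict, true_image_shapes)
  # These keys are the ones asserted on throughout the tests above.
  return (detections['detection_boxes'], detections['detection_scores'],
          detections['detection_classes'], detections['num_detections'])


# Example input matching the shapes used in the tests: a batch of two 2x2 RGB
# images.
example_images = np.random.rand(2, 2, 2, 3).astype(np.float32)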
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.meta_architectures.faster_rcnn_meta_arch.""" import functools from absl.testing import parameterized import numpy as np import six import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.anchor_generators import grid_anchor_generator from object_detection.anchor_generators import multiscale_grid_anchor_generator from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.builders import post_processing_builder from object_detection.core import balanced_positive_negative_sampler as sampler from object_detection.core import losses from object_detection.core import post_processing from object_detection.core import target_assigner from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.protos import box_predictor_pb2 from object_detection.protos import hyperparams_pb2 from object_detection.protos import post_processing_pb2 from object_detection.utils import spatial_transform_ops as spatial_ops from object_detection.utils import test_case from object_detection.utils import test_utils from object_detection.utils import tf_version # pylint: disable=g-import-not-at-top try: import tf_slim as slim except ImportError: # TF 2.0 doesn't ship with contrib. 
pass # pylint: enable=g-import-not-at-top BOX_CODE_SIZE = 4 class FakeFasterRCNNFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Fake feature extractor to use in tests.""" def __init__(self): super(FakeFasterRCNNFeatureExtractor, self).__init__( is_training=False, first_stage_features_stride=32, reuse_weights=None, weight_decay=0.0) def preprocess(self, resized_inputs): return tf.identity(resized_inputs) def _extract_proposal_features(self, preprocessed_inputs, scope): with tf.variable_scope('mock_model'): proposal_features = 0 * slim.conv2d( preprocessed_inputs, num_outputs=3, kernel_size=1, scope='layer1') return proposal_features, {} def _extract_box_classifier_features(self, proposal_feature_maps, scope): with tf.variable_scope('mock_model'): return 0 * slim.conv2d( proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer2') class FakeFasterRCNNMultiLevelFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Fake feature extractor to use in tests.""" def __init__(self): super(FakeFasterRCNNMultiLevelFeatureExtractor, self).__init__( is_training=False, first_stage_features_stride=32, reuse_weights=None, weight_decay=0.0) def preprocess(self, resized_inputs): return tf.identity(resized_inputs) def _extract_proposal_features(self, preprocessed_inputs, scope): with tf.variable_scope('mock_model'): proposal_features_1 = 0 * slim.conv2d( preprocessed_inputs, num_outputs=3, kernel_size=3, scope='layer1', padding='VALID') proposal_features_2 = 0 * slim.conv2d( proposal_features_1, num_outputs=3, kernel_size=3, scope='layer2', padding='VALID') return [proposal_features_1, proposal_features_2], {} def _extract_box_classifier_features(self, proposal_feature_maps, scope): with tf.variable_scope('mock_model'): return 0 * slim.conv2d( proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer3') class FakeFasterRCNNKerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Fake feature extractor to use in tests.""" def __init__(self): super(FakeFasterRCNNKerasFeatureExtractor, self).__init__( is_training=False, first_stage_features_stride=32, weight_decay=0.0) def preprocess(self, resized_inputs): return tf.identity(resized_inputs) def get_proposal_feature_extractor_model(self, name): class ProposalFeatureExtractor(tf.keras.Model): """Dummy proposal feature extraction.""" def __init__(self, name): super(ProposalFeatureExtractor, self).__init__(name=name) self.conv = None def build(self, input_shape): self.conv = tf.keras.layers.Conv2D( 3, kernel_size=1, padding='SAME', name='layer1') def call(self, inputs): return self.conv(inputs) return ProposalFeatureExtractor(name=name) def get_box_classifier_feature_extractor_model(self, name): return tf.keras.Sequential([tf.keras.layers.Conv2D( 3, kernel_size=1, padding='SAME', name=name + '_layer2')]) class FakeFasterRCNNKerasMultilevelFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Fake feature extractor to use in tests.""" def __init__(self): super(FakeFasterRCNNKerasMultilevelFeatureExtractor, self).__init__( is_training=False, first_stage_features_stride=32, weight_decay=0.0) def preprocess(self, resized_inputs): return tf.identity(resized_inputs) def get_proposal_feature_extractor_model(self, name): class ProposalFeatureExtractor(tf.keras.Model): """Dummy proposal feature extraction.""" def __init__(self, name): super(ProposalFeatureExtractor, self).__init__(name=name) self.conv = None def build(self, input_shape): self.conv = 
tf.keras.layers.Conv2D( 3, kernel_size=3, name='layer1') self.conv_1 = tf.keras.layers.Conv2D( 3, kernel_size=3, name='layer1') def call(self, inputs): output_1 = self.conv(inputs) output_2 = self.conv_1(output_1) return [output_1, output_2] return ProposalFeatureExtractor(name=name) class FasterRCNNMetaArchTestBase(test_case.TestCase, parameterized.TestCase): """Base class to test Faster R-CNN and R-FCN meta architectures.""" def _build_arg_scope_with_hyperparams(self, hyperparams_text_proto, is_training): hyperparams = hyperparams_pb2.Hyperparams() text_format.Merge(hyperparams_text_proto, hyperparams) return hyperparams_builder.build(hyperparams, is_training=is_training) def _build_keras_layer_hyperparams(self, hyperparams_text_proto): hyperparams = hyperparams_pb2.Hyperparams() text_format.Merge(hyperparams_text_proto, hyperparams) return hyperparams_builder.KerasLayerHyperparams(hyperparams) def _get_second_stage_box_predictor_text_proto( self, share_box_across_classes=False): share_box_field = 'true' if share_box_across_classes else 'false' box_predictor_text_proto = """ mask_rcnn_box_predictor {{ fc_hyperparams {{ op: FC activation: NONE regularizer {{ l2_regularizer {{ weight: 0.0005 }} }} initializer {{ variance_scaling_initializer {{ factor: 1.0 uniform: true mode: FAN_AVG }} }} }} share_box_across_classes: {share_box_across_classes} }} """.format(share_box_across_classes=share_box_field) return box_predictor_text_proto def _add_mask_to_second_stage_box_predictor_text_proto( self, masks_are_class_agnostic=False): agnostic = 'true' if masks_are_class_agnostic else 'false' box_predictor_text_proto = """ mask_rcnn_box_predictor { predict_instance_masks: true masks_are_class_agnostic: """ + agnostic + """ mask_height: 14 mask_width: 14 conv_hyperparams { op: CONV regularizer { l2_regularizer { weight: 0.0 } } initializer { truncated_normal_initializer { stddev: 0.01 } } } } """ return box_predictor_text_proto def _get_second_stage_box_predictor(self, num_classes, is_training, predict_masks, masks_are_class_agnostic, share_box_across_classes=False, use_keras=False): box_predictor_proto = box_predictor_pb2.BoxPredictor() text_format.Merge(self._get_second_stage_box_predictor_text_proto( share_box_across_classes), box_predictor_proto) if predict_masks: text_format.Merge( self._add_mask_to_second_stage_box_predictor_text_proto( masks_are_class_agnostic), box_predictor_proto) if use_keras: return box_predictor_builder.build_keras( hyperparams_builder.KerasLayerHyperparams, inplace_batchnorm_update=False, freeze_batchnorm=False, box_predictor_config=box_predictor_proto, num_classes=num_classes, num_predictions_per_location_list=None, is_training=is_training) else: return box_predictor_builder.build( hyperparams_builder.build, box_predictor_proto, num_classes=num_classes, is_training=is_training) def _get_model(self, box_predictor, keras_model=False, **common_kwargs): return faster_rcnn_meta_arch.FasterRCNNMetaArch( initial_crop_size=3, maxpool_kernel_size=1, maxpool_stride=1, second_stage_mask_rcnn_box_predictor=box_predictor, **common_kwargs) def _build_model(self, is_training, number_of_stages, second_stage_batch_size, first_stage_max_proposals=8, num_classes=2, hard_mining=False, softmax_second_stage_classification_loss=True, predict_masks=False, pad_to_max_dimension=None, masks_are_class_agnostic=False, use_matmul_crop_and_resize=False, clip_anchors_to_image=False, use_matmul_gather_in_matcher=False, use_static_shapes=False, calibration_mapping_value=None, 
share_box_across_classes=False, return_raw_detections_during_predict=False, output_final_box_features=False, multi_level=False): use_keras = tf_version.is_tf2() def image_resizer_fn(image, masks=None): """Fake image resizer function.""" resized_inputs = [] resized_image = tf.identity(image) if pad_to_max_dimension is not None: resized_image = tf.image.pad_to_bounding_box(image, 0, 0, pad_to_max_dimension, pad_to_max_dimension) resized_inputs.append(resized_image) if masks is not None: resized_masks = tf.identity(masks) if pad_to_max_dimension is not None: resized_masks = tf.image.pad_to_bounding_box(tf.transpose(masks, [1, 2, 0]), 0, 0, pad_to_max_dimension, pad_to_max_dimension) resized_masks = tf.transpose(resized_masks, [2, 0, 1]) resized_inputs.append(resized_masks) resized_inputs.append(tf.shape(image)) return resized_inputs # anchors in this test are designed so that a subset of anchors are inside # the image and a subset of anchors are outside. first_stage_anchor_generator = None if multi_level: min_level = 0 max_level = 1 anchor_scale = 0.1 aspect_ratios = [1.0, 2.0, 0.5] scales_per_octave = 2 normalize_coordinates = False (first_stage_anchor_generator ) = multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator( min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, normalize_coordinates) else: first_stage_anchor_scales = (0.001, 0.005, 0.1) first_stage_anchor_aspect_ratios = (0.5, 1.0, 2.0) first_stage_anchor_strides = (1, 1) first_stage_anchor_generator = grid_anchor_generator.GridAnchorGenerator( first_stage_anchor_scales, first_stage_anchor_aspect_ratios, anchor_stride=first_stage_anchor_strides) first_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'proposal', use_matmul_gather=use_matmul_gather_in_matcher) if use_keras: if multi_level: fake_feature_extractor = FakeFasterRCNNKerasMultilevelFeatureExtractor() else: fake_feature_extractor = FakeFasterRCNNKerasFeatureExtractor() else: if multi_level: fake_feature_extractor = FakeFasterRCNNMultiLevelFeatureExtractor() else: fake_feature_extractor = FakeFasterRCNNFeatureExtractor() first_stage_box_predictor_hyperparams_text_proto = """ op: CONV activation: RELU regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 } } """ if use_keras: first_stage_box_predictor_arg_scope_fn = ( self._build_keras_layer_hyperparams( first_stage_box_predictor_hyperparams_text_proto)) else: first_stage_box_predictor_arg_scope_fn = ( self._build_arg_scope_with_hyperparams( first_stage_box_predictor_hyperparams_text_proto, is_training)) first_stage_box_predictor_kernel_size = 3 first_stage_atrous_rate = 1 first_stage_box_predictor_depth = 512 first_stage_minibatch_size = 3 first_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=0.5, is_static=use_static_shapes) first_stage_nms_score_threshold = -1.0 first_stage_nms_iou_threshold = 1.0 first_stage_max_proposals = first_stage_max_proposals first_stage_non_max_suppression_fn = functools.partial( post_processing.batch_multiclass_non_max_suppression, score_thresh=first_stage_nms_score_threshold, iou_thresh=first_stage_nms_iou_threshold, max_size_per_class=first_stage_max_proposals, max_total_size=first_stage_max_proposals, use_static_shapes=use_static_shapes) first_stage_localization_loss_weight = 1.0 first_stage_objectness_loss_weight = 1.0 post_processing_config = post_processing_pb2.PostProcessing() post_processing_text_proto = """ score_converter: IDENTITY 
batch_non_max_suppression { score_threshold: -20.0 iou_threshold: 1.0 max_detections_per_class: 5 max_total_detections: 5 use_static_shapes: """ +'{}'.format(use_static_shapes) + """ } """ if calibration_mapping_value: calibration_text_proto = """ calibration_config { function_approximation { x_y_pairs { x_y_pair { x: 0.0 y: %f } x_y_pair { x: 1.0 y: %f }}}}""" % (calibration_mapping_value, calibration_mapping_value) post_processing_text_proto = (post_processing_text_proto + ' ' + calibration_text_proto) text_format.Merge(post_processing_text_proto, post_processing_config) second_stage_non_max_suppression_fn, second_stage_score_conversion_fn = ( post_processing_builder.build(post_processing_config)) second_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'detection', use_matmul_gather=use_matmul_gather_in_matcher) second_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=1.0, is_static=use_static_shapes) second_stage_localization_loss_weight = 1.0 second_stage_classification_loss_weight = 1.0 if softmax_second_stage_classification_loss: second_stage_classification_loss = ( losses.WeightedSoftmaxClassificationLoss()) else: second_stage_classification_loss = ( losses.WeightedSigmoidClassificationLoss()) hard_example_miner = None if hard_mining: hard_example_miner = losses.HardExampleMiner( num_hard_examples=1, iou_threshold=0.99, loss_type='both', cls_loss_weight=second_stage_classification_loss_weight, loc_loss_weight=second_stage_localization_loss_weight, max_negatives_per_positive=None) crop_and_resize_fn = ( spatial_ops.multilevel_matmul_crop_and_resize if use_matmul_crop_and_resize else spatial_ops.multilevel_native_crop_and_resize) common_kwargs = { 'is_training': is_training, 'num_classes': num_classes, 'image_resizer_fn': image_resizer_fn, 'feature_extractor': fake_feature_extractor, 'number_of_stages': number_of_stages, 'first_stage_anchor_generator': first_stage_anchor_generator, 'first_stage_target_assigner': first_stage_target_assigner, 'first_stage_atrous_rate': first_stage_atrous_rate, 'first_stage_box_predictor_arg_scope_fn': first_stage_box_predictor_arg_scope_fn, 'first_stage_box_predictor_kernel_size': first_stage_box_predictor_kernel_size, 'first_stage_box_predictor_depth': first_stage_box_predictor_depth, 'first_stage_minibatch_size': first_stage_minibatch_size, 'first_stage_sampler': first_stage_sampler, 'first_stage_non_max_suppression_fn': first_stage_non_max_suppression_fn, 'first_stage_max_proposals': first_stage_max_proposals, 'first_stage_localization_loss_weight': first_stage_localization_loss_weight, 'first_stage_objectness_loss_weight': first_stage_objectness_loss_weight, 'second_stage_target_assigner': second_stage_target_assigner, 'second_stage_batch_size': second_stage_batch_size, 'second_stage_sampler': second_stage_sampler, 'second_stage_non_max_suppression_fn': second_stage_non_max_suppression_fn, 'second_stage_score_conversion_fn': second_stage_score_conversion_fn, 'second_stage_localization_loss_weight': second_stage_localization_loss_weight, 'second_stage_classification_loss_weight': second_stage_classification_loss_weight, 'second_stage_classification_loss': second_stage_classification_loss, 'hard_example_miner': hard_example_miner, 'crop_and_resize_fn': crop_and_resize_fn, 'clip_anchors_to_image': clip_anchors_to_image, 'use_static_shapes': use_static_shapes, 'resize_masks': True, 'return_raw_detections_during_predict': return_raw_detections_during_predict, 'output_final_box_features': 
output_final_box_features } return self._get_model( self._get_second_stage_box_predictor( num_classes=num_classes, is_training=is_training, use_keras=use_keras, predict_masks=predict_masks, masks_are_class_agnostic=masks_are_class_agnostic, share_box_across_classes=share_box_across_classes), **common_kwargs) @parameterized.parameters( {'use_static_shapes': False}, {'use_static_shapes': True}, ) def test_predict_gives_correct_shapes_in_inference_mode_first_stage_only( self, use_static_shapes=False): batch_size = 2 height = 10 width = 12 input_image_shape = (batch_size, height, width, 3) with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=1, second_stage_batch_size=2, clip_anchors_to_image=use_static_shapes, use_static_shapes=use_static_shapes) def graph_fn(images): """Function to construct tf graph for the test.""" preprocessed_inputs, true_image_shapes = model.preprocess(images) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) return (prediction_dict['rpn_box_predictor_features'][0], prediction_dict['rpn_features_to_crop'][0], prediction_dict['image_shape'], prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors']) images = np.zeros(input_image_shape, dtype=np.float32) # In inference mode, anchors are clipped to the image window, but not # pruned. Since MockFasterRCNN.extract_proposal_features returns a # tensor with the same shape as its input, the expected number of anchors # is height * width * the number of anchors per location (i.e. 3x3). expected_num_anchors = height * width * 3 * 3 expected_output_shapes = { 'rpn_box_predictor_features': (batch_size, height, width, 512), 'rpn_features_to_crop': (batch_size, height, width, 3), 'rpn_box_encodings': (batch_size, expected_num_anchors, 4), 'rpn_objectness_predictions_with_background': (batch_size, expected_num_anchors, 2), 'anchors': (expected_num_anchors, 4) } if use_static_shapes: results = self.execute(graph_fn, [images], graph=g) else: results = self.execute_cpu(graph_fn, [images], graph=g) self.assertAllEqual(results[0].shape, expected_output_shapes['rpn_box_predictor_features']) self.assertAllEqual(results[1].shape, expected_output_shapes['rpn_features_to_crop']) self.assertAllEqual(results[2], input_image_shape) self.assertAllEqual(results[3].shape, expected_output_shapes['rpn_box_encodings']) self.assertAllEqual( results[4].shape, expected_output_shapes['rpn_objectness_predictions_with_background']) self.assertAllEqual(results[5].shape, expected_output_shapes['anchors']) # Check that anchors are clipped to window. 
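    # For reference, the bound being checked: with the GridAnchorGenerator
    # configured in _build_model (3 scales x 3 aspect ratios, stride 1), the
    # grid yields height * width * 3 * 3 = 10 * 12 * 9 = 1080 anchors, and in
    # inference mode each [ymin, xmin, ymax, xmax] row is clipped so that it
    # lies inside the [0, height] x [0, width] window, which is what the
    # assertions below verify.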
anchors = results[5] self.assertTrue(np.all(np.greater_equal(anchors, 0))) self.assertTrue(np.all(np.less_equal(anchors[:, 0], height))) self.assertTrue(np.all(np.less_equal(anchors[:, 1], width))) self.assertTrue(np.all(np.less_equal(anchors[:, 2], height))) self.assertTrue(np.all(np.less_equal(anchors[:, 3], width))) @parameterized.parameters( {'use_static_shapes': False}, {'use_static_shapes': True}, ) def test_predict_shape_in_inference_mode_first_stage_only_multi_level( self, use_static_shapes): batch_size = 2 height = 50 width = 52 input_image_shape = (batch_size, height, width, 3) with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=1, second_stage_batch_size=2, clip_anchors_to_image=use_static_shapes, use_static_shapes=use_static_shapes, multi_level=True) def graph_fn(images): """Function to construct tf graph for the test.""" preprocessed_inputs, true_image_shapes = model.preprocess(images) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) return (prediction_dict['rpn_box_predictor_features'][0], prediction_dict['rpn_box_predictor_features'][1], prediction_dict['rpn_features_to_crop'][0], prediction_dict['rpn_features_to_crop'][1], prediction_dict['image_shape'], prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors']) images = np.zeros(input_image_shape, dtype=np.float32) # In inference mode, anchors are clipped to the image window, but not # pruned. Since MockFasterRCNN.extract_proposal_features returns a # tensor with the same shape as its input, the expected number of anchors # is height * width * the number of anchors per location (i.e. 3x3). expected_num_anchors = ((height-2) * (width-2) + (height-4) * (width-4)) * 6 expected_output_shapes = { 'rpn_box_predictor_features_0': (batch_size, height-2, width-2, 512), 'rpn_box_predictor_features_1': (batch_size, height-4, width-4, 512), 'rpn_features_to_crop_0': (batch_size, height-2, width-2, 3), 'rpn_features_to_crop_1': (batch_size, height-4, width-4, 3), 'rpn_box_encodings': (batch_size, expected_num_anchors, 4), 'rpn_objectness_predictions_with_background': (batch_size, expected_num_anchors, 2), } if use_static_shapes: expected_output_shapes['anchors'] = (expected_num_anchors, 4) else: expected_output_shapes['anchors'] = (18300, 4) if use_static_shapes: results = self.execute(graph_fn, [images], graph=g) else: results = self.execute_cpu(graph_fn, [images], graph=g) self.assertAllEqual(results[0].shape, expected_output_shapes['rpn_box_predictor_features_0']) self.assertAllEqual(results[1].shape, expected_output_shapes['rpn_box_predictor_features_1']) self.assertAllEqual(results[2].shape, expected_output_shapes['rpn_features_to_crop_0']) self.assertAllEqual(results[3].shape, expected_output_shapes['rpn_features_to_crop_1']) self.assertAllEqual(results[4], input_image_shape) self.assertAllEqual(results[5].shape, expected_output_shapes['rpn_box_encodings']) self.assertAllEqual( results[6].shape, expected_output_shapes['rpn_objectness_predictions_with_background']) self.assertAllEqual(results[7].shape, expected_output_shapes['anchors']) # Check that anchors are clipped to window. 
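    # For reference, the multi-level anchor count asserted above comes from
    # two feature maps of sizes (height-2) x (width-2) and (height-4) x
    # (width-4), each with 3 aspect ratios * 2 scales per octave = 6 anchors
    # per location: (48 * 50 + 46 * 48) * 6 = (2400 + 2208) * 6 = 27648.
    # The clipping checks below mirror the single-level test above.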
    anchors = results[7]
    self.assertTrue(np.all(np.greater_equal(anchors, 0)))
    self.assertTrue(np.all(np.less_equal(anchors[:, 0], height)))
    self.assertTrue(np.all(np.less_equal(anchors[:, 1], width)))
    self.assertTrue(np.all(np.less_equal(anchors[:, 2], height)))
    self.assertTrue(np.all(np.less_equal(anchors[:, 3], width)))

  def test_regularization_losses(self):
    with test_utils.GraphContextOrNone() as g:
      model = self._build_model(
          is_training=True, number_of_stages=1, second_stage_batch_size=2)
    def graph_fn():
      batch_size = 2
      height = 10
      width = 12
      input_image_shape = (batch_size, height, width, 3)
      image, true_image_shapes = model.preprocess(tf.zeros(input_image_shape))
      model.predict(image, true_image_shapes)
      reg_losses = tf.math.add_n(model.regularization_losses())
      return reg_losses
    reg_losses = self.execute(graph_fn, [], graph=g)
    self.assertGreaterEqual(reg_losses, 0)

  def test_predict_gives_valid_anchors_in_training_mode_first_stage_only(self):
    expected_output_keys = set([
        'rpn_box_predictor_features', 'rpn_features_to_crop', 'image_shape',
        'rpn_box_encodings', 'rpn_objectness_predictions_with_background',
        'anchors', 'feature_maps'])
    with test_utils.GraphContextOrNone() as g:
      model = self._build_model(
          is_training=True, number_of_stages=1, second_stage_batch_size=2,)
    batch_size = 2
    height = 10
    width = 12
    input_image_shape = (batch_size, height, width, 3)
    def graph_fn():
      image, true_image_shapes = model.preprocess(tf.zeros(input_image_shape))
      prediction_dict = model.predict(image, true_image_shapes)
      self.assertEqual(set(prediction_dict.keys()), expected_output_keys)
      return (prediction_dict['image_shape'],
              prediction_dict['anchors'],
              prediction_dict['rpn_box_encodings'],
              prediction_dict['rpn_objectness_predictions_with_background'])
    (image_shape, anchors, rpn_box_encodings,
     rpn_objectness_predictions_with_background) = self.execute(
         graph_fn, [], graph=g)
    # At training time, anchors that exceed image bounds are pruned. Thus
    # the `expected_num_anchors` in the above inference mode test is now
    # a strict upper bound on the number of anchors.
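    # Unlike the inference-mode tests above, which clip anchors to the image
    # window, training mode prunes anchors that are not fully inside the
    # image, so the full grid count (height * width * 9 anchors per location)
    # is only a strict upper bound here, as asserted below.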
num_anchors_strict_upper_bound = height * width * 3 * 3 self.assertAllEqual(image_shape, input_image_shape) self.assertTrue(len(anchors.shape) == 2 and anchors.shape[1] == 4) num_anchors_out = anchors.shape[0] self.assertLess(num_anchors_out, num_anchors_strict_upper_bound) self.assertTrue(np.all(np.greater_equal(anchors, 0))) self.assertTrue(np.all(np.less_equal(anchors[:, 0], height))) self.assertTrue(np.all(np.less_equal(anchors[:, 1], width))) self.assertTrue(np.all(np.less_equal(anchors[:, 2], height))) self.assertTrue(np.all(np.less_equal(anchors[:, 3], width))) self.assertAllEqual(rpn_box_encodings.shape, (batch_size, num_anchors_out, 4)) self.assertAllEqual( rpn_objectness_predictions_with_background.shape, (batch_size, num_anchors_out, 2)) @parameterized.parameters( {'use_static_shapes': False}, {'use_static_shapes': True}, ) def test_predict_correct_shapes_in_inference_mode_two_stages( self, use_static_shapes): def compare_results(results, expected_output_shapes): """Checks if the shape of the predictions are as expected.""" self.assertAllEqual(results[0][0].shape, expected_output_shapes['rpn_box_predictor_features']) self.assertAllEqual(results[1][0].shape, expected_output_shapes['rpn_features_to_crop']) self.assertAllEqual(results[2].shape, expected_output_shapes['image_shape']) self.assertAllEqual(results[3].shape, expected_output_shapes['rpn_box_encodings']) self.assertAllEqual( results[4].shape, expected_output_shapes['rpn_objectness_predictions_with_background']) self.assertAllEqual(results[5].shape, expected_output_shapes['anchors']) self.assertAllEqual(results[6].shape, expected_output_shapes['refined_box_encodings']) self.assertAllEqual( results[7].shape, expected_output_shapes['class_predictions_with_background']) self.assertAllEqual(results[8].shape, expected_output_shapes['num_proposals']) self.assertAllEqual(results[9].shape, expected_output_shapes['proposal_boxes']) self.assertAllEqual(results[10].shape, expected_output_shapes['proposal_boxes_normalized']) self.assertAllEqual(results[11].shape, expected_output_shapes['box_classifier_features']) self.assertAllEqual(results[12].shape, expected_output_shapes['final_anchors']) batch_size = 2 image_size = 10 max_num_proposals = 8 initial_crop_size = 3 maxpool_stride = 1 with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=2, predict_masks=False, use_matmul_crop_and_resize=use_static_shapes, clip_anchors_to_image=use_static_shapes, use_static_shapes=use_static_shapes) def graph_fn(): """A function with TF compute.""" if use_static_shapes: images = tf.random_uniform((batch_size, image_size, image_size, 3)) else: images = tf.random_uniform((tf.random_uniform([], minval=batch_size, maxval=batch_size + 1, dtype=tf.int32), tf.random_uniform([], minval=image_size, maxval=image_size + 1, dtype=tf.int32), tf.random_uniform([], minval=image_size, maxval=image_size + 1, dtype=tf.int32), 3)) preprocessed_inputs, true_image_shapes = model.preprocess(images) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) return (prediction_dict['rpn_box_predictor_features'], prediction_dict['rpn_features_to_crop'], prediction_dict['image_shape'], prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors'], prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['num_proposals'], prediction_dict['proposal_boxes'], 
prediction_dict['proposal_boxes_normalized'], prediction_dict['box_classifier_features'], prediction_dict['final_anchors']) expected_num_anchors = image_size * image_size * 3 * 3 expected_shapes = { 'rpn_box_predictor_features': (2, image_size, image_size, 512), 'rpn_features_to_crop': (2, image_size, image_size, 3), 'image_shape': (4,), 'rpn_box_encodings': (2, expected_num_anchors, 4), 'rpn_objectness_predictions_with_background': (2, expected_num_anchors, 2), 'anchors': (expected_num_anchors, 4), 'refined_box_encodings': (2 * max_num_proposals, 2, 4), 'class_predictions_with_background': (2 * max_num_proposals, 2 + 1), 'num_proposals': (2,), 'proposal_boxes': (2, max_num_proposals, 4), 'proposal_boxes_normalized': (2, max_num_proposals, 4), 'box_classifier_features': self._get_box_classifier_features_shape(image_size, batch_size, max_num_proposals, initial_crop_size, maxpool_stride, 3), 'feature_maps': [(2, image_size, image_size, 512)], 'final_anchors': (2, max_num_proposals, 4) } if use_static_shapes: results = self.execute(graph_fn, [], graph=g) else: results = self.execute_cpu(graph_fn, [], graph=g) compare_results(results, expected_shapes) @parameterized.parameters( {'use_static_shapes': False}, {'use_static_shapes': True}, ) def test_predict_gives_correct_shapes_in_train_mode_both_stages( self, use_static_shapes=False): batch_size = 2 image_size = 10 max_num_proposals = 7 initial_crop_size = 3 maxpool_stride = 1 with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=True, number_of_stages=2, second_stage_batch_size=7, predict_masks=False, use_matmul_crop_and_resize=use_static_shapes, clip_anchors_to_image=use_static_shapes, use_static_shapes=use_static_shapes) def graph_fn(images, gt_boxes, gt_classes, gt_weights): """Function to construct tf graph for the test.""" preprocessed_inputs, true_image_shapes = model.preprocess(images) model.provide_groundtruth( groundtruth_boxes_list=tf.unstack(gt_boxes), groundtruth_classes_list=tf.unstack(gt_classes), groundtruth_weights_list=tf.unstack(gt_weights)) result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes) return (result_tensor_dict['refined_box_encodings'], result_tensor_dict['class_predictions_with_background'], result_tensor_dict['proposal_boxes'], result_tensor_dict['proposal_boxes_normalized'], result_tensor_dict['anchors'], result_tensor_dict['rpn_box_encodings'], result_tensor_dict['rpn_objectness_predictions_with_background'], result_tensor_dict['rpn_features_to_crop'][0], result_tensor_dict['rpn_box_predictor_features'][0], result_tensor_dict['final_anchors'], ) image_shape = (batch_size, image_size, image_size, 3) images = np.zeros(image_shape, dtype=np.float32) gt_boxes = np.stack([ np.array([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=np.float32), np.array([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=np.float32) ]) gt_classes = np.stack([ np.array([[1, 0], [0, 1]], dtype=np.float32), np.array([[1, 0], [1, 0]], dtype=np.float32) ]) gt_weights = np.stack([ np.array([1, 1], dtype=np.float32), np.array([1, 1], dtype=np.float32) ]) if use_static_shapes: results = self.execute(graph_fn, [images, gt_boxes, gt_classes, gt_weights], graph=g) else: results = self.execute_cpu(graph_fn, [images, gt_boxes, gt_classes, gt_weights], graph=g) expected_shapes = { 'rpn_box_predictor_features': (2, image_size, image_size, 512), 'rpn_features_to_crop': (2, image_size, image_size, 3), 'refined_box_encodings': (2 * max_num_proposals, 2, 4), 'class_predictions_with_background': (2 * max_num_proposals, 2 + 1), 
'proposal_boxes': (2, max_num_proposals, 4), 'rpn_box_encodings': (2, image_size * image_size * 9, 4), 'proposal_boxes_normalized': (2, max_num_proposals, 4), 'box_classifier_features': self._get_box_classifier_features_shape( image_size, batch_size, max_num_proposals, initial_crop_size, maxpool_stride, 3), 'rpn_objectness_predictions_with_background': (2, image_size * image_size * 9, 2), 'final_anchors': (2, max_num_proposals, 4) } # TODO(rathodv): Possibly change utils/test_case.py to accept dictionaries # and return dicionaries so don't have to rely on the order of tensors. self.assertAllEqual(results[0].shape, expected_shapes['refined_box_encodings']) self.assertAllEqual(results[1].shape, expected_shapes['class_predictions_with_background']) self.assertAllEqual(results[2].shape, expected_shapes['proposal_boxes']) self.assertAllEqual(results[3].shape, expected_shapes['proposal_boxes_normalized']) anchors_shape = results[4].shape self.assertAllEqual(results[5].shape, [batch_size, anchors_shape[0], 4]) self.assertAllEqual(results[6].shape, [batch_size, anchors_shape[0], 2]) self.assertAllEqual(results[7].shape, expected_shapes['rpn_features_to_crop']) self.assertAllEqual(results[8].shape, expected_shapes['rpn_box_predictor_features']) self.assertAllEqual(results[9].shape, expected_shapes['final_anchors']) @parameterized.parameters( {'use_static_shapes': False, 'pad_to_max_dimension': None}, {'use_static_shapes': True, 'pad_to_max_dimension': None}, {'use_static_shapes': False, 'pad_to_max_dimension': 56,}, {'use_static_shapes': True, 'pad_to_max_dimension': 56}, ) def test_postprocess_first_stage_only_inference_mode( self, use_static_shapes=False, pad_to_max_dimension=None): batch_size = 2 first_stage_max_proposals = 4 if use_static_shapes else 8 with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=1, second_stage_batch_size=6, use_matmul_crop_and_resize=use_static_shapes, clip_anchors_to_image=use_static_shapes, use_static_shapes=use_static_shapes, use_matmul_gather_in_matcher=use_static_shapes, first_stage_max_proposals=first_stage_max_proposals, pad_to_max_dimension=pad_to_max_dimension) def graph_fn(images, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors): """Function to construct tf graph for the test.""" preprocessed_images, true_image_shapes = model.preprocess(images) proposals = model.postprocess({ 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'rpn_features_to_crop': rpn_features_to_crop, 'image_shape': tf.shape(preprocessed_images), 'anchors': anchors}, true_image_shapes) return (proposals['num_detections'], proposals['detection_boxes'], proposals['detection_scores'], proposals['raw_detection_boxes'], proposals['raw_detection_scores']) anchors = np.array( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=np.float32) rpn_box_encodings = np.zeros( (batch_size, anchors.shape[0], BOX_CODE_SIZE), dtype=np.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = np.array([ [[-10, 13], [10, -10], [10, -11], [-10, 12]], [[10, -10], [-10, 13], [-10, 12], [10, -11]]], dtype=np.float32) rpn_features_to_crop = np.ones((batch_size, 8, 8, 10), dtype=np.float32) image_shape = (batch_size, 32, 32, 3) images = np.zeros(image_shape, dtype=np.float32) if use_static_shapes: results = 
self.execute(graph_fn, [images, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors], graph=g) else: results = self.execute_cpu(graph_fn, [images, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors], graph=g) expected_proposal_boxes = [ [[0, 0, .5, .5], [.5, .5, 1, 1], [0, .5, .5, 1], [.5, 0, 1.0, .5]] + 4 * [4 * [0]], [[0, .5, .5, 1], [.5, 0, 1.0, .5], [0, 0, .5, .5], [.5, .5, 1, 1]] + 4 * [4 * [0]]] expected_proposal_scores = [[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0]] expected_num_proposals = [4, 4] expected_raw_proposal_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]], [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]]] expected_raw_scores = [[[0., 1.], [1., 0.], [1., 0.], [0., 1.]], [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]] if pad_to_max_dimension is not None: expected_raw_proposal_boxes = (np.array(expected_raw_proposal_boxes) * 32 / pad_to_max_dimension) expected_proposal_boxes = (np.array(expected_proposal_boxes) * 32 / pad_to_max_dimension) self.assertAllClose(results[0], expected_num_proposals) for indx, num_proposals in enumerate(expected_num_proposals): self.assertAllClose(results[1][indx][0:num_proposals], expected_proposal_boxes[indx][0:num_proposals]) self.assertAllClose(results[2][indx][0:num_proposals], expected_proposal_scores[indx][0:num_proposals]) self.assertAllClose(results[3], expected_raw_proposal_boxes) self.assertAllClose(results[4], expected_raw_scores) def _test_postprocess_first_stage_only_train_mode(self, pad_to_max_dimension=None): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=True, number_of_stages=1, second_stage_batch_size=2, pad_to_max_dimension=pad_to_max_dimension) batch_size = 2 def graph_fn(): """A function with TF compute.""" anchors = tf.constant( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=tf.float32) rpn_box_encodings = tf.zeros( [batch_size, anchors.get_shape().as_list()[0], BOX_CODE_SIZE], dtype=tf.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = tf.constant([ [[-10, 13], [-10, 12], [-10, 11], [-10, 10]], [[-10, 13], [-10, 12], [-10, 11], [-10, 10]]], dtype=tf.float32) rpn_features_to_crop = tf.ones((batch_size, 8, 8, 10), dtype=tf.float32) image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) groundtruth_boxes_list = [ tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)] groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], dtype=tf.float32), tf.constant([[1, 0], [1, 0]], dtype=tf.float32)] groundtruth_weights_list = [ tf.constant([1, 1], dtype=tf.float32), tf.constant([1, 1], dtype=tf.float32) ] _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) model.provide_groundtruth( groundtruth_boxes_list, groundtruth_classes_list, groundtruth_weights_list=groundtruth_weights_list) proposals = model.postprocess({ 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'rpn_features_to_crop': rpn_features_to_crop, 'anchors': anchors, 'image_shape': image_shape}, true_image_shapes) return (proposals['detection_boxes'], proposals['detection_scores'], proposals['num_detections'], proposals['detection_multiclass_scores'], 
proposals['raw_detection_boxes'], proposals['raw_detection_scores']) expected_proposal_boxes = [ [[0, 0, .5, .5], [.5, .5, 1, 1]], [[0, .5, .5, 1], [.5, 0, 1, .5]]] expected_proposal_scores = [[1, 1], [1, 1]] expected_proposal_multiclass_scores = [[[0., 1.], [0., 1.]], [[0., 1.], [0., 1.]]] expected_raw_proposal_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]], [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]]] expected_raw_scores = [[[0., 1.], [0., 1.], [0., 1.], [0., 1.]], [[0., 1.], [0., 1.], [0., 1.], [0., 1.]]] (proposal_boxes, proposal_scores, batch_num_detections, batch_multiclass_scores, raw_detection_boxes, raw_detection_scores) = self.execute_cpu(graph_fn, [], graph=g) for image_idx in range(batch_size): num_detections = int(batch_num_detections[image_idx]) boxes = proposal_boxes[image_idx][:num_detections, :].tolist() scores = proposal_scores[image_idx][:num_detections].tolist() multiclass_scores = batch_multiclass_scores[ image_idx][:num_detections, :].tolist() expected_boxes = expected_proposal_boxes[image_idx] expected_scores = expected_proposal_scores[image_idx] expected_multiclass_scores = expected_proposal_multiclass_scores[ image_idx] self.assertTrue( test_utils.first_rows_close_as_set(boxes, expected_boxes)) self.assertTrue( test_utils.first_rows_close_as_set(scores, expected_scores)) self.assertTrue( test_utils.first_rows_close_as_set(multiclass_scores, expected_multiclass_scores)) self.assertAllClose(raw_detection_boxes, expected_raw_proposal_boxes) self.assertAllClose(raw_detection_scores, expected_raw_scores) @parameterized.parameters( {'pad_to_max_dimension': 56}, {'pad_to_max_dimension': None} ) def test_postprocess_first_stage_only_train_mode_padded_image( self, pad_to_max_dimension): self._test_postprocess_first_stage_only_train_mode(pad_to_max_dimension) @parameterized.parameters( {'use_static_shapes': False, 'pad_to_max_dimension': None}, {'use_static_shapes': True, 'pad_to_max_dimension': None}, {'use_static_shapes': False, 'pad_to_max_dimension': 56}, {'use_static_shapes': True, 'pad_to_max_dimension': 56}, ) def test_postprocess_second_stage_only_inference_mode( self, use_static_shapes=False, pad_to_max_dimension=None): batch_size = 2 num_classes = 2 image_shape = np.array((2, 36, 48, 3), dtype=np.int32) first_stage_max_proposals = 8 total_num_padded_proposals = batch_size * first_stage_max_proposals with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6, use_matmul_crop_and_resize=use_static_shapes, clip_anchors_to_image=use_static_shapes, use_static_shapes=use_static_shapes, use_matmul_gather_in_matcher=use_static_shapes, pad_to_max_dimension=pad_to_max_dimension) def graph_fn(images, refined_box_encodings, class_predictions_with_background, num_proposals, proposal_boxes): """Function to construct tf graph for the test.""" _, true_image_shapes = model.preprocess(images) detections = model.postprocess({ 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': proposal_boxes, }, true_image_shapes) return (detections['num_detections'], detections['detection_boxes'], detections['detection_scores'], detections['detection_classes'], detections['raw_detection_boxes'], detections['raw_detection_scores'], detections['detection_multiclass_scores'], detections['detection_anchor_indices']) proposal_boxes = 
np.array( [[[1, 1, 2, 3], [0, 0, 1, 1], [.5, .5, .6, .6], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], [[2, 3, 6, 8], [1, 2, 5, 3], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=np.float32) num_proposals = np.array([3, 2], dtype=np.int32) refined_box_encodings = np.zeros( [total_num_padded_proposals, num_classes, 4], dtype=np.float32) class_predictions_with_background = np.ones( [total_num_padded_proposals, num_classes+1], dtype=np.float32) images = np.zeros(image_shape, dtype=np.float32) if use_static_shapes: results = self.execute(graph_fn, [images, refined_box_encodings, class_predictions_with_background, num_proposals, proposal_boxes], graph=g) else: results = self.execute_cpu(graph_fn, [images, refined_box_encodings, class_predictions_with_background, num_proposals, proposal_boxes], graph=g) # Note that max_total_detections=5 in the NMS config. expected_num_detections = [5, 4] expected_detection_classes = [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]] expected_detection_scores = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]] expected_multiclass_scores = [[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]] # Note that a single anchor can be used for multiple detections (predictions # are made independently per class). expected_anchor_indices = [[0, 1, 2, 0, 1], [0, 1, 0, 1]] h = float(image_shape[1]) w = float(image_shape[2]) expected_raw_detection_boxes = np.array( [[[1 / h, 1 / w, 2 / h, 3 / w], [0, 0, 1 / h, 1 / w], [.5 / h, .5 / w, .6 / h, .6 / w], 4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0]], [[2 / h, 3 / w, 6 / h, 8 / w], [1 / h, 2 / w, 5 / h, 3 / w], 4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0]]], dtype=np.float32) self.assertAllClose(results[0], expected_num_detections) for indx, num_proposals in enumerate(expected_num_detections): self.assertAllClose(results[2][indx][0:num_proposals], expected_detection_scores[indx][0:num_proposals]) self.assertAllClose(results[3][indx][0:num_proposals], expected_detection_classes[indx][0:num_proposals]) self.assertAllClose(results[6][indx][0:num_proposals], expected_multiclass_scores[indx][0:num_proposals]) self.assertAllClose(results[7][indx][0:num_proposals], expected_anchor_indices[indx][0:num_proposals]) self.assertAllClose(results[4], expected_raw_detection_boxes) self.assertAllClose(results[5], class_predictions_with_background.reshape([-1, 8, 3])) if not use_static_shapes: self.assertAllEqual(results[1].shape, [2, 5, 4]) def test_preprocess_preserves_dynamic_input_shapes(self): width = tf.random.uniform([], minval=5, maxval=10, dtype=tf.int32) batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) shape = tf.stack([batch, 5, width, 3]) image = tf.random.uniform(shape) model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6) preprocessed_inputs, _ = model.preprocess(image) self.assertTrue( preprocessed_inputs.shape.is_compatible_with([None, 5, None, 3])) def test_preprocess_preserves_static_input_shapes(self): shape = tf.stack([2, 5, 5, 3]) image = tf.random.uniform(shape) model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6) preprocessed_inputs, _ = model.preprocess(image) self.assertTrue( preprocessed_inputs.shape.is_compatible_with([2, 5, 5, 3])) # TODO(rathodv): Split test into two - with and without masks. 
def test_loss_first_stage_only_mode(self): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=True, number_of_stages=1, second_stage_batch_size=6) batch_size = 2 def graph_fn(): """A function with TF compute.""" anchors = tf.constant( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=tf.float32) rpn_box_encodings = tf.zeros( [batch_size, anchors.get_shape().as_list()[0], BOX_CODE_SIZE], dtype=tf.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = tf.constant([ [[-10, 13], [10, -10], [10, -11], [-10, 12]], [[10, -10], [-10, 13], [-10, 12], [10, -11]]], dtype=tf.float32) image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) groundtruth_boxes_list = [ tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)] groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], dtype=tf.float32), tf.constant([[1, 0], [1, 0]], dtype=tf.float32)] prediction_dict = { 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'image_shape': image_shape, 'anchors': anchors } _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list) loss_dict = model.loss(prediction_dict, true_image_shapes) self.assertNotIn('Loss/BoxClassifierLoss/localization_loss', loss_dict) self.assertNotIn('Loss/BoxClassifierLoss/classification_loss', loss_dict) return (loss_dict['Loss/RPNLoss/localization_loss'], loss_dict['Loss/RPNLoss/objectness_loss']) loc_loss, obj_loss = self.execute_cpu(graph_fn, [], graph=g) self.assertAllClose(loc_loss, 0) self.assertAllClose(obj_loss, 0) # TODO(rathodv): Split test into two - with and without masks. 
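  # In the following test every loss term is expected to be (close to) zero:
  # the all-zero box encodings decode back onto proposals that coincide with
  # the groundtruth boxes, the +/-10 class logits strongly favor the assigned
  # labels, and the all-ones groundtruth masks match the large (20) mask
  # logits, so the RPN, box-classifier and mask losses should all vanish.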
def test_loss_full(self): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=True, number_of_stages=2, second_stage_batch_size=6) batch_size = 3 def graph_fn(): """A function with TF compute.""" anchors = tf.constant( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=tf.float32) rpn_box_encodings = tf.zeros( [batch_size, anchors.get_shape().as_list()[0], BOX_CODE_SIZE], dtype=tf.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = tf.constant( [[[-10, 13], [10, -10], [10, -11], [-10, 12]], [[10, -10], [-10, 13], [-10, 12], [10, -11]], [[10, -10], [-10, 13], [-10, 12], [10, -11]]], dtype=tf.float32) image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) num_proposals = tf.constant([6, 6, 6], dtype=tf.int32) proposal_boxes = tf.constant( 3 * [[[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32], [0, 0, 16, 16], [0, 16, 16, 32]]], dtype=tf.float32) refined_box_encodings = tf.zeros( (batch_size * model.max_num_proposals, model.num_classes, BOX_CODE_SIZE), dtype=tf.float32) class_predictions_with_background = tf.constant( [ [-10, 10, -10], # first image [10, -10, -10], [10, -10, -10], [-10, -10, 10], [-10, 10, -10], [10, -10, -10], [10, -10, -10], # second image [-10, 10, -10], [-10, 10, -10], [10, -10, -10], [10, -10, -10], [-10, 10, -10], [10, -10, -10], # third image [-10, 10, -10], [-10, 10, -10], [10, -10, -10], [10, -10, -10], [-10, 10, -10] ], dtype=tf.float32) mask_predictions_logits = 20 * tf.ones((batch_size * model.max_num_proposals, model.num_classes, 14, 14), dtype=tf.float32) groundtruth_boxes_list = [ tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32), tf.constant([[0, .5, .5, 1], [.5, 0, 1, 1]], dtype=tf.float32) ] groundtruth_classes_list = [ tf.constant([[1, 0], [0, 1]], dtype=tf.float32), tf.constant([[1, 0], [1, 0]], dtype=tf.float32), tf.constant([[1, 0], [0, 1]], dtype=tf.float32) ] # Set all elements of groundtruth mask to 1.0. In this case all proposal # crops of the groundtruth masks should return a mask that covers the # entire proposal. Thus, if mask_predictions_logits element values are all # greater than 20, the loss should be zero. 
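      # Back-of-the-envelope check: with mask logits of 20 and a target of
      # 1.0, the per-pixel sigmoid cross-entropy is log(1 + exp(-20)),
      # roughly 2e-9, so the mask loss is zero up to assertAllClose
      # tolerances.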
groundtruth_masks_list = [ tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32), tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32), tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32) ] groundtruth_weights_list = [ tf.constant([1, 1], dtype=tf.float32), tf.constant([1, 1], dtype=tf.float32), tf.constant([1, 0], dtype=tf.float32) ] prediction_dict = { 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'image_shape': image_shape, 'anchors': anchors, 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'proposal_boxes': proposal_boxes, 'num_proposals': num_proposals, 'mask_predictions': mask_predictions_logits } _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) model.provide_groundtruth( groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list, groundtruth_weights_list=groundtruth_weights_list) loss_dict = model.loss(prediction_dict, true_image_shapes) return (loss_dict['Loss/RPNLoss/localization_loss'], loss_dict['Loss/RPNLoss/objectness_loss'], loss_dict['Loss/BoxClassifierLoss/localization_loss'], loss_dict['Loss/BoxClassifierLoss/classification_loss'], loss_dict['Loss/BoxClassifierLoss/mask_loss']) (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) self.assertAllClose(rpn_loc_loss, 0) self.assertAllClose(rpn_obj_loss, 0) self.assertAllClose(box_loc_loss, 0) self.assertAllClose(box_cls_loss, 0) self.assertAllClose(box_mask_loss, 0) def test_loss_full_zero_padded_proposals(self): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=True, number_of_stages=2, second_stage_batch_size=6) batch_size = 1 def graph_fn(): """A function with TF compute.""" anchors = tf.constant( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=tf.float32) rpn_box_encodings = tf.zeros( [batch_size, anchors.get_shape().as_list()[0], BOX_CODE_SIZE], dtype=tf.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = tf.constant([ [[-10, 13], [10, -10], [10, -11], [10, -12]],], dtype=tf.float32) image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) # box_classifier_batch_size is 6, but here we assume that the number of # actual proposals (not counting zero paddings) is fewer (3). num_proposals = tf.constant([3], dtype=tf.int32) proposal_boxes = tf.constant( [[[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [0, 0, 0, 0], # begin paddings [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=tf.float32) refined_box_encodings = tf.zeros( (batch_size * model.max_num_proposals, model.num_classes, BOX_CODE_SIZE), dtype=tf.float32) class_predictions_with_background = tf.constant( [[-10, 10, -10], [10, -10, -10], [10, -10, -10], [0, 0, 0], # begin paddings [0, 0, 0], [0, 0, 0]], dtype=tf.float32) mask_predictions_logits = 20 * tf.ones((batch_size * model.max_num_proposals, model.num_classes, 14, 14), dtype=tf.float32) groundtruth_boxes_list = [ tf.constant([[0, 0, .5, .5]], dtype=tf.float32)] groundtruth_classes_list = [tf.constant([[1, 0]], dtype=tf.float32)] # Set all elements of groundtruth mask to 1.0. In this case all proposal # crops of the groundtruth masks should return a mask that covers the # entire proposal. Thus, if mask_predictions_logits element values are all # greater than 20, the loss should be zero. 
groundtruth_masks_list = [tf.convert_to_tensor(np.ones((1, 32, 32)), dtype=tf.float32)] prediction_dict = { 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'image_shape': image_shape, 'anchors': anchors, 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'proposal_boxes': proposal_boxes, 'num_proposals': num_proposals, 'mask_predictions': mask_predictions_logits } _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list) loss_dict = model.loss(prediction_dict, true_image_shapes) return (loss_dict['Loss/RPNLoss/localization_loss'], loss_dict['Loss/RPNLoss/objectness_loss'], loss_dict['Loss/BoxClassifierLoss/localization_loss'], loss_dict['Loss/BoxClassifierLoss/classification_loss'], loss_dict['Loss/BoxClassifierLoss/mask_loss']) (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) self.assertAllClose(rpn_loc_loss, 0) self.assertAllClose(rpn_obj_loss, 0) self.assertAllClose(box_loc_loss, 0) self.assertAllClose(box_cls_loss, 0) self.assertAllClose(box_mask_loss, 0) def test_loss_full_multiple_label_groundtruth(self): with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=True, number_of_stages=2, second_stage_batch_size=6, softmax_second_stage_classification_loss=False) batch_size = 1 def graph_fn(): """A function with TF compute.""" anchors = tf.constant( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=tf.float32) rpn_box_encodings = tf.zeros( [batch_size, anchors.get_shape().as_list()[0], BOX_CODE_SIZE], dtype=tf.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = tf.constant([ [[-10, 13], [10, -10], [10, -11], [10, -12]],], dtype=tf.float32) image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) # box_classifier_batch_size is 6, but here we assume that the number of # actual proposals (not counting zero paddings) is fewer (3). num_proposals = tf.constant([3], dtype=tf.int32) proposal_boxes = tf.constant( [[[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [0, 0, 0, 0], # begin paddings [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=tf.float32) # second_stage_localization_loss should only be computed for predictions # that match groundtruth. For multiple label groundtruth boxes, the loss # should only be computed once for the label with the smaller index. refined_box_encodings = tf.constant( [[[0, 0, 0, 0], [1, 1, -1, -1]], [[1, 1, -1, -1], [1, 1, 1, 1]], [[1, 1, -1, -1], [1, 1, 1, 1]], [[1, 1, -1, -1], [1, 1, 1, 1]], [[1, 1, -1, -1], [1, 1, 1, 1]], [[1, 1, -1, -1], [1, 1, 1, 1]]], dtype=tf.float32) class_predictions_with_background = tf.constant( [[-100, 100, 100], [100, -100, -100], [100, -100, -100], [0, 0, 0], # begin paddings [0, 0, 0], [0, 0, 0]], dtype=tf.float32) mask_predictions_logits = 20 * tf.ones((batch_size * model.max_num_proposals, model.num_classes, 14, 14), dtype=tf.float32) groundtruth_boxes_list = [ tf.constant([[0, 0, .5, .5]], dtype=tf.float32)] # Box contains two ground truth labels. groundtruth_classes_list = [tf.constant([[1, 1]], dtype=tf.float32)] # Set all elements of groundtruth mask to 1.0. 
In this case all proposal # crops of the groundtruth masks should return a mask that covers the # entire proposal. Thus, if mask_predictions_logits element values are all # greater than 20, the loss should be zero. groundtruth_masks_list = [tf.convert_to_tensor(np.ones((1, 32, 32)), dtype=tf.float32)] prediction_dict = { 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'image_shape': image_shape, 'anchors': anchors, 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'proposal_boxes': proposal_boxes, 'num_proposals': num_proposals, 'mask_predictions': mask_predictions_logits } _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list) loss_dict = model.loss(prediction_dict, true_image_shapes) return (loss_dict['Loss/RPNLoss/localization_loss'], loss_dict['Loss/RPNLoss/objectness_loss'], loss_dict['Loss/BoxClassifierLoss/localization_loss'], loss_dict['Loss/BoxClassifierLoss/classification_loss'], loss_dict['Loss/BoxClassifierLoss/mask_loss']) (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) self.assertAllClose(rpn_loc_loss, 0) self.assertAllClose(rpn_obj_loss, 0) self.assertAllClose(box_loc_loss, 0) self.assertAllClose(box_cls_loss, 0) self.assertAllClose(box_mask_loss, 0) @parameterized.parameters( {'use_static_shapes': False, 'shared_boxes': False}, {'use_static_shapes': False, 'shared_boxes': True}, {'use_static_shapes': True, 'shared_boxes': False}, {'use_static_shapes': True, 'shared_boxes': True}, ) def test_loss_full_zero_padded_proposals_nonzero_loss_with_two_images( self, use_static_shapes=False, shared_boxes=False): batch_size = 2 first_stage_max_proposals = 8 second_stage_batch_size = 6 num_classes = 2 with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=True, number_of_stages=2, second_stage_batch_size=second_stage_batch_size, first_stage_max_proposals=first_stage_max_proposals, num_classes=num_classes, use_matmul_crop_and_resize=use_static_shapes, clip_anchors_to_image=use_static_shapes, use_static_shapes=use_static_shapes) def graph_fn(anchors, rpn_box_encodings, rpn_objectness_predictions_with_background, images, num_proposals, proposal_boxes, refined_box_encodings, class_predictions_with_background, groundtruth_boxes, groundtruth_classes): """Function to construct tf graph for the test.""" prediction_dict = { 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'image_shape': tf.shape(images), 'anchors': anchors, 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'proposal_boxes': proposal_boxes, 'num_proposals': num_proposals } _, true_image_shapes = model.preprocess(images) model.provide_groundtruth(tf.unstack(groundtruth_boxes), tf.unstack(groundtruth_classes)) loss_dict = model.loss(prediction_dict, true_image_shapes) return (loss_dict['Loss/RPNLoss/localization_loss'], loss_dict['Loss/RPNLoss/objectness_loss'], loss_dict['Loss/BoxClassifierLoss/localization_loss'], loss_dict['Loss/BoxClassifierLoss/classification_loss']) anchors = np.array( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=np.float32) rpn_box_encodings = np.zeros( [batch_size, 
anchors.shape[1], BOX_CODE_SIZE], dtype=np.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = np.array( [[[-10, 13], [10, -10], [10, -11], [10, -12]], [[-10, 13], [10, -10], [10, -11], [10, -12]]], dtype=np.float32) images = np.zeros([batch_size, 32, 32, 3], dtype=np.float32) # box_classifier_batch_size is 6, but here we assume that the number of # actual proposals (not counting zero paddings) is fewer. num_proposals = np.array([3, 2], dtype=np.int32) proposal_boxes = np.array( [[[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [0, 0, 0, 0], # begin paddings [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 16, 16], [0, 16, 16, 32], [0, 0, 0, 0], # begin paddings [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.float32) refined_box_encodings = np.zeros( (batch_size * second_stage_batch_size, 1 if shared_boxes else num_classes, BOX_CODE_SIZE), dtype=np.float32) class_predictions_with_background = np.array( [[-10, 10, -10], # first image [10, -10, -10], [10, -10, -10], [0, 0, 0], # begin paddings [0, 0, 0], [0, 0, 0], [-10, -10, 10], # second image [10, -10, -10], [0, 0, 0], # begin paddings [0, 0, 0], [0, 0, 0], [0, 0, 0],], dtype=np.float32) # The first groundtruth box is 4/5 of the anchor size in both directions # experiencing a loss of: # 2 * SmoothL1(5 * log(4/5)) / num_proposals # = 2 * (abs(5 * log(1/2)) - .5) / 3 # The second groundtruth box is identical to the prediction and thus # experiences zero loss. # Total average loss is (abs(5 * log(1/2)) - .5) / 3. groundtruth_boxes = np.stack([ np.array([[0.05, 0.05, 0.45, 0.45]], dtype=np.float32), np.array([[0.0, 0.0, 0.5, 0.5]], dtype=np.float32)]) groundtruth_classes = np.stack([np.array([[1, 0]], dtype=np.float32), np.array([[0, 1]], dtype=np.float32)]) execute_fn = self.execute_cpu if use_static_shapes: execute_fn = self.execute results = execute_fn(graph_fn, [ anchors, rpn_box_encodings, rpn_objectness_predictions_with_background, images, num_proposals, proposal_boxes, refined_box_encodings, class_predictions_with_background, groundtruth_boxes, groundtruth_classes ], graph=g) exp_loc_loss = (-5 * np.log(.8) - 0.5) / 3.0 self.assertAllClose(results[0], exp_loc_loss, rtol=1e-4, atol=1e-4) self.assertAllClose(results[1], 0.0) self.assertAllClose(results[2], exp_loc_loss, rtol=1e-4, atol=1e-4) self.assertAllClose(results[3], 0.0) def test_loss_with_hard_mining(self): with test_utils.GraphContextOrNone() as g: model = self._build_model(is_training=True, number_of_stages=2, second_stage_batch_size=None, first_stage_max_proposals=6, hard_mining=True) batch_size = 1 def graph_fn(): """A function with TF compute.""" anchors = tf.constant( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=tf.float32) rpn_box_encodings = tf.zeros( [batch_size, anchors.get_shape().as_list()[0], BOX_CODE_SIZE], dtype=tf.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = tf.constant( [[[-10, 13], [-10, 12], [10, -11], [10, -12]]], dtype=tf.float32) image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) # box_classifier_batch_size is 6, but here we assume that the number of # actual proposals (not counting zero paddings) is fewer (3). 
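      # The proposal tensors below are zero-padded out to max_num_proposals
      # (6 here): only the first `num_proposals` rows are real proposals, and
      # the all-zero padding rows (with their all-zero class predictions)
      # should not contribute to the losses being checked.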
num_proposals = tf.constant([3], dtype=tf.int32) proposal_boxes = tf.constant( [[[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [0, 0, 0, 0], # begin paddings [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=tf.float32) refined_box_encodings = tf.zeros( (batch_size * model.max_num_proposals, model.num_classes, BOX_CODE_SIZE), dtype=tf.float32) class_predictions_with_background = tf.constant( [[-10, 10, -10], # first image [-10, -10, 10], [10, -10, -10], [0, 0, 0], # begin paddings [0, 0, 0], [0, 0, 0]], dtype=tf.float32) # The first groundtruth box is 4/5 of the anchor size in both directions # experiencing a loss of: # 2 * SmoothL1(5 * log(4/5)) / num_proposals # = 2 * (abs(5 * log(1/2)) - .5) / 3 # The second groundtruth box is 46/50 of the anchor size in both # directions experiencing a loss of: # 2 * SmoothL1(5 * log(42/50)) / num_proposals # = 2 * (.5(5 * log(.92))^2 - .5) / 3. # Since the first groundtruth box experiences greater loss, and we have # set num_hard_examples=1 in the HardMiner, the final localization loss # corresponds to that of the first groundtruth box. groundtruth_boxes_list = [ tf.constant([[0.05, 0.05, 0.45, 0.45], [0.02, 0.52, 0.48, 0.98],], dtype=tf.float32)] groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], dtype=tf.float32)] prediction_dict = { 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'image_shape': image_shape, 'anchors': anchors, 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'proposal_boxes': proposal_boxes, 'num_proposals': num_proposals } _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list) loss_dict = model.loss(prediction_dict, true_image_shapes) return (loss_dict['Loss/BoxClassifierLoss/localization_loss'], loss_dict['Loss/BoxClassifierLoss/classification_loss']) loc_loss, cls_loss = self.execute_cpu(graph_fn, [], graph=g) exp_loc_loss = 2 * (-5 * np.log(.8) - 0.5) / 3.0 self.assertAllClose(loc_loss, exp_loc_loss) self.assertAllClose(cls_loss, 0) def test_loss_with_hard_mining_and_losses_mask(self): with test_utils.GraphContextOrNone() as g: model = self._build_model(is_training=True, number_of_stages=2, second_stage_batch_size=None, first_stage_max_proposals=6, hard_mining=True) batch_size = 2 number_of_proposals = 3 def graph_fn(): """A function with TF compute.""" anchors = tf.constant( [[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], [16, 16, 32, 32]], dtype=tf.float32) rpn_box_encodings = tf.zeros( [batch_size, anchors.get_shape().as_list()[0], BOX_CODE_SIZE], dtype=tf.float32) # use different numbers for the objectness category to break ties in # order of boxes returned by NMS rpn_objectness_predictions_with_background = tf.constant( [[[-10, 13], [-10, 12], [10, -11], [10, -12]], [[-10, 13], [-10, 12], [10, -11], [10, -12]]], dtype=tf.float32) image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) # box_classifier_batch_size is 6, but here we assume that the number of # actual proposals (not counting zero paddings) is fewer (3). 
num_proposals = tf.constant([number_of_proposals, number_of_proposals], dtype=tf.int32) proposal_boxes = tf.constant( [[[0, 0, 16, 16], # first image [0, 16, 16, 32], [16, 0, 32, 16], [0, 0, 0, 0], # begin paddings [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 16, 16], # second image [0, 16, 16, 32], [16, 0, 32, 16], [0, 0, 0, 0], # begin paddings [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=tf.float32) refined_box_encodings = tf.zeros( (batch_size * model.max_num_proposals, model.num_classes, BOX_CODE_SIZE), dtype=tf.float32) class_predictions_with_background = tf.constant( [[-10, 10, -10], # first image [-10, -10, 10], [10, -10, -10], [0, 0, 0], # begin paddings [0, 0, 0], [0, 0, 0], [-10, 10, -10], # second image [-10, -10, 10], [10, -10, -10], [0, 0, 0], # begin paddings [0, 0, 0], [0, 0, 0]], dtype=tf.float32) # The first groundtruth box is 4/5 of the anchor size in both directions # experiencing a loss of: # 2 * SmoothL1(5 * log(4/5)) / (num_proposals * batch_size) # = 2 * (abs(5 * log(1/2)) - .5) / 3 # The second groundtruth box is 46/50 of the anchor size in both # directions experiencing a loss of: # 2 * SmoothL1(5 * log(42/50)) / (num_proposals * batch_size) # = 2 * (.5(5 * log(.92))^2 - .5) / 3. # Since the first groundtruth box experiences greater loss, and we have # set num_hard_examples=1 in the HardMiner, the final localization loss # corresponds to that of the first groundtruth box. groundtruth_boxes_list = [ tf.constant([[0.05, 0.05, 0.45, 0.45], [0.02, 0.52, 0.48, 0.98]], dtype=tf.float32), tf.constant([[0.05, 0.05, 0.45, 0.45], [0.02, 0.52, 0.48, 0.98]], dtype=tf.float32)] groundtruth_classes_list = [ tf.constant([[1, 0], [0, 1]], dtype=tf.float32), tf.constant([[1, 0], [0, 1]], dtype=tf.float32)] is_annotated_list = [tf.constant(True, dtype=tf.bool), tf.constant(False, dtype=tf.bool)] prediction_dict = { 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'image_shape': image_shape, 'anchors': anchors, 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'proposal_boxes': proposal_boxes, 'num_proposals': num_proposals } _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list, is_annotated_list=is_annotated_list) loss_dict = model.loss(prediction_dict, true_image_shapes) return (loss_dict['Loss/BoxClassifierLoss/localization_loss'], loss_dict['Loss/BoxClassifierLoss/classification_loss']) exp_loc_loss = (2 * (-5 * np.log(.8) - 0.5) / (number_of_proposals * batch_size)) loc_loss, cls_loss = self.execute_cpu(graph_fn, [], graph=g) self.assertAllClose(loc_loss, exp_loc_loss) self.assertAllClose(cls_loss, 0) def test_restore_map_for_classification_ckpt(self): if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') # Define mock tensorflow classification graph and save variables. 
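    # The expectation below: restore_map(fine_tune_checkpoint_type=
    # 'classification') should map only feature-extractor variables onto the
    # classification checkpoint saved here, so after restoring, no variable
    # under either feature-extractor scope may be reported as uninitialized.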
test_graph_classification = tf.Graph() with test_graph_classification.as_default(): image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3]) with tf.variable_scope('mock_model'): net = slim.conv2d(image, num_outputs=3, kernel_size=1, scope='layer1') slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2') init_op = tf.global_variables_initializer() saver = tf.train.Saver() save_path = self.get_temp_dir() with self.test_session(graph=test_graph_classification) as sess: sess.run(init_op) saved_model_path = saver.save(sess, save_path) # Create tensorflow detection graph and load variables from # classification checkpoint. test_graph_detection = tf.Graph() with test_graph_detection.as_default(): model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6) inputs_shape = (2, 20, 20, 3) inputs = tf.cast(tf.random_uniform( inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) preprocessed_inputs, true_image_shapes = model.preprocess(inputs) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) model.postprocess(prediction_dict, true_image_shapes) var_map = model.restore_map(fine_tune_checkpoint_type='classification') self.assertIsInstance(var_map, dict) saver = tf.train.Saver(var_map) with self.test_session(graph=test_graph_classification) as sess: saver.restore(sess, saved_model_path) for var in sess.run(tf.report_uninitialized_variables()): self.assertNotIn(model.first_stage_feature_extractor_scope, var) self.assertNotIn(model.second_stage_feature_extractor_scope, var) def test_restore_map_for_detection_ckpt(self): if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') # Define mock tensorflow classification graph and save variables. # Define first detection graph and save variables. test_graph_detection1 = tf.Graph() with test_graph_detection1.as_default(): model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6) inputs_shape = (2, 20, 20, 3) inputs = tf.cast(tf.random_uniform( inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) preprocessed_inputs, true_image_shapes = model.preprocess(inputs) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) model.postprocess(prediction_dict, true_image_shapes) another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable init_op = tf.global_variables_initializer() saver = tf.train.Saver() save_path = self.get_temp_dir() with self.test_session(graph=test_graph_detection1) as sess: sess.run(init_op) saved_model_path = saver.save(sess, save_path) # Define second detection graph and restore variables. 
test_graph_detection2 = tf.Graph() with test_graph_detection2.as_default(): model2 = self._build_model(is_training=False, number_of_stages=2, second_stage_batch_size=6, num_classes=42) inputs_shape2 = (2, 20, 20, 3) inputs2 = tf.cast(tf.random_uniform( inputs_shape2, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) preprocessed_inputs2, true_image_shapes = model2.preprocess(inputs2) prediction_dict2 = model2.predict(preprocessed_inputs2, true_image_shapes) model2.postprocess(prediction_dict2, true_image_shapes) another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable var_map = model2.restore_map(fine_tune_checkpoint_type='detection') self.assertIsInstance(var_map, dict) saver = tf.train.Saver(var_map) with self.test_session(graph=test_graph_detection2) as sess: saver.restore(sess, saved_model_path) uninitialized_vars_list = sess.run(tf.report_uninitialized_variables()) self.assertIn(six.b('another_variable'), uninitialized_vars_list) for var in uninitialized_vars_list: self.assertNotIn( six.b(model2.first_stage_feature_extractor_scope), var) self.assertNotIn( six.b(model2.second_stage_feature_extractor_scope), var) def test_load_all_det_checkpoint_vars(self): if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') test_graph_detection = tf.Graph() with test_graph_detection.as_default(): model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6, num_classes=42) inputs_shape = (2, 20, 20, 3) inputs = tf.cast( tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) preprocessed_inputs, true_image_shapes = model.preprocess(inputs) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) model.postprocess(prediction_dict, true_image_shapes) another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable var_map = model.restore_map( fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=True) self.assertIsInstance(var_map, dict) self.assertIn('another_variable', var_map) if __name__ == '__main__': tf.test.main()
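# --- Illustrative sketch (not part of the original test library) ---
# The hard-example-mining tests above derive their expected localization loss
# from the smooth L1 (Huber) loss applied to the box coder's scaled residual
# 5 * log(4/5), divided by the number of per-image proposals (and, when a
# losses mask is used, by the batch size). The helper below is a minimal
# NumPy sketch of that arithmetic; the function names and arguments are
# illustrative assumptions, not part of the tested API.
import numpy as np  # duplicates the module-level import, kept for self-containment


def _smooth_l1(x):
  """Smooth L1 loss: 0.5 * x**2 if |x| < 1, else |x| - 0.5."""
  x = np.abs(x)
  return np.where(x < 1.0, 0.5 * x * x, x - 0.5)


def _expected_hard_mining_loc_loss(num_proposals, batch_size=1):
  """Loss when only the hardest (first) groundtruth box survives mining."""
  # The first box is 4/5 of the anchor size in both directions, so height and
  # width each contribute a residual of 5 * log(4/5); the factor 5.0 is the
  # height/width scale used by the default Faster R-CNN box coder.
  residual = 5.0 * np.log(4.0 / 5.0)
  return 2.0 * _smooth_l1(residual) / (num_proposals * batch_size)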
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/faster_rcnn_meta_arch_test_lib.py
faster_rcnn_meta_arch_test_lib.py
# Lint as: python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library functions for Context R-CNN.""" import tensorflow as tf from object_detection.core import freezable_batch_norm # The negative value used in padding the invalid weights. _NEGATIVE_PADDING_VALUE = -100000 class ContextProjection(tf.keras.layers.Layer): """Custom layer to do batch normalization and projection.""" def __init__(self, projection_dimension, **kwargs): self.batch_norm = freezable_batch_norm.FreezableBatchNorm( epsilon=0.001, center=True, scale=True, momentum=0.97, trainable=True) self.projection = tf.keras.layers.Dense(units=projection_dimension, use_bias=True) self.projection_dimension = projection_dimension super(ContextProjection, self).__init__(**kwargs) def build(self, input_shape): self.projection.build(input_shape) self.batch_norm.build(input_shape[:1] + [self.projection_dimension]) def call(self, input_features, is_training=False): return tf.nn.relu6(self.batch_norm(self.projection(input_features), is_training)) class AttentionBlock(tf.keras.layers.Layer): """Custom layer to perform all attention.""" def __init__(self, bottleneck_dimension, attention_temperature, output_dimension=None, is_training=False, name='AttentionBlock', max_num_proposals=100, **kwargs): """Constructs an attention block. Args: bottleneck_dimension: A int32 Tensor representing the bottleneck dimension for intermediate projections. attention_temperature: A float Tensor. It controls the temperature of the softmax for weights calculation. The formula for calculation as follows: weights = exp(weights / temperature) / sum(exp(weights / temperature)) output_dimension: A int32 Tensor representing the last dimension of the output feature. is_training: A boolean Tensor (affecting batch normalization). name: A string describing what to name the variables in this block. max_num_proposals: The number of box proposals for each image **kwargs: Additional keyword arguments. """ self._key_proj = ContextProjection(bottleneck_dimension) self._val_proj = ContextProjection(bottleneck_dimension) self._query_proj = ContextProjection(bottleneck_dimension) self._feature_proj = None self._attention_temperature = attention_temperature self._bottleneck_dimension = bottleneck_dimension self._is_training = is_training self._output_dimension = output_dimension self._max_num_proposals = max_num_proposals if self._output_dimension: self._feature_proj = ContextProjection(self._output_dimension) super(AttentionBlock, self).__init__(name=name, **kwargs) def build(self, input_shapes): """Finishes building the attention block. Args: input_shapes: the shape of the primary input box features. 
""" if not self._feature_proj: self._output_dimension = input_shapes[-1] self._feature_proj = ContextProjection(self._output_dimension) def call(self, box_features, context_features, valid_context_size, num_proposals): """Handles a call by performing attention. Args: box_features: A float Tensor of shape [batch_size * input_size, height, width, num_input_features]. context_features: A float Tensor of shape [batch_size, context_size, num_context_features]. valid_context_size: A int32 Tensor of shape [batch_size]. num_proposals: A [batch_size] int32 Tensor specifying the number of valid proposals per image in the batch. Returns: A float Tensor with shape [batch_size, input_size, num_input_features] containing output features after attention with context features. """ _, context_size, _ = context_features.shape keys_values_valid_mask = compute_valid_mask( valid_context_size, context_size) total_proposals, height, width, channels = box_features.shape batch_size = total_proposals // self._max_num_proposals box_features = tf.reshape( box_features, [batch_size, self._max_num_proposals, height, width, channels]) # Average pools over height and width dimension so that the shape of # box_features becomes [batch_size, max_num_proposals, channels]. box_features = tf.reduce_mean(box_features, [2, 3]) queries_valid_mask = compute_valid_mask(num_proposals, box_features.shape[1]) queries = project_features( box_features, self._bottleneck_dimension, self._is_training, self._query_proj, normalize=True) keys = project_features( context_features, self._bottleneck_dimension, self._is_training, self._key_proj, normalize=True) values = project_features( context_features, self._bottleneck_dimension, self._is_training, self._val_proj, normalize=True) # masking out any keys which are padding keys *= tf.cast(keys_values_valid_mask[..., tf.newaxis], keys.dtype) queries *= tf.cast(queries_valid_mask[..., tf.newaxis], queries.dtype) weights = tf.matmul(queries, keys, transpose_b=True) weights, values = filter_weight_value(weights, values, keys_values_valid_mask) weights = tf.nn.softmax(weights / self._attention_temperature) features = tf.matmul(weights, values) output_features = project_features( features, self._output_dimension, self._is_training, self._feature_proj, normalize=False) output_features = output_features[:, :, tf.newaxis, tf.newaxis, :] return output_features def filter_weight_value(weights, values, valid_mask): """Filters weights and values based on valid_mask. _NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to avoid their contribution in softmax. 0 will be set for the invalid elements in the values. Args: weights: A float Tensor of shape [batch_size, input_size, context_size]. values: A float Tensor of shape [batch_size, context_size, projected_dimension]. valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means valid and False means invalid. Returns: weights: A float Tensor of shape [batch_size, input_size, context_size]. values: A float Tensor of shape [batch_size, context_size, projected_dimension]. Raises: ValueError: If shape of doesn't match. 
""" w_batch_size, _, w_context_size = weights.shape v_batch_size, v_context_size, _ = values.shape m_batch_size, m_context_size = valid_mask.shape if w_batch_size != v_batch_size or v_batch_size != m_batch_size: raise ValueError('Please make sure the first dimension of the input' ' tensors are the same.') if w_context_size != v_context_size: raise ValueError('Please make sure the third dimension of weights matches' ' the second dimension of values.') if w_context_size != m_context_size: raise ValueError('Please make sure the third dimension of the weights' ' matches the second dimension of the valid_mask.') valid_mask = valid_mask[..., tf.newaxis] # Force the invalid weights to be very negative so it won't contribute to # the softmax. weights += tf.transpose( tf.cast(tf.math.logical_not(valid_mask), weights.dtype) * _NEGATIVE_PADDING_VALUE, perm=[0, 2, 1]) # Force the invalid values to be 0. values *= tf.cast(valid_mask, values.dtype) return weights, values def project_features(features, bottleneck_dimension, is_training, layer, normalize=True): """Projects features to another feature space. Args: features: A float Tensor of shape [batch_size, features_size, num_features]. bottleneck_dimension: A int32 Tensor. is_training: A boolean Tensor (affecting batch normalization). layer: Contains a custom layer specific to the particular operation being performed (key, value, query, features) normalize: A boolean Tensor. If true, the output features will be l2 normalized on the last dimension. Returns: A float Tensor of shape [batch, features_size, projection_dimension]. """ shape_arr = features.shape batch_size, _, num_features = shape_arr features = tf.reshape(features, [-1, num_features]) projected_features = layer(features, is_training) projected_features = tf.reshape(projected_features, [batch_size, -1, bottleneck_dimension]) if normalize: projected_features = tf.keras.backend.l2_normalize(projected_features, axis=-1) return projected_features def compute_valid_mask(num_valid_elements, num_elements): """Computes mask of valid entries within padded context feature. Args: num_valid_elements: A int32 Tensor of shape [batch_size]. num_elements: An int32 Tensor. Returns: A boolean Tensor of the shape [batch_size, num_elements]. True means valid and False means invalid. """ batch_size = num_valid_elements.shape[0] element_idxs = tf.range(num_elements, dtype=tf.int32) batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1]) num_valid_elements = num_valid_elements[..., tf.newaxis] valid_mask = tf.less(batch_element_idxs, num_valid_elements) return valid_mask
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_lib_tf2.py
context_rcnn_lib_tf2.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.meta_architectures.context_meta_arch.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import unittest from unittest import mock # pylint: disable=g-importing-member from absl.testing import parameterized import tensorflow.compat.v1 as tf import tf_slim as slim from google.protobuf import text_format from object_detection.anchor_generators import grid_anchor_generator from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.builders import post_processing_builder from object_detection.core import balanced_positive_negative_sampler as sampler from object_detection.core import losses from object_detection.core import post_processing from object_detection.core import standard_fields as fields from object_detection.core import target_assigner from object_detection.meta_architectures import context_rcnn_meta_arch from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.protos import box_predictor_pb2 from object_detection.protos import hyperparams_pb2 from object_detection.protos import post_processing_pb2 from object_detection.utils import spatial_transform_ops as spatial_ops from object_detection.utils import test_case from object_detection.utils import test_utils from object_detection.utils import tf_version class FakeFasterRCNNFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Fake feature extractor to use in tests.""" def __init__(self): super(FakeFasterRCNNFeatureExtractor, self).__init__( is_training=False, first_stage_features_stride=32, reuse_weights=None, weight_decay=0.0) def preprocess(self, resized_inputs): return tf.identity(resized_inputs) def _extract_proposal_features(self, preprocessed_inputs, scope): with tf.variable_scope('mock_model'): proposal_features = 0 * slim.conv2d( preprocessed_inputs, num_outputs=3, kernel_size=1, scope='layer1') return proposal_features, {} def _extract_box_classifier_features(self, proposal_feature_maps, scope): with tf.variable_scope('mock_model'): return 0 * slim.conv2d( proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer2') class FakeFasterRCNNKerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Fake feature extractor to use in tests.""" def __init__(self): super(FakeFasterRCNNKerasFeatureExtractor, self).__init__( is_training=False, first_stage_features_stride=32, weight_decay=0.0) def preprocess(self, resized_inputs): return tf.identity(resized_inputs) def get_proposal_feature_extractor_model(self, name): class ProposalFeatureExtractor(tf.keras.Model): """Dummy proposal feature extraction.""" def __init__(self, name): super(ProposalFeatureExtractor, self).__init__(name=name) self.conv 
= None def build(self, input_shape): self.conv = tf.keras.layers.Conv2D( 3, kernel_size=1, padding='SAME', name='layer1') def call(self, inputs): return self.conv(inputs) return ProposalFeatureExtractor(name=name) def get_box_classifier_feature_extractor_model(self, name): return tf.keras.Sequential([ tf.keras.layers.Conv2D( 3, kernel_size=1, padding='SAME', name=name + '_layer2') ]) class ContextRCNNMetaArchTest(test_case.TestCase, parameterized.TestCase): def _get_model(self, box_predictor, **common_kwargs): return context_rcnn_meta_arch.ContextRCNNMetaArch( initial_crop_size=3, maxpool_kernel_size=1, maxpool_stride=1, second_stage_mask_rcnn_box_predictor=box_predictor, attention_bottleneck_dimension=10, attention_temperature=0.2, **common_kwargs) def _build_arg_scope_with_hyperparams(self, hyperparams_text_proto, is_training): hyperparams = hyperparams_pb2.Hyperparams() text_format.Merge(hyperparams_text_proto, hyperparams) return hyperparams_builder.build(hyperparams, is_training=is_training) def _build_keras_layer_hyperparams(self, hyperparams_text_proto): hyperparams = hyperparams_pb2.Hyperparams() text_format.Merge(hyperparams_text_proto, hyperparams) return hyperparams_builder.KerasLayerHyperparams(hyperparams) def _get_second_stage_box_predictor_text_proto(self, share_box_across_classes=False ): share_box_field = 'true' if share_box_across_classes else 'false' box_predictor_text_proto = """ mask_rcnn_box_predictor {{ fc_hyperparams {{ op: FC activation: NONE regularizer {{ l2_regularizer {{ weight: 0.0005 }} }} initializer {{ variance_scaling_initializer {{ factor: 1.0 uniform: true mode: FAN_AVG }} }} }} share_box_across_classes: {share_box_across_classes} }} """.format(share_box_across_classes=share_box_field) return box_predictor_text_proto def _get_box_classifier_features_shape(self, image_size, batch_size, max_num_proposals, initial_crop_size, maxpool_stride, num_features): return (batch_size * max_num_proposals, initial_crop_size/maxpool_stride, initial_crop_size/maxpool_stride, num_features) def _get_second_stage_box_predictor(self, num_classes, is_training, predict_masks, masks_are_class_agnostic, share_box_across_classes=False, use_keras=False): box_predictor_proto = box_predictor_pb2.BoxPredictor() text_format.Merge( self._get_second_stage_box_predictor_text_proto( share_box_across_classes), box_predictor_proto) if predict_masks: text_format.Merge( self._add_mask_to_second_stage_box_predictor_text_proto( masks_are_class_agnostic), box_predictor_proto) if use_keras: return box_predictor_builder.build_keras( hyperparams_builder.KerasLayerHyperparams, inplace_batchnorm_update=False, freeze_batchnorm=False, box_predictor_config=box_predictor_proto, num_classes=num_classes, num_predictions_per_location_list=None, is_training=is_training) else: return box_predictor_builder.build( hyperparams_builder.build, box_predictor_proto, num_classes=num_classes, is_training=is_training) def _build_model(self, is_training, number_of_stages, second_stage_batch_size, first_stage_max_proposals=8, num_classes=2, hard_mining=False, softmax_second_stage_classification_loss=True, predict_masks=False, pad_to_max_dimension=None, masks_are_class_agnostic=False, use_matmul_crop_and_resize=False, clip_anchors_to_image=False, use_matmul_gather_in_matcher=False, use_static_shapes=False, calibration_mapping_value=None, share_box_across_classes=False, return_raw_detections_during_predict=False): use_keras = tf_version.is_tf2() def image_resizer_fn(image, masks=None): """Fake image resizer function.""" 
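      # This fake resizer mirrors the contract expected of a real
      # image_resizer_fn: it returns [resized_image, (optional resized_masks),
      # true_image_shape]. The image is passed through unchanged, or
      # zero-padded up to pad_to_max_dimension when that is set, and the last
      # element records the shape of the original (unpadded) image.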
resized_inputs = [] resized_image = tf.identity(image) if pad_to_max_dimension is not None: resized_image = tf.image.pad_to_bounding_box(image, 0, 0, pad_to_max_dimension, pad_to_max_dimension) resized_inputs.append(resized_image) if masks is not None: resized_masks = tf.identity(masks) if pad_to_max_dimension is not None: resized_masks = tf.image.pad_to_bounding_box( tf.transpose(masks, [1, 2, 0]), 0, 0, pad_to_max_dimension, pad_to_max_dimension) resized_masks = tf.transpose(resized_masks, [2, 0, 1]) resized_inputs.append(resized_masks) resized_inputs.append(tf.shape(image)) return resized_inputs # anchors in this test are designed so that a subset of anchors are inside # the image and a subset of anchors are outside. first_stage_anchor_scales = (0.001, 0.005, 0.1) first_stage_anchor_aspect_ratios = (0.5, 1.0, 2.0) first_stage_anchor_strides = (1, 1) first_stage_anchor_generator = grid_anchor_generator.GridAnchorGenerator( first_stage_anchor_scales, first_stage_anchor_aspect_ratios, anchor_stride=first_stage_anchor_strides) first_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'proposal', use_matmul_gather=use_matmul_gather_in_matcher) if use_keras: fake_feature_extractor = FakeFasterRCNNKerasFeatureExtractor() else: fake_feature_extractor = FakeFasterRCNNFeatureExtractor() first_stage_box_predictor_hyperparams_text_proto = """ op: CONV activation: RELU regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 } } """ if use_keras: first_stage_box_predictor_arg_scope_fn = ( self._build_keras_layer_hyperparams( first_stage_box_predictor_hyperparams_text_proto)) else: first_stage_box_predictor_arg_scope_fn = ( self._build_arg_scope_with_hyperparams( first_stage_box_predictor_hyperparams_text_proto, is_training)) first_stage_box_predictor_kernel_size = 3 first_stage_atrous_rate = 1 first_stage_box_predictor_depth = 512 first_stage_minibatch_size = 3 first_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=0.5, is_static=use_static_shapes) first_stage_nms_score_threshold = -1.0 first_stage_nms_iou_threshold = 1.0 first_stage_non_max_suppression_fn = functools.partial( post_processing.batch_multiclass_non_max_suppression, score_thresh=first_stage_nms_score_threshold, iou_thresh=first_stage_nms_iou_threshold, max_size_per_class=first_stage_max_proposals, max_total_size=first_stage_max_proposals, use_static_shapes=use_static_shapes) first_stage_localization_loss_weight = 1.0 first_stage_objectness_loss_weight = 1.0 post_processing_config = post_processing_pb2.PostProcessing() post_processing_text_proto = """ score_converter: IDENTITY batch_non_max_suppression { score_threshold: -20.0 iou_threshold: 1.0 max_detections_per_class: 5 max_total_detections: 5 use_static_shapes: """ + '{}'.format(use_static_shapes) + """ } """ if calibration_mapping_value: calibration_text_proto = """ calibration_config { function_approximation { x_y_pairs { x_y_pair { x: 0.0 y: %f } x_y_pair { x: 1.0 y: %f }}}}""" % (calibration_mapping_value, calibration_mapping_value) post_processing_text_proto = ( post_processing_text_proto + ' ' + calibration_text_proto) text_format.Merge(post_processing_text_proto, post_processing_config) second_stage_non_max_suppression_fn, second_stage_score_conversion_fn = ( post_processing_builder.build(post_processing_config)) second_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'detection', use_matmul_gather=use_matmul_gather_in_matcher) 
second_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=1.0, is_static=use_static_shapes) second_stage_localization_loss_weight = 1.0 second_stage_classification_loss_weight = 1.0 if softmax_second_stage_classification_loss: second_stage_classification_loss = ( losses.WeightedSoftmaxClassificationLoss()) else: second_stage_classification_loss = ( losses.WeightedSigmoidClassificationLoss()) hard_example_miner = None if hard_mining: hard_example_miner = losses.HardExampleMiner( num_hard_examples=1, iou_threshold=0.99, loss_type='both', cls_loss_weight=second_stage_classification_loss_weight, loc_loss_weight=second_stage_localization_loss_weight, max_negatives_per_positive=None) crop_and_resize_fn = ( spatial_ops.multilevel_matmul_crop_and_resize if use_matmul_crop_and_resize else spatial_ops.multilevel_native_crop_and_resize) common_kwargs = { 'is_training': is_training, 'num_classes': num_classes, 'image_resizer_fn': image_resizer_fn, 'feature_extractor': fake_feature_extractor, 'number_of_stages': number_of_stages, 'first_stage_anchor_generator': first_stage_anchor_generator, 'first_stage_target_assigner': first_stage_target_assigner, 'first_stage_atrous_rate': first_stage_atrous_rate, 'first_stage_box_predictor_arg_scope_fn': first_stage_box_predictor_arg_scope_fn, 'first_stage_box_predictor_kernel_size': first_stage_box_predictor_kernel_size, 'first_stage_box_predictor_depth': first_stage_box_predictor_depth, 'first_stage_minibatch_size': first_stage_minibatch_size, 'first_stage_sampler': first_stage_sampler, 'first_stage_non_max_suppression_fn': first_stage_non_max_suppression_fn, 'first_stage_max_proposals': first_stage_max_proposals, 'first_stage_localization_loss_weight': first_stage_localization_loss_weight, 'first_stage_objectness_loss_weight': first_stage_objectness_loss_weight, 'second_stage_target_assigner': second_stage_target_assigner, 'second_stage_batch_size': second_stage_batch_size, 'second_stage_sampler': second_stage_sampler, 'second_stage_non_max_suppression_fn': second_stage_non_max_suppression_fn, 'second_stage_score_conversion_fn': second_stage_score_conversion_fn, 'second_stage_localization_loss_weight': second_stage_localization_loss_weight, 'second_stage_classification_loss_weight': second_stage_classification_loss_weight, 'second_stage_classification_loss': second_stage_classification_loss, 'hard_example_miner': hard_example_miner, 'crop_and_resize_fn': crop_and_resize_fn, 'clip_anchors_to_image': clip_anchors_to_image, 'use_static_shapes': use_static_shapes, 'resize_masks': True, 'return_raw_detections_during_predict': return_raw_detections_during_predict } return self._get_model( self._get_second_stage_box_predictor( num_classes=num_classes, is_training=is_training, use_keras=use_keras, predict_masks=predict_masks, masks_are_class_agnostic=masks_are_class_agnostic, share_box_across_classes=share_box_across_classes), **common_kwargs) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') @mock.patch.object(context_rcnn_meta_arch, 'context_rcnn_lib') def test_prediction_mock_tf1(self, mock_context_rcnn_lib_v1): """Mocks the context_rcnn_lib_v1 module to test the prediction. Using mock object so that we can ensure _compute_box_context_attention is called in side the prediction function. Args: mock_context_rcnn_lib_v1: mock module for the context_rcnn_lib_v1. 
""" model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6, num_classes=42) mock_tensor = tf.ones([2, 8, 3, 3, 3], tf.float32) mock_context_rcnn_lib_v1._compute_box_context_attention.return_value = mock_tensor inputs_shape = (2, 20, 20, 3) inputs = tf.cast( tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) preprocessed_inputs, true_image_shapes = model.preprocess(inputs) context_features = tf.random_uniform((2, 20, 10), minval=0, maxval=255, dtype=tf.float32) valid_context_size = tf.random_uniform((2,), minval=0, maxval=10, dtype=tf.int32) features = { fields.InputDataFields.context_features: context_features, fields.InputDataFields.valid_context_size: valid_context_size } side_inputs = model.get_side_inputs(features) _ = model.predict(preprocessed_inputs, true_image_shapes, **side_inputs) mock_context_rcnn_lib_v1._compute_box_context_attention.assert_called_once() @parameterized.named_parameters( {'testcase_name': 'static_shapes', 'static_shapes': True}, {'testcase_name': 'nostatic_shapes', 'static_shapes': False}, ) def test_prediction_end_to_end(self, static_shapes): """Runs prediction end to end and test the shape of the results.""" with test_utils.GraphContextOrNone() as g: model = self._build_model( is_training=False, number_of_stages=2, second_stage_batch_size=6, use_matmul_crop_and_resize=static_shapes, clip_anchors_to_image=static_shapes, use_matmul_gather_in_matcher=static_shapes, use_static_shapes=static_shapes, num_classes=42) def graph_fn(): inputs_shape = (2, 20, 20, 3) inputs = tf.cast( tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) preprocessed_inputs, true_image_shapes = model.preprocess(inputs) context_features = tf.random_uniform((2, 20, 10), minval=0, maxval=255, dtype=tf.float32) valid_context_size = tf.random_uniform((2,), minval=0, maxval=10, dtype=tf.int32) features = { fields.InputDataFields.context_features: context_features, fields.InputDataFields.valid_context_size: valid_context_size } side_inputs = model.get_side_inputs(features) prediction_dict = model.predict(preprocessed_inputs, true_image_shapes, **side_inputs) return (prediction_dict['rpn_box_predictor_features'], prediction_dict['rpn_box_encodings'], prediction_dict['refined_box_encodings'], prediction_dict['proposal_boxes_normalized'], prediction_dict['proposal_boxes']) execute_fn = self.execute if static_shapes else self.execute_cpu (rpn_box_predictor_features, rpn_box_encodings, refined_box_encodings, proposal_boxes_normalized, proposal_boxes) = execute_fn(graph_fn, [], graph=g) self.assertAllEqual(len(rpn_box_predictor_features), 1) self.assertAllEqual(rpn_box_predictor_features[0].shape, [2, 20, 20, 512]) self.assertAllEqual(rpn_box_encodings.shape, [2, 3600, 4]) self.assertAllEqual(refined_box_encodings.shape, [16, 42, 4]) self.assertAllEqual(proposal_boxes_normalized.shape, [2, 8, 4]) self.assertAllEqual(proposal_boxes.shape, [2, 8, 4]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_meta_arch_test.py
context_rcnn_meta_arch_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD Meta-architecture definition. General tensorflow implementation of convolutional Multibox/SSD detection models. """ import abc import tensorflow.compat.v1 as tf from tensorflow.python.util.deprecation import deprecated_args from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import matcher from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.core import target_assigner from object_detection.utils import ops from object_detection.utils import shape_utils from object_detection.utils import variables_helper from object_detection.utils import visualization_utils # pylint: disable=g-import-not-at-top try: import tf_slim as slim except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top class SSDFeatureExtractor(object): """SSD Slim Feature Extractor definition.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ self._is_training = is_training self._depth_multiplier = depth_multiplier self._min_depth = min_depth self._pad_to_multiple = pad_to_multiple self._conv_hyperparams_fn = conv_hyperparams_fn self._reuse_weights = reuse_weights self._use_explicit_padding = use_explicit_padding self._use_depthwise = use_depthwise self._num_layers = num_layers self._override_base_feature_extractor_hyperparams = ( override_base_feature_extractor_hyperparams) @property def is_keras_model(self): return False @abc.abstractmethod def preprocess(self, resized_inputs): """Preprocesses images for feature extraction (minus image resizing). Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. """ pass @abc.abstractmethod def extract_features(self, preprocessed_inputs): """Extracts features from preprocessed inputs. This function is responsible for extracting feature maps from preprocessed images. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ raise NotImplementedError def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Args: feature_extractor_scope: A scope name for the feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): var_name = variable.op.name if var_name.startswith(feature_extractor_scope + '/'): var_name = var_name.replace(feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable return variables_to_restore class SSDKerasFeatureExtractor(tf.keras.Model): """SSD Feature Extractor definition.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False, name=None): """Constructor. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_config`. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDKerasFeatureExtractor, self).__init__(name=name) self._is_training = is_training self._depth_multiplier = depth_multiplier self._min_depth = min_depth self._pad_to_multiple = pad_to_multiple self._conv_hyperparams = conv_hyperparams self._freeze_batchnorm = freeze_batchnorm self._inplace_batchnorm_update = inplace_batchnorm_update self._use_explicit_padding = use_explicit_padding self._use_depthwise = use_depthwise self._num_layers = num_layers self._override_base_feature_extractor_hyperparams = ( override_base_feature_extractor_hyperparams) @property def is_keras_model(self): return True @abc.abstractmethod def preprocess(self, resized_inputs): """Preprocesses images for feature extraction (minus image resizing). Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. """ raise NotImplementedError @abc.abstractmethod def _extract_features(self, preprocessed_inputs): """Extracts features from preprocessed inputs. This function is responsible for extracting feature maps from preprocessed images. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ raise NotImplementedError # This overrides the keras.Model `call` method with the _extract_features # method. def call(self, inputs, **kwargs): return self._extract_features(inputs) class SSDMetaArch(model.DetectionModel): """SSD Meta-architecture definition.""" @deprecated_args(None, 'NMS is always placed on TPU; do not use nms_on_host ' 'as it has no effect.', 'nms_on_host') def __init__(self, is_training, anchor_generator, box_predictor, box_coder, feature_extractor, encode_background_as_zeros, image_resizer_fn, non_max_suppression_fn, score_conversion_fn, classification_loss, localization_loss, classification_loss_weight, localization_loss_weight, normalize_loss_by_num_matches, hard_example_miner, target_assigner_instance, add_summaries=True, normalize_loc_loss_by_codesize=False, freeze_batchnorm=False, inplace_batchnorm_update=False, add_background_class=True, explicit_background_class=False, random_example_sampler=None, expected_loss_weights_fn=None, use_confidences_as_targets=False, implicit_example_weight=0.5, equalization_loss_config=None, return_raw_detections_during_predict=False, nms_on_host=True): """SSDMetaArch Constructor. TODO(rathodv,jonathanhuang): group NMS parameters + score converter into a class and loss parameters into a class and write config protos for postprocessing and losses. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. anchor_generator: an anchor_generator.AnchorGenerator object. box_predictor: a box_predictor.BoxPredictor object. box_coder: a box_coder.BoxCoder object. feature_extractor: a SSDFeatureExtractor object. encode_background_as_zeros: boolean determining whether background targets are to be encoded as an all zeros vector or a one-hot vector (where background is the 0th class). image_resizer_fn: a callable for image resizing. 
This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions and a 1-D tensor of shape [3] indicating shape of true image within the resized image tensor as the resized image tensor could be padded. See builders/image_resizer_builder.py. non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window` inputs (with all other inputs already set) and returns a dictionary hold tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes` and `num_detections`. See `post_processing. batch_multiclass_non_max_suppression` for the type and shape of these tensors. score_conversion_fn: callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. classification_loss: an object_detection.core.losses.Loss object. localization_loss: a object_detection.core.losses.Loss object. classification_loss_weight: float localization_loss_weight: float normalize_loss_by_num_matches: boolean hard_example_miner: a losses.HardExampleMiner object (can be None) target_assigner_instance: target_assigner.TargetAssigner instance to use. add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph. normalize_loc_loss_by_codesize: whether to normalize localization loss by code size of the box encoder. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. add_background_class: Whether to add an implicit background class to one-hot encodings of groundtruth labels. Set to false if training a single class model or using groundtruth labels with an explicit background class. explicit_background_class: Set to true if using groundtruth labels with an explicit background class, as in multiclass scores. random_example_sampler: a BalancedPositiveNegativeSampler object that can perform random example sampling when computing loss. If None, random sampling process is skipped. Note that random example sampler and hard example miner can both be applied to the model. In that case, random sampler will take effect first and hard example miner can only process the random sampled examples. expected_loss_weights_fn: If not None, use to calculate loss by background/foreground weighting. Should take batch_cls_targets as inputs and return foreground_weights, background_weights. See expected_classification_loss_by_expected_sampling and expected_classification_loss_by_reweighting_unmatched_anchors in third_party/tensorflow_models/object_detection/utils/ops.py as examples. use_confidences_as_targets: Whether to use groundtruth_condifences field to assign the targets. implicit_example_weight: a float number that specifies the weight used for the implicit negative examples. equalization_loss_config: a namedtuple that specifies configs for computing equalization loss. return_raw_detections_during_predict: Whether to return raw detection boxes in the predict() method. These are decoded boxes that have not been through postprocessing (i.e. NMS). Default False. 
nms_on_host: boolean (default: True) controlling whether NMS should be carried out on the host (outside of TPU). """ super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes) self._is_training = is_training self._freeze_batchnorm = freeze_batchnorm self._inplace_batchnorm_update = inplace_batchnorm_update self._anchor_generator = anchor_generator self._box_predictor = box_predictor self._box_coder = box_coder self._feature_extractor = feature_extractor self._add_background_class = add_background_class self._explicit_background_class = explicit_background_class if add_background_class and explicit_background_class: raise ValueError("Cannot have both 'add_background_class' and" " 'explicit_background_class' true.") # Needed for fine-tuning from classification checkpoints whose # variables do not have the feature extractor scope. if self._feature_extractor.is_keras_model: # Keras feature extractors will have a name they implicitly use to scope. # So, all contained variables are prefixed by this name. # To load from classification checkpoints, need to filter out this name. self._extract_features_scope = feature_extractor.name else: # Slim feature extractors get an explicit naming scope self._extract_features_scope = 'FeatureExtractor' if encode_background_as_zeros: background_class = [0] else: background_class = [1] if self._add_background_class: num_foreground_classes = self.num_classes else: num_foreground_classes = self.num_classes - 1 self._unmatched_class_label = tf.constant( background_class + num_foreground_classes * [0], tf.float32) self._target_assigner = target_assigner_instance self._classification_loss = classification_loss self._localization_loss = localization_loss self._classification_loss_weight = classification_loss_weight self._localization_loss_weight = localization_loss_weight self._normalize_loss_by_num_matches = normalize_loss_by_num_matches self._normalize_loc_loss_by_codesize = normalize_loc_loss_by_codesize self._hard_example_miner = hard_example_miner self._random_example_sampler = random_example_sampler self._parallel_iterations = 16 self._image_resizer_fn = image_resizer_fn self._non_max_suppression_fn = non_max_suppression_fn self._score_conversion_fn = score_conversion_fn self._anchors = None self._add_summaries = add_summaries self._batched_prediction_tensor_names = [] self._expected_loss_weights_fn = expected_loss_weights_fn self._use_confidences_as_targets = use_confidences_as_targets self._implicit_example_weight = implicit_example_weight self._equalization_loss_config = equalization_loss_config self._return_raw_detections_during_predict = ( return_raw_detections_during_predict) @property def feature_extractor(self): return self._feature_extractor @property def anchors(self): if not self._anchors: raise RuntimeError('anchors have not been constructed yet!') if not isinstance(self._anchors, box_list.BoxList): raise RuntimeError('anchors should be a BoxList object, but is not.') return self._anchors @property def batched_prediction_tensor_names(self): if not self._batched_prediction_tensor_names: raise RuntimeError('Must call predict() method to get batched prediction ' 'tensor names.') return self._batched_prediction_tensor_names def preprocess(self, inputs): """Feature-extractor specific preprocessing. SSD meta architecture uses a default clip_window of [0, 0, 1, 1] during post-processing. On calling `preprocess` method, clip_window gets updated based on `true_image_shapes` returned by `image_resizer_fn`. 
Args: inputs: a [batch, height_in, width_in, channels] float tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, height_out, width_out, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Raises: ValueError: if inputs tensor does not have type tf.float32 """ with tf.name_scope('Preprocessor'): normalized_inputs = self._feature_extractor.preprocess(inputs) return shape_utils.resize_images_and_return_shapes( normalized_inputs, self._image_resizer_fn) def _compute_clip_window(self, preprocessed_images, true_image_shapes): """Computes clip window to use during post_processing. Computes a new clip window to use during post-processing based on `resized_image_shapes` and `true_image_shapes` only if `preprocess` method has been called. Otherwise returns a default clip window of [0, 0, 1, 1]. Args: preprocessed_images: the [batch, height, width, channels] image tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Or None if the clip window should cover the full image. Returns: a 2-D float32 tensor of the form [batch_size, 4] containing the clip window for each image in the batch in normalized coordinates (relative to the resized dimensions) where each clip window is of the form [ymin, xmin, ymax, xmax] or a default clip window of [0, 0, 1, 1]. """ if true_image_shapes is None: return tf.constant([0, 0, 1, 1], dtype=tf.float32) resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ = tf.unstack( tf.cast(true_image_shapes, dtype=tf.float32), axis=1) padded_height = tf.cast(resized_inputs_shape[1], dtype=tf.float32) padded_width = tf.cast(resized_inputs_shape[2], dtype=tf.float32) return tf.stack( [ tf.zeros_like(true_heights), tf.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width ], axis=1) def predict(self, preprocessed_inputs, true_image_shapes): """Predicts unpostprocessed tensors from input tensor. This function takes an input batch of images and runs it through the forward pass of the network to yield unpostprocessesed predictions. A side effect of calling the predict method is that self._anchors is populated with a box_list.BoxList of anchors. These anchors must be constructed before the postprocess or loss functions can be called. Args: preprocessed_inputs: a [batch, height, width, channels] image tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) preprocessed_inputs: the [batch, height, width, channels] image tensor. 2) box_encodings: 4-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 3) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 
4) feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]. 5) anchors: 2-D float tensor of shape [num_anchors, 4] containing the generated anchors in normalized coordinates. 6) final_anchors: 3-D float tensor of shape [batch_size, num_anchors, 4] containing the generated anchors in normalized coordinates. If self._return_raw_detections_during_predict is True, the dictionary will also contain: 7) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, 4] in normalized coordinates. 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals]. """ if self._inplace_batchnorm_update: batchnorm_updates_collections = None else: batchnorm_updates_collections = tf.GraphKeys.UPDATE_OPS if self._feature_extractor.is_keras_model: feature_maps = self._feature_extractor(preprocessed_inputs) else: with slim.arg_scope([slim.batch_norm], is_training=(self._is_training and not self._freeze_batchnorm), updates_collections=batchnorm_updates_collections): with tf.variable_scope(None, self._extract_features_scope, [preprocessed_inputs]): feature_maps = self._feature_extractor.extract_features( preprocessed_inputs) feature_map_spatial_dims = self._get_feature_map_spatial_dims( feature_maps) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) boxlist_list = self._anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) self._anchors = box_list_ops.concatenate(boxlist_list) if self._box_predictor.is_keras_model: predictor_results_dict = self._box_predictor(feature_maps) else: with slim.arg_scope([slim.batch_norm], is_training=(self._is_training and not self._freeze_batchnorm), updates_collections=batchnorm_updates_collections): predictor_results_dict = self._box_predictor.predict( feature_maps, self._anchor_generator.num_anchors_per_location()) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'feature_maps': feature_maps, 'anchors': self._anchors.get(), 'final_anchors': tf.tile( tf.expand_dims(self._anchors.get(), 0), [image_shape[0], 1, 1]) } for prediction_key, prediction_list in iter(predictor_results_dict.items()): prediction = tf.concat(prediction_list, axis=1) if (prediction_key == 'box_encodings' and prediction.shape.ndims == 4 and prediction.shape[2] == 1): prediction = tf.squeeze(prediction, axis=2) predictions_dict[prediction_key] = prediction if self._return_raw_detections_during_predict: predictions_dict.update(self._raw_detections_and_feature_map_inds( predictions_dict['box_encodings'], boxlist_list)) self._batched_prediction_tensor_names = [x for x in predictions_dict if x != 'anchors'] return predictions_dict def _raw_detections_and_feature_map_inds(self, box_encodings, boxlist_list): anchors = self._anchors.get() raw_detection_boxes, _ = self._batch_decode(box_encodings, anchors) batch_size, _, _ = shape_utils.combined_static_and_dynamic_shape( raw_detection_boxes) feature_map_indices = ( self._anchor_generator.anchor_index_to_feature_map_index(boxlist_list)) feature_map_indices_batched = tf.tile( tf.expand_dims(feature_map_indices, 0), multiples=[batch_size, 1]) return { fields.PredictionFields.raw_detection_boxes: raw_detection_boxes, fields.PredictionFields.raw_detection_feature_map_indices: feature_map_indices_batched } def _get_feature_map_spatial_dims(self, feature_maps): """Return list of spatial dimensions for each feature map in a list. 
Args: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]. Returns: a list of pairs (height, width) for each feature map in feature_maps """ feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ] return [(shape[1], shape[2]) for shape in feature_map_shapes] def postprocess(self, prediction_dict, true_image_shapes): """Converts prediction tensors to final detections. This function converts raw predictions tensors to final detection results by slicing off the background class, decoding box predictions and applying non max suppression and clipping to the image window. See base class for output format conventions. Note also that by default, scores are to be interpreted as logits, but if a score_conversion_fn is used, then scores are remapped (and may thus have a different interpretation). Args: prediction_dict: a dictionary holding prediction tensors with 1) preprocessed_inputs: a [batch, height, width, channels] image tensor. 2) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 3) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions. 4) mask_predictions: (optional) a 5-D float tensor of shape [batch_size, num_anchors, q, mask_height, mask_width]. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Or None, if the clip window should cover the full image. Returns: detections: a dictionary containing the following fields detection_boxes: [batch, max_detections, 4] tensor with post-processed detection boxes. detection_scores: [batch, max_detections] tensor with scalar scores for post-processed detection boxes. detection_multiclass_scores: [batch, max_detections, num_classes_with_background] tensor with class score distribution for post-processed detection boxes including background class if any. detection_classes: [batch, max_detections] tensor with classes for post-processed detection classes. detection_keypoints: [batch, max_detections, num_keypoints, 2] (if encoded in the prediction_dict 'box_encodings') detection_masks: [batch_size, max_detections, mask_height, mask_width] (optional) num_detections: [batch] raw_detection_boxes: [batch, total_detections, 4] tensor with decoded detection boxes before Non-Max Suppression. raw_detection_score: [batch, total_detections, num_classes_with_background] tensor of multi-class scores for raw detection boxes. Raises: ValueError: if prediction_dict does not contain `box_encodings` or `class_predictions_with_background` fields. 
""" if ('box_encodings' not in prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does not contain expected entries.') if 'anchors' not in prediction_dict: prediction_dict['anchors'] = self.anchors.get() with tf.name_scope('Postprocessor'): preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = tf.identity(box_encodings, 'raw_box_encodings') class_predictions_with_background = ( prediction_dict['class_predictions_with_background']) detection_boxes, detection_keypoints = self._batch_decode( box_encodings, prediction_dict['anchors']) detection_boxes = tf.identity(detection_boxes, 'raw_box_locations') detection_boxes = tf.expand_dims(detection_boxes, axis=2) detection_scores_with_background = self._score_conversion_fn( class_predictions_with_background) detection_scores = tf.identity(detection_scores_with_background, 'raw_box_scores') if self._add_background_class or self._explicit_background_class: detection_scores = tf.slice(detection_scores, [0, 0, 1], [-1, -1, -1]) additional_fields = None batch_size = ( shape_utils.combined_static_and_dynamic_shape(preprocessed_images)[0]) if 'feature_maps' in prediction_dict: feature_map_list = [] for feature_map in prediction_dict['feature_maps']: feature_map_list.append(tf.reshape(feature_map, [batch_size, -1])) box_features = tf.concat(feature_map_list, 1) box_features = tf.identity(box_features, 'raw_box_features') additional_fields = { 'multiclass_scores': detection_scores_with_background } if self._anchors is not None: num_boxes = (self._anchors.num_boxes_static() or self._anchors.num_boxes()) anchor_indices = tf.range(num_boxes) batch_anchor_indices = tf.tile( tf.expand_dims(anchor_indices, 0), [batch_size, 1]) # All additional fields need to be float. 
additional_fields.update({ 'anchor_indices': tf.cast(batch_anchor_indices, tf.float32), }) if detection_keypoints is not None: detection_keypoints = tf.identity( detection_keypoints, 'raw_keypoint_locations') additional_fields[fields.BoxListFields.keypoints] = detection_keypoints (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections) = self._non_max_suppression_fn( detection_boxes, detection_scores, clip_window=self._compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields, masks=prediction_dict.get('mask_predictions')) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: tf.cast(num_detections, dtype=tf.float32), fields.DetectionResultFields.raw_detection_boxes: tf.squeeze(detection_boxes, axis=2), fields.DetectionResultFields.raw_detection_scores: detection_scores_with_background } if (nmsed_additional_fields is not None and fields.InputDataFields.multiclass_scores in nmsed_additional_fields): detection_dict[ fields.DetectionResultFields.detection_multiclass_scores] = ( nmsed_additional_fields[ fields.InputDataFields.multiclass_scores]) if (nmsed_additional_fields is not None and 'anchor_indices' in nmsed_additional_fields): detection_dict.update({ fields.DetectionResultFields.detection_anchor_indices: tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32), }) if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) if nmsed_masks is not None: detection_dict[ fields.DetectionResultFields.detection_masks] = nmsed_masks return detection_dict def loss(self, prediction_dict, true_image_shapes, scope=None): """Compute scalar loss tensors with respect to provided groundtruth. Calling this function requires that groundtruth tensors have been provided via the provide_groundtruth function. Args: prediction_dict: a dictionary holding prediction tensors with 1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. scope: Optional scope name. Returns: a dictionary mapping loss keys (`localization_loss` and `classification_loss`) to scalar tensors representing corresponding loss values. 
""" with tf.name_scope(scope, 'Loss', prediction_dict.values()): keypoints = None if self.groundtruth_has_field(fields.BoxListFields.keypoints): keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints) weights = None if self.groundtruth_has_field(fields.BoxListFields.weights): weights = self.groundtruth_lists(fields.BoxListFields.weights) confidences = None if self.groundtruth_has_field(fields.BoxListFields.confidences): confidences = self.groundtruth_lists(fields.BoxListFields.confidences) (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, batch_match) = self._assign_targets( self.groundtruth_lists(fields.BoxListFields.boxes), self.groundtruth_lists(fields.BoxListFields.classes), keypoints, weights, confidences) match_list = [matcher.Match(match) for match in tf.unstack(batch_match)] if self._add_summaries: self._summarize_target_assignment( self.groundtruth_lists(fields.BoxListFields.boxes), match_list) if self._random_example_sampler: batch_cls_per_anchor_weights = tf.reduce_mean( batch_cls_weights, axis=-1) batch_sampled_indicator = tf.cast( shape_utils.static_or_dynamic_map_fn( self._minibatch_subsample_fn, [batch_cls_targets, batch_cls_per_anchor_weights], dtype=tf.bool, parallel_iterations=self._parallel_iterations, back_prop=True), dtype=tf.float32) batch_reg_weights = tf.multiply(batch_sampled_indicator, batch_reg_weights) batch_cls_weights = tf.multiply( tf.expand_dims(batch_sampled_indicator, -1), batch_cls_weights) losses_mask = None if self.groundtruth_has_field(fields.InputDataFields.is_annotated): losses_mask = tf.stack(self.groundtruth_lists( fields.InputDataFields.is_annotated)) location_losses = self._localization_loss( prediction_dict['box_encodings'], batch_reg_targets, ignore_nan_targets=True, weights=batch_reg_weights, losses_mask=losses_mask) cls_losses = self._classification_loss( prediction_dict['class_predictions_with_background'], batch_cls_targets, weights=batch_cls_weights, losses_mask=losses_mask) if self._expected_loss_weights_fn: # Need to compute losses for assigned targets against the # unmatched_class_label as well as their assigned targets. 
# simplest thing (but wasteful) is just to calculate all losses # twice batch_size, num_anchors, num_classes = batch_cls_targets.get_shape() unmatched_targets = tf.ones([batch_size, num_anchors, 1 ]) * self._unmatched_class_label unmatched_cls_losses = self._classification_loss( prediction_dict['class_predictions_with_background'], unmatched_targets, weights=batch_cls_weights, losses_mask=losses_mask) if cls_losses.get_shape().ndims == 3: batch_size, num_anchors, num_classes = cls_losses.get_shape() cls_losses = tf.reshape(cls_losses, [batch_size, -1]) unmatched_cls_losses = tf.reshape(unmatched_cls_losses, [batch_size, -1]) batch_cls_targets = tf.reshape( batch_cls_targets, [batch_size, num_anchors * num_classes, -1]) batch_cls_targets = tf.concat( [1 - batch_cls_targets, batch_cls_targets], axis=-1) location_losses = tf.tile(location_losses, [1, num_classes]) foreground_weights, background_weights = ( self._expected_loss_weights_fn(batch_cls_targets)) cls_losses = ( foreground_weights * cls_losses + background_weights * unmatched_cls_losses) location_losses *= foreground_weights classification_loss = tf.reduce_sum(cls_losses) localization_loss = tf.reduce_sum(location_losses) elif self._hard_example_miner: cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2) (localization_loss, classification_loss) = self._apply_hard_mining( location_losses, cls_losses, prediction_dict, match_list) if self._add_summaries: self._hard_example_miner.summarize() else: cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2) localization_loss = tf.reduce_sum(location_losses) classification_loss = tf.reduce_sum(cls_losses) # Optionally normalize by number of positive matches normalizer = tf.constant(1.0, dtype=tf.float32) if self._normalize_loss_by_num_matches: normalizer = tf.maximum(tf.cast(tf.reduce_sum(batch_reg_weights), dtype=tf.float32), 1.0) localization_loss_normalizer = normalizer if self._normalize_loc_loss_by_codesize: localization_loss_normalizer *= self._box_coder.code_size localization_loss = tf.multiply((self._localization_loss_weight / localization_loss_normalizer), localization_loss, name='localization_loss') classification_loss = tf.multiply((self._classification_loss_weight / normalizer), classification_loss, name='classification_loss') loss_dict = { 'Loss/localization_loss': localization_loss, 'Loss/classification_loss': classification_loss } return loss_dict def _minibatch_subsample_fn(self, inputs): """Randomly samples anchors for one image. Args: inputs: a list of 2 inputs. First one is a tensor of shape [num_anchors, num_classes] indicating targets assigned to each anchor. Second one is a tensor of shape [num_anchors] indicating the class weight of each anchor. Returns: batch_sampled_indicator: bool tensor of shape [num_anchors] indicating whether the anchor should be selected for loss computation. """ cls_targets, cls_weights = inputs if self._add_background_class: # Set background_class bits to 0 so that the positives_indicator # computation would not consider background class. 
background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1])) regular_class = tf.slice(cls_targets, [0, 1], [-1, -1]) cls_targets = tf.concat([background_class, regular_class], 1) positives_indicator = tf.reduce_sum(cls_targets, axis=1) return self._random_example_sampler.subsample( tf.cast(cls_weights, tf.bool), batch_size=None, labels=tf.cast(positives_indicator, tf.bool)) def _summarize_anchor_classification_loss(self, class_ids, cls_losses): positive_indices = tf.where(tf.greater(class_ids, 0)) positive_anchor_cls_loss = tf.squeeze( tf.gather(cls_losses, positive_indices), axis=1) visualization_utils.add_cdf_image_summary(positive_anchor_cls_loss, 'PositiveAnchorLossCDF') negative_indices = tf.where(tf.equal(class_ids, 0)) negative_anchor_cls_loss = tf.squeeze( tf.gather(cls_losses, negative_indices), axis=1) visualization_utils.add_cdf_image_summary(negative_anchor_cls_loss, 'NegativeAnchorLossCDF') def _assign_targets(self, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_keypoints_list=None, groundtruth_weights_list=None, groundtruth_confidences_list=None): """Assign groundtruth targets. Adds a background class to each one-hot encoding of groundtruth classes and uses target assigner to obtain regression and classification targets. Args: groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] containing coordinates of the groundtruth boxes. Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and assumed to be normalized and clipped relative to the image window with y_min <= y_max and x_min <= x_max. groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes] containing the class targets with the 0th index assumed to map to the first non-background class. groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape [num_boxes, num_keypoints, 2] groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape [num_boxes, num_classes] containing class confidences for groundtruth boxes. Returns: batch_cls_targets: a tensor with shape [batch_size, num_anchors, num_classes], batch_cls_weights: a tensor with shape [batch_size, num_anchors], batch_reg_targets: a tensor with shape [batch_size, num_anchors, box_code_dimension] batch_reg_weights: a tensor with shape [batch_size, num_anchors], match: an int32 tensor of shape [batch_size, num_anchors], containing result of anchor groundtruth matching. Each position in the tensor indicates an anchor and holds the following meaning: (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. (2) if match[x, i]=-1, anchor i is marked to be background . (3) if match[x, i]=-2, anchor i is ignored since it is not background and does not have sufficient overlap to call it a foreground. 
""" groundtruth_boxlists = [ box_list.BoxList(boxes) for boxes in groundtruth_boxes_list ] train_using_confidences = (self._is_training and self._use_confidences_as_targets) if self._add_background_class: groundtruth_classes_with_background_list = [ tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT') for one_hot_encoding in groundtruth_classes_list ] if train_using_confidences: groundtruth_confidences_with_background_list = [ tf.pad(groundtruth_confidences, [[0, 0], [1, 0]], mode='CONSTANT') for groundtruth_confidences in groundtruth_confidences_list ] else: groundtruth_classes_with_background_list = groundtruth_classes_list if groundtruth_keypoints_list is not None: for boxlist, keypoints in zip( groundtruth_boxlists, groundtruth_keypoints_list): boxlist.add_field(fields.BoxListFields.keypoints, keypoints) if train_using_confidences: return target_assigner.batch_assign_confidences( self._target_assigner, self.anchors, groundtruth_boxlists, groundtruth_confidences_with_background_list, groundtruth_weights_list, self._unmatched_class_label, self._add_background_class, self._implicit_example_weight) else: return target_assigner.batch_assign_targets( self._target_assigner, self.anchors, groundtruth_boxlists, groundtruth_classes_with_background_list, self._unmatched_class_label, groundtruth_weights_list) def _summarize_target_assignment(self, groundtruth_boxes_list, match_list): """Creates tensorflow summaries for the input boxes and anchors. This function creates four summaries corresponding to the average number (over images in a batch) of (1) groundtruth boxes, (2) anchors marked as positive, (3) anchors marked as negative, and (4) anchors marked as ignored. Args: groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] containing corners of the groundtruth boxes. match_list: a list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. """ # TODO(rathodv): Add a test for these summaries. try: # TODO(kaftan): Integrate these summaries into the v2 style loops with tf.compat.v2.init_scope(): if tf.compat.v2.executing_eagerly(): return except AttributeError: pass avg_num_gt_boxes = tf.reduce_mean( tf.cast( tf.stack([tf.shape(x)[0] for x in groundtruth_boxes_list]), dtype=tf.float32)) avg_num_matched_gt_boxes = tf.reduce_mean( tf.cast( tf.stack([match.num_matched_rows() for match in match_list]), dtype=tf.float32)) avg_pos_anchors = tf.reduce_mean( tf.cast( tf.stack([match.num_matched_columns() for match in match_list]), dtype=tf.float32)) avg_neg_anchors = tf.reduce_mean( tf.cast( tf.stack([match.num_unmatched_columns() for match in match_list]), dtype=tf.float32)) avg_ignored_anchors = tf.reduce_mean( tf.cast( tf.stack([match.num_ignored_columns() for match in match_list]), dtype=tf.float32)) tf.summary.scalar('AvgNumGroundtruthBoxesPerImage', avg_num_gt_boxes, family='TargetAssignment') tf.summary.scalar('AvgNumGroundtruthBoxesMatchedPerImage', avg_num_matched_gt_boxes, family='TargetAssignment') tf.summary.scalar('AvgNumPositiveAnchorsPerImage', avg_pos_anchors, family='TargetAssignment') tf.summary.scalar('AvgNumNegativeAnchorsPerImage', avg_neg_anchors, family='TargetAssignment') tf.summary.scalar('AvgNumIgnoredAnchorsPerImage', avg_ignored_anchors, family='TargetAssignment') def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict, match_list): """Applies hard mining to anchorwise losses. 
Args: location_losses: Float tensor of shape [batch_size, num_anchors] representing anchorwise location losses. cls_losses: Float tensor of shape [batch_size, num_anchors] representing anchorwise classification losses. prediction_dict: p a dictionary holding prediction tensors with 1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions. 3) anchors: (optional) 2-D float tensor of shape [num_anchors, 4]. match_list: a list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. Returns: mined_location_loss: a float scalar with sum of localization losses from selected hard examples. mined_cls_loss: a float scalar with sum of classification losses from selected hard examples. """ class_predictions = prediction_dict['class_predictions_with_background'] if self._add_background_class: class_predictions = tf.slice(class_predictions, [0, 0, 1], [-1, -1, -1]) if 'anchors' not in prediction_dict: prediction_dict['anchors'] = self.anchors.get() decoded_boxes, _ = self._batch_decode(prediction_dict['box_encodings'], prediction_dict['anchors']) decoded_box_tensors_list = tf.unstack(decoded_boxes) class_prediction_list = tf.unstack(class_predictions) decoded_boxlist_list = [] for box_location, box_score in zip(decoded_box_tensors_list, class_prediction_list): decoded_boxlist = box_list.BoxList(box_location) decoded_boxlist.add_field('scores', box_score) decoded_boxlist_list.append(decoded_boxlist) return self._hard_example_miner( location_losses=location_losses, cls_losses=cls_losses, decoded_boxlist_list=decoded_boxlist_list, match_list=match_list) def _batch_decode(self, box_encodings, anchors): """Decodes a batch of box encodings with respect to the anchors. Args: box_encodings: A float32 tensor of shape [batch_size, num_anchors, box_code_size] containing box encodings. anchors: A tensor of shape [num_anchors, 4]. Returns: decoded_boxes: A float32 tensor of shape [batch_size, num_anchors, 4] containing the decoded boxes. decoded_keypoints: A float32 tensor of shape [batch_size, num_anchors, num_keypoints, 2] containing the decoded keypoints if present in the input `box_encodings`, None otherwise. """ combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes = tf.tile(tf.expand_dims(anchors, 0), [batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList( tf.reshape(tiled_anchor_boxes, [-1, 4])) decoded_boxes = self._box_coder.decode( tf.reshape(box_encodings, [-1, self._box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = tf.reshape( decoded_keypoints, tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack( [combined_shape[0], combined_shape[1], 4])) return decoded_boxes, decoded_keypoints def regularization_losses(self): """Returns a list of regularization losses for this model. 

    Returns a list of regularization losses for this model that the estimator
    needs to use during training/optimization.

    Returns:
      A list of regularization loss tensors.
    """
    losses = []
    slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    # Copy the slim losses to avoid modifying the collection
    if slim_losses:
      losses.extend(slim_losses)
    if self._box_predictor.is_keras_model:
      losses.extend(self._box_predictor.losses)
    if self._feature_extractor.is_keras_model:
      losses.extend(self._feature_extractor.losses)
    return losses

  def restore_map(self,
                  fine_tune_checkpoint_type='detection',
                  load_all_detection_checkpoint_vars=False):
    """Returns a map of variables to load from a foreign checkpoint.

    See parent class for details.

    Args:
      fine_tune_checkpoint_type: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.
        Valid values: `detection`, `classification`. Default 'detection'.
      load_all_detection_checkpoint_vars: whether to load all variables (when
        `fine_tune_checkpoint_type` is `detection`). If False, only variables
        within the feature extractor scope are included. Default False.

    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.

    Raises:
      ValueError: if fine_tune_checkpoint_type is neither `classification`
        nor `detection`.
    """
    if fine_tune_checkpoint_type == 'classification':
      return self._feature_extractor.restore_from_classification_checkpoint_fn(
          self._extract_features_scope)
    elif fine_tune_checkpoint_type == 'detection':
      variables_to_restore = {}
      for variable in variables_helper.get_global_variables_safely():
        var_name = variable.op.name
        if load_all_detection_checkpoint_vars:
          variables_to_restore[var_name] = variable
        else:
          if var_name.startswith(self._extract_features_scope):
            variables_to_restore[var_name] = variable
      return variables_to_restore
    else:
      raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
          fine_tune_checkpoint_type))

  def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
    """Returns a map of Trackable objects to load from a foreign checkpoint.

    Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
    or Checkpoint). This enables the model to initialize based on weights from
    another task. For example, the feature extractor variables from a
    classification model can be used to bootstrap training of an object
    detector. When loading from an object detection model, the checkpoint model
    should have the same parameters as this detection model with the exception
    of the num_classes parameter.

    Note that this function is intended to be used to restore Keras-based
    models when running Tensorflow 2, whereas restore_map (above) is intended
    to be used to restore Slim-based models when running Tensorflow 1.x.

    Args:
      fine_tune_checkpoint_type: A string indicating the subset of variables
        to load. Valid values: `detection`, `classification`, `full`. Default
        `detection`.
        An SSD checkpoint has three parts:
        1) Classification Network (like ResNet)
        2) DeConv layers (for FPN)
        3) Box/Class prediction parameters
        The parameters will be loaded using the following strategy:
          `classification` - will load #1
          `detection` - will load #1, #2
          `full` - will load #1, #2, #3

    Returns:
      A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
    """
    if fine_tune_checkpoint_type == 'classification':
      return {
          'feature_extractor':
              self._feature_extractor.classification_backbone
      }
    elif fine_tune_checkpoint_type == 'detection':
      fake_model = tf.train.Checkpoint(
          _feature_extractor=self._feature_extractor)
      return {'model': fake_model}
    elif fine_tune_checkpoint_type == 'full':
      return {'model': self}
    else:
      raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
          fine_tune_checkpoint_type))

  def updates(self):
    """Returns a list of update operators for this model.

    Returns a list of update operators for this model that must be executed at
    each training step. The estimator's train op needs to have a control
    dependency on these updates.

    Returns:
      A list of update operators.
    """
    update_ops = []
    slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Copy the slim ops to avoid modifying the collection
    if slim_update_ops:
      update_ops.extend(slim_update_ops)
    if self._box_predictor.is_keras_model:
      update_ops.extend(self._box_predictor.get_updates_for(None))
      update_ops.extend(self._box_predictor.get_updates_for(
          self._box_predictor.inputs))
    if self._feature_extractor.is_keras_model:
      update_ops.extend(self._feature_extractor.get_updates_for(None))
      update_ops.extend(self._feature_extractor.get_updates_for(
          self._feature_extractor.inputs))
    return update_ops
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/ssd_meta_arch.py
ssd_meta_arch.py
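A note on the match convention documented by _assign_targets() in ssd_meta_arch.py above: each entry of the returned match tensor is either the index of the matched groundtruth box (>= 0), -1 for background anchors, or -2 for ignored anchors. The following standalone sketch (illustrative only, not part of the library) summarizes a match vector for one image under that convention:

import numpy as np


def summarize_match(match):
  """Counts anchors per category for one image's match vector.

  match[i] >= 0 : anchor i is matched to groundtruth box match[i]
  match[i] == -1: anchor i is marked as background
  match[i] == -2: anchor i is ignored (insufficient overlap)
  """
  match = np.asarray(match)
  return {
      'positive_anchors': int(np.sum(match >= 0)),
      'negative_anchors': int(np.sum(match == -1)),
      'ignored_anchors': int(np.sum(match == -2)),
  }


print(summarize_match([3, -1, -2, 0, -1]))
# {'positive_anchors': 2, 'negative_anchors': 2, 'ignored_anchors': 1}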
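The tail of SSDMetaArch.loss() above normalizes the summed localization and classification losses by the number of positive matches (clamped to at least one) and optionally divides the localization loss by the box coder code size. A minimal standalone sketch of that arithmetic, assuming illustrative names and plain NumPy inputs:

import numpy as np


def normalize_losses(localization_loss, classification_loss, batch_reg_weights,
                     localization_weight=1.0, classification_weight=1.0,
                     normalize_by_num_matches=True,
                     normalize_loc_loss_by_codesize=False, code_size=4):
  """Sketch of the normalization at the end of SSDMetaArch.loss()."""
  normalizer = 1.0
  if normalize_by_num_matches:
    # Number of positive (regression) matches, clamped to at least 1.
    normalizer = max(float(np.sum(batch_reg_weights)), 1.0)
  localization_normalizer = normalizer
  if normalize_loc_loss_by_codesize:
    localization_normalizer *= code_size
  return ((localization_weight / localization_normalizer) * localization_loss,
          (classification_weight / normalizer) * classification_loss)


# Three matched anchors out of four; raw summed losses of 6.0 and 9.0.
loc, cls = normalize_losses(6.0, 9.0,
                            batch_reg_weights=np.array([1., 1., 1., 0.]))
print(loc, cls)  # -> 2.0 3.0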
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/input_reader.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/input_reader.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n*object_detection/protos/input_reader.proto\x12\x17object_detection.protos\"\xeb\n\n\x0bInputReader\x12\x0e\n\x04name\x18\x17 \x01(\t:\x00\x12\x18\n\x0elabel_map_path\x18\x01 \x01(\t:\x00\x12\x15\n\x07shuffle\x18\x02 \x01(\x08:\x04true\x12!\n\x13shuffle_buffer_size\x18\x0b \x01(\r:\x04\x32\x30\x34\x38\x12*\n\x1d\x66ilenames_shuffle_buffer_size\x18\x0c \x01(\r:\x03\x31\x30\x30\x12\x15\n\nnum_epochs\x18\x05 \x01(\r:\x01\x30\x12!\n\x16sample_1_of_n_examples\x18\x16 \x01(\r:\x01\x31\x12\x17\n\x0bnum_readers\x18\x06 \x01(\r:\x02\x36\x34\x12\x1f\n\x14num_parallel_batches\x18\x13 \x01(\r:\x01\x38\x12\x1f\n\x14num_prefetch_batches\x18\x14 \x01(\x05:\x01\x32\x12 \n\x0equeue_capacity\x18\x03 \x01(\r:\x04\x32\x30\x30\x30\x42\x02\x18\x01\x12#\n\x11min_after_dequeue\x18\x04 \x01(\r:\x04\x31\x30\x30\x30\x42\x02\x18\x01\x12\x1d\n\x11read_block_length\x18\x0f \x01(\r:\x02\x33\x32\x12\x1e\n\rprefetch_size\x18\r \x01(\r:\x03\x35\x31\x32\x42\x02\x18\x01\x12&\n\x16num_parallel_map_calls\x18\x0e \x01(\r:\x02\x36\x34\x42\x02\x18\x01\x12\x1c\n\x0e\x64rop_remainder\x18# \x01(\x08:\x04true\x12\"\n\x17num_additional_channels\x18\x12 \x01(\x05:\x01\x30\x12\x18\n\rnum_keypoints\x18\x10 \x01(\r:\x01\x30\x12\x1c\n\x14keypoint_type_weight\x18\x1a \x03(\x02\x12 \n\x13max_number_of_boxes\x18\x15 \x01(\x05:\x03\x31\x30\x30\x12%\n\x16load_multiclass_scores\x18\x18 \x01(\x08:\x05\x66\x61lse\x12$\n\x15load_context_features\x18\x19 \x01(\x08:\x05\x66\x61lse\x12%\n\x16load_context_image_ids\x18$ \x01(\x08:\x05\x66\x61lse\x12\"\n\x13load_instance_masks\x18\x07 \x01(\x08:\x05\x66\x61lse\x12M\n\tmask_type\x18\n \x01(\x0e\x32).object_detection.protos.InstanceMaskType:\x0fNUMERICAL_MASKS\x12\x1e\n\x0fload_dense_pose\x18\x1f \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rload_track_id\x18! 
\x01(\x08:\x05\x66\x61lse\x12+\n\x1cload_keypoint_depth_features\x18% \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10use_display_name\x18\x11 \x01(\x08:\x05\x66\x61lse\x12 \n\x11include_source_id\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\x42\n\ninput_type\x18\x1e \x01(\x0e\x32\".object_detection.protos.InputType:\nTF_EXAMPLE\x12\x17\n\x0b\x66rame_index\x18 \x01(\x05:\x02-1\x12N\n\x16tf_record_input_reader\x18\x08 \x01(\x0b\x32,.object_detection.protos.TFRecordInputReaderH\x00\x12M\n\x15\x65xternal_input_reader\x18\t \x01(\x0b\x32,.object_detection.protos.ExternalInputReaderH\x00\x12$\n\x1csample_from_datasets_weights\x18\" \x03(\x02\x12&\n\x17\x65xpand_labels_hierarchy\x18\x1d \x01(\x08:\x05\x66\x61lseB\x0e\n\x0cinput_reader\")\n\x13TFRecordInputReader\x12\x12\n\ninput_path\x18\x01 \x03(\t\"\x1c\n\x13\x45xternalInputReader*\x05\x08\x01\x10\xe8\x07*C\n\x10InstanceMaskType\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x13\n\x0fNUMERICAL_MASKS\x10\x01\x12\r\n\tPNG_MASKS\x10\x02*G\n\tInputType\x12\x11\n\rINPUT_DEFAULT\x10\x00\x12\x0e\n\nTF_EXAMPLE\x10\x01\x12\x17\n\x13TF_SEQUENCE_EXAMPLE\x10\x02' ) _INSTANCEMASKTYPE = _descriptor.EnumDescriptor( name='InstanceMaskType', full_name='object_detection.protos.InstanceMaskType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='NUMERICAL_MASKS', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='PNG_MASKS', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1534, serialized_end=1601, ) _sym_db.RegisterEnumDescriptor(_INSTANCEMASKTYPE) InstanceMaskType = enum_type_wrapper.EnumTypeWrapper(_INSTANCEMASKTYPE) _INPUTTYPE = _descriptor.EnumDescriptor( name='InputType', full_name='object_detection.protos.InputType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='INPUT_DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='TF_EXAMPLE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='TF_SEQUENCE_EXAMPLE', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1603, serialized_end=1674, ) _sym_db.RegisterEnumDescriptor(_INPUTTYPE) InputType = enum_type_wrapper.EnumTypeWrapper(_INPUTTYPE) DEFAULT = 0 NUMERICAL_MASKS = 1 PNG_MASKS = 2 INPUT_DEFAULT = 0 TF_EXAMPLE = 1 TF_SEQUENCE_EXAMPLE = 2 _INPUTREADER = _descriptor.Descriptor( name='InputReader', full_name='object_detection.protos.InputReader', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='object_detection.protos.InputReader.name', index=0, number=23, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='label_map_path', full_name='object_detection.protos.InputReader.label_map_path', index=1, number=1, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shuffle', full_name='object_detection.protos.InputReader.shuffle', index=2, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shuffle_buffer_size', full_name='object_detection.protos.InputReader.shuffle_buffer_size', index=3, number=11, type=13, cpp_type=3, label=1, has_default_value=True, default_value=2048, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='filenames_shuffle_buffer_size', full_name='object_detection.protos.InputReader.filenames_shuffle_buffer_size', index=4, number=12, type=13, cpp_type=3, label=1, has_default_value=True, default_value=100, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_epochs', full_name='object_detection.protos.InputReader.num_epochs', index=5, number=5, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='sample_1_of_n_examples', full_name='object_detection.protos.InputReader.sample_1_of_n_examples', index=6, number=22, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_readers', full_name='object_detection.protos.InputReader.num_readers', index=7, number=6, type=13, cpp_type=3, label=1, has_default_value=True, default_value=64, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_parallel_batches', full_name='object_detection.protos.InputReader.num_parallel_batches', index=8, number=19, type=13, cpp_type=3, label=1, has_default_value=True, default_value=8, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_prefetch_batches', full_name='object_detection.protos.InputReader.num_prefetch_batches', index=9, number=20, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='queue_capacity', full_name='object_detection.protos.InputReader.queue_capacity', index=10, number=3, type=13, cpp_type=3, label=1, has_default_value=True, default_value=2000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_after_dequeue', full_name='object_detection.protos.InputReader.min_after_dequeue', index=11, number=4, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='read_block_length', full_name='object_detection.protos.InputReader.read_block_length', index=12, number=15, type=13, cpp_type=3, label=1, has_default_value=True, default_value=32, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='prefetch_size', full_name='object_detection.protos.InputReader.prefetch_size', index=13, number=13, type=13, cpp_type=3, label=1, has_default_value=True, default_value=512, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_parallel_map_calls', full_name='object_detection.protos.InputReader.num_parallel_map_calls', index=14, number=14, type=13, cpp_type=3, label=1, has_default_value=True, default_value=64, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='drop_remainder', full_name='object_detection.protos.InputReader.drop_remainder', index=15, number=35, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_additional_channels', full_name='object_detection.protos.InputReader.num_additional_channels', index=16, number=18, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_keypoints', full_name='object_detection.protos.InputReader.num_keypoints', index=17, number=16, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_type_weight', full_name='object_detection.protos.InputReader.keypoint_type_weight', index=18, number=26, type=2, cpp_type=6, label=3, has_default_value=False, 
default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_number_of_boxes', full_name='object_detection.protos.InputReader.max_number_of_boxes', index=19, number=21, type=5, cpp_type=1, label=1, has_default_value=True, default_value=100, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='load_multiclass_scores', full_name='object_detection.protos.InputReader.load_multiclass_scores', index=20, number=24, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='load_context_features', full_name='object_detection.protos.InputReader.load_context_features', index=21, number=25, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='load_context_image_ids', full_name='object_detection.protos.InputReader.load_context_image_ids', index=22, number=36, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='load_instance_masks', full_name='object_detection.protos.InputReader.load_instance_masks', index=23, number=7, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_type', full_name='object_detection.protos.InputReader.mask_type', index=24, number=10, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='load_dense_pose', full_name='object_detection.protos.InputReader.load_dense_pose', index=25, number=31, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='load_track_id', full_name='object_detection.protos.InputReader.load_track_id', index=26, number=33, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='load_keypoint_depth_features', 
full_name='object_detection.protos.InputReader.load_keypoint_depth_features', index=27, number=37, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_display_name', full_name='object_detection.protos.InputReader.use_display_name', index=28, number=17, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='include_source_id', full_name='object_detection.protos.InputReader.include_source_id', index=29, number=27, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='input_type', full_name='object_detection.protos.InputReader.input_type', index=30, number=30, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='frame_index', full_name='object_detection.protos.InputReader.frame_index', index=31, number=32, type=5, cpp_type=1, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='tf_record_input_reader', full_name='object_detection.protos.InputReader.tf_record_input_reader', index=32, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='external_input_reader', full_name='object_detection.protos.InputReader.external_input_reader', index=33, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='sample_from_datasets_weights', full_name='object_detection.protos.InputReader.sample_from_datasets_weights', index=34, number=34, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='expand_labels_hierarchy', full_name='object_detection.protos.InputReader.expand_labels_hierarchy', index=35, number=29, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='input_reader', full_name='object_detection.protos.InputReader.input_reader', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=72, serialized_end=1459, ) _TFRECORDINPUTREADER = _descriptor.Descriptor( name='TFRecordInputReader', full_name='object_detection.protos.TFRecordInputReader', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='input_path', full_name='object_detection.protos.TFRecordInputReader.input_path', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1461, serialized_end=1502, ) _EXTERNALINPUTREADER = _descriptor.Descriptor( name='ExternalInputReader', full_name='object_detection.protos.ExternalInputReader', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=True, syntax='proto2', extension_ranges=[(1, 1000), ], oneofs=[ ], serialized_start=1504, serialized_end=1532, ) _INPUTREADER.fields_by_name['mask_type'].enum_type = _INSTANCEMASKTYPE _INPUTREADER.fields_by_name['input_type'].enum_type = _INPUTTYPE _INPUTREADER.fields_by_name['tf_record_input_reader'].message_type = _TFRECORDINPUTREADER _INPUTREADER.fields_by_name['external_input_reader'].message_type = _EXTERNALINPUTREADER _INPUTREADER.oneofs_by_name['input_reader'].fields.append( _INPUTREADER.fields_by_name['tf_record_input_reader']) _INPUTREADER.fields_by_name['tf_record_input_reader'].containing_oneof = _INPUTREADER.oneofs_by_name['input_reader'] _INPUTREADER.oneofs_by_name['input_reader'].fields.append( _INPUTREADER.fields_by_name['external_input_reader']) _INPUTREADER.fields_by_name['external_input_reader'].containing_oneof = _INPUTREADER.oneofs_by_name['input_reader'] DESCRIPTOR.message_types_by_name['InputReader'] = _INPUTREADER DESCRIPTOR.message_types_by_name['TFRecordInputReader'] = _TFRECORDINPUTREADER DESCRIPTOR.message_types_by_name['ExternalInputReader'] = _EXTERNALINPUTREADER DESCRIPTOR.enum_types_by_name['InstanceMaskType'] = _INSTANCEMASKTYPE DESCRIPTOR.enum_types_by_name['InputType'] = _INPUTTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) InputReader = _reflection.GeneratedProtocolMessageType('InputReader', (_message.Message,), { 'DESCRIPTOR' : _INPUTREADER, '__module__' : 'object_detection.protos.input_reader_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.InputReader) }) _sym_db.RegisterMessage(InputReader) TFRecordInputReader = _reflection.GeneratedProtocolMessageType('TFRecordInputReader', (_message.Message,), { 'DESCRIPTOR' : _TFRECORDINPUTREADER, '__module__' : 'object_detection.protos.input_reader_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.TFRecordInputReader) }) _sym_db.RegisterMessage(TFRecordInputReader) ExternalInputReader = 
_reflection.GeneratedProtocolMessageType('ExternalInputReader', (_message.Message,), { 'DESCRIPTOR' : _EXTERNALINPUTREADER, '__module__' : 'object_detection.protos.input_reader_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ExternalInputReader) }) _sym_db.RegisterMessage(ExternalInputReader) _INPUTREADER.fields_by_name['queue_capacity']._options = None _INPUTREADER.fields_by_name['min_after_dequeue']._options = None _INPUTREADER.fields_by_name['prefetch_size']._options = None _INPUTREADER.fields_by_name['num_parallel_map_calls']._options = None # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/input_reader_pb2.py
input_reader_pb2.py
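A minimal usage sketch for the generated input_reader_pb2 module above; the file paths are placeholders, and it assumes the compiled object_detection protos are importable:

from google.protobuf import text_format

from object_detection.protos import input_reader_pb2

reader = input_reader_pb2.InputReader()
reader.label_map_path = 'data/label_map.pbtxt'  # placeholder path
reader.num_readers = 4
# Setting a field on tf_record_input_reader selects that branch of the
# input_reader oneof.
reader.tf_record_input_reader.input_path.append(
    'data/train.tfrecord')  # placeholder path

# Configs of this type are usually written and read in protobuf text format.
print(text_format.MessageToString(reader))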
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/box_predictor.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/box_predictor.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n+object_detection/protos/box_predictor.proto\x12\x17object_detection.protos\x1a)object_detection/protos/hyperparams.proto\"\x90\x03\n\x0c\x42oxPredictor\x12Y\n\x1b\x63onvolutional_box_predictor\x18\x01 \x01(\x0b\x32\x32.object_detection.protos.ConvolutionalBoxPredictorH\x00\x12P\n\x17mask_rcnn_box_predictor\x18\x02 \x01(\x0b\x32-.object_detection.protos.MaskRCNNBoxPredictorH\x00\x12G\n\x12rfcn_box_predictor\x18\x03 \x01(\x0b\x32).object_detection.protos.RfcnBoxPredictorH\x00\x12s\n)weight_shared_convolutional_box_predictor\x18\x04 \x01(\x0b\x32>.object_detection.protos.WeightSharedConvolutionalBoxPredictorH\x00\x42\x15\n\x13\x62ox_predictor_oneof\"\xaf\x04\n\x19\x43onvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x14\n\tmin_depth\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\tmax_depth\x18\x03 \x01(\x05:\x01\x30\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x19\n\x0buse_dropout\x18\x05 \x01(\x08:\x04true\x12%\n\x18\x64ropout_keep_probability\x18\x06 \x01(\x02:\x03\x30.8\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x31\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12&\n\x17\x61pply_sigmoid_to_scores\x18\t \x01(\x08:\x05\x66\x61lse\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1c\n\ruse_depthwise\x18\x0b \x01(\x08:\x05\x66\x61lse\x12j\n\x18\x62ox_encodings_clip_range\x18\x0c \x01(\x0b\x32H.object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange\x1a\x31\n\x15\x42oxEncodingsClipRange\x12\x0b\n\x03min\x18\x01 \x01(\x02\x12\x0b\n\x03max\x18\x02 \x01(\x02\"\xad\x06\n%WeightSharedConvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12.\n\x1f\x61pply_conv_hyperparams_to_heads\x18\x13 \x01(\x08:\x05\x66\x61lse\x12/\n apply_conv_hyperparams_pointwise\x18\x14 \x01(\x08:\x05\x66\x61lse\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x10\n\x05\x64\x65pth\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x33\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1a\n\x0buse_dropout\x18\x0b \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x0c \x01(\x02:\x03\x30.8\x12%\n\x16share_prediction_tower\x18\r \x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x0e \x01(\x08:\x05\x66\x61lse\x12p\n\x0fscore_converter\x18\x10 \x01(\x0e\x32M.object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter:\x08IDENTITY\x12v\n\x18\x62ox_encodings_clip_range\x18\x11 
\x01(\x0b\x32T.object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange\x1a\x31\n\x15\x42oxEncodingsClipRange\x12\x0b\n\x03min\x18\x01 \x01(\x02\x12\x0b\n\x03max\x18\x02 \x01(\x02\"+\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\"\xbf\x04\n\x14MaskRCNNBoxPredictor\x12<\n\x0e\x66\x63_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x1a\n\x0buse_dropout\x18\x02 \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x03 \x01(\x02:\x03\x30.5\x12\x18\n\rbox_code_size\x18\x04 \x01(\x05:\x01\x34\x12>\n\x10\x63onv_hyperparams\x18\x05 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12%\n\x16predict_instance_masks\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\'\n\x1amask_prediction_conv_depth\x18\x07 \x01(\x05:\x03\x32\x35\x36\x12 \n\x11predict_keypoints\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0bmask_height\x18\t \x01(\x05:\x02\x31\x35\x12\x16\n\nmask_width\x18\n \x01(\x05:\x02\x31\x35\x12*\n\x1fmask_prediction_num_conv_layers\x18\x0b \x01(\x05:\x01\x32\x12\'\n\x18masks_are_class_agnostic\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\'\n\x18share_box_across_classes\x18\r \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x63onvolve_then_upsample_masks\x18\x0e \x01(\x08:\x05\x66\x61lse\"\xf9\x01\n\x10RfcnBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\"\n\x17num_spatial_bins_height\x18\x02 \x01(\x05:\x01\x33\x12!\n\x16num_spatial_bins_width\x18\x03 \x01(\x05:\x01\x33\x12\x13\n\x05\x64\x65pth\x18\x04 \x01(\x05:\x04\x31\x30\x32\x34\x12\x18\n\rbox_code_size\x18\x05 \x01(\x05:\x01\x34\x12\x17\n\x0b\x63rop_height\x18\x06 \x01(\x05:\x02\x31\x32\x12\x16\n\ncrop_width\x18\x07 \x01(\x05:\x02\x31\x32' , dependencies=[object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,]) _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER = _descriptor.EnumDescriptor( name='ScoreConverter', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='IDENTITY', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SIGMOID', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1851, serialized_end=1894, ) _sym_db.RegisterEnumDescriptor(_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER) _BOXPREDICTOR = _descriptor.Descriptor( name='BoxPredictor', full_name='object_detection.protos.BoxPredictor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.convolutional_box_predictor', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_rcnn_box_predictor', full_name='object_detection.protos.BoxPredictor.mask_rcnn_box_predictor', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rfcn_box_predictor', full_name='object_detection.protos.BoxPredictor.rfcn_box_predictor', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weight_shared_convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.weight_shared_convolutional_box_predictor', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='box_predictor_oneof', full_name='object_detection.protos.BoxPredictor.box_predictor_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=116, serialized_end=516, ) _CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE = _descriptor.Descriptor( name='BoxEncodingsClipRange', full_name='object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min', full_name='object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange.min', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max', full_name='object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange.max', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1029, serialized_end=1078, ) _CONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor( name='ConvolutionalBoxPredictor', full_name='object_detection.protos.ConvolutionalBoxPredictor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='conv_hyperparams', full_name='object_detection.protos.ConvolutionalBoxPredictor.conv_hyperparams', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.min_depth', index=1, number=2, type=5, cpp_type=1, label=1, 
has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.max_depth', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_layers_before_predictor', full_name='object_detection.protos.ConvolutionalBoxPredictor.num_layers_before_predictor', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_dropout', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_dropout', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dropout_keep_probability', full_name='object_detection.protos.ConvolutionalBoxPredictor.dropout_keep_probability', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.8), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.kernel_size', index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_code_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_code_size', index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=True, default_value=4, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='apply_sigmoid_to_scores', full_name='object_detection.protos.ConvolutionalBoxPredictor.apply_sigmoid_to_scores', index=8, number=9, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='class_prediction_bias_init', full_name='object_detection.protos.ConvolutionalBoxPredictor.class_prediction_bias_init', index=9, number=10, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_depthwise', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_depthwise', index=10, number=11, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_encodings_clip_range', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_encodings_clip_range', index=11, number=12, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=519, serialized_end=1078, ) _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE = _descriptor.Descriptor( name='BoxEncodingsClipRange', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.min', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.max', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1029, serialized_end=1078, ) _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor( name='WeightSharedConvolutionalBoxPredictor', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='conv_hyperparams', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.conv_hyperparams', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='apply_conv_hyperparams_to_heads', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.apply_conv_hyperparams_to_heads', index=1, number=19, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='apply_conv_hyperparams_pointwise', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.apply_conv_hyperparams_pointwise', index=2, number=20, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_layers_before_predictor', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.num_layers_before_predictor', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='depth', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.depth', index=4, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.kernel_size', index=5, number=7, type=5, cpp_type=1, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_code_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_code_size', index=6, number=8, type=5, cpp_type=1, label=1, has_default_value=True, default_value=4, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='class_prediction_bias_init', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.class_prediction_bias_init', index=7, number=10, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_dropout', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_dropout', index=8, number=11, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dropout_keep_probability', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.dropout_keep_probability', index=9, number=12, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.8), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='share_prediction_tower', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.share_prediction_tower', index=10, number=13, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_depthwise', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_depthwise', index=11, number=14, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='score_converter', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.score_converter', index=12, number=16, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_encodings_clip_range', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_encodings_clip_range', index=13, number=17, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, ], enum_types=[ _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1081, serialized_end=1894, ) _MASKRCNNBOXPREDICTOR = _descriptor.Descriptor( name='MaskRCNNBoxPredictor', full_name='object_detection.protos.MaskRCNNBoxPredictor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='fc_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.fc_hyperparams', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_dropout', full_name='object_detection.protos.MaskRCNNBoxPredictor.use_dropout', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dropout_keep_probability', full_name='object_detection.protos.MaskRCNNBoxPredictor.dropout_keep_probability', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_code_size', full_name='object_detection.protos.MaskRCNNBoxPredictor.box_code_size', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=4, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='conv_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.conv_hyperparams', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='predict_instance_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_instance_masks', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_prediction_conv_depth', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_conv_depth', index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='predict_keypoints', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_keypoints', index=7, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_height', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_height', index=8, number=9, type=5, cpp_type=1, label=1, has_default_value=True, default_value=15, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_width', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_width', index=9, number=10, type=5, cpp_type=1, label=1, has_default_value=True, default_value=15, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_prediction_num_conv_layers', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_num_conv_layers', index=10, number=11, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='masks_are_class_agnostic', full_name='object_detection.protos.MaskRCNNBoxPredictor.masks_are_class_agnostic', index=11, number=12, type=8, cpp_type=7, label=1, 
has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='share_box_across_classes', full_name='object_detection.protos.MaskRCNNBoxPredictor.share_box_across_classes', index=12, number=13, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convolve_then_upsample_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.convolve_then_upsample_masks', index=13, number=14, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1897, serialized_end=2472, ) _RFCNBOXPREDICTOR = _descriptor.Descriptor( name='RfcnBoxPredictor', full_name='object_detection.protos.RfcnBoxPredictor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='conv_hyperparams', full_name='object_detection.protos.RfcnBoxPredictor.conv_hyperparams', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_spatial_bins_height', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_height', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_spatial_bins_width', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_width', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='depth', full_name='object_detection.protos.RfcnBoxPredictor.depth', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1024, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_code_size', full_name='object_detection.protos.RfcnBoxPredictor.box_code_size', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=4, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='crop_height', full_name='object_detection.protos.RfcnBoxPredictor.crop_height', index=5, number=6, type=5, cpp_type=1, label=1, has_default_value=True, default_value=12, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='crop_width', full_name='object_detection.protos.RfcnBoxPredictor.crop_width', index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=True, default_value=12, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2475, serialized_end=2724, ) _BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].message_type = _CONVOLUTIONALBOXPREDICTOR _BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].message_type = _MASKRCNNBOXPREDICTOR _BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].message_type = _RFCNBOXPREDICTOR _BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append( _BOXPREDICTOR.fields_by_name['convolutional_box_predictor']) _BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'] _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append( _BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor']) _BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'] _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append( _BOXPREDICTOR.fields_by_name['rfcn_box_predictor']) _BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'] _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append( _BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor']) _BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'] _CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE.containing_type = _CONVOLUTIONALBOXPREDICTOR _CONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS _CONVOLUTIONALBOXPREDICTOR.fields_by_name['box_encodings_clip_range'].message_type = _CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['score_converter'].enum_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['box_encodings_clip_range'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR _MASKRCNNBOXPREDICTOR.fields_by_name['fc_hyperparams'].message_type = 
object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS _MASKRCNNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS _RFCNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS DESCRIPTOR.message_types_by_name['BoxPredictor'] = _BOXPREDICTOR DESCRIPTOR.message_types_by_name['ConvolutionalBoxPredictor'] = _CONVOLUTIONALBOXPREDICTOR DESCRIPTOR.message_types_by_name['WeightSharedConvolutionalBoxPredictor'] = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR DESCRIPTOR.message_types_by_name['MaskRCNNBoxPredictor'] = _MASKRCNNBOXPREDICTOR DESCRIPTOR.message_types_by_name['RfcnBoxPredictor'] = _RFCNBOXPREDICTOR _sym_db.RegisterFileDescriptor(DESCRIPTOR) BoxPredictor = _reflection.GeneratedProtocolMessageType('BoxPredictor', (_message.Message,), { 'DESCRIPTOR' : _BOXPREDICTOR, '__module__' : 'object_detection.protos.box_predictor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.BoxPredictor) }) _sym_db.RegisterMessage(BoxPredictor) ConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('ConvolutionalBoxPredictor', (_message.Message,), { 'BoxEncodingsClipRange' : _reflection.GeneratedProtocolMessageType('BoxEncodingsClipRange', (_message.Message,), { 'DESCRIPTOR' : _CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, '__module__' : 'object_detection.protos.box_predictor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange) }) , 'DESCRIPTOR' : _CONVOLUTIONALBOXPREDICTOR, '__module__' : 'object_detection.protos.box_predictor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor) }) _sym_db.RegisterMessage(ConvolutionalBoxPredictor) _sym_db.RegisterMessage(ConvolutionalBoxPredictor.BoxEncodingsClipRange) WeightSharedConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('WeightSharedConvolutionalBoxPredictor', (_message.Message,), { 'BoxEncodingsClipRange' : _reflection.GeneratedProtocolMessageType('BoxEncodingsClipRange', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, '__module__' : 'object_detection.protos.box_predictor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange) }) , 'DESCRIPTOR' : _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR, '__module__' : 'object_detection.protos.box_predictor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor) }) _sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor) _sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange) MaskRCNNBoxPredictor = _reflection.GeneratedProtocolMessageType('MaskRCNNBoxPredictor', (_message.Message,), { 'DESCRIPTOR' : _MASKRCNNBOXPREDICTOR, '__module__' : 'object_detection.protos.box_predictor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.MaskRCNNBoxPredictor) }) _sym_db.RegisterMessage(MaskRCNNBoxPredictor) RfcnBoxPredictor = _reflection.GeneratedProtocolMessageType('RfcnBoxPredictor', (_message.Message,), { 'DESCRIPTOR' : _RFCNBOXPREDICTOR, '__module__' : 'object_detection.protos.box_predictor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RfcnBoxPredictor) }) _sym_db.RegisterMessage(RfcnBoxPredictor) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/box_predictor_pb2.py
box_predictor_pb2.py
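The generated BoxPredictor message above is normally filled in from a pipeline config, but it can also be built directly. A minimal usage sketch (not part of the generated file; it assumes the object_detection package is importable, and the field values are only illustrative):

from object_detection.protos import box_predictor_pb2

predictor = box_predictor_pb2.BoxPredictor()
# Setting any field on this submessage marks it present and selects it
# inside the box_predictor_oneof.
conv = predictor.convolutional_box_predictor
conv.kernel_size = 3        # proto2 default in the descriptor is 1
conv.box_code_size = 4      # matches the descriptor default
conv.use_dropout = False

print(predictor.WhichOneof('box_predictor_oneof'))  # 'convolutional_box_predictor'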
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/multiscale_anchor_generator.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/multiscale_anchor_generator.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n9object_detection/protos/multiscale_anchor_generator.proto\x12\x17object_detection.protos\"\xba\x01\n\x19MultiscaleAnchorGenerator\x12\x14\n\tmin_level\x18\x01 \x01(\x05:\x01\x33\x12\x14\n\tmax_level\x18\x02 \x01(\x05:\x01\x37\x12\x17\n\x0c\x61nchor_scale\x18\x03 \x01(\x02:\x01\x34\x12\x15\n\raspect_ratios\x18\x04 \x03(\x02\x12\x1c\n\x11scales_per_octave\x18\x05 \x01(\x05:\x01\x32\x12#\n\x15normalize_coordinates\x18\x06 \x01(\x08:\x04true' ) _MULTISCALEANCHORGENERATOR = _descriptor.Descriptor( name='MultiscaleAnchorGenerator', full_name='object_detection.protos.MultiscaleAnchorGenerator', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_level', full_name='object_detection.protos.MultiscaleAnchorGenerator.min_level', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_level', full_name='object_detection.protos.MultiscaleAnchorGenerator.max_level', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=7, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='anchor_scale', full_name='object_detection.protos.MultiscaleAnchorGenerator.anchor_scale', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(4), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='aspect_ratios', full_name='object_detection.protos.MultiscaleAnchorGenerator.aspect_ratios', index=3, number=4, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scales_per_octave', full_name='object_detection.protos.MultiscaleAnchorGenerator.scales_per_octave', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='normalize_coordinates', 
full_name='object_detection.protos.MultiscaleAnchorGenerator.normalize_coordinates', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=87, serialized_end=273, ) DESCRIPTOR.message_types_by_name['MultiscaleAnchorGenerator'] = _MULTISCALEANCHORGENERATOR _sym_db.RegisterFileDescriptor(DESCRIPTOR) MultiscaleAnchorGenerator = _reflection.GeneratedProtocolMessageType('MultiscaleAnchorGenerator', (_message.Message,), { 'DESCRIPTOR' : _MULTISCALEANCHORGENERATOR, '__module__' : 'object_detection.protos.multiscale_anchor_generator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.MultiscaleAnchorGenerator) }) _sym_db.RegisterMessage(MultiscaleAnchorGenerator) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/multiscale_anchor_generator_pb2.py
multiscale_anchor_generator_pb2.py
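A short sketch of how the MultiscaleAnchorGenerator message defined above behaves (illustrative only; assumes the object_detection package is on the path):

from object_detection.protos import multiscale_anchor_generator_pb2 as mag_pb2

gen = mag_pb2.MultiscaleAnchorGenerator()
# proto2 defaults encoded in the descriptor: min_level=3, max_level=7,
# anchor_scale=4.0, scales_per_octave=2, normalize_coordinates=True.
print(gen.min_level, gen.max_level, gen.anchor_scale, gen.scales_per_octave)

gen.aspect_ratios.extend([0.5, 1.0, 2.0])   # repeated float field
data = gen.SerializeToString()
assert mag_pb2.MultiscaleAnchorGenerator.FromString(data) == gen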
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/post_processing.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import calibration_pb2 as object__detection_dot_protos_dot_calibration__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/post_processing.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n-object_detection/protos/post_processing.proto\x12\x17object_detection.protos\x1a)object_detection/protos/calibration.proto\"\xc9\x03\n\x16\x42\x61tchNonMaxSuppression\x12\x1a\n\x0fscore_threshold\x18\x01 \x01(\x02:\x01\x30\x12\x1a\n\riou_threshold\x18\x02 \x01(\x02:\x03\x30.6\x12%\n\x18max_detections_per_class\x18\x03 \x01(\x05:\x03\x31\x30\x30\x12!\n\x14max_total_detections\x18\x05 \x01(\x05:\x03\x31\x30\x30\x12 \n\x11use_static_shapes\x18\x06 \x01(\x08:\x05\x66\x61lse\x12%\n\x16use_class_agnostic_nms\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x19max_classes_per_detection\x18\x08 \x01(\x05:\x01\x31\x12\x19\n\x0esoft_nms_sigma\x18\t \x01(\x02:\x01\x30\x12\"\n\x13use_partitioned_nms\x18\n \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10use_combined_nms\x18\x0b \x01(\x08:\x05\x66\x61lse\x12%\n\x17\x63hange_coordinate_frame\x18\x0c \x01(\x08:\x04true\x12\x1b\n\x0cuse_hard_nms\x18\r \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0buse_cpu_nms\x18\x0e \x01(\x08:\x05\x66\x61lse\"\xd9\x02\n\x0ePostProcessing\x12R\n\x19\x62\x61tch_non_max_suppression\x18\x01 \x01(\x0b\x32/.object_detection.protos.BatchNonMaxSuppression\x12Y\n\x0fscore_converter\x18\x02 \x01(\x0e\x32\x36.object_detection.protos.PostProcessing.ScoreConverter:\x08IDENTITY\x12\x16\n\x0blogit_scale\x18\x03 \x01(\x02:\x01\x31\x12\x46\n\x12\x63\x61libration_config\x18\x04 \x01(\x0b\x32*.object_detection.protos.CalibrationConfig\"8\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\x12\x0b\n\x07SOFTMAX\x10\x02' , dependencies=[object__detection_dot_protos_dot_calibration__pb2.DESCRIPTOR,]) _POSTPROCESSING_SCORECONVERTER = _descriptor.EnumDescriptor( name='ScoreConverter', full_name='object_detection.protos.PostProcessing.ScoreConverter', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='IDENTITY', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SIGMOID', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SOFTMAX', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=867, serialized_end=923, ) _sym_db.RegisterEnumDescriptor(_POSTPROCESSING_SCORECONVERTER) _BATCHNONMAXSUPPRESSION = _descriptor.Descriptor( name='BatchNonMaxSuppression', full_name='object_detection.protos.BatchNonMaxSuppression', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='score_threshold', 
full_name='object_detection.protos.BatchNonMaxSuppression.score_threshold', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='iou_threshold', full_name='object_detection.protos.BatchNonMaxSuppression.iou_threshold', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.6), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_detections_per_class', full_name='object_detection.protos.BatchNonMaxSuppression.max_detections_per_class', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=100, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_total_detections', full_name='object_detection.protos.BatchNonMaxSuppression.max_total_detections', index=3, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=100, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_static_shapes', full_name='object_detection.protos.BatchNonMaxSuppression.use_static_shapes', index=4, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_class_agnostic_nms', full_name='object_detection.protos.BatchNonMaxSuppression.use_class_agnostic_nms', index=5, number=7, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_classes_per_detection', full_name='object_detection.protos.BatchNonMaxSuppression.max_classes_per_detection', index=6, number=8, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='soft_nms_sigma', full_name='object_detection.protos.BatchNonMaxSuppression.soft_nms_sigma', index=7, number=9, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_partitioned_nms', full_name='object_detection.protos.BatchNonMaxSuppression.use_partitioned_nms', index=8, number=10, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_combined_nms', full_name='object_detection.protos.BatchNonMaxSuppression.use_combined_nms', index=9, number=11, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='change_coordinate_frame', full_name='object_detection.protos.BatchNonMaxSuppression.change_coordinate_frame', index=10, number=12, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_hard_nms', full_name='object_detection.protos.BatchNonMaxSuppression.use_hard_nms', index=11, number=13, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_cpu_nms', full_name='object_detection.protos.BatchNonMaxSuppression.use_cpu_nms', index=12, number=14, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=118, serialized_end=575, ) _POSTPROCESSING = _descriptor.Descriptor( name='PostProcessing', full_name='object_detection.protos.PostProcessing', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='batch_non_max_suppression', full_name='object_detection.protos.PostProcessing.batch_non_max_suppression', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='score_converter', full_name='object_detection.protos.PostProcessing.score_converter', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='logit_scale', full_name='object_detection.protos.PostProcessing.logit_scale', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='calibration_config', 
full_name='object_detection.protos.PostProcessing.calibration_config', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _POSTPROCESSING_SCORECONVERTER, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=578, serialized_end=923, ) _POSTPROCESSING.fields_by_name['batch_non_max_suppression'].message_type = _BATCHNONMAXSUPPRESSION _POSTPROCESSING.fields_by_name['score_converter'].enum_type = _POSTPROCESSING_SCORECONVERTER _POSTPROCESSING.fields_by_name['calibration_config'].message_type = object__detection_dot_protos_dot_calibration__pb2._CALIBRATIONCONFIG _POSTPROCESSING_SCORECONVERTER.containing_type = _POSTPROCESSING DESCRIPTOR.message_types_by_name['BatchNonMaxSuppression'] = _BATCHNONMAXSUPPRESSION DESCRIPTOR.message_types_by_name['PostProcessing'] = _POSTPROCESSING _sym_db.RegisterFileDescriptor(DESCRIPTOR) BatchNonMaxSuppression = _reflection.GeneratedProtocolMessageType('BatchNonMaxSuppression', (_message.Message,), { 'DESCRIPTOR' : _BATCHNONMAXSUPPRESSION, '__module__' : 'object_detection.protos.post_processing_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.BatchNonMaxSuppression) }) _sym_db.RegisterMessage(BatchNonMaxSuppression) PostProcessing = _reflection.GeneratedProtocolMessageType('PostProcessing', (_message.Message,), { 'DESCRIPTOR' : _POSTPROCESSING, '__module__' : 'object_detection.protos.post_processing_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.PostProcessing) }) _sym_db.RegisterMessage(PostProcessing) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/post_processing_pb2.py
post_processing_pb2.py
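In practice these post-processing options are written in text (pbtxt) form inside a pipeline config and parsed with google.protobuf.text_format. A hypothetical snippet (the field values below are only an example, not defaults from the source):

from google.protobuf import text_format
from object_detection.protos import post_processing_pb2

PBTXT = """
batch_non_max_suppression {
  score_threshold: 1e-8
  iou_threshold: 0.5
  max_detections_per_class: 100
  max_total_detections: 300
}
score_converter: SIGMOID
"""

pp = text_format.Parse(PBTXT, post_processing_pb2.PostProcessing())
print(pp.batch_non_max_suppression.iou_threshold)                     # 0.5
assert pp.score_converter == post_processing_pb2.PostProcessing.SIGMOID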
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/region_similarity_calculator.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/region_similarity_calculator.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n:object_detection/protos/region_similarity_calculator.proto\x12\x17object_detection.protos\"\xde\x02\n\x1aRegionSimilarityCalculator\x12N\n\x16neg_sq_dist_similarity\x18\x01 \x01(\x0b\x32,.object_detection.protos.NegSqDistSimilarityH\x00\x12@\n\x0eiou_similarity\x18\x02 \x01(\x0b\x32&.object_detection.protos.IouSimilarityH\x00\x12@\n\x0eioa_similarity\x18\x03 \x01(\x0b\x32&.object_detection.protos.IoaSimilarityH\x00\x12W\n\x1athresholded_iou_similarity\x18\x04 \x01(\x0b\x32\x31.object_detection.protos.ThresholdedIouSimilarityH\x00\x42\x13\n\x11region_similarity\"\x15\n\x13NegSqDistSimilarity\"\x0f\n\rIouSimilarity\"\x0f\n\rIoaSimilarity\"6\n\x18ThresholdedIouSimilarity\x12\x1a\n\riou_threshold\x18\x01 \x01(\x02:\x03\x30.5' ) _REGIONSIMILARITYCALCULATOR = _descriptor.Descriptor( name='RegionSimilarityCalculator', full_name='object_detection.protos.RegionSimilarityCalculator', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='neg_sq_dist_similarity', full_name='object_detection.protos.RegionSimilarityCalculator.neg_sq_dist_similarity', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='iou_similarity', full_name='object_detection.protos.RegionSimilarityCalculator.iou_similarity', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ioa_similarity', full_name='object_detection.protos.RegionSimilarityCalculator.ioa_similarity', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='thresholded_iou_similarity', full_name='object_detection.protos.RegionSimilarityCalculator.thresholded_iou_similarity', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], 
oneofs=[ _descriptor.OneofDescriptor( name='region_similarity', full_name='object_detection.protos.RegionSimilarityCalculator.region_similarity', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=88, serialized_end=438, ) _NEGSQDISTSIMILARITY = _descriptor.Descriptor( name='NegSqDistSimilarity', full_name='object_detection.protos.NegSqDistSimilarity', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=440, serialized_end=461, ) _IOUSIMILARITY = _descriptor.Descriptor( name='IouSimilarity', full_name='object_detection.protos.IouSimilarity', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=463, serialized_end=478, ) _IOASIMILARITY = _descriptor.Descriptor( name='IoaSimilarity', full_name='object_detection.protos.IoaSimilarity', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=480, serialized_end=495, ) _THRESHOLDEDIOUSIMILARITY = _descriptor.Descriptor( name='ThresholdedIouSimilarity', full_name='object_detection.protos.ThresholdedIouSimilarity', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='iou_threshold', full_name='object_detection.protos.ThresholdedIouSimilarity.iou_threshold', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=497, serialized_end=551, ) _REGIONSIMILARITYCALCULATOR.fields_by_name['neg_sq_dist_similarity'].message_type = _NEGSQDISTSIMILARITY _REGIONSIMILARITYCALCULATOR.fields_by_name['iou_similarity'].message_type = _IOUSIMILARITY _REGIONSIMILARITYCALCULATOR.fields_by_name['ioa_similarity'].message_type = _IOASIMILARITY _REGIONSIMILARITYCALCULATOR.fields_by_name['thresholded_iou_similarity'].message_type = _THRESHOLDEDIOUSIMILARITY _REGIONSIMILARITYCALCULATOR.oneofs_by_name['region_similarity'].fields.append( _REGIONSIMILARITYCALCULATOR.fields_by_name['neg_sq_dist_similarity']) _REGIONSIMILARITYCALCULATOR.fields_by_name['neg_sq_dist_similarity'].containing_oneof = _REGIONSIMILARITYCALCULATOR.oneofs_by_name['region_similarity'] _REGIONSIMILARITYCALCULATOR.oneofs_by_name['region_similarity'].fields.append( _REGIONSIMILARITYCALCULATOR.fields_by_name['iou_similarity']) _REGIONSIMILARITYCALCULATOR.fields_by_name['iou_similarity'].containing_oneof = _REGIONSIMILARITYCALCULATOR.oneofs_by_name['region_similarity'] _REGIONSIMILARITYCALCULATOR.oneofs_by_name['region_similarity'].fields.append( _REGIONSIMILARITYCALCULATOR.fields_by_name['ioa_similarity']) 
_REGIONSIMILARITYCALCULATOR.fields_by_name['ioa_similarity'].containing_oneof = _REGIONSIMILARITYCALCULATOR.oneofs_by_name['region_similarity'] _REGIONSIMILARITYCALCULATOR.oneofs_by_name['region_similarity'].fields.append( _REGIONSIMILARITYCALCULATOR.fields_by_name['thresholded_iou_similarity']) _REGIONSIMILARITYCALCULATOR.fields_by_name['thresholded_iou_similarity'].containing_oneof = _REGIONSIMILARITYCALCULATOR.oneofs_by_name['region_similarity'] DESCRIPTOR.message_types_by_name['RegionSimilarityCalculator'] = _REGIONSIMILARITYCALCULATOR DESCRIPTOR.message_types_by_name['NegSqDistSimilarity'] = _NEGSQDISTSIMILARITY DESCRIPTOR.message_types_by_name['IouSimilarity'] = _IOUSIMILARITY DESCRIPTOR.message_types_by_name['IoaSimilarity'] = _IOASIMILARITY DESCRIPTOR.message_types_by_name['ThresholdedIouSimilarity'] = _THRESHOLDEDIOUSIMILARITY _sym_db.RegisterFileDescriptor(DESCRIPTOR) RegionSimilarityCalculator = _reflection.GeneratedProtocolMessageType('RegionSimilarityCalculator', (_message.Message,), { 'DESCRIPTOR' : _REGIONSIMILARITYCALCULATOR, '__module__' : 'object_detection.protos.region_similarity_calculator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RegionSimilarityCalculator) }) _sym_db.RegisterMessage(RegionSimilarityCalculator) NegSqDistSimilarity = _reflection.GeneratedProtocolMessageType('NegSqDistSimilarity', (_message.Message,), { 'DESCRIPTOR' : _NEGSQDISTSIMILARITY, '__module__' : 'object_detection.protos.region_similarity_calculator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.NegSqDistSimilarity) }) _sym_db.RegisterMessage(NegSqDistSimilarity) IouSimilarity = _reflection.GeneratedProtocolMessageType('IouSimilarity', (_message.Message,), { 'DESCRIPTOR' : _IOUSIMILARITY, '__module__' : 'object_detection.protos.region_similarity_calculator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.IouSimilarity) }) _sym_db.RegisterMessage(IouSimilarity) IoaSimilarity = _reflection.GeneratedProtocolMessageType('IoaSimilarity', (_message.Message,), { 'DESCRIPTOR' : _IOASIMILARITY, '__module__' : 'object_detection.protos.region_similarity_calculator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.IoaSimilarity) }) _sym_db.RegisterMessage(IoaSimilarity) ThresholdedIouSimilarity = _reflection.GeneratedProtocolMessageType('ThresholdedIouSimilarity', (_message.Message,), { 'DESCRIPTOR' : _THRESHOLDEDIOUSIMILARITY, '__module__' : 'object_detection.protos.region_similarity_calculator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ThresholdedIouSimilarity) }) _sym_db.RegisterMessage(ThresholdedIouSimilarity) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/region_similarity_calculator_pb2.py
region_similarity_calculator_pb2.py
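The RegionSimilarityCalculator message above is a oneof wrapper around four option messages, three of which carry no fields. A small sketch of selecting an option (illustrative, not taken from the source):

from object_detection.protos import region_similarity_calculator_pb2 as sim_pb2

calc = sim_pb2.RegionSimilarityCalculator()
calc.iou_similarity.SetInParent()            # mark the empty IouSimilarity as present
print(calc.WhichOneof('region_similarity'))  # 'iou_similarity'

# Writing to a different member switches the oneof and clears iou_similarity.
calc.thresholded_iou_similarity.iou_threshold = 0.7
print(calc.WhichOneof('region_similarity'))  # 'thresholded_iou_similarity'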
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/faster_rcnn_box_coder.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/faster_rcnn_box_coder.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n3object_detection/protos/faster_rcnn_box_coder.proto\x12\x17object_detection.protos\"o\n\x12\x46\x61sterRcnnBoxCoder\x12\x13\n\x07y_scale\x18\x01 \x01(\x02:\x02\x31\x30\x12\x13\n\x07x_scale\x18\x02 \x01(\x02:\x02\x31\x30\x12\x17\n\x0cheight_scale\x18\x03 \x01(\x02:\x01\x35\x12\x16\n\x0bwidth_scale\x18\x04 \x01(\x02:\x01\x35' ) _FASTERRCNNBOXCODER = _descriptor.Descriptor( name='FasterRcnnBoxCoder', full_name='object_detection.protos.FasterRcnnBoxCoder', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='y_scale', full_name='object_detection.protos.FasterRcnnBoxCoder.y_scale', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(10), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='x_scale', full_name='object_detection.protos.FasterRcnnBoxCoder.x_scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(10), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height_scale', full_name='object_detection.protos.FasterRcnnBoxCoder.height_scale', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width_scale', full_name='object_detection.protos.FasterRcnnBoxCoder.width_scale', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=80, serialized_end=191, ) DESCRIPTOR.message_types_by_name['FasterRcnnBoxCoder'] = _FASTERRCNNBOXCODER _sym_db.RegisterFileDescriptor(DESCRIPTOR) FasterRcnnBoxCoder = _reflection.GeneratedProtocolMessageType('FasterRcnnBoxCoder', (_message.Message,), { 'DESCRIPTOR' : _FASTERRCNNBOXCODER, '__module__' : 'object_detection.protos.faster_rcnn_box_coder_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.FasterRcnnBoxCoder) }) _sym_db.RegisterMessage(FasterRcnnBoxCoder) # 
@@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/faster_rcnn_box_coder_pb2.py
faster_rcnn_box_coder_pb2.py
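A minimal usage sketch for the FasterRcnnBoxCoder message generated above. The field names and their defaults (y_scale=10, x_scale=10, height_scale=5, width_scale=5) are taken from the descriptor; the overridden values and the pbtxt fragment are illustrative assumptions.

from google.protobuf import text_format
from object_detection.protos import faster_rcnn_box_coder_pb2

# Proto2 defaults from the descriptor: y/x scale 10, height/width scale 5.
box_coder = faster_rcnn_box_coder_pb2.FasterRcnnBoxCoder()
assert box_coder.y_scale == 10.0 and box_coder.height_scale == 5.0

# Override the scales from a pbtxt fragment (values chosen for illustration only).
text_format.Parse('y_scale: 8.0 x_scale: 8.0 height_scale: 4.0 width_scale: 4.0', box_coder)

# Round-trip through the binary wire format.
restored = faster_rcnn_box_coder_pb2.FasterRcnnBoxCoder.FromString(box_coder.SerializeToString())
print(text_format.MessageToString(restored))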
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/bipartite_matcher.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/bipartite_matcher.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n/object_detection/protos/bipartite_matcher.proto\x12\x17object_detection.protos\"4\n\x10\x42ipartiteMatcher\x12 \n\x11use_matmul_gather\x18\x06 \x01(\x08:\x05\x66\x61lse' ) _BIPARTITEMATCHER = _descriptor.Descriptor( name='BipartiteMatcher', full_name='object_detection.protos.BipartiteMatcher', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='use_matmul_gather', full_name='object_detection.protos.BipartiteMatcher.use_matmul_gather', index=0, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=76, serialized_end=128, ) DESCRIPTOR.message_types_by_name['BipartiteMatcher'] = _BIPARTITEMATCHER _sym_db.RegisterFileDescriptor(DESCRIPTOR) BipartiteMatcher = _reflection.GeneratedProtocolMessageType('BipartiteMatcher', (_message.Message,), { 'DESCRIPTOR' : _BIPARTITEMATCHER, '__module__' : 'object_detection.protos.bipartite_matcher_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.BipartiteMatcher) }) _sym_db.RegisterMessage(BipartiteMatcher) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/bipartite_matcher_pb2.py
bipartite_matcher_pb2.py
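A short, hedged example for the BipartiteMatcher message above; its single use_matmul_gather field and the false default come straight from the descriptor, and the override is illustrative.

from object_detection.protos import bipartite_matcher_pb2

matcher = bipartite_matcher_pb2.BipartiteMatcher()
assert not matcher.use_matmul_gather          # proto2 default: false

matcher.use_matmul_gather = True              # opt into matmul-based gather ops
print(matcher.HasField('use_matmul_gather'))  # True once explicitly set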
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/string_int_label_map.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/string_int_label_map.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n2object_detection/protos/string_int_label_map.proto\x12\x17object_detection.protos\"\xc1\x02\n\x15StringIntLabelMapItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12M\n\tkeypoints\x18\x04 \x03(\x0b\x32:.object_detection.protos.StringIntLabelMapItem.KeypointMap\x12\x14\n\x0c\x61ncestor_ids\x18\x05 \x03(\x05\x12\x16\n\x0e\x64\x65scendant_ids\x18\x06 \x03(\x05\x12\x39\n\tfrequency\x18\x07 \x01(\x0e\x32&.object_detection.protos.LVISFrequency\x12\x16\n\x0einstance_count\x18\x08 \x01(\x05\x1a(\n\x0bKeypointMap\x12\n\n\x02id\x18\x01 \x01(\x05\x12\r\n\x05label\x18\x02 \x01(\t\"Q\n\x11StringIntLabelMap\x12<\n\x04item\x18\x01 \x03(\x0b\x32..object_detection.protos.StringIntLabelMapItem*D\n\rLVISFrequency\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0c\n\x08\x46REQUENT\x10\x01\x12\n\n\x06\x43OMMON\x10\x02\x12\x08\n\x04RARE\x10\x03' ) _LVISFREQUENCY = _descriptor.EnumDescriptor( name='LVISFrequency', full_name='object_detection.protos.LVISFrequency', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='FREQUENT', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='COMMON', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='RARE', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=486, serialized_end=554, ) _sym_db.RegisterEnumDescriptor(_LVISFREQUENCY) LVISFrequency = enum_type_wrapper.EnumTypeWrapper(_LVISFREQUENCY) UNSPECIFIED = 0 FREQUENT = 1 COMMON = 2 RARE = 3 _STRINGINTLABELMAPITEM_KEYPOINTMAP = _descriptor.Descriptor( name='KeypointMap', full_name='object_detection.protos.StringIntLabelMapItem.KeypointMap', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='id', full_name='object_detection.protos.StringIntLabelMapItem.KeypointMap.id', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='label', full_name='object_detection.protos.StringIntLabelMapItem.KeypointMap.label', index=1, number=2, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=361, serialized_end=401, ) _STRINGINTLABELMAPITEM = _descriptor.Descriptor( name='StringIntLabelMapItem', full_name='object_detection.protos.StringIntLabelMapItem', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='object_detection.protos.StringIntLabelMapItem.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='id', full_name='object_detection.protos.StringIntLabelMapItem.id', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='display_name', full_name='object_detection.protos.StringIntLabelMapItem.display_name', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoints', full_name='object_detection.protos.StringIntLabelMapItem.keypoints', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ancestor_ids', full_name='object_detection.protos.StringIntLabelMapItem.ancestor_ids', index=4, number=5, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='descendant_ids', full_name='object_detection.protos.StringIntLabelMapItem.descendant_ids', index=5, number=6, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='frequency', full_name='object_detection.protos.StringIntLabelMapItem.frequency', index=6, number=7, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='instance_count', 
full_name='object_detection.protos.StringIntLabelMapItem.instance_count', index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_STRINGINTLABELMAPITEM_KEYPOINTMAP, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=80, serialized_end=401, ) _STRINGINTLABELMAP = _descriptor.Descriptor( name='StringIntLabelMap', full_name='object_detection.protos.StringIntLabelMap', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='item', full_name='object_detection.protos.StringIntLabelMap.item', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=403, serialized_end=484, ) _STRINGINTLABELMAPITEM_KEYPOINTMAP.containing_type = _STRINGINTLABELMAPITEM _STRINGINTLABELMAPITEM.fields_by_name['keypoints'].message_type = _STRINGINTLABELMAPITEM_KEYPOINTMAP _STRINGINTLABELMAPITEM.fields_by_name['frequency'].enum_type = _LVISFREQUENCY _STRINGINTLABELMAP.fields_by_name['item'].message_type = _STRINGINTLABELMAPITEM DESCRIPTOR.message_types_by_name['StringIntLabelMapItem'] = _STRINGINTLABELMAPITEM DESCRIPTOR.message_types_by_name['StringIntLabelMap'] = _STRINGINTLABELMAP DESCRIPTOR.enum_types_by_name['LVISFrequency'] = _LVISFREQUENCY _sym_db.RegisterFileDescriptor(DESCRIPTOR) StringIntLabelMapItem = _reflection.GeneratedProtocolMessageType('StringIntLabelMapItem', (_message.Message,), { 'KeypointMap' : _reflection.GeneratedProtocolMessageType('KeypointMap', (_message.Message,), { 'DESCRIPTOR' : _STRINGINTLABELMAPITEM_KEYPOINTMAP, '__module__' : 'object_detection.protos.string_int_label_map_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMapItem.KeypointMap) }) , 'DESCRIPTOR' : _STRINGINTLABELMAPITEM, '__module__' : 'object_detection.protos.string_int_label_map_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMapItem) }) _sym_db.RegisterMessage(StringIntLabelMapItem) _sym_db.RegisterMessage(StringIntLabelMapItem.KeypointMap) StringIntLabelMap = _reflection.GeneratedProtocolMessageType('StringIntLabelMap', (_message.Message,), { 'DESCRIPTOR' : _STRINGINTLABELMAP, '__module__' : 'object_detection.protos.string_int_label_map_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.StringIntLabelMap) }) _sym_db.RegisterMessage(StringIntLabelMap) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/string_int_label_map_pb2.py
string_int_label_map_pb2.py
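A small sketch of building a label map with the StringIntLabelMap / StringIntLabelMapItem messages defined above. The class, field, and enum names mirror the descriptor; the class id, names, and keypoint label are made up for illustration.

from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2

label_map = string_int_label_map_pb2.StringIntLabelMap()

# One item per class; by convention ids start at 1 (0 is reserved for background).
item = label_map.item.add()
item.id = 1
item.name = '/m/01g317'        # illustrative machine name
item.display_name = 'person'
item.frequency = string_int_label_map_pb2.FREQUENT

# Nested KeypointMap entries pair an integer keypoint id with a human-readable label.
keypoint = item.keypoints.add()
keypoint.id = 0
keypoint.label = 'nose'

# The pbtxt form is what label-map loaders in this package typically consume.
print(text_format.MessageToString(label_map))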
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/ssd_anchor_generator.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/ssd_anchor_generator.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n2object_detection/protos/ssd_anchor_generator.proto\x12\x17object_detection.protos\"\xf2\x02\n\x12SsdAnchorGenerator\x12\x15\n\nnum_layers\x18\x01 \x01(\x05:\x01\x36\x12\x16\n\tmin_scale\x18\x02 \x01(\x02:\x03\x30.2\x12\x17\n\tmax_scale\x18\x03 \x01(\x02:\x04\x30.95\x12\x0e\n\x06scales\x18\x0c \x03(\x02\x12\x15\n\raspect_ratios\x18\x04 \x03(\x02\x12*\n\x1finterpolated_scale_aspect_ratio\x18\r \x01(\x02:\x01\x31\x12*\n\x1creduce_boxes_in_lowest_layer\x18\x05 \x01(\x08:\x04true\x12\x1d\n\x12\x62\x61se_anchor_height\x18\x06 \x01(\x02:\x01\x31\x12\x1c\n\x11\x62\x61se_anchor_width\x18\x07 \x01(\x02:\x01\x31\x12\x15\n\rheight_stride\x18\x08 \x03(\x05\x12\x14\n\x0cwidth_stride\x18\t \x03(\x05\x12\x15\n\rheight_offset\x18\n \x03(\x05\x12\x14\n\x0cwidth_offset\x18\x0b \x03(\x05' ) _SSDANCHORGENERATOR = _descriptor.Descriptor( name='SsdAnchorGenerator', full_name='object_detection.protos.SsdAnchorGenerator', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_layers', full_name='object_detection.protos.SsdAnchorGenerator.num_layers', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=6, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_scale', full_name='object_detection.protos.SsdAnchorGenerator.min_scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.2), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_scale', full_name='object_detection.protos.SsdAnchorGenerator.max_scale', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.95), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scales', full_name='object_detection.protos.SsdAnchorGenerator.scales', index=3, number=12, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='aspect_ratios', full_name='object_detection.protos.SsdAnchorGenerator.aspect_ratios', index=4, number=4, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='interpolated_scale_aspect_ratio', full_name='object_detection.protos.SsdAnchorGenerator.interpolated_scale_aspect_ratio', index=5, number=13, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='reduce_boxes_in_lowest_layer', full_name='object_detection.protos.SsdAnchorGenerator.reduce_boxes_in_lowest_layer', index=6, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='base_anchor_height', full_name='object_detection.protos.SsdAnchorGenerator.base_anchor_height', index=7, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='base_anchor_width', full_name='object_detection.protos.SsdAnchorGenerator.base_anchor_width', index=8, number=7, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height_stride', full_name='object_detection.protos.SsdAnchorGenerator.height_stride', index=9, number=8, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width_stride', full_name='object_detection.protos.SsdAnchorGenerator.width_stride', index=10, number=9, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height_offset', full_name='object_detection.protos.SsdAnchorGenerator.height_offset', index=11, number=10, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width_offset', full_name='object_detection.protos.SsdAnchorGenerator.width_offset', index=12, number=11, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, 
syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=80, serialized_end=450, ) DESCRIPTOR.message_types_by_name['SsdAnchorGenerator'] = _SSDANCHORGENERATOR _sym_db.RegisterFileDescriptor(DESCRIPTOR) SsdAnchorGenerator = _reflection.GeneratedProtocolMessageType('SsdAnchorGenerator', (_message.Message,), { 'DESCRIPTOR' : _SSDANCHORGENERATOR, '__module__' : 'object_detection.protos.ssd_anchor_generator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SsdAnchorGenerator) }) _sym_db.RegisterMessage(SsdAnchorGenerator) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/ssd_anchor_generator_pb2.py
ssd_anchor_generator_pb2.py
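A hedged configuration sketch for the SsdAnchorGenerator message above. The defaults asserted below (num_layers=6, min_scale=0.2) and the repeated field names come from the descriptor; the particular aspect ratios and scales are illustrative values, not recommendations.

from object_detection.protos import ssd_anchor_generator_pb2

anchor_cfg = ssd_anchor_generator_pb2.SsdAnchorGenerator()
assert anchor_cfg.num_layers == 6 and abs(anchor_cfg.min_scale - 0.2) < 1e-6

# Repeated scalar fields behave like Python lists.
anchor_cfg.aspect_ratios.extend([1.0, 2.0, 0.5, 3.0, 1.0 / 3.0])
anchor_cfg.reduce_boxes_in_lowest_layer = True

# Explicit per-layer scales may be supplied instead of relying on min/max alone.
anchor_cfg.scales.extend([0.1, 0.35, 0.5, 0.65, 0.8, 0.95])
print(len(anchor_cfg.scales), anchor_cfg.interpolated_scale_aspect_ratio)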
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/image_resizer.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/image_resizer.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n+object_detection/protos/image_resizer.proto\x12\x17object_detection.protos\"\xb5\x03\n\x0cImageResizer\x12T\n\x19keep_aspect_ratio_resizer\x18\x01 \x01(\x0b\x32/.object_detection.protos.KeepAspectRatioResizerH\x00\x12I\n\x13\x66ixed_shape_resizer\x18\x02 \x01(\x0b\x32*.object_detection.protos.FixedShapeResizerH\x00\x12\x44\n\x10identity_resizer\x18\x03 \x01(\x0b\x32(.object_detection.protos.IdentityResizerH\x00\x12U\n\x19\x63onditional_shape_resizer\x18\x04 \x01(\x0b\x32\x30.object_detection.protos.ConditionalShapeResizerH\x00\x12P\n\x17pad_to_multiple_resizer\x18\x05 \x01(\x0b\x32-.object_detection.protos.PadToMultipleResizerH\x00\x42\x15\n\x13image_resizer_oneof\"\x11\n\x0fIdentityResizer\"\x80\x02\n\x16KeepAspectRatioResizer\x12\x1a\n\rmin_dimension\x18\x01 \x01(\x05:\x03\x36\x30\x30\x12\x1b\n\rmax_dimension\x18\x02 \x01(\x05:\x04\x31\x30\x32\x34\x12\x44\n\rresize_method\x18\x03 \x01(\x0e\x32#.object_detection.protos.ResizeType:\x08\x42ILINEAR\x12#\n\x14pad_to_max_dimension\x18\x04 \x01(\x08:\x05\x66\x61lse\x12#\n\x14\x63onvert_to_grayscale\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1d\n\x15per_channel_pad_value\x18\x06 \x03(\x02\"\xa7\x01\n\x11\x46ixedShapeResizer\x12\x13\n\x06height\x18\x01 \x01(\x05:\x03\x33\x30\x30\x12\x12\n\x05width\x18\x02 \x01(\x05:\x03\x33\x30\x30\x12\x44\n\rresize_method\x18\x03 \x01(\x0e\x32#.object_detection.protos.ResizeType:\x08\x42ILINEAR\x12#\n\x14\x63onvert_to_grayscale\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xb9\x02\n\x17\x43onditionalShapeResizer\x12\\\n\tcondition\x18\x01 \x01(\x0e\x32@.object_detection.protos.ConditionalShapeResizer.ResizeCondition:\x07GREATER\x12\x1b\n\x0esize_threshold\x18\x02 \x01(\x05:\x03\x33\x30\x30\x12\x44\n\rresize_method\x18\x03 \x01(\x0e\x32#.object_detection.protos.ResizeType:\x08\x42ILINEAR\x12#\n\x14\x63onvert_to_grayscale\x18\x04 \x01(\x08:\x05\x66\x61lse\"8\n\x0fResizeCondition\x12\x0b\n\x07INVALID\x10\x00\x12\x0b\n\x07GREATER\x10\x01\x12\x0b\n\x07SMALLER\x10\x02\"P\n\x14PadToMultipleResizer\x12\x13\n\x08multiple\x18\x01 \x01(\x05:\x01\x31\x12#\n\x14\x63onvert_to_grayscale\x18\x04 \x01(\x08:\x05\x66\x61lse*G\n\nResizeType\x12\x0c\n\x08\x42ILINEAR\x10\x00\x12\x14\n\x10NEAREST_NEIGHBOR\x10\x01\x12\x0b\n\x07\x42ICUBIC\x10\x02\x12\x08\n\x04\x41REA\x10\x03' ) _RESIZETYPE = _descriptor.EnumDescriptor( name='ResizeType', full_name='object_detection.protos.ResizeType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='BILINEAR', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='NEAREST_NEIGHBOR', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( 
name='BICUBIC', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='AREA', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1358, serialized_end=1429, ) _sym_db.RegisterEnumDescriptor(_RESIZETYPE) ResizeType = enum_type_wrapper.EnumTypeWrapper(_RESIZETYPE) BILINEAR = 0 NEAREST_NEIGHBOR = 1 BICUBIC = 2 AREA = 3 _CONDITIONALSHAPERESIZER_RESIZECONDITION = _descriptor.EnumDescriptor( name='ResizeCondition', full_name='object_detection.protos.ConditionalShapeResizer.ResizeCondition', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='INVALID', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='GREATER', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SMALLER', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1218, serialized_end=1274, ) _sym_db.RegisterEnumDescriptor(_CONDITIONALSHAPERESIZER_RESIZECONDITION) _IMAGERESIZER = _descriptor.Descriptor( name='ImageResizer', full_name='object_detection.protos.ImageResizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='keep_aspect_ratio_resizer', full_name='object_detection.protos.ImageResizer.keep_aspect_ratio_resizer', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fixed_shape_resizer', full_name='object_detection.protos.ImageResizer.fixed_shape_resizer', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='identity_resizer', full_name='object_detection.protos.ImageResizer.identity_resizer', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='conditional_shape_resizer', full_name='object_detection.protos.ImageResizer.conditional_shape_resizer', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_to_multiple_resizer', full_name='object_detection.protos.ImageResizer.pad_to_multiple_resizer', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='image_resizer_oneof', full_name='object_detection.protos.ImageResizer.image_resizer_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=73, serialized_end=510, ) _IDENTITYRESIZER = _descriptor.Descriptor( name='IdentityResizer', full_name='object_detection.protos.IdentityResizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=512, serialized_end=529, ) _KEEPASPECTRATIORESIZER = _descriptor.Descriptor( name='KeepAspectRatioResizer', full_name='object_detection.protos.KeepAspectRatioResizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_dimension', full_name='object_detection.protos.KeepAspectRatioResizer.min_dimension', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=600, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_dimension', full_name='object_detection.protos.KeepAspectRatioResizer.max_dimension', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1024, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='resize_method', full_name='object_detection.protos.KeepAspectRatioResizer.resize_method', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_to_max_dimension', full_name='object_detection.protos.KeepAspectRatioResizer.pad_to_max_dimension', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convert_to_grayscale', full_name='object_detection.protos.KeepAspectRatioResizer.convert_to_grayscale', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='per_channel_pad_value', full_name='object_detection.protos.KeepAspectRatioResizer.per_channel_pad_value', index=5, number=6, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=532, serialized_end=788, ) _FIXEDSHAPERESIZER = _descriptor.Descriptor( name='FixedShapeResizer', full_name='object_detection.protos.FixedShapeResizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='height', full_name='object_detection.protos.FixedShapeResizer.height', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=300, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width', full_name='object_detection.protos.FixedShapeResizer.width', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=300, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='resize_method', full_name='object_detection.protos.FixedShapeResizer.resize_method', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convert_to_grayscale', full_name='object_detection.protos.FixedShapeResizer.convert_to_grayscale', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=791, serialized_end=958, ) _CONDITIONALSHAPERESIZER = _descriptor.Descriptor( name='ConditionalShapeResizer', full_name='object_detection.protos.ConditionalShapeResizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='condition', full_name='object_detection.protos.ConditionalShapeResizer.condition', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='size_threshold', full_name='object_detection.protos.ConditionalShapeResizer.size_threshold', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=300, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='resize_method', full_name='object_detection.protos.ConditionalShapeResizer.resize_method', index=2, number=3, type=14, 
cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convert_to_grayscale', full_name='object_detection.protos.ConditionalShapeResizer.convert_to_grayscale', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _CONDITIONALSHAPERESIZER_RESIZECONDITION, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=961, serialized_end=1274, ) _PADTOMULTIPLERESIZER = _descriptor.Descriptor( name='PadToMultipleResizer', full_name='object_detection.protos.PadToMultipleResizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='multiple', full_name='object_detection.protos.PadToMultipleResizer.multiple', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convert_to_grayscale', full_name='object_detection.protos.PadToMultipleResizer.convert_to_grayscale', index=1, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1276, serialized_end=1356, ) _IMAGERESIZER.fields_by_name['keep_aspect_ratio_resizer'].message_type = _KEEPASPECTRATIORESIZER _IMAGERESIZER.fields_by_name['fixed_shape_resizer'].message_type = _FIXEDSHAPERESIZER _IMAGERESIZER.fields_by_name['identity_resizer'].message_type = _IDENTITYRESIZER _IMAGERESIZER.fields_by_name['conditional_shape_resizer'].message_type = _CONDITIONALSHAPERESIZER _IMAGERESIZER.fields_by_name['pad_to_multiple_resizer'].message_type = _PADTOMULTIPLERESIZER _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'].fields.append( _IMAGERESIZER.fields_by_name['keep_aspect_ratio_resizer']) _IMAGERESIZER.fields_by_name['keep_aspect_ratio_resizer'].containing_oneof = _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'] _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'].fields.append( _IMAGERESIZER.fields_by_name['fixed_shape_resizer']) _IMAGERESIZER.fields_by_name['fixed_shape_resizer'].containing_oneof = _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'] _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'].fields.append( _IMAGERESIZER.fields_by_name['identity_resizer']) _IMAGERESIZER.fields_by_name['identity_resizer'].containing_oneof = _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'] _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'].fields.append( _IMAGERESIZER.fields_by_name['conditional_shape_resizer']) _IMAGERESIZER.fields_by_name['conditional_shape_resizer'].containing_oneof = 
_IMAGERESIZER.oneofs_by_name['image_resizer_oneof'] _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'].fields.append( _IMAGERESIZER.fields_by_name['pad_to_multiple_resizer']) _IMAGERESIZER.fields_by_name['pad_to_multiple_resizer'].containing_oneof = _IMAGERESIZER.oneofs_by_name['image_resizer_oneof'] _KEEPASPECTRATIORESIZER.fields_by_name['resize_method'].enum_type = _RESIZETYPE _FIXEDSHAPERESIZER.fields_by_name['resize_method'].enum_type = _RESIZETYPE _CONDITIONALSHAPERESIZER.fields_by_name['condition'].enum_type = _CONDITIONALSHAPERESIZER_RESIZECONDITION _CONDITIONALSHAPERESIZER.fields_by_name['resize_method'].enum_type = _RESIZETYPE _CONDITIONALSHAPERESIZER_RESIZECONDITION.containing_type = _CONDITIONALSHAPERESIZER DESCRIPTOR.message_types_by_name['ImageResizer'] = _IMAGERESIZER DESCRIPTOR.message_types_by_name['IdentityResizer'] = _IDENTITYRESIZER DESCRIPTOR.message_types_by_name['KeepAspectRatioResizer'] = _KEEPASPECTRATIORESIZER DESCRIPTOR.message_types_by_name['FixedShapeResizer'] = _FIXEDSHAPERESIZER DESCRIPTOR.message_types_by_name['ConditionalShapeResizer'] = _CONDITIONALSHAPERESIZER DESCRIPTOR.message_types_by_name['PadToMultipleResizer'] = _PADTOMULTIPLERESIZER DESCRIPTOR.enum_types_by_name['ResizeType'] = _RESIZETYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) ImageResizer = _reflection.GeneratedProtocolMessageType('ImageResizer', (_message.Message,), { 'DESCRIPTOR' : _IMAGERESIZER, '__module__' : 'object_detection.protos.image_resizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ImageResizer) }) _sym_db.RegisterMessage(ImageResizer) IdentityResizer = _reflection.GeneratedProtocolMessageType('IdentityResizer', (_message.Message,), { 'DESCRIPTOR' : _IDENTITYRESIZER, '__module__' : 'object_detection.protos.image_resizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.IdentityResizer) }) _sym_db.RegisterMessage(IdentityResizer) KeepAspectRatioResizer = _reflection.GeneratedProtocolMessageType('KeepAspectRatioResizer', (_message.Message,), { 'DESCRIPTOR' : _KEEPASPECTRATIORESIZER, '__module__' : 'object_detection.protos.image_resizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.KeepAspectRatioResizer) }) _sym_db.RegisterMessage(KeepAspectRatioResizer) FixedShapeResizer = _reflection.GeneratedProtocolMessageType('FixedShapeResizer', (_message.Message,), { 'DESCRIPTOR' : _FIXEDSHAPERESIZER, '__module__' : 'object_detection.protos.image_resizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.FixedShapeResizer) }) _sym_db.RegisterMessage(FixedShapeResizer) ConditionalShapeResizer = _reflection.GeneratedProtocolMessageType('ConditionalShapeResizer', (_message.Message,), { 'DESCRIPTOR' : _CONDITIONALSHAPERESIZER, '__module__' : 'object_detection.protos.image_resizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ConditionalShapeResizer) }) _sym_db.RegisterMessage(ConditionalShapeResizer) PadToMultipleResizer = _reflection.GeneratedProtocolMessageType('PadToMultipleResizer', (_message.Message,), { 'DESCRIPTOR' : _PADTOMULTIPLERESIZER, '__module__' : 'object_detection.protos.image_resizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.PadToMultipleResizer) }) _sym_db.RegisterMessage(PadToMultipleResizer) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/image_resizer_pb2.py
image_resizer_pb2.py
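A brief example of the oneof semantics of the ImageResizer message defined above; the branch names, ResizeType constants, and oneof name are from the descriptor, while the dimensions are illustrative.

from object_detection.protos import image_resizer_pb2

resizer = image_resizer_pb2.ImageResizer()

# Populating one branch of image_resizer_oneof selects it.
resizer.keep_aspect_ratio_resizer.min_dimension = 600
resizer.keep_aspect_ratio_resizer.max_dimension = 1024
resizer.keep_aspect_ratio_resizer.pad_to_max_dimension = True
print(resizer.WhichOneof('image_resizer_oneof'))   # keep_aspect_ratio_resizer

# Writing to another branch replaces the previous choice.
resizer.fixed_shape_resizer.height = 320
resizer.fixed_shape_resizer.width = 320
resizer.fixed_shape_resizer.resize_method = image_resizer_pb2.NEAREST_NEIGHBOR
print(resizer.WhichOneof('image_resizer_oneof'))   # fixed_shape_resizer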
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/matcher.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import argmax_matcher_pb2 as object__detection_dot_protos_dot_argmax__matcher__pb2 from object_detection.protos import bipartite_matcher_pb2 as object__detection_dot_protos_dot_bipartite__matcher__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/matcher.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n%object_detection/protos/matcher.proto\x12\x17object_detection.protos\x1a,object_detection/protos/argmax_matcher.proto\x1a/object_detection/protos/bipartite_matcher.proto\"\xa4\x01\n\x07Matcher\x12@\n\x0e\x61rgmax_matcher\x18\x01 \x01(\x0b\x32&.object_detection.protos.ArgMaxMatcherH\x00\x12\x46\n\x11\x62ipartite_matcher\x18\x02 \x01(\x0b\x32).object_detection.protos.BipartiteMatcherH\x00\x42\x0f\n\rmatcher_oneof' , dependencies=[object__detection_dot_protos_dot_argmax__matcher__pb2.DESCRIPTOR,object__detection_dot_protos_dot_bipartite__matcher__pb2.DESCRIPTOR,]) _MATCHER = _descriptor.Descriptor( name='Matcher', full_name='object_detection.protos.Matcher', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='argmax_matcher', full_name='object_detection.protos.Matcher.argmax_matcher', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bipartite_matcher', full_name='object_detection.protos.Matcher.bipartite_matcher', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='matcher_oneof', full_name='object_detection.protos.Matcher.matcher_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=162, serialized_end=326, ) _MATCHER.fields_by_name['argmax_matcher'].message_type = object__detection_dot_protos_dot_argmax__matcher__pb2._ARGMAXMATCHER _MATCHER.fields_by_name['bipartite_matcher'].message_type = object__detection_dot_protos_dot_bipartite__matcher__pb2._BIPARTITEMATCHER _MATCHER.oneofs_by_name['matcher_oneof'].fields.append( _MATCHER.fields_by_name['argmax_matcher']) _MATCHER.fields_by_name['argmax_matcher'].containing_oneof = _MATCHER.oneofs_by_name['matcher_oneof'] _MATCHER.oneofs_by_name['matcher_oneof'].fields.append( _MATCHER.fields_by_name['bipartite_matcher']) _MATCHER.fields_by_name['bipartite_matcher'].containing_oneof = _MATCHER.oneofs_by_name['matcher_oneof'] 
DESCRIPTOR.message_types_by_name['Matcher'] = _MATCHER _sym_db.RegisterFileDescriptor(DESCRIPTOR) Matcher = _reflection.GeneratedProtocolMessageType('Matcher', (_message.Message,), { 'DESCRIPTOR' : _MATCHER, '__module__' : 'object_detection.protos.matcher_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Matcher) }) _sym_db.RegisterMessage(Matcher) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/matcher_pb2.py
matcher_pb2.py
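A minimal sketch of the Matcher oneof defined above. The bipartite branch reuses the BipartiteMatcher message listed earlier; the argmax branch is only toggled here, since its fields are defined in argmax_matcher.proto rather than in this listing.

from object_detection.protos import matcher_pb2

matcher_cfg = matcher_pb2.Matcher()

# Configure the bipartite branch of matcher_oneof.
matcher_cfg.bipartite_matcher.use_matmul_gather = True
print(matcher_cfg.WhichOneof('matcher_oneof'))   # bipartite_matcher

# Marking the other branch present switches the oneof selection.
matcher_cfg.argmax_matcher.SetInParent()
print(matcher_cfg.WhichOneof('matcher_oneof'))   # argmax_matcher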
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/fpn.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/fpn.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n!object_detection/protos/fpn.proto\x12\x17object_detection.protos\"i\n\x16\x46\x65\x61turePyramidNetworks\x12\x14\n\tmin_level\x18\x01 \x01(\x05:\x01\x33\x12\x14\n\tmax_level\x18\x02 \x01(\x05:\x01\x37\x12#\n\x16\x61\x64\x64itional_layer_depth\x18\x03 \x01(\x05:\x03\x32\x35\x36\"\xa6\x01\n#BidirectionalFeaturePyramidNetworks\x12\x14\n\tmin_level\x18\x01 \x01(\x05:\x01\x33\x12\x14\n\tmax_level\x18\x02 \x01(\x05:\x01\x37\x12\x16\n\x0enum_iterations\x18\x03 \x01(\x05\x12\x13\n\x0bnum_filters\x18\x04 \x01(\x05\x12&\n\x0e\x63ombine_method\x18\x05 \x01(\t:\x0e\x66\x61st_attention' ) _FEATUREPYRAMIDNETWORKS = _descriptor.Descriptor( name='FeaturePyramidNetworks', full_name='object_detection.protos.FeaturePyramidNetworks', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_level', full_name='object_detection.protos.FeaturePyramidNetworks.min_level', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_level', full_name='object_detection.protos.FeaturePyramidNetworks.max_level', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=7, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='additional_layer_depth', full_name='object_detection.protos.FeaturePyramidNetworks.additional_layer_depth', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=62, serialized_end=167, ) _BIDIRECTIONALFEATUREPYRAMIDNETWORKS = _descriptor.Descriptor( name='BidirectionalFeaturePyramidNetworks', full_name='object_detection.protos.BidirectionalFeaturePyramidNetworks', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_level', full_name='object_detection.protos.BidirectionalFeaturePyramidNetworks.min_level', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_level', full_name='object_detection.protos.BidirectionalFeaturePyramidNetworks.max_level', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=7, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_iterations', full_name='object_detection.protos.BidirectionalFeaturePyramidNetworks.num_iterations', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_filters', full_name='object_detection.protos.BidirectionalFeaturePyramidNetworks.num_filters', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='combine_method', full_name='object_detection.protos.BidirectionalFeaturePyramidNetworks.combine_method', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"fast_attention".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=170, serialized_end=336, ) DESCRIPTOR.message_types_by_name['FeaturePyramidNetworks'] = _FEATUREPYRAMIDNETWORKS DESCRIPTOR.message_types_by_name['BidirectionalFeaturePyramidNetworks'] = _BIDIRECTIONALFEATUREPYRAMIDNETWORKS _sym_db.RegisterFileDescriptor(DESCRIPTOR) FeaturePyramidNetworks = _reflection.GeneratedProtocolMessageType('FeaturePyramidNetworks', (_message.Message,), { 'DESCRIPTOR' : _FEATUREPYRAMIDNETWORKS, '__module__' : 'object_detection.protos.fpn_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.FeaturePyramidNetworks) }) _sym_db.RegisterMessage(FeaturePyramidNetworks) BidirectionalFeaturePyramidNetworks = _reflection.GeneratedProtocolMessageType('BidirectionalFeaturePyramidNetworks', (_message.Message,), { 'DESCRIPTOR' : _BIDIRECTIONALFEATUREPYRAMIDNETWORKS, '__module__' : 'object_detection.protos.fpn_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.BidirectionalFeaturePyramidNetworks) }) _sym_db.RegisterMessage(BidirectionalFeaturePyramidNetworks) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/fpn_pb2.py
fpn_pb2.py
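A short sketch of the two FPN configuration messages generated above; the defaults (levels 3..7, depth 256, combine_method 'fast_attention') are read off the descriptor, and the BiFPN values chosen here are illustrative.

from object_detection.protos import fpn_pb2

fpn = fpn_pb2.FeaturePyramidNetworks()
# Descriptor defaults: pyramid levels 3..7 with 256-channel additional layers.
assert (fpn.min_level, fpn.max_level, fpn.additional_layer_depth) == (3, 7, 256)

bifpn = fpn_pb2.BidirectionalFeaturePyramidNetworks()
bifpn.num_iterations = 3        # no meaningful default in the descriptor, so set explicitly
bifpn.num_filters = 96
print(bifpn.combine_method)     # 'fast_attention', the string default from the descriptor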
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/train.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import optimizer_pb2 as object__detection_dot_protos_dot_optimizer__pb2 from object_detection.protos import preprocessor_pb2 as object__detection_dot_protos_dot_preprocessor__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/train.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n#object_detection/protos/train.proto\x12\x17object_detection.protos\x1a\'object_detection/protos/optimizer.proto\x1a*object_detection/protos/preprocessor.proto\"\xb1\t\n\x0bTrainConfig\x12\x16\n\nbatch_size\x18\x01 \x01(\r:\x02\x33\x32\x12M\n\x19\x64\x61ta_augmentation_options\x18\x02 \x03(\x0b\x32*.object_detection.protos.PreprocessingStep\x12\x1c\n\rsync_replicas\x18\x03 \x01(\x08:\x05\x66\x61lse\x12,\n\x1dkeep_checkpoint_every_n_hours\x18\x04 \x01(\x02:\x05\x31\x30\x30\x30\x30\x12\x35\n\toptimizer\x18\x05 \x01(\x0b\x32\".object_detection.protos.Optimizer\x12$\n\x19gradient_clipping_by_norm\x18\x06 \x01(\x02:\x01\x30\x12\x1e\n\x14\x66ine_tune_checkpoint\x18\x07 \x01(\t:\x00\x12#\n\x19\x66ine_tune_checkpoint_type\x18\x16 \x01(\t:\x00\x12T\n\x1c\x66ine_tune_checkpoint_version\x18\x1c \x01(\x0e\x32*.object_detection.protos.CheckpointVersion:\x02V1\x12,\n\x19\x66rom_detection_checkpoint\x18\x08 \x01(\x08:\x05\x66\x61lseB\x02\x18\x01\x12\x31\n\"load_all_detection_checkpoint_vars\x18\x13 \x01(\x08:\x05\x66\x61lse\x12\x38\n*run_fine_tune_checkpoint_dummy_computation\x18\x1e \x01(\x08:\x04true\x12\x14\n\tnum_steps\x18\t \x01(\r:\x01\x30\x12\x1f\n\x13startup_delay_steps\x18\n \x01(\x02:\x02\x31\x35\x12\x1f\n\x14\x62ias_grad_multiplier\x18\x0b \x01(\x02:\x01\x30\x12\"\n\x1aupdate_trainable_variables\x18\x19 \x03(\t\x12\x18\n\x10\x66reeze_variables\x18\x0c \x03(\t\x12 \n\x15replicas_to_aggregate\x18\r \x01(\x05:\x01\x31\x12%\n\x14\x62\x61tch_queue_capacity\x18\x0e \x01(\x05:\x03\x31\x35\x30\x42\x02\x18\x01\x12&\n\x17num_batch_queue_threads\x18\x0f \x01(\x05:\x01\x38\x42\x02\x18\x01\x12&\n\x17prefetch_queue_capacity\x18\x10 \x01(\x05:\x01\x35\x42\x02\x18\x01\x12)\n\x1amerge_multiple_label_boxes\x18\x11 \x01(\x08:\x05\x66\x61lse\x12$\n\x15use_multiclass_scores\x18\x18 \x01(\x08:\x05\x66\x61lse\x12%\n\x17\x61\x64\x64_regularization_loss\x18\x12 \x01(\x08:\x04true\x12$\n\x13max_number_of_boxes\x18\x14 \x01(\x05:\x03\x31\x30\x30\x42\x02\x18\x01\x12\'\n\x19unpad_groundtruth_tensors\x18\x15 \x01(\x08:\x04true\x12%\n\x16retain_original_images\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cuse_bfloat16\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\"\n\x13summarize_gradients\x18\x1b \x01(\x08:\x05\x66\x61lse*0\n\x11\x43heckpointVersion\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02V1\x10\x01\x12\x06\n\x02V2\x10\x02' , dependencies=[object__detection_dot_protos_dot_optimizer__pb2.DESCRIPTOR,object__detection_dot_protos_dot_preprocessor__pb2.DESCRIPTOR,]) _CHECKPOINTVERSION = _descriptor.EnumDescriptor( name='CheckpointVersion', full_name='object_detection.protos.CheckpointVersion', 
filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='V1', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='V2', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1353, serialized_end=1401, ) _sym_db.RegisterEnumDescriptor(_CHECKPOINTVERSION) CheckpointVersion = enum_type_wrapper.EnumTypeWrapper(_CHECKPOINTVERSION) UNKNOWN = 0 V1 = 1 V2 = 2 _TRAINCONFIG = _descriptor.Descriptor( name='TrainConfig', full_name='object_detection.protos.TrainConfig', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='batch_size', full_name='object_detection.protos.TrainConfig.batch_size', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=True, default_value=32, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='data_augmentation_options', full_name='object_detection.protos.TrainConfig.data_augmentation_options', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='sync_replicas', full_name='object_detection.protos.TrainConfig.sync_replicas', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keep_checkpoint_every_n_hours', full_name='object_detection.protos.TrainConfig.keep_checkpoint_every_n_hours', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(10000), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='optimizer', full_name='object_detection.protos.TrainConfig.optimizer', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='gradient_clipping_by_norm', full_name='object_detection.protos.TrainConfig.gradient_clipping_by_norm', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fine_tune_checkpoint', 
full_name='object_detection.protos.TrainConfig.fine_tune_checkpoint', index=6, number=7, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fine_tune_checkpoint_type', full_name='object_detection.protos.TrainConfig.fine_tune_checkpoint_type', index=7, number=22, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fine_tune_checkpoint_version', full_name='object_detection.protos.TrainConfig.fine_tune_checkpoint_version', index=8, number=28, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='from_detection_checkpoint', full_name='object_detection.protos.TrainConfig.from_detection_checkpoint', index=9, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='load_all_detection_checkpoint_vars', full_name='object_detection.protos.TrainConfig.load_all_detection_checkpoint_vars', index=10, number=19, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='run_fine_tune_checkpoint_dummy_computation', full_name='object_detection.protos.TrainConfig.run_fine_tune_checkpoint_dummy_computation', index=11, number=30, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_steps', full_name='object_detection.protos.TrainConfig.num_steps', index=12, number=9, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='startup_delay_steps', full_name='object_detection.protos.TrainConfig.startup_delay_steps', index=13, number=10, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(15), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_grad_multiplier', full_name='object_detection.protos.TrainConfig.bias_grad_multiplier', index=14, number=11, type=2, cpp_type=6, label=1, has_default_value=True, 
default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='update_trainable_variables', full_name='object_detection.protos.TrainConfig.update_trainable_variables', index=15, number=25, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='freeze_variables', full_name='object_detection.protos.TrainConfig.freeze_variables', index=16, number=12, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='replicas_to_aggregate', full_name='object_detection.protos.TrainConfig.replicas_to_aggregate', index=17, number=13, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batch_queue_capacity', full_name='object_detection.protos.TrainConfig.batch_queue_capacity', index=18, number=14, type=5, cpp_type=1, label=1, has_default_value=True, default_value=150, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_batch_queue_threads', full_name='object_detection.protos.TrainConfig.num_batch_queue_threads', index=19, number=15, type=5, cpp_type=1, label=1, has_default_value=True, default_value=8, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='prefetch_queue_capacity', full_name='object_detection.protos.TrainConfig.prefetch_queue_capacity', index=20, number=16, type=5, cpp_type=1, label=1, has_default_value=True, default_value=5, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='merge_multiple_label_boxes', full_name='object_detection.protos.TrainConfig.merge_multiple_label_boxes', index=21, number=17, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_multiclass_scores', full_name='object_detection.protos.TrainConfig.use_multiclass_scores', index=22, number=24, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( 
name='add_regularization_loss', full_name='object_detection.protos.TrainConfig.add_regularization_loss', index=23, number=18, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_number_of_boxes', full_name='object_detection.protos.TrainConfig.max_number_of_boxes', index=24, number=20, type=5, cpp_type=1, label=1, has_default_value=True, default_value=100, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='unpad_groundtruth_tensors', full_name='object_detection.protos.TrainConfig.unpad_groundtruth_tensors', index=25, number=21, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='retain_original_images', full_name='object_detection.protos.TrainConfig.retain_original_images', index=26, number=23, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_bfloat16', full_name='object_detection.protos.TrainConfig.use_bfloat16', index=27, number=26, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='summarize_gradients', full_name='object_detection.protos.TrainConfig.summarize_gradients', index=28, number=27, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=150, serialized_end=1351, ) _TRAINCONFIG.fields_by_name['data_augmentation_options'].message_type = object__detection_dot_protos_dot_preprocessor__pb2._PREPROCESSINGSTEP _TRAINCONFIG.fields_by_name['optimizer'].message_type = object__detection_dot_protos_dot_optimizer__pb2._OPTIMIZER _TRAINCONFIG.fields_by_name['fine_tune_checkpoint_version'].enum_type = _CHECKPOINTVERSION DESCRIPTOR.message_types_by_name['TrainConfig'] = _TRAINCONFIG DESCRIPTOR.enum_types_by_name['CheckpointVersion'] = _CHECKPOINTVERSION _sym_db.RegisterFileDescriptor(DESCRIPTOR) TrainConfig = _reflection.GeneratedProtocolMessageType('TrainConfig', (_message.Message,), { 'DESCRIPTOR' : _TRAINCONFIG, '__module__' : 'object_detection.protos.train_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.TrainConfig) }) _sym_db.RegisterMessage(TrainConfig) _TRAINCONFIG.fields_by_name['from_detection_checkpoint']._options = None 
_TRAINCONFIG.fields_by_name['batch_queue_capacity']._options = None _TRAINCONFIG.fields_by_name['num_batch_queue_threads']._options = None _TRAINCONFIG.fields_by_name['prefetch_queue_capacity']._options = None _TRAINCONFIG.fields_by_name['max_number_of_boxes']._options = None # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/train_pb2.py
train_pb2.py
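The train_pb2 module above exposes the generated TrainConfig message. As a minimal usage sketch (assuming the 123-object-detection distribution and the protobuf runtime are installed; the concrete field values below are purely illustrative), the generated class behaves like any other proto2 message:

from google.protobuf import text_format
from object_detection.protos import train_pb2

# Build a TrainConfig and override a few of the fields declared in the descriptor.
train_config = train_pb2.TrainConfig()
train_config.batch_size = 64                               # default is 32
train_config.num_steps = 200000                            # default is 0
train_config.fine_tune_checkpoint_version = train_pb2.V2   # CheckpointVersion enum, default V1

# Round-trip through the text format commonly used for pipeline configs.
as_text = text_format.MessageToString(train_config)
restored = text_format.Parse(as_text, train_pb2.TrainConfig())
assert restored.batch_size == 64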
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/graph_rewriter.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='object_detection/protos/graph_rewriter.proto',
  package='object_detection.protos',
  syntax='proto2',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n,object_detection/protos/graph_rewriter.proto\x12\x17object_detection.protos\"W\n\rGraphRewriter\x12;\n\x0cquantization\x18\x01 \x01(\x0b\x32%.object_detection.protos.Quantization*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"s\n\x0cQuantization\x12\x15\n\x05\x64\x65lay\x18\x01 \x01(\x05:\x06\x35\x30\x30\x30\x30\x30\x12\x16\n\x0bweight_bits\x18\x02 \x01(\x05:\x01\x38\x12\x1a\n\x0f\x61\x63tivation_bits\x18\x03 \x01(\x05:\x01\x38\x12\x18\n\tsymmetric\x18\x04 \x01(\x08:\x05\x66\x61lse'
)


_GRAPHREWRITER = _descriptor.Descriptor(
  name='GraphRewriter',
  full_name='object_detection.protos.GraphRewriter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='quantization', full_name='object_detection.protos.GraphRewriter.quantization', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000, 536870912), ],
  oneofs=[
  ],
  serialized_start=73,
  serialized_end=160,
)


_QUANTIZATION = _descriptor.Descriptor(
  name='Quantization',
  full_name='object_detection.protos.Quantization',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='delay', full_name='object_detection.protos.Quantization.delay', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=500000,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='weight_bits', full_name='object_detection.protos.Quantization.weight_bits', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=8,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='activation_bits', full_name='object_detection.protos.Quantization.activation_bits', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=8,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='symmetric', full_name='object_detection.protos.Quantization.symmetric', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=162,
  serialized_end=277,
)

_GRAPHREWRITER.fields_by_name['quantization'].message_type = _QUANTIZATION
DESCRIPTOR.message_types_by_name['GraphRewriter'] = _GRAPHREWRITER
DESCRIPTOR.message_types_by_name['Quantization'] = _QUANTIZATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

GraphRewriter = _reflection.GeneratedProtocolMessageType('GraphRewriter', (_message.Message,), {
  'DESCRIPTOR' : _GRAPHREWRITER,
  '__module__' : 'object_detection.protos.graph_rewriter_pb2'
  # @@protoc_insertion_point(class_scope:object_detection.protos.GraphRewriter)
  })
_sym_db.RegisterMessage(GraphRewriter)

Quantization = _reflection.GeneratedProtocolMessageType('Quantization', (_message.Message,), {
  'DESCRIPTOR' : _QUANTIZATION,
  '__module__' : 'object_detection.protos.graph_rewriter_pb2'
  # @@protoc_insertion_point(class_scope:object_detection.protos.Quantization)
  })
_sym_db.RegisterMessage(Quantization)

# @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/graph_rewriter_pb2.py
graph_rewriter_pb2.py
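graph_rewriter_pb2 defines GraphRewriter with a single quantization submessage. A short, hedged sketch of populating the Quantization fields declared above (delay, weight_bits, activation_bits, symmetric); the values chosen are only examples:

from object_detection.protos import graph_rewriter_pb2

# Configure the graph rewriter's quantization options.
rewriter = graph_rewriter_pb2.GraphRewriter()
rewriter.quantization.delay = 100000       # default is 500000
rewriter.quantization.weight_bits = 8      # default is 8
rewriter.quantization.activation_bits = 8  # default is 8
rewriter.quantization.symmetric = True     # default is False

# Assigning to a nested field auto-creates the submessage in proto2 Python.
assert rewriter.HasField('quantization')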
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/losses.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/losses.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n$object_detection/protos/losses.proto\x12\x17object_detection.protos\"\xfe\x05\n\x04Loss\x12\x44\n\x11localization_loss\x18\x01 \x01(\x0b\x32).object_detection.protos.LocalizationLoss\x12H\n\x13\x63lassification_loss\x18\x02 \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12\x45\n\x12hard_example_miner\x18\x03 \x01(\x0b\x32).object_detection.protos.HardExampleMiner\x12 \n\x15\x63lassification_weight\x18\x04 \x01(\x02:\x01\x31\x12\x1e\n\x13localization_weight\x18\x05 \x01(\x02:\x01\x31\x12M\n\x16random_example_sampler\x18\x06 \x01(\x0b\x32-.object_detection.protos.RandomExampleSampler\x12I\n\x11\x65qualization_loss\x18\x07 \x01(\x0b\x32..object_detection.protos.Loss.EqualizationLoss\x12V\n\x15\x65xpected_loss_weights\x18\x12 \x01(\x0e\x32\x31.object_detection.protos.Loss.ExpectedLossWeights:\x04NONE\x12#\n\x18min_num_negative_samples\x18\x13 \x01(\x02:\x01\x30\x12*\n\x1f\x64\x65sired_negative_sampling_ratio\x18\x14 \x01(\x02:\x01\x33\x1a?\n\x10\x45qualizationLoss\x12\x11\n\x06weight\x18\x01 \x01(\x02:\x01\x30\x12\x18\n\x10\x65xclude_prefixes\x18\x02 \x03(\t\"Y\n\x13\x45xpectedLossWeights\x12\x08\n\x04NONE\x10\x00\x12\x15\n\x11\x45XPECTED_SAMPLING\x10\x01\x12!\n\x1dREWEIGHTING_UNMATCHED_ANCHORS\x10\x02\"\xb7\x03\n\x10LocalizationLoss\x12J\n\x0bweighted_l2\x18\x01 \x01(\x0b\x32\x33.object_detection.protos.WeightedL2LocalizationLossH\x00\x12W\n\x12weighted_smooth_l1\x18\x02 \x01(\x0b\x32\x39.object_detection.protos.WeightedSmoothL1LocalizationLossH\x00\x12L\n\x0cweighted_iou\x18\x03 \x01(\x0b\x32\x34.object_detection.protos.WeightedIOULocalizationLossH\x00\x12K\n\x14l1_localization_loss\x18\x04 \x01(\x0b\x32+.object_detection.protos.L1LocalizationLossH\x00\x12N\n\rweighted_giou\x18\x05 \x01(\x0b\x32\x35.object_detection.protos.WeightedGIOULocalizationLossH\x00\x42\x13\n\x11localization_loss\">\n\x1aWeightedL2LocalizationLoss\x12 \n\x11\x61nchorwise_output\x18\x01 \x01(\x08:\x05\x66\x61lse\"V\n WeightedSmoothL1LocalizationLoss\x12 \n\x11\x61nchorwise_output\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05\x64\x65lta\x18\x02 \x01(\x02:\x01\x31\"\x1d\n\x1bWeightedIOULocalizationLoss\"\x14\n\x12L1LocalizationLoss\"\x1e\n\x1cWeightedGIOULocalizationLoss\"\xd1\x05\n\x12\x43lassificationLoss\x12V\n\x10weighted_sigmoid\x18\x01 \x01(\x0b\x32:.object_detection.protos.WeightedSigmoidClassificationLossH\x00\x12V\n\x10weighted_softmax\x18\x02 \x01(\x0b\x32:.object_detection.protos.WeightedSoftmaxClassificationLossH\x00\x12j\n\x17weighted_logits_softmax\x18\x05 \x01(\x0b\x32G.object_detection.protos.WeightedSoftmaxClassificationAgainstLogitsLossH\x00\x12^\n\x14\x62ootstrapped_sigmoid\x18\x03 \x01(\x0b\x32>.object_detection.protos.BootstrappedSigmoidClassificationLossH\x00\x12Y\n\x16weighted_sigmoid_focal\x18\x04 
\x01(\x0b\x32\x37.object_detection.protos.SigmoidFocalClassificationLossH\x00\x12g\n#penalty_reduced_logistic_focal_loss\x18\x06 \x01(\x0b\x32\x38.object_detection.protos.PenaltyReducedLogisticFocalLossH\x00\x12\x64\n!weighted_dice_classification_loss\x18\x07 \x01(\x0b\x32\x37.object_detection.protos.WeightedDiceClassificationLossH\x00\x42\x15\n\x13\x63lassification_loss\"E\n!WeightedSigmoidClassificationLoss\x12 \n\x11\x61nchorwise_output\x18\x01 \x01(\x08:\x05\x66\x61lse\"c\n\x1eSigmoidFocalClassificationLoss\x12 \n\x11\x61nchorwise_output\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05gamma\x18\x02 \x01(\x02:\x01\x32\x12\r\n\x05\x61lpha\x18\x03 \x01(\x02\"]\n!WeightedSoftmaxClassificationLoss\x12 \n\x11\x61nchorwise_output\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x16\n\x0blogit_scale\x18\x02 \x01(\x02:\x01\x31\"j\n.WeightedSoftmaxClassificationAgainstLogitsLoss\x12 \n\x11\x61nchorwise_output\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x16\n\x0blogit_scale\x18\x02 \x01(\x02:\x01\x31\"w\n%BootstrappedSigmoidClassificationLoss\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x1d\n\x0ehard_bootstrap\x18\x02 \x01(\x08:\x05\x66\x61lse\x12 \n\x11\x61nchorwise_output\x18\x03 \x01(\x08:\x05\x66\x61lse\">\n\x1fPenaltyReducedLogisticFocalLoss\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"\xa1\x02\n\x10HardExampleMiner\x12\x1d\n\x11num_hard_examples\x18\x01 \x01(\x05:\x02\x36\x34\x12\x1a\n\riou_threshold\x18\x02 \x01(\x02:\x03\x30.7\x12K\n\tloss_type\x18\x03 \x01(\x0e\x32\x32.object_detection.protos.HardExampleMiner.LossType:\x04\x42OTH\x12%\n\x1amax_negatives_per_positive\x18\x04 \x01(\x05:\x01\x30\x12\"\n\x17min_negatives_per_image\x18\x05 \x01(\x05:\x01\x30\":\n\x08LossType\x12\x08\n\x04\x42OTH\x10\x00\x12\x12\n\x0e\x43LASSIFICATION\x10\x01\x12\x10\n\x0cLOCALIZATION\x10\x02\">\n\x14RandomExampleSampler\x12&\n\x18positive_sample_fraction\x18\x01 \x01(\x02:\x04\x30.01\"F\n\x1eWeightedDiceClassificationLoss\x12$\n\x15squared_normalization\x18\x01 \x01(\x08:\x05\x66\x61lse' ) _LOSS_EXPECTEDLOSSWEIGHTS = _descriptor.EnumDescriptor( name='ExpectedLossWeights', full_name='object_detection.protos.Loss.ExpectedLossWeights', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='NONE', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXPECTED_SAMPLING', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='REWEIGHTING_UNMATCHED_ANCHORS', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=743, serialized_end=832, ) _sym_db.RegisterEnumDescriptor(_LOSS_EXPECTEDLOSSWEIGHTS) _HARDEXAMPLEMINER_LOSSTYPE = _descriptor.EnumDescriptor( name='LossType', full_name='object_detection.protos.HardExampleMiner.LossType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='BOTH', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CLASSIFICATION', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='LOCALIZATION', index=2, number=2, serialized_options=None, type=None, 
create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=3029, serialized_end=3087, ) _sym_db.RegisterEnumDescriptor(_HARDEXAMPLEMINER_LOSSTYPE) _LOSS_EQUALIZATIONLOSS = _descriptor.Descriptor( name='EqualizationLoss', full_name='object_detection.protos.Loss.EqualizationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='weight', full_name='object_detection.protos.Loss.EqualizationLoss.weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='exclude_prefixes', full_name='object_detection.protos.Loss.EqualizationLoss.exclude_prefixes', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=678, serialized_end=741, ) _LOSS = _descriptor.Descriptor( name='Loss', full_name='object_detection.protos.Loss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='localization_loss', full_name='object_detection.protos.Loss.localization_loss', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='classification_loss', full_name='object_detection.protos.Loss.classification_loss', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hard_example_miner', full_name='object_detection.protos.Loss.hard_example_miner', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='classification_weight', full_name='object_detection.protos.Loss.classification_weight', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='localization_weight', full_name='object_detection.protos.Loss.localization_weight', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_example_sampler', full_name='object_detection.protos.Loss.random_example_sampler', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='equalization_loss', full_name='object_detection.protos.Loss.equalization_loss', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='expected_loss_weights', full_name='object_detection.protos.Loss.expected_loss_weights', index=7, number=18, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_num_negative_samples', full_name='object_detection.protos.Loss.min_num_negative_samples', index=8, number=19, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='desired_negative_sampling_ratio', full_name='object_detection.protos.Loss.desired_negative_sampling_ratio', index=9, number=20, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(3), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_LOSS_EQUALIZATIONLOSS, ], enum_types=[ _LOSS_EXPECTEDLOSSWEIGHTS, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=66, serialized_end=832, ) _LOCALIZATIONLOSS = _descriptor.Descriptor( name='LocalizationLoss', full_name='object_detection.protos.LocalizationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='weighted_l2', full_name='object_detection.protos.LocalizationLoss.weighted_l2', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weighted_smooth_l1', full_name='object_detection.protos.LocalizationLoss.weighted_smooth_l1', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weighted_iou', full_name='object_detection.protos.LocalizationLoss.weighted_iou', index=2, number=3, 
type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='l1_localization_loss', full_name='object_detection.protos.LocalizationLoss.l1_localization_loss', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weighted_giou', full_name='object_detection.protos.LocalizationLoss.weighted_giou', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='localization_loss', full_name='object_detection.protos.LocalizationLoss.localization_loss', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=835, serialized_end=1274, ) _WEIGHTEDL2LOCALIZATIONLOSS = _descriptor.Descriptor( name='WeightedL2LocalizationLoss', full_name='object_detection.protos.WeightedL2LocalizationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='anchorwise_output', full_name='object_detection.protos.WeightedL2LocalizationLoss.anchorwise_output', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1276, serialized_end=1338, ) _WEIGHTEDSMOOTHL1LOCALIZATIONLOSS = _descriptor.Descriptor( name='WeightedSmoothL1LocalizationLoss', full_name='object_detection.protos.WeightedSmoothL1LocalizationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='anchorwise_output', full_name='object_detection.protos.WeightedSmoothL1LocalizationLoss.anchorwise_output', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='delta', full_name='object_detection.protos.WeightedSmoothL1LocalizationLoss.delta', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, 
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1340, serialized_end=1426, ) _WEIGHTEDIOULOCALIZATIONLOSS = _descriptor.Descriptor( name='WeightedIOULocalizationLoss', full_name='object_detection.protos.WeightedIOULocalizationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1428, serialized_end=1457, ) _L1LOCALIZATIONLOSS = _descriptor.Descriptor( name='L1LocalizationLoss', full_name='object_detection.protos.L1LocalizationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1459, serialized_end=1479, ) _WEIGHTEDGIOULOCALIZATIONLOSS = _descriptor.Descriptor( name='WeightedGIOULocalizationLoss', full_name='object_detection.protos.WeightedGIOULocalizationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1481, serialized_end=1511, ) _CLASSIFICATIONLOSS = _descriptor.Descriptor( name='ClassificationLoss', full_name='object_detection.protos.ClassificationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='weighted_sigmoid', full_name='object_detection.protos.ClassificationLoss.weighted_sigmoid', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weighted_softmax', full_name='object_detection.protos.ClassificationLoss.weighted_softmax', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weighted_logits_softmax', full_name='object_detection.protos.ClassificationLoss.weighted_logits_softmax', index=2, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bootstrapped_sigmoid', full_name='object_detection.protos.ClassificationLoss.bootstrapped_sigmoid', index=3, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weighted_sigmoid_focal', full_name='object_detection.protos.ClassificationLoss.weighted_sigmoid_focal', index=4, number=4, type=11, cpp_type=10, label=1, 
has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='penalty_reduced_logistic_focal_loss', full_name='object_detection.protos.ClassificationLoss.penalty_reduced_logistic_focal_loss', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weighted_dice_classification_loss', full_name='object_detection.protos.ClassificationLoss.weighted_dice_classification_loss', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='classification_loss', full_name='object_detection.protos.ClassificationLoss.classification_loss', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=1514, serialized_end=2235, ) _WEIGHTEDSIGMOIDCLASSIFICATIONLOSS = _descriptor.Descriptor( name='WeightedSigmoidClassificationLoss', full_name='object_detection.protos.WeightedSigmoidClassificationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='anchorwise_output', full_name='object_detection.protos.WeightedSigmoidClassificationLoss.anchorwise_output', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2237, serialized_end=2306, ) _SIGMOIDFOCALCLASSIFICATIONLOSS = _descriptor.Descriptor( name='SigmoidFocalClassificationLoss', full_name='object_detection.protos.SigmoidFocalClassificationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='anchorwise_output', full_name='object_detection.protos.SigmoidFocalClassificationLoss.anchorwise_output', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='gamma', full_name='object_detection.protos.SigmoidFocalClassificationLoss.gamma', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(2), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='alpha', full_name='object_detection.protos.SigmoidFocalClassificationLoss.alpha', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2308, serialized_end=2407, ) _WEIGHTEDSOFTMAXCLASSIFICATIONLOSS = _descriptor.Descriptor( name='WeightedSoftmaxClassificationLoss', full_name='object_detection.protos.WeightedSoftmaxClassificationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='anchorwise_output', full_name='object_detection.protos.WeightedSoftmaxClassificationLoss.anchorwise_output', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='logit_scale', full_name='object_detection.protos.WeightedSoftmaxClassificationLoss.logit_scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2409, serialized_end=2502, ) _WEIGHTEDSOFTMAXCLASSIFICATIONAGAINSTLOGITSLOSS = _descriptor.Descriptor( name='WeightedSoftmaxClassificationAgainstLogitsLoss', full_name='object_detection.protos.WeightedSoftmaxClassificationAgainstLogitsLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='anchorwise_output', full_name='object_detection.protos.WeightedSoftmaxClassificationAgainstLogitsLoss.anchorwise_output', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='logit_scale', full_name='object_detection.protos.WeightedSoftmaxClassificationAgainstLogitsLoss.logit_scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2504, serialized_end=2610, ) _BOOTSTRAPPEDSIGMOIDCLASSIFICATIONLOSS = _descriptor.Descriptor( name='BootstrappedSigmoidClassificationLoss', full_name='object_detection.protos.BootstrappedSigmoidClassificationLoss', filename=None, file=DESCRIPTOR, containing_type=None, 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='alpha', full_name='object_detection.protos.BootstrappedSigmoidClassificationLoss.alpha', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hard_bootstrap', full_name='object_detection.protos.BootstrappedSigmoidClassificationLoss.hard_bootstrap', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='anchorwise_output', full_name='object_detection.protos.BootstrappedSigmoidClassificationLoss.anchorwise_output', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2612, serialized_end=2731, ) _PENALTYREDUCEDLOGISTICFOCALLOSS = _descriptor.Descriptor( name='PenaltyReducedLogisticFocalLoss', full_name='object_detection.protos.PenaltyReducedLogisticFocalLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='alpha', full_name='object_detection.protos.PenaltyReducedLogisticFocalLoss.alpha', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='beta', full_name='object_detection.protos.PenaltyReducedLogisticFocalLoss.beta', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2733, serialized_end=2795, ) _HARDEXAMPLEMINER = _descriptor.Descriptor( name='HardExampleMiner', full_name='object_detection.protos.HardExampleMiner', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_hard_examples', full_name='object_detection.protos.HardExampleMiner.num_hard_examples', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=64, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='iou_threshold', full_name='object_detection.protos.HardExampleMiner.iou_threshold', index=1, number=2, 
type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.7), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='loss_type', full_name='object_detection.protos.HardExampleMiner.loss_type', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_negatives_per_positive', full_name='object_detection.protos.HardExampleMiner.max_negatives_per_positive', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_negatives_per_image', full_name='object_detection.protos.HardExampleMiner.min_negatives_per_image', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _HARDEXAMPLEMINER_LOSSTYPE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2798, serialized_end=3087, ) _RANDOMEXAMPLESAMPLER = _descriptor.Descriptor( name='RandomExampleSampler', full_name='object_detection.protos.RandomExampleSampler', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='positive_sample_fraction', full_name='object_detection.protos.RandomExampleSampler.positive_sample_fraction', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.01), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3089, serialized_end=3151, ) _WEIGHTEDDICECLASSIFICATIONLOSS = _descriptor.Descriptor( name='WeightedDiceClassificationLoss', full_name='object_detection.protos.WeightedDiceClassificationLoss', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='squared_normalization', full_name='object_detection.protos.WeightedDiceClassificationLoss.squared_normalization', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3153, serialized_end=3223, ) _LOSS_EQUALIZATIONLOSS.containing_type = _LOSS 
_LOSS.fields_by_name['localization_loss'].message_type = _LOCALIZATIONLOSS _LOSS.fields_by_name['classification_loss'].message_type = _CLASSIFICATIONLOSS _LOSS.fields_by_name['hard_example_miner'].message_type = _HARDEXAMPLEMINER _LOSS.fields_by_name['random_example_sampler'].message_type = _RANDOMEXAMPLESAMPLER _LOSS.fields_by_name['equalization_loss'].message_type = _LOSS_EQUALIZATIONLOSS _LOSS.fields_by_name['expected_loss_weights'].enum_type = _LOSS_EXPECTEDLOSSWEIGHTS _LOSS_EXPECTEDLOSSWEIGHTS.containing_type = _LOSS _LOCALIZATIONLOSS.fields_by_name['weighted_l2'].message_type = _WEIGHTEDL2LOCALIZATIONLOSS _LOCALIZATIONLOSS.fields_by_name['weighted_smooth_l1'].message_type = _WEIGHTEDSMOOTHL1LOCALIZATIONLOSS _LOCALIZATIONLOSS.fields_by_name['weighted_iou'].message_type = _WEIGHTEDIOULOCALIZATIONLOSS _LOCALIZATIONLOSS.fields_by_name['l1_localization_loss'].message_type = _L1LOCALIZATIONLOSS _LOCALIZATIONLOSS.fields_by_name['weighted_giou'].message_type = _WEIGHTEDGIOULOCALIZATIONLOSS _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'].fields.append( _LOCALIZATIONLOSS.fields_by_name['weighted_l2']) _LOCALIZATIONLOSS.fields_by_name['weighted_l2'].containing_oneof = _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'] _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'].fields.append( _LOCALIZATIONLOSS.fields_by_name['weighted_smooth_l1']) _LOCALIZATIONLOSS.fields_by_name['weighted_smooth_l1'].containing_oneof = _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'] _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'].fields.append( _LOCALIZATIONLOSS.fields_by_name['weighted_iou']) _LOCALIZATIONLOSS.fields_by_name['weighted_iou'].containing_oneof = _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'] _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'].fields.append( _LOCALIZATIONLOSS.fields_by_name['l1_localization_loss']) _LOCALIZATIONLOSS.fields_by_name['l1_localization_loss'].containing_oneof = _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'] _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'].fields.append( _LOCALIZATIONLOSS.fields_by_name['weighted_giou']) _LOCALIZATIONLOSS.fields_by_name['weighted_giou'].containing_oneof = _LOCALIZATIONLOSS.oneofs_by_name['localization_loss'] _CLASSIFICATIONLOSS.fields_by_name['weighted_sigmoid'].message_type = _WEIGHTEDSIGMOIDCLASSIFICATIONLOSS _CLASSIFICATIONLOSS.fields_by_name['weighted_softmax'].message_type = _WEIGHTEDSOFTMAXCLASSIFICATIONLOSS _CLASSIFICATIONLOSS.fields_by_name['weighted_logits_softmax'].message_type = _WEIGHTEDSOFTMAXCLASSIFICATIONAGAINSTLOGITSLOSS _CLASSIFICATIONLOSS.fields_by_name['bootstrapped_sigmoid'].message_type = _BOOTSTRAPPEDSIGMOIDCLASSIFICATIONLOSS _CLASSIFICATIONLOSS.fields_by_name['weighted_sigmoid_focal'].message_type = _SIGMOIDFOCALCLASSIFICATIONLOSS _CLASSIFICATIONLOSS.fields_by_name['penalty_reduced_logistic_focal_loss'].message_type = _PENALTYREDUCEDLOGISTICFOCALLOSS _CLASSIFICATIONLOSS.fields_by_name['weighted_dice_classification_loss'].message_type = _WEIGHTEDDICECLASSIFICATIONLOSS _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'].fields.append( _CLASSIFICATIONLOSS.fields_by_name['weighted_sigmoid']) _CLASSIFICATIONLOSS.fields_by_name['weighted_sigmoid'].containing_oneof = _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'] _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'].fields.append( _CLASSIFICATIONLOSS.fields_by_name['weighted_softmax']) _CLASSIFICATIONLOSS.fields_by_name['weighted_softmax'].containing_oneof = _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'] 
_CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'].fields.append( _CLASSIFICATIONLOSS.fields_by_name['weighted_logits_softmax']) _CLASSIFICATIONLOSS.fields_by_name['weighted_logits_softmax'].containing_oneof = _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'] _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'].fields.append( _CLASSIFICATIONLOSS.fields_by_name['bootstrapped_sigmoid']) _CLASSIFICATIONLOSS.fields_by_name['bootstrapped_sigmoid'].containing_oneof = _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'] _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'].fields.append( _CLASSIFICATIONLOSS.fields_by_name['weighted_sigmoid_focal']) _CLASSIFICATIONLOSS.fields_by_name['weighted_sigmoid_focal'].containing_oneof = _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'] _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'].fields.append( _CLASSIFICATIONLOSS.fields_by_name['penalty_reduced_logistic_focal_loss']) _CLASSIFICATIONLOSS.fields_by_name['penalty_reduced_logistic_focal_loss'].containing_oneof = _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'] _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'].fields.append( _CLASSIFICATIONLOSS.fields_by_name['weighted_dice_classification_loss']) _CLASSIFICATIONLOSS.fields_by_name['weighted_dice_classification_loss'].containing_oneof = _CLASSIFICATIONLOSS.oneofs_by_name['classification_loss'] _HARDEXAMPLEMINER.fields_by_name['loss_type'].enum_type = _HARDEXAMPLEMINER_LOSSTYPE _HARDEXAMPLEMINER_LOSSTYPE.containing_type = _HARDEXAMPLEMINER DESCRIPTOR.message_types_by_name['Loss'] = _LOSS DESCRIPTOR.message_types_by_name['LocalizationLoss'] = _LOCALIZATIONLOSS DESCRIPTOR.message_types_by_name['WeightedL2LocalizationLoss'] = _WEIGHTEDL2LOCALIZATIONLOSS DESCRIPTOR.message_types_by_name['WeightedSmoothL1LocalizationLoss'] = _WEIGHTEDSMOOTHL1LOCALIZATIONLOSS DESCRIPTOR.message_types_by_name['WeightedIOULocalizationLoss'] = _WEIGHTEDIOULOCALIZATIONLOSS DESCRIPTOR.message_types_by_name['L1LocalizationLoss'] = _L1LOCALIZATIONLOSS DESCRIPTOR.message_types_by_name['WeightedGIOULocalizationLoss'] = _WEIGHTEDGIOULOCALIZATIONLOSS DESCRIPTOR.message_types_by_name['ClassificationLoss'] = _CLASSIFICATIONLOSS DESCRIPTOR.message_types_by_name['WeightedSigmoidClassificationLoss'] = _WEIGHTEDSIGMOIDCLASSIFICATIONLOSS DESCRIPTOR.message_types_by_name['SigmoidFocalClassificationLoss'] = _SIGMOIDFOCALCLASSIFICATIONLOSS DESCRIPTOR.message_types_by_name['WeightedSoftmaxClassificationLoss'] = _WEIGHTEDSOFTMAXCLASSIFICATIONLOSS DESCRIPTOR.message_types_by_name['WeightedSoftmaxClassificationAgainstLogitsLoss'] = _WEIGHTEDSOFTMAXCLASSIFICATIONAGAINSTLOGITSLOSS DESCRIPTOR.message_types_by_name['BootstrappedSigmoidClassificationLoss'] = _BOOTSTRAPPEDSIGMOIDCLASSIFICATIONLOSS DESCRIPTOR.message_types_by_name['PenaltyReducedLogisticFocalLoss'] = _PENALTYREDUCEDLOGISTICFOCALLOSS DESCRIPTOR.message_types_by_name['HardExampleMiner'] = _HARDEXAMPLEMINER DESCRIPTOR.message_types_by_name['RandomExampleSampler'] = _RANDOMEXAMPLESAMPLER DESCRIPTOR.message_types_by_name['WeightedDiceClassificationLoss'] = _WEIGHTEDDICECLASSIFICATIONLOSS _sym_db.RegisterFileDescriptor(DESCRIPTOR) Loss = _reflection.GeneratedProtocolMessageType('Loss', (_message.Message,), { 'EqualizationLoss' : _reflection.GeneratedProtocolMessageType('EqualizationLoss', (_message.Message,), { 'DESCRIPTOR' : _LOSS_EQUALIZATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Loss.EqualizationLoss) }) , 
'DESCRIPTOR' : _LOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Loss) }) _sym_db.RegisterMessage(Loss) _sym_db.RegisterMessage(Loss.EqualizationLoss) LocalizationLoss = _reflection.GeneratedProtocolMessageType('LocalizationLoss', (_message.Message,), { 'DESCRIPTOR' : _LOCALIZATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.LocalizationLoss) }) _sym_db.RegisterMessage(LocalizationLoss) WeightedL2LocalizationLoss = _reflection.GeneratedProtocolMessageType('WeightedL2LocalizationLoss', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTEDL2LOCALIZATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightedL2LocalizationLoss) }) _sym_db.RegisterMessage(WeightedL2LocalizationLoss) WeightedSmoothL1LocalizationLoss = _reflection.GeneratedProtocolMessageType('WeightedSmoothL1LocalizationLoss', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTEDSMOOTHL1LOCALIZATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightedSmoothL1LocalizationLoss) }) _sym_db.RegisterMessage(WeightedSmoothL1LocalizationLoss) WeightedIOULocalizationLoss = _reflection.GeneratedProtocolMessageType('WeightedIOULocalizationLoss', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTEDIOULOCALIZATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightedIOULocalizationLoss) }) _sym_db.RegisterMessage(WeightedIOULocalizationLoss) L1LocalizationLoss = _reflection.GeneratedProtocolMessageType('L1LocalizationLoss', (_message.Message,), { 'DESCRIPTOR' : _L1LOCALIZATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.L1LocalizationLoss) }) _sym_db.RegisterMessage(L1LocalizationLoss) WeightedGIOULocalizationLoss = _reflection.GeneratedProtocolMessageType('WeightedGIOULocalizationLoss', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTEDGIOULOCALIZATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightedGIOULocalizationLoss) }) _sym_db.RegisterMessage(WeightedGIOULocalizationLoss) ClassificationLoss = _reflection.GeneratedProtocolMessageType('ClassificationLoss', (_message.Message,), { 'DESCRIPTOR' : _CLASSIFICATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ClassificationLoss) }) _sym_db.RegisterMessage(ClassificationLoss) WeightedSigmoidClassificationLoss = _reflection.GeneratedProtocolMessageType('WeightedSigmoidClassificationLoss', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTEDSIGMOIDCLASSIFICATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightedSigmoidClassificationLoss) }) _sym_db.RegisterMessage(WeightedSigmoidClassificationLoss) SigmoidFocalClassificationLoss = _reflection.GeneratedProtocolMessageType('SigmoidFocalClassificationLoss', (_message.Message,), { 'DESCRIPTOR' : _SIGMOIDFOCALCLASSIFICATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SigmoidFocalClassificationLoss) }) _sym_db.RegisterMessage(SigmoidFocalClassificationLoss) WeightedSoftmaxClassificationLoss = 
_reflection.GeneratedProtocolMessageType('WeightedSoftmaxClassificationLoss', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTEDSOFTMAXCLASSIFICATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightedSoftmaxClassificationLoss) }) _sym_db.RegisterMessage(WeightedSoftmaxClassificationLoss) WeightedSoftmaxClassificationAgainstLogitsLoss = _reflection.GeneratedProtocolMessageType('WeightedSoftmaxClassificationAgainstLogitsLoss', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTEDSOFTMAXCLASSIFICATIONAGAINSTLOGITSLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightedSoftmaxClassificationAgainstLogitsLoss) }) _sym_db.RegisterMessage(WeightedSoftmaxClassificationAgainstLogitsLoss) BootstrappedSigmoidClassificationLoss = _reflection.GeneratedProtocolMessageType('BootstrappedSigmoidClassificationLoss', (_message.Message,), { 'DESCRIPTOR' : _BOOTSTRAPPEDSIGMOIDCLASSIFICATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.BootstrappedSigmoidClassificationLoss) }) _sym_db.RegisterMessage(BootstrappedSigmoidClassificationLoss) PenaltyReducedLogisticFocalLoss = _reflection.GeneratedProtocolMessageType('PenaltyReducedLogisticFocalLoss', (_message.Message,), { 'DESCRIPTOR' : _PENALTYREDUCEDLOGISTICFOCALLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.PenaltyReducedLogisticFocalLoss) }) _sym_db.RegisterMessage(PenaltyReducedLogisticFocalLoss) HardExampleMiner = _reflection.GeneratedProtocolMessageType('HardExampleMiner', (_message.Message,), { 'DESCRIPTOR' : _HARDEXAMPLEMINER, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.HardExampleMiner) }) _sym_db.RegisterMessage(HardExampleMiner) RandomExampleSampler = _reflection.GeneratedProtocolMessageType('RandomExampleSampler', (_message.Message,), { 'DESCRIPTOR' : _RANDOMEXAMPLESAMPLER, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomExampleSampler) }) _sym_db.RegisterMessage(RandomExampleSampler) WeightedDiceClassificationLoss = _reflection.GeneratedProtocolMessageType('WeightedDiceClassificationLoss', (_message.Message,), { 'DESCRIPTOR' : _WEIGHTEDDICECLASSIFICATIONLOSS, '__module__' : 'object_detection.protos.losses_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.WeightedDiceClassificationLoss) }) _sym_db.RegisterMessage(WeightedDiceClassificationLoss) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/losses_pb2.py
losses_pb2.py
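The module above is the generated descriptor code for the Loss configuration messages. A minimal usage sketch follows, assuming the module is importable as `object_detection.protos.losses_pb2`; the `classification_loss` field on `Loss` and the `gamma` parameter of the focal loss are taken from the standard losses.proto and are not spelled out verbatim in the descriptor dump above.

from object_detection.protos import losses_pb2

# Build a Loss config and select one member of the 'classification_loss' oneof.
loss = losses_pb2.Loss()
loss.classification_loss.weighted_sigmoid_focal.gamma = 2.0  # assumed parameter name
print(loss.classification_loss.WhichOneof('classification_loss'))
# -> 'weighted_sigmoid_focal'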
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/calibration.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/calibration.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n)object_detection/protos/calibration.proto\x12\x17object_detection.protos\"\xe4\x03\n\x11\x43\x61librationConfig\x12P\n\x16\x66unction_approximation\x18\x01 \x01(\x0b\x32..object_detection.protos.FunctionApproximationH\x00\x12\x62\n class_id_function_approximations\x18\x02 \x01(\x0b\x32\x36.object_detection.protos.ClassIdFunctionApproximationsH\x00\x12J\n\x13sigmoid_calibration\x18\x03 \x01(\x0b\x32+.object_detection.protos.SigmoidCalibrationH\x00\x12\\\n\x1d\x63lass_id_sigmoid_calibrations\x18\x04 \x01(\x0b\x32\x33.object_detection.protos.ClassIdSigmoidCalibrationsH\x00\x12\x61\n\x1ftemperature_scaling_calibration\x18\x05 \x01(\x0b\x32\x36.object_detection.protos.TemperatureScalingCalibrationH\x00\x42\x0c\n\ncalibrator\"L\n\x15\x46unctionApproximation\x12\x33\n\tx_y_pairs\x18\x01 \x01(\x0b\x32 .object_detection.protos.XYPairs\"\xe9\x01\n\x1d\x43lassIdFunctionApproximations\x12l\n\x15\x63lass_id_xy_pairs_map\x18\x01 \x03(\x0b\x32M.object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry\x1aZ\n\x16\x43lassIdXyPairsMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12/\n\x05value\x18\x02 \x01(\x0b\x32 .object_detection.protos.XYPairs:\x02\x38\x01\"\\\n\x12SigmoidCalibration\x12\x46\n\x12sigmoid_parameters\x18\x01 \x01(\x0b\x32*.object_detection.protos.SigmoidParameters\"\x8b\x02\n\x1a\x43lassIdSigmoidCalibrations\x12}\n\x1f\x63lass_id_sigmoid_parameters_map\x18\x01 \x03(\x0b\x32T.object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry\x1an\n ClassIdSigmoidParametersMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x39\n\x05value\x18\x02 \x01(\x0b\x32*.object_detection.protos.SigmoidParameters:\x02\x38\x01\"/\n\x1dTemperatureScalingCalibration\x12\x0e\n\x06scaler\x18\x01 \x01(\x02\"\xab\x01\n\x07XYPairs\x12\x39\n\x08x_y_pair\x18\x01 \x03(\x0b\x32\'.object_detection.protos.XYPairs.XYPair\x12\x45\n\x12training_data_type\x18\x02 \x01(\x0e\x32).object_detection.protos.TrainingDataType\x1a\x1e\n\x06XYPair\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"0\n\x11SigmoidParameters\x12\r\n\x01\x61\x18\x01 \x01(\x02:\x02-1\x12\x0c\n\x01\x62\x18\x02 \x01(\x02:\x01\x30*N\n\x10TrainingDataType\x12\x15\n\x11\x44\x41TA_TYPE_UNKNOWN\x10\x00\x12\x0f\n\x0b\x41LL_CLASSES\x10\x01\x12\x12\n\x0e\x43LASS_SPECIFIC\x10\x02' ) _TRAININGDATATYPE = _descriptor.EnumDescriptor( name='TrainingDataType', full_name='object_detection.protos.TrainingDataType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DATA_TYPE_UNKNOWN', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ALL_CLASSES', index=1, number=1, serialized_options=None, type=None, 
create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CLASS_SPECIFIC', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1508, serialized_end=1586, ) _sym_db.RegisterEnumDescriptor(_TRAININGDATATYPE) TrainingDataType = enum_type_wrapper.EnumTypeWrapper(_TRAININGDATATYPE) DATA_TYPE_UNKNOWN = 0 ALL_CLASSES = 1 CLASS_SPECIFIC = 2 _CALIBRATIONCONFIG = _descriptor.Descriptor( name='CalibrationConfig', full_name='object_detection.protos.CalibrationConfig', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='function_approximation', full_name='object_detection.protos.CalibrationConfig.function_approximation', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='class_id_function_approximations', full_name='object_detection.protos.CalibrationConfig.class_id_function_approximations', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='sigmoid_calibration', full_name='object_detection.protos.CalibrationConfig.sigmoid_calibration', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='class_id_sigmoid_calibrations', full_name='object_detection.protos.CalibrationConfig.class_id_sigmoid_calibrations', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='temperature_scaling_calibration', full_name='object_detection.protos.CalibrationConfig.temperature_scaling_calibration', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='calibrator', full_name='object_detection.protos.CalibrationConfig.calibrator', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=71, serialized_end=555, ) _FUNCTIONAPPROXIMATION = _descriptor.Descriptor( name='FunctionApproximation', full_name='object_detection.protos.FunctionApproximation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='x_y_pairs', 
full_name='object_detection.protos.FunctionApproximation.x_y_pairs', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=557, serialized_end=633, ) _CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY = _descriptor.Descriptor( name='ClassIdXyPairsMapEntry', full_name='object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry.key', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=b'8\001', is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=779, serialized_end=869, ) _CLASSIDFUNCTIONAPPROXIMATIONS = _descriptor.Descriptor( name='ClassIdFunctionApproximations', full_name='object_detection.protos.ClassIdFunctionApproximations', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='class_id_xy_pairs_map', full_name='object_detection.protos.ClassIdFunctionApproximations.class_id_xy_pairs_map', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=636, serialized_end=869, ) _SIGMOIDCALIBRATION = _descriptor.Descriptor( name='SigmoidCalibration', full_name='object_detection.protos.SigmoidCalibration', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='sigmoid_parameters', full_name='object_detection.protos.SigmoidCalibration.sigmoid_parameters', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, 
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=871, serialized_end=963, ) _CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY = _descriptor.Descriptor( name='ClassIdSigmoidParametersMapEntry', full_name='object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry.key', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=b'8\001', is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1123, serialized_end=1233, ) _CLASSIDSIGMOIDCALIBRATIONS = _descriptor.Descriptor( name='ClassIdSigmoidCalibrations', full_name='object_detection.protos.ClassIdSigmoidCalibrations', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='class_id_sigmoid_parameters_map', full_name='object_detection.protos.ClassIdSigmoidCalibrations.class_id_sigmoid_parameters_map', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=966, serialized_end=1233, ) _TEMPERATURESCALINGCALIBRATION = _descriptor.Descriptor( name='TemperatureScalingCalibration', full_name='object_detection.protos.TemperatureScalingCalibration', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='scaler', full_name='object_detection.protos.TemperatureScalingCalibration.scaler', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1235, serialized_end=1282, ) _XYPAIRS_XYPAIR = _descriptor.Descriptor( name='XYPair', full_name='object_detection.protos.XYPairs.XYPair', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name='x', full_name='object_detection.protos.XYPairs.XYPair.x', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='y', full_name='object_detection.protos.XYPairs.XYPair.y', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1426, serialized_end=1456, ) _XYPAIRS = _descriptor.Descriptor( name='XYPairs', full_name='object_detection.protos.XYPairs', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='x_y_pair', full_name='object_detection.protos.XYPairs.x_y_pair', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='training_data_type', full_name='object_detection.protos.XYPairs.training_data_type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_XYPAIRS_XYPAIR, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1285, serialized_end=1456, ) _SIGMOIDPARAMETERS = _descriptor.Descriptor( name='SigmoidParameters', full_name='object_detection.protos.SigmoidParameters', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='a', full_name='object_detection.protos.SigmoidParameters.a', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(-1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='b', full_name='object_detection.protos.SigmoidParameters.b', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1458, serialized_end=1506, ) _CALIBRATIONCONFIG.fields_by_name['function_approximation'].message_type = _FUNCTIONAPPROXIMATION _CALIBRATIONCONFIG.fields_by_name['class_id_function_approximations'].message_type = _CLASSIDFUNCTIONAPPROXIMATIONS 
_CALIBRATIONCONFIG.fields_by_name['sigmoid_calibration'].message_type = _SIGMOIDCALIBRATION _CALIBRATIONCONFIG.fields_by_name['class_id_sigmoid_calibrations'].message_type = _CLASSIDSIGMOIDCALIBRATIONS _CALIBRATIONCONFIG.fields_by_name['temperature_scaling_calibration'].message_type = _TEMPERATURESCALINGCALIBRATION _CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( _CALIBRATIONCONFIG.fields_by_name['function_approximation']) _CALIBRATIONCONFIG.fields_by_name['function_approximation'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] _CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( _CALIBRATIONCONFIG.fields_by_name['class_id_function_approximations']) _CALIBRATIONCONFIG.fields_by_name['class_id_function_approximations'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] _CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( _CALIBRATIONCONFIG.fields_by_name['sigmoid_calibration']) _CALIBRATIONCONFIG.fields_by_name['sigmoid_calibration'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] _CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( _CALIBRATIONCONFIG.fields_by_name['class_id_sigmoid_calibrations']) _CALIBRATIONCONFIG.fields_by_name['class_id_sigmoid_calibrations'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] _CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( _CALIBRATIONCONFIG.fields_by_name['temperature_scaling_calibration']) _CALIBRATIONCONFIG.fields_by_name['temperature_scaling_calibration'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] _FUNCTIONAPPROXIMATION.fields_by_name['x_y_pairs'].message_type = _XYPAIRS _CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY.fields_by_name['value'].message_type = _XYPAIRS _CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY.containing_type = _CLASSIDFUNCTIONAPPROXIMATIONS _CLASSIDFUNCTIONAPPROXIMATIONS.fields_by_name['class_id_xy_pairs_map'].message_type = _CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY _SIGMOIDCALIBRATION.fields_by_name['sigmoid_parameters'].message_type = _SIGMOIDPARAMETERS _CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY.fields_by_name['value'].message_type = _SIGMOIDPARAMETERS _CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY.containing_type = _CLASSIDSIGMOIDCALIBRATIONS _CLASSIDSIGMOIDCALIBRATIONS.fields_by_name['class_id_sigmoid_parameters_map'].message_type = _CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY _XYPAIRS_XYPAIR.containing_type = _XYPAIRS _XYPAIRS.fields_by_name['x_y_pair'].message_type = _XYPAIRS_XYPAIR _XYPAIRS.fields_by_name['training_data_type'].enum_type = _TRAININGDATATYPE DESCRIPTOR.message_types_by_name['CalibrationConfig'] = _CALIBRATIONCONFIG DESCRIPTOR.message_types_by_name['FunctionApproximation'] = _FUNCTIONAPPROXIMATION DESCRIPTOR.message_types_by_name['ClassIdFunctionApproximations'] = _CLASSIDFUNCTIONAPPROXIMATIONS DESCRIPTOR.message_types_by_name['SigmoidCalibration'] = _SIGMOIDCALIBRATION DESCRIPTOR.message_types_by_name['ClassIdSigmoidCalibrations'] = _CLASSIDSIGMOIDCALIBRATIONS DESCRIPTOR.message_types_by_name['TemperatureScalingCalibration'] = _TEMPERATURESCALINGCALIBRATION DESCRIPTOR.message_types_by_name['XYPairs'] = _XYPAIRS DESCRIPTOR.message_types_by_name['SigmoidParameters'] = _SIGMOIDPARAMETERS DESCRIPTOR.enum_types_by_name['TrainingDataType'] = _TRAININGDATATYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) CalibrationConfig = _reflection.GeneratedProtocolMessageType('CalibrationConfig', 
(_message.Message,), { 'DESCRIPTOR' : _CALIBRATIONCONFIG, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CalibrationConfig) }) _sym_db.RegisterMessage(CalibrationConfig) FunctionApproximation = _reflection.GeneratedProtocolMessageType('FunctionApproximation', (_message.Message,), { 'DESCRIPTOR' : _FUNCTIONAPPROXIMATION, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.FunctionApproximation) }) _sym_db.RegisterMessage(FunctionApproximation) ClassIdFunctionApproximations = _reflection.GeneratedProtocolMessageType('ClassIdFunctionApproximations', (_message.Message,), { 'ClassIdXyPairsMapEntry' : _reflection.GeneratedProtocolMessageType('ClassIdXyPairsMapEntry', (_message.Message,), { 'DESCRIPTOR' : _CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry) }) , 'DESCRIPTOR' : _CLASSIDFUNCTIONAPPROXIMATIONS, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ClassIdFunctionApproximations) }) _sym_db.RegisterMessage(ClassIdFunctionApproximations) _sym_db.RegisterMessage(ClassIdFunctionApproximations.ClassIdXyPairsMapEntry) SigmoidCalibration = _reflection.GeneratedProtocolMessageType('SigmoidCalibration', (_message.Message,), { 'DESCRIPTOR' : _SIGMOIDCALIBRATION, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SigmoidCalibration) }) _sym_db.RegisterMessage(SigmoidCalibration) ClassIdSigmoidCalibrations = _reflection.GeneratedProtocolMessageType('ClassIdSigmoidCalibrations', (_message.Message,), { 'ClassIdSigmoidParametersMapEntry' : _reflection.GeneratedProtocolMessageType('ClassIdSigmoidParametersMapEntry', (_message.Message,), { 'DESCRIPTOR' : _CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry) }) , 'DESCRIPTOR' : _CLASSIDSIGMOIDCALIBRATIONS, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ClassIdSigmoidCalibrations) }) _sym_db.RegisterMessage(ClassIdSigmoidCalibrations) _sym_db.RegisterMessage(ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry) TemperatureScalingCalibration = _reflection.GeneratedProtocolMessageType('TemperatureScalingCalibration', (_message.Message,), { 'DESCRIPTOR' : _TEMPERATURESCALINGCALIBRATION, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.TemperatureScalingCalibration) }) _sym_db.RegisterMessage(TemperatureScalingCalibration) XYPairs = _reflection.GeneratedProtocolMessageType('XYPairs', (_message.Message,), { 'XYPair' : _reflection.GeneratedProtocolMessageType('XYPair', (_message.Message,), { 'DESCRIPTOR' : _XYPAIRS_XYPAIR, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.XYPairs.XYPair) }) , 'DESCRIPTOR' : _XYPAIRS, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.XYPairs) }) _sym_db.RegisterMessage(XYPairs) 
_sym_db.RegisterMessage(XYPairs.XYPair) SigmoidParameters = _reflection.GeneratedProtocolMessageType('SigmoidParameters', (_message.Message,), { 'DESCRIPTOR' : _SIGMOIDPARAMETERS, '__module__' : 'object_detection.protos.calibration_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SigmoidParameters) }) _sym_db.RegisterMessage(SigmoidParameters) _CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY._options = None _CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY._options = None # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/calibration_pb2.py
calibration_pb2.py
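A brief usage sketch for the calibration messages defined above; the field names (function_approximation, x_y_pairs, x_y_pair, x, y) and the 'calibrator' oneof all appear in the descriptor dump, so only the import path is assumed.

from object_detection.protos import calibration_pb2

# Populate the 'calibrator' oneof with a piecewise-linear function approximation.
calib = calibration_pb2.CalibrationConfig()
pair = calib.function_approximation.x_y_pairs.x_y_pair.add()
pair.x = 0.5
pair.y = 0.7
print(calib.WhichOneof('calibrator'))
# -> 'function_approximation'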
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/ssd.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import anchor_generator_pb2 as object__detection_dot_protos_dot_anchor__generator__pb2 from object_detection.protos import box_coder_pb2 as object__detection_dot_protos_dot_box__coder__pb2 from object_detection.protos import box_predictor_pb2 as object__detection_dot_protos_dot_box__predictor__pb2 from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2 from object_detection.protos import image_resizer_pb2 as object__detection_dot_protos_dot_image__resizer__pb2 from object_detection.protos import losses_pb2 as object__detection_dot_protos_dot_losses__pb2 from object_detection.protos import matcher_pb2 as object__detection_dot_protos_dot_matcher__pb2 from object_detection.protos import post_processing_pb2 as object__detection_dot_protos_dot_post__processing__pb2 from object_detection.protos import region_similarity_calculator_pb2 as object__detection_dot_protos_dot_region__similarity__calculator__pb2 from object_detection.protos import fpn_pb2 as object__detection_dot_protos_dot_fpn__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/ssd.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n!object_detection/protos/ssd.proto\x12\x17object_detection.protos\x1a.object_detection/protos/anchor_generator.proto\x1a\'object_detection/protos/box_coder.proto\x1a+object_detection/protos/box_predictor.proto\x1a)object_detection/protos/hyperparams.proto\x1a+object_detection/protos/image_resizer.proto\x1a$object_detection/protos/losses.proto\x1a%object_detection/protos/matcher.proto\x1a-object_detection/protos/post_processing.proto\x1a:object_detection/protos/region_similarity_calculator.proto\x1a!object_detection/protos/fpn.proto\"\xdc\x0b\n\x03Ssd\x12\x13\n\x0bnum_classes\x18\x01 \x01(\x05\x12<\n\rimage_resizer\x18\x02 \x01(\x0b\x32%.object_detection.protos.ImageResizer\x12G\n\x11\x66\x65\x61ture_extractor\x18\x03 \x01(\x0b\x32,.object_detection.protos.SsdFeatureExtractor\x12\x34\n\tbox_coder\x18\x04 \x01(\x0b\x32!.object_detection.protos.BoxCoder\x12\x31\n\x07matcher\x18\x05 \x01(\x0b\x32 .object_detection.protos.Matcher\x12R\n\x15similarity_calculator\x18\x06 \x01(\x0b\x32\x33.object_detection.protos.RegionSimilarityCalculator\x12)\n\x1a\x65ncode_background_as_zeros\x18\x0c \x01(\x08:\x05\x66\x61lse\x12 \n\x15negative_class_weight\x18\r \x01(\x02:\x01\x31\x12<\n\rbox_predictor\x18\x07 \x01(\x0b\x32%.object_detection.protos.BoxPredictor\x12\x42\n\x10\x61nchor_generator\x18\x08 \x01(\x0b\x32(.object_detection.protos.AnchorGenerator\x12@\n\x0fpost_processing\x18\t \x01(\x0b\x32\'.object_detection.protos.PostProcessing\x12+\n\x1dnormalize_loss_by_num_matches\x18\n \x01(\x08:\x04true\x12-\n\x1enormalize_loc_loss_by_codesize\x18\x0e \x01(\x08:\x05\x66\x61lse\x12+\n\x04loss\x18\x0b \x01(\x0b\x32\x1d.object_detection.protos.Loss\x12\x1f\n\x10\x66reeze_batchnorm\x18\x10 \x01(\x08:\x05\x66\x61lse\x12\'\n\x18inplace_batchnorm_update\x18\x0f 
\x01(\x08:\x05\x66\x61lse\x12\"\n\x14\x61\x64\x64_background_class\x18\x15 \x01(\x08:\x04true\x12(\n\x19\x65xplicit_background_class\x18\x18 \x01(\x08:\x05\x66\x61lse\x12)\n\x1ause_confidences_as_targets\x18\x16 \x01(\x08:\x05\x66\x61lse\x12\"\n\x17implicit_example_weight\x18\x17 \x01(\x02:\x01\x31\x12\x33\n$return_raw_detections_during_predict\x18\x1a \x01(\x08:\x05\x66\x61lse\x12?\n\x10mask_head_config\x18\x19 \x01(\x0b\x32%.object_detection.protos.Ssd.MaskHead\x1a\x84\x03\n\x08MaskHead\x12\x17\n\x0bmask_height\x18\x01 \x01(\x05:\x02\x31\x35\x12\x16\n\nmask_width\x18\x02 \x01(\x05:\x02\x31\x35\x12&\n\x18masks_are_class_agnostic\x18\x03 \x01(\x08:\x04true\x12\'\n\x1amask_prediction_conv_depth\x18\x04 \x01(\x05:\x03\x32\x35\x36\x12*\n\x1fmask_prediction_num_conv_layers\x18\x05 \x01(\x05:\x01\x32\x12+\n\x1c\x63onvolve_then_upsample_masks\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x10mask_loss_weight\x18\x07 \x01(\x02:\x01\x35\x12!\n\x15mask_loss_sample_size\x18\x08 \x01(\x05:\x02\x31\x36\x12>\n\x10\x63onv_hyperparams\x18\t \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x1d\n\x11initial_crop_size\x18\n \x01(\x05:\x02\x31\x35\"\xad\x04\n\x13SsdFeatureExtractor\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x1b\n\x10\x64\x65pth_multiplier\x18\x02 \x01(\x02:\x01\x31\x12\x15\n\tmin_depth\x18\x03 \x01(\x05:\x02\x31\x36\x12>\n\x10\x63onv_hyperparams\x18\x04 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12:\n+override_base_feature_extractor_hyperparams\x18\t \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0fpad_to_multiple\x18\x05 \x01(\x05:\x01\x31\x12#\n\x14use_explicit_padding\x18\x07 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x08 \x01(\x08:\x05\x66\x61lse\x12>\n\x03\x66pn\x18\n \x01(\x0b\x32/.object_detection.protos.FeaturePyramidNetworksH\x00\x12M\n\x05\x62ifpn\x18\x13 \x01(\x0b\x32<.object_detection.protos.BidirectionalFeaturePyramidNetworksH\x00\x12\x34\n%replace_preprocessor_with_placeholder\x18\x0b \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnum_layers\x18\x0c \x01(\x05:\x01\x36\x42\x17\n\x15\x66\x65\x61ture_pyramid_oneofJ\x04\x08\x06\x10\x07' , dependencies=[object__detection_dot_protos_dot_anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_box__coder__pb2.DESCRIPTOR,object__detection_dot_protos_dot_box__predictor__pb2.DESCRIPTOR,object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,object__detection_dot_protos_dot_image__resizer__pb2.DESCRIPTOR,object__detection_dot_protos_dot_losses__pb2.DESCRIPTOR,object__detection_dot_protos_dot_matcher__pb2.DESCRIPTOR,object__detection_dot_protos_dot_post__processing__pb2.DESCRIPTOR,object__detection_dot_protos_dot_region__similarity__calculator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_fpn__pb2.DESCRIPTOR,]) _SSD_MASKHEAD = _descriptor.Descriptor( name='MaskHead', full_name='object_detection.protos.Ssd.MaskHead', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='mask_height', full_name='object_detection.protos.Ssd.MaskHead.mask_height', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=15, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_width', full_name='object_detection.protos.Ssd.MaskHead.mask_width', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=15, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='masks_are_class_agnostic', full_name='object_detection.protos.Ssd.MaskHead.masks_are_class_agnostic', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_prediction_conv_depth', full_name='object_detection.protos.Ssd.MaskHead.mask_prediction_conv_depth', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_prediction_num_conv_layers', full_name='object_detection.protos.Ssd.MaskHead.mask_prediction_num_conv_layers', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convolve_then_upsample_masks', full_name='object_detection.protos.Ssd.MaskHead.convolve_then_upsample_masks', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_loss_weight', full_name='object_detection.protos.Ssd.MaskHead.mask_loss_weight', index=6, number=7, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_loss_sample_size', full_name='object_detection.protos.Ssd.MaskHead.mask_loss_sample_size', index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='conv_hyperparams', full_name='object_detection.protos.Ssd.MaskHead.conv_hyperparams', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='initial_crop_size', full_name='object_detection.protos.Ssd.MaskHead.initial_crop_size', index=9, number=10, type=5, cpp_type=1, label=1, has_default_value=True, default_value=15, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, 
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1616, serialized_end=2004, ) _SSD = _descriptor.Descriptor( name='Ssd', full_name='object_detection.protos.Ssd', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_classes', full_name='object_detection.protos.Ssd.num_classes', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='image_resizer', full_name='object_detection.protos.Ssd.image_resizer', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='feature_extractor', full_name='object_detection.protos.Ssd.feature_extractor', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_coder', full_name='object_detection.protos.Ssd.box_coder', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='matcher', full_name='object_detection.protos.Ssd.matcher', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='similarity_calculator', full_name='object_detection.protos.Ssd.similarity_calculator', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='encode_background_as_zeros', full_name='object_detection.protos.Ssd.encode_background_as_zeros', index=6, number=12, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='negative_class_weight', full_name='object_detection.protos.Ssd.negative_class_weight', index=7, number=13, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_predictor', 
full_name='object_detection.protos.Ssd.box_predictor', index=8, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='anchor_generator', full_name='object_detection.protos.Ssd.anchor_generator', index=9, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='post_processing', full_name='object_detection.protos.Ssd.post_processing', index=10, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='normalize_loss_by_num_matches', full_name='object_detection.protos.Ssd.normalize_loss_by_num_matches', index=11, number=10, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='normalize_loc_loss_by_codesize', full_name='object_detection.protos.Ssd.normalize_loc_loss_by_codesize', index=12, number=14, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='loss', full_name='object_detection.protos.Ssd.loss', index=13, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='freeze_batchnorm', full_name='object_detection.protos.Ssd.freeze_batchnorm', index=14, number=16, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='inplace_batchnorm_update', full_name='object_detection.protos.Ssd.inplace_batchnorm_update', index=15, number=15, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='add_background_class', full_name='object_detection.protos.Ssd.add_background_class', index=16, number=21, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='explicit_background_class', full_name='object_detection.protos.Ssd.explicit_background_class', index=17, number=24, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_confidences_as_targets', full_name='object_detection.protos.Ssd.use_confidences_as_targets', index=18, number=22, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='implicit_example_weight', full_name='object_detection.protos.Ssd.implicit_example_weight', index=19, number=23, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='return_raw_detections_during_predict', full_name='object_detection.protos.Ssd.return_raw_detections_during_predict', index=20, number=26, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_head_config', full_name='object_detection.protos.Ssd.mask_head_config', index=21, number=25, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_SSD_MASKHEAD, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=504, serialized_end=2004, ) _SSDFEATUREEXTRACTOR = _descriptor.Descriptor( name='SsdFeatureExtractor', full_name='object_detection.protos.SsdFeatureExtractor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='type', full_name='object_detection.protos.SsdFeatureExtractor.type', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='depth_multiplier', full_name='object_detection.protos.SsdFeatureExtractor.depth_multiplier', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_depth', full_name='object_detection.protos.SsdFeatureExtractor.min_depth', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='conv_hyperparams', full_name='object_detection.protos.SsdFeatureExtractor.conv_hyperparams', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='override_base_feature_extractor_hyperparams', full_name='object_detection.protos.SsdFeatureExtractor.override_base_feature_extractor_hyperparams', index=4, number=9, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_to_multiple', full_name='object_detection.protos.SsdFeatureExtractor.pad_to_multiple', index=5, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_explicit_padding', full_name='object_detection.protos.SsdFeatureExtractor.use_explicit_padding', index=6, number=7, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_depthwise', full_name='object_detection.protos.SsdFeatureExtractor.use_depthwise', index=7, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fpn', full_name='object_detection.protos.SsdFeatureExtractor.fpn', index=8, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bifpn', full_name='object_detection.protos.SsdFeatureExtractor.bifpn', index=9, number=19, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='replace_preprocessor_with_placeholder', full_name='object_detection.protos.SsdFeatureExtractor.replace_preprocessor_with_placeholder', index=10, number=11, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_layers', 
full_name='object_detection.protos.SsdFeatureExtractor.num_layers', index=11, number=12, type=5, cpp_type=1, label=1, has_default_value=True, default_value=6, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='feature_pyramid_oneof', full_name='object_detection.protos.SsdFeatureExtractor.feature_pyramid_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=2007, serialized_end=2564, ) _SSD_MASKHEAD.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS _SSD_MASKHEAD.containing_type = _SSD _SSD.fields_by_name['image_resizer'].message_type = object__detection_dot_protos_dot_image__resizer__pb2._IMAGERESIZER _SSD.fields_by_name['feature_extractor'].message_type = _SSDFEATUREEXTRACTOR _SSD.fields_by_name['box_coder'].message_type = object__detection_dot_protos_dot_box__coder__pb2._BOXCODER _SSD.fields_by_name['matcher'].message_type = object__detection_dot_protos_dot_matcher__pb2._MATCHER _SSD.fields_by_name['similarity_calculator'].message_type = object__detection_dot_protos_dot_region__similarity__calculator__pb2._REGIONSIMILARITYCALCULATOR _SSD.fields_by_name['box_predictor'].message_type = object__detection_dot_protos_dot_box__predictor__pb2._BOXPREDICTOR _SSD.fields_by_name['anchor_generator'].message_type = object__detection_dot_protos_dot_anchor__generator__pb2._ANCHORGENERATOR _SSD.fields_by_name['post_processing'].message_type = object__detection_dot_protos_dot_post__processing__pb2._POSTPROCESSING _SSD.fields_by_name['loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOSS _SSD.fields_by_name['mask_head_config'].message_type = _SSD_MASKHEAD _SSDFEATUREEXTRACTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS _SSDFEATUREEXTRACTOR.fields_by_name['fpn'].message_type = object__detection_dot_protos_dot_fpn__pb2._FEATUREPYRAMIDNETWORKS _SSDFEATUREEXTRACTOR.fields_by_name['bifpn'].message_type = object__detection_dot_protos_dot_fpn__pb2._BIDIRECTIONALFEATUREPYRAMIDNETWORKS _SSDFEATUREEXTRACTOR.oneofs_by_name['feature_pyramid_oneof'].fields.append( _SSDFEATUREEXTRACTOR.fields_by_name['fpn']) _SSDFEATUREEXTRACTOR.fields_by_name['fpn'].containing_oneof = _SSDFEATUREEXTRACTOR.oneofs_by_name['feature_pyramid_oneof'] _SSDFEATUREEXTRACTOR.oneofs_by_name['feature_pyramid_oneof'].fields.append( _SSDFEATUREEXTRACTOR.fields_by_name['bifpn']) _SSDFEATUREEXTRACTOR.fields_by_name['bifpn'].containing_oneof = _SSDFEATUREEXTRACTOR.oneofs_by_name['feature_pyramid_oneof'] DESCRIPTOR.message_types_by_name['Ssd'] = _SSD DESCRIPTOR.message_types_by_name['SsdFeatureExtractor'] = _SSDFEATUREEXTRACTOR _sym_db.RegisterFileDescriptor(DESCRIPTOR) Ssd = _reflection.GeneratedProtocolMessageType('Ssd', (_message.Message,), { 'MaskHead' : _reflection.GeneratedProtocolMessageType('MaskHead', (_message.Message,), { 'DESCRIPTOR' : _SSD_MASKHEAD, '__module__' : 'object_detection.protos.ssd_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Ssd.MaskHead) }) , 'DESCRIPTOR' : _SSD, '__module__' : 'object_detection.protos.ssd_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Ssd) }) 
_sym_db.RegisterMessage(Ssd) _sym_db.RegisterMessage(Ssd.MaskHead) SsdFeatureExtractor = _reflection.GeneratedProtocolMessageType('SsdFeatureExtractor', (_message.Message,), { 'DESCRIPTOR' : _SSDFEATUREEXTRACTOR, '__module__' : 'object_detection.protos.ssd_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SsdFeatureExtractor) }) _sym_db.RegisterMessage(SsdFeatureExtractor) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/ssd_pb2.py
ssd_pb2.py
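A minimal usage sketch for the generated ssd_pb2 module above (assuming the 123_object_detection package is installed so the import path resolves); it only touches fields declared in the SsdFeatureExtractor descriptor, including the feature_pyramid_oneof:

from object_detection.protos import ssd_pb2

extractor = ssd_pb2.SsdFeatureExtractor()
extractor.use_depthwise = True      # bool field, default False per the descriptor
extractor.pad_to_multiple = 32      # int32 field, default 1
extractor.num_layers = 6            # int32 field, default 6
extractor.fpn.SetInParent()         # selects the 'fpn' branch of feature_pyramid_oneof
print(extractor.WhichOneof('feature_pyramid_oneof'))  # -> 'fpn'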
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/argmax_matcher.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/argmax_matcher.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n,object_detection/protos/argmax_matcher.proto\x12\x17object_detection.protos\"\xec\x01\n\rArgMaxMatcher\x12\x1e\n\x11matched_threshold\x18\x01 \x01(\x02:\x03\x30.5\x12 \n\x13unmatched_threshold\x18\x02 \x01(\x02:\x03\x30.5\x12 \n\x11ignore_thresholds\x18\x03 \x01(\x08:\x05\x66\x61lse\x12,\n\x1enegatives_lower_than_unmatched\x18\x04 \x01(\x08:\x04true\x12\'\n\x18\x66orce_match_for_each_row\x18\x05 \x01(\x08:\x05\x66\x61lse\x12 \n\x11use_matmul_gather\x18\x06 \x01(\x08:\x05\x66\x61lse' ) _ARGMAXMATCHER = _descriptor.Descriptor( name='ArgMaxMatcher', full_name='object_detection.protos.ArgMaxMatcher', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='matched_threshold', full_name='object_detection.protos.ArgMaxMatcher.matched_threshold', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='unmatched_threshold', full_name='object_detection.protos.ArgMaxMatcher.unmatched_threshold', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ignore_thresholds', full_name='object_detection.protos.ArgMaxMatcher.ignore_thresholds', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='negatives_lower_than_unmatched', full_name='object_detection.protos.ArgMaxMatcher.negatives_lower_than_unmatched', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='force_match_for_each_row', full_name='object_detection.protos.ArgMaxMatcher.force_match_for_each_row', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_matmul_gather', 
full_name='object_detection.protos.ArgMaxMatcher.use_matmul_gather', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=74, serialized_end=310, ) DESCRIPTOR.message_types_by_name['ArgMaxMatcher'] = _ARGMAXMATCHER _sym_db.RegisterFileDescriptor(DESCRIPTOR) ArgMaxMatcher = _reflection.GeneratedProtocolMessageType('ArgMaxMatcher', (_message.Message,), { 'DESCRIPTOR' : _ARGMAXMATCHER, '__module__' : 'object_detection.protos.argmax_matcher_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ArgMaxMatcher) }) _sym_db.RegisterMessage(ArgMaxMatcher) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/argmax_matcher_pb2.py
argmax_matcher_pb2.py
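A hedged example of round-tripping the generated ArgMaxMatcher message through text_format (the values are illustrative; the field names and defaults come from the descriptor above):

from google.protobuf import text_format
from object_detection.protos import argmax_matcher_pb2

matcher = text_format.Parse(
    'matched_threshold: 0.75 '
    'unmatched_threshold: 0.25 '
    'force_match_for_each_row: true',
    argmax_matcher_pb2.ArgMaxMatcher())
print(matcher.negatives_lower_than_unmatched)   # True, the declared default
print(text_format.MessageToString(matcher))     # text-proto form of the parsed config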
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/keypoint_box_coder.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/keypoint_box_coder.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n0object_detection/protos/keypoint_box_coder.proto\x12\x17object_detection.protos\"\x84\x01\n\x10KeypointBoxCoder\x12\x15\n\rnum_keypoints\x18\x01 \x01(\x05\x12\x13\n\x07y_scale\x18\x02 \x01(\x02:\x02\x31\x30\x12\x13\n\x07x_scale\x18\x03 \x01(\x02:\x02\x31\x30\x12\x17\n\x0cheight_scale\x18\x04 \x01(\x02:\x01\x35\x12\x16\n\x0bwidth_scale\x18\x05 \x01(\x02:\x01\x35' ) _KEYPOINTBOXCODER = _descriptor.Descriptor( name='KeypointBoxCoder', full_name='object_detection.protos.KeypointBoxCoder', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_keypoints', full_name='object_detection.protos.KeypointBoxCoder.num_keypoints', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='y_scale', full_name='object_detection.protos.KeypointBoxCoder.y_scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(10), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='x_scale', full_name='object_detection.protos.KeypointBoxCoder.x_scale', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(10), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height_scale', full_name='object_detection.protos.KeypointBoxCoder.height_scale', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width_scale', full_name='object_detection.protos.KeypointBoxCoder.width_scale', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=78, serialized_end=210, ) DESCRIPTOR.message_types_by_name['KeypointBoxCoder'] = _KEYPOINTBOXCODER 
_sym_db.RegisterFileDescriptor(DESCRIPTOR) KeypointBoxCoder = _reflection.GeneratedProtocolMessageType('KeypointBoxCoder', (_message.Message,), { 'DESCRIPTOR' : _KEYPOINTBOXCODER, '__module__' : 'object_detection.protos.keypoint_box_coder_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.KeypointBoxCoder) }) _sym_db.RegisterMessage(KeypointBoxCoder) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/keypoint_box_coder_pb2.py
keypoint_box_coder_pb2.py
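A small sketch of the generated KeypointBoxCoder config message (num_keypoints carries no default; the four scale fields fall back to the defaults encoded in the descriptor above):

from object_detection.protos import keypoint_box_coder_pb2

coder_cfg = keypoint_box_coder_pb2.KeypointBoxCoder(num_keypoints=2)
# y_scale/x_scale default to 10.0, height_scale/width_scale to 5.0.
print(coder_cfg.y_scale, coder_cfg.x_scale,
      coder_cfg.height_scale, coder_cfg.width_scale)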
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/target_assigner.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import box_coder_pb2 as object__detection_dot_protos_dot_box__coder__pb2 from object_detection.protos import matcher_pb2 as object__detection_dot_protos_dot_matcher__pb2 from object_detection.protos import region_similarity_calculator_pb2 as object__detection_dot_protos_dot_region__similarity__calculator__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/target_assigner.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n-object_detection/protos/target_assigner.proto\x12\x17object_detection.protos\x1a\'object_detection/protos/box_coder.proto\x1a%object_detection/protos/matcher.proto\x1a:object_detection/protos/region_similarity_calculator.proto\"\xcd\x01\n\x0eTargetAssigner\x12\x31\n\x07matcher\x18\x01 \x01(\x0b\x32 .object_detection.protos.Matcher\x12R\n\x15similarity_calculator\x18\x02 \x01(\x0b\x32\x33.object_detection.protos.RegionSimilarityCalculator\x12\x34\n\tbox_coder\x18\x03 \x01(\x0b\x32!.object_detection.protos.BoxCoder' , dependencies=[object__detection_dot_protos_dot_box__coder__pb2.DESCRIPTOR,object__detection_dot_protos_dot_matcher__pb2.DESCRIPTOR,object__detection_dot_protos_dot_region__similarity__calculator__pb2.DESCRIPTOR,]) _TARGETASSIGNER = _descriptor.Descriptor( name='TargetAssigner', full_name='object_detection.protos.TargetAssigner', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='matcher', full_name='object_detection.protos.TargetAssigner.matcher', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='similarity_calculator', full_name='object_detection.protos.TargetAssigner.similarity_calculator', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_coder', full_name='object_detection.protos.TargetAssigner.box_coder', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=215, serialized_end=420, ) _TARGETASSIGNER.fields_by_name['matcher'].message_type = object__detection_dot_protos_dot_matcher__pb2._MATCHER 
_TARGETASSIGNER.fields_by_name['similarity_calculator'].message_type = object__detection_dot_protos_dot_region__similarity__calculator__pb2._REGIONSIMILARITYCALCULATOR _TARGETASSIGNER.fields_by_name['box_coder'].message_type = object__detection_dot_protos_dot_box__coder__pb2._BOXCODER DESCRIPTOR.message_types_by_name['TargetAssigner'] = _TARGETASSIGNER _sym_db.RegisterFileDescriptor(DESCRIPTOR) TargetAssigner = _reflection.GeneratedProtocolMessageType('TargetAssigner', (_message.Message,), { 'DESCRIPTOR' : _TARGETASSIGNER, '__module__' : 'object_detection.protos.target_assigner_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.TargetAssigner) }) _sym_db.RegisterMessage(TargetAssigner) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/target_assigner_pb2.py
target_assigner_pb2.py
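A hedged sketch of composing the generated TargetAssigner message; only the three sub-message fields declared above are used, and SetInParent() merely marks each empty sub-message as present:

from object_detection.protos import target_assigner_pb2

cfg = target_assigner_pb2.TargetAssigner()
cfg.matcher.SetInParent()                # object_detection.protos.Matcher
cfg.similarity_calculator.SetInParent()  # RegionSimilarityCalculator
cfg.box_coder.SetInParent()              # BoxCoder
print(cfg.HasField('matcher'), cfg.HasField('box_coder'))  # True True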
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/center_net.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import image_resizer_pb2 as object__detection_dot_protos_dot_image__resizer__pb2 from object_detection.protos import losses_pb2 as object__detection_dot_protos_dot_losses__pb2 from object_detection.protos import post_processing_pb2 as object__detection_dot_protos_dot_post__processing__pb2 from object_detection.protos import preprocessor_pb2 as object__detection_dot_protos_dot_preprocessor__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/center_net.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n(object_detection/protos/center_net.proto\x12\x17object_detection.protos\x1a+object_detection/protos/image_resizer.proto\x1a$object_detection/protos/losses.proto\x1a-object_detection/protos/post_processing.proto\x1a*object_detection/protos/preprocessor.proto\"\xc9$\n\tCenterNet\x12\x13\n\x0bnum_classes\x18\x01 \x01(\x05\x12M\n\x11\x66\x65\x61ture_extractor\x18\x02 \x01(\x0b\x32\x32.object_detection.protos.CenterNetFeatureExtractor\x12<\n\rimage_resizer\x18\x03 \x01(\x0b\x32%.object_detection.protos.ImageResizer\x12\x1c\n\ruse_depthwise\x18\r \x01(\x08:\x05\x66\x61lse\x12%\n\x16\x63ompute_heatmap_sparse\x18\x0f \x01(\x08:\x05\x66\x61lse\x12Q\n\x15object_detection_task\x18\x04 \x01(\x0b\x32\x32.object_detection.protos.CenterNet.ObjectDetection\x12S\n\x14object_center_params\x18\x05 \x01(\x0b\x32\x35.object_detection.protos.CenterNet.ObjectCenterParams\x12\x1f\n\x17keypoint_label_map_path\x18\x06 \x01(\t\x12W\n\x18keypoint_estimation_task\x18\x07 \x03(\x0b\x32\x35.object_detection.protos.CenterNet.KeypointEstimation\x12O\n\x14mask_estimation_task\x18\x08 \x01(\x0b\x32\x31.object_detection.protos.CenterNet.MaskEstimation\x12Y\n\x19\x64\x65nsepose_estimation_task\x18\t \x01(\x0b\x32\x36.object_detection.protos.CenterNet.DensePoseEstimation\x12Q\n\x15track_estimation_task\x18\n \x01(\x0b\x32\x32.object_detection.protos.CenterNet.TrackEstimation\x12Y\n\x14temporal_offset_task\x18\x0c \x01(\x0b\x32;.object_detection.protos.CenterNet.TemporalOffsetEstimation\x12Y\n\x17\x64\x65\x65pmac_mask_estimation\x18\x0e \x01(\x0b\x32\x38.object_detection.protos.CenterNet.DeepMACMaskEstimation\x12@\n\x0fpost_processing\x18\x18 \x01(\x0b\x32\'.object_detection.protos.PostProcessing\x1a\x41\n\x14PredictionHeadParams\x12\x13\n\x0bnum_filters\x18\x01 \x03(\x05\x12\x14\n\x0ckernel_sizes\x18\x02 \x03(\x05\x1a\xf4\x02\n\x0fObjectDetection\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x12offset_loss_weight\x18\x03 \x01(\x02:\x01\x31\x12\x1e\n\x11scale_loss_weight\x18\x04 \x01(\x02:\x03\x30.1\x12\x44\n\x11localization_loss\x18\x08 \x01(\x0b\x32).object_detection.protos.LocalizationLoss\x12R\n\x11scale_head_params\x18\t \x01(\x0b\x32\x37.object_detection.protos.CenterNet.PredictionHeadParams\x12S\n\x12offset_head_params\x18\n 
\x01(\x0b\x32\x37.object_detection.protos.CenterNet.PredictionHeadParamsJ\x04\x08\x02\x10\x03J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x07\x10\x08\x1a\x88\x03\n\x12ObjectCenterParams\x12$\n\x19object_center_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12H\n\x13\x63lassification_loss\x18\x02 \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12 \n\x11heatmap_bias_init\x18\x03 \x01(\x02:\x05-2.19\x12 \n\x13min_box_overlap_iou\x18\x04 \x01(\x02:\x03\x30.7\x12 \n\x13max_box_predictions\x18\x05 \x01(\x05:\x03\x31\x30\x30\x12\"\n\x13use_labeled_classes\x18\x06 \x01(\x08:\x05\x66\x61lse\x12#\n\x1bkeypoint_weights_for_center\x18\x07 \x03(\x02\x12S\n\x12\x63\x65nter_head_params\x18\x08 \x01(\x0b\x32\x37.object_detection.protos.CenterNet.PredictionHeadParams\x1a\x85\n\n\x12KeypointEstimation\x12\x11\n\ttask_name\x18\x01 \x01(\t\x12\x1b\n\x10task_loss_weight\x18\x02 \x01(\x02:\x01\x31\x12+\n\x04loss\x18\x03 \x01(\x0b\x32\x1d.object_detection.protos.Loss\x12\x1b\n\x13keypoint_class_name\x18\x04 \x01(\t\x12l\n\x15keypoint_label_to_std\x18\x05 \x03(\x0b\x32M.object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry\x12*\n\x1fkeypoint_regression_loss_weight\x18\x06 \x01(\x02:\x01\x31\x12\'\n\x1ckeypoint_heatmap_loss_weight\x18\x07 \x01(\x02:\x01\x31\x12&\n\x1bkeypoint_offset_loss_weight\x18\x08 \x01(\x02:\x01\x31\x12 \n\x11heatmap_bias_init\x18\t \x01(\x02:\x05-2.19\x12/\n\"keypoint_candidate_score_threshold\x18\n \x01(\x02:\x03\x30.1\x12(\n\x1bnum_candidates_per_keypoint\x18\x0b \x01(\x05:\x03\x31\x30\x30\x12$\n\x19peak_max_pool_kernel_size\x18\x0c \x01(\x05:\x01\x33\x12%\n\x18unmatched_keypoint_score\x18\r \x01(\x02:\x03\x30.1\x12\x16\n\tbox_scale\x18\x0e \x01(\x02:\x03\x31.2\x12#\n\x16\x63\x61ndidate_search_scale\x18\x0f \x01(\x02:\x03\x30.3\x12,\n\x16\x63\x61ndidate_ranking_mode\x18\x10 \x01(\t:\x0cmin_distance\x12 \n\x15score_distance_offset\x18\x16 \x01(\x02:\x01\x31\x12\x1d\n\x12offset_peak_radius\x18\x11 \x01(\x05:\x01\x30\x12\"\n\x13per_keypoint_offset\x18\x12 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rpredict_depth\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x12per_keypoint_depth\x18\x14 \x01(\x08:\x05\x66\x61lse\x12%\n\x1akeypoint_depth_loss_weight\x18\x15 \x01(\x02:\x01\x31\x12*\n\x1b\x63lip_out_of_frame_keypoints\x18\x17 \x01(\x08:\x05\x66\x61lse\x12 \n\x11rescore_instances\x18\x18 \x01(\x08:\x05\x66\x61lse\x12T\n\x13heatmap_head_params\x18\x19 \x01(\x0b\x32\x37.object_detection.protos.CenterNet.PredictionHeadParams\x12S\n\x12offset_head_params\x18\x1a \x01(\x0b\x32\x37.object_detection.protos.CenterNet.PredictionHeadParams\x12T\n\x13regress_head_params\x18\x1b \x01(\x0b\x32\x37.object_detection.protos.CenterNet.PredictionHeadParams\x1a\x39\n\x17KeypointLabelToStdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a\xbd\x02\n\x0eMaskEstimation\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12H\n\x13\x63lassification_loss\x18\x02 \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12\x18\n\x0bmask_height\x18\x04 \x01(\x05:\x03\x32\x35\x36\x12\x17\n\nmask_width\x18\x05 \x01(\x05:\x03\x32\x35\x36\x12\x1c\n\x0fscore_threshold\x18\x06 \x01(\x02:\x03\x30.5\x12 \n\x11heatmap_bias_init\x18\x03 \x01(\x02:\x05-2.19\x12Q\n\x10mask_head_params\x18\x07 \x01(\x0b\x32\x37.object_detection.protos.CenterNet.PredictionHeadParams\x1a\x8f\x02\n\x13\x44\x65nsePoseEstimation\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x08\x63lass_id\x18\x02 \x01(\x05\x12+\n\x04loss\x18\x03 
\x01(\x0b\x32\x1d.object_detection.protos.Loss\x12\x15\n\tnum_parts\x18\x04 \x01(\x05:\x02\x32\x34\x12\x1b\n\x10part_loss_weight\x18\x05 \x01(\x02:\x01\x31\x12!\n\x16\x63oordinate_loss_weight\x18\x06 \x01(\x02:\x01\x31\x12#\n\x15upsample_to_input_res\x18\x07 \x01(\x08:\x04true\x12 \n\x11heatmap_bias_init\x18\x08 \x01(\x02:\x05-2.19\x1a\xc7\x01\n\x0fTrackEstimation\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\rnum_track_ids\x18\x02 \x01(\x05\x12\x1c\n\x0freid_embed_size\x18\x03 \x01(\x05:\x03\x31\x32\x38\x12\x18\n\rnum_fc_layers\x18\x04 \x01(\x05:\x01\x31\x12H\n\x13\x63lassification_loss\x18\x05 \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x1a}\n\x18TemporalOffsetEstimation\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12\x44\n\x11localization_loss\x18\x02 \x01(\x0b\x32).object_detection.protos.LocalizationLoss\x1a\xd3\x04\n\x15\x44\x65\x65pMACMaskEstimation\x12H\n\x13\x63lassification_loss\x18\x01 \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12\x1b\n\x10task_loss_weight\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x03\x64im\x18\x03 \x01(\x05:\x03\x32\x35\x36\x12\x1f\n\x13pixel_embedding_dim\x18\x04 \x01(\x05:\x02\x31\x36\x12\"\n\x1a\x61llowed_masked_classes_ids\x18\x05 \x03(\x05\x12\x15\n\tmask_size\x18\x06 \x01(\x05:\x02\x33\x32\x12\x1f\n\x13mask_num_subsamples\x18\x43 \x01(\x05:\x02-1\x12\x14\n\x06use_xy\x18\x08 \x01(\x08:\x04true\x12!\n\x0cnetwork_type\x18\t \x01(\t:\x0bhourglass52\x12$\n\x16use_instance_embedding\x18\n \x01(\x08:\x04true\x12\x1d\n\x11num_init_channels\x18\x0b \x01(\x05:\x02\x36\x34\x12,\n\x1dpredict_full_resolution_masks\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\"\n\x15postprocess_crop_size\x18\r \x01(\x05:\x03\x32\x35\x36\x12\x1f\n\x14max_roi_jitter_ratio\x18\x0e \x01(\x02:\x01\x30\x12S\n\x0bjitter_mode\x18\x0f \x01(\x0e\x32\x35.object_detection.protos.RandomJitterBoxes.JitterMode:\x07\x44\x45\x46\x41ULT\"\xfc\x01\n\x19\x43\x65nterNetFeatureExtractor\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x15\n\rchannel_means\x18\x02 \x03(\x02\x12\x14\n\x0c\x63hannel_stds\x18\x03 \x03(\x02\x12\x1b\n\x0c\x62gr_ordering\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x10\x64\x65pth_multiplier\x18\t \x01(\x02:\x01\x31\x12!\n\x12use_separable_conv\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x18upsampling_interpolation\x18\x0b \x01(\t:\x07nearest' , dependencies=[object__detection_dot_protos_dot_image__resizer__pb2.DESCRIPTOR,object__detection_dot_protos_dot_losses__pb2.DESCRIPTOR,object__detection_dot_protos_dot_post__processing__pb2.DESCRIPTOR,object__detection_dot_protos_dot_preprocessor__pb2.DESCRIPTOR,]) _CENTERNET_PREDICTIONHEADPARAMS = _descriptor.Descriptor( name='PredictionHeadParams', full_name='object_detection.protos.CenterNet.PredictionHeadParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_filters', full_name='object_detection.protos.CenterNet.PredictionHeadParams.num_filters', index=0, number=1, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_sizes', full_name='object_detection.protos.CenterNet.PredictionHeadParams.kernel_sizes', index=1, number=2, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1281, serialized_end=1346, ) _CENTERNET_OBJECTDETECTION = _descriptor.Descriptor( name='ObjectDetection', full_name='object_detection.protos.CenterNet.ObjectDetection', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='task_loss_weight', full_name='object_detection.protos.CenterNet.ObjectDetection.task_loss_weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='offset_loss_weight', full_name='object_detection.protos.CenterNet.ObjectDetection.offset_loss_weight', index=1, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale_loss_weight', full_name='object_detection.protos.CenterNet.ObjectDetection.scale_loss_weight', index=2, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='localization_loss', full_name='object_detection.protos.CenterNet.ObjectDetection.localization_loss', index=3, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale_head_params', full_name='object_detection.protos.CenterNet.ObjectDetection.scale_head_params', index=4, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='offset_head_params', full_name='object_detection.protos.CenterNet.ObjectDetection.offset_head_params', index=5, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1349, serialized_end=1721, ) _CENTERNET_OBJECTCENTERPARAMS = _descriptor.Descriptor( name='ObjectCenterParams', full_name='object_detection.protos.CenterNet.ObjectCenterParams', filename=None, file=DESCRIPTOR, containing_type=None, 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='object_center_loss_weight', full_name='object_detection.protos.CenterNet.ObjectCenterParams.object_center_loss_weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='classification_loss', full_name='object_detection.protos.CenterNet.ObjectCenterParams.classification_loss', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='heatmap_bias_init', full_name='object_detection.protos.CenterNet.ObjectCenterParams.heatmap_bias_init', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(-2.19), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_box_overlap_iou', full_name='object_detection.protos.CenterNet.ObjectCenterParams.min_box_overlap_iou', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.7), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_box_predictions', full_name='object_detection.protos.CenterNet.ObjectCenterParams.max_box_predictions', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=100, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_labeled_classes', full_name='object_detection.protos.CenterNet.ObjectCenterParams.use_labeled_classes', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_weights_for_center', full_name='object_detection.protos.CenterNet.ObjectCenterParams.keypoint_weights_for_center', index=6, number=7, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='center_head_params', full_name='object_detection.protos.CenterNet.ObjectCenterParams.center_head_params', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, 
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1724, serialized_end=2116, ) _CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY = _descriptor.Descriptor( name='KeypointLabelToStdEntry', full_name='object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry.value', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=b'8\001', is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3347, serialized_end=3404, ) _CENTERNET_KEYPOINTESTIMATION = _descriptor.Descriptor( name='KeypointEstimation', full_name='object_detection.protos.CenterNet.KeypointEstimation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='task_name', full_name='object_detection.protos.CenterNet.KeypointEstimation.task_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='task_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.task_loss_weight', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='loss', full_name='object_detection.protos.CenterNet.KeypointEstimation.loss', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_class_name', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_class_name', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_label_to_std', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_label_to_std', 
index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_regression_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_regression_loss_weight', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_heatmap_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_heatmap_loss_weight', index=6, number=7, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_offset_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_offset_loss_weight', index=7, number=8, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='heatmap_bias_init', full_name='object_detection.protos.CenterNet.KeypointEstimation.heatmap_bias_init', index=8, number=9, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(-2.19), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_candidate_score_threshold', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_candidate_score_threshold', index=9, number=10, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_candidates_per_keypoint', full_name='object_detection.protos.CenterNet.KeypointEstimation.num_candidates_per_keypoint', index=10, number=11, type=5, cpp_type=1, label=1, has_default_value=True, default_value=100, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='peak_max_pool_kernel_size', full_name='object_detection.protos.CenterNet.KeypointEstimation.peak_max_pool_kernel_size', index=11, number=12, type=5, cpp_type=1, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='unmatched_keypoint_score', full_name='object_detection.protos.CenterNet.KeypointEstimation.unmatched_keypoint_score', index=12, number=13, type=2, 
cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='box_scale', full_name='object_detection.protos.CenterNet.KeypointEstimation.box_scale', index=13, number=14, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1.2), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='candidate_search_scale', full_name='object_detection.protos.CenterNet.KeypointEstimation.candidate_search_scale', index=14, number=15, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.3), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='candidate_ranking_mode', full_name='object_detection.protos.CenterNet.KeypointEstimation.candidate_ranking_mode', index=15, number=16, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"min_distance".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='score_distance_offset', full_name='object_detection.protos.CenterNet.KeypointEstimation.score_distance_offset', index=16, number=22, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='offset_peak_radius', full_name='object_detection.protos.CenterNet.KeypointEstimation.offset_peak_radius', index=17, number=17, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='per_keypoint_offset', full_name='object_detection.protos.CenterNet.KeypointEstimation.per_keypoint_offset', index=18, number=18, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='predict_depth', full_name='object_detection.protos.CenterNet.KeypointEstimation.predict_depth', index=19, number=19, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='per_keypoint_depth', full_name='object_detection.protos.CenterNet.KeypointEstimation.per_keypoint_depth', index=20, number=20, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_depth_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_depth_loss_weight', index=21, number=21, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_out_of_frame_keypoints', full_name='object_detection.protos.CenterNet.KeypointEstimation.clip_out_of_frame_keypoints', index=22, number=23, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rescore_instances', full_name='object_detection.protos.CenterNet.KeypointEstimation.rescore_instances', index=23, number=24, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='heatmap_head_params', full_name='object_detection.protos.CenterNet.KeypointEstimation.heatmap_head_params', index=24, number=25, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='offset_head_params', full_name='object_detection.protos.CenterNet.KeypointEstimation.offset_head_params', index=25, number=26, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='regress_head_params', full_name='object_detection.protos.CenterNet.KeypointEstimation.regress_head_params', index=26, number=27, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2119, serialized_end=3404, ) _CENTERNET_MASKESTIMATION = _descriptor.Descriptor( name='MaskEstimation', full_name='object_detection.protos.CenterNet.MaskEstimation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='task_loss_weight', full_name='object_detection.protos.CenterNet.MaskEstimation.task_loss_weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='classification_loss', full_name='object_detection.protos.CenterNet.MaskEstimation.classification_loss', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_height', full_name='object_detection.protos.CenterNet.MaskEstimation.mask_height', index=2, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_width', full_name='object_detection.protos.CenterNet.MaskEstimation.mask_width', index=3, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='score_threshold', full_name='object_detection.protos.CenterNet.MaskEstimation.score_threshold', index=4, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='heatmap_bias_init', full_name='object_detection.protos.CenterNet.MaskEstimation.heatmap_bias_init', index=5, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(-2.19), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_head_params', full_name='object_detection.protos.CenterNet.MaskEstimation.mask_head_params', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3407, serialized_end=3724, ) _CENTERNET_DENSEPOSEESTIMATION = _descriptor.Descriptor( name='DensePoseEstimation', full_name='object_detection.protos.CenterNet.DensePoseEstimation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='task_loss_weight', full_name='object_detection.protos.CenterNet.DensePoseEstimation.task_loss_weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='class_id', 
full_name='object_detection.protos.CenterNet.DensePoseEstimation.class_id', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='loss', full_name='object_detection.protos.CenterNet.DensePoseEstimation.loss', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_parts', full_name='object_detection.protos.CenterNet.DensePoseEstimation.num_parts', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=24, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='part_loss_weight', full_name='object_detection.protos.CenterNet.DensePoseEstimation.part_loss_weight', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='coordinate_loss_weight', full_name='object_detection.protos.CenterNet.DensePoseEstimation.coordinate_loss_weight', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='upsample_to_input_res', full_name='object_detection.protos.CenterNet.DensePoseEstimation.upsample_to_input_res', index=6, number=7, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='heatmap_bias_init', full_name='object_detection.protos.CenterNet.DensePoseEstimation.heatmap_bias_init', index=7, number=8, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(-2.19), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3727, serialized_end=3998, ) _CENTERNET_TRACKESTIMATION = _descriptor.Descriptor( name='TrackEstimation', full_name='object_detection.protos.CenterNet.TrackEstimation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='task_loss_weight', full_name='object_detection.protos.CenterNet.TrackEstimation.task_loss_weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_track_ids', full_name='object_detection.protos.CenterNet.TrackEstimation.num_track_ids', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='reid_embed_size', full_name='object_detection.protos.CenterNet.TrackEstimation.reid_embed_size', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=128, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_fc_layers', full_name='object_detection.protos.CenterNet.TrackEstimation.num_fc_layers', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='classification_loss', full_name='object_detection.protos.CenterNet.TrackEstimation.classification_loss', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4001, serialized_end=4200, ) _CENTERNET_TEMPORALOFFSETESTIMATION = _descriptor.Descriptor( name='TemporalOffsetEstimation', full_name='object_detection.protos.CenterNet.TemporalOffsetEstimation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='task_loss_weight', full_name='object_detection.protos.CenterNet.TemporalOffsetEstimation.task_loss_weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='localization_loss', full_name='object_detection.protos.CenterNet.TemporalOffsetEstimation.localization_loss', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4202, serialized_end=4327, ) _CENTERNET_DEEPMACMASKESTIMATION = _descriptor.Descriptor( name='DeepMACMaskEstimation', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation', filename=None, file=DESCRIPTOR, containing_type=None, 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='classification_loss', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.classification_loss', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='task_loss_weight', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.task_loss_weight', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dim', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.dim', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pixel_embedding_dim', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.pixel_embedding_dim', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='allowed_masked_classes_ids', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.allowed_masked_classes_ids', index=4, number=5, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_size', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.mask_size', index=5, number=6, type=5, cpp_type=1, label=1, has_default_value=True, default_value=32, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_num_subsamples', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.mask_num_subsamples', index=6, number=67, type=5, cpp_type=1, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_xy', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.use_xy', index=7, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='network_type', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.network_type', index=8, number=9, type=9, cpp_type=9, 
label=1, has_default_value=True, default_value=b"hourglass52".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_instance_embedding', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.use_instance_embedding', index=9, number=10, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_init_channels', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.num_init_channels', index=10, number=11, type=5, cpp_type=1, label=1, has_default_value=True, default_value=64, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='predict_full_resolution_masks', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.predict_full_resolution_masks', index=11, number=12, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='postprocess_crop_size', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.postprocess_crop_size', index=12, number=13, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_roi_jitter_ratio', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.max_roi_jitter_ratio', index=13, number=14, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='jitter_mode', full_name='object_detection.protos.CenterNet.DeepMACMaskEstimation.jitter_mode', index=14, number=15, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4330, serialized_end=4925, ) _CENTERNET = _descriptor.Descriptor( name='CenterNet', full_name='object_detection.protos.CenterNet', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_classes', full_name='object_detection.protos.CenterNet.num_classes', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='feature_extractor', full_name='object_detection.protos.CenterNet.feature_extractor', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='image_resizer', full_name='object_detection.protos.CenterNet.image_resizer', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_depthwise', full_name='object_detection.protos.CenterNet.use_depthwise', index=3, number=13, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='compute_heatmap_sparse', full_name='object_detection.protos.CenterNet.compute_heatmap_sparse', index=4, number=15, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='object_detection_task', full_name='object_detection.protos.CenterNet.object_detection_task', index=5, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='object_center_params', full_name='object_detection.protos.CenterNet.object_center_params', index=6, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_label_map_path', full_name='object_detection.protos.CenterNet.keypoint_label_map_path', index=7, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_estimation_task', full_name='object_detection.protos.CenterNet.keypoint_estimation_task', index=8, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mask_estimation_task', full_name='object_detection.protos.CenterNet.mask_estimation_task', index=9, number=8, type=11, cpp_type=10, label=1, has_default_value=False, 
default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='densepose_estimation_task', full_name='object_detection.protos.CenterNet.densepose_estimation_task', index=10, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='track_estimation_task', full_name='object_detection.protos.CenterNet.track_estimation_task', index=11, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='temporal_offset_task', full_name='object_detection.protos.CenterNet.temporal_offset_task', index=12, number=12, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='deepmac_mask_estimation', full_name='object_detection.protos.CenterNet.deepmac_mask_estimation', index=13, number=14, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='post_processing', full_name='object_detection.protos.CenterNet.post_processing', index=14, number=24, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_CENTERNET_PREDICTIONHEADPARAMS, _CENTERNET_OBJECTDETECTION, _CENTERNET_OBJECTCENTERPARAMS, _CENTERNET_KEYPOINTESTIMATION, _CENTERNET_MASKESTIMATION, _CENTERNET_DENSEPOSEESTIMATION, _CENTERNET_TRACKESTIMATION, _CENTERNET_TEMPORALOFFSETESTIMATION, _CENTERNET_DEEPMACMASKESTIMATION, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=244, serialized_end=4925, ) _CENTERNETFEATUREEXTRACTOR = _descriptor.Descriptor( name='CenterNetFeatureExtractor', full_name='object_detection.protos.CenterNetFeatureExtractor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='type', full_name='object_detection.protos.CenterNetFeatureExtractor.type', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='channel_means', full_name='object_detection.protos.CenterNetFeatureExtractor.channel_means', index=1, number=2, 
type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='channel_stds', full_name='object_detection.protos.CenterNetFeatureExtractor.channel_stds', index=2, number=3, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bgr_ordering', full_name='object_detection.protos.CenterNetFeatureExtractor.bgr_ordering', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_depthwise', full_name='object_detection.protos.CenterNetFeatureExtractor.use_depthwise', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='depth_multiplier', full_name='object_detection.protos.CenterNetFeatureExtractor.depth_multiplier', index=5, number=9, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_separable_conv', full_name='object_detection.protos.CenterNetFeatureExtractor.use_separable_conv', index=6, number=10, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='upsampling_interpolation', full_name='object_detection.protos.CenterNetFeatureExtractor.upsampling_interpolation', index=7, number=11, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"nearest".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4928, serialized_end=5180, ) _CENTERNET_PREDICTIONHEADPARAMS.containing_type = _CENTERNET _CENTERNET_OBJECTDETECTION.fields_by_name['localization_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOCALIZATIONLOSS _CENTERNET_OBJECTDETECTION.fields_by_name['scale_head_params'].message_type = _CENTERNET_PREDICTIONHEADPARAMS _CENTERNET_OBJECTDETECTION.fields_by_name['offset_head_params'].message_type = _CENTERNET_PREDICTIONHEADPARAMS _CENTERNET_OBJECTDETECTION.containing_type = _CENTERNET _CENTERNET_OBJECTCENTERPARAMS.fields_by_name['classification_loss'].message_type = 
object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS _CENTERNET_OBJECTCENTERPARAMS.fields_by_name['center_head_params'].message_type = _CENTERNET_PREDICTIONHEADPARAMS _CENTERNET_OBJECTCENTERPARAMS.containing_type = _CENTERNET _CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY.containing_type = _CENTERNET_KEYPOINTESTIMATION _CENTERNET_KEYPOINTESTIMATION.fields_by_name['loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOSS _CENTERNET_KEYPOINTESTIMATION.fields_by_name['keypoint_label_to_std'].message_type = _CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY _CENTERNET_KEYPOINTESTIMATION.fields_by_name['heatmap_head_params'].message_type = _CENTERNET_PREDICTIONHEADPARAMS _CENTERNET_KEYPOINTESTIMATION.fields_by_name['offset_head_params'].message_type = _CENTERNET_PREDICTIONHEADPARAMS _CENTERNET_KEYPOINTESTIMATION.fields_by_name['regress_head_params'].message_type = _CENTERNET_PREDICTIONHEADPARAMS _CENTERNET_KEYPOINTESTIMATION.containing_type = _CENTERNET _CENTERNET_MASKESTIMATION.fields_by_name['classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS _CENTERNET_MASKESTIMATION.fields_by_name['mask_head_params'].message_type = _CENTERNET_PREDICTIONHEADPARAMS _CENTERNET_MASKESTIMATION.containing_type = _CENTERNET _CENTERNET_DENSEPOSEESTIMATION.fields_by_name['loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOSS _CENTERNET_DENSEPOSEESTIMATION.containing_type = _CENTERNET _CENTERNET_TRACKESTIMATION.fields_by_name['classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS _CENTERNET_TRACKESTIMATION.containing_type = _CENTERNET _CENTERNET_TEMPORALOFFSETESTIMATION.fields_by_name['localization_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOCALIZATIONLOSS _CENTERNET_TEMPORALOFFSETESTIMATION.containing_type = _CENTERNET _CENTERNET_DEEPMACMASKESTIMATION.fields_by_name['classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS _CENTERNET_DEEPMACMASKESTIMATION.fields_by_name['jitter_mode'].enum_type = object__detection_dot_protos_dot_preprocessor__pb2._RANDOMJITTERBOXES_JITTERMODE _CENTERNET_DEEPMACMASKESTIMATION.containing_type = _CENTERNET _CENTERNET.fields_by_name['feature_extractor'].message_type = _CENTERNETFEATUREEXTRACTOR _CENTERNET.fields_by_name['image_resizer'].message_type = object__detection_dot_protos_dot_image__resizer__pb2._IMAGERESIZER _CENTERNET.fields_by_name['object_detection_task'].message_type = _CENTERNET_OBJECTDETECTION _CENTERNET.fields_by_name['object_center_params'].message_type = _CENTERNET_OBJECTCENTERPARAMS _CENTERNET.fields_by_name['keypoint_estimation_task'].message_type = _CENTERNET_KEYPOINTESTIMATION _CENTERNET.fields_by_name['mask_estimation_task'].message_type = _CENTERNET_MASKESTIMATION _CENTERNET.fields_by_name['densepose_estimation_task'].message_type = _CENTERNET_DENSEPOSEESTIMATION _CENTERNET.fields_by_name['track_estimation_task'].message_type = _CENTERNET_TRACKESTIMATION _CENTERNET.fields_by_name['temporal_offset_task'].message_type = _CENTERNET_TEMPORALOFFSETESTIMATION _CENTERNET.fields_by_name['deepmac_mask_estimation'].message_type = _CENTERNET_DEEPMACMASKESTIMATION _CENTERNET.fields_by_name['post_processing'].message_type = object__detection_dot_protos_dot_post__processing__pb2._POSTPROCESSING DESCRIPTOR.message_types_by_name['CenterNet'] = _CENTERNET DESCRIPTOR.message_types_by_name['CenterNetFeatureExtractor'] = _CENTERNETFEATUREEXTRACTOR 
_sym_db.RegisterFileDescriptor(DESCRIPTOR) CenterNet = _reflection.GeneratedProtocolMessageType('CenterNet', (_message.Message,), { 'PredictionHeadParams' : _reflection.GeneratedProtocolMessageType('PredictionHeadParams', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_PREDICTIONHEADPARAMS, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.PredictionHeadParams) }) , 'ObjectDetection' : _reflection.GeneratedProtocolMessageType('ObjectDetection', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_OBJECTDETECTION, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.ObjectDetection) }) , 'ObjectCenterParams' : _reflection.GeneratedProtocolMessageType('ObjectCenterParams', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_OBJECTCENTERPARAMS, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.ObjectCenterParams) }) , 'KeypointEstimation' : _reflection.GeneratedProtocolMessageType('KeypointEstimation', (_message.Message,), { 'KeypointLabelToStdEntry' : _reflection.GeneratedProtocolMessageType('KeypointLabelToStdEntry', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry) }) , 'DESCRIPTOR' : _CENTERNET_KEYPOINTESTIMATION, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.KeypointEstimation) }) , 'MaskEstimation' : _reflection.GeneratedProtocolMessageType('MaskEstimation', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_MASKESTIMATION, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.MaskEstimation) }) , 'DensePoseEstimation' : _reflection.GeneratedProtocolMessageType('DensePoseEstimation', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_DENSEPOSEESTIMATION, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.DensePoseEstimation) }) , 'TrackEstimation' : _reflection.GeneratedProtocolMessageType('TrackEstimation', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_TRACKESTIMATION, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.TrackEstimation) }) , 'TemporalOffsetEstimation' : _reflection.GeneratedProtocolMessageType('TemporalOffsetEstimation', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_TEMPORALOFFSETESTIMATION, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.TemporalOffsetEstimation) }) , 'DeepMACMaskEstimation' : _reflection.GeneratedProtocolMessageType('DeepMACMaskEstimation', (_message.Message,), { 'DESCRIPTOR' : _CENTERNET_DEEPMACMASKESTIMATION, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.DeepMACMaskEstimation) }) , 'DESCRIPTOR' : _CENTERNET, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet) }) _sym_db.RegisterMessage(CenterNet) _sym_db.RegisterMessage(CenterNet.PredictionHeadParams) 
_sym_db.RegisterMessage(CenterNet.ObjectDetection) _sym_db.RegisterMessage(CenterNet.ObjectCenterParams) _sym_db.RegisterMessage(CenterNet.KeypointEstimation) _sym_db.RegisterMessage(CenterNet.KeypointEstimation.KeypointLabelToStdEntry) _sym_db.RegisterMessage(CenterNet.MaskEstimation) _sym_db.RegisterMessage(CenterNet.DensePoseEstimation) _sym_db.RegisterMessage(CenterNet.TrackEstimation) _sym_db.RegisterMessage(CenterNet.TemporalOffsetEstimation) _sym_db.RegisterMessage(CenterNet.DeepMACMaskEstimation) CenterNetFeatureExtractor = _reflection.GeneratedProtocolMessageType('CenterNetFeatureExtractor', (_message.Message,), { 'DESCRIPTOR' : _CENTERNETFEATUREEXTRACTOR, '__module__' : 'object_detection.protos.center_net_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNetFeatureExtractor) }) _sym_db.RegisterMessage(CenterNetFeatureExtractor) _CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY._options = None # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/center_net_pb2.py
center_net_pb2.py
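The CenterNet message generated above is normally populated from a text-format pipeline config rather than built field by field. A minimal, hypothetical sketch of parsing such a fragment follows; the field names come from the descriptor above, but the concrete values (num_classes, the feature extractor type string) are illustrative only and not taken from this package.

from google.protobuf import text_format

from object_detection.protos import center_net_pb2

# Illustrative config fragment; values are made up for this example.
_CONFIG_TEXT = """
num_classes: 90
feature_extractor {
  type: "hourglass_104"
}
deepmac_mask_estimation {
  dim: 256
}
"""

config = text_format.Parse(_CONFIG_TEXT, center_net_pb2.CenterNet())
assert config.num_classes == 90
# network_type falls back to its proto default when unset.
assert config.deepmac_mask_estimation.network_type == "hourglass52"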
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/anchor_generator.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import flexible_grid_anchor_generator_pb2 as object__detection_dot_protos_dot_flexible__grid__anchor__generator__pb2 from object_detection.protos import grid_anchor_generator_pb2 as object__detection_dot_protos_dot_grid__anchor__generator__pb2 from object_detection.protos import multiscale_anchor_generator_pb2 as object__detection_dot_protos_dot_multiscale__anchor__generator__pb2 from object_detection.protos import ssd_anchor_generator_pb2 as object__detection_dot_protos_dot_ssd__anchor__generator__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/anchor_generator.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n.object_detection/protos/anchor_generator.proto\x12\x17object_detection.protos\x1a<object_detection/protos/flexible_grid_anchor_generator.proto\x1a\x33object_detection/protos/grid_anchor_generator.proto\x1a\x39object_detection/protos/multiscale_anchor_generator.proto\x1a\x32object_detection/protos/ssd_anchor_generator.proto\"\x82\x03\n\x0f\x41nchorGenerator\x12M\n\x15grid_anchor_generator\x18\x01 \x01(\x0b\x32,.object_detection.protos.GridAnchorGeneratorH\x00\x12K\n\x14ssd_anchor_generator\x18\x02 \x01(\x0b\x32+.object_detection.protos.SsdAnchorGeneratorH\x00\x12Y\n\x1bmultiscale_anchor_generator\x18\x03 \x01(\x0b\x32\x32.object_detection.protos.MultiscaleAnchorGeneratorH\x00\x12^\n\x1e\x66lexible_grid_anchor_generator\x18\x04 \x01(\x0b\x32\x34.object_detection.protos.FlexibleGridAnchorGeneratorH\x00\x42\x18\n\x16\x61nchor_generator_oneof' , dependencies=[object__detection_dot_protos_dot_flexible__grid__anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_grid__anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_multiscale__anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_ssd__anchor__generator__pb2.DESCRIPTOR,]) _ANCHORGENERATOR = _descriptor.Descriptor( name='AnchorGenerator', full_name='object_detection.protos.AnchorGenerator', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='grid_anchor_generator', full_name='object_detection.protos.AnchorGenerator.grid_anchor_generator', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ssd_anchor_generator', full_name='object_detection.protos.AnchorGenerator.ssd_anchor_generator', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='multiscale_anchor_generator', 
full_name='object_detection.protos.AnchorGenerator.multiscale_anchor_generator', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='flexible_grid_anchor_generator', full_name='object_detection.protos.AnchorGenerator.flexible_grid_anchor_generator', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='anchor_generator_oneof', full_name='object_detection.protos.AnchorGenerator.anchor_generator_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=302, serialized_end=688, ) _ANCHORGENERATOR.fields_by_name['grid_anchor_generator'].message_type = object__detection_dot_protos_dot_grid__anchor__generator__pb2._GRIDANCHORGENERATOR _ANCHORGENERATOR.fields_by_name['ssd_anchor_generator'].message_type = object__detection_dot_protos_dot_ssd__anchor__generator__pb2._SSDANCHORGENERATOR _ANCHORGENERATOR.fields_by_name['multiscale_anchor_generator'].message_type = object__detection_dot_protos_dot_multiscale__anchor__generator__pb2._MULTISCALEANCHORGENERATOR _ANCHORGENERATOR.fields_by_name['flexible_grid_anchor_generator'].message_type = object__detection_dot_protos_dot_flexible__grid__anchor__generator__pb2._FLEXIBLEGRIDANCHORGENERATOR _ANCHORGENERATOR.oneofs_by_name['anchor_generator_oneof'].fields.append( _ANCHORGENERATOR.fields_by_name['grid_anchor_generator']) _ANCHORGENERATOR.fields_by_name['grid_anchor_generator'].containing_oneof = _ANCHORGENERATOR.oneofs_by_name['anchor_generator_oneof'] _ANCHORGENERATOR.oneofs_by_name['anchor_generator_oneof'].fields.append( _ANCHORGENERATOR.fields_by_name['ssd_anchor_generator']) _ANCHORGENERATOR.fields_by_name['ssd_anchor_generator'].containing_oneof = _ANCHORGENERATOR.oneofs_by_name['anchor_generator_oneof'] _ANCHORGENERATOR.oneofs_by_name['anchor_generator_oneof'].fields.append( _ANCHORGENERATOR.fields_by_name['multiscale_anchor_generator']) _ANCHORGENERATOR.fields_by_name['multiscale_anchor_generator'].containing_oneof = _ANCHORGENERATOR.oneofs_by_name['anchor_generator_oneof'] _ANCHORGENERATOR.oneofs_by_name['anchor_generator_oneof'].fields.append( _ANCHORGENERATOR.fields_by_name['flexible_grid_anchor_generator']) _ANCHORGENERATOR.fields_by_name['flexible_grid_anchor_generator'].containing_oneof = _ANCHORGENERATOR.oneofs_by_name['anchor_generator_oneof'] DESCRIPTOR.message_types_by_name['AnchorGenerator'] = _ANCHORGENERATOR _sym_db.RegisterFileDescriptor(DESCRIPTOR) AnchorGenerator = _reflection.GeneratedProtocolMessageType('AnchorGenerator', (_message.Message,), { 'DESCRIPTOR' : _ANCHORGENERATOR, '__module__' : 'object_detection.protos.anchor_generator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.AnchorGenerator) }) _sym_db.RegisterMessage(AnchorGenerator) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/anchor_generator_pb2.py
anchor_generator_pb2.py
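As a quick illustration of the anchor_generator_oneof defined above: touching any one of the four nested messages selects it as the active oneof member, and selecting another clears the first. This is a sketch using only the standard protobuf Python message API.

from object_detection.protos import anchor_generator_pb2

anchor_proto = anchor_generator_pb2.AnchorGenerator()
# Marking a nested message as present selects it in the oneof.
anchor_proto.grid_anchor_generator.SetInParent()
assert anchor_proto.WhichOneof('anchor_generator_oneof') == 'grid_anchor_generator'

# Selecting a different member clears the previous one.
anchor_proto.ssd_anchor_generator.SetInParent()
assert anchor_proto.WhichOneof('anchor_generator_oneof') == 'ssd_anchor_generator'
assert not anchor_proto.HasField('grid_anchor_generator')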
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/grid_anchor_generator.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/grid_anchor_generator.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n3object_detection/protos/grid_anchor_generator.proto\x12\x17object_detection.protos\"\xcd\x01\n\x13GridAnchorGenerator\x12\x13\n\x06height\x18\x01 \x01(\x05:\x03\x32\x35\x36\x12\x12\n\x05width\x18\x02 \x01(\x05:\x03\x32\x35\x36\x12\x19\n\rheight_stride\x18\x03 \x01(\x05:\x02\x31\x36\x12\x18\n\x0cwidth_stride\x18\x04 \x01(\x05:\x02\x31\x36\x12\x18\n\rheight_offset\x18\x05 \x01(\x05:\x01\x30\x12\x17\n\x0cwidth_offset\x18\x06 \x01(\x05:\x01\x30\x12\x0e\n\x06scales\x18\x07 \x03(\x02\x12\x15\n\raspect_ratios\x18\x08 \x03(\x02' ) _GRIDANCHORGENERATOR = _descriptor.Descriptor( name='GridAnchorGenerator', full_name='object_detection.protos.GridAnchorGenerator', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='height', full_name='object_detection.protos.GridAnchorGenerator.height', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width', full_name='object_detection.protos.GridAnchorGenerator.width', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height_stride', full_name='object_detection.protos.GridAnchorGenerator.height_stride', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width_stride', full_name='object_detection.protos.GridAnchorGenerator.width_stride', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height_offset', full_name='object_detection.protos.GridAnchorGenerator.height_offset', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width_offset', 
full_name='object_detection.protos.GridAnchorGenerator.width_offset', index=5, number=6, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scales', full_name='object_detection.protos.GridAnchorGenerator.scales', index=6, number=7, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='aspect_ratios', full_name='object_detection.protos.GridAnchorGenerator.aspect_ratios', index=7, number=8, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=81, serialized_end=286, ) DESCRIPTOR.message_types_by_name['GridAnchorGenerator'] = _GRIDANCHORGENERATOR _sym_db.RegisterFileDescriptor(DESCRIPTOR) GridAnchorGenerator = _reflection.GeneratedProtocolMessageType('GridAnchorGenerator', (_message.Message,), { 'DESCRIPTOR' : _GRIDANCHORGENERATOR, '__module__' : 'object_detection.protos.grid_anchor_generator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.GridAnchorGenerator) }) _sym_db.RegisterMessage(GridAnchorGenerator) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/grid_anchor_generator_pb2.py
grid_anchor_generator_pb2.py
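A small, hypothetical sketch of filling in the GridAnchorGenerator message defined above; the scale and aspect-ratio values are illustrative, while the unset fields fall back to the proto defaults visible in the descriptor (height/width 256, strides 16).

from object_detection.protos import grid_anchor_generator_pb2

grid_proto = grid_anchor_generator_pb2.GridAnchorGenerator()
# Repeated float fields are filled via extend(); values here are examples only.
grid_proto.scales.extend([0.25, 0.5, 1.0, 2.0])
grid_proto.aspect_ratios.extend([0.5, 1.0, 2.0])
assert grid_proto.height == 256 and grid_proto.width == 256
assert grid_proto.height_stride == 16 and grid_proto.width_stride == 16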
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/flexible_grid_anchor_generator.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/flexible_grid_anchor_generator.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n<object_detection/protos/flexible_grid_anchor_generator.proto\x12\x17object_detection.protos\"|\n\x1b\x46lexibleGridAnchorGenerator\x12\x38\n\x0b\x61nchor_grid\x18\x01 \x03(\x0b\x32#.object_detection.protos.AnchorGrid\x12#\n\x15normalize_coordinates\x18\x02 \x01(\x08:\x04true\"\x97\x01\n\nAnchorGrid\x12\x12\n\nbase_sizes\x18\x01 \x03(\x02\x12\x15\n\raspect_ratios\x18\x02 \x03(\x02\x12\x15\n\rheight_stride\x18\x03 \x01(\r\x12\x14\n\x0cwidth_stride\x18\x04 \x01(\r\x12\x18\n\rheight_offset\x18\x05 \x01(\r:\x01\x30\x12\x17\n\x0cwidth_offset\x18\x06 \x01(\r:\x01\x30' ) _FLEXIBLEGRIDANCHORGENERATOR = _descriptor.Descriptor( name='FlexibleGridAnchorGenerator', full_name='object_detection.protos.FlexibleGridAnchorGenerator', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='anchor_grid', full_name='object_detection.protos.FlexibleGridAnchorGenerator.anchor_grid', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='normalize_coordinates', full_name='object_detection.protos.FlexibleGridAnchorGenerator.normalize_coordinates', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=89, serialized_end=213, ) _ANCHORGRID = _descriptor.Descriptor( name='AnchorGrid', full_name='object_detection.protos.AnchorGrid', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='base_sizes', full_name='object_detection.protos.AnchorGrid.base_sizes', index=0, number=1, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='aspect_ratios', full_name='object_detection.protos.AnchorGrid.aspect_ratios', index=1, number=2, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height_stride', full_name='object_detection.protos.AnchorGrid.height_stride', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width_stride', full_name='object_detection.protos.AnchorGrid.width_stride', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height_offset', full_name='object_detection.protos.AnchorGrid.height_offset', index=4, number=5, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width_offset', full_name='object_detection.protos.AnchorGrid.width_offset', index=5, number=6, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=216, serialized_end=367, ) _FLEXIBLEGRIDANCHORGENERATOR.fields_by_name['anchor_grid'].message_type = _ANCHORGRID DESCRIPTOR.message_types_by_name['FlexibleGridAnchorGenerator'] = _FLEXIBLEGRIDANCHORGENERATOR DESCRIPTOR.message_types_by_name['AnchorGrid'] = _ANCHORGRID _sym_db.RegisterFileDescriptor(DESCRIPTOR) FlexibleGridAnchorGenerator = _reflection.GeneratedProtocolMessageType('FlexibleGridAnchorGenerator', (_message.Message,), { 'DESCRIPTOR' : _FLEXIBLEGRIDANCHORGENERATOR, '__module__' : 'object_detection.protos.flexible_grid_anchor_generator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.FlexibleGridAnchorGenerator) }) _sym_db.RegisterMessage(FlexibleGridAnchorGenerator) AnchorGrid = _reflection.GeneratedProtocolMessageType('AnchorGrid', (_message.Message,), { 'DESCRIPTOR' : _ANCHORGRID, '__module__' : 'object_detection.protos.flexible_grid_anchor_generator_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.AnchorGrid) }) _sym_db.RegisterMessage(AnchorGrid) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/flexible_grid_anchor_generator_pb2.py
flexible_grid_anchor_generator_pb2.py
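FlexibleGridAnchorGenerator above carries a repeated AnchorGrid message. A minimal sketch of appending one grid entry (the base sizes, aspect ratios, and strides are illustrative values, not defaults from this file):

from object_detection.protos import flexible_grid_anchor_generator_pb2

flex_proto = flexible_grid_anchor_generator_pb2.FlexibleGridAnchorGenerator()
grid = flex_proto.anchor_grid.add()  # appends a new AnchorGrid entry
grid.base_sizes.extend([32.0, 64.0])
grid.aspect_ratios.extend([0.5, 1.0, 2.0])
grid.height_stride = 8
grid.width_stride = 8
# normalize_coordinates defaults to true per the descriptor above.
assert flex_proto.normalize_coordinates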
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/box_coder.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import faster_rcnn_box_coder_pb2 as object__detection_dot_protos_dot_faster__rcnn__box__coder__pb2 from object_detection.protos import keypoint_box_coder_pb2 as object__detection_dot_protos_dot_keypoint__box__coder__pb2 from object_detection.protos import mean_stddev_box_coder_pb2 as object__detection_dot_protos_dot_mean__stddev__box__coder__pb2 from object_detection.protos import square_box_coder_pb2 as object__detection_dot_protos_dot_square__box__coder__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/box_coder.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n\'object_detection/protos/box_coder.proto\x12\x17object_detection.protos\x1a\x33object_detection/protos/faster_rcnn_box_coder.proto\x1a\x30object_detection/protos/keypoint_box_coder.proto\x1a\x33object_detection/protos/mean_stddev_box_coder.proto\x1a.object_detection/protos/square_box_coder.proto\"\xc7\x02\n\x08\x42oxCoder\x12L\n\x15\x66\x61ster_rcnn_box_coder\x18\x01 \x01(\x0b\x32+.object_detection.protos.FasterRcnnBoxCoderH\x00\x12L\n\x15mean_stddev_box_coder\x18\x02 \x01(\x0b\x32+.object_detection.protos.MeanStddevBoxCoderH\x00\x12\x43\n\x10square_box_coder\x18\x03 \x01(\x0b\x32\'.object_detection.protos.SquareBoxCoderH\x00\x12G\n\x12keypoint_box_coder\x18\x04 \x01(\x0b\x32).object_detection.protos.KeypointBoxCoderH\x00\x42\x11\n\x0f\x62ox_coder_oneof' , dependencies=[object__detection_dot_protos_dot_faster__rcnn__box__coder__pb2.DESCRIPTOR,object__detection_dot_protos_dot_keypoint__box__coder__pb2.DESCRIPTOR,object__detection_dot_protos_dot_mean__stddev__box__coder__pb2.DESCRIPTOR,object__detection_dot_protos_dot_square__box__coder__pb2.DESCRIPTOR,]) _BOXCODER = _descriptor.Descriptor( name='BoxCoder', full_name='object_detection.protos.BoxCoder', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='faster_rcnn_box_coder', full_name='object_detection.protos.BoxCoder.faster_rcnn_box_coder', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mean_stddev_box_coder', full_name='object_detection.protos.BoxCoder.mean_stddev_box_coder', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='square_box_coder', full_name='object_detection.protos.BoxCoder.square_box_coder', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_box_coder', full_name='object_detection.protos.BoxCoder.keypoint_box_coder', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='box_coder_oneof', full_name='object_detection.protos.BoxCoder.box_coder_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=273, serialized_end=600, ) _BOXCODER.fields_by_name['faster_rcnn_box_coder'].message_type = object__detection_dot_protos_dot_faster__rcnn__box__coder__pb2._FASTERRCNNBOXCODER _BOXCODER.fields_by_name['mean_stddev_box_coder'].message_type = object__detection_dot_protos_dot_mean__stddev__box__coder__pb2._MEANSTDDEVBOXCODER _BOXCODER.fields_by_name['square_box_coder'].message_type = object__detection_dot_protos_dot_square__box__coder__pb2._SQUAREBOXCODER _BOXCODER.fields_by_name['keypoint_box_coder'].message_type = object__detection_dot_protos_dot_keypoint__box__coder__pb2._KEYPOINTBOXCODER _BOXCODER.oneofs_by_name['box_coder_oneof'].fields.append( _BOXCODER.fields_by_name['faster_rcnn_box_coder']) _BOXCODER.fields_by_name['faster_rcnn_box_coder'].containing_oneof = _BOXCODER.oneofs_by_name['box_coder_oneof'] _BOXCODER.oneofs_by_name['box_coder_oneof'].fields.append( _BOXCODER.fields_by_name['mean_stddev_box_coder']) _BOXCODER.fields_by_name['mean_stddev_box_coder'].containing_oneof = _BOXCODER.oneofs_by_name['box_coder_oneof'] _BOXCODER.oneofs_by_name['box_coder_oneof'].fields.append( _BOXCODER.fields_by_name['square_box_coder']) _BOXCODER.fields_by_name['square_box_coder'].containing_oneof = _BOXCODER.oneofs_by_name['box_coder_oneof'] _BOXCODER.oneofs_by_name['box_coder_oneof'].fields.append( _BOXCODER.fields_by_name['keypoint_box_coder']) _BOXCODER.fields_by_name['keypoint_box_coder'].containing_oneof = _BOXCODER.oneofs_by_name['box_coder_oneof'] DESCRIPTOR.message_types_by_name['BoxCoder'] = _BOXCODER _sym_db.RegisterFileDescriptor(DESCRIPTOR) BoxCoder = _reflection.GeneratedProtocolMessageType('BoxCoder', (_message.Message,), { 'DESCRIPTOR' : _BOXCODER, '__module__' : 'object_detection.protos.box_coder_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.BoxCoder) }) _sym_db.RegisterMessage(BoxCoder) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/box_coder_pb2.py
box_coder_pb2.py
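BoxCoder above is another oneof wrapper, mirroring the coders exercised by the tests earlier in this package. A hedged sketch selecting the keypoint_box_coder branch; only the oneof mechanics are shown, and no coder-specific fields are assumed.

from object_detection.protos import box_coder_pb2

box_coder_proto = box_coder_pb2.BoxCoder()
box_coder_proto.keypoint_box_coder.SetInParent()
assert box_coder_proto.WhichOneof('box_coder_oneof') == 'keypoint_box_coder'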
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/optimizer.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/optimizer.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n\'object_detection/protos/optimizer.proto\x12\x17object_detection.protos\"\xb5\x02\n\tOptimizer\x12G\n\x12rms_prop_optimizer\x18\x01 \x01(\x0b\x32).object_detection.protos.RMSPropOptimizerH\x00\x12H\n\x12momentum_optimizer\x18\x02 \x01(\x0b\x32*.object_detection.protos.MomentumOptimizerH\x00\x12@\n\x0e\x61\x64\x61m_optimizer\x18\x03 \x01(\x0b\x32&.object_detection.protos.AdamOptimizerH\x00\x12 \n\x12use_moving_average\x18\x04 \x01(\x08:\x04true\x12$\n\x14moving_average_decay\x18\x05 \x01(\x02:\x06\x30.9999B\x0b\n\toptimizer\"\x9f\x01\n\x10RMSPropOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\x12%\n\x18momentum_optimizer_value\x18\x02 \x01(\x02:\x03\x30.9\x12\x12\n\x05\x64\x65\x63\x61y\x18\x03 \x01(\x02:\x03\x30.9\x12\x12\n\x07\x65psilon\x18\x04 \x01(\x02:\x01\x31\"x\n\x11MomentumOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\x12%\n\x18momentum_optimizer_value\x18\x02 \x01(\x02:\x03\x30.9\"e\n\rAdamOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\x12\x16\n\x07\x65psilon\x18\x02 \x01(\x02:\x05\x31\x65-08\"\x80\x03\n\x0cLearningRate\x12O\n\x16\x63onstant_learning_rate\x18\x01 \x01(\x0b\x32-.object_detection.protos.ConstantLearningRateH\x00\x12`\n\x1f\x65xponential_decay_learning_rate\x18\x02 \x01(\x0b\x32\x35.object_detection.protos.ExponentialDecayLearningRateH\x00\x12T\n\x19manual_step_learning_rate\x18\x03 \x01(\x0b\x32/.object_detection.protos.ManualStepLearningRateH\x00\x12V\n\x1a\x63osine_decay_learning_rate\x18\x04 \x01(\x0b\x32\x30.object_detection.protos.CosineDecayLearningRateH\x00\x42\x0f\n\rlearning_rate\"4\n\x14\x43onstantLearningRate\x12\x1c\n\rlearning_rate\x18\x01 \x01(\x02:\x05\x30.002\"\xef\x01\n\x1c\x45xponentialDecayLearningRate\x12$\n\x15initial_learning_rate\x18\x01 \x01(\x02:\x05\x30.002\x12\x1c\n\x0b\x64\x65\x63\x61y_steps\x18\x02 \x01(\r:\x07\x34\x30\x30\x30\x30\x30\x30\x12\x1a\n\x0c\x64\x65\x63\x61y_factor\x18\x03 \x01(\x02:\x04\x30.95\x12\x17\n\tstaircase\x18\x04 \x01(\x08:\x04true\x12\x1f\n\x14\x62urnin_learning_rate\x18\x05 \x01(\x02:\x01\x30\x12\x17\n\x0c\x62urnin_steps\x18\x06 \x01(\r:\x01\x30\x12\x1c\n\x11min_learning_rate\x18\x07 \x01(\x02:\x01\x30\"\xf1\x01\n\x16ManualStepLearningRate\x12$\n\x15initial_learning_rate\x18\x01 \x01(\x02:\x05\x30.002\x12V\n\x08schedule\x18\x02 \x03(\x0b\x32\x44.object_detection.protos.ManualStepLearningRate.LearningRateSchedule\x12\x15\n\x06warmup\x18\x03 \x01(\x08:\x05\x66\x61lse\x1a\x42\n\x14LearningRateSchedule\x12\x0c\n\x04step\x18\x01 \x01(\r\x12\x1c\n\rlearning_rate\x18\x02 \x01(\x02:\x05\x30.002\"\xbe\x01\n\x17\x43osineDecayLearningRate\x12!\n\x12learning_rate_base\x18\x01 \x01(\x02:\x05\x30.002\x12\x1c\n\x0btotal_steps\x18\x02 \x01(\r:\x07\x34\x30\x30\x30\x30\x30\x30\x12$\n\x14warmup_learning_rate\x18\x03 
\x01(\x02:\x06\x30.0002\x12\x1b\n\x0cwarmup_steps\x18\x04 \x01(\r:\x05\x31\x30\x30\x30\x30\x12\x1f\n\x14hold_base_rate_steps\x18\x05 \x01(\r:\x01\x30' ) _OPTIMIZER = _descriptor.Descriptor( name='Optimizer', full_name='object_detection.protos.Optimizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='rms_prop_optimizer', full_name='object_detection.protos.Optimizer.rms_prop_optimizer', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='momentum_optimizer', full_name='object_detection.protos.Optimizer.momentum_optimizer', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='adam_optimizer', full_name='object_detection.protos.Optimizer.adam_optimizer', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_moving_average', full_name='object_detection.protos.Optimizer.use_moving_average', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='moving_average_decay', full_name='object_detection.protos.Optimizer.moving_average_decay', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.9999), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='optimizer', full_name='object_detection.protos.Optimizer.optimizer', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=69, serialized_end=378, ) _RMSPROPOPTIMIZER = _descriptor.Descriptor( name='RMSPropOptimizer', full_name='object_detection.protos.RMSPropOptimizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='learning_rate', full_name='object_detection.protos.RMSPropOptimizer.learning_rate', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='momentum_optimizer_value', full_name='object_detection.protos.RMSPropOptimizer.momentum_optimizer_value', index=1, 
number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.9), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='decay', full_name='object_detection.protos.RMSPropOptimizer.decay', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.9), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='epsilon', full_name='object_detection.protos.RMSPropOptimizer.epsilon', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=381, serialized_end=540, ) _MOMENTUMOPTIMIZER = _descriptor.Descriptor( name='MomentumOptimizer', full_name='object_detection.protos.MomentumOptimizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='learning_rate', full_name='object_detection.protos.MomentumOptimizer.learning_rate', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='momentum_optimizer_value', full_name='object_detection.protos.MomentumOptimizer.momentum_optimizer_value', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.9), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=542, serialized_end=662, ) _ADAMOPTIMIZER = _descriptor.Descriptor( name='AdamOptimizer', full_name='object_detection.protos.AdamOptimizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='learning_rate', full_name='object_detection.protos.AdamOptimizer.learning_rate', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='epsilon', full_name='object_detection.protos.AdamOptimizer.epsilon', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1e-08), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], 
enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=664, serialized_end=765, ) _LEARNINGRATE = _descriptor.Descriptor( name='LearningRate', full_name='object_detection.protos.LearningRate', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='constant_learning_rate', full_name='object_detection.protos.LearningRate.constant_learning_rate', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='exponential_decay_learning_rate', full_name='object_detection.protos.LearningRate.exponential_decay_learning_rate', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='manual_step_learning_rate', full_name='object_detection.protos.LearningRate.manual_step_learning_rate', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='cosine_decay_learning_rate', full_name='object_detection.protos.LearningRate.cosine_decay_learning_rate', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='learning_rate', full_name='object_detection.protos.LearningRate.learning_rate', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=768, serialized_end=1152, ) _CONSTANTLEARNINGRATE = _descriptor.Descriptor( name='ConstantLearningRate', full_name='object_detection.protos.ConstantLearningRate', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='learning_rate', full_name='object_detection.protos.ConstantLearningRate.learning_rate', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.002), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1154, serialized_end=1206, ) _EXPONENTIALDECAYLEARNINGRATE = _descriptor.Descriptor( name='ExponentialDecayLearningRate', full_name='object_detection.protos.ExponentialDecayLearningRate', filename=None, file=DESCRIPTOR, containing_type=None, 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='initial_learning_rate', full_name='object_detection.protos.ExponentialDecayLearningRate.initial_learning_rate', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.002), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='decay_steps', full_name='object_detection.protos.ExponentialDecayLearningRate.decay_steps', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=True, default_value=4000000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='decay_factor', full_name='object_detection.protos.ExponentialDecayLearningRate.decay_factor', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.95), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='staircase', full_name='object_detection.protos.ExponentialDecayLearningRate.staircase', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='burnin_learning_rate', full_name='object_detection.protos.ExponentialDecayLearningRate.burnin_learning_rate', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='burnin_steps', full_name='object_detection.protos.ExponentialDecayLearningRate.burnin_steps', index=5, number=6, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_learning_rate', full_name='object_detection.protos.ExponentialDecayLearningRate.min_learning_rate', index=6, number=7, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1209, serialized_end=1448, ) _MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE = _descriptor.Descriptor( name='LearningRateSchedule', full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='step', 
full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule.step', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='learning_rate', full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule.learning_rate', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.002), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1626, serialized_end=1692, ) _MANUALSTEPLEARNINGRATE = _descriptor.Descriptor( name='ManualStepLearningRate', full_name='object_detection.protos.ManualStepLearningRate', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='initial_learning_rate', full_name='object_detection.protos.ManualStepLearningRate.initial_learning_rate', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.002), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='schedule', full_name='object_detection.protos.ManualStepLearningRate.schedule', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='warmup', full_name='object_detection.protos.ManualStepLearningRate.warmup', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1451, serialized_end=1692, ) _COSINEDECAYLEARNINGRATE = _descriptor.Descriptor( name='CosineDecayLearningRate', full_name='object_detection.protos.CosineDecayLearningRate', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='learning_rate_base', full_name='object_detection.protos.CosineDecayLearningRate.learning_rate_base', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.002), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='total_steps', full_name='object_detection.protos.CosineDecayLearningRate.total_steps', index=1, number=2, type=13, cpp_type=3, label=1, 
has_default_value=True, default_value=4000000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='warmup_learning_rate', full_name='object_detection.protos.CosineDecayLearningRate.warmup_learning_rate', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.0002), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='warmup_steps', full_name='object_detection.protos.CosineDecayLearningRate.warmup_steps', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=True, default_value=10000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hold_base_rate_steps', full_name='object_detection.protos.CosineDecayLearningRate.hold_base_rate_steps', index=4, number=5, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1695, serialized_end=1885, ) _OPTIMIZER.fields_by_name['rms_prop_optimizer'].message_type = _RMSPROPOPTIMIZER _OPTIMIZER.fields_by_name['momentum_optimizer'].message_type = _MOMENTUMOPTIMIZER _OPTIMIZER.fields_by_name['adam_optimizer'].message_type = _ADAMOPTIMIZER _OPTIMIZER.oneofs_by_name['optimizer'].fields.append( _OPTIMIZER.fields_by_name['rms_prop_optimizer']) _OPTIMIZER.fields_by_name['rms_prop_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer'] _OPTIMIZER.oneofs_by_name['optimizer'].fields.append( _OPTIMIZER.fields_by_name['momentum_optimizer']) _OPTIMIZER.fields_by_name['momentum_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer'] _OPTIMIZER.oneofs_by_name['optimizer'].fields.append( _OPTIMIZER.fields_by_name['adam_optimizer']) _OPTIMIZER.fields_by_name['adam_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer'] _RMSPROPOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE _MOMENTUMOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE _ADAMOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE _LEARNINGRATE.fields_by_name['constant_learning_rate'].message_type = _CONSTANTLEARNINGRATE _LEARNINGRATE.fields_by_name['exponential_decay_learning_rate'].message_type = _EXPONENTIALDECAYLEARNINGRATE _LEARNINGRATE.fields_by_name['manual_step_learning_rate'].message_type = _MANUALSTEPLEARNINGRATE _LEARNINGRATE.fields_by_name['cosine_decay_learning_rate'].message_type = _COSINEDECAYLEARNINGRATE _LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append( _LEARNINGRATE.fields_by_name['constant_learning_rate']) _LEARNINGRATE.fields_by_name['constant_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate'] _LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append( _LEARNINGRATE.fields_by_name['exponential_decay_learning_rate']) 
_LEARNINGRATE.fields_by_name['exponential_decay_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate'] _LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append( _LEARNINGRATE.fields_by_name['manual_step_learning_rate']) _LEARNINGRATE.fields_by_name['manual_step_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate'] _LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append( _LEARNINGRATE.fields_by_name['cosine_decay_learning_rate']) _LEARNINGRATE.fields_by_name['cosine_decay_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate'] _MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE.containing_type = _MANUALSTEPLEARNINGRATE _MANUALSTEPLEARNINGRATE.fields_by_name['schedule'].message_type = _MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE DESCRIPTOR.message_types_by_name['Optimizer'] = _OPTIMIZER DESCRIPTOR.message_types_by_name['RMSPropOptimizer'] = _RMSPROPOPTIMIZER DESCRIPTOR.message_types_by_name['MomentumOptimizer'] = _MOMENTUMOPTIMIZER DESCRIPTOR.message_types_by_name['AdamOptimizer'] = _ADAMOPTIMIZER DESCRIPTOR.message_types_by_name['LearningRate'] = _LEARNINGRATE DESCRIPTOR.message_types_by_name['ConstantLearningRate'] = _CONSTANTLEARNINGRATE DESCRIPTOR.message_types_by_name['ExponentialDecayLearningRate'] = _EXPONENTIALDECAYLEARNINGRATE DESCRIPTOR.message_types_by_name['ManualStepLearningRate'] = _MANUALSTEPLEARNINGRATE DESCRIPTOR.message_types_by_name['CosineDecayLearningRate'] = _COSINEDECAYLEARNINGRATE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Optimizer = _reflection.GeneratedProtocolMessageType('Optimizer', (_message.Message,), { 'DESCRIPTOR' : _OPTIMIZER, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Optimizer) }) _sym_db.RegisterMessage(Optimizer) RMSPropOptimizer = _reflection.GeneratedProtocolMessageType('RMSPropOptimizer', (_message.Message,), { 'DESCRIPTOR' : _RMSPROPOPTIMIZER, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RMSPropOptimizer) }) _sym_db.RegisterMessage(RMSPropOptimizer) MomentumOptimizer = _reflection.GeneratedProtocolMessageType('MomentumOptimizer', (_message.Message,), { 'DESCRIPTOR' : _MOMENTUMOPTIMIZER, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.MomentumOptimizer) }) _sym_db.RegisterMessage(MomentumOptimizer) AdamOptimizer = _reflection.GeneratedProtocolMessageType('AdamOptimizer', (_message.Message,), { 'DESCRIPTOR' : _ADAMOPTIMIZER, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.AdamOptimizer) }) _sym_db.RegisterMessage(AdamOptimizer) LearningRate = _reflection.GeneratedProtocolMessageType('LearningRate', (_message.Message,), { 'DESCRIPTOR' : _LEARNINGRATE, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.LearningRate) }) _sym_db.RegisterMessage(LearningRate) ConstantLearningRate = _reflection.GeneratedProtocolMessageType('ConstantLearningRate', (_message.Message,), { 'DESCRIPTOR' : _CONSTANTLEARNINGRATE, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ConstantLearningRate) }) _sym_db.RegisterMessage(ConstantLearningRate) ExponentialDecayLearningRate = _reflection.GeneratedProtocolMessageType('ExponentialDecayLearningRate', (_message.Message,), { 'DESCRIPTOR' : 
_EXPONENTIALDECAYLEARNINGRATE, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ExponentialDecayLearningRate) }) _sym_db.RegisterMessage(ExponentialDecayLearningRate) ManualStepLearningRate = _reflection.GeneratedProtocolMessageType('ManualStepLearningRate', (_message.Message,), { 'LearningRateSchedule' : _reflection.GeneratedProtocolMessageType('LearningRateSchedule', (_message.Message,), { 'DESCRIPTOR' : _MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ManualStepLearningRate.LearningRateSchedule) }) , 'DESCRIPTOR' : _MANUALSTEPLEARNINGRATE, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ManualStepLearningRate) }) _sym_db.RegisterMessage(ManualStepLearningRate) _sym_db.RegisterMessage(ManualStepLearningRate.LearningRateSchedule) CosineDecayLearningRate = _reflection.GeneratedProtocolMessageType('CosineDecayLearningRate', (_message.Message,), { 'DESCRIPTOR' : _COSINEDECAYLEARNINGRATE, '__module__' : 'object_detection.protos.optimizer_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CosineDecayLearningRate) }) _sym_db.RegisterMessage(CosineDecayLearningRate) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/optimizer_pb2.py
optimizer_pb2.py
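The optimizer_pb2 module above exposes generated message classes (Optimizer, RMSPropOptimizer, MomentumOptimizer, AdamOptimizer, LearningRate and the learning-rate schedule messages) that carry the optimizer configuration for object_detection. A minimal sketch of how these generated classes might be used is shown below; the import paths follow the '__module__' entries in the file, while the field values are purely illustrative.

# Sketch: building an Optimizer config with the generated messages.
# Assumes the object_detection package and the protobuf runtime are importable.
from google.protobuf import text_format
from object_detection.protos import optimizer_pb2

optimizer = optimizer_pb2.Optimizer()
# Writing into a nested field selects the 'adam_optimizer' branch of the 'optimizer' oneof.
cosine = optimizer.adam_optimizer.learning_rate.cosine_decay_learning_rate
cosine.learning_rate_base = 0.01      # illustrative values, not the proto defaults
cosine.total_steps = 100000
cosine.warmup_steps = 2000
optimizer.use_moving_average = False  # proto default is True

print(optimizer.WhichOneof('optimizer'))       # -> 'adam_optimizer'
print(text_format.MessageToString(optimizer))  # human-readable config text

The same messages are typically populated from a text-format config rather than in code; text_format.Merge(config_text, optimizer_pb2.Optimizer()) parses such a block into the message defined here.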
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/preprocessor.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/preprocessor.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n*object_detection/protos/preprocessor.proto\x12\x17object_detection.protos\"\xd5\x18\n\x11PreprocessingStep\x12\x42\n\x0fnormalize_image\x18\x01 \x01(\x0b\x32\'.object_detection.protos.NormalizeImageH\x00\x12O\n\x16random_horizontal_flip\x18\x02 \x01(\x0b\x32-.object_detection.protos.RandomHorizontalFlipH\x00\x12R\n\x18random_pixel_value_scale\x18\x03 \x01(\x0b\x32..object_detection.protos.RandomPixelValueScaleH\x00\x12G\n\x12random_image_scale\x18\x04 \x01(\x0b\x32).object_detection.protos.RandomImageScaleH\x00\x12\x46\n\x12random_rgb_to_gray\x18\x05 \x01(\x0b\x32(.object_detection.protos.RandomRGBtoGrayH\x00\x12S\n\x18random_adjust_brightness\x18\x06 \x01(\x0b\x32/.object_detection.protos.RandomAdjustBrightnessH\x00\x12O\n\x16random_adjust_contrast\x18\x07 \x01(\x0b\x32-.object_detection.protos.RandomAdjustContrastH\x00\x12\x45\n\x11random_adjust_hue\x18\x08 \x01(\x0b\x32(.object_detection.protos.RandomAdjustHueH\x00\x12S\n\x18random_adjust_saturation\x18\t \x01(\x0b\x32/.object_detection.protos.RandomAdjustSaturationH\x00\x12K\n\x14random_distort_color\x18\n \x01(\x0b\x32+.object_detection.protos.RandomDistortColorH\x00\x12I\n\x13random_jitter_boxes\x18\x0b \x01(\x0b\x32*.object_detection.protos.RandomJitterBoxesH\x00\x12\x45\n\x11random_crop_image\x18\x0c \x01(\x0b\x32(.object_detection.protos.RandomCropImageH\x00\x12\x43\n\x10random_pad_image\x18\r \x01(\x0b\x32\'.object_detection.protos.RandomPadImageH\x00\x12L\n\x15random_crop_pad_image\x18\x0e \x01(\x0b\x32+.object_detection.protos.RandomCropPadImageH\x00\x12W\n\x1brandom_crop_to_aspect_ratio\x18\x0f \x01(\x0b\x32\x30.object_detection.protos.RandomCropToAspectRatioH\x00\x12K\n\x14random_black_patches\x18\x10 \x01(\x0b\x32+.object_detection.protos.RandomBlackPatchesH\x00\x12K\n\x14random_resize_method\x18\x11 \x01(\x0b\x32+.object_detection.protos.RandomResizeMethodH\x00\x12\x61\n scale_boxes_to_pixel_coordinates\x18\x12 \x01(\x0b\x32\x35.object_detection.protos.ScaleBoxesToPixelCoordinatesH\x00\x12<\n\x0cresize_image\x18\x13 \x01(\x0b\x32$.object_detection.protos.ResizeImageH\x00\x12M\n\x15subtract_channel_mean\x18\x14 \x01(\x0b\x32,.object_detection.protos.SubtractChannelMeanH\x00\x12\x41\n\x0fssd_random_crop\x18\x15 \x01(\x0b\x32&.object_detection.protos.SSDRandomCropH\x00\x12H\n\x13ssd_random_crop_pad\x18\x16 \x01(\x0b\x32).object_detection.protos.SSDRandomCropPadH\x00\x12\x64\n\"ssd_random_crop_fixed_aspect_ratio\x18\x17 \x01(\x0b\x32\x36.object_detection.protos.SSDRandomCropFixedAspectRatioH\x00\x12k\n&ssd_random_crop_pad_fixed_aspect_ratio\x18\x18 \x01(\x0b\x32\x39.object_detection.protos.SSDRandomCropPadFixedAspectRatioH\x00\x12K\n\x14random_vertical_flip\x18\x19 \x01(\x0b\x32+.object_detection.protos.RandomVerticalFlipH\x00\x12\x46\n\x11random_rotation90\x18\x1a 
\x01(\x0b\x32).object_detection.protos.RandomRotation90H\x00\x12\x39\n\x0brgb_to_gray\x18\x1b \x01(\x0b\x32\".object_detection.protos.RGBtoGrayH\x00\x12_\n\x1f\x63onvert_class_logits_to_softmax\x18\x1c \x01(\x0b\x32\x34.object_detection.protos.ConvertClassLogitsToSoftmaxH\x00\x12T\n\x19random_absolute_pad_image\x18\x1d \x01(\x0b\x32/.object_detection.protos.RandomAbsolutePadImageH\x00\x12R\n\x18random_self_concat_image\x18\x1e \x01(\x0b\x32..object_detection.protos.RandomSelfConcatImageH\x00\x12\x46\n\x11\x61utoaugment_image\x18\x1f \x01(\x0b\x32).object_detection.protos.AutoAugmentImageH\x00\x12[\n\x1c\x64rop_label_probabilistically\x18 \x01(\x0b\x32\x33.object_detection.protos.DropLabelProbabilisticallyH\x00\x12<\n\x0cremap_labels\x18! \x01(\x0b\x32$.object_detection.protos.RemapLabelsH\x00\x12I\n\x13random_jpeg_quality\x18\" \x01(\x0b\x32*.object_detection.protos.RandomJpegQualityH\x00\x12\x63\n!random_downscale_to_target_pixels\x18# \x01(\x0b\x32\x36.object_detection.protos.RandomDownscaleToTargetPixelsH\x00\x12M\n\x15random_patch_gaussian\x18$ \x01(\x0b\x32,.object_detection.protos.RandomPatchGaussianH\x00\x12W\n\x1brandom_square_crop_by_scale\x18% \x01(\x0b\x32\x30.object_detection.protos.RandomSquareCropByScaleH\x00\x12\x65\n#random_scale_crop_and_pad_to_square\x18& \x01(\x0b\x32\x36.object_detection.protos.RandomScaleCropAndPadToSquareH\x00\x12<\n\x0c\x61\x64just_gamma\x18\' \x01(\x0b\x32$.object_detection.protos.AdjustGammaH\x00\x42\x14\n\x12preprocessing_step\"v\n\x0eNormalizeImage\x12\x17\n\x0foriginal_minval\x18\x01 \x01(\x02\x12\x17\n\x0foriginal_maxval\x18\x02 \x01(\x02\x12\x18\n\rtarget_minval\x18\x03 \x01(\x02:\x01\x30\x12\x18\n\rtarget_maxval\x18\x04 \x01(\x02:\x01\x31\"S\n\x14RandomHorizontalFlip\x12!\n\x19keypoint_flip_permutation\x18\x01 \x03(\x05\x12\x18\n\x0bprobability\x18\x02 \x01(\x02:\x03\x30.5\"Q\n\x12RandomVerticalFlip\x12!\n\x19keypoint_flip_permutation\x18\x01 \x03(\x05\x12\x18\n\x0bprobability\x18\x02 \x01(\x02:\x03\x30.5\"N\n\x10RandomRotation90\x12 \n\x18keypoint_rot_permutation\x18\x01 \x03(\x05\x12\x18\n\x0bprobability\x18\x02 \x01(\x02:\x03\x30.5\"A\n\x15RandomPixelValueScale\x12\x13\n\x06minval\x18\x01 \x01(\x02:\x03\x30.9\x12\x13\n\x06maxval\x18\x02 \x01(\x02:\x03\x31.1\"L\n\x10RandomImageScale\x12\x1c\n\x0fmin_scale_ratio\x18\x01 \x01(\x02:\x03\x30.5\x12\x1a\n\x0fmax_scale_ratio\x18\x02 \x01(\x02:\x01\x32\"+\n\x0fRandomRGBtoGray\x12\x18\n\x0bprobability\x18\x01 \x01(\x02:\x03\x30.1\"0\n\x16RandomAdjustBrightness\x12\x16\n\tmax_delta\x18\x01 \x01(\x02:\x03\x30.2\"G\n\x14RandomAdjustContrast\x12\x16\n\tmin_delta\x18\x01 \x01(\x02:\x03\x30.8\x12\x17\n\tmax_delta\x18\x02 \x01(\x02:\x04\x31.25\"*\n\x0fRandomAdjustHue\x12\x17\n\tmax_delta\x18\x01 \x01(\x02:\x04\x30.02\"I\n\x16RandomAdjustSaturation\x12\x16\n\tmin_delta\x18\x01 \x01(\x02:\x03\x30.8\x12\x17\n\tmax_delta\x18\x02 \x01(\x02:\x04\x31.25\",\n\x12RandomDistortColor\x12\x16\n\x0e\x63olor_ordering\x18\x01 \x01(\x05\"\x8f\x02\n\x11RandomJitterBoxes\x12\x13\n\x05ratio\x18\x01 \x01(\x02:\x04\x30.05\x12S\n\x0bjitter_mode\x18\x02 \x01(\x0e\x32\x35.object_detection.protos.RandomJitterBoxes.JitterMode:\x07\x44\x45\x46\x41ULT\"\x8f\x01\n\nJitterMode\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\n\n\x06\x45XPAND\x10\x01\x12\n\n\x06SHRINK\x10\x02\x12\x14\n\x10\x45XPAND_SYMMETRIC\x10\x04\x12\x14\n\x10SHRINK_SYMMETRIC\x10\x05\x12\x17\n\x13\x45XPAND_SYMMETRIC_XY\x10\x06\x12\x17\n\x13SHRINK_SYMMETRIC_XY\x10\x07\"\xeb\x01\n\x0fRandomCropImage\x12\x1d\n\x12min_object_covered\x18\x01 
\x01(\x02:\x01\x31\x12\x1e\n\x10min_aspect_ratio\x18\x02 \x01(\x02:\x04\x30.75\x12\x1e\n\x10max_aspect_ratio\x18\x03 \x01(\x02:\x04\x31.33\x12\x15\n\x08min_area\x18\x04 \x01(\x02:\x03\x30.1\x12\x13\n\x08max_area\x18\x05 \x01(\x02:\x01\x31\x12\x1b\n\x0eoverlap_thresh\x18\x06 \x01(\x02:\x03\x30.3\x12\x18\n\nclip_boxes\x18\x08 \x01(\x08:\x04true\x12\x16\n\x0brandom_coef\x18\x07 \x01(\x02:\x01\x30\"\x89\x01\n\x0eRandomPadImage\x12\x18\n\x10min_image_height\x18\x01 \x01(\x05\x12\x17\n\x0fmin_image_width\x18\x02 \x01(\x05\x12\x18\n\x10max_image_height\x18\x03 \x01(\x05\x12\x17\n\x0fmax_image_width\x18\x04 \x01(\x05\x12\x11\n\tpad_color\x18\x05 \x03(\x02\"b\n\x16RandomAbsolutePadImage\x12\x1a\n\x12max_height_padding\x18\x01 \x01(\x05\x12\x19\n\x11max_width_padding\x18\x02 \x01(\x05\x12\x11\n\tpad_color\x18\x03 \x03(\x02\"\xbf\x02\n\x12RandomCropPadImage\x12\x1d\n\x12min_object_covered\x18\x01 \x01(\x02:\x01\x31\x12\x1e\n\x10min_aspect_ratio\x18\x02 \x01(\x02:\x04\x30.75\x12\x1e\n\x10max_aspect_ratio\x18\x03 \x01(\x02:\x04\x31.33\x12\x15\n\x08min_area\x18\x04 \x01(\x02:\x03\x30.1\x12\x13\n\x08max_area\x18\x05 \x01(\x02:\x01\x31\x12\x1b\n\x0eoverlap_thresh\x18\x06 \x01(\x02:\x03\x30.3\x12\x18\n\nclip_boxes\x18\x0b \x01(\x08:\x04true\x12\x16\n\x0brandom_coef\x18\x07 \x01(\x02:\x01\x30\x12\x1d\n\x15min_padded_size_ratio\x18\x08 \x03(\x02\x12\x1d\n\x15max_padded_size_ratio\x18\t \x03(\x02\x12\x11\n\tpad_color\x18\n \x03(\x02\"i\n\x17RandomCropToAspectRatio\x12\x17\n\x0c\x61spect_ratio\x18\x01 \x01(\x02:\x01\x31\x12\x1b\n\x0eoverlap_thresh\x18\x02 \x01(\x02:\x03\x30.3\x12\x18\n\nclip_boxes\x18\x03 \x01(\x08:\x04true\"o\n\x12RandomBlackPatches\x12\x1d\n\x11max_black_patches\x18\x01 \x01(\x05:\x02\x31\x30\x12\x18\n\x0bprobability\x18\x02 \x01(\x02:\x03\x30.5\x12 \n\x13size_to_image_ratio\x18\x03 \x01(\x02:\x03\x30.1\"A\n\x12RandomResizeMethod\x12\x15\n\rtarget_height\x18\x01 \x01(\x05\x12\x14\n\x0ctarget_width\x18\x02 \x01(\x05\"\x0b\n\tRGBtoGray\"\x1e\n\x1cScaleBoxesToPixelCoordinates\"\xc0\x01\n\x0bResizeImage\x12\x12\n\nnew_height\x18\x01 \x01(\x05\x12\x11\n\tnew_width\x18\x02 \x01(\x05\x12\x45\n\x06method\x18\x03 \x01(\x0e\x32+.object_detection.protos.ResizeImage.Method:\x08\x42ILINEAR\"C\n\x06Method\x12\x08\n\x04\x41REA\x10\x01\x12\x0b\n\x07\x42ICUBIC\x10\x02\x12\x0c\n\x08\x42ILINEAR\x10\x03\x12\x14\n\x10NEAREST_NEIGHBOR\x10\x04\"$\n\x13SubtractChannelMean\x12\r\n\x05means\x18\x01 \x03(\x02\"\xd3\x01\n\x16SSDRandomCropOperation\x12\x1a\n\x12min_object_covered\x18\x01 \x01(\x02\x12\x18\n\x10min_aspect_ratio\x18\x02 \x01(\x02\x12\x18\n\x10max_aspect_ratio\x18\x03 \x01(\x02\x12\x10\n\x08min_area\x18\x04 \x01(\x02\x12\x10\n\x08max_area\x18\x05 \x01(\x02\x12\x16\n\x0eoverlap_thresh\x18\x06 \x01(\x02\x12\x18\n\nclip_boxes\x18\x08 \x01(\x08:\x04true\x12\x13\n\x0brandom_coef\x18\x07 \x01(\x02\"T\n\rSSDRandomCrop\x12\x43\n\noperations\x18\x01 \x03(\x0b\x32/.object_detection.protos.SSDRandomCropOperation\"\xd3\x02\n\x19SSDRandomCropPadOperation\x12\x1a\n\x12min_object_covered\x18\x01 \x01(\x02\x12\x18\n\x10min_aspect_ratio\x18\x02 \x01(\x02\x12\x18\n\x10max_aspect_ratio\x18\x03 \x01(\x02\x12\x10\n\x08min_area\x18\x04 \x01(\x02\x12\x10\n\x08max_area\x18\x05 \x01(\x02\x12\x16\n\x0eoverlap_thresh\x18\x06 \x01(\x02\x12\x18\n\nclip_boxes\x18\r \x01(\x08:\x04true\x12\x13\n\x0brandom_coef\x18\x07 \x01(\x02\x12\x1d\n\x15min_padded_size_ratio\x18\x08 \x03(\x02\x12\x1d\n\x15max_padded_size_ratio\x18\t \x03(\x02\x12\x13\n\x0bpad_color_r\x18\n \x01(\x02\x12\x13\n\x0bpad_color_g\x18\x0b 
\x01(\x02\x12\x13\n\x0bpad_color_b\x18\x0c \x01(\x02\"Z\n\x10SSDRandomCropPad\x12\x46\n\noperations\x18\x01 \x03(\x0b\x32\x32.object_detection.protos.SSDRandomCropPadOperation\"\xaf\x01\n&SSDRandomCropFixedAspectRatioOperation\x12\x1a\n\x12min_object_covered\x18\x01 \x01(\x02\x12\x10\n\x08min_area\x18\x04 \x01(\x02\x12\x10\n\x08max_area\x18\x05 \x01(\x02\x12\x16\n\x0eoverlap_thresh\x18\x06 \x01(\x02\x12\x18\n\nclip_boxes\x18\x08 \x01(\x08:\x04true\x12\x13\n\x0brandom_coef\x18\x07 \x01(\x02\"\x8d\x01\n\x1dSSDRandomCropFixedAspectRatio\x12S\n\noperations\x18\x01 \x03(\x0b\x32?.object_detection.protos.SSDRandomCropFixedAspectRatioOperation\x12\x17\n\x0c\x61spect_ratio\x18\x02 \x01(\x02:\x01\x31\"\xe6\x01\n)SSDRandomCropPadFixedAspectRatioOperation\x12\x1a\n\x12min_object_covered\x18\x01 \x01(\x02\x12\x18\n\x10min_aspect_ratio\x18\x02 \x01(\x02\x12\x18\n\x10max_aspect_ratio\x18\x03 \x01(\x02\x12\x10\n\x08min_area\x18\x04 \x01(\x02\x12\x10\n\x08max_area\x18\x05 \x01(\x02\x12\x16\n\x0eoverlap_thresh\x18\x06 \x01(\x02\x12\x18\n\nclip_boxes\x18\x08 \x01(\x08:\x04true\x12\x13\n\x0brandom_coef\x18\x07 \x01(\x02\"\xd1\x01\n SSDRandomCropPadFixedAspectRatio\x12V\n\noperations\x18\x01 \x03(\x0b\x32\x42.object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation\x12\x17\n\x0c\x61spect_ratio\x18\x02 \x01(\x02:\x01\x31\x12\x1d\n\x15min_padded_size_ratio\x18\x03 \x03(\x02\x12\x1d\n\x15max_padded_size_ratio\x18\x04 \x03(\x02\"5\n\x1b\x43onvertClassLogitsToSoftmax\x12\x16\n\x0btemperature\x18\x01 \x01(\x02:\x01\x31\"m\n\x15RandomSelfConcatImage\x12(\n\x1b\x63oncat_vertical_probability\x18\x01 \x01(\x02:\x03\x30.1\x12*\n\x1d\x63oncat_horizontal_probability\x18\x02 \x01(\x02:\x03\x30.1\"+\n\x10\x41utoAugmentImage\x12\x17\n\x0bpolicy_name\x18\x01 \x01(\t:\x02v0\"H\n\x1a\x44ropLabelProbabilistically\x12\r\n\x05label\x18\x01 \x01(\x05\x12\x1b\n\x10\x64rop_probability\x18\x02 \x01(\x02:\x01\x31\"9\n\x0bRemapLabels\x12\x17\n\x0foriginal_labels\x18\x01 \x03(\x05\x12\x11\n\tnew_label\x18\x02 \x01(\x05\"g\n\x11RandomJpegQuality\x12\x16\n\x0brandom_coef\x18\x01 \x01(\x02:\x01\x30\x12\x1b\n\x10min_jpeg_quality\x18\x02 \x01(\x05:\x01\x30\x12\x1d\n\x10max_jpeg_quality\x18\x03 \x01(\x05:\x03\x31\x30\x30\"}\n\x1dRandomDownscaleToTargetPixels\x12\x16\n\x0brandom_coef\x18\x01 \x01(\x02:\x01\x30\x12!\n\x11min_target_pixels\x18\x02 \x01(\x05:\x06\x33\x30\x30\x30\x30\x30\x12!\n\x11max_target_pixels\x18\x03 \x01(\x05:\x06\x35\x30\x30\x30\x30\x30\"\xa5\x01\n\x13RandomPatchGaussian\x12\x16\n\x0brandom_coef\x18\x01 \x01(\x02:\x01\x30\x12\x19\n\x0emin_patch_size\x18\x02 \x01(\x05:\x01\x31\x12\x1b\n\x0emax_patch_size\x18\x03 \x01(\x05:\x03\x32\x35\x30\x12\x1e\n\x13min_gaussian_stddev\x18\x04 \x01(\x02:\x01\x30\x12\x1e\n\x13max_gaussian_stddev\x18\x05 \x01(\x02:\x01\x31\"y\n\x17RandomSquareCropByScale\x12\x17\n\nmax_border\x18\x01 \x01(\x05:\x03\x31\x32\x38\x12\x16\n\tscale_min\x18\x02 \x01(\x02:\x03\x30.6\x12\x16\n\tscale_max\x18\x03 \x01(\x02:\x03\x31.3\x12\x15\n\nnum_scales\x18\x04 \x01(\x05:\x01\x38\"g\n\x1dRandomScaleCropAndPadToSquare\x12\x18\n\x0boutput_size\x18\x01 \x01(\x05:\x03\x35\x31\x32\x12\x16\n\tscale_min\x18\x02 \x01(\x02:\x03\x30.1\x12\x14\n\tscale_max\x18\x03 \x01(\x02:\x01\x32\"0\n\x0b\x41\x64justGamma\x12\x10\n\x05gamma\x18\x01 \x01(\x02:\x01\x31\x12\x0f\n\x04gain\x18\x02 \x01(\x02:\x01\x31' ) _RANDOMJITTERBOXES_JITTERMODE = _descriptor.EnumDescriptor( name='JitterMode', full_name='object_detection.protos.RandomJitterBoxes.JitterMode', filename=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXPAND', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SHRINK', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXPAND_SYMMETRIC', index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SHRINK_SYMMETRIC', index=4, number=5, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXPAND_SYMMETRIC_XY', index=5, number=6, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SHRINK_SYMMETRIC_XY', index=6, number=7, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=4206, serialized_end=4349, ) _sym_db.RegisterEnumDescriptor(_RANDOMJITTERBOXES_JITTERMODE) _RESIZEIMAGE_METHOD = _descriptor.EnumDescriptor( name='Method', full_name='object_detection.protos.ResizeImage.Method', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='AREA', index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BICUBIC', index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BILINEAR', index=2, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='NEAREST_NEIGHBOR', index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=5609, serialized_end=5676, ) _sym_db.RegisterEnumDescriptor(_RESIZEIMAGE_METHOD) _PREPROCESSINGSTEP = _descriptor.Descriptor( name='PreprocessingStep', full_name='object_detection.protos.PreprocessingStep', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='normalize_image', full_name='object_detection.protos.PreprocessingStep.normalize_image', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_horizontal_flip', full_name='object_detection.protos.PreprocessingStep.random_horizontal_flip', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_pixel_value_scale', full_name='object_detection.protos.PreprocessingStep.random_pixel_value_scale', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, 
default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_image_scale', full_name='object_detection.protos.PreprocessingStep.random_image_scale', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_rgb_to_gray', full_name='object_detection.protos.PreprocessingStep.random_rgb_to_gray', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_adjust_brightness', full_name='object_detection.protos.PreprocessingStep.random_adjust_brightness', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_adjust_contrast', full_name='object_detection.protos.PreprocessingStep.random_adjust_contrast', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_adjust_hue', full_name='object_detection.protos.PreprocessingStep.random_adjust_hue', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_adjust_saturation', full_name='object_detection.protos.PreprocessingStep.random_adjust_saturation', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_distort_color', full_name='object_detection.protos.PreprocessingStep.random_distort_color', index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_jitter_boxes', full_name='object_detection.protos.PreprocessingStep.random_jitter_boxes', index=10, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='random_crop_image', full_name='object_detection.protos.PreprocessingStep.random_crop_image', index=11, number=12, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_pad_image', full_name='object_detection.protos.PreprocessingStep.random_pad_image', index=12, number=13, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_crop_pad_image', full_name='object_detection.protos.PreprocessingStep.random_crop_pad_image', index=13, number=14, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_crop_to_aspect_ratio', full_name='object_detection.protos.PreprocessingStep.random_crop_to_aspect_ratio', index=14, number=15, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_black_patches', full_name='object_detection.protos.PreprocessingStep.random_black_patches', index=15, number=16, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_resize_method', full_name='object_detection.protos.PreprocessingStep.random_resize_method', index=16, number=17, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale_boxes_to_pixel_coordinates', full_name='object_detection.protos.PreprocessingStep.scale_boxes_to_pixel_coordinates', index=17, number=18, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='resize_image', full_name='object_detection.protos.PreprocessingStep.resize_image', index=18, number=19, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='subtract_channel_mean', full_name='object_detection.protos.PreprocessingStep.subtract_channel_mean', index=19, number=20, type=11, cpp_type=10, label=1, 
has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ssd_random_crop', full_name='object_detection.protos.PreprocessingStep.ssd_random_crop', index=20, number=21, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ssd_random_crop_pad', full_name='object_detection.protos.PreprocessingStep.ssd_random_crop_pad', index=21, number=22, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ssd_random_crop_fixed_aspect_ratio', full_name='object_detection.protos.PreprocessingStep.ssd_random_crop_fixed_aspect_ratio', index=22, number=23, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ssd_random_crop_pad_fixed_aspect_ratio', full_name='object_detection.protos.PreprocessingStep.ssd_random_crop_pad_fixed_aspect_ratio', index=23, number=24, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_vertical_flip', full_name='object_detection.protos.PreprocessingStep.random_vertical_flip', index=24, number=25, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_rotation90', full_name='object_detection.protos.PreprocessingStep.random_rotation90', index=25, number=26, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rgb_to_gray', full_name='object_detection.protos.PreprocessingStep.rgb_to_gray', index=26, number=27, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convert_class_logits_to_softmax', full_name='object_detection.protos.PreprocessingStep.convert_class_logits_to_softmax', index=27, number=28, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_absolute_pad_image', full_name='object_detection.protos.PreprocessingStep.random_absolute_pad_image', index=28, number=29, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_self_concat_image', full_name='object_detection.protos.PreprocessingStep.random_self_concat_image', index=29, number=30, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='autoaugment_image', full_name='object_detection.protos.PreprocessingStep.autoaugment_image', index=30, number=31, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='drop_label_probabilistically', full_name='object_detection.protos.PreprocessingStep.drop_label_probabilistically', index=31, number=32, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='remap_labels', full_name='object_detection.protos.PreprocessingStep.remap_labels', index=32, number=33, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_jpeg_quality', full_name='object_detection.protos.PreprocessingStep.random_jpeg_quality', index=33, number=34, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_downscale_to_target_pixels', full_name='object_detection.protos.PreprocessingStep.random_downscale_to_target_pixels', index=34, number=35, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_patch_gaussian', full_name='object_detection.protos.PreprocessingStep.random_patch_gaussian', index=35, number=36, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_square_crop_by_scale', 
full_name='object_detection.protos.PreprocessingStep.random_square_crop_by_scale', index=36, number=37, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_scale_crop_and_pad_to_square', full_name='object_detection.protos.PreprocessingStep.random_scale_crop_and_pad_to_square', index=37, number=38, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='adjust_gamma', full_name='object_detection.protos.PreprocessingStep.adjust_gamma', index=38, number=39, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='preprocessing_step', full_name='object_detection.protos.PreprocessingStep.preprocessing_step', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=72, serialized_end=3229, ) _NORMALIZEIMAGE = _descriptor.Descriptor( name='NormalizeImage', full_name='object_detection.protos.NormalizeImage', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='original_minval', full_name='object_detection.protos.NormalizeImage.original_minval', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='original_maxval', full_name='object_detection.protos.NormalizeImage.original_maxval', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='target_minval', full_name='object_detection.protos.NormalizeImage.target_minval', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='target_maxval', full_name='object_detection.protos.NormalizeImage.target_maxval', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, 
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3231, serialized_end=3349, ) _RANDOMHORIZONTALFLIP = _descriptor.Descriptor( name='RandomHorizontalFlip', full_name='object_detection.protos.RandomHorizontalFlip', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='keypoint_flip_permutation', full_name='object_detection.protos.RandomHorizontalFlip.keypoint_flip_permutation', index=0, number=1, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='probability', full_name='object_detection.protos.RandomHorizontalFlip.probability', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3351, serialized_end=3434, ) _RANDOMVERTICALFLIP = _descriptor.Descriptor( name='RandomVerticalFlip', full_name='object_detection.protos.RandomVerticalFlip', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='keypoint_flip_permutation', full_name='object_detection.protos.RandomVerticalFlip.keypoint_flip_permutation', index=0, number=1, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='probability', full_name='object_detection.protos.RandomVerticalFlip.probability', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3436, serialized_end=3517, ) _RANDOMROTATION90 = _descriptor.Descriptor( name='RandomRotation90', full_name='object_detection.protos.RandomRotation90', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='keypoint_rot_permutation', full_name='object_detection.protos.RandomRotation90.keypoint_rot_permutation', index=0, number=1, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='probability', full_name='object_detection.protos.RandomRotation90.probability', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3519, serialized_end=3597, ) _RANDOMPIXELVALUESCALE = _descriptor.Descriptor( name='RandomPixelValueScale', full_name='object_detection.protos.RandomPixelValueScale', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='minval', full_name='object_detection.protos.RandomPixelValueScale.minval', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.9), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='maxval', full_name='object_detection.protos.RandomPixelValueScale.maxval', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3599, serialized_end=3664, ) _RANDOMIMAGESCALE = _descriptor.Descriptor( name='RandomImageScale', full_name='object_detection.protos.RandomImageScale', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_scale_ratio', full_name='object_detection.protos.RandomImageScale.min_scale_ratio', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_scale_ratio', full_name='object_detection.protos.RandomImageScale.max_scale_ratio', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(2), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3666, serialized_end=3742, ) _RANDOMRGBTOGRAY = _descriptor.Descriptor( name='RandomRGBtoGray', full_name='object_detection.protos.RandomRGBtoGray', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='probability', full_name='object_detection.protos.RandomRGBtoGray.probability', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, 
syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3744, serialized_end=3787, ) _RANDOMADJUSTBRIGHTNESS = _descriptor.Descriptor( name='RandomAdjustBrightness', full_name='object_detection.protos.RandomAdjustBrightness', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='max_delta', full_name='object_detection.protos.RandomAdjustBrightness.max_delta', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.2), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3789, serialized_end=3837, ) _RANDOMADJUSTCONTRAST = _descriptor.Descriptor( name='RandomAdjustContrast', full_name='object_detection.protos.RandomAdjustContrast', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_delta', full_name='object_detection.protos.RandomAdjustContrast.min_delta', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.8), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_delta', full_name='object_detection.protos.RandomAdjustContrast.max_delta', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1.25), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3839, serialized_end=3910, ) _RANDOMADJUSTHUE = _descriptor.Descriptor( name='RandomAdjustHue', full_name='object_detection.protos.RandomAdjustHue', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='max_delta', full_name='object_detection.protos.RandomAdjustHue.max_delta', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.02), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3912, serialized_end=3954, ) _RANDOMADJUSTSATURATION = _descriptor.Descriptor( name='RandomAdjustSaturation', full_name='object_detection.protos.RandomAdjustSaturation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_delta', full_name='object_detection.protos.RandomAdjustSaturation.min_delta', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.8), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_delta', full_name='object_detection.protos.RandomAdjustSaturation.max_delta', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1.25), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=3956, serialized_end=4029, ) _RANDOMDISTORTCOLOR = _descriptor.Descriptor( name='RandomDistortColor', full_name='object_detection.protos.RandomDistortColor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='color_ordering', full_name='object_detection.protos.RandomDistortColor.color_ordering', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4031, serialized_end=4075, ) _RANDOMJITTERBOXES = _descriptor.Descriptor( name='RandomJitterBoxes', full_name='object_detection.protos.RandomJitterBoxes', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='ratio', full_name='object_detection.protos.RandomJitterBoxes.ratio', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.05), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='jitter_mode', full_name='object_detection.protos.RandomJitterBoxes.jitter_mode', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _RANDOMJITTERBOXES_JITTERMODE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4078, serialized_end=4349, ) _RANDOMCROPIMAGE = _descriptor.Descriptor( name='RandomCropImage', full_name='object_detection.protos.RandomCropImage', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_object_covered', full_name='object_detection.protos.RandomCropImage.min_object_covered', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_aspect_ratio', full_name='object_detection.protos.RandomCropImage.min_aspect_ratio', index=1, 
number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_aspect_ratio', full_name='object_detection.protos.RandomCropImage.max_aspect_ratio', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1.33), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_area', full_name='object_detection.protos.RandomCropImage.min_area', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_area', full_name='object_detection.protos.RandomCropImage.max_area', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='overlap_thresh', full_name='object_detection.protos.RandomCropImage.overlap_thresh', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.3), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_boxes', full_name='object_detection.protos.RandomCropImage.clip_boxes', index=6, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.RandomCropImage.random_coef', index=7, number=7, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4352, serialized_end=4587, ) _RANDOMPADIMAGE = _descriptor.Descriptor( name='RandomPadImage', full_name='object_detection.protos.RandomPadImage', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_image_height', full_name='object_detection.protos.RandomPadImage.min_image_height', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_image_width', 
full_name='object_detection.protos.RandomPadImage.min_image_width', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_image_height', full_name='object_detection.protos.RandomPadImage.max_image_height', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_image_width', full_name='object_detection.protos.RandomPadImage.max_image_width', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_color', full_name='object_detection.protos.RandomPadImage.pad_color', index=4, number=5, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4590, serialized_end=4727, ) _RANDOMABSOLUTEPADIMAGE = _descriptor.Descriptor( name='RandomAbsolutePadImage', full_name='object_detection.protos.RandomAbsolutePadImage', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='max_height_padding', full_name='object_detection.protos.RandomAbsolutePadImage.max_height_padding', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_width_padding', full_name='object_detection.protos.RandomAbsolutePadImage.max_width_padding', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_color', full_name='object_detection.protos.RandomAbsolutePadImage.pad_color', index=2, number=3, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4729, serialized_end=4827, ) _RANDOMCROPPADIMAGE = _descriptor.Descriptor( name='RandomCropPadImage', full_name='object_detection.protos.RandomCropPadImage', filename=None, file=DESCRIPTOR, 
containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_object_covered', full_name='object_detection.protos.RandomCropPadImage.min_object_covered', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_aspect_ratio', full_name='object_detection.protos.RandomCropPadImage.min_aspect_ratio', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_aspect_ratio', full_name='object_detection.protos.RandomCropPadImage.max_aspect_ratio', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1.33), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_area', full_name='object_detection.protos.RandomCropPadImage.min_area', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_area', full_name='object_detection.protos.RandomCropPadImage.max_area', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='overlap_thresh', full_name='object_detection.protos.RandomCropPadImage.overlap_thresh', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.3), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_boxes', full_name='object_detection.protos.RandomCropPadImage.clip_boxes', index=6, number=11, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.RandomCropPadImage.random_coef', index=7, number=7, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_padded_size_ratio', full_name='object_detection.protos.RandomCropPadImage.min_padded_size_ratio', index=8, number=8, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_padded_size_ratio', full_name='object_detection.protos.RandomCropPadImage.max_padded_size_ratio', index=9, number=9, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_color', full_name='object_detection.protos.RandomCropPadImage.pad_color', index=10, number=10, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=4830, serialized_end=5149, ) _RANDOMCROPTOASPECTRATIO = _descriptor.Descriptor( name='RandomCropToAspectRatio', full_name='object_detection.protos.RandomCropToAspectRatio', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='aspect_ratio', full_name='object_detection.protos.RandomCropToAspectRatio.aspect_ratio', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='overlap_thresh', full_name='object_detection.protos.RandomCropToAspectRatio.overlap_thresh', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.3), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_boxes', full_name='object_detection.protos.RandomCropToAspectRatio.clip_boxes', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5151, serialized_end=5256, ) _RANDOMBLACKPATCHES = _descriptor.Descriptor( name='RandomBlackPatches', full_name='object_detection.protos.RandomBlackPatches', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='max_black_patches', full_name='object_detection.protos.RandomBlackPatches.max_black_patches', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=10, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='probability', 
full_name='object_detection.protos.RandomBlackPatches.probability', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='size_to_image_ratio', full_name='object_detection.protos.RandomBlackPatches.size_to_image_ratio', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5258, serialized_end=5369, ) _RANDOMRESIZEMETHOD = _descriptor.Descriptor( name='RandomResizeMethod', full_name='object_detection.protos.RandomResizeMethod', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='target_height', full_name='object_detection.protos.RandomResizeMethod.target_height', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='target_width', full_name='object_detection.protos.RandomResizeMethod.target_width', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5371, serialized_end=5436, ) _RGBTOGRAY = _descriptor.Descriptor( name='RGBtoGray', full_name='object_detection.protos.RGBtoGray', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5438, serialized_end=5449, ) _SCALEBOXESTOPIXELCOORDINATES = _descriptor.Descriptor( name='ScaleBoxesToPixelCoordinates', full_name='object_detection.protos.ScaleBoxesToPixelCoordinates', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5451, serialized_end=5481, ) _RESIZEIMAGE = _descriptor.Descriptor( name='ResizeImage', full_name='object_detection.protos.ResizeImage', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='new_height', full_name='object_detection.protos.ResizeImage.new_height', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='new_width', full_name='object_detection.protos.ResizeImage.new_width', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='method', full_name='object_detection.protos.ResizeImage.method', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _RESIZEIMAGE_METHOD, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5484, serialized_end=5676, ) _SUBTRACTCHANNELMEAN = _descriptor.Descriptor( name='SubtractChannelMean', full_name='object_detection.protos.SubtractChannelMean', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='means', full_name='object_detection.protos.SubtractChannelMean.means', index=0, number=1, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5678, serialized_end=5714, ) _SSDRANDOMCROPOPERATION = _descriptor.Descriptor( name='SSDRandomCropOperation', full_name='object_detection.protos.SSDRandomCropOperation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_object_covered', full_name='object_detection.protos.SSDRandomCropOperation.min_object_covered', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_aspect_ratio', full_name='object_detection.protos.SSDRandomCropOperation.min_aspect_ratio', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_aspect_ratio', full_name='object_detection.protos.SSDRandomCropOperation.max_aspect_ratio', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_area', full_name='object_detection.protos.SSDRandomCropOperation.min_area', index=3, number=4, type=2, cpp_type=6, 
label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_area', full_name='object_detection.protos.SSDRandomCropOperation.max_area', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='overlap_thresh', full_name='object_detection.protos.SSDRandomCropOperation.overlap_thresh', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_boxes', full_name='object_detection.protos.SSDRandomCropOperation.clip_boxes', index=6, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.SSDRandomCropOperation.random_coef', index=7, number=7, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5717, serialized_end=5928, ) _SSDRANDOMCROP = _descriptor.Descriptor( name='SSDRandomCrop', full_name='object_detection.protos.SSDRandomCrop', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='operations', full_name='object_detection.protos.SSDRandomCrop.operations', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5930, serialized_end=6014, ) _SSDRANDOMCROPPADOPERATION = _descriptor.Descriptor( name='SSDRandomCropPadOperation', full_name='object_detection.protos.SSDRandomCropPadOperation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_object_covered', full_name='object_detection.protos.SSDRandomCropPadOperation.min_object_covered', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='min_aspect_ratio', full_name='object_detection.protos.SSDRandomCropPadOperation.min_aspect_ratio', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_aspect_ratio', full_name='object_detection.protos.SSDRandomCropPadOperation.max_aspect_ratio', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_area', full_name='object_detection.protos.SSDRandomCropPadOperation.min_area', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_area', full_name='object_detection.protos.SSDRandomCropPadOperation.max_area', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='overlap_thresh', full_name='object_detection.protos.SSDRandomCropPadOperation.overlap_thresh', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_boxes', full_name='object_detection.protos.SSDRandomCropPadOperation.clip_boxes', index=6, number=13, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.SSDRandomCropPadOperation.random_coef', index=7, number=7, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_padded_size_ratio', full_name='object_detection.protos.SSDRandomCropPadOperation.min_padded_size_ratio', index=8, number=8, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_padded_size_ratio', full_name='object_detection.protos.SSDRandomCropPadOperation.max_padded_size_ratio', index=9, number=9, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_color_r', full_name='object_detection.protos.SSDRandomCropPadOperation.pad_color_r', index=10, number=10, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_color_g', full_name='object_detection.protos.SSDRandomCropPadOperation.pad_color_g', index=11, number=11, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_color_b', full_name='object_detection.protos.SSDRandomCropPadOperation.pad_color_b', index=12, number=12, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6017, serialized_end=6356, ) _SSDRANDOMCROPPAD = _descriptor.Descriptor( name='SSDRandomCropPad', full_name='object_detection.protos.SSDRandomCropPad', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='operations', full_name='object_detection.protos.SSDRandomCropPad.operations', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6358, serialized_end=6448, ) _SSDRANDOMCROPFIXEDASPECTRATIOOPERATION = _descriptor.Descriptor( name='SSDRandomCropFixedAspectRatioOperation', full_name='object_detection.protos.SSDRandomCropFixedAspectRatioOperation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min_object_covered', full_name='object_detection.protos.SSDRandomCropFixedAspectRatioOperation.min_object_covered', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_area', full_name='object_detection.protos.SSDRandomCropFixedAspectRatioOperation.min_area', index=1, number=4, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_area', full_name='object_detection.protos.SSDRandomCropFixedAspectRatioOperation.max_area', index=2, number=5, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='overlap_thresh', full_name='object_detection.protos.SSDRandomCropFixedAspectRatioOperation.overlap_thresh', index=3, number=6, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_boxes', full_name='object_detection.protos.SSDRandomCropFixedAspectRatioOperation.clip_boxes', index=4, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.SSDRandomCropFixedAspectRatioOperation.random_coef', index=5, number=7, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6451, serialized_end=6626, ) _SSDRANDOMCROPFIXEDASPECTRATIO = _descriptor.Descriptor( name='SSDRandomCropFixedAspectRatio', full_name='object_detection.protos.SSDRandomCropFixedAspectRatio', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='operations', full_name='object_detection.protos.SSDRandomCropFixedAspectRatio.operations', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='aspect_ratio', full_name='object_detection.protos.SSDRandomCropFixedAspectRatio.aspect_ratio', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6629, serialized_end=6770, ) _SSDRANDOMCROPPADFIXEDASPECTRATIOOPERATION = _descriptor.Descriptor( name='SSDRandomCropPadFixedAspectRatioOperation', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name='min_object_covered', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation.min_object_covered', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_aspect_ratio', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation.min_aspect_ratio', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_aspect_ratio', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation.max_aspect_ratio', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_area', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation.min_area', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_area', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation.max_area', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='overlap_thresh', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation.overlap_thresh', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_boxes', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation.clip_boxes', index=6, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation.random_coef', index=7, number=7, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6773, 
serialized_end=7003, ) _SSDRANDOMCROPPADFIXEDASPECTRATIO = _descriptor.Descriptor( name='SSDRandomCropPadFixedAspectRatio', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatio', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='operations', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatio.operations', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='aspect_ratio', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatio.aspect_ratio', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_padded_size_ratio', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatio.min_padded_size_ratio', index=2, number=3, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_padded_size_ratio', full_name='object_detection.protos.SSDRandomCropPadFixedAspectRatio.max_padded_size_ratio', index=3, number=4, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7006, serialized_end=7215, ) _CONVERTCLASSLOGITSTOSOFTMAX = _descriptor.Descriptor( name='ConvertClassLogitsToSoftmax', full_name='object_detection.protos.ConvertClassLogitsToSoftmax', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='temperature', full_name='object_detection.protos.ConvertClassLogitsToSoftmax.temperature', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7217, serialized_end=7270, ) _RANDOMSELFCONCATIMAGE = _descriptor.Descriptor( name='RandomSelfConcatImage', full_name='object_detection.protos.RandomSelfConcatImage', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='concat_vertical_probability', full_name='object_detection.protos.RandomSelfConcatImage.concat_vertical_probability', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='concat_horizontal_probability', full_name='object_detection.protos.RandomSelfConcatImage.concat_horizontal_probability', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7272, serialized_end=7381, ) _AUTOAUGMENTIMAGE = _descriptor.Descriptor( name='AutoAugmentImage', full_name='object_detection.protos.AutoAugmentImage', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='policy_name', full_name='object_detection.protos.AutoAugmentImage.policy_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"v0".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7383, serialized_end=7426, ) _DROPLABELPROBABILISTICALLY = _descriptor.Descriptor( name='DropLabelProbabilistically', full_name='object_detection.protos.DropLabelProbabilistically', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='label', full_name='object_detection.protos.DropLabelProbabilistically.label', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='drop_probability', full_name='object_detection.protos.DropLabelProbabilistically.drop_probability', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7428, serialized_end=7500, ) _REMAPLABELS = _descriptor.Descriptor( name='RemapLabels', full_name='object_detection.protos.RemapLabels', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='original_labels', full_name='object_detection.protos.RemapLabels.original_labels', index=0, number=1, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='new_label', full_name='object_detection.protos.RemapLabels.new_label', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7502, serialized_end=7559, ) _RANDOMJPEGQUALITY = _descriptor.Descriptor( name='RandomJpegQuality', full_name='object_detection.protos.RandomJpegQuality', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.RandomJpegQuality.random_coef', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_jpeg_quality', full_name='object_detection.protos.RandomJpegQuality.min_jpeg_quality', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_jpeg_quality', full_name='object_detection.protos.RandomJpegQuality.max_jpeg_quality', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=100, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7561, serialized_end=7664, ) _RANDOMDOWNSCALETOTARGETPIXELS = _descriptor.Descriptor( name='RandomDownscaleToTargetPixels', full_name='object_detection.protos.RandomDownscaleToTargetPixels', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.RandomDownscaleToTargetPixels.random_coef', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_target_pixels', full_name='object_detection.protos.RandomDownscaleToTargetPixels.min_target_pixels', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=300000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_target_pixels', full_name='object_detection.protos.RandomDownscaleToTargetPixels.max_target_pixels', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=500000, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7666, serialized_end=7791, ) _RANDOMPATCHGAUSSIAN = _descriptor.Descriptor( name='RandomPatchGaussian', full_name='object_detection.protos.RandomPatchGaussian', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='random_coef', full_name='object_detection.protos.RandomPatchGaussian.random_coef', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_patch_size', full_name='object_detection.protos.RandomPatchGaussian.min_patch_size', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_patch_size', full_name='object_detection.protos.RandomPatchGaussian.max_patch_size', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=250, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_gaussian_stddev', full_name='object_detection.protos.RandomPatchGaussian.min_gaussian_stddev', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_gaussian_stddev', full_name='object_detection.protos.RandomPatchGaussian.max_gaussian_stddev', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7794, serialized_end=7959, ) _RANDOMSQUARECROPBYSCALE = _descriptor.Descriptor( name='RandomSquareCropByScale', full_name='object_detection.protos.RandomSquareCropByScale', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='max_border', full_name='object_detection.protos.RandomSquareCropByScale.max_border', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=128, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale_min', 
full_name='object_detection.protos.RandomSquareCropByScale.scale_min', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.6), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale_max', full_name='object_detection.protos.RandomSquareCropByScale.scale_max', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1.3), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_scales', full_name='object_detection.protos.RandomSquareCropByScale.num_scales', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=8, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7961, serialized_end=8082, ) _RANDOMSCALECROPANDPADTOSQUARE = _descriptor.Descriptor( name='RandomScaleCropAndPadToSquare', full_name='object_detection.protos.RandomScaleCropAndPadToSquare', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='output_size', full_name='object_detection.protos.RandomScaleCropAndPadToSquare.output_size', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=512, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale_min', full_name='object_detection.protos.RandomScaleCropAndPadToSquare.scale_min', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale_max', full_name='object_detection.protos.RandomScaleCropAndPadToSquare.scale_max', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(2), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8084, serialized_end=8187, ) _ADJUSTGAMMA = _descriptor.Descriptor( name='AdjustGamma', full_name='object_detection.protos.AdjustGamma', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='gamma', full_name='object_detection.protos.AdjustGamma.gamma', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='gain', full_name='object_detection.protos.AdjustGamma.gain', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8189, serialized_end=8237, ) _PREPROCESSINGSTEP.fields_by_name['normalize_image'].message_type = _NORMALIZEIMAGE _PREPROCESSINGSTEP.fields_by_name['random_horizontal_flip'].message_type = _RANDOMHORIZONTALFLIP _PREPROCESSINGSTEP.fields_by_name['random_pixel_value_scale'].message_type = _RANDOMPIXELVALUESCALE _PREPROCESSINGSTEP.fields_by_name['random_image_scale'].message_type = _RANDOMIMAGESCALE _PREPROCESSINGSTEP.fields_by_name['random_rgb_to_gray'].message_type = _RANDOMRGBTOGRAY _PREPROCESSINGSTEP.fields_by_name['random_adjust_brightness'].message_type = _RANDOMADJUSTBRIGHTNESS _PREPROCESSINGSTEP.fields_by_name['random_adjust_contrast'].message_type = _RANDOMADJUSTCONTRAST _PREPROCESSINGSTEP.fields_by_name['random_adjust_hue'].message_type = _RANDOMADJUSTHUE _PREPROCESSINGSTEP.fields_by_name['random_adjust_saturation'].message_type = _RANDOMADJUSTSATURATION _PREPROCESSINGSTEP.fields_by_name['random_distort_color'].message_type = _RANDOMDISTORTCOLOR _PREPROCESSINGSTEP.fields_by_name['random_jitter_boxes'].message_type = _RANDOMJITTERBOXES _PREPROCESSINGSTEP.fields_by_name['random_crop_image'].message_type = _RANDOMCROPIMAGE _PREPROCESSINGSTEP.fields_by_name['random_pad_image'].message_type = _RANDOMPADIMAGE _PREPROCESSINGSTEP.fields_by_name['random_crop_pad_image'].message_type = _RANDOMCROPPADIMAGE _PREPROCESSINGSTEP.fields_by_name['random_crop_to_aspect_ratio'].message_type = _RANDOMCROPTOASPECTRATIO _PREPROCESSINGSTEP.fields_by_name['random_black_patches'].message_type = _RANDOMBLACKPATCHES _PREPROCESSINGSTEP.fields_by_name['random_resize_method'].message_type = _RANDOMRESIZEMETHOD _PREPROCESSINGSTEP.fields_by_name['scale_boxes_to_pixel_coordinates'].message_type = _SCALEBOXESTOPIXELCOORDINATES _PREPROCESSINGSTEP.fields_by_name['resize_image'].message_type = _RESIZEIMAGE _PREPROCESSINGSTEP.fields_by_name['subtract_channel_mean'].message_type = _SUBTRACTCHANNELMEAN _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop'].message_type = _SSDRANDOMCROP _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_pad'].message_type = _SSDRANDOMCROPPAD _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_fixed_aspect_ratio'].message_type = _SSDRANDOMCROPFIXEDASPECTRATIO _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_pad_fixed_aspect_ratio'].message_type = _SSDRANDOMCROPPADFIXEDASPECTRATIO _PREPROCESSINGSTEP.fields_by_name['random_vertical_flip'].message_type = _RANDOMVERTICALFLIP _PREPROCESSINGSTEP.fields_by_name['random_rotation90'].message_type = _RANDOMROTATION90 _PREPROCESSINGSTEP.fields_by_name['rgb_to_gray'].message_type = _RGBTOGRAY _PREPROCESSINGSTEP.fields_by_name['convert_class_logits_to_softmax'].message_type = _CONVERTCLASSLOGITSTOSOFTMAX _PREPROCESSINGSTEP.fields_by_name['random_absolute_pad_image'].message_type = _RANDOMABSOLUTEPADIMAGE _PREPROCESSINGSTEP.fields_by_name['random_self_concat_image'].message_type = _RANDOMSELFCONCATIMAGE 
_PREPROCESSINGSTEP.fields_by_name['autoaugment_image'].message_type = _AUTOAUGMENTIMAGE _PREPROCESSINGSTEP.fields_by_name['drop_label_probabilistically'].message_type = _DROPLABELPROBABILISTICALLY _PREPROCESSINGSTEP.fields_by_name['remap_labels'].message_type = _REMAPLABELS _PREPROCESSINGSTEP.fields_by_name['random_jpeg_quality'].message_type = _RANDOMJPEGQUALITY _PREPROCESSINGSTEP.fields_by_name['random_downscale_to_target_pixels'].message_type = _RANDOMDOWNSCALETOTARGETPIXELS _PREPROCESSINGSTEP.fields_by_name['random_patch_gaussian'].message_type = _RANDOMPATCHGAUSSIAN _PREPROCESSINGSTEP.fields_by_name['random_square_crop_by_scale'].message_type = _RANDOMSQUARECROPBYSCALE _PREPROCESSINGSTEP.fields_by_name['random_scale_crop_and_pad_to_square'].message_type = _RANDOMSCALECROPANDPADTOSQUARE _PREPROCESSINGSTEP.fields_by_name['adjust_gamma'].message_type = _ADJUSTGAMMA _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['normalize_image']) _PREPROCESSINGSTEP.fields_by_name['normalize_image'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_horizontal_flip']) _PREPROCESSINGSTEP.fields_by_name['random_horizontal_flip'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_pixel_value_scale']) _PREPROCESSINGSTEP.fields_by_name['random_pixel_value_scale'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_image_scale']) _PREPROCESSINGSTEP.fields_by_name['random_image_scale'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_rgb_to_gray']) _PREPROCESSINGSTEP.fields_by_name['random_rgb_to_gray'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_adjust_brightness']) _PREPROCESSINGSTEP.fields_by_name['random_adjust_brightness'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_adjust_contrast']) _PREPROCESSINGSTEP.fields_by_name['random_adjust_contrast'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_adjust_hue']) _PREPROCESSINGSTEP.fields_by_name['random_adjust_hue'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_adjust_saturation']) _PREPROCESSINGSTEP.fields_by_name['random_adjust_saturation'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_distort_color']) _PREPROCESSINGSTEP.fields_by_name['random_distort_color'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] 
_PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_jitter_boxes']) _PREPROCESSINGSTEP.fields_by_name['random_jitter_boxes'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_crop_image']) _PREPROCESSINGSTEP.fields_by_name['random_crop_image'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_pad_image']) _PREPROCESSINGSTEP.fields_by_name['random_pad_image'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_crop_pad_image']) _PREPROCESSINGSTEP.fields_by_name['random_crop_pad_image'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_crop_to_aspect_ratio']) _PREPROCESSINGSTEP.fields_by_name['random_crop_to_aspect_ratio'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_black_patches']) _PREPROCESSINGSTEP.fields_by_name['random_black_patches'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_resize_method']) _PREPROCESSINGSTEP.fields_by_name['random_resize_method'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['scale_boxes_to_pixel_coordinates']) _PREPROCESSINGSTEP.fields_by_name['scale_boxes_to_pixel_coordinates'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['resize_image']) _PREPROCESSINGSTEP.fields_by_name['resize_image'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['subtract_channel_mean']) _PREPROCESSINGSTEP.fields_by_name['subtract_channel_mean'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop']) _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_pad']) _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_pad'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_fixed_aspect_ratio']) _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_fixed_aspect_ratio'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( 
_PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_pad_fixed_aspect_ratio']) _PREPROCESSINGSTEP.fields_by_name['ssd_random_crop_pad_fixed_aspect_ratio'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_vertical_flip']) _PREPROCESSINGSTEP.fields_by_name['random_vertical_flip'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_rotation90']) _PREPROCESSINGSTEP.fields_by_name['random_rotation90'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['rgb_to_gray']) _PREPROCESSINGSTEP.fields_by_name['rgb_to_gray'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['convert_class_logits_to_softmax']) _PREPROCESSINGSTEP.fields_by_name['convert_class_logits_to_softmax'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_absolute_pad_image']) _PREPROCESSINGSTEP.fields_by_name['random_absolute_pad_image'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_self_concat_image']) _PREPROCESSINGSTEP.fields_by_name['random_self_concat_image'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['autoaugment_image']) _PREPROCESSINGSTEP.fields_by_name['autoaugment_image'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['drop_label_probabilistically']) _PREPROCESSINGSTEP.fields_by_name['drop_label_probabilistically'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['remap_labels']) _PREPROCESSINGSTEP.fields_by_name['remap_labels'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_jpeg_quality']) _PREPROCESSINGSTEP.fields_by_name['random_jpeg_quality'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_downscale_to_target_pixels']) _PREPROCESSINGSTEP.fields_by_name['random_downscale_to_target_pixels'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_patch_gaussian']) _PREPROCESSINGSTEP.fields_by_name['random_patch_gaussian'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( 
_PREPROCESSINGSTEP.fields_by_name['random_square_crop_by_scale']) _PREPROCESSINGSTEP.fields_by_name['random_square_crop_by_scale'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['random_scale_crop_and_pad_to_square']) _PREPROCESSINGSTEP.fields_by_name['random_scale_crop_and_pad_to_square'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'].fields.append( _PREPROCESSINGSTEP.fields_by_name['adjust_gamma']) _PREPROCESSINGSTEP.fields_by_name['adjust_gamma'].containing_oneof = _PREPROCESSINGSTEP.oneofs_by_name['preprocessing_step'] _RANDOMJITTERBOXES.fields_by_name['jitter_mode'].enum_type = _RANDOMJITTERBOXES_JITTERMODE _RANDOMJITTERBOXES_JITTERMODE.containing_type = _RANDOMJITTERBOXES _RESIZEIMAGE.fields_by_name['method'].enum_type = _RESIZEIMAGE_METHOD _RESIZEIMAGE_METHOD.containing_type = _RESIZEIMAGE _SSDRANDOMCROP.fields_by_name['operations'].message_type = _SSDRANDOMCROPOPERATION _SSDRANDOMCROPPAD.fields_by_name['operations'].message_type = _SSDRANDOMCROPPADOPERATION _SSDRANDOMCROPFIXEDASPECTRATIO.fields_by_name['operations'].message_type = _SSDRANDOMCROPFIXEDASPECTRATIOOPERATION _SSDRANDOMCROPPADFIXEDASPECTRATIO.fields_by_name['operations'].message_type = _SSDRANDOMCROPPADFIXEDASPECTRATIOOPERATION DESCRIPTOR.message_types_by_name['PreprocessingStep'] = _PREPROCESSINGSTEP DESCRIPTOR.message_types_by_name['NormalizeImage'] = _NORMALIZEIMAGE DESCRIPTOR.message_types_by_name['RandomHorizontalFlip'] = _RANDOMHORIZONTALFLIP DESCRIPTOR.message_types_by_name['RandomVerticalFlip'] = _RANDOMVERTICALFLIP DESCRIPTOR.message_types_by_name['RandomRotation90'] = _RANDOMROTATION90 DESCRIPTOR.message_types_by_name['RandomPixelValueScale'] = _RANDOMPIXELVALUESCALE DESCRIPTOR.message_types_by_name['RandomImageScale'] = _RANDOMIMAGESCALE DESCRIPTOR.message_types_by_name['RandomRGBtoGray'] = _RANDOMRGBTOGRAY DESCRIPTOR.message_types_by_name['RandomAdjustBrightness'] = _RANDOMADJUSTBRIGHTNESS DESCRIPTOR.message_types_by_name['RandomAdjustContrast'] = _RANDOMADJUSTCONTRAST DESCRIPTOR.message_types_by_name['RandomAdjustHue'] = _RANDOMADJUSTHUE DESCRIPTOR.message_types_by_name['RandomAdjustSaturation'] = _RANDOMADJUSTSATURATION DESCRIPTOR.message_types_by_name['RandomDistortColor'] = _RANDOMDISTORTCOLOR DESCRIPTOR.message_types_by_name['RandomJitterBoxes'] = _RANDOMJITTERBOXES DESCRIPTOR.message_types_by_name['RandomCropImage'] = _RANDOMCROPIMAGE DESCRIPTOR.message_types_by_name['RandomPadImage'] = _RANDOMPADIMAGE DESCRIPTOR.message_types_by_name['RandomAbsolutePadImage'] = _RANDOMABSOLUTEPADIMAGE DESCRIPTOR.message_types_by_name['RandomCropPadImage'] = _RANDOMCROPPADIMAGE DESCRIPTOR.message_types_by_name['RandomCropToAspectRatio'] = _RANDOMCROPTOASPECTRATIO DESCRIPTOR.message_types_by_name['RandomBlackPatches'] = _RANDOMBLACKPATCHES DESCRIPTOR.message_types_by_name['RandomResizeMethod'] = _RANDOMRESIZEMETHOD DESCRIPTOR.message_types_by_name['RGBtoGray'] = _RGBTOGRAY DESCRIPTOR.message_types_by_name['ScaleBoxesToPixelCoordinates'] = _SCALEBOXESTOPIXELCOORDINATES DESCRIPTOR.message_types_by_name['ResizeImage'] = _RESIZEIMAGE DESCRIPTOR.message_types_by_name['SubtractChannelMean'] = _SUBTRACTCHANNELMEAN DESCRIPTOR.message_types_by_name['SSDRandomCropOperation'] = _SSDRANDOMCROPOPERATION DESCRIPTOR.message_types_by_name['SSDRandomCrop'] = _SSDRANDOMCROP 
DESCRIPTOR.message_types_by_name['SSDRandomCropPadOperation'] = _SSDRANDOMCROPPADOPERATION DESCRIPTOR.message_types_by_name['SSDRandomCropPad'] = _SSDRANDOMCROPPAD DESCRIPTOR.message_types_by_name['SSDRandomCropFixedAspectRatioOperation'] = _SSDRANDOMCROPFIXEDASPECTRATIOOPERATION DESCRIPTOR.message_types_by_name['SSDRandomCropFixedAspectRatio'] = _SSDRANDOMCROPFIXEDASPECTRATIO DESCRIPTOR.message_types_by_name['SSDRandomCropPadFixedAspectRatioOperation'] = _SSDRANDOMCROPPADFIXEDASPECTRATIOOPERATION DESCRIPTOR.message_types_by_name['SSDRandomCropPadFixedAspectRatio'] = _SSDRANDOMCROPPADFIXEDASPECTRATIO DESCRIPTOR.message_types_by_name['ConvertClassLogitsToSoftmax'] = _CONVERTCLASSLOGITSTOSOFTMAX DESCRIPTOR.message_types_by_name['RandomSelfConcatImage'] = _RANDOMSELFCONCATIMAGE DESCRIPTOR.message_types_by_name['AutoAugmentImage'] = _AUTOAUGMENTIMAGE DESCRIPTOR.message_types_by_name['DropLabelProbabilistically'] = _DROPLABELPROBABILISTICALLY DESCRIPTOR.message_types_by_name['RemapLabels'] = _REMAPLABELS DESCRIPTOR.message_types_by_name['RandomJpegQuality'] = _RANDOMJPEGQUALITY DESCRIPTOR.message_types_by_name['RandomDownscaleToTargetPixels'] = _RANDOMDOWNSCALETOTARGETPIXELS DESCRIPTOR.message_types_by_name['RandomPatchGaussian'] = _RANDOMPATCHGAUSSIAN DESCRIPTOR.message_types_by_name['RandomSquareCropByScale'] = _RANDOMSQUARECROPBYSCALE DESCRIPTOR.message_types_by_name['RandomScaleCropAndPadToSquare'] = _RANDOMSCALECROPANDPADTOSQUARE DESCRIPTOR.message_types_by_name['AdjustGamma'] = _ADJUSTGAMMA _sym_db.RegisterFileDescriptor(DESCRIPTOR) PreprocessingStep = _reflection.GeneratedProtocolMessageType('PreprocessingStep', (_message.Message,), { 'DESCRIPTOR' : _PREPROCESSINGSTEP, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.PreprocessingStep) }) _sym_db.RegisterMessage(PreprocessingStep) NormalizeImage = _reflection.GeneratedProtocolMessageType('NormalizeImage', (_message.Message,), { 'DESCRIPTOR' : _NORMALIZEIMAGE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.NormalizeImage) }) _sym_db.RegisterMessage(NormalizeImage) RandomHorizontalFlip = _reflection.GeneratedProtocolMessageType('RandomHorizontalFlip', (_message.Message,), { 'DESCRIPTOR' : _RANDOMHORIZONTALFLIP, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomHorizontalFlip) }) _sym_db.RegisterMessage(RandomHorizontalFlip) RandomVerticalFlip = _reflection.GeneratedProtocolMessageType('RandomVerticalFlip', (_message.Message,), { 'DESCRIPTOR' : _RANDOMVERTICALFLIP, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomVerticalFlip) }) _sym_db.RegisterMessage(RandomVerticalFlip) RandomRotation90 = _reflection.GeneratedProtocolMessageType('RandomRotation90', (_message.Message,), { 'DESCRIPTOR' : _RANDOMROTATION90, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomRotation90) }) _sym_db.RegisterMessage(RandomRotation90) RandomPixelValueScale = _reflection.GeneratedProtocolMessageType('RandomPixelValueScale', (_message.Message,), { 'DESCRIPTOR' : _RANDOMPIXELVALUESCALE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomPixelValueScale) }) _sym_db.RegisterMessage(RandomPixelValueScale) RandomImageScale = 
_reflection.GeneratedProtocolMessageType('RandomImageScale', (_message.Message,), { 'DESCRIPTOR' : _RANDOMIMAGESCALE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomImageScale) }) _sym_db.RegisterMessage(RandomImageScale) RandomRGBtoGray = _reflection.GeneratedProtocolMessageType('RandomRGBtoGray', (_message.Message,), { 'DESCRIPTOR' : _RANDOMRGBTOGRAY, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomRGBtoGray) }) _sym_db.RegisterMessage(RandomRGBtoGray) RandomAdjustBrightness = _reflection.GeneratedProtocolMessageType('RandomAdjustBrightness', (_message.Message,), { 'DESCRIPTOR' : _RANDOMADJUSTBRIGHTNESS, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomAdjustBrightness) }) _sym_db.RegisterMessage(RandomAdjustBrightness) RandomAdjustContrast = _reflection.GeneratedProtocolMessageType('RandomAdjustContrast', (_message.Message,), { 'DESCRIPTOR' : _RANDOMADJUSTCONTRAST, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomAdjustContrast) }) _sym_db.RegisterMessage(RandomAdjustContrast) RandomAdjustHue = _reflection.GeneratedProtocolMessageType('RandomAdjustHue', (_message.Message,), { 'DESCRIPTOR' : _RANDOMADJUSTHUE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomAdjustHue) }) _sym_db.RegisterMessage(RandomAdjustHue) RandomAdjustSaturation = _reflection.GeneratedProtocolMessageType('RandomAdjustSaturation', (_message.Message,), { 'DESCRIPTOR' : _RANDOMADJUSTSATURATION, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomAdjustSaturation) }) _sym_db.RegisterMessage(RandomAdjustSaturation) RandomDistortColor = _reflection.GeneratedProtocolMessageType('RandomDistortColor', (_message.Message,), { 'DESCRIPTOR' : _RANDOMDISTORTCOLOR, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomDistortColor) }) _sym_db.RegisterMessage(RandomDistortColor) RandomJitterBoxes = _reflection.GeneratedProtocolMessageType('RandomJitterBoxes', (_message.Message,), { 'DESCRIPTOR' : _RANDOMJITTERBOXES, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomJitterBoxes) }) _sym_db.RegisterMessage(RandomJitterBoxes) RandomCropImage = _reflection.GeneratedProtocolMessageType('RandomCropImage', (_message.Message,), { 'DESCRIPTOR' : _RANDOMCROPIMAGE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomCropImage) }) _sym_db.RegisterMessage(RandomCropImage) RandomPadImage = _reflection.GeneratedProtocolMessageType('RandomPadImage', (_message.Message,), { 'DESCRIPTOR' : _RANDOMPADIMAGE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomPadImage) }) _sym_db.RegisterMessage(RandomPadImage) RandomAbsolutePadImage = _reflection.GeneratedProtocolMessageType('RandomAbsolutePadImage', (_message.Message,), { 'DESCRIPTOR' : _RANDOMABSOLUTEPADIMAGE, '__module__' : 'object_detection.protos.preprocessor_pb2' # 
@@protoc_insertion_point(class_scope:object_detection.protos.RandomAbsolutePadImage) }) _sym_db.RegisterMessage(RandomAbsolutePadImage) RandomCropPadImage = _reflection.GeneratedProtocolMessageType('RandomCropPadImage', (_message.Message,), { 'DESCRIPTOR' : _RANDOMCROPPADIMAGE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomCropPadImage) }) _sym_db.RegisterMessage(RandomCropPadImage) RandomCropToAspectRatio = _reflection.GeneratedProtocolMessageType('RandomCropToAspectRatio', (_message.Message,), { 'DESCRIPTOR' : _RANDOMCROPTOASPECTRATIO, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomCropToAspectRatio) }) _sym_db.RegisterMessage(RandomCropToAspectRatio) RandomBlackPatches = _reflection.GeneratedProtocolMessageType('RandomBlackPatches', (_message.Message,), { 'DESCRIPTOR' : _RANDOMBLACKPATCHES, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomBlackPatches) }) _sym_db.RegisterMessage(RandomBlackPatches) RandomResizeMethod = _reflection.GeneratedProtocolMessageType('RandomResizeMethod', (_message.Message,), { 'DESCRIPTOR' : _RANDOMRESIZEMETHOD, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomResizeMethod) }) _sym_db.RegisterMessage(RandomResizeMethod) RGBtoGray = _reflection.GeneratedProtocolMessageType('RGBtoGray', (_message.Message,), { 'DESCRIPTOR' : _RGBTOGRAY, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RGBtoGray) }) _sym_db.RegisterMessage(RGBtoGray) ScaleBoxesToPixelCoordinates = _reflection.GeneratedProtocolMessageType('ScaleBoxesToPixelCoordinates', (_message.Message,), { 'DESCRIPTOR' : _SCALEBOXESTOPIXELCOORDINATES, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ScaleBoxesToPixelCoordinates) }) _sym_db.RegisterMessage(ScaleBoxesToPixelCoordinates) ResizeImage = _reflection.GeneratedProtocolMessageType('ResizeImage', (_message.Message,), { 'DESCRIPTOR' : _RESIZEIMAGE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ResizeImage) }) _sym_db.RegisterMessage(ResizeImage) SubtractChannelMean = _reflection.GeneratedProtocolMessageType('SubtractChannelMean', (_message.Message,), { 'DESCRIPTOR' : _SUBTRACTCHANNELMEAN, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SubtractChannelMean) }) _sym_db.RegisterMessage(SubtractChannelMean) SSDRandomCropOperation = _reflection.GeneratedProtocolMessageType('SSDRandomCropOperation', (_message.Message,), { 'DESCRIPTOR' : _SSDRANDOMCROPOPERATION, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SSDRandomCropOperation) }) _sym_db.RegisterMessage(SSDRandomCropOperation) SSDRandomCrop = _reflection.GeneratedProtocolMessageType('SSDRandomCrop', (_message.Message,), { 'DESCRIPTOR' : _SSDRANDOMCROP, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SSDRandomCrop) }) _sym_db.RegisterMessage(SSDRandomCrop) SSDRandomCropPadOperation = _reflection.GeneratedProtocolMessageType('SSDRandomCropPadOperation', 
(_message.Message,), { 'DESCRIPTOR' : _SSDRANDOMCROPPADOPERATION, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SSDRandomCropPadOperation) }) _sym_db.RegisterMessage(SSDRandomCropPadOperation) SSDRandomCropPad = _reflection.GeneratedProtocolMessageType('SSDRandomCropPad', (_message.Message,), { 'DESCRIPTOR' : _SSDRANDOMCROPPAD, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SSDRandomCropPad) }) _sym_db.RegisterMessage(SSDRandomCropPad) SSDRandomCropFixedAspectRatioOperation = _reflection.GeneratedProtocolMessageType('SSDRandomCropFixedAspectRatioOperation', (_message.Message,), { 'DESCRIPTOR' : _SSDRANDOMCROPFIXEDASPECTRATIOOPERATION, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SSDRandomCropFixedAspectRatioOperation) }) _sym_db.RegisterMessage(SSDRandomCropFixedAspectRatioOperation) SSDRandomCropFixedAspectRatio = _reflection.GeneratedProtocolMessageType('SSDRandomCropFixedAspectRatio', (_message.Message,), { 'DESCRIPTOR' : _SSDRANDOMCROPFIXEDASPECTRATIO, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SSDRandomCropFixedAspectRatio) }) _sym_db.RegisterMessage(SSDRandomCropFixedAspectRatio) SSDRandomCropPadFixedAspectRatioOperation = _reflection.GeneratedProtocolMessageType('SSDRandomCropPadFixedAspectRatioOperation', (_message.Message,), { 'DESCRIPTOR' : _SSDRANDOMCROPPADFIXEDASPECTRATIOOPERATION, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SSDRandomCropPadFixedAspectRatioOperation) }) _sym_db.RegisterMessage(SSDRandomCropPadFixedAspectRatioOperation) SSDRandomCropPadFixedAspectRatio = _reflection.GeneratedProtocolMessageType('SSDRandomCropPadFixedAspectRatio', (_message.Message,), { 'DESCRIPTOR' : _SSDRANDOMCROPPADFIXEDASPECTRATIO, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SSDRandomCropPadFixedAspectRatio) }) _sym_db.RegisterMessage(SSDRandomCropPadFixedAspectRatio) ConvertClassLogitsToSoftmax = _reflection.GeneratedProtocolMessageType('ConvertClassLogitsToSoftmax', (_message.Message,), { 'DESCRIPTOR' : _CONVERTCLASSLOGITSTOSOFTMAX, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ConvertClassLogitsToSoftmax) }) _sym_db.RegisterMessage(ConvertClassLogitsToSoftmax) RandomSelfConcatImage = _reflection.GeneratedProtocolMessageType('RandomSelfConcatImage', (_message.Message,), { 'DESCRIPTOR' : _RANDOMSELFCONCATIMAGE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomSelfConcatImage) }) _sym_db.RegisterMessage(RandomSelfConcatImage) AutoAugmentImage = _reflection.GeneratedProtocolMessageType('AutoAugmentImage', (_message.Message,), { 'DESCRIPTOR' : _AUTOAUGMENTIMAGE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.AutoAugmentImage) }) _sym_db.RegisterMessage(AutoAugmentImage) DropLabelProbabilistically = _reflection.GeneratedProtocolMessageType('DropLabelProbabilistically', (_message.Message,), { 'DESCRIPTOR' : _DROPLABELPROBABILISTICALLY, '__module__' : 'object_detection.protos.preprocessor_pb2' # 
@@protoc_insertion_point(class_scope:object_detection.protos.DropLabelProbabilistically) }) _sym_db.RegisterMessage(DropLabelProbabilistically) RemapLabels = _reflection.GeneratedProtocolMessageType('RemapLabels', (_message.Message,), { 'DESCRIPTOR' : _REMAPLABELS, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RemapLabels) }) _sym_db.RegisterMessage(RemapLabels) RandomJpegQuality = _reflection.GeneratedProtocolMessageType('RandomJpegQuality', (_message.Message,), { 'DESCRIPTOR' : _RANDOMJPEGQUALITY, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomJpegQuality) }) _sym_db.RegisterMessage(RandomJpegQuality) RandomDownscaleToTargetPixels = _reflection.GeneratedProtocolMessageType('RandomDownscaleToTargetPixels', (_message.Message,), { 'DESCRIPTOR' : _RANDOMDOWNSCALETOTARGETPIXELS, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomDownscaleToTargetPixels) }) _sym_db.RegisterMessage(RandomDownscaleToTargetPixels) RandomPatchGaussian = _reflection.GeneratedProtocolMessageType('RandomPatchGaussian', (_message.Message,), { 'DESCRIPTOR' : _RANDOMPATCHGAUSSIAN, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomPatchGaussian) }) _sym_db.RegisterMessage(RandomPatchGaussian) RandomSquareCropByScale = _reflection.GeneratedProtocolMessageType('RandomSquareCropByScale', (_message.Message,), { 'DESCRIPTOR' : _RANDOMSQUARECROPBYSCALE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomSquareCropByScale) }) _sym_db.RegisterMessage(RandomSquareCropByScale) RandomScaleCropAndPadToSquare = _reflection.GeneratedProtocolMessageType('RandomScaleCropAndPadToSquare', (_message.Message,), { 'DESCRIPTOR' : _RANDOMSCALECROPANDPADTOSQUARE, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomScaleCropAndPadToSquare) }) _sym_db.RegisterMessage(RandomScaleCropAndPadToSquare) AdjustGamma = _reflection.GeneratedProtocolMessageType('AdjustGamma', (_message.Message,), { 'DESCRIPTOR' : _ADJUSTGAMMA, '__module__' : 'object_detection.protos.preprocessor_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.AdjustGamma) }) _sym_db.RegisterMessage(AdjustGamma) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/preprocessor_pb2.py
preprocessor_pb2.py
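The generated preprocessor_pb2 module above is normally consumed by parsing text-format configuration snippets rather than by touching the descriptors directly. The short sketch below is illustrative only: it assumes the object_detection package and the protobuf runtime are importable, and the gamma/gain values are arbitrary examples, not recommended settings.

# Minimal usage sketch (assumptions: object_detection and google.protobuf are installed).
from google.protobuf import text_format
from object_detection.protos import preprocessor_pb2

# Build a PreprocessingStep from a text-format snippet, the same syntax used in
# pipeline config files; 2.2 / 1.0 are illustrative values only.
step = preprocessor_pb2.PreprocessingStep()
text_format.Parse('adjust_gamma { gamma: 2.2 gain: 1.0 }', step)

# The 'preprocessing_step' oneof defined above reports which augmentation was set.
print(step.WhichOneof('preprocessing_step'))  # 'adjust_gamma'
print(step.adjust_gamma.gamma)                # approximately 2.2 (stored as float32)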
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/eval.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/eval.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n\"object_detection/protos/eval.proto\x12\x17object_detection.protos\"\xaa\n\n\nEvalConfig\x12\x15\n\nbatch_size\x18\x19 \x01(\r:\x01\x31\x12\x1e\n\x12num_visualizations\x18\x01 \x01(\r:\x02\x31\x30\x12\x1e\n\x0cnum_examples\x18\x02 \x01(\r:\x04\x35\x30\x30\x30\x42\x02\x18\x01\x12\x1f\n\x12\x65val_interval_secs\x18\x03 \x01(\r:\x03\x33\x30\x30\x12\x18\n\tmax_evals\x18\x04 \x01(\r:\x01\x30\x42\x02\x18\x01\x12\x19\n\nsave_graph\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\"\n\x18visualization_export_dir\x18\x06 \x01(\t:\x00\x12\x15\n\x0b\x65val_master\x18\x07 \x01(\t:\x00\x12\x13\n\x0bmetrics_set\x18\x08 \x03(\t\x12J\n\x14parameterized_metric\x18\x1f \x03(\x0b\x32,.object_detection.protos.ParameterizedMetric\x12\x15\n\x0b\x65xport_path\x18\t \x01(\t:\x00\x12!\n\x12ignore_groundtruth\x18\n \x01(\x08:\x05\x66\x61lse\x12\"\n\x13use_moving_averages\x18\x0b \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x65val_instance_masks\x18\x0c \x01(\x08:\x05\x66\x61lse\x12 \n\x13min_score_threshold\x18\r \x01(\x02:\x03\x30.5\x12&\n\x1amax_num_boxes_to_visualize\x18\x0e \x01(\x05:\x02\x32\x30\x12\x1a\n\x0bskip_scores\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0bskip_labels\x18\x10 \x01(\x08:\x05\x66\x61lse\x12*\n\x1bvisualize_groundtruth_boxes\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\x32\n#groundtruth_box_visualization_color\x18\x12 \x01(\t:\x05\x62lack\x12\x35\n&keep_image_id_for_visualization_export\x18\x13 \x01(\x08:\x05\x66\x61lse\x12$\n\x16retain_original_images\x18\x17 \x01(\x08:\x04true\x12+\n\x1cinclude_metrics_per_category\x18\x18 \x01(\x08:\x05\x66\x61lse\x12\'\n\x18\x61ll_metrics_per_category\x18# \x01(\x08:\x05\x66\x61lse\x12R\n\x10super_categories\x18\" \x03(\x0b\x32\x38.object_detection.protos.EvalConfig.SuperCategoriesEntry\x12\x1d\n\x12recall_lower_bound\x18\x1a \x01(\x02:\x01\x30\x12\x1d\n\x12recall_upper_bound\x18\x1b \x01(\x02:\x01\x31\x12\x38\n)retain_original_image_additional_channels\x18\x1c \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0f\x66orce_no_resize\x18\x1d \x01(\x08:\x05\x66\x61lse\x12%\n\x16use_dummy_loss_in_eval\x18\x1e \x01(\x08:\x05\x66\x61lse\x12<\n\rkeypoint_edge\x18 \x03(\x0b\x32%.object_detection.protos.KeypointEdge\x12\x33\n$skip_predictions_for_unlabeled_class\x18! 
\x01(\x08:\x05\x66\x61lse\x1a\x36\n\x14SuperCategoriesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"|\n\x13ParameterizedMetric\x12M\n\x15\x63oco_keypoint_metrics\x18\x01 \x01(\x0b\x32,.object_detection.protos.CocoKeypointMetricsH\x00\x42\x16\n\x14parameterized_metric\"\xd3\x01\n\x13\x43ocoKeypointMetrics\x12\x13\n\x0b\x63lass_label\x18\x01 \x01(\t\x12i\n\x18keypoint_label_to_sigmas\x18\x02 \x03(\x0b\x32G.object_detection.protos.CocoKeypointMetrics.KeypointLabelToSigmasEntry\x1a<\n\x1aKeypointLabelToSigmasEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\"*\n\x0cKeypointEdge\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05' ) _EVALCONFIG_SUPERCATEGORIESENTRY = _descriptor.Descriptor( name='SuperCategoriesEntry', full_name='object_detection.protos.EvalConfig.SuperCategoriesEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='object_detection.protos.EvalConfig.SuperCategoriesEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='object_detection.protos.EvalConfig.SuperCategoriesEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=b'8\001', is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1332, serialized_end=1386, ) _EVALCONFIG = _descriptor.Descriptor( name='EvalConfig', full_name='object_detection.protos.EvalConfig', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='batch_size', full_name='object_detection.protos.EvalConfig.batch_size', index=0, number=25, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_visualizations', full_name='object_detection.protos.EvalConfig.num_visualizations', index=1, number=1, type=13, cpp_type=3, label=1, has_default_value=True, default_value=10, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_examples', full_name='object_detection.protos.EvalConfig.num_examples', index=2, number=2, type=13, cpp_type=3, label=1, has_default_value=True, default_value=5000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='eval_interval_secs', full_name='object_detection.protos.EvalConfig.eval_interval_secs', index=3, 
number=3, type=13, cpp_type=3, label=1, has_default_value=True, default_value=300, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_evals', full_name='object_detection.protos.EvalConfig.max_evals', index=4, number=4, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='save_graph', full_name='object_detection.protos.EvalConfig.save_graph', index=5, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='visualization_export_dir', full_name='object_detection.protos.EvalConfig.visualization_export_dir', index=6, number=6, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='eval_master', full_name='object_detection.protos.EvalConfig.eval_master', index=7, number=7, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='metrics_set', full_name='object_detection.protos.EvalConfig.metrics_set', index=8, number=8, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='parameterized_metric', full_name='object_detection.protos.EvalConfig.parameterized_metric', index=9, number=31, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='export_path', full_name='object_detection.protos.EvalConfig.export_path', index=10, number=9, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ignore_groundtruth', full_name='object_detection.protos.EvalConfig.ignore_groundtruth', index=11, number=10, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_moving_averages', 
full_name='object_detection.protos.EvalConfig.use_moving_averages', index=12, number=11, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='eval_instance_masks', full_name='object_detection.protos.EvalConfig.eval_instance_masks', index=13, number=12, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_score_threshold', full_name='object_detection.protos.EvalConfig.min_score_threshold', index=14, number=13, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_num_boxes_to_visualize', full_name='object_detection.protos.EvalConfig.max_num_boxes_to_visualize', index=15, number=14, type=5, cpp_type=1, label=1, has_default_value=True, default_value=20, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='skip_scores', full_name='object_detection.protos.EvalConfig.skip_scores', index=16, number=15, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='skip_labels', full_name='object_detection.protos.EvalConfig.skip_labels', index=17, number=16, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='visualize_groundtruth_boxes', full_name='object_detection.protos.EvalConfig.visualize_groundtruth_boxes', index=18, number=17, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='groundtruth_box_visualization_color', full_name='object_detection.protos.EvalConfig.groundtruth_box_visualization_color', index=19, number=18, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"black".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keep_image_id_for_visualization_export', full_name='object_detection.protos.EvalConfig.keep_image_id_for_visualization_export', index=20, number=19, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='retain_original_images', full_name='object_detection.protos.EvalConfig.retain_original_images', index=21, number=23, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='include_metrics_per_category', full_name='object_detection.protos.EvalConfig.include_metrics_per_category', index=22, number=24, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='all_metrics_per_category', full_name='object_detection.protos.EvalConfig.all_metrics_per_category', index=23, number=35, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='super_categories', full_name='object_detection.protos.EvalConfig.super_categories', index=24, number=34, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='recall_lower_bound', full_name='object_detection.protos.EvalConfig.recall_lower_bound', index=25, number=26, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='recall_upper_bound', full_name='object_detection.protos.EvalConfig.recall_upper_bound', index=26, number=27, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='retain_original_image_additional_channels', full_name='object_detection.protos.EvalConfig.retain_original_image_additional_channels', index=27, number=28, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='force_no_resize', full_name='object_detection.protos.EvalConfig.force_no_resize', index=28, number=29, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_dummy_loss_in_eval', 
full_name='object_detection.protos.EvalConfig.use_dummy_loss_in_eval', index=29, number=30, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_edge', full_name='object_detection.protos.EvalConfig.keypoint_edge', index=30, number=32, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='skip_predictions_for_unlabeled_class', full_name='object_detection.protos.EvalConfig.skip_predictions_for_unlabeled_class', index=31, number=33, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_EVALCONFIG_SUPERCATEGORIESENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=64, serialized_end=1386, ) _PARAMETERIZEDMETRIC = _descriptor.Descriptor( name='ParameterizedMetric', full_name='object_detection.protos.ParameterizedMetric', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='coco_keypoint_metrics', full_name='object_detection.protos.ParameterizedMetric.coco_keypoint_metrics', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='parameterized_metric', full_name='object_detection.protos.ParameterizedMetric.parameterized_metric', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=1388, serialized_end=1512, ) _COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY = _descriptor.Descriptor( name='KeypointLabelToSigmasEntry', full_name='object_detection.protos.CocoKeypointMetrics.KeypointLabelToSigmasEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='object_detection.protos.CocoKeypointMetrics.KeypointLabelToSigmasEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='object_detection.protos.CocoKeypointMetrics.KeypointLabelToSigmasEntry.value', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=b'8\001', is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1666, serialized_end=1726, ) _COCOKEYPOINTMETRICS = _descriptor.Descriptor( name='CocoKeypointMetrics', full_name='object_detection.protos.CocoKeypointMetrics', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='class_label', full_name='object_detection.protos.CocoKeypointMetrics.class_label', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keypoint_label_to_sigmas', full_name='object_detection.protos.CocoKeypointMetrics.keypoint_label_to_sigmas', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1515, serialized_end=1726, ) _KEYPOINTEDGE = _descriptor.Descriptor( name='KeypointEdge', full_name='object_detection.protos.KeypointEdge', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='start', full_name='object_detection.protos.KeypointEdge.start', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='end', full_name='object_detection.protos.KeypointEdge.end', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1728, serialized_end=1770, ) _EVALCONFIG_SUPERCATEGORIESENTRY.containing_type = _EVALCONFIG _EVALCONFIG.fields_by_name['parameterized_metric'].message_type = _PARAMETERIZEDMETRIC _EVALCONFIG.fields_by_name['super_categories'].message_type = _EVALCONFIG_SUPERCATEGORIESENTRY _EVALCONFIG.fields_by_name['keypoint_edge'].message_type = _KEYPOINTEDGE _PARAMETERIZEDMETRIC.fields_by_name['coco_keypoint_metrics'].message_type = _COCOKEYPOINTMETRICS _PARAMETERIZEDMETRIC.oneofs_by_name['parameterized_metric'].fields.append( _PARAMETERIZEDMETRIC.fields_by_name['coco_keypoint_metrics']) _PARAMETERIZEDMETRIC.fields_by_name['coco_keypoint_metrics'].containing_oneof = _PARAMETERIZEDMETRIC.oneofs_by_name['parameterized_metric'] _COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY.containing_type = _COCOKEYPOINTMETRICS 
_COCOKEYPOINTMETRICS.fields_by_name['keypoint_label_to_sigmas'].message_type = _COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG DESCRIPTOR.message_types_by_name['ParameterizedMetric'] = _PARAMETERIZEDMETRIC DESCRIPTOR.message_types_by_name['CocoKeypointMetrics'] = _COCOKEYPOINTMETRICS DESCRIPTOR.message_types_by_name['KeypointEdge'] = _KEYPOINTEDGE _sym_db.RegisterFileDescriptor(DESCRIPTOR) EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), { 'SuperCategoriesEntry' : _reflection.GeneratedProtocolMessageType('SuperCategoriesEntry', (_message.Message,), { 'DESCRIPTOR' : _EVALCONFIG_SUPERCATEGORIESENTRY, '__module__' : 'object_detection.protos.eval_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.EvalConfig.SuperCategoriesEntry) }) , 'DESCRIPTOR' : _EVALCONFIG, '__module__' : 'object_detection.protos.eval_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.EvalConfig) }) _sym_db.RegisterMessage(EvalConfig) _sym_db.RegisterMessage(EvalConfig.SuperCategoriesEntry) ParameterizedMetric = _reflection.GeneratedProtocolMessageType('ParameterizedMetric', (_message.Message,), { 'DESCRIPTOR' : _PARAMETERIZEDMETRIC, '__module__' : 'object_detection.protos.eval_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ParameterizedMetric) }) _sym_db.RegisterMessage(ParameterizedMetric) CocoKeypointMetrics = _reflection.GeneratedProtocolMessageType('CocoKeypointMetrics', (_message.Message,), { 'KeypointLabelToSigmasEntry' : _reflection.GeneratedProtocolMessageType('KeypointLabelToSigmasEntry', (_message.Message,), { 'DESCRIPTOR' : _COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY, '__module__' : 'object_detection.protos.eval_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CocoKeypointMetrics.KeypointLabelToSigmasEntry) }) , 'DESCRIPTOR' : _COCOKEYPOINTMETRICS, '__module__' : 'object_detection.protos.eval_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.CocoKeypointMetrics) }) _sym_db.RegisterMessage(CocoKeypointMetrics) _sym_db.RegisterMessage(CocoKeypointMetrics.KeypointLabelToSigmasEntry) KeypointEdge = _reflection.GeneratedProtocolMessageType('KeypointEdge', (_message.Message,), { 'DESCRIPTOR' : _KEYPOINTEDGE, '__module__' : 'object_detection.protos.eval_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.KeypointEdge) }) _sym_db.RegisterMessage(KeypointEdge) _EVALCONFIG_SUPERCATEGORIESENTRY._options = None _EVALCONFIG.fields_by_name['num_examples']._options = None _EVALCONFIG.fields_by_name['max_evals']._options = None _COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY._options = None # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/eval_pb2.py
eval_pb2.py
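The eval_pb2 module above only declares descriptors and registers the generated message classes; the following is a brief, hedged sketch of how such a module is typically consumed. Field names and defaults (e.g. retain_original_images defaulting to True, max_num_boxes_to_visualize defaulting to 20) come from the descriptors above, while the concrete values in config_text are made-up examples, not taken from the package.

# Illustrative only: parse a text-format EvalConfig and read values back.
from google.protobuf import text_format
from object_detection.protos import eval_pb2

config_text = """
num_examples: 500
min_score_threshold: 0.3
max_num_boxes_to_visualize: 10
keypoint_edge { start: 0 end: 1 }
"""

eval_config = eval_pb2.EvalConfig()
text_format.Parse(config_text, eval_config)    # fills the message in place

print(eval_config.min_score_threshold)         # 0.3  (explicitly set above)
print(eval_config.max_num_boxes_to_visualize)  # 10   (overrides the default of 20)
print(eval_config.retain_original_images)      # True (unset, falls back to descriptor default)

Unset optional fields never appear in config_text; proto2 fills them from the defaults baked into the FieldDescriptor entries above.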
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/model.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import center_net_pb2 as object__detection_dot_protos_dot_center__net__pb2 from object_detection.protos import faster_rcnn_pb2 as object__detection_dot_protos_dot_faster__rcnn__pb2 from object_detection.protos import ssd_pb2 as object__detection_dot_protos_dot_ssd__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/model.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n#object_detection/protos/model.proto\x12\x17object_detection.protos\x1a(object_detection/protos/center_net.proto\x1a)object_detection/protos/faster_rcnn.proto\x1a!object_detection/protos/ssd.proto\"\x86\x02\n\x0e\x44\x65tectionModel\x12:\n\x0b\x66\x61ster_rcnn\x18\x01 \x01(\x0b\x32#.object_detection.protos.FasterRcnnH\x00\x12+\n\x03ssd\x18\x02 \x01(\x0b\x32\x1c.object_detection.protos.SsdH\x00\x12H\n\x12\x65xperimental_model\x18\x03 \x01(\x0b\x32*.object_detection.protos.ExperimentalModelH\x00\x12\x38\n\ncenter_net\x18\x04 \x01(\x0b\x32\".object_detection.protos.CenterNetH\x00\x42\x07\n\x05model\"!\n\x11\x45xperimentalModel\x12\x0c\n\x04name\x18\x01 \x01(\t' , dependencies=[object__detection_dot_protos_dot_center__net__pb2.DESCRIPTOR,object__detection_dot_protos_dot_faster__rcnn__pb2.DESCRIPTOR,object__detection_dot_protos_dot_ssd__pb2.DESCRIPTOR,]) _DETECTIONMODEL = _descriptor.Descriptor( name='DetectionModel', full_name='object_detection.protos.DetectionModel', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='faster_rcnn', full_name='object_detection.protos.DetectionModel.faster_rcnn', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ssd', full_name='object_detection.protos.DetectionModel.ssd', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='experimental_model', full_name='object_detection.protos.DetectionModel.experimental_model', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='center_net', full_name='object_detection.protos.DetectionModel.center_net', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='model', full_name='object_detection.protos.DetectionModel.model', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=185, serialized_end=447, ) _EXPERIMENTALMODEL = _descriptor.Descriptor( name='ExperimentalModel', full_name='object_detection.protos.ExperimentalModel', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='object_detection.protos.ExperimentalModel.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=449, serialized_end=482, ) _DETECTIONMODEL.fields_by_name['faster_rcnn'].message_type = object__detection_dot_protos_dot_faster__rcnn__pb2._FASTERRCNN _DETECTIONMODEL.fields_by_name['ssd'].message_type = object__detection_dot_protos_dot_ssd__pb2._SSD _DETECTIONMODEL.fields_by_name['experimental_model'].message_type = _EXPERIMENTALMODEL _DETECTIONMODEL.fields_by_name['center_net'].message_type = object__detection_dot_protos_dot_center__net__pb2._CENTERNET _DETECTIONMODEL.oneofs_by_name['model'].fields.append( _DETECTIONMODEL.fields_by_name['faster_rcnn']) _DETECTIONMODEL.fields_by_name['faster_rcnn'].containing_oneof = _DETECTIONMODEL.oneofs_by_name['model'] _DETECTIONMODEL.oneofs_by_name['model'].fields.append( _DETECTIONMODEL.fields_by_name['ssd']) _DETECTIONMODEL.fields_by_name['ssd'].containing_oneof = _DETECTIONMODEL.oneofs_by_name['model'] _DETECTIONMODEL.oneofs_by_name['model'].fields.append( _DETECTIONMODEL.fields_by_name['experimental_model']) _DETECTIONMODEL.fields_by_name['experimental_model'].containing_oneof = _DETECTIONMODEL.oneofs_by_name['model'] _DETECTIONMODEL.oneofs_by_name['model'].fields.append( _DETECTIONMODEL.fields_by_name['center_net']) _DETECTIONMODEL.fields_by_name['center_net'].containing_oneof = _DETECTIONMODEL.oneofs_by_name['model'] DESCRIPTOR.message_types_by_name['DetectionModel'] = _DETECTIONMODEL DESCRIPTOR.message_types_by_name['ExperimentalModel'] = _EXPERIMENTALMODEL _sym_db.RegisterFileDescriptor(DESCRIPTOR) DetectionModel = _reflection.GeneratedProtocolMessageType('DetectionModel', (_message.Message,), { 'DESCRIPTOR' : _DETECTIONMODEL, '__module__' : 'object_detection.protos.model_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.DetectionModel) }) _sym_db.RegisterMessage(DetectionModel) ExperimentalModel = _reflection.GeneratedProtocolMessageType('ExperimentalModel', (_message.Message,), { 'DESCRIPTOR' : _EXPERIMENTALMODEL, '__module__' : 'object_detection.protos.model_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.ExperimentalModel) }) _sym_db.RegisterMessage(ExperimentalModel) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/model_pb2.py
model_pb2.py
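model_pb2.py defines DetectionModel with a `model` oneof spanning faster_rcnn, ssd, experimental_model and center_net. A minimal sketch of the oneof behaviour follows, assuming only fields visible in the descriptors above; the class count of 90 and the name string are invented example values.

# Illustrative only: the `model` oneof keeps exactly one architecture set.
from object_detection.protos import model_pb2

model = model_pb2.DetectionModel()
model.faster_rcnn.num_classes = 90       # writing a member selects that branch
print(model.WhichOneof('model'))         # 'faster_rcnn'

model.experimental_model.name = 'demo'   # switching branches clears faster_rcnn
print(model.WhichOneof('model'))         # 'experimental_model'
print(model.HasField('faster_rcnn'))     # False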
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/faster_rcnn.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import anchor_generator_pb2 as object__detection_dot_protos_dot_anchor__generator__pb2 from object_detection.protos import box_predictor_pb2 as object__detection_dot_protos_dot_box__predictor__pb2 from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2 from object_detection.protos import image_resizer_pb2 as object__detection_dot_protos_dot_image__resizer__pb2 from object_detection.protos import losses_pb2 as object__detection_dot_protos_dot_losses__pb2 from object_detection.protos import post_processing_pb2 as object__detection_dot_protos_dot_post__processing__pb2 from object_detection.protos import fpn_pb2 as object__detection_dot_protos_dot_fpn__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/faster_rcnn.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n)object_detection/protos/faster_rcnn.proto\x12\x17object_detection.protos\x1a.object_detection/protos/anchor_generator.proto\x1a+object_detection/protos/box_predictor.proto\x1a)object_detection/protos/hyperparams.proto\x1a+object_detection/protos/image_resizer.proto\x1a$object_detection/protos/losses.proto\x1a-object_detection/protos/post_processing.proto\x1a!object_detection/protos/fpn.proto\"\x97\x10\n\nFasterRcnn\x12\x1b\n\x10number_of_stages\x18\x01 \x01(\x05:\x01\x32\x12\x13\n\x0bnum_classes\x18\x03 \x01(\x05\x12<\n\rimage_resizer\x18\x04 \x01(\x0b\x32%.object_detection.protos.ImageResizer\x12N\n\x11\x66\x65\x61ture_extractor\x18\x05 \x01(\x0b\x32\x33.object_detection.protos.FasterRcnnFeatureExtractor\x12N\n\x1c\x66irst_stage_anchor_generator\x18\x06 \x01(\x0b\x32(.object_detection.protos.AnchorGenerator\x12\"\n\x17\x66irst_stage_atrous_rate\x18\x07 \x01(\x05:\x01\x31\x12X\n*first_stage_box_predictor_conv_hyperparams\x18\x08 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x30\n%first_stage_box_predictor_kernel_size\x18\t \x01(\x05:\x01\x33\x12,\n\x1f\x66irst_stage_box_predictor_depth\x18\n \x01(\x05:\x03\x35\x31\x32\x12\'\n\x1a\x66irst_stage_minibatch_size\x18\x0b \x01(\x05:\x03\x32\x35\x36\x12\x32\n%first_stage_positive_balance_fraction\x18\x0c \x01(\x02:\x03\x30.5\x12*\n\x1f\x66irst_stage_nms_score_threshold\x18\r \x01(\x02:\x01\x30\x12*\n\x1d\x66irst_stage_nms_iou_threshold\x18\x0e \x01(\x02:\x03\x30.7\x12&\n\x19\x66irst_stage_max_proposals\x18\x0f \x01(\x05:\x03\x33\x30\x30\x12/\n$first_stage_localization_loss_weight\x18\x10 \x01(\x02:\x01\x31\x12-\n\"first_stage_objectness_loss_weight\x18\x11 \x01(\x02:\x01\x31\x12\x19\n\x11initial_crop_size\x18\x12 \x01(\x05\x12\x1b\n\x13maxpool_kernel_size\x18\x13 \x01(\x05\x12\x16\n\x0emaxpool_stride\x18\x14 \x01(\x05\x12I\n\x1asecond_stage_box_predictor\x18\x15 \x01(\x0b\x32%.object_detection.protos.BoxPredictor\x12#\n\x17second_stage_batch_size\x18\x16 \x01(\x05:\x02\x36\x34\x12+\n\x1dsecond_stage_balance_fraction\x18\x17 
\x01(\x02:\x04\x30.25\x12M\n\x1csecond_stage_post_processing\x18\x18 \x01(\x0b\x32\'.object_detection.protos.PostProcessing\x12\x30\n%second_stage_localization_loss_weight\x18\x19 \x01(\x02:\x01\x31\x12\x32\n\'second_stage_classification_loss_weight\x18\x1a \x01(\x02:\x01\x31\x12\x33\n(second_stage_mask_prediction_loss_weight\x18\x1b \x01(\x02:\x01\x31\x12\x45\n\x12hard_example_miner\x18\x1c \x01(\x0b\x32).object_detection.protos.HardExampleMiner\x12U\n second_stage_classification_loss\x18\x1d \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12\'\n\x18inplace_batchnorm_update\x18\x1e \x01(\x08:\x05\x66\x61lse\x12)\n\x1ause_matmul_crop_and_resize\x18\x1f \x01(\x08:\x05\x66\x61lse\x12$\n\x15\x63lip_anchors_to_image\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1cuse_matmul_gather_in_matcher\x18! \x01(\x08:\x05\x66\x61lse\x12\x30\n!use_static_balanced_label_sampler\x18\" \x01(\x08:\x05\x66\x61lse\x12 \n\x11use_static_shapes\x18# \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0cresize_masks\x18$ \x01(\x08:\x04true\x12)\n\x1ause_static_shapes_for_eval\x18% \x01(\x08:\x05\x66\x61lse\x12\x30\n\"use_partitioned_nms_in_first_stage\x18& \x01(\x08:\x04true\x12\x33\n$return_raw_detections_during_predict\x18\' \x01(\x08:\x05\x66\x61lse\x12.\n\x1fuse_combined_nms_in_first_stage\x18( \x01(\x08:\x05\x66\x61lse\x12(\n\x19output_final_box_features\x18* \x01(\x08:\x05\x66\x61lse\x12,\n\x1doutput_final_box_rpn_features\x18+ \x01(\x08:\x05\x66\x61lse\x12\x38\n\x0e\x63ontext_config\x18) \x01(\x0b\x32 .object_detection.protos.Context\"\xbd\x03\n\x07\x43ontext\x12&\n\x18max_num_context_features\x18\x01 \x01(\x05:\x04\x32\x30\x30\x30\x12,\n\x1e\x61ttention_bottleneck_dimension\x18\x02 \x01(\x05:\x04\x32\x30\x34\x38\x12#\n\x15\x61ttention_temperature\x18\x03 \x01(\x02:\x04\x30.01\x12$\n\x16\x63ontext_feature_length\x18\x04 \x01(\x05:\x04\x32\x30\x35\x37\x12!\n\x12use_self_attention\x18\x06 \x01(\x08:\x05\x66\x61lse\x12%\n\x17use_long_term_attention\x18\x07 \x01(\x08:\x04true\x12)\n\x1aself_attention_in_sequence\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x13num_attention_heads\x18\t \x01(\x05:\x01\x31\x12\x1f\n\x14num_attention_layers\x18\x0b \x01(\x05:\x01\x31\x12[\n\x12\x61ttention_position\x18\n \x01(\x0e\x32*.object_detection.protos.AttentionPosition:\x13POST_BOX_CLASSIFIER\"\xcf\x02\n\x1a\x46\x61sterRcnnFeatureExtractor\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\'\n\x1b\x66irst_stage_features_stride\x18\x02 \x01(\x05:\x02\x31\x36\x12#\n\x14\x62\x61tch_norm_trainable\x18\x03 \x01(\x08:\x05\x66\x61lse\x12>\n\x10\x63onv_hyperparams\x18\x04 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12:\n+override_base_feature_extractor_hyperparams\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0fpad_to_multiple\x18\x06 \x01(\x05:\x02\x33\x32\x12<\n\x03\x66pn\x18\x07 \x01(\x0b\x32/.object_detection.protos.FeaturePyramidNetworks*Q\n\x11\x41ttentionPosition\x12\x15\n\x11\x41TTENTION_DEFAULT\x10\x00\x12\x17\n\x13POST_BOX_CLASSIFIER\x10\x01\x12\x0c\n\x08POST_RPN\x10\x02' , dependencies=[object__detection_dot_protos_dot_anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_box__predictor__pb2.DESCRIPTOR,object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,object__detection_dot_protos_dot_image__resizer__pb2.DESCRIPTOR,object__detection_dot_protos_dot_losses__pb2.DESCRIPTOR,object__detection_dot_protos_dot_post__processing__pb2.DESCRIPTOR,object__detection_dot_protos_dot_fpn__pb2.DESCRIPTOR,]) _ATTENTIONPOSITION = _descriptor.EnumDescriptor( name='AttentionPosition', full_name='object_detection.protos.AttentionPosition', 
filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='ATTENTION_DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='POST_BOX_CLASSIFIER', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='POST_RPN', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=3231, serialized_end=3312, ) _sym_db.RegisterEnumDescriptor(_ATTENTIONPOSITION) AttentionPosition = enum_type_wrapper.EnumTypeWrapper(_ATTENTIONPOSITION) ATTENTION_DEFAULT = 0 POST_BOX_CLASSIFIER = 1 POST_RPN = 2 _FASTERRCNN = _descriptor.Descriptor( name='FasterRcnn', full_name='object_detection.protos.FasterRcnn', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='number_of_stages', full_name='object_detection.protos.FasterRcnn.number_of_stages', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_classes', full_name='object_detection.protos.FasterRcnn.num_classes', index=1, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='image_resizer', full_name='object_detection.protos.FasterRcnn.image_resizer', index=2, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='feature_extractor', full_name='object_detection.protos.FasterRcnn.feature_extractor', index=3, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_anchor_generator', full_name='object_detection.protos.FasterRcnn.first_stage_anchor_generator', index=4, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_atrous_rate', full_name='object_detection.protos.FasterRcnn.first_stage_atrous_rate', index=5, number=7, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_box_predictor_conv_hyperparams', 
full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_conv_hyperparams', index=6, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_box_predictor_kernel_size', full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_kernel_size', index=7, number=9, type=5, cpp_type=1, label=1, has_default_value=True, default_value=3, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_box_predictor_depth', full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_depth', index=8, number=10, type=5, cpp_type=1, label=1, has_default_value=True, default_value=512, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_minibatch_size', full_name='object_detection.protos.FasterRcnn.first_stage_minibatch_size', index=9, number=11, type=5, cpp_type=1, label=1, has_default_value=True, default_value=256, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_positive_balance_fraction', full_name='object_detection.protos.FasterRcnn.first_stage_positive_balance_fraction', index=10, number=12, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_nms_score_threshold', full_name='object_detection.protos.FasterRcnn.first_stage_nms_score_threshold', index=11, number=13, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_nms_iou_threshold', full_name='object_detection.protos.FasterRcnn.first_stage_nms_iou_threshold', index=12, number=14, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.7), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_max_proposals', full_name='object_detection.protos.FasterRcnn.first_stage_max_proposals', index=13, number=15, type=5, cpp_type=1, label=1, has_default_value=True, default_value=300, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_localization_loss_weight', full_name='object_detection.protos.FasterRcnn.first_stage_localization_loss_weight', 
index=14, number=16, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_objectness_loss_weight', full_name='object_detection.protos.FasterRcnn.first_stage_objectness_loss_weight', index=15, number=17, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='initial_crop_size', full_name='object_detection.protos.FasterRcnn.initial_crop_size', index=16, number=18, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='maxpool_kernel_size', full_name='object_detection.protos.FasterRcnn.maxpool_kernel_size', index=17, number=19, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='maxpool_stride', full_name='object_detection.protos.FasterRcnn.maxpool_stride', index=18, number=20, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='second_stage_box_predictor', full_name='object_detection.protos.FasterRcnn.second_stage_box_predictor', index=19, number=21, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='second_stage_batch_size', full_name='object_detection.protos.FasterRcnn.second_stage_batch_size', index=20, number=22, type=5, cpp_type=1, label=1, has_default_value=True, default_value=64, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='second_stage_balance_fraction', full_name='object_detection.protos.FasterRcnn.second_stage_balance_fraction', index=21, number=23, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.25), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='second_stage_post_processing', full_name='object_detection.protos.FasterRcnn.second_stage_post_processing', index=22, number=24, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='second_stage_localization_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_localization_loss_weight', index=23, number=25, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='second_stage_classification_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_classification_loss_weight', index=24, number=26, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='second_stage_mask_prediction_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_mask_prediction_loss_weight', index=25, number=27, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hard_example_miner', full_name='object_detection.protos.FasterRcnn.hard_example_miner', index=26, number=28, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='second_stage_classification_loss', full_name='object_detection.protos.FasterRcnn.second_stage_classification_loss', index=27, number=29, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='inplace_batchnorm_update', full_name='object_detection.protos.FasterRcnn.inplace_batchnorm_update', index=28, number=30, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_matmul_crop_and_resize', full_name='object_detection.protos.FasterRcnn.use_matmul_crop_and_resize', index=29, number=31, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_anchors_to_image', full_name='object_detection.protos.FasterRcnn.clip_anchors_to_image', index=30, number=32, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( 
name='use_matmul_gather_in_matcher', full_name='object_detection.protos.FasterRcnn.use_matmul_gather_in_matcher', index=31, number=33, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_static_balanced_label_sampler', full_name='object_detection.protos.FasterRcnn.use_static_balanced_label_sampler', index=32, number=34, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_static_shapes', full_name='object_detection.protos.FasterRcnn.use_static_shapes', index=33, number=35, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='resize_masks', full_name='object_detection.protos.FasterRcnn.resize_masks', index=34, number=36, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_static_shapes_for_eval', full_name='object_detection.protos.FasterRcnn.use_static_shapes_for_eval', index=35, number=37, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_partitioned_nms_in_first_stage', full_name='object_detection.protos.FasterRcnn.use_partitioned_nms_in_first_stage', index=36, number=38, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='return_raw_detections_during_predict', full_name='object_detection.protos.FasterRcnn.return_raw_detections_during_predict', index=37, number=39, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_combined_nms_in_first_stage', full_name='object_detection.protos.FasterRcnn.use_combined_nms_in_first_stage', index=38, number=40, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='output_final_box_features', full_name='object_detection.protos.FasterRcnn.output_final_box_features', index=39, number=42, type=8, cpp_type=7, label=1, 
has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='output_final_box_rpn_features', full_name='object_detection.protos.FasterRcnn.output_final_box_rpn_features', index=40, number=43, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='context_config', full_name='object_detection.protos.FasterRcnn.context_config', index=41, number=41, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=372, serialized_end=2443, ) _CONTEXT = _descriptor.Descriptor( name='Context', full_name='object_detection.protos.Context', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='max_num_context_features', full_name='object_detection.protos.Context.max_num_context_features', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='attention_bottleneck_dimension', full_name='object_detection.protos.Context.attention_bottleneck_dimension', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2048, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='attention_temperature', full_name='object_detection.protos.Context.attention_temperature', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.01), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='context_feature_length', full_name='object_detection.protos.Context.context_feature_length', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2057, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_self_attention', full_name='object_detection.protos.Context.use_self_attention', index=4, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( 
name='use_long_term_attention', full_name='object_detection.protos.Context.use_long_term_attention', index=5, number=7, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='self_attention_in_sequence', full_name='object_detection.protos.Context.self_attention_in_sequence', index=6, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_attention_heads', full_name='object_detection.protos.Context.num_attention_heads', index=7, number=9, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_attention_layers', full_name='object_detection.protos.Context.num_attention_layers', index=8, number=11, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='attention_position', full_name='object_detection.protos.Context.attention_position', index=9, number=10, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2446, serialized_end=2891, ) _FASTERRCNNFEATUREEXTRACTOR = _descriptor.Descriptor( name='FasterRcnnFeatureExtractor', full_name='object_detection.protos.FasterRcnnFeatureExtractor', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='type', full_name='object_detection.protos.FasterRcnnFeatureExtractor.type', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='first_stage_features_stride', full_name='object_detection.protos.FasterRcnnFeatureExtractor.first_stage_features_stride', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batch_norm_trainable', full_name='object_detection.protos.FasterRcnnFeatureExtractor.batch_norm_trainable', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='conv_hyperparams', full_name='object_detection.protos.FasterRcnnFeatureExtractor.conv_hyperparams', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='override_base_feature_extractor_hyperparams', full_name='object_detection.protos.FasterRcnnFeatureExtractor.override_base_feature_extractor_hyperparams', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_to_multiple', full_name='object_detection.protos.FasterRcnnFeatureExtractor.pad_to_multiple', index=5, number=6, type=5, cpp_type=1, label=1, has_default_value=True, default_value=32, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fpn', full_name='object_detection.protos.FasterRcnnFeatureExtractor.fpn', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2894, serialized_end=3229, ) _FASTERRCNN.fields_by_name['image_resizer'].message_type = object__detection_dot_protos_dot_image__resizer__pb2._IMAGERESIZER _FASTERRCNN.fields_by_name['feature_extractor'].message_type = _FASTERRCNNFEATUREEXTRACTOR _FASTERRCNN.fields_by_name['first_stage_anchor_generator'].message_type = object__detection_dot_protos_dot_anchor__generator__pb2._ANCHORGENERATOR _FASTERRCNN.fields_by_name['first_stage_box_predictor_conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS _FASTERRCNN.fields_by_name['second_stage_box_predictor'].message_type = object__detection_dot_protos_dot_box__predictor__pb2._BOXPREDICTOR _FASTERRCNN.fields_by_name['second_stage_post_processing'].message_type = object__detection_dot_protos_dot_post__processing__pb2._POSTPROCESSING _FASTERRCNN.fields_by_name['hard_example_miner'].message_type = object__detection_dot_protos_dot_losses__pb2._HARDEXAMPLEMINER _FASTERRCNN.fields_by_name['second_stage_classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS _FASTERRCNN.fields_by_name['context_config'].message_type = _CONTEXT _CONTEXT.fields_by_name['attention_position'].enum_type = _ATTENTIONPOSITION _FASTERRCNNFEATUREEXTRACTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS _FASTERRCNNFEATUREEXTRACTOR.fields_by_name['fpn'].message_type = object__detection_dot_protos_dot_fpn__pb2._FEATUREPYRAMIDNETWORKS 
DESCRIPTOR.message_types_by_name['FasterRcnn'] = _FASTERRCNN DESCRIPTOR.message_types_by_name['Context'] = _CONTEXT DESCRIPTOR.message_types_by_name['FasterRcnnFeatureExtractor'] = _FASTERRCNNFEATUREEXTRACTOR DESCRIPTOR.enum_types_by_name['AttentionPosition'] = _ATTENTIONPOSITION _sym_db.RegisterFileDescriptor(DESCRIPTOR) FasterRcnn = _reflection.GeneratedProtocolMessageType('FasterRcnn', (_message.Message,), { 'DESCRIPTOR' : _FASTERRCNN, '__module__' : 'object_detection.protos.faster_rcnn_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.FasterRcnn) }) _sym_db.RegisterMessage(FasterRcnn) Context = _reflection.GeneratedProtocolMessageType('Context', (_message.Message,), { 'DESCRIPTOR' : _CONTEXT, '__module__' : 'object_detection.protos.faster_rcnn_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Context) }) _sym_db.RegisterMessage(Context) FasterRcnnFeatureExtractor = _reflection.GeneratedProtocolMessageType('FasterRcnnFeatureExtractor', (_message.Message,), { 'DESCRIPTOR' : _FASTERRCNNFEATUREEXTRACTOR, '__module__' : 'object_detection.protos.faster_rcnn_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.FasterRcnnFeatureExtractor) }) _sym_db.RegisterMessage(FasterRcnnFeatureExtractor) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/faster_rcnn_pb2.py
faster_rcnn_pb2.py
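faster_rcnn_pb2.py bakes the proto2 defaults into its descriptors: two stages, 300 first-stage proposals, a 0.7 first-stage NMS IoU threshold, and POST_BOX_CLASSIFIER attention. A rough usage sketch, with the parsed values chosen purely for illustration:

# Illustrative only: defaults from the FasterRcnn descriptor vs. explicit overrides.
from google.protobuf import text_format
from object_detection.protos import faster_rcnn_pb2

frcnn = faster_rcnn_pb2.FasterRcnn()
text_format.Parse('num_classes: 37 first_stage_max_proposals: 100', frcnn)

print(frcnn.num_classes)                    # 37  (explicitly set)
print(frcnn.first_stage_max_proposals)      # 100 (overrides the default of 300)
print(frcnn.number_of_stages)               # 2   (descriptor default)
print(frcnn.first_stage_nms_iou_threshold)  # 0.7 (descriptor default)

# Enum fields resolve through the module-level AttentionPosition wrapper.
pos = frcnn.context_config.attention_position       # unset, so the default of 1
print(faster_rcnn_pb2.AttentionPosition.Name(pos))  # 'POST_BOX_CLASSIFIER'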
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/hyperparams.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/hyperparams.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n)object_detection/protos/hyperparams.proto\x12\x17object_detection.protos\"\xe5\x04\n\x0bHyperparams\x12\x39\n\x02op\x18\x01 \x01(\x0e\x32\'.object_detection.protos.Hyperparams.Op:\x04\x43ONV\x12\x39\n\x0bregularizer\x18\x02 \x01(\x0b\x32$.object_detection.protos.Regularizer\x12\x39\n\x0binitializer\x18\x03 \x01(\x0b\x32$.object_detection.protos.Initializer\x12I\n\nactivation\x18\x04 \x01(\x0e\x32/.object_detection.protos.Hyperparams.Activation:\x04RELU\x12\x38\n\nbatch_norm\x18\x05 \x01(\x0b\x32\".object_detection.protos.BatchNormH\x00\x12=\n\x0fsync_batch_norm\x18\t \x01(\x0b\x32\".object_detection.protos.BatchNormH\x00\x12\x38\n\ngroup_norm\x18\x07 \x01(\x0b\x32\".object_detection.protos.GroupNormH\x00\x12#\n\x14regularize_depthwise\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x1d\n\x0e\x66orce_use_bias\x18\x08 \x01(\x08:\x05\x66\x61lse\"\x16\n\x02Op\x12\x08\n\x04\x43ONV\x10\x01\x12\x06\n\x02\x46\x43\x10\x02\"7\n\nActivation\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04RELU\x10\x01\x12\n\n\x06RELU_6\x10\x02\x12\t\n\x05SWISH\x10\x03\x42\x12\n\x10normalizer_oneof\"\xa6\x01\n\x0bRegularizer\x12@\n\x0el1_regularizer\x18\x01 \x01(\x0b\x32&.object_detection.protos.L1RegularizerH\x00\x12@\n\x0el2_regularizer\x18\x02 \x01(\x0b\x32&.object_detection.protos.L2RegularizerH\x00\x42\x13\n\x11regularizer_oneof\"\"\n\rL1Regularizer\x12\x11\n\x06weight\x18\x01 \x01(\x02:\x01\x31\"\"\n\rL2Regularizer\x12\x11\n\x06weight\x18\x01 \x01(\x02:\x01\x31\"\xd8\x02\n\x0bInitializer\x12[\n\x1ctruncated_normal_initializer\x18\x01 \x01(\x0b\x32\x33.object_detection.protos.TruncatedNormalInitializerH\x00\x12[\n\x1cvariance_scaling_initializer\x18\x02 \x01(\x0b\x32\x33.object_detection.protos.VarianceScalingInitializerH\x00\x12U\n\x19random_normal_initializer\x18\x03 \x01(\x0b\x32\x30.object_detection.protos.RandomNormalInitializerH\x00\x12#\n\x19keras_initializer_by_name\x18\x04 \x01(\tH\x00\x42\x13\n\x11initializer_oneof\"@\n\x1aTruncatedNormalInitializer\x12\x0f\n\x04mean\x18\x01 \x01(\x02:\x01\x30\x12\x11\n\x06stddev\x18\x02 \x01(\x02:\x01\x31\"\xc5\x01\n\x1aVarianceScalingInitializer\x12\x11\n\x06\x66\x61\x63tor\x18\x01 \x01(\x02:\x01\x32\x12\x16\n\x07uniform\x18\x02 \x01(\x08:\x05\x66\x61lse\x12N\n\x04mode\x18\x03 \x01(\x0e\x32\x38.object_detection.protos.VarianceScalingInitializer.Mode:\x06\x46\x41N_IN\",\n\x04Mode\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x46\x41N_AVG\x10\x02\"=\n\x17RandomNormalInitializer\x12\x0f\n\x04mean\x18\x01 \x01(\x02:\x01\x30\x12\x11\n\x06stddev\x18\x02 \x01(\x02:\x01\x31\"z\n\tBatchNorm\x12\x14\n\x05\x64\x65\x63\x61y\x18\x01 \x01(\x02:\x05\x30.999\x12\x14\n\x06\x63\x65nter\x18\x02 \x01(\x08:\x04true\x12\x14\n\x05scale\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x16\n\x07\x65psilon\x18\x04 \x01(\x02:\x05\x30.001\x12\x13\n\x05train\x18\x05 \x01(\x08:\x04true\"\x0b\n\tGroupNorm' ) 
_HYPERPARAMS_OP = _descriptor.EnumDescriptor( name='Op', full_name='object_detection.protos.Hyperparams.Op', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='CONV', index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='FC', index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=585, serialized_end=607, ) _sym_db.RegisterEnumDescriptor(_HYPERPARAMS_OP) _HYPERPARAMS_ACTIVATION = _descriptor.EnumDescriptor( name='Activation', full_name='object_detection.protos.Hyperparams.Activation', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='NONE', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='RELU', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='RELU_6', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SWISH', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=609, serialized_end=664, ) _sym_db.RegisterEnumDescriptor(_HYPERPARAMS_ACTIVATION) _VARIANCESCALINGINITIALIZER_MODE = _descriptor.EnumDescriptor( name='Mode', full_name='object_detection.protos.VarianceScalingInitializer.Mode', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='FAN_IN', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='FAN_OUT', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='FAN_AVG', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=1494, serialized_end=1538, ) _sym_db.RegisterEnumDescriptor(_VARIANCESCALINGINITIALIZER_MODE) _HYPERPARAMS = _descriptor.Descriptor( name='Hyperparams', full_name='object_detection.protos.Hyperparams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='op', full_name='object_detection.protos.Hyperparams.op', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='regularizer', full_name='object_detection.protos.Hyperparams.regularizer', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='initializer', full_name='object_detection.protos.Hyperparams.initializer', index=2, number=3, type=11, 
cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='activation', full_name='object_detection.protos.Hyperparams.activation', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batch_norm', full_name='object_detection.protos.Hyperparams.batch_norm', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='sync_batch_norm', full_name='object_detection.protos.Hyperparams.sync_batch_norm', index=5, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='group_norm', full_name='object_detection.protos.Hyperparams.group_norm', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='regularize_depthwise', full_name='object_detection.protos.Hyperparams.regularize_depthwise', index=7, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='force_use_bias', full_name='object_detection.protos.Hyperparams.force_use_bias', index=8, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _HYPERPARAMS_OP, _HYPERPARAMS_ACTIVATION, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='normalizer_oneof', full_name='object_detection.protos.Hyperparams.normalizer_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=71, serialized_end=684, ) _REGULARIZER = _descriptor.Descriptor( name='Regularizer', full_name='object_detection.protos.Regularizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='l1_regularizer', full_name='object_detection.protos.Regularizer.l1_regularizer', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='l2_regularizer', full_name='object_detection.protos.Regularizer.l2_regularizer', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='regularizer_oneof', full_name='object_detection.protos.Regularizer.regularizer_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=687, serialized_end=853, ) _L1REGULARIZER = _descriptor.Descriptor( name='L1Regularizer', full_name='object_detection.protos.L1Regularizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='weight', full_name='object_detection.protos.L1Regularizer.weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=855, serialized_end=889, ) _L2REGULARIZER = _descriptor.Descriptor( name='L2Regularizer', full_name='object_detection.protos.L2Regularizer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='weight', full_name='object_detection.protos.L2Regularizer.weight', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=891, serialized_end=925, ) _INITIALIZER = _descriptor.Descriptor( name='Initializer', full_name='object_detection.protos.Initializer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='truncated_normal_initializer', full_name='object_detection.protos.Initializer.truncated_normal_initializer', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='variance_scaling_initializer', full_name='object_detection.protos.Initializer.variance_scaling_initializer', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_normal_initializer', full_name='object_detection.protos.Initializer.random_normal_initializer', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='keras_initializer_by_name', full_name='object_detection.protos.Initializer.keras_initializer_by_name', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='initializer_oneof', full_name='object_detection.protos.Initializer.initializer_oneof', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=928, serialized_end=1272, ) _TRUNCATEDNORMALINITIALIZER = _descriptor.Descriptor( name='TruncatedNormalInitializer', full_name='object_detection.protos.TruncatedNormalInitializer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='mean', full_name='object_detection.protos.TruncatedNormalInitializer.mean', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stddev', full_name='object_detection.protos.TruncatedNormalInitializer.stddev', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1274, serialized_end=1338, ) _VARIANCESCALINGINITIALIZER = _descriptor.Descriptor( name='VarianceScalingInitializer', full_name='object_detection.protos.VarianceScalingInitializer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='factor', full_name='object_detection.protos.VarianceScalingInitializer.factor', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(2), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='uniform', full_name='object_detection.protos.VarianceScalingInitializer.uniform', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mode', full_name='object_detection.protos.VarianceScalingInitializer.mode', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _VARIANCESCALINGINITIALIZER_MODE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1341, serialized_end=1538, ) _RANDOMNORMALINITIALIZER = _descriptor.Descriptor( name='RandomNormalInitializer', full_name='object_detection.protos.RandomNormalInitializer', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='mean', full_name='object_detection.protos.RandomNormalInitializer.mean', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stddev', full_name='object_detection.protos.RandomNormalInitializer.stddev', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1540, serialized_end=1601, ) _BATCHNORM = _descriptor.Descriptor( name='BatchNorm', full_name='object_detection.protos.BatchNorm', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='decay', full_name='object_detection.protos.BatchNorm.decay', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.999), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='center', full_name='object_detection.protos.BatchNorm.center', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale', full_name='object_detection.protos.BatchNorm.scale', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='epsilon', full_name='object_detection.protos.BatchNorm.epsilon', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.001), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='train', full_name='object_detection.protos.BatchNorm.train', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1603, serialized_end=1725, ) _GROUPNORM = _descriptor.Descriptor( name='GroupNorm', full_name='object_detection.protos.GroupNorm', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=1727, serialized_end=1738, ) _HYPERPARAMS.fields_by_name['op'].enum_type = _HYPERPARAMS_OP _HYPERPARAMS.fields_by_name['regularizer'].message_type = _REGULARIZER _HYPERPARAMS.fields_by_name['initializer'].message_type = _INITIALIZER _HYPERPARAMS.fields_by_name['activation'].enum_type = _HYPERPARAMS_ACTIVATION _HYPERPARAMS.fields_by_name['batch_norm'].message_type = _BATCHNORM _HYPERPARAMS.fields_by_name['sync_batch_norm'].message_type = _BATCHNORM _HYPERPARAMS.fields_by_name['group_norm'].message_type = _GROUPNORM _HYPERPARAMS_OP.containing_type = _HYPERPARAMS _HYPERPARAMS_ACTIVATION.containing_type = _HYPERPARAMS _HYPERPARAMS.oneofs_by_name['normalizer_oneof'].fields.append( _HYPERPARAMS.fields_by_name['batch_norm']) _HYPERPARAMS.fields_by_name['batch_norm'].containing_oneof = _HYPERPARAMS.oneofs_by_name['normalizer_oneof'] _HYPERPARAMS.oneofs_by_name['normalizer_oneof'].fields.append( _HYPERPARAMS.fields_by_name['sync_batch_norm']) _HYPERPARAMS.fields_by_name['sync_batch_norm'].containing_oneof = _HYPERPARAMS.oneofs_by_name['normalizer_oneof'] _HYPERPARAMS.oneofs_by_name['normalizer_oneof'].fields.append( _HYPERPARAMS.fields_by_name['group_norm']) _HYPERPARAMS.fields_by_name['group_norm'].containing_oneof = _HYPERPARAMS.oneofs_by_name['normalizer_oneof'] _REGULARIZER.fields_by_name['l1_regularizer'].message_type = _L1REGULARIZER _REGULARIZER.fields_by_name['l2_regularizer'].message_type = _L2REGULARIZER _REGULARIZER.oneofs_by_name['regularizer_oneof'].fields.append( _REGULARIZER.fields_by_name['l1_regularizer']) _REGULARIZER.fields_by_name['l1_regularizer'].containing_oneof = _REGULARIZER.oneofs_by_name['regularizer_oneof'] _REGULARIZER.oneofs_by_name['regularizer_oneof'].fields.append( _REGULARIZER.fields_by_name['l2_regularizer']) _REGULARIZER.fields_by_name['l2_regularizer'].containing_oneof = _REGULARIZER.oneofs_by_name['regularizer_oneof'] _INITIALIZER.fields_by_name['truncated_normal_initializer'].message_type = _TRUNCATEDNORMALINITIALIZER _INITIALIZER.fields_by_name['variance_scaling_initializer'].message_type = _VARIANCESCALINGINITIALIZER _INITIALIZER.fields_by_name['random_normal_initializer'].message_type = _RANDOMNORMALINITIALIZER _INITIALIZER.oneofs_by_name['initializer_oneof'].fields.append( _INITIALIZER.fields_by_name['truncated_normal_initializer']) _INITIALIZER.fields_by_name['truncated_normal_initializer'].containing_oneof = _INITIALIZER.oneofs_by_name['initializer_oneof'] _INITIALIZER.oneofs_by_name['initializer_oneof'].fields.append( 
_INITIALIZER.fields_by_name['variance_scaling_initializer']) _INITIALIZER.fields_by_name['variance_scaling_initializer'].containing_oneof = _INITIALIZER.oneofs_by_name['initializer_oneof'] _INITIALIZER.oneofs_by_name['initializer_oneof'].fields.append( _INITIALIZER.fields_by_name['random_normal_initializer']) _INITIALIZER.fields_by_name['random_normal_initializer'].containing_oneof = _INITIALIZER.oneofs_by_name['initializer_oneof'] _INITIALIZER.oneofs_by_name['initializer_oneof'].fields.append( _INITIALIZER.fields_by_name['keras_initializer_by_name']) _INITIALIZER.fields_by_name['keras_initializer_by_name'].containing_oneof = _INITIALIZER.oneofs_by_name['initializer_oneof'] _VARIANCESCALINGINITIALIZER.fields_by_name['mode'].enum_type = _VARIANCESCALINGINITIALIZER_MODE _VARIANCESCALINGINITIALIZER_MODE.containing_type = _VARIANCESCALINGINITIALIZER DESCRIPTOR.message_types_by_name['Hyperparams'] = _HYPERPARAMS DESCRIPTOR.message_types_by_name['Regularizer'] = _REGULARIZER DESCRIPTOR.message_types_by_name['L1Regularizer'] = _L1REGULARIZER DESCRIPTOR.message_types_by_name['L2Regularizer'] = _L2REGULARIZER DESCRIPTOR.message_types_by_name['Initializer'] = _INITIALIZER DESCRIPTOR.message_types_by_name['TruncatedNormalInitializer'] = _TRUNCATEDNORMALINITIALIZER DESCRIPTOR.message_types_by_name['VarianceScalingInitializer'] = _VARIANCESCALINGINITIALIZER DESCRIPTOR.message_types_by_name['RandomNormalInitializer'] = _RANDOMNORMALINITIALIZER DESCRIPTOR.message_types_by_name['BatchNorm'] = _BATCHNORM DESCRIPTOR.message_types_by_name['GroupNorm'] = _GROUPNORM _sym_db.RegisterFileDescriptor(DESCRIPTOR) Hyperparams = _reflection.GeneratedProtocolMessageType('Hyperparams', (_message.Message,), { 'DESCRIPTOR' : _HYPERPARAMS, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Hyperparams) }) _sym_db.RegisterMessage(Hyperparams) Regularizer = _reflection.GeneratedProtocolMessageType('Regularizer', (_message.Message,), { 'DESCRIPTOR' : _REGULARIZER, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Regularizer) }) _sym_db.RegisterMessage(Regularizer) L1Regularizer = _reflection.GeneratedProtocolMessageType('L1Regularizer', (_message.Message,), { 'DESCRIPTOR' : _L1REGULARIZER, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.L1Regularizer) }) _sym_db.RegisterMessage(L1Regularizer) L2Regularizer = _reflection.GeneratedProtocolMessageType('L2Regularizer', (_message.Message,), { 'DESCRIPTOR' : _L2REGULARIZER, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.L2Regularizer) }) _sym_db.RegisterMessage(L2Regularizer) Initializer = _reflection.GeneratedProtocolMessageType('Initializer', (_message.Message,), { 'DESCRIPTOR' : _INITIALIZER, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.Initializer) }) _sym_db.RegisterMessage(Initializer) TruncatedNormalInitializer = _reflection.GeneratedProtocolMessageType('TruncatedNormalInitializer', (_message.Message,), { 'DESCRIPTOR' : _TRUNCATEDNORMALINITIALIZER, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.TruncatedNormalInitializer) }) _sym_db.RegisterMessage(TruncatedNormalInitializer) VarianceScalingInitializer = 
_reflection.GeneratedProtocolMessageType('VarianceScalingInitializer', (_message.Message,), { 'DESCRIPTOR' : _VARIANCESCALINGINITIALIZER, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.VarianceScalingInitializer) }) _sym_db.RegisterMessage(VarianceScalingInitializer) RandomNormalInitializer = _reflection.GeneratedProtocolMessageType('RandomNormalInitializer', (_message.Message,), { 'DESCRIPTOR' : _RANDOMNORMALINITIALIZER, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.RandomNormalInitializer) }) _sym_db.RegisterMessage(RandomNormalInitializer) BatchNorm = _reflection.GeneratedProtocolMessageType('BatchNorm', (_message.Message,), { 'DESCRIPTOR' : _BATCHNORM, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.BatchNorm) }) _sym_db.RegisterMessage(BatchNorm) GroupNorm = _reflection.GeneratedProtocolMessageType('GroupNorm', (_message.Message,), { 'DESCRIPTOR' : _GROUPNORM, '__module__' : 'object_detection.protos.hyperparams_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.GroupNorm) }) _sym_db.RegisterMessage(GroupNorm) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/hyperparams_pb2.py
hyperparams_pb2.py
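A minimal usage sketch (illustrative, not shipped with the package) of the generated Hyperparams message above: the message and field names come from the descriptor, while the concrete values in the text snippet are assumptions chosen for the example.

from google.protobuf import text_format
from object_detection.protos import hyperparams_pb2

# Illustrative hyperparameters: L2-regularized CONV ops with a truncated
# normal initializer and batch norm. Values are example assumptions, not the
# proto defaults (except where they happen to coincide).
conv_hyperparams_text = """
  op: CONV
  activation: RELU_6
  regularizer {
    l2_regularizer { weight: 0.0004 }
  }
  initializer {
    truncated_normal_initializer { mean: 0.0 stddev: 0.03 }
  }
  batch_norm {
    decay: 0.997
    epsilon: 0.001
    scale: true
  }
"""

hyperparams = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text, hyperparams)

# Oneof semantics: setting batch_norm occupies normalizer_oneof, so
# group_norm / sync_batch_norm are cleared automatically.
assert hyperparams.WhichOneof('normalizer_oneof') == 'batch_norm'
print(hyperparams.regularizer.l2_regularizer.weight)  # ~0.0004 (stored as float32)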
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/mean_stddev_box_coder.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/mean_stddev_box_coder.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n3object_detection/protos/mean_stddev_box_coder.proto\x12\x17object_detection.protos\"*\n\x12MeanStddevBoxCoder\x12\x14\n\x06stddev\x18\x01 \x01(\x02:\x04\x30.01' ) _MEANSTDDEVBOXCODER = _descriptor.Descriptor( name='MeanStddevBoxCoder', full_name='object_detection.protos.MeanStddevBoxCoder', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='stddev', full_name='object_detection.protos.MeanStddevBoxCoder.stddev', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.01), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=80, serialized_end=122, ) DESCRIPTOR.message_types_by_name['MeanStddevBoxCoder'] = _MEANSTDDEVBOXCODER _sym_db.RegisterFileDescriptor(DESCRIPTOR) MeanStddevBoxCoder = _reflection.GeneratedProtocolMessageType('MeanStddevBoxCoder', (_message.Message,), { 'DESCRIPTOR' : _MEANSTDDEVBOXCODER, '__module__' : 'object_detection.protos.mean_stddev_box_coder_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.MeanStddevBoxCoder) }) _sym_db.RegisterMessage(MeanStddevBoxCoder) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/mean_stddev_box_coder_pb2.py
mean_stddev_box_coder_pb2.py
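For context, a small NumPy sketch (an illustration under stated assumptions, not code from this package) of the arithmetic that the stddev field above parameterizes: mean-stddev box coding is commonly described as dividing per-corner offsets between boxes and anchors by a shared standard deviation, and the inverse transform recovers the corners exactly.

import numpy as np

def encode(boxes, anchors, stddev=0.01):
  # Relative codes: per-corner offset scaled by a shared standard deviation.
  return (boxes - anchors) / stddev

def decode(rel_codes, anchors, stddev=0.01):
  # Inverse transform recovers the original corner coordinates.
  return rel_codes * stddev + anchors

anchors = np.array([[0.1, 0.1, 0.5, 0.5]], np.float32)
boxes = np.array([[0.12, 0.08, 0.52, 0.54]], np.float32)
codes = encode(boxes, anchors)
np.testing.assert_allclose(decode(codes, anchors), boxes, rtol=1e-5)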
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/pipeline.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from object_detection.protos import eval_pb2 as object__detection_dot_protos_dot_eval__pb2 from object_detection.protos import graph_rewriter_pb2 as object__detection_dot_protos_dot_graph__rewriter__pb2 from object_detection.protos import input_reader_pb2 as object__detection_dot_protos_dot_input__reader__pb2 from object_detection.protos import model_pb2 as object__detection_dot_protos_dot_model__pb2 from object_detection.protos import train_pb2 as object__detection_dot_protos_dot_train__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/pipeline.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n&object_detection/protos/pipeline.proto\x12\x17object_detection.protos\x1a\"object_detection/protos/eval.proto\x1a,object_detection/protos/graph_rewriter.proto\x1a*object_detection/protos/input_reader.proto\x1a#object_detection/protos/model.proto\x1a#object_detection/protos/train.proto\"\x95\x03\n\x17TrainEvalPipelineConfig\x12\x36\n\x05model\x18\x01 \x01(\x0b\x32\'.object_detection.protos.DetectionModel\x12:\n\x0ctrain_config\x18\x02 \x01(\x0b\x32$.object_detection.protos.TrainConfig\x12@\n\x12train_input_reader\x18\x03 \x01(\x0b\x32$.object_detection.protos.InputReader\x12\x38\n\x0b\x65val_config\x18\x04 \x01(\x0b\x32#.object_detection.protos.EvalConfig\x12?\n\x11\x65val_input_reader\x18\x05 \x03(\x0b\x32$.object_detection.protos.InputReader\x12>\n\x0egraph_rewriter\x18\x06 \x01(\x0b\x32&.object_detection.protos.GraphRewriter*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02' , dependencies=[object__detection_dot_protos_dot_eval__pb2.DESCRIPTOR,object__detection_dot_protos_dot_graph__rewriter__pb2.DESCRIPTOR,object__detection_dot_protos_dot_input__reader__pb2.DESCRIPTOR,object__detection_dot_protos_dot_model__pb2.DESCRIPTOR,object__detection_dot_protos_dot_train__pb2.DESCRIPTOR,]) _TRAINEVALPIPELINECONFIG = _descriptor.Descriptor( name='TrainEvalPipelineConfig', full_name='object_detection.protos.TrainEvalPipelineConfig', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='model', full_name='object_detection.protos.TrainEvalPipelineConfig.model', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='train_config', full_name='object_detection.protos.TrainEvalPipelineConfig.train_config', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='train_input_reader', 
full_name='object_detection.protos.TrainEvalPipelineConfig.train_input_reader', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='eval_config', full_name='object_detection.protos.TrainEvalPipelineConfig.eval_config', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='eval_input_reader', full_name='object_detection.protos.TrainEvalPipelineConfig.eval_input_reader', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='graph_rewriter', full_name='object_detection.protos.TrainEvalPipelineConfig.graph_rewriter', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=True, syntax='proto2', extension_ranges=[(1000, 536870912), ], oneofs=[ ], serialized_start=268, serialized_end=673, ) _TRAINEVALPIPELINECONFIG.fields_by_name['model'].message_type = object__detection_dot_protos_dot_model__pb2._DETECTIONMODEL _TRAINEVALPIPELINECONFIG.fields_by_name['train_config'].message_type = object__detection_dot_protos_dot_train__pb2._TRAINCONFIG _TRAINEVALPIPELINECONFIG.fields_by_name['train_input_reader'].message_type = object__detection_dot_protos_dot_input__reader__pb2._INPUTREADER _TRAINEVALPIPELINECONFIG.fields_by_name['eval_config'].message_type = object__detection_dot_protos_dot_eval__pb2._EVALCONFIG _TRAINEVALPIPELINECONFIG.fields_by_name['eval_input_reader'].message_type = object__detection_dot_protos_dot_input__reader__pb2._INPUTREADER _TRAINEVALPIPELINECONFIG.fields_by_name['graph_rewriter'].message_type = object__detection_dot_protos_dot_graph__rewriter__pb2._GRAPHREWRITER DESCRIPTOR.message_types_by_name['TrainEvalPipelineConfig'] = _TRAINEVALPIPELINECONFIG _sym_db.RegisterFileDescriptor(DESCRIPTOR) TrainEvalPipelineConfig = _reflection.GeneratedProtocolMessageType('TrainEvalPipelineConfig', (_message.Message,), { 'DESCRIPTOR' : _TRAINEVALPIPELINECONFIG, '__module__' : 'object_detection.protos.pipeline_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.TrainEvalPipelineConfig) }) _sym_db.RegisterMessage(TrainEvalPipelineConfig) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/pipeline_pb2.py
pipeline_pb2.py
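A short sketch (assumed usage, not part of the generated module) of reading a pipeline config into the TrainEvalPipelineConfig message defined above; 'path/to/pipeline.config' is a placeholder path.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
# Placeholder path: point this at an actual text-format pipeline config.
with tf.gfile.GFile('path/to/pipeline.config', 'r') as f:
  text_format.Parse(f.read(), pipeline_config)

# Top-level fields mirror the descriptor: model, train_config,
# train_input_reader, eval_config and the repeated eval_input_reader.
print(pipeline_config.HasField('model'))
print(len(pipeline_config.eval_input_reader))  # repeated field, may be empty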
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: object_detection/protos/square_box_coder.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='object_detection/protos/square_box_coder.proto', package='object_detection.protos', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n.object_detection/protos/square_box_coder.proto\x12\x17object_detection.protos\"S\n\x0eSquareBoxCoder\x12\x13\n\x07y_scale\x18\x01 \x01(\x02:\x02\x31\x30\x12\x13\n\x07x_scale\x18\x02 \x01(\x02:\x02\x31\x30\x12\x17\n\x0clength_scale\x18\x03 \x01(\x02:\x01\x35' ) _SQUAREBOXCODER = _descriptor.Descriptor( name='SquareBoxCoder', full_name='object_detection.protos.SquareBoxCoder', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='y_scale', full_name='object_detection.protos.SquareBoxCoder.y_scale', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(10), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='x_scale', full_name='object_detection.protos.SquareBoxCoder.x_scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(10), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='length_scale', full_name='object_detection.protos.SquareBoxCoder.length_scale', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=75, serialized_end=158, ) DESCRIPTOR.message_types_by_name['SquareBoxCoder'] = _SQUAREBOXCODER _sym_db.RegisterFileDescriptor(DESCRIPTOR) SquareBoxCoder = _reflection.GeneratedProtocolMessageType('SquareBoxCoder', (_message.Message,), { 'DESCRIPTOR' : _SQUAREBOXCODER, '__module__' : 'object_detection.protos.square_box_coder_pb2' # @@protoc_insertion_point(class_scope:object_detection.protos.SquareBoxCoder) }) _sym_db.RegisterMessage(SquareBoxCoder) # @@protoc_insertion_point(module_scope)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/protos/square_box_coder_pb2.py
square_box_coder_pb2.py
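For orientation, a rough NumPy paraphrase (an assumption about the coder's behaviour, not code from this package) of square box coding with the default scales from the proto above: box centres are normalized by the anchor's square side length sqrt(height * width), and the side-length ratio is log-encoded.

import numpy as np

def encode_square(box, anchor, y_scale=10.0, x_scale=10.0, length_scale=5.0):
  # Boxes are [ymin, xmin, ymax, xmax]; the anchor is reduced to a square of
  # side length sqrt(height * width) before computing relative codes.
  ya, xa = (anchor[0] + anchor[2]) / 2.0, (anchor[1] + anchor[3]) / 2.0
  la = np.sqrt((anchor[2] - anchor[0]) * (anchor[3] - anchor[1]))
  y, x = (box[0] + box[2]) / 2.0, (box[1] + box[3]) / 2.0
  l = np.sqrt((box[2] - box[0]) * (box[3] - box[1]))
  return np.array([y_scale * (y - ya) / la,
                   x_scale * (x - xa) / la,
                   length_scale * np.log(l / la)])

print(encode_square(np.array([0.1, 0.1, 0.5, 0.5]),
                    np.array([0.15, 0.1, 0.55, 0.5])))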
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RFCN Box Predictor.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.core import box_predictor from object_detection.utils import ops BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class RfcnBoxPredictor(box_predictor.BoxPredictor): """RFCN Box Predictor. Applies a position sensitive ROI pooling on position sensitive feature maps to predict classes and refined locations. See https://arxiv.org/abs/1605.06409 for details. This is used for the second stage of the RFCN meta architecture. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. """ def __init__(self, is_training, num_classes, conv_hyperparams_fn, num_spatial_bins, depth, crop_size, box_code_size): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to construct tf-slim arg_scope with hyperparameters for convolutional layers. num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`. depth: Target depth to reduce the input feature maps to. crop_size: A list of two integers `[crop_height, crop_width]`. box_code_size: Size of encoding for each box. """ super(RfcnBoxPredictor, self).__init__(is_training, num_classes) self._conv_hyperparams_fn = conv_hyperparams_fn self._num_spatial_bins = num_spatial_bins self._depth = depth self._crop_size = crop_size self._box_code_size = box_code_size @property def num_classes(self): return self._num_classes def _predict(self, image_features, num_predictions_per_location, proposal_boxes): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Currently, this must be set to [1], or an error will be raised. proposal_boxes: A float tensor of shape [batch_size, num_proposals, box_code_size]. Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. 
class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. Raises: ValueError: if num_predictions_per_location is not 1 or if len(image_features) is not 1. """ if (len(num_predictions_per_location) != 1 or num_predictions_per_location[0] != 1): raise ValueError('Currently RfcnBoxPredictor only supports ' 'predicting a single box per class per location.') if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'. format(len(image_features))) image_feature = image_features[0] num_predictions_per_location = num_predictions_per_location[0] batch_size = tf.shape(proposal_boxes)[0] num_boxes = tf.shape(proposal_boxes)[1] net = image_feature with slim.arg_scope(self._conv_hyperparams_fn()): net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth') # Location predictions. location_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * self.num_classes * self._box_code_size) location_feature_map = slim.conv2d(net, location_feature_map_depth, [1, 1], activation_fn=None, scope='refined_locations') box_encodings = ops.batch_position_sensitive_crop_regions( location_feature_map, boxes=proposal_boxes, crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True) box_encodings = tf.squeeze(box_encodings, axis=[2, 3]) box_encodings = tf.reshape(box_encodings, [batch_size * num_boxes, 1, self.num_classes, self._box_code_size]) # Class predictions. total_classes = self.num_classes + 1 # Account for background class. class_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * total_classes) class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1], activation_fn=None, scope='class_predictions') class_predictions_with_background = ( ops.batch_position_sensitive_crop_regions( class_feature_map, boxes=proposal_boxes, crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True)) class_predictions_with_background = tf.squeeze( class_predictions_with_background, axis=[2, 3]) class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size * num_boxes, 1, total_classes]) return {BOX_ENCODINGS: [box_encodings], CLASS_PREDICTIONS_WITH_BACKGROUND: [class_predictions_with_background]}
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/rfcn_box_predictor.py
rfcn_box_predictor.py
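A construction-and-call sketch for the RfcnBoxPredictor above. The constructor arguments follow its docstring; the conv_hyperparams_fn stand-in and the final predict call are assumptions (conv_hyperparams_fn is normally produced by a hyperparams builder, and the call assumes the base BoxPredictor.predict signature), so treat this as a shape-only illustration rather than canonical usage.

import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.predictors import rfcn_box_predictor

def _conv_hyperparams_fn():
  # Stand-in for a builder-produced arg_scope function (assumption).
  with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu6) as sc:
    return sc

predictor = rfcn_box_predictor.RfcnBoxPredictor(
    is_training=False,
    num_classes=90,
    conv_hyperparams_fn=_conv_hyperparams_fn,
    num_spatial_bins=[3, 3],
    depth=1024,
    crop_size=[12, 12],
    box_code_size=4)

image_features = [tf.zeros([2, 32, 32, 1024])]  # exactly one feature map, as required
proposal_boxes = tf.zeros([2, 8, 4])            # [batch, num_proposals, 4], normalized coords
predictions = predictor.predict(
    image_features,
    num_predictions_per_location=[1],           # must be [1] for this predictor
    scope='RfcnBoxPredictor',
    proposal_boxes=proposal_boxes)
# Per the implementation, box encodings are reshaped to
# [batch * num_proposals, 1, num_classes, box_code_size].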
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convolutional Box Predictors with and without weight sharing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.core import box_predictor from object_detection.utils import shape_utils from object_detection.utils import static_shape BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class _NoopVariableScope(object): """A dummy class that does not push any scope.""" def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): return False class ConvolutionalBoxPredictor(box_predictor.BoxPredictor): """Convolutional Box Predictor. Optionally add an intermediate 1x1 convolutional layer after features and predict in parallel branches box_encodings and class_predictions_with_background. Currently this box predictor assumes that predictions are "shared" across classes --- that is each anchor makes box predictions which do not depend on class. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams_fn, num_layers_before_predictor, min_depth, max_depth): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. num_layers_before_predictor: Number of the additional conv layers before the predictor. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. Raises: ValueError: if min_depth > max_depth. 
""" super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._other_heads = other_heads self._conv_hyperparams_fn = conv_hyperparams_fn self._min_depth = min_depth self._max_depth = max_depth self._num_layers_before_predictor = num_layers_before_predictor @property def num_classes(self): return self._num_classes def _predict(self, image_features, num_predictions_per_location_list): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Returns: A dictionary containing: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. (optional) Predictions from other heads. """ predictions = { BOX_ENCODINGS: [], CLASS_PREDICTIONS_WITH_BACKGROUND: [], } for head_name in self._other_heads.keys(): predictions[head_name] = [] # TODO(rathodv): Come up with a better way to generate scope names # in box predictor once we have time to retrain all models in the zoo. # The following lines create scope names to be backwards compatible with the # existing checkpoints. box_predictor_scopes = [_NoopVariableScope()] if len(image_features) > 1: box_predictor_scopes = [ tf.variable_scope('BoxPredictor_{}'.format(i)) for i in range(len(image_features)) ] for (image_feature, num_predictions_per_location, box_predictor_scope) in zip( image_features, num_predictions_per_location_list, box_predictor_scopes): net = image_feature with box_predictor_scope: with slim.arg_scope(self._conv_hyperparams_fn()): with slim.arg_scope([slim.dropout], is_training=self._is_training): # Add additional conv layers before the class predictor. features_depth = static_shape.get_depth(image_feature.get_shape()) depth = max(min(features_depth, self._max_depth), self._min_depth) tf.logging.info('depth of additional conv before box predictor: {}'. format(depth)) if depth > 0 and self._num_layers_before_predictor > 0: for i in range(self._num_layers_before_predictor): net = slim.conv2d( net, depth, [1, 1], reuse=tf.AUTO_REUSE, scope='Conv2d_%d_1x1_%d' % (i, depth)) sorted_keys = sorted(self._other_heads.keys()) sorted_keys.append(BOX_ENCODINGS) sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) for head_name in sorted_keys: if head_name == BOX_ENCODINGS: head_obj = self._box_prediction_head elif head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: head_obj = self._class_prediction_head else: head_obj = self._other_heads[head_name] prediction = head_obj.predict( features=net, num_predictions_per_location=num_predictions_per_location) predictions[head_name].append(prediction) return predictions # TODO(rathodv): Replace with slim.arg_scope_func_key once its available # externally. 
def _arg_scope_func_key(op): """Returns a key that can be used to index arg_scope dictionary.""" return getattr(op, '_key_op', str(op)) # TODO(rathodv): Merge the implementation with ConvolutionalBoxPredictor above # since they are very similar. class WeightSharedConvolutionalBoxPredictor(box_predictor.BoxPredictor): """Convolutional Box Predictor with weight sharing. Defines the box predictor as defined in https://arxiv.org/abs/1708.02002. This class differs from ConvolutionalBoxPredictor in that it shares weights and biases while predicting from different feature maps. However, batch_norm parameters are not shared because the statistics of the activations vary among the different feature maps. Also note that separate multi-layer towers are constructed for the box encoding and class predictors respectively. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams_fn, depth, num_layers_before_predictor, kernel_size=3, apply_batch_norm=False, share_prediction_tower=False, use_depthwise=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. kernel_size: Size of final convolution kernel. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. share_prediction_tower: Whether to share the multi-layer tower among box prediction head, class prediction head and other heads. use_depthwise: Whether to use depthwise separable conv2d instead of regular conv2d. 
""" super(WeightSharedConvolutionalBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._other_heads = other_heads self._conv_hyperparams_fn = conv_hyperparams_fn self._depth = depth self._num_layers_before_predictor = num_layers_before_predictor self._kernel_size = kernel_size self._apply_batch_norm = apply_batch_norm self._share_prediction_tower = share_prediction_tower self._use_depthwise = use_depthwise @property def num_classes(self): return self._num_classes def _insert_additional_projection_layer(self, image_feature, inserted_layer_counter, target_channel): if inserted_layer_counter < 0: return image_feature, inserted_layer_counter image_feature = slim.conv2d( image_feature, target_channel, [1, 1], stride=1, padding='SAME', activation_fn=None, normalizer_fn=(tf.identity if self._apply_batch_norm else None), scope='ProjectionLayer/conv2d_{}'.format( inserted_layer_counter)) if self._apply_batch_norm: image_feature = slim.batch_norm( image_feature, scope='ProjectionLayer/conv2d_{}/BatchNorm'.format( inserted_layer_counter)) inserted_layer_counter += 1 return image_feature, inserted_layer_counter def _compute_base_tower(self, tower_name_scope, image_feature, feature_index): net = image_feature for i in range(self._num_layers_before_predictor): if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d net = conv_op( net, self._depth, [self._kernel_size, self._kernel_size], stride=1, padding='SAME', activation_fn=None, normalizer_fn=(tf.identity if self._apply_batch_norm else None), scope='{}/conv2d_{}'.format(tower_name_scope, i)) if self._apply_batch_norm: net = slim.batch_norm( net, scope='{}/conv2d_{}/BatchNorm/feature_{}'. format(tower_name_scope, i, feature_index)) net = tf.nn.relu6(net) return net def _predict_head(self, head_name, head_obj, image_feature, box_tower_feature, feature_index, num_predictions_per_location): if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: tower_name_scope = 'ClassPredictionTower' else: tower_name_scope = head_name + 'PredictionTower' if self._share_prediction_tower: head_tower_feature = box_tower_feature else: head_tower_feature = self._compute_base_tower( tower_name_scope=tower_name_scope, image_feature=image_feature, feature_index=feature_index) return head_obj.predict( features=head_tower_feature, num_predictions_per_location=num_predictions_per_location) def _predict(self, image_features, num_predictions_per_location_list): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels] containing features for a batch of images. Note that when not all tensors in the list have the same number of channels, an additional projection layer will be added on top the tensor to generate feature map with number of channels consitent with the majority. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Note that all values must be the same since the weights are shared. Returns: A dictionary containing: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, code_size] representing the location of the objects. Each entry in the list corresponds to a feature map in the input `image_features` list. 
class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. (optional) Predictions from other heads. E.g., mask_predictions: A list of float tensors of shape [batch_size, num_anchord_i, num_classes, mask_height, mask_width]. Raises: ValueError: If the num predictions per locations differs between the feature maps. """ if len(set(num_predictions_per_location_list)) > 1: raise ValueError('num predictions per location must be same for all' 'feature maps, found: {}'.format( num_predictions_per_location_list)) feature_channels = [ shape_utils.get_dim_as_int(image_feature.shape[3]) for image_feature in image_features ] has_different_feature_channels = len(set(feature_channels)) > 1 if has_different_feature_channels: inserted_layer_counter = 0 target_channel = max(set(feature_channels), key=feature_channels.count) tf.logging.info('Not all feature maps have the same number of ' 'channels, found: {}, appending additional projection ' 'layers to bring all feature maps to uniformly have {} ' 'channels.'.format(feature_channels, target_channel)) else: # Place holder variables if has_different_feature_channels is False. target_channel = -1 inserted_layer_counter = -1 predictions = { BOX_ENCODINGS: [], CLASS_PREDICTIONS_WITH_BACKGROUND: [], } for head_name in self._other_heads.keys(): predictions[head_name] = [] for feature_index, (image_feature, num_predictions_per_location) in enumerate( zip(image_features, num_predictions_per_location_list)): with tf.variable_scope('WeightSharedConvolutionalBoxPredictor', reuse=tf.AUTO_REUSE): with slim.arg_scope(self._conv_hyperparams_fn()): # TODO(wangjiang) Pass is_training to the head class directly. with slim.arg_scope([slim.dropout], is_training=self._is_training): (image_feature, inserted_layer_counter) = self._insert_additional_projection_layer( image_feature, inserted_layer_counter, target_channel) if self._share_prediction_tower: box_tower_scope = 'PredictionTower' else: box_tower_scope = 'BoxPredictionTower' box_tower_feature = self._compute_base_tower( tower_name_scope=box_tower_scope, image_feature=image_feature, feature_index=feature_index) box_encodings = self._box_prediction_head.predict( features=box_tower_feature, num_predictions_per_location=num_predictions_per_location) predictions[BOX_ENCODINGS].append(box_encodings) sorted_keys = sorted(self._other_heads.keys()) sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) for head_name in sorted_keys: if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: head_obj = self._class_prediction_head else: head_obj = self._other_heads[head_name] prediction = self._predict_head( head_name=head_name, head_obj=head_obj, image_feature=image_feature, box_tower_feature=box_tower_feature, feature_index=feature_index, num_predictions_per_location=num_predictions_per_location) predictions[head_name].append(prediction) return predictions
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/convolutional_box_predictor.py
convolutional_box_predictor.py
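The slim-based WeightSharedConvolutionalBoxPredictor above returns a dictionary mapping each head name to a list with one tensor per input feature map. Below is a minimal consumption sketch, assuming a constructed predictor (`weight_shared_predictor`) and two feature-map tensors; the per-location counts are illustrative and, per the docstring, must be identical across maps.

import tensorflow.compat.v1 as tf
from object_detection.predictors import convolutional_box_predictor

# Assumed inputs: feature_map_1 and feature_map_2 are [batch, h_i, w_i, c] float tensors.
predictions = weight_shared_predictor.predict(
    [feature_map_1, feature_map_2],
    num_predictions_per_location=[5, 5])

# Each value is a list with one entry per feature map; concatenating along the
# anchor dimension yields [batch, total_anchors, ...] tensors.
box_encodings = tf.concat(
    predictions[convolutional_box_predictor.BOX_ENCODINGS], axis=1)
class_predictions = tf.concat(
    predictions[convolutional_box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
    axis=1)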
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convolutional Box Predictors with and without weight sharing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from six.moves import range import tensorflow.compat.v1 as tf from object_detection.core import box_predictor from object_detection.utils import shape_utils from object_detection.utils import static_shape keras = tf.keras.layers BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class _NoopVariableScope(object): """A dummy class that does not push any scope.""" def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): return False class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): """Convolutional Keras Box Predictor. Optionally add an intermediate 1x1 convolutional layer after features and predict in parallel branches box_encodings and class_predictions_with_background. Currently this box predictor assumes that predictions are "shared" across classes --- that is each anchor makes box predictions which do not depend on class. """ def __init__(self, is_training, num_classes, box_prediction_heads, class_prediction_heads, other_heads, conv_hyperparams, num_layers_before_predictor, min_depth, max_depth, freeze_batchnorm, inplace_batchnorm_update, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_heads: A list of heads that predict the boxes. class_prediction_heads: A list of heads that predict the classes. other_heads: A dictionary mapping head names to lists of convolutional heads. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. num_layers_before_predictor: Number of the additional conv layers before the predictor. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. 
When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, name=name) if min_depth > max_depth: raise ValueError('min_depth should be less than or equal to max_depth') if len(box_prediction_heads) != len(class_prediction_heads): raise ValueError('All lists of heads must be the same length.') for other_head_list in other_heads.values(): if len(box_prediction_heads) != len(other_head_list): raise ValueError('All lists of heads must be the same length.') self._prediction_heads = { BOX_ENCODINGS: box_prediction_heads, CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_heads, } if other_heads: self._prediction_heads.update(other_heads) # We generate a consistent ordering for the prediction head names, # So that all workers build the model in the exact same order self._sorted_head_names = sorted(self._prediction_heads.keys()) self._conv_hyperparams = conv_hyperparams self._min_depth = min_depth self._max_depth = max_depth self._num_layers_before_predictor = num_layers_before_predictor self._shared_nets = [] def build(self, input_shapes): """Creates the variables of the layer.""" if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]): raise ValueError('This box predictor was constructed with %d heads,' 'but there are %d inputs.' % (len(self._prediction_heads[BOX_ENCODINGS]), len(input_shapes))) for stack_index, input_shape in enumerate(input_shapes): net = [] # Add additional conv layers before the class predictor. features_depth = static_shape.get_depth(input_shape) depth = max(min(features_depth, self._max_depth), self._min_depth) tf.logging.info( 'depth of additional conv before box predictor: {}'.format(depth)) if depth > 0 and self._num_layers_before_predictor > 0: for i in range(self._num_layers_before_predictor): net.append(keras.Conv2D(depth, [1, 1], name='SharedConvolutions_%d/Conv2d_%d_1x1_%d' % (stack_index, i, depth), padding='SAME', **self._conv_hyperparams.params())) net.append(self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm' % (stack_index, i, depth))) net.append(self._conv_hyperparams.build_activation_layer( name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation' % (stack_index, i, depth), )) # Until certain bugs are fixed in checkpointable lists, # this net must be appended only once it's been filled with layers self._shared_nets.append(net) self.built = True def _predict(self, image_features, **kwargs): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. **kwargs: Unused Keyword args Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. 
Each entry in the list corresponds to a feature map in the input `image_features` list. """ predictions = collections.defaultdict(list) for (index, net) in enumerate(image_features): # Apply shared conv layers before the head predictors. for layer in self._shared_nets[index]: net = layer(net) for head_name in self._sorted_head_names: head_obj = self._prediction_heads[head_name][index] prediction = head_obj(net) predictions[head_name].append(prediction) return predictions class WeightSharedConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): """Convolutional Box Predictor with weight sharing based on Keras. Defines the box predictor as defined in https://arxiv.org/abs/1708.02002. This class differs from ConvolutionalBoxPredictor in that it shares weights and biases while predicting from different feature maps. However, batch_norm parameters are not shared because the statistics of the activations vary among the different feature maps. Also note that separate multi-layer towers are constructed for the box encoding and class predictors respectively. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams, depth, num_layers_before_predictor, freeze_batchnorm, inplace_batchnorm_update, kernel_size=3, apply_batch_norm=False, share_prediction_tower=False, use_depthwise=False, apply_conv_hyperparams_pointwise=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. kernel_size: Size of final convolution kernel. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. share_prediction_tower: Whether to share the multi-layer tower among box prediction head, class prediction head and other heads. use_depthwise: Whether to use depthwise separable conv2d instead of regular conv2d. apply_conv_hyperparams_pointwise: Whether to apply the conv_hyperparams to the pointwise_initializer and pointwise_regularizer when using depthwise separable convolutions. By default, conv_hyperparams are only applied to the depthwise initializer and regularizer when use_depthwise is true. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. 
""" super(WeightSharedConvolutionalBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, name=name) self._box_prediction_head = box_prediction_head self._prediction_heads = { CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_head, } if other_heads: self._prediction_heads.update(other_heads) # We generate a consistent ordering for the prediction head names, # so that all workers build the model in the exact same order. self._sorted_head_names = sorted(self._prediction_heads.keys()) self._conv_hyperparams = conv_hyperparams self._depth = depth self._num_layers_before_predictor = num_layers_before_predictor self._kernel_size = kernel_size self._apply_batch_norm = apply_batch_norm self._share_prediction_tower = share_prediction_tower self._use_depthwise = use_depthwise self._apply_conv_hyperparams_pointwise = apply_conv_hyperparams_pointwise # Additional projection layers to bring all feature maps to uniform # channels. self._additional_projection_layers = [] # The base tower layers for each head. self._base_tower_layers_for_heads = { BOX_ENCODINGS: [], CLASS_PREDICTIONS_WITH_BACKGROUND: [], } for head_name in other_heads.keys(): self._base_tower_layers_for_heads[head_name] = [] # A dict maps the tower_name_scope of each head to the shared conv layers in # the base tower for different feature map levels. self._head_scope_conv_layers = {} def _insert_additional_projection_layer( self, inserted_layer_counter, target_channel): projection_layers = [] if inserted_layer_counter >= 0: use_bias = False if (self._apply_batch_norm and not self._conv_hyperparams.force_use_bias()) else True projection_layers.append(keras.Conv2D( target_channel, [1, 1], strides=1, padding='SAME', name='ProjectionLayer/conv2d_{}'.format(inserted_layer_counter), **self._conv_hyperparams.params(use_bias=use_bias))) if self._apply_batch_norm: projection_layers.append(self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='ProjectionLayer/conv2d_{}/BatchNorm'.format( inserted_layer_counter))) inserted_layer_counter += 1 return inserted_layer_counter, projection_layers def _compute_base_tower(self, tower_name_scope, feature_index): conv_layers = [] batch_norm_layers = [] activation_layers = [] use_bias = False if (self._apply_batch_norm and not self._conv_hyperparams.force_use_bias()) else True for additional_conv_layer_idx in range(self._num_layers_before_predictor): layer_name = '{}/conv2d_{}'.format( tower_name_scope, additional_conv_layer_idx) if tower_name_scope not in self._head_scope_conv_layers: if self._use_depthwise: kwargs = self._conv_hyperparams.params(use_bias=use_bias) # Both the regularizer and initializer apply to the depthwise layer, # so we remap the kernel_* to depthwise_* here. 
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] if self._apply_conv_hyperparams_pointwise: kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['pointwise_initializer'] = kwargs['kernel_initializer'] conv_layers.append( tf.keras.layers.SeparableConv2D( self._depth, [self._kernel_size, self._kernel_size], padding='SAME', name=layer_name, **kwargs)) else: conv_layers.append( tf.keras.layers.Conv2D( self._depth, [self._kernel_size, self._kernel_size], padding='SAME', name=layer_name, **self._conv_hyperparams.params(use_bias=use_bias))) # Each feature gets a separate batchnorm parameter even though they share # the same convolution weights. if self._apply_batch_norm: batch_norm_layers.append(self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='{}/conv2d_{}/BatchNorm/feature_{}'.format( tower_name_scope, additional_conv_layer_idx, feature_index))) activation_layers.append(self._conv_hyperparams.build_activation_layer( name='{}/conv2d_{}/activation_{}'.format( tower_name_scope, additional_conv_layer_idx, feature_index))) # Set conv layers as the shared conv layers for different feature maps with # the same tower_name_scope. if tower_name_scope in self._head_scope_conv_layers: conv_layers = self._head_scope_conv_layers[tower_name_scope] # Stack the base_tower_layers in the order of conv_layer, batch_norm_layer # and activation_layer base_tower_layers = [] for i in range(self._num_layers_before_predictor): base_tower_layers.extend([conv_layers[i]]) if self._apply_batch_norm: base_tower_layers.extend([batch_norm_layers[i]]) base_tower_layers.extend([activation_layers[i]]) return conv_layers, base_tower_layers def build(self, input_shapes): """Creates the variables of the layer.""" feature_channels = [ shape_utils.get_dim_as_int(input_shape[3]) for input_shape in input_shapes ] has_different_feature_channels = len(set(feature_channels)) > 1 if has_different_feature_channels: inserted_layer_counter = 0 target_channel = max(set(feature_channels), key=feature_channels.count) tf.logging.info('Not all feature maps have the same number of ' 'channels, found: {}, appending additional projection ' 'layers to bring all feature maps to uniformly have {} ' 'channels.'.format(feature_channels, target_channel)) else: # Place holder variables if has_different_feature_channels is False. 
target_channel = -1 inserted_layer_counter = -1 def _build_layers(tower_name_scope, feature_index): conv_layers, base_tower_layers = self._compute_base_tower( tower_name_scope=tower_name_scope, feature_index=feature_index) if tower_name_scope not in self._head_scope_conv_layers: self._head_scope_conv_layers[tower_name_scope] = conv_layers return base_tower_layers for feature_index in range(len(input_shapes)): # Additional projection layers should not be shared as input channels # (and thus weight shapes) are different inserted_layer_counter, projection_layers = ( self._insert_additional_projection_layer( inserted_layer_counter, target_channel)) self._additional_projection_layers.append(projection_layers) if self._share_prediction_tower: box_tower_scope = 'PredictionTower' else: box_tower_scope = 'BoxPredictionTower' # For box tower base box_tower_layers = _build_layers(box_tower_scope, feature_index) self._base_tower_layers_for_heads[BOX_ENCODINGS].append(box_tower_layers) for head_name in self._sorted_head_names: if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: tower_name_scope = 'ClassPredictionTower' else: tower_name_scope = '{}PredictionTower'.format(head_name) box_tower_layers = _build_layers(tower_name_scope, feature_index) self._base_tower_layers_for_heads[head_name].append(box_tower_layers) self.built = True def _predict(self, image_features, **kwargs): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. **kwargs: Unused Keyword args Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ predictions = collections.defaultdict(list) def _apply_layers(base_tower_layers, image_feature): for layer in base_tower_layers: image_feature = layer(image_feature) return image_feature for (index, image_feature) in enumerate(image_features): # Apply additional projection layers to image features for layer in self._additional_projection_layers[index]: image_feature = layer(image_feature) # Apply box tower layers. box_tower_feature = _apply_layers( self._base_tower_layers_for_heads[BOX_ENCODINGS][index], image_feature) box_encodings = self._box_prediction_head(box_tower_feature) predictions[BOX_ENCODINGS].append(box_encodings) for head_name in self._sorted_head_names: head_obj = self._prediction_heads[head_name] if self._share_prediction_tower: head_tower_feature = box_tower_feature else: head_tower_feature = _apply_layers( self._base_tower_layers_for_heads[head_name][index], image_feature) prediction = head_obj(head_tower_feature) predictions[head_name].append(prediction) return predictions
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/convolutional_keras_box_predictor.py
convolutional_keras_box_predictor.py
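A minimal construction-and-call sketch for the Keras predictor defined above, mirroring the builder-based setup used by the unit tests later in this collection; the hyperparameter text and input shape are illustrative.

import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import convolutional_keras_box_predictor as box_predictor
from object_detection.protos import hyperparams_pb2

# Illustrative conv hyperparameters (same proto text as in the tests below).
conv_hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  activation: RELU_6
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", conv_hyperparams)
keras_hyperparams = hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

predictor = box_predictor_builder.build_convolutional_keras_box_predictor(
    is_training=False,
    num_classes=6,
    conv_hyperparams=keras_hyperparams,
    freeze_batchnorm=False,
    inplace_batchnorm_update=False,
    num_predictions_per_location_list=[5],
    min_depth=0,
    max_depth=32,
    num_layers_before_predictor=1,
    use_dropout=True,
    dropout_keep_prob=0.8,
    kernel_size=1,
    box_code_size=4)

# The Keras predictor is called directly on a list of feature maps.
features = tf.constant(np.random.rand(4, 8, 8, 64), dtype=tf.float32)
predictions = predictor([features])
box_encodings = tf.concat(predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions = tf.concat(
    predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)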
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mask R-CNN Box Predictor.""" from object_detection.core import box_predictor BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class MaskRCNNBoxPredictor(box_predictor.BoxPredictor): """Mask R-CNN Box Predictor. See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). Mask R-CNN. arXiv preprint arXiv:1703.06870. This is used for the second stage of the Mask R-CNN detector where proposals cropped from an image are arranged along the batch dimension of the input image_features tensor. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. In addition to predicting boxes and classes, optionally this class allows predicting masks and/or keypoints inside detection boxes. Currently this box predictor makes per-class predictions; that is, each anchor makes a separate box prediction for each class. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, third_stage_heads): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes in second stage. class_prediction_head: The head that predicts the classes in second stage. third_stage_heads: A dictionary mapping head names to mask rcnn head classes. """ super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._third_stage_heads = third_stage_heads @property def num_classes(self): return self._num_classes def get_second_stage_prediction_heads(self): return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND def get_third_stage_prediction_heads(self): return sorted(self._third_stage_heads.keys()) def _predict(self, image_features, num_predictions_per_location, prediction_stage=2): """Optionally computes encoded object locations, confidences, and masks. Predicts the heads belonging to the given prediction stage. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing roi pooled features for each image. The length of the list should be 1 otherwise a ValueError will be raised. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Currently, this must be set to [1], or an error will be raised. prediction_stage: Prediction stage. 
Acceptable values are 2 and 3. Returns: A dictionary containing the predicted tensors that are listed in self._prediction_heads. A subset of the following keys will exist in the dictionary: BOX_ENCODINGS: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape [batch_size, 1, num_classes + 1] representing the class predictions for the proposals. MASK_PREDICTIONS: A float tensor of shape [batch_size, 1, num_classes, image_height, image_width] Raises: ValueError: If num_predictions_per_location is not 1 or if len(image_features) is not 1. ValueError: if prediction_stage is not 2 or 3. """ if (len(num_predictions_per_location) != 1 or num_predictions_per_location[0] != 1): raise ValueError('Currently FullyConnectedBoxPredictor only supports ' 'predicting a single box per class per location.') if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'.format( len(image_features))) image_feature = image_features[0] predictions_dict = {} if prediction_stage == 2: predictions_dict[BOX_ENCODINGS] = self._box_prediction_head.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0]) predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( self._class_prediction_head.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0])) elif prediction_stage == 3: for prediction_head in self.get_third_stage_prediction_heads(): head_object = self._third_stage_heads[prediction_head] predictions_dict[prediction_head] = head_object.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0]) else: raise ValueError('prediction_stage should be either 2 or 3.') return predictions_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/mask_rcnn_box_predictor.py
mask_rcnn_box_predictor.py
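A brief sketch of invoking the two-stage flow above, assuming a constructed MaskRCNNBoxPredictor (`mask_rcnn_predictor`) and ROI-pooled features; the variable names and scope string are illustrative.

from object_detection.predictors import mask_rcnn_box_predictor

# Assumed input: roi_pooled_features is a [total_proposals, crop_h, crop_w, depth] float tensor.
second_stage = mask_rcnn_predictor.predict(
    [roi_pooled_features],              # must be a length-1 list, per the docstring above
    num_predictions_per_location=[1],   # must be [1]
    scope='SecondStageBoxPredictor',
    prediction_stage=2)
box_encodings = second_stage[mask_rcnn_box_predictor.BOX_ENCODINGS]
class_predictions = second_stage[
    mask_rcnn_box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

# Stage 3 runs the third-stage heads (e.g. masks) on the same features.
third_stage = mask_rcnn_predictor.predict(
    [roi_pooled_features],
    num_predictions_per_location=[1],
    scope='SecondStageBoxPredictor',
    prediction_stage=3)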
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.rfcn_box_predictor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors import rfcn_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class RfcnBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def test_get_correct_box_encoding_and_class_prediction_shapes(self): def graph_fn(image_features, proposal_boxes): rfcn_box_predictor = box_predictor.RfcnBoxPredictor( is_training=False, num_classes=2, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), num_spatial_bins=[3, 3], depth=4, crop_size=[12, 12], box_code_size=4 ) box_predictions = rfcn_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', proposal_boxes=proposal_boxes) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features, proposal_boxes]) self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4]) self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/rfcn_box_predictor_tf1_test.py
rfcn_box_predictor_tf1_test.py
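The shape assertions in the RFCN test above follow from the predictor folding proposals into the leading (batch) dimension; the small arithmetic below just restates the test's own values.

batch_size = 4            # image_features batch
proposals_per_image = 2   # second dimension of proposal_boxes
num_classes = 2
box_code_size = 4

total_proposals = batch_size * proposals_per_image                                # 8
expected_box_encodings_shape = [total_proposals, 1, num_classes, box_code_size]   # [8, 1, 2, 4]
expected_class_predictions_shape = [total_proposals, 1, num_classes + 1]          # [8, 1, 3]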
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.convolutional_keras_box_predictor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.predictors import convolutional_keras_box_predictor as box_predictor from object_detection.predictors.heads import keras_box_head from object_detection.predictors.heads import keras_class_head from object_detection.predictors.heads import keras_mask_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ConvolutionalKerasBoxPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_get_boxes_for_five_aspect_ratios_per_location(self): conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=0, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4 )) def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_get_boxes_for_one_aspect_ratio_per_location(self): conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=0, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[1], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4 )) def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( 
box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4 )) def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_predictions_with_feature_maps_of_dynamic_shape( self): tf.keras.backend.clear_session() conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=0, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4 )) variables = [] def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) variables.extend(list(conv_box_predictor.variables)) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return box_encodings, objectness_predictions resolution = 32 expected_num_anchors = resolution*resolution*5 box_encodings, objectness_predictions = self.execute( graph_fn, [np.random.rand(4, resolution, resolution, 64)]) actual_variable_set = set([var.name.split(':')[0] for var in variables]) self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias', 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel', 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias', 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel', 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias', 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel']) self.assertEqual(expected_variable_set, actual_variable_set) self.assertEqual(conv_box_predictor._sorted_head_names, ['box_encodings', 'class_predictions_with_background']) def 
test_use_depthwise_convolution(self): tf.keras.backend.clear_session() conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=0, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=3, box_code_size=4, use_depthwise=True )) variables = [] def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) variables.extend(list(conv_box_predictor.variables)) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return box_encodings, objectness_predictions resolution = 32 expected_num_anchors = resolution*resolution*5 box_encodings, objectness_predictions = self.execute( graph_fn, [np.random.rand(4, resolution, resolution, 64)]) actual_variable_set = set([var.name.split(':')[0] for var in variables]) self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias', 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel', 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/' 'bias', 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/' 'depthwise_kernel', 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias', 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel', 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/bias', 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/' 'depthwise_kernel', 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias', 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel']) self.assertEqual(expected_variable_set, actual_variable_set) self.assertEqual(conv_box_predictor._sorted_head_names, ['box_encodings', 'class_predictions_with_background']) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class WeightSharedConvolutionalKerasBoxPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self, add_batch_norm=True): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { stddev: 0.01 mean: 0.0 } } """ if add_batch_norm: batch_norm_proto = """ batch_norm { train: true, } """ conv_hyperparams_text_proto += batch_norm_proto text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) # pylint: disable=line-too-long def test_get_boxes_for_five_aspect_ratios_per_location(self): conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=0, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], depth=32, num_layers_before_predictor=1, box_code_size=4)) def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) 
return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute( graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_bias_predictions_to_background_with_sigmoid_score_conversion(self): conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=True, num_classes=2, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], depth=32, num_layers_before_predictor=1, class_prediction_bias_init=-4.6, box_code_size=4)) def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) class_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (tf.nn.sigmoid(class_predictions),) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) class_predictions = self.execute(graph_fn, [image_features]) self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], depth=32, num_layers_before_predictor=1, box_code_size=4)) def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_multi_class_predictions_from_two_feature_maps( self): num_classes_without_background = 6 conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5, 5], depth=32, num_layers_before_predictor=1, box_code_size=4)) def graph_fn(image_features1, image_features2): box_predictions = conv_box_predictor([image_features1, image_features2]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features1, image_features2]) self.assertAllEqual(box_encodings.shape, [4, 640, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 640, 
num_classes_without_background+1]) def test_get_multi_class_predictions_from_feature_maps_of_different_depth( self): num_classes_without_background = 6 conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5, 5, 5], depth=32, num_layers_before_predictor=1, box_code_size=4)) def graph_fn(image_features1, image_features2, image_features3): box_predictions = conv_box_predictor( [image_features1, image_features2, image_features3]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features1, image_features2, image_features3]) self.assertAllEqual(box_encodings.shape, [4, 960, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 960, num_classes_without_background+1]) def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm( self): tf.keras.backend.clear_session() num_classes_without_background = 6 conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5, 5], depth=32, num_layers_before_predictor=2, box_code_size=4)) variables = [] def graph_fn(image_features1, image_features2): box_predictions = conv_box_predictor([image_features1, image_features2]) variables.extend(list(conv_box_predictor.variables)) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) self.execute(graph_fn, [ np.random.rand(4, 32, 32, 3).astype(np.float32), np.random.rand(4, 16, 16, 3).astype(np.float32) ]) actual_variable_set = set([var.name.split(':')[0] for var in variables]) expected_variable_set = set([ # Box prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), 
('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), # Box prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), # Class prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), # Class prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_multiple_feature_maps_share_weights_without_batchnorm( self): tf.keras.backend.clear_session() num_classes_without_background = 6 conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5, 5], depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False)) variables = [] def graph_fn(image_features1, image_features2): box_predictions = conv_box_predictor([image_features1, image_features2]) variables.extend(list(conv_box_predictor.variables)) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) self.execute(graph_fn, [ np.random.rand(4, 32, 
32, 3).astype(np.float32), np.random.rand(4, 16, 16, 3).astype(np.float32) ]) actual_variable_set = set([var.name.split(':')[0] for var in variables]) expected_variable_set = set([ # Box prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/bias'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/bias'), # Box prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), # Class prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/bias'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/bias'), # Class prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_multiple_feature_maps_share_weights_with_depthwise( self): tf.keras.backend.clear_session() num_classes_without_background = 6 conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5, 5], depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False, use_depthwise=True)) variables = [] def graph_fn(image_features1, image_features2): box_predictions = conv_box_predictor([image_features1, image_features2]) variables.extend(list(conv_box_predictor.variables)) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) self.execute(graph_fn, [ np.random.rand(4, 32, 32, 3).astype(np.float32), np.random.rand(4, 16, 16, 3).astype(np.float32) ]) actual_variable_set = set([var.name.split(':')[0] for var in variables]) expected_variable_set = set([ # Box prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/depthwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/pointwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/bias'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/depthwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/pointwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/bias'), # Box prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/depthwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/pointwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), # Class prediction tower 
('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/depthwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/pointwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/bias'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/depthwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/pointwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/bias'), # Class prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/depthwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/pointwise_kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_no_batchnorm_params_when_batchnorm_is_not_configured(self): tf.keras.backend.clear_session() num_classes_without_background = 6 conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5, 5], depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False)) variables = [] def graph_fn(image_features1, image_features2): box_predictions = conv_box_predictor( [image_features1, image_features2]) variables.extend(list(conv_box_predictor.variables)) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) self.execute(graph_fn, [ np.random.rand(4, 32, 32, 3).astype(np.float32), np.random.rand(4, 16, 16, 3).astype(np.float32) ]) actual_variable_set = set([var.name.split(':')[0] for var in variables]) expected_variable_set = set([ # Box prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/bias'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/bias'), # Box prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), # Class prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/bias'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/bias'), # Class prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_share_weights_share_tower_separate_batchnorm( self): tf.keras.backend.clear_session() num_classes_without_background = 6 conv_box_predictor = ( 
box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5, 5], depth=32, num_layers_before_predictor=2, box_code_size=4, share_prediction_tower=True)) variables = [] def graph_fn(image_features1, image_features2): box_predictions = conv_box_predictor( [image_features1, image_features2]) variables.extend(list(conv_box_predictor.variables)) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) self.execute(graph_fn, [ np.random.rand(4, 32, 32, 3).astype(np.float32), np.random.rand(4, 16, 16, 3).astype(np.float32) ]) actual_variable_set = set([var.name.split(':')[0] for var in variables]) expected_variable_set = set([ # Shared prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), # Box prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), # Class prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_share_weights_share_tower_without_batchnorm( self): tf.keras.backend.clear_session() num_classes_without_background = 6 conv_box_predictor = ( box_predictor_builder .build_weight_shared_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5, 5], depth=32, num_layers_before_predictor=2, box_code_size=4, share_prediction_tower=True, apply_batch_norm=False)) 
variables = [] def graph_fn(image_features1, image_features2): box_predictions = conv_box_predictor( [image_features1, image_features2]) variables.extend(list(conv_box_predictor.variables)) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) self.execute(graph_fn, [ np.random.rand(4, 32, 32, 3).astype(np.float32), np.random.rand(4, 16, 16, 3).astype(np.float32) ]) actual_variable_set = set([var.name.split(':')[0] for var in variables]) expected_variable_set = set([ # Shared prediction tower ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/bias'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/bias'), # Box prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), # Class prediction head ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), ('WeightSharedConvolutionalBoxPredictor/' 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_other_heads_predictions(self): box_code_size = 4 num_classes_without_background = 3 other_head_name = 'Mask' mask_height = 5 mask_width = 5 num_predictions_per_location = 5 box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( box_code_size=box_code_size, conv_hyperparams=self._build_conv_hyperparams(), num_predictions_per_location=num_predictions_per_location) class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( num_class_slots=num_classes_without_background + 1, conv_hyperparams=self._build_conv_hyperparams(), num_predictions_per_location=num_predictions_per_location) other_heads = { other_head_name: keras_mask_head.WeightSharedConvolutionalMaskHead( num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), num_predictions_per_location=num_predictions_per_location, mask_height=mask_height, mask_width=mask_width) } conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor( is_training=False, num_classes=num_classes_without_background, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, other_heads=other_heads, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, depth=32, num_layers_before_predictor=2) def graph_fn(image_features): box_predictions = conv_box_predictor([image_features]) for key, value in box_predictions.items(): box_predictions[key] = tf.concat(value, axis=1) assert len(box_predictions) == 3 return (box_predictions[box_predictor.BOX_ENCODINGS], box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], box_predictions[other_head_name]) batch_size = 4 feature_ht = 8 feature_wt = 8 image_features = np.random.rand(batch_size, feature_ht, feature_wt, 64).astype(np.float32) (box_encodings, class_predictions, other_head_predictions) = self.execute( graph_fn, [image_features]) num_anchors = feature_ht * feature_wt * num_predictions_per_location self.assertAllEqual(box_encodings.shape, [batch_size, 
num_anchors, box_code_size]) self.assertAllEqual( class_predictions.shape, [batch_size, num_anchors, num_classes_without_background + 1]) self.assertAllEqual(other_head_predictions.shape, [ batch_size, num_anchors, num_classes_without_background, mask_height, mask_width ]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/convolutional_keras_box_predictor_tf2_test.py
convolutional_keras_box_predictor_tf2_test.py
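The variable-set tests above all normalize TensorFlow variable names with `var.name.split(':')[0]` before comparing against an expected set. A minimal, self-contained sketch of that pattern, assuming a fresh TF2 process; the Dense layer and its name are hypothetical stand-ins, not part of the predictor code above:

import tensorflow as tf

# Illustrative layer only; the object_detection predictors build their own
# towers and heads. Only the name-normalization pattern is taken from the tests.
layer = tf.keras.layers.Dense(4, name='BoxPredictor')
layer.build(input_shape=(None, 8))  # creates the kernel and bias variables

actual_variable_set = set(var.name.split(':')[0] for var in layer.variables)
expected_variable_set = {'BoxPredictor/kernel', 'BoxPredictor/bias'}
assert expected_variable_set == actual_variable_set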
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.mask_rcnn_box_predictor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.predictors import mask_rcnn_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class MaskRCNNBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_get_boxes_with_five_classes(self): def graph_fn(image_features): mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, ) box_predictions = mask_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', prediction_stage=2) return (box_predictions[box_predictor.BOX_ENCODINGS], box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4]) self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) def test_get_boxes_with_five_classes_share_box_across_classes(self): def graph_fn(image_features): mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, share_box_across_classes=True ) box_predictions = mask_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', prediction_stage=2) return (box_predictions[box_predictor.BOX_ENCODINGS], box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4]) self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) def 
test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self): with self.assertRaises(ValueError): box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, predict_instance_masks=True) def test_get_instance_masks(self): def graph_fn(image_features): mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, conv_hyperparams_fn=self._build_arg_scope_with_hyperparams( op_type=hyperparams_pb2.Hyperparams.CONV), predict_instance_masks=True) box_predictions = mask_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', prediction_stage=3) return (box_predictions[box_predictor.MASK_PREDICTIONS],) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) mask_predictions = self.execute(graph_fn, [image_features]) self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14]) def test_do_not_return_instance_masks_without_request(self): image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32) mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4) box_predictions = mask_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', prediction_stage=2) self.assertEqual(len(box_predictions), 2) self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions) self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND in box_predictions) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/mask_rcnn_box_predictor_tf1_test.py
mask_rcnn_box_predictor_tf1_test.py
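The shape assertions in the Mask R-CNN predictor tests above follow directly from the second-stage layout: one box prediction per class per cropped proposal, plus a background slot in the class scores. A plain-Python sketch of that arithmetic, using the values from the tests themselves:

# Shape arithmetic behind the assertions in the tests above.
batch_size = 2            # cropped proposals fed to the predictor
num_classes = 5           # without background
box_code_size = 4

box_encodings_shape = [batch_size, 1, num_classes, box_code_size]   # [2, 1, 5, 4]
class_predictions_shape = [batch_size, 1, num_classes + 1]          # [2, 1, 6]
shared_box_shape = [batch_size, 1, 1, box_code_size]                # [2, 1, 1, 4] with share_box_across_classes=True

assert box_encodings_shape == [2, 1, 5, 4]
assert class_predictions_shape == [2, 1, 6]
assert shared_box_shape == [2, 1, 1, 4]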
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.convolutional_box_predictor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from absl.testing import parameterized import numpy as np from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.predictors import convolutional_box_predictor as box_predictor from object_detection.predictors.heads import box_head from object_detection.predictors.heads import class_head from object_detection.predictors.heads import mask_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def test_get_boxes_for_five_aspect_ratios_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_get_boxes_for_one_aspect_ratio_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) 
box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_predictions_with_feature_maps_of_dynamic_shape( self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/Conv2d_0_1x1_32/biases', 'BoxPredictor/Conv2d_0_1x1_32/weights', 'BoxPredictor/BoxEncodingPredictor/biases', 'BoxPredictor/BoxEncodingPredictor/weights', 'BoxPredictor/ClassPredictor/biases', 'BoxPredictor/ClassPredictor/weights']) self.assertEqual(expected_variable_set, actual_variable_set) def test_use_depthwise_convolution(self): 
image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, dropout_keep_prob=0.8, kernel_size=3, box_code_size=4, use_dropout=True, use_depthwise=True)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/Conv2d_0_1x1_32/biases', 'BoxPredictor/Conv2d_0_1x1_32/weights', 'BoxPredictor/BoxEncodingPredictor_depthwise/biases', 'BoxPredictor/BoxEncodingPredictor_depthwise/depthwise_weights', 'BoxPredictor/BoxEncodingPredictor/biases', 'BoxPredictor/BoxEncodingPredictor/weights', 'BoxPredictor/ClassPredictor_depthwise/biases', 'BoxPredictor/ClassPredictor_depthwise/depthwise_weights', 'BoxPredictor/ClassPredictor/biases', 'BoxPredictor/ClassPredictor/weights']) self.assertEqual(expected_variable_set, actual_variable_set) def test_no_dangling_outputs(self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, dropout_keep_prob=0.8, kernel_size=3, box_code_size=4, use_dropout=True, use_depthwise=True)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) bad_dangling_ops = [] types_safe_to_dangle = set(['Assign', 'Mul', 'Const']) for op in tf.get_default_graph().get_operations(): if (not op.outputs) or (not op.outputs[0].consumers()): if 'BoxPredictor' in op.name: if op.type not in types_safe_to_dangle: bad_dangling_ops.append(op) self.assertEqual(bad_dangling_ops, []) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } batch_norm { train: true, } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def _build_conv_arg_scope_no_batch_norm(self): conv_hyperparams = 
hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def test_get_boxes_for_five_aspect_ratios_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute( graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_bias_predictions_to_background_with_sigmoid_score_conversion(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=True, num_classes=2, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, class_prediction_bias_init=-4.6, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') class_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (tf.nn.sigmoid(class_predictions),) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) class_predictions = self.execute(graph_fn, [image_features]) self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_multi_class_predictions_from_two_feature_maps( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, 
num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features1, image_features2]) self.assertAllEqual(box_encodings.shape, [4, 640, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 640, num_classes_without_background+1]) def test_get_multi_class_predictions_from_feature_maps_of_different_depth( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2, image_features3): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2, image_features3], num_predictions_per_location=[5, 5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features1, image_features2, image_features3]) self.assertAllEqual(box_encodings.shape, [4, 960, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 960, num_classes_without_background+1]) def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 
'BoxPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_multiple_feature_maps_share_weights_without_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/weights'), 
('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_multiple_feature_maps_share_weights_with_depthwise( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False, use_depthwise=True)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) 
self.assertEqual(expected_variable_set, actual_variable_set) def test_no_batchnorm_params_when_batchnorm_is_not_configured(self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_conv_arg_scope_no_batch_norm(), depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_share_weights_share_tower_separate_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, share_prediction_tower=True)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = 
set([ # Shared prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_share_weights_share_tower_without_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, share_prediction_tower=True, apply_batch_norm=False)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Shared prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_get_predictions_with_feature_maps_of_dynamic_shape( self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], 
num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) def test_other_heads_predictions(self): box_code_size = 4 num_classes_without_background = 3 other_head_name = 'Mask' mask_height = 5 mask_width = 5 num_predictions_per_location = 5 def graph_fn(image_features): box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( box_code_size) class_prediction_head = class_head.WeightSharedConvolutionalClassHead( num_classes_without_background + 1) other_heads = { other_head_name: mask_head.WeightSharedConvolutionalMaskHead( num_classes_without_background, mask_height=mask_height, mask_width=mask_width) } conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor( is_training=False, num_classes=num_classes_without_background, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, other_heads=other_heads, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[num_predictions_per_location], scope='BoxPredictor') for key, value in box_predictions.items(): box_predictions[key] = tf.concat(value, axis=1) assert len(box_predictions) == 3 return (box_predictions[box_predictor.BOX_ENCODINGS], box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], box_predictions[other_head_name]) batch_size = 4 feature_ht = 8 feature_wt = 8 image_features = np.random.rand(batch_size, feature_ht, feature_wt, 64).astype(np.float32) (box_encodings, class_predictions, other_head_predictions) = self.execute( graph_fn, [image_features]) num_anchors = feature_ht * feature_wt * num_predictions_per_location self.assertAllEqual(box_encodings.shape, [batch_size, num_anchors, box_code_size]) self.assertAllEqual( class_predictions.shape, [batch_size, num_anchors, num_classes_without_background + 1]) self.assertAllEqual(other_head_predictions.shape, [ batch_size, num_anchors, num_classes_without_background, mask_height, mask_width ]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/convolutional_box_predictor_tf1_test.py
convolutional_box_predictor_tf1_test.py
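Every anchor-count check in the convolutional predictor tests above reduces to the same rule: each spatial location of a feature map contributes `num_predictions_per_location` anchors, summed over feature maps. A short helper that reproduces the expected values quoted in the tests (the helper itself is illustrative, not library code):

# Anchor-count arithmetic used throughout the tests above.
def expected_num_anchors(feature_map_shapes, num_predictions_per_location):
    """feature_map_shapes: list of (height, width) pairs, one per feature map."""
    return sum(h * w * n for (h, w), n in
               zip(feature_map_shapes, num_predictions_per_location))

assert expected_num_anchors([(8, 8)], [5]) == 320            # single 8x8 map
assert expected_num_anchors([(8, 8), (8, 8)], [5, 5]) == 640  # two feature maps
assert expected_num_anchors([(32, 32)], [5]) == 5120          # dynamic-shape test, resolution=32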
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.rfcn_box_predictor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors import rfcn_keras_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class RfcnKerasBoxPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_get_correct_box_encoding_and_class_prediction_shapes(self): rfcn_box_predictor = box_predictor.RfcnKerasBoxPredictor( is_training=False, num_classes=2, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, num_spatial_bins=[3, 3], depth=4, crop_size=[12, 12], box_code_size=4) def graph_fn(image_features, proposal_boxes): box_predictions = rfcn_box_predictor( [image_features], proposal_boxes=proposal_boxes) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features, proposal_boxes]) self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4]) self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/rfcn_keras_box_predictor_tf2_test.py
rfcn_keras_box_predictor_tf2_test.py
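The expected shapes in the RFCN Keras test above come from the predictor flattening the batch and proposal dimensions into one leading dimension before reshaping. A plain-Python sketch of that bookkeeping, using the test's own configuration:

# Why the test above expects box_encodings of shape [8, 1, 2, 4].
batch_size = 4
num_proposals = 2
num_classes = 2           # without background
box_code_size = 4

box_encodings_shape = [batch_size * num_proposals, 1, num_classes, box_code_size]
class_predictions_shape = [batch_size * num_proposals, 1, num_classes + 1]

assert box_encodings_shape == [8, 1, 2, 4]
assert class_predictions_shape == [8, 1, 3]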
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mask R-CNN Box Predictor.""" from object_detection.core import box_predictor BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class MaskRCNNKerasBoxPredictor(box_predictor.KerasBoxPredictor): """Mask R-CNN Box Predictor. See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). Mask R-CNN. arXiv preprint arXiv:1703.06870. This is used for the second stage of the Mask R-CNN detector where proposals cropped from an image are arranged along the batch dimension of the input image_features tensor. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. In addition to predicting boxes and classes, optionally this class allows predicting masks and/or keypoints inside detection boxes. Currently this box predictor makes per-class predictions; that is, each anchor makes a separate box prediction for each class. """ def __init__(self, is_training, num_classes, freeze_batchnorm, box_prediction_head, class_prediction_head, third_stage_heads, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. box_prediction_head: The head that predicts the boxes in second stage. class_prediction_head: The head that predicts the classes in second stage. third_stage_heads: A dictionary mapping head names to mask rcnn head classes. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. """ super(MaskRCNNKerasBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=False, name=name) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._third_stage_heads = third_stage_heads @property def num_classes(self): return self._num_classes def get_second_stage_prediction_heads(self): return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND def get_third_stage_prediction_heads(self): return sorted(self._third_stage_heads.keys()) def _predict(self, image_features, prediction_stage=2, **kwargs): """Optionally computes encoded object locations, confidences, and masks. Predicts the heads belonging to the given prediction stage. 
Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing roi pooled features for each image. The length of the list should be 1 otherwise a ValueError will be raised. prediction_stage: Prediction stage. Acceptable values are 2 and 3. **kwargs: Unused Keyword args Returns: A dictionary containing the predicted tensors that are listed in self._prediction_heads. A subset of the following keys will exist in the dictionary: BOX_ENCODINGS: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape [batch_size, 1, num_classes + 1] representing the class predictions for the proposals. MASK_PREDICTIONS: A float tensor of shape [batch_size, 1, num_classes, image_height, image_width] Raises: ValueError: If num_predictions_per_location is not 1 or if len(image_features) is not 1. ValueError: if prediction_stage is not 2 or 3. """ if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'.format( len(image_features))) image_feature = image_features[0] predictions_dict = {} if prediction_stage == 2: predictions_dict[BOX_ENCODINGS] = self._box_prediction_head(image_feature) predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( self._class_prediction_head(image_feature)) elif prediction_stage == 3: for prediction_head in self.get_third_stage_prediction_heads(): head_object = self._third_stage_heads[prediction_head] predictions_dict[prediction_head] = head_object(image_feature) else: raise ValueError('prediction_stage should be either 2 or 3.') return predictions_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/mask_rcnn_keras_box_predictor.py
mask_rcnn_keras_box_predictor.py
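The `_predict` method above dispatches on `prediction_stage`: stage 2 runs the box and class heads, stage 3 runs each registered third-stage head in sorted order. A stripped-down sketch of that control flow, with lambdas standing in for the Keras head objects (the stand-in heads and key names are illustrative, not the library's):

# Simplified dispatch mirroring _predict in MaskRCNNKerasBoxPredictor above.
def predict(image_feature, prediction_stage, box_head, class_head, third_stage_heads):
    predictions = {}
    if prediction_stage == 2:
        predictions['box_encodings'] = box_head(image_feature)
        predictions['class_predictions_with_background'] = class_head(image_feature)
    elif prediction_stage == 3:
        for name in sorted(third_stage_heads):
            predictions[name] = third_stage_heads[name](image_feature)
    else:
        raise ValueError('prediction_stage should be either 2 or 3.')
    return predictions

# Example with dummy heads:
feature = object()  # stands in for the roi-pooled feature tensor
out = predict(feature, 2,
              box_head=lambda f: 'boxes',
              class_head=lambda f: 'classes',
              third_stage_heads={'mask_predictions': lambda f: 'masks'})
assert set(out) == {'box_encodings', 'class_predictions_with_background'}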
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RFCN Box Predictor.""" import tensorflow.compat.v1 as tf from object_detection.core import box_predictor from object_detection.utils import ops BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class RfcnKerasBoxPredictor(box_predictor.KerasBoxPredictor): """RFCN Box Predictor. Applies a position sensitive ROI pooling on position sensitive feature maps to predict classes and refined locations. See https://arxiv.org/abs/1605.06409 for details. This is used for the second stage of the RFCN meta architecture. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. """ def __init__(self, is_training, num_classes, conv_hyperparams, freeze_batchnorm, num_spatial_bins, depth, crop_size, box_code_size, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`. depth: Target depth to reduce the input feature maps to. crop_size: A list of two integers `[crop_height, crop_width]`. box_code_size: Size of encoding for each box. name: A string name scope to assign to the box predictor. If `None`, Keras will auto-generate one from the class name. 
""" super(RfcnKerasBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=False, name=name) self._freeze_batchnorm = freeze_batchnorm self._conv_hyperparams = conv_hyperparams self._num_spatial_bins = num_spatial_bins self._depth = depth self._crop_size = crop_size self._box_code_size = box_code_size # Build the shared layers used for both heads self._shared_conv_layers = [] self._shared_conv_layers.append( tf.keras.layers.Conv2D( self._depth, [1, 1], padding='SAME', name='reduce_depth_conv', **self._conv_hyperparams.params())) self._shared_conv_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='reduce_depth_batchnorm')) self._shared_conv_layers.append( self._conv_hyperparams.build_activation_layer( name='reduce_depth_activation')) self._box_encoder_layers = [] location_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * self.num_classes * self._box_code_size) self._box_encoder_layers.append( tf.keras.layers.Conv2D( location_feature_map_depth, [1, 1], padding='SAME', name='refined_locations_conv', **self._conv_hyperparams.params())) self._box_encoder_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='refined_locations_batchnorm')) self._class_predictor_layers = [] self._total_classes = self.num_classes + 1 # Account for background class. class_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * self._total_classes) self._class_predictor_layers.append( tf.keras.layers.Conv2D( class_feature_map_depth, [1, 1], padding='SAME', name='class_predictions_conv', **self._conv_hyperparams.params())) self._class_predictor_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='class_predictions_batchnorm')) @property def num_classes(self): return self._num_classes def _predict(self, image_features, proposal_boxes, **kwargs): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. proposal_boxes: A float tensor of shape [batch_size, num_proposals, box_code_size]. **kwargs: Unused Keyword args Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. Raises: ValueError: if num_predictions_per_location is not 1 or if len(image_features) is not 1. """ if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'. format(len(image_features))) image_feature = image_features[0] batch_size = tf.shape(proposal_boxes)[0] num_boxes = tf.shape(proposal_boxes)[1] net = image_feature for layer in self._shared_conv_layers: net = layer(net) # Location predictions. 
box_net = net for layer in self._box_encoder_layers: box_net = layer(box_net) box_encodings = ops.batch_position_sensitive_crop_regions( box_net, boxes=proposal_boxes, crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True) box_encodings = tf.squeeze(box_encodings, axis=[2, 3]) box_encodings = tf.reshape(box_encodings, [batch_size * num_boxes, 1, self.num_classes, self._box_code_size]) # Class predictions. class_net = net for layer in self._class_predictor_layers: class_net = layer(class_net) class_predictions_with_background = ( ops.batch_position_sensitive_crop_regions( class_net, boxes=proposal_boxes, crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True)) class_predictions_with_background = tf.squeeze( class_predictions_with_background, axis=[2, 3]) class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size * num_boxes, 1, self._total_classes]) return {BOX_ENCODINGS: [box_encodings], CLASS_PREDICTIONS_WITH_BACKGROUND: [class_predictions_with_background]}
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/rfcn_keras_box_predictor.py
rfcn_keras_box_predictor.py
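A minimal usage sketch for the RfcnKerasBoxPredictor above (not part of the library source): the hyperparams text proto mirrors the ones used by the test files in this package, and the shapes, depth, bin counts and proposal counts are illustrative assumptions.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.protos import hyperparams_pb2

conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", conv_hyperparams_proto)
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
    conv_hyperparams_proto)

predictor = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
    is_training=False,
    num_classes=2,
    conv_hyperparams=conv_hyperparams,
    freeze_batchnorm=False,
    num_spatial_bins=[3, 3],
    depth=64,
    crop_size=[12, 12],  # chosen divisible by num_spatial_bins
    box_code_size=4)

# A single feature map plus proposal boxes in normalized coordinates.
image_features = [tf.random_uniform([2, 19, 19, 512], dtype=tf.float32)]
proposal_boxes = tf.random_uniform([2, 10, 4], dtype=tf.float32)
predictions = predictor(image_features, proposal_boxes=proposal_boxes)
# predictions[BOX_ENCODINGS][0]:
#     [batch * num_proposals, 1, num_classes, box_code_size] = [20, 1, 2, 4]
# predictions[CLASS_PREDICTIONS_WITH_BACKGROUND][0]:
#     [batch * num_proposals, 1, num_classes + 1] = [20, 1, 3]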
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.mask_rcnn_box_predictor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.predictors import mask_rcnn_keras_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class MaskRCNNKerasBoxPredictorTest(test_case.TestCase): def _build_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.KerasLayerHyperparams(hyperparams) def test_get_boxes_with_five_classes(self): mask_box_predictor = ( box_predictor_builder.build_mask_rcnn_keras_box_predictor( is_training=False, num_classes=5, fc_hyperparams=self._build_hyperparams(), freeze_batchnorm=False, use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, )) def graph_fn(image_features): box_predictions = mask_box_predictor( [image_features], prediction_stage=2) return (box_predictions[box_predictor.BOX_ENCODINGS], box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4]) self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) def test_get_boxes_with_five_classes_share_box_across_classes(self): mask_box_predictor = ( box_predictor_builder.build_mask_rcnn_keras_box_predictor( is_training=False, num_classes=5, fc_hyperparams=self._build_hyperparams(), freeze_batchnorm=False, use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, share_box_across_classes=True )) def graph_fn(image_features): box_predictions = mask_box_predictor( [image_features], prediction_stage=2) return (box_predictions[box_predictor.BOX_ENCODINGS], box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4]) self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) def test_get_instance_masks(self): mask_box_predictor = ( box_predictor_builder.build_mask_rcnn_keras_box_predictor( is_training=False, num_classes=5, 
fc_hyperparams=self._build_hyperparams(), freeze_batchnorm=False, use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, conv_hyperparams=self._build_hyperparams( op_type=hyperparams_pb2.Hyperparams.CONV), predict_instance_masks=True)) def graph_fn(image_features): box_predictions = mask_box_predictor( [image_features], prediction_stage=3) return (box_predictions[box_predictor.MASK_PREDICTIONS],) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) mask_predictions = self.execute(graph_fn, [image_features]) self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14]) def test_do_not_return_instance_masks_without_request(self): image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32) mask_box_predictor = ( box_predictor_builder.build_mask_rcnn_keras_box_predictor( is_training=False, num_classes=5, fc_hyperparams=self._build_hyperparams(), freeze_batchnorm=False, use_dropout=False, dropout_keep_prob=0.5, box_code_size=4)) box_predictions = mask_box_predictor( [image_features], prediction_stage=2) self.assertEqual(len(box_predictions), 2) self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions) self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND in box_predictions) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/mask_rcnn_keras_box_predictor_tf2_test.py
mask_rcnn_keras_box_predictor_tf2_test.py
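A brief hedged sketch of how the two prediction stages exercised by the tests above are typically consumed; `mask_box_predictor` and the `box_predictor` module alias are assumed to be set up exactly as in the tests, and the shapes in the comments follow the test assertions.

image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)

# prediction_stage=2: box refinement and classification for each proposal.
stage2 = mask_box_predictor([image_features], prediction_stage=2)
box_encodings = stage2[box_predictor.BOX_ENCODINGS]                     # [2, 1, 5, 4]
class_logits = stage2[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]  # [2, 1, 6]

# prediction_stage=3: per-class instance masks; this key is only available
# when the predictor was built with predict_instance_masks=True.
# stage3 = mask_box_predictor([image_features], prediction_stage=3)
# masks = stage3[box_predictor.MASK_PREDICTIONS]                        # [2, 1, 5, 14, 14]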
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class Head. Contains Class prediction head classes for different meta architectures. All the class prediction heads have a predict function that receives the `features` as the first argument and returns class predictions with background. """ import tensorflow.compat.v1 as tf from object_detection.predictors.heads import head from object_detection.utils import shape_utils class ConvolutionalClassHead(head.KerasHead): """Convolutional class prediction head.""" def __init__(self, is_training, num_class_slots, use_dropout, dropout_keep_prob, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, class_prediction_bias_init=0.0, use_depthwise=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. ValueError: if use_depthwise is True and kernel_size is 1. 
""" if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(ConvolutionalClassHead, self).__init__(name=name) self._is_training = is_training self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_depthwise = use_depthwise self._num_class_slots = num_class_slots self._class_predictor_layers = [] if self._use_dropout: self._class_predictor_layers.append( # The Dropout layer's `training` parameter for the call method must # be set implicitly by the Keras set_learning_phase. The object # detection training code takes care of this. tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: self._class_predictor_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='ClassPredictor_depthwise', **conv_hyperparams.params())) self._class_predictor_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='ClassPredictor_depthwise_batchnorm')) self._class_predictor_layers.append( conv_hyperparams.build_activation_layer( name='ClassPredictor_depthwise_activation')) self._class_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._num_class_slots, [1, 1], name='ClassPredictor', **conv_hyperparams.params(use_bias=True))) else: self._class_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], padding='SAME', name='ClassPredictor', bias_initializer=tf.constant_initializer( self._class_prediction_bias_init), **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: class_predictions_with_background: A float tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. """ class_predictions_with_background = features for layer in self._class_predictor_layers: class_predictions_with_background = layer( class_predictions_with_background) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background class MaskRCNNClassHead(head.KerasHead): """Mask RCNN class prediction head. This is a piece of Mask RCNN which is responsible for predicting just the class scores of boxes. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_class_slots, fc_hyperparams, freeze_batchnorm, use_dropout, dropout_keep_prob, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for fully connected dense ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. 
use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. name: A string name scope to assign to the class head. If `None`, Keras will auto-generate one from the class name. """ super(MaskRCNNClassHead, self).__init__(name=name) self._is_training = is_training self._freeze_batchnorm = freeze_batchnorm self._num_class_slots = num_class_slots self._fc_hyperparams = fc_hyperparams self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._class_predictor_layers = [tf.keras.layers.Flatten()] if self._use_dropout: self._class_predictor_layers.append( tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) self._class_predictor_layers.append( tf.keras.layers.Dense(self._num_class_slots, name='ClassPredictor_dense')) self._class_predictor_layers.append( fc_hyperparams.build_batch_norm(training=(is_training and not freeze_batchnorm), name='ClassPredictor_batchnorm')) def _predict(self, features): """Predicts the class scores for boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. Returns: class_predictions_with_background: A float tensor of shape [batch_size, 1, num_class_slots] representing the class predictions for the proposals. """ spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') net = spatial_averaged_roi_pooled_features for layer in self._class_predictor_layers: net = layer(net) class_predictions_with_background = tf.reshape( net, [-1, 1, self._num_class_slots]) return class_predictions_with_background class WeightSharedConvolutionalClassHead(head.KerasHead): """Weight shared convolutional class prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, num_class_slots, num_predictions_per_location, conv_hyperparams, kernel_size=3, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, use_depthwise=False, apply_conv_hyperparams_to_heads=False, score_converter_fn=tf.identity, return_flat_predictions=True, name=None): """Constructor. Args: num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. kernel_size: Size of final convolution kernel. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to depthwise seperable convolution layers in the box and class heads. By default, the conv_hyperparams are only applied to layers in the predictor tower when using depthwise separable convolutions. score_converter_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). 
return_flat_predictions: If true, returns flattened prediction tensor of shape [batch, height * width * num_predictions_per_location, box_coder]. Otherwise returns the prediction tensor before reshaping, whose shape is [batch, height, width, num_predictions_per_location * num_class_slots]. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(WeightSharedConvolutionalClassHead, self).__init__(name=name) self._num_class_slots = num_class_slots self._num_predictions_per_location = num_predictions_per_location self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._use_depthwise = use_depthwise self._apply_conv_hyperparams_to_heads = apply_conv_hyperparams_to_heads self._score_converter_fn = score_converter_fn self._return_flat_predictions = return_flat_predictions self._class_predictor_layers = [] if self._use_dropout: self._class_predictor_layers.append( tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: kwargs = conv_hyperparams.params(use_bias=True) if self._apply_conv_hyperparams_to_heads: kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['pointwise_initializer'] = kwargs['kernel_initializer'] self._class_predictor_layers.append( tf.keras.layers.SeparableConv2D( num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, name='ClassPredictor', bias_initializer=tf.constant_initializer( self._class_prediction_bias_init), **kwargs)) else: self._class_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], padding='SAME', name='ClassPredictor', bias_initializer=tf.constant_initializer( self._class_prediction_bias_init), **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: class_predictions_with_background: A float tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. """ class_predictions_with_background = features for layer in self._class_predictor_layers: class_predictions_with_background = layer( class_predictions_with_background) batch_size, height, width = shape_utils.combined_static_and_dynamic_shape( features)[0:3] class_predictions_with_background = tf.reshape( class_predictions_with_background, [ batch_size, height, width, self._num_predictions_per_location, self._num_class_slots ]) class_predictions_with_background = self._score_converter_fn( class_predictions_with_background) if self._return_flat_predictions: class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) else: class_predictions_with_background = tf.reshape( class_predictions_with_background, [ batch_size, height, width, self._num_predictions_per_location * self._num_class_slots ]) return class_predictions_with_background
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_class_head.py
keras_class_head.py
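A minimal sketch of driving the Keras ConvolutionalClassHead defined above; it builds the hyperparams the same way the head tests in this package do, and the feature-map shape, slot count and predictions-per-location are illustrative assumptions.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_class_head
from object_detection.protos import hyperparams_pb2

conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  activation: NONE
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", conv_hyperparams_proto)
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
    conv_hyperparams_proto)

class_head_layer = keras_class_head.ConvolutionalClassHead(
    is_training=False,
    num_class_slots=21,  # e.g. 20 classes plus one background slot
    use_dropout=False,
    dropout_keep_prob=0.8,
    kernel_size=3,
    num_predictions_per_location=4,
    conv_hyperparams=conv_hyperparams,
    freeze_batchnorm=False)

features = tf.random_uniform([8, 10, 10, 64], dtype=tf.float32)
class_predictions = class_head_layer(features)
# Shape: [8, 10 * 10 * 4, 21] = [batch, num_anchors, num_class_slots];
# spatial positions and predictions-per-location are flattened together.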
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.mask_head.""" import unittest import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import keras_mask_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ConvolutionalMaskPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_prediction_size_use_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=False, mask_height=7, mask_width=7) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head(image_feature) return mask_predictions mask_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape) def test_prediction_size_use_depthwise_true(self): conv_hyperparams = self._build_conv_hyperparams() mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=True, mask_height=7, mask_width=7) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head(image_feature) return mask_predictions mask_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape) def test_class_agnostic_prediction_size_use_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=True) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = 
mask_prediction_head(image_feature) return mask_predictions mask_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape) def test_class_agnostic_prediction_size_use_depthwise_true(self): conv_hyperparams = self._build_conv_hyperparams() mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=True, mask_height=7, mask_width=7, masks_are_class_agnostic=True) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head(image_feature) return mask_predictions mask_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class MaskRCNNMaskHeadTest(test_case.TestCase): def _build_conv_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.KerasLayerHyperparams(hyperparams) def test_prediction_size(self): mask_prediction_head = keras_mask_head.MaskRCNNMaskHead( is_training=True, num_classes=20, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False) def graph_fn(): roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = mask_prediction_head(roi_pooled_features) return prediction prediction = self.execute(graph_fn, []) self.assertAllEqual([64, 1, 20, 14, 14], prediction.shape) def test_prediction_size_with_convolve_then_upsample(self): mask_prediction_head = keras_mask_head.MaskRCNNMaskHead( is_training=True, num_classes=20, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, mask_height=28, mask_width=28, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=True, convolve_then_upsample=True) def graph_fn(): roi_pooled_features = tf.random_uniform( [64, 14, 14, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = mask_prediction_head(roi_pooled_features) return prediction prediction = self.execute(graph_fn, []) self.assertAllEqual([64, 1, 1, 28, 28], prediction.shape) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class WeightSharedConvolutionalMaskPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_prediction_size(self): mask_prediction_head = ( keras_mask_head.WeightSharedConvolutionalMaskHead( num_classes=20, num_predictions_per_location=1, conv_hyperparams=self._build_conv_hyperparams(), mask_height=7, mask_width=7)) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, 
dtype=tf.float32) mask_predictions = mask_prediction_head(image_feature) return mask_predictions mask_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape) def test_class_agnostic_prediction_size(self): mask_prediction_head = ( keras_mask_head.WeightSharedConvolutionalMaskHead( num_classes=20, num_predictions_per_location=1, conv_hyperparams=self._build_conv_hyperparams(), mask_height=7, mask_width=7, masks_are_class_agnostic=True)) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head(image_feature) return mask_predictions mask_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_mask_head_tf2_test.py
keras_mask_head_tf2_test.py
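A note on the shapes asserted in the tests above (arithmetic inferred from the test inputs, not stated in the library):

# The leading 323 is the flattened anchor dimension produced by the head's
# final reshape: 17 * 19 spatial positions * num_predictions_per_location=1.
num_anchors = 17 * 19 * 1  # = 323
# The trailing block is one mask per class, [20, 7, 7], or [1, 7, 7] when
# masks_are_class_agnostic=True.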
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class Head. Contains Class prediction head classes for different meta architectures. All the class prediction heads have a predict function that receives the `features` as the first argument and returns class predictions with background. """ import functools import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.predictors.heads import head from object_detection.utils import shape_utils class MaskRCNNClassHead(head.Head): """Mask RCNN class prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_class_slots, fc_hyperparams_fn, use_dropout, dropout_keep_prob, scope='ClassPredictor'): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. scope: Scope name for the convolution operation. """ super(MaskRCNNClassHead, self).__init__() self._is_training = is_training self._num_class_slots = num_class_slots self._fc_hyperparams_fn = fc_hyperparams_fn self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._scope = scope def predict(self, features, num_predictions_per_location=1): """Predicts boxes and class scores. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: class_predictions_with_background: A float tensor of shape [batch_size, 1, num_class_slots] representing the class predictions for the proposals. Raises: ValueError: If num_predictions_per_location is not 1. 
""" if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') flattened_roi_pooled_features = slim.flatten( spatial_averaged_roi_pooled_features) if self._use_dropout: flattened_roi_pooled_features = slim.dropout( flattened_roi_pooled_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training) with slim.arg_scope(self._fc_hyperparams_fn()): class_predictions_with_background = slim.fully_connected( flattened_roi_pooled_features, self._num_class_slots, reuse=tf.AUTO_REUSE, activation_fn=None, scope=self._scope) class_predictions_with_background = tf.reshape( class_predictions_with_background, [-1, 1, self._num_class_slots]) return class_predictions_with_background class ConvolutionalClassHead(head.Head): """Convolutional class prediction head.""" def __init__(self, is_training, num_class_slots, use_dropout, dropout_keep_prob, kernel_size, apply_sigmoid_to_scores=False, class_prediction_bias_init=0.0, use_depthwise=False, scope='ClassPredictor'): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). apply_sigmoid_to_scores: if True, apply the sigmoid on the output class_predictions. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. scope: Scope name for the convolution operation. Raises: ValueError: if min_depth > max_depth. ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(ConvolutionalClassHead, self).__init__() self._is_training = is_training self._num_class_slots = num_class_slots self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._apply_sigmoid_to_scores = apply_sigmoid_to_scores self._class_prediction_bias_init = class_prediction_bias_init self._use_depthwise = use_depthwise self._scope = scope def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: class_predictions_with_background: A float tensors of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. 
""" net = features if self._use_dropout: net = slim.dropout(net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: depthwise_scope = self._scope + '_depthwise' class_predictions_with_background = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope=depthwise_scope) class_predictions_with_background = slim.conv2d( class_predictions_with_background, num_predictions_per_location * self._num_class_slots, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope=self._scope) else: class_predictions_with_background = slim.conv2d( net, num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope=self._scope, biases_initializer=tf.constant_initializer( self._class_prediction_bias_init)) if self._apply_sigmoid_to_scores: class_predictions_with_background = tf.sigmoid( class_predictions_with_background) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional class head. class WeightSharedConvolutionalClassHead(head.Head): """Weight shared convolutional class prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, num_class_slots, kernel_size=3, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, use_depthwise=False, score_converter_fn=tf.identity, return_flat_predictions=True, scope='ClassPredictor'): """Constructor. Args: num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. kernel_size: Size of final convolution kernel. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. score_converter_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). return_flat_predictions: If true, returns flattened prediction tensor of shape [batch, height * width * num_predictions_per_location, box_coder]. Otherwise returns the prediction tensor before reshaping, whose shape is [batch, height, width, num_predictions_per_location * num_class_slots]. scope: Scope name for the convolution operation. Raises: ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(WeightSharedConvolutionalClassHead, self).__init__() self._num_class_slots = num_class_slots self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._use_depthwise = use_depthwise self._score_converter_fn = score_converter_fn self._return_flat_predictions = return_flat_predictions self._scope = scope def predict(self, features, num_predictions_per_location): """Predicts boxes. 
Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: class_predictions_with_background: A tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals, or a tensor of shape [batch, height, width, num_predictions_per_location * num_class_slots] representing class predictions before reshaping if self._return_flat_predictions is False. """ class_predictions_net = features if self._use_dropout: class_predictions_net = slim.dropout( class_predictions_net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d class_predictions_with_background = conv_op( class_predictions_net, num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, biases_initializer=tf.constant_initializer( self._class_prediction_bias_init), scope=self._scope) batch_size, height, width = shape_utils.combined_static_and_dynamic_shape( features)[0:3] class_predictions_with_background = tf.reshape( class_predictions_with_background, [ batch_size, height, width, num_predictions_per_location, self._num_class_slots ]) class_predictions_with_background = self._score_converter_fn( class_predictions_with_background) if self._return_flat_predictions: class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) else: class_predictions_with_background = tf.reshape( class_predictions_with_background, [ batch_size, height, width, num_predictions_per_location * self._num_class_slots ]) return class_predictions_with_background
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/class_head.py
class_head.py
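A minimal TF1-style usage sketch for the slim-based ConvolutionalClassHead above; the shapes and slot counts are illustrative assumptions.

import tensorflow.compat.v1 as tf
from object_detection.predictors.heads import class_head

class_prediction_head = class_head.ConvolutionalClassHead(
    is_training=False,
    num_class_slots=21,
    use_dropout=False,
    dropout_keep_prob=0.8,
    kernel_size=3)

features = tf.random_uniform([8, 10, 10, 64], dtype=tf.float32)
class_predictions = class_prediction_head.predict(
    features=features, num_predictions_per_location=4)
# Shape: [8, 10 * 10 * 4, 21]; the convolution variables live under the
# 'ClassPredictor' scope passed to the constructor.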
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mask Head. Contains Mask prediction head classes for different meta architectures. All the mask prediction heads have a predict function that receives the `features` as the first argument and returns `mask_predictions`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.predictors.heads import head from object_detection.utils import ops class MaskRCNNMaskHead(head.Head): """Mask RCNN mask prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, num_classes, conv_hyperparams_fn=None, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False, convolve_then_upsample=False): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. mask_height: Desired output mask height. The default value is 14. mask_width: Desired output mask width. The default value is 14. mask_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. mask_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. convolve_then_upsample: Whether to apply convolutions on mask features before upsampling using nearest neighbor resizing. Otherwise, mask features are resized to [`mask_height`, `mask_width`] using bilinear resizing before applying convolutions. Raises: ValueError: conv_hyperparams_fn is None. 
""" super(MaskRCNNMaskHead, self).__init__() self._num_classes = num_classes self._conv_hyperparams_fn = conv_hyperparams_fn self._mask_height = mask_height self._mask_width = mask_width self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers self._mask_prediction_conv_depth = mask_prediction_conv_depth self._masks_are_class_agnostic = masks_are_class_agnostic self._convolve_then_upsample = convolve_then_upsample if conv_hyperparams_fn is None: raise ValueError('conv_hyperparams_fn is None.') def _get_mask_predictor_conv_depth(self, num_feature_channels, num_classes, class_weight=3.0, feature_weight=2.0): """Computes the depth of the mask predictor convolutions. Computes the depth of the mask predictor convolutions given feature channels and number of classes by performing a weighted average of the two in log space to compute the number of convolution channels. The weights that are used for computing the weighted average do not need to sum to 1. Args: num_feature_channels: An integer containing the number of feature channels. num_classes: An integer containing the number of classes. class_weight: Class weight used in computing the weighted average. feature_weight: Feature weight used in computing the weighted average. Returns: An integer containing the number of convolution channels used by mask predictor. """ num_feature_channels_log = math.log(float(num_feature_channels), 2.0) num_classes_log = math.log(float(num_classes), 2.0) weighted_num_feature_channels_log = ( num_feature_channels_log * feature_weight) weighted_num_classes_log = num_classes_log * class_weight total_weight = feature_weight + class_weight num_conv_channels_log = round( (weighted_num_feature_channels_log + weighted_num_classes_log) / total_weight) return int(math.pow(2.0, num_conv_channels_log)) def predict(self, features, num_predictions_per_location=1): """Performs mask prediction. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: instance_masks: A float tensor of shape [batch_size, 1, num_classes, mask_height, mask_width]. Raises: ValueError: If num_predictions_per_location is not 1. """ if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') num_conv_channels = self._mask_prediction_conv_depth if num_conv_channels == 0: num_feature_channels = features.get_shape().as_list()[3] num_conv_channels = self._get_mask_predictor_conv_depth( num_feature_channels, self._num_classes) with slim.arg_scope(self._conv_hyperparams_fn()): if not self._convolve_then_upsample: features = tf.image.resize_bilinear( features, [self._mask_height, self._mask_width], align_corners=True) for _ in range(self._mask_prediction_num_conv_layers - 1): features = slim.conv2d( features, num_outputs=num_conv_channels, kernel_size=[3, 3]) if self._convolve_then_upsample: # Replace Transposed Convolution with a Nearest Neighbor upsampling step # followed by 3x3 convolution. 
height_scale = self._mask_height // features.shape[1].value width_scale = self._mask_width // features.shape[2].value features = ops.nearest_neighbor_upsampling( features, height_scale=height_scale, width_scale=width_scale) features = slim.conv2d( features, num_outputs=num_conv_channels, kernel_size=[3, 3]) num_masks = 1 if self._masks_are_class_agnostic else self._num_classes mask_predictions = slim.conv2d( features, num_outputs=num_masks, activation_fn=None, normalizer_fn=None, kernel_size=[3, 3]) return tf.expand_dims( tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), axis=1, name='MaskPredictor') class ConvolutionalMaskHead(head.Head): """Convolutional class prediction head.""" def __init__(self, is_training, num_classes, use_dropout, dropout_keep_prob, kernel_size, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: Number of classes. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalMaskHead, self).__init__() self._is_training = is_training self._num_classes = num_classes self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._use_depthwise = use_depthwise self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: mask_predictions: A float tensors of shape [batch_size, num_anchors, num_masks, mask_height, mask_width] representing the mask predictions for the proposals. """ image_feature = features # Add a slot for the background class. 
if self._masks_are_class_agnostic: num_masks = 1 else: num_masks = self._num_classes num_mask_channels = num_masks * self._mask_height * self._mask_width net = image_feature if self._use_dropout: net = slim.dropout(net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: mask_predictions = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope='MaskPredictor_depthwise') mask_predictions = slim.conv2d( mask_predictions, num_predictions_per_location * num_mask_channels, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='MaskPredictor') else: mask_predictions = slim.conv2d( net, num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='MaskPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, num_masks, self._mask_height, self._mask_width]) return mask_predictions # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional mask head. class WeightSharedConvolutionalMaskHead(head.Head): """Weight shared convolutional mask prediction head.""" def __init__(self, num_classes, kernel_size=3, use_dropout=False, dropout_keep_prob=0.8, mask_height=7, mask_width=7, masks_are_class_agnostic=False): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). kernel_size: Size of final convolution kernel. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. """ super(WeightSharedConvolutionalMaskHead, self).__init__() self._num_classes = num_classes self._kernel_size = kernel_size self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: mask_predictions: A tensor of shape [batch_size, num_anchors, num_classes, mask_height, mask_width] representing the mask predictions for the proposals. 
""" mask_predictions_net = features if self._masks_are_class_agnostic: num_masks = 1 else: num_masks = self._num_classes num_mask_channels = num_masks * self._mask_height * self._mask_width if self._use_dropout: mask_predictions_net = slim.dropout( mask_predictions_net, keep_prob=self._dropout_keep_prob) mask_predictions = slim.conv2d( mask_predictions_net, num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, scope='MaskPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, num_masks, self._mask_height, self._mask_width]) return mask_predictions
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/mask_head.py
mask_head.py
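The automatic depth selection in MaskRCNNMaskHead (used when mask_prediction_conv_depth=0) takes a weighted average of log2(feature channels) and log2(num classes). A small worked example with the default weights; the input numbers are illustrative.

import math

num_feature_channels = 1024  # log2 = 10.0
num_classes = 20             # log2 ~= 4.32
feature_weight, class_weight = 2.0, 3.0
weighted_log = (feature_weight * math.log(num_feature_channels, 2.0) +
                class_weight * math.log(num_classes, 2.0)) / (
                    feature_weight + class_weight)
# weighted_log ~= (20.0 + 12.97) / 5.0 ~= 6.59, which rounds to 7.
conv_depth = int(math.pow(2.0, round(weighted_log)))  # = 128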
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.keypoint_head.""" import unittest import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import keypoint_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class MaskRCNNKeypointHeadTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): keypoint_prediction_head = keypoint_head.MaskRCNNKeypointHead( conv_hyperparams_fn=self._build_arg_scope_with_hyperparams()) roi_pooled_features = tf.random_uniform( [64, 14, 14, 1024], minval=-2.0, maxval=2.0, dtype=tf.float32) prediction = keypoint_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 17, 56, 56], prediction.get_shape().as_list()) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keypoint_head_tf1_test.py
keypoint_head_tf1_test.py
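A shape note on the assertion in the test above (the keypoint and heatmap counts are inferred from the head's defaults, not stated in the test):

# [64, 1, 17, 56, 56] =
#     [batch, num_predictions_per_location=1, num_keypoints=17,
#      heatmap_height=56, heatmap_width=56]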
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base head class. All the different kinds of prediction heads in different models will inherit from this class. What is in common between all head classes is that they have a `predict` function that receives `features` as its first argument. How to add a new prediction head to an existing meta architecture? For example, how can we add a `3d shape` prediction head to Mask RCNN? We have to take the following steps to add a new prediction head to an existing meta arch: (a) Add a class for predicting the head. This class should inherit from the `Head` class below and have a `predict` function that receives the features and predicts the output. The output is always a tf.float32 tensor. (b) Add the head to the meta architecture. For example in case of Mask RCNN, go to box_predictor_builder and put in the logic for adding the new head to the Mask RCNN box predictor. (c) Add the logic for computing the loss for the new head. (d) Add the necessary metrics for the new head. (e) (optional) Add visualization for the new head. """ from abc import abstractmethod import tensorflow.compat.v1 as tf class Head(object): """Mask RCNN head base class.""" def __init__(self): """Constructor.""" pass @abstractmethod def predict(self, features, num_predictions_per_location): """Returns the head's predictions. Args: features: A float tensor of features. num_predictions_per_location: Int containing number of predictions per location. Returns: A tf.float32 tensor. """ pass class KerasHead(tf.keras.layers.Layer): """Keras head base class.""" def call(self, features): """The Keras model call will delegate to the `_predict` method.""" return self._predict(features) @abstractmethod def _predict(self, features): """Returns the head's predictions. Args: features: A float tensor of features. Returns: A tf.float32 tensor. """ pass
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/head.py
head.py
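The docstring above walks through adding a new head such as a `3d shape` head. As a minimal sketch of step (a) only, the hypothetical class below subclasses `Head` and emits a fixed-size code per prediction slot; the name `Shape3DHead`, the code size, and the single conv layer are illustrative assumptions, not part of the library.

# A hypothetical custom head, illustrating step (a) of the docstring above.
import tensorflow.compat.v1 as tf
import tf_slim as slim

from object_detection.predictors.heads import head


class Shape3DHead(head.Head):
  """Illustrative head predicting a fixed-size '3d shape' code per box."""

  def __init__(self, shape_code_size=32):
    super(Shape3DHead, self).__init__()
    self._shape_code_size = shape_code_size

  def predict(self, features, num_predictions_per_location):
    # features: [batch, height, width, channels]; emit one code per
    # prediction slot, mirroring the convolutional heads in this package.
    net = slim.conv2d(
        features,
        num_predictions_per_location * self._shape_code_size,
        [3, 3], activation_fn=None, scope='Shape3DPredictor')
    batch_size = tf.shape(features)[0]
    # Flatten spatial locations and prediction slots into one anchor axis.
    return tf.reshape(net, [batch_size, -1, self._shape_code_size])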
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras Mask Heads. Contains Mask prediction head classes for different meta architectures. All the mask prediction heads have a predict function that receives the `features` as the first argument and returns `mask_predictions`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from six.moves import range import tensorflow.compat.v1 as tf from object_detection.predictors.heads import head from object_detection.utils import ops from object_detection.utils import shape_utils class ConvolutionalMaskHead(head.KerasHead): """Convolutional class prediction head.""" def __init__(self, is_training, num_classes, use_dropout, dropout_keep_prob, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: Number of classes. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. 
""" super(ConvolutionalMaskHead, self).__init__(name=name) self._is_training = is_training self._num_classes = num_classes self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic self._mask_predictor_layers = [] # Add a slot for the background class. if self._masks_are_class_agnostic: self._num_masks = 1 else: self._num_masks = self._num_classes num_mask_channels = self._num_masks * self._mask_height * self._mask_width if self._use_dropout: self._mask_predictor_layers.append( # The Dropout layer's `training` parameter for the call method must # be set implicitly by the Keras set_learning_phase. The object # detection training code takes care of this. tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: self._mask_predictor_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='MaskPredictor_depthwise', **conv_hyperparams.params())) self._mask_predictor_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='MaskPredictor_depthwise_batchnorm')) self._mask_predictor_layers.append( conv_hyperparams.build_activation_layer( name='MaskPredictor_depthwise_activation')) self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [1, 1], name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) else: self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], padding='SAME', name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: mask_predictions: A float tensors of shape [batch_size, num_anchors, num_masks, mask_height, mask_width] representing the mask predictions for the proposals. """ mask_predictions = features for layer in self._mask_predictor_layers: mask_predictions = layer(mask_predictions) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) return mask_predictions class MaskRCNNMaskHead(head.KerasHead): """Mask RCNN mask prediction head. This is a piece of Mask RCNN which is responsible for predicting just the pixelwise foreground scores for regions within the boxes. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_classes, freeze_batchnorm, conv_hyperparams, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False, convolve_then_upsample=False, name=None): """Constructor. Args: is_training: Indicates whether the Mask head is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). 
freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. mask_height: Desired output mask height. The default value is 14. mask_width: Desired output mask width. The default value is 14. mask_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. mask_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. convolve_then_upsample: Whether to apply convolutions on mask features before upsampling using nearest neighbor resizing. Otherwise, mask features are resized to [`mask_height`, `mask_width`] using bilinear resizing before applying convolutions. name: A string name scope to assign to the mask head. If `None`, Keras will auto-generate one from the class name. """ super(MaskRCNNMaskHead, self).__init__(name=name) self._is_training = is_training self._freeze_batchnorm = freeze_batchnorm self._num_classes = num_classes self._conv_hyperparams = conv_hyperparams self._mask_height = mask_height self._mask_width = mask_width self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers self._mask_prediction_conv_depth = mask_prediction_conv_depth self._masks_are_class_agnostic = masks_are_class_agnostic self._convolve_then_upsample = convolve_then_upsample self._mask_predictor_layers = [] def build(self, input_shapes): num_conv_channels = self._mask_prediction_conv_depth if num_conv_channels == 0: num_feature_channels = input_shapes.as_list()[3] num_conv_channels = self._get_mask_predictor_conv_depth( num_feature_channels, self._num_classes) for i in range(self._mask_prediction_num_conv_layers - 1): self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_conv_channels, [3, 3], padding='SAME', name='MaskPredictor_conv2d_{}'.format(i), **self._conv_hyperparams.params())) self._mask_predictor_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='MaskPredictor_batchnorm_{}'.format(i))) self._mask_predictor_layers.append( self._conv_hyperparams.build_activation_layer( name='MaskPredictor_activation_{}'.format(i))) if self._convolve_then_upsample: # Replace Transposed Convolution with a Nearest Neighbor upsampling step # followed by 3x3 convolution. 
height_scale = self._mask_height // shape_utils.get_dim_as_int( input_shapes[1]) width_scale = self._mask_width // shape_utils.get_dim_as_int( input_shapes[2]) # pylint: disable=g-long-lambda self._mask_predictor_layers.append(tf.keras.layers.Lambda( lambda features: ops.nearest_neighbor_upsampling( features, height_scale=height_scale, width_scale=width_scale) )) # pylint: enable=g-long-lambda self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_conv_channels, [3, 3], padding='SAME', name='MaskPredictor_upsample_conv2d', **self._conv_hyperparams.params())) self._mask_predictor_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='MaskPredictor_upsample_batchnorm')) self._mask_predictor_layers.append( self._conv_hyperparams.build_activation_layer( name='MaskPredictor_upsample_activation')) num_masks = 1 if self._masks_are_class_agnostic else self._num_classes self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_masks, [3, 3], padding='SAME', name='MaskPredictor_last_conv2d', **self._conv_hyperparams.params(use_bias=True))) self.built = True def _get_mask_predictor_conv_depth(self, num_feature_channels, num_classes, class_weight=3.0, feature_weight=2.0): """Computes the depth of the mask predictor convolutions. Computes the depth of the mask predictor convolutions given feature channels and number of classes by performing a weighted average of the two in log space to compute the number of convolution channels. The weights that are used for computing the weighted average do not need to sum to 1. Args: num_feature_channels: An integer containing the number of feature channels. num_classes: An integer containing the number of classes. class_weight: Class weight used in computing the weighted average. feature_weight: Feature weight used in computing the weighted average. Returns: An integer containing the number of convolution channels used by mask predictor. """ num_feature_channels_log = math.log(float(num_feature_channels), 2.0) num_classes_log = math.log(float(num_classes), 2.0) weighted_num_feature_channels_log = ( num_feature_channels_log * feature_weight) weighted_num_classes_log = num_classes_log * class_weight total_weight = feature_weight + class_weight num_conv_channels_log = round( (weighted_num_feature_channels_log + weighted_num_classes_log) / total_weight) return int(math.pow(2.0, num_conv_channels_log)) def _predict(self, features): """Predicts pixelwise foreground scores for regions within the boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. Returns: instance_masks: A float tensor of shape [batch_size, 1, num_classes, mask_height, mask_width]. """ if not self._convolve_then_upsample: features = tf.image.resize_bilinear( features, [self._mask_height, self._mask_width], align_corners=True) mask_predictions = features for layer in self._mask_predictor_layers: mask_predictions = layer(mask_predictions) return tf.expand_dims( tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), axis=1, name='MaskPredictor') class WeightSharedConvolutionalMaskHead(head.KerasHead): """Weight shared convolutional mask prediction head based on Keras.""" def __init__(self, num_classes, num_predictions_per_location, conv_hyperparams, kernel_size=3, use_dropout=False, dropout_keep_prob=0.8, mask_height=7, mask_width=7, masks_are_class_agnostic=False, name=None): """Constructor. Args: num_classes: number of classes. 
Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. kernel_size: Size of final convolution kernel. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. """ super(WeightSharedConvolutionalMaskHead, self).__init__(name=name) self._num_classes = num_classes self._num_predictions_per_location = num_predictions_per_location self._kernel_size = kernel_size self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic self._mask_predictor_layers = [] if self._masks_are_class_agnostic: self._num_masks = 1 else: self._num_masks = self._num_classes num_mask_channels = self._num_masks * self._mask_height * self._mask_width if self._use_dropout: self._mask_predictor_layers.append( tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], padding='SAME', name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: mask_predictions: A tensor of shape [batch_size, num_anchors, num_classes, mask_height, mask_width] representing the mask predictions for the proposals. """ mask_predictions = features for layer in self._mask_predictor_layers: mask_predictions = layer(mask_predictions) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) return mask_predictions
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_mask_head.py
keras_mask_head.py
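As a concrete check of the `_get_mask_predictor_conv_depth` heuristic defined above, here is the computation for an assumed setting of 1024 feature channels and 90 classes; the class/feature weights 3.0 and 2.0 are the method's defaults.

import math

num_feature_channels, num_classes = 1024, 90   # assumed example values
feature_weight, class_weight = 2.0, 3.0        # the method's default weights
# Weighted average of the two quantities in log2 space, as in the method.
num_conv_channels_log = round(
    (math.log(num_feature_channels, 2.0) * feature_weight +
     math.log(num_classes, 2.0) * class_weight) /
    (feature_weight + class_weight))
print(int(math.pow(2.0, num_conv_channels_log)))  # -> 256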
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.class_head.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import class_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class MaskRCNNClassHeadTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): class_prediction_head = class_head.MaskRCNNClassHead( is_training=False, num_class_slots=20, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=True, dropout_keep_prob=0.5) roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = class_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 20], prediction.get_shape().as_list()) def test_scope_name(self): expected_var_names = set([ """ClassPredictor/weights""", """ClassPredictor/biases""" ]) g = tf.Graph() with g.as_default(): class_prediction_head = class_head.MaskRCNNClassHead( is_training=True, num_class_slots=20, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=True, dropout_keep_prob=0.5) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_prediction_head.predict( features=image_feature, num_predictions_per_location=1) actual_variable_set = set([ var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) ]) self.assertSetEqual(expected_var_names, actual_variable_set) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ConvolutionalClassPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): class_prediction_head = class_head.ConvolutionalClassHead( is_training=True, num_class_slots=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3) 
image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list()) def test_scope_name(self): expected_var_names = set([ """ClassPredictor/weights""", """ClassPredictor/biases""" ]) g = tf.Graph() with g.as_default(): class_prediction_head = class_head.ConvolutionalClassHead( is_training=True, num_class_slots=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_prediction_head.predict( features=image_feature, num_predictions_per_location=1) actual_variable_set = set([ var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) ]) self.assertSetEqual(expected_var_names, actual_variable_set) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class WeightSharedConvolutionalClassPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): class_prediction_head = ( class_head.WeightSharedConvolutionalClassHead(num_class_slots=20)) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list()) def test_scope_name(self): expected_var_names = set([ """ClassPredictor/weights""", """ClassPredictor/biases""" ]) g = tf.Graph() with g.as_default(): class_prediction_head = class_head.WeightSharedConvolutionalClassHead( num_class_slots=20) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_prediction_head.predict( features=image_feature, num_predictions_per_location=1) actual_variable_set = set([ var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) ]) self.assertSetEqual(expected_var_names, actual_variable_set) def test_softmax_score_converter(self): num_class_slots = 10 batch_size = 2 height = 17 width = 19 num_predictions_per_location = 2 assert num_predictions_per_location != 1 def graph_fn(): class_prediction_head = ( class_head.WeightSharedConvolutionalClassHead( num_class_slots=num_class_slots, score_converter_fn=tf.nn.softmax)) image_feature = tf.random_uniform([batch_size, height, width, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location) return class_predictions class_predictions_out = self.execute(graph_fn, []) class_predictions_sum = np.sum(class_predictions_out, axis=-1) num_anchors = height * width * num_predictions_per_location exp_class_predictions_sum = np.ones((batch_size, num_anchors), dtype=np.float32) self.assertAllEqual((batch_size, num_anchors, num_class_slots), class_predictions_out.shape) self.assertAllClose(class_predictions_sum, exp_class_predictions_sum) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/class_head_tf1_test.py
class_head_tf1_test.py
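The repeated 323 in the convolutional-head assertions above is simply the anchor count implied by the 17x19 test feature map; a one-line check:

height, width, num_predictions_per_location = 17, 19, 1
num_anchors = height * width * num_predictions_per_location
assert num_anchors == 323   # matches the [64, 323, 20] assertions above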
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.box_head.""" import unittest import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import box_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class MaskRCNNBoxHeadTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): box_prediction_head = box_head.MaskRCNNBoxHead( is_training=False, num_classes=20, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=True, dropout_keep_prob=0.5, box_code_size=4, share_box_across_classes=False) roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = box_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 20, 4], prediction.get_shape().as_list()) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): box_prediction_head = box_head.ConvolutionalBoxHead( is_training=True, box_code_size=4, kernel_size=3) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_encodings = box_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 1, 4], box_encodings.get_shape().as_list()) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, 
hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( box_code_size=4) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_encodings = box_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 4], box_encodings.get_shape().as_list()) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/box_head_tf1_test.py
box_head_tf1_test.py
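The two box-head assertions above differ only in whether a per-class dimension q (always 1 for the plain convolutional head) is kept; the weight-shared head returns the already-flattened layout. A small illustration of the shape bookkeeping, using NumPy only for clarity:

import numpy as np

conv_head_encodings = np.zeros([64, 323, 1, 4], np.float32)  # [batch, anchors, q, code]
weight_shared_encodings = np.squeeze(conv_head_encodings, axis=2)  # [batch, anchors, code]
assert weight_shared_encodings.shape == (64, 323, 4)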
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.class_head.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import keras_class_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ConvolutionalKerasClassPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_prediction_size_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() class_prediction_head = keras_class_head.ConvolutionalClassHead( is_training=True, num_class_slots=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=False) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head(image_feature,) return class_predictions class_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 20], class_predictions.shape) def test_prediction_size_depthwise_true(self): conv_hyperparams = self._build_conv_hyperparams() class_prediction_head = keras_class_head.ConvolutionalClassHead( is_training=True, num_class_slots=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=True) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head(image_feature,) return class_predictions class_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 20], class_predictions.shape) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class MaskRCNNClassHeadTest(test_case.TestCase): def _build_fc_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.KerasLayerHyperparams(hyperparams) def test_prediction_size(self): class_prediction_head = keras_class_head.MaskRCNNClassHead( 
is_training=False, num_class_slots=20, fc_hyperparams=self._build_fc_hyperparams(), freeze_batchnorm=False, use_dropout=True, dropout_keep_prob=0.5) def graph_fn(): roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = class_prediction_head(roi_pooled_features) return prediction prediction = self.execute(graph_fn, []) self.assertAllEqual([64, 1, 20], prediction.shape) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_prediction_size_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( num_class_slots=20, conv_hyperparams=conv_hyperparams, num_predictions_per_location=1, use_depthwise=False) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head(image_feature) return class_predictions class_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 20], class_predictions.shape) def test_prediction_size_depthwise_true(self): conv_hyperparams = self._build_conv_hyperparams() class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( num_class_slots=20, conv_hyperparams=conv_hyperparams, num_predictions_per_location=1, use_depthwise=True) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head(image_feature) return class_predictions class_predictions = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 20], class_predictions.shape) def test_variable_count_depth_wise_true(self): conv_hyperparams = self._build_conv_hyperparams() class_prediction_head = ( keras_class_head.WeightSharedConvolutionalClassHead( num_class_slots=20, conv_hyperparams=conv_hyperparams, num_predictions_per_location=1, use_depthwise=True)) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_prediction_head(image_feature) self.assertEqual(len(class_prediction_head.variables), 3) def test_variable_count_depth_wise_False(self): conv_hyperparams = self._build_conv_hyperparams() class_prediction_head = ( keras_class_head.WeightSharedConvolutionalClassHead( num_class_slots=20, conv_hyperparams=conv_hyperparams, num_predictions_per_location=1, use_depthwise=False)) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_prediction_head(image_feature) self.assertEqual(len(class_prediction_head.variables), 2) def test_softmax_score_converter(self): num_class_slots = 10 batch_size = 2 height = 17 width = 19 num_predictions_per_location = 2 assert num_predictions_per_location != 1 conv_hyperparams = self._build_conv_hyperparams() class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( num_class_slots=num_class_slots, conv_hyperparams=conv_hyperparams, num_predictions_per_location=num_predictions_per_location, score_converter_fn=tf.nn.softmax) def graph_fn(): 
image_feature = tf.random_uniform([batch_size, height, width, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head(image_feature) return class_predictions class_predictions_out = self.execute(graph_fn, []) class_predictions_sum = np.sum(class_predictions_out, axis=-1) num_anchors = height * width * num_predictions_per_location exp_class_predictions_sum = np.ones((batch_size, num_anchors), dtype=np.float32) self.assertAllEqual((batch_size, num_anchors, num_class_slots), class_predictions_out.shape) self.assertAllClose(class_predictions_sum, exp_class_predictions_sum) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_class_head_tf2_test.py
keras_class_head_tf2_test.py
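A possible reading of the variable-count tests above (my interpretation, not something stated in the library): with use_depthwise=True the predictor is a separable convolution, which owns a depthwise kernel, a pointwise kernel, and a bias, while the plain convolution owns only a kernel and a bias. The standalone Keras layers show the same counts:

import tensorflow as tf

separable = tf.keras.layers.SeparableConv2D(20, 3, padding='same')
plain = tf.keras.layers.Conv2D(20, 3, padding='same')
separable.build([1, 17, 19, 1024])
plain.build([1, 17, 19, 1024])
print(len(separable.weights), len(plain.weights))  # 3 2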
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.mask_head.""" import unittest import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import mask_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class MaskRCNNMaskHeadTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): mask_prediction_head = mask_head.MaskRCNNMaskHead( num_classes=20, conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(), mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False) roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = mask_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 20, 14, 14], prediction.get_shape().as_list()) def test_prediction_size_with_convolve_then_upsample(self): mask_prediction_head = mask_head.MaskRCNNMaskHead( num_classes=20, conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(), mask_height=28, mask_width=28, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=True, convolve_then_upsample=True) roi_pooled_features = tf.random_uniform( [64, 14, 14, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = mask_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 1, 28, 28], prediction.get_shape().as_list()) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ConvolutionalMaskPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): mask_prediction_head = mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, mask_height=7, 
mask_width=7) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.get_shape().as_list()) def test_class_agnostic_prediction_size(self): mask_prediction_head = mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, mask_height=7, mask_width=7, masks_are_class_agnostic=True) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.get_shape().as_list()) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class WeightSharedConvolutionalMaskPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): mask_prediction_head = ( mask_head.WeightSharedConvolutionalMaskHead( num_classes=20, mask_height=7, mask_width=7)) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.get_shape().as_list()) def test_class_agnostic_prediction_size(self): mask_prediction_head = ( mask_head.WeightSharedConvolutionalMaskHead( num_classes=20, mask_height=7, mask_width=7, masks_are_class_agnostic=True)) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.get_shape().as_list()) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/mask_head_tf1_test.py
mask_head_tf1_test.py
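Bookkeeping behind the mask-head assertions above: the final convolution emits num_masks * mask_height * mask_width channels per prediction slot, which predict() then reshapes into the 5-D tensor being checked. For the class-specific case tested here:

num_masks, mask_height, mask_width = 20, 7, 7   # num_masks is 1 when class-agnostic
num_predictions_per_location = 1
conv_output_channels = (
    num_predictions_per_location * num_masks * mask_height * mask_width)
assert conv_output_channels == 980   # reshaped to [64, 323, 20, 7, 7] above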
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Box Head. Contains Box prediction head classes for different meta architectures. All the box prediction heads have a predict function that receives the `features` as the first argument and returns `box_encodings`. """ import functools import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.predictors.heads import head class MaskRCNNBoxHead(head.Head): """Box prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_classes, fc_hyperparams_fn, use_dropout, dropout_keep_prob, box_code_size, share_box_across_classes=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. box_code_size: Size of encoding for each box. share_box_across_classes: Whether to share boxes across classes rather than use a different box for each class. """ super(MaskRCNNBoxHead, self).__init__() self._is_training = is_training self._num_classes = num_classes self._fc_hyperparams_fn = fc_hyperparams_fn self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._box_code_size = box_code_size self._share_box_across_classes = share_box_across_classes def predict(self, features, num_predictions_per_location=1): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: box_encodings: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. Raises: ValueError: If num_predictions_per_location is not 1. 
""" if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') flattened_roi_pooled_features = slim.flatten( spatial_averaged_roi_pooled_features) if self._use_dropout: flattened_roi_pooled_features = slim.dropout( flattened_roi_pooled_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training) number_of_boxes = 1 if not self._share_box_across_classes: number_of_boxes = self._num_classes with slim.arg_scope(self._fc_hyperparams_fn()): box_encodings = slim.fully_connected( flattened_roi_pooled_features, number_of_boxes * self._box_code_size, reuse=tf.AUTO_REUSE, activation_fn=None, scope='BoxEncodingPredictor') box_encodings = tf.reshape(box_encodings, [-1, 1, number_of_boxes, self._box_code_size]) return box_encodings class ConvolutionalBoxHead(head.Head): """Convolutional box prediction head.""" def __init__(self, is_training, box_code_size, kernel_size, use_depthwise=False, box_encodings_clip_range=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping box_encodings. Raises: ValueError: if min_depth > max_depth. ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(ConvolutionalBoxHead, self).__init__() self._is_training = is_training self._box_code_size = box_code_size self._kernel_size = kernel_size self._use_depthwise = use_depthwise self._box_encodings_clip_range = box_encodings_clip_range def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. Returns: box_encodings: A float tensors of shape [batch_size, num_anchors, q, code_size] representing the location of the objects, where q is 1 or the number of classes. """ net = features if self._use_depthwise: box_encodings = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope='BoxEncodingPredictor_depthwise') box_encodings = slim.conv2d( box_encodings, num_predictions_per_location * self._box_code_size, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='BoxEncodingPredictor') else: box_encodings = slim.conv2d( net, num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='BoxEncodingPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. 
if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) box_encodings = tf.reshape(box_encodings, [batch_size, -1, 1, self._box_code_size]) return box_encodings # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional box head. class WeightSharedConvolutionalBoxHead(head.Head): """Weight shared convolutional box prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, box_code_size, kernel_size=3, use_depthwise=False, box_encodings_clip_range=None, return_flat_predictions=True): """Constructor. Args: box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping box_encodings. return_flat_predictions: If true, returns flattened prediction tensor of shape [batch, height * width * num_predictions_per_location, box_coder]. Otherwise returns the prediction tensor before reshaping, whose shape is [batch, height, width, num_predictions_per_location * num_class_slots]. Raises: ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(WeightSharedConvolutionalBoxHead, self).__init__() self._box_code_size = box_code_size self._kernel_size = kernel_size self._use_depthwise = use_depthwise self._box_encodings_clip_range = box_encodings_clip_range self._return_flat_predictions = return_flat_predictions def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: box_encodings: A float tensor of shape [batch_size, num_anchors, code_size] representing the location of the objects, or a float tensor of shape [batch, height, width, num_predictions_per_location * box_code_size] representing grid box location predictions if self._return_flat_predictions is False. """ box_encodings_net = features if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d box_encodings = conv_op( box_encodings_net, num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, scope='BoxPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) if self._return_flat_predictions: box_encodings = tf.reshape(box_encodings, [batch_size, -1, self._box_code_size]) return box_encodings
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/box_head.py
box_head.py
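The slim-based heads above accept an optional box_encodings_clip_range exposing min/max attributes for TPU-friendly clipping. The sketch below exercises that path with a plain namedtuple as a stand-in (the library defines its own clip-range type elsewhere, so the namedtuple here is an assumption for the example), run inside a graph since these heads use tf-slim ops:

import collections
import tensorflow.compat.v1 as tf

from object_detection.predictors.heads import box_head

# Stand-in clip-range container; only `min` and `max` attributes are needed.
ClipRange = collections.namedtuple('ClipRange', ['min', 'max'])

with tf.Graph().as_default():
  box_prediction_head = box_head.ConvolutionalBoxHead(
      is_training=False, box_code_size=4, kernel_size=3,
      box_encodings_clip_range=ClipRange(min=-10.0, max=10.0))
  features = tf.random_uniform([2, 17, 19, 64], minval=-20.0, maxval=20.0)
  box_encodings = box_prediction_head.predict(
      features, num_predictions_per_location=1)
  print(box_encodings.get_shape().as_list())  # [2, 323, 1, 4]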
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Box Head. Contains Box prediction head classes for different meta architectures. All the box prediction heads have a _predict function that receives the `features` as the first argument and returns `box_encodings`. """ import tensorflow.compat.v1 as tf from object_detection.predictors.heads import head class ConvolutionalBoxHead(head.KerasHead): """Convolutional box prediction head.""" def __init__(self, is_training, box_code_size, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, use_depthwise=False, box_encodings_clip_range=None, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping box_encodings. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. ValueError: if use_depthwise is True and kernel_size is 1. 
""" if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(ConvolutionalBoxHead, self).__init__(name=name) self._is_training = is_training self._box_code_size = box_code_size self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._box_encodings_clip_range = box_encodings_clip_range self._box_encoder_layers = [] if self._use_depthwise: self._box_encoder_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='BoxEncodingPredictor_depthwise', **conv_hyperparams.params())) self._box_encoder_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='BoxEncodingPredictor_depthwise_batchnorm')) self._box_encoder_layers.append( conv_hyperparams.build_activation_layer( name='BoxEncodingPredictor_depthwise_activation')) self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [1, 1], name='BoxEncodingPredictor', **conv_hyperparams.params(use_bias=True))) else: self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], padding='SAME', name='BoxEncodingPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: box_encodings: A float tensor of shape [batch_size, num_anchors, q, code_size] representing the location of the objects, where q is 1 or the number of classes. """ box_encodings = features for layer in self._box_encoder_layers: box_encodings = layer(box_encodings) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) box_encodings = tf.reshape(box_encodings, [batch_size, -1, 1, self._box_code_size]) return box_encodings class MaskRCNNBoxHead(head.KerasHead): """Box prediction head. This is a piece of Mask RCNN which is responsible for predicting just the box encodings. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_classes, fc_hyperparams, freeze_batchnorm, use_dropout, dropout_keep_prob, box_code_size, share_box_across_classes=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for fully connected dense ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_dropout: Option to use dropout or not. 
Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. box_code_size: Size of encoding for each box. share_box_across_classes: Whether to share boxes across classes rather than use a different box for each class. name: A string name scope to assign to the box head. If `None`, Keras will auto-generate one from the class name. """ super(MaskRCNNBoxHead, self).__init__(name=name) self._is_training = is_training self._num_classes = num_classes self._fc_hyperparams = fc_hyperparams self._freeze_batchnorm = freeze_batchnorm self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._box_code_size = box_code_size self._share_box_across_classes = share_box_across_classes self._box_encoder_layers = [tf.keras.layers.Flatten()] if self._use_dropout: self._box_encoder_layers.append( tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) self._number_of_boxes = 1 if not self._share_box_across_classes: self._number_of_boxes = self._num_classes self._box_encoder_layers.append( tf.keras.layers.Dense(self._number_of_boxes * self._box_code_size, name='BoxEncodingPredictor_dense')) self._box_encoder_layers.append( fc_hyperparams.build_batch_norm(training=(is_training and not freeze_batchnorm), name='BoxEncodingPredictor_batchnorm')) def _predict(self, features): """Predicts box encodings. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. Returns: box_encodings: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. """ spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') net = spatial_averaged_roi_pooled_features for layer in self._box_encoder_layers: net = layer(net) box_encodings = tf.reshape(net, [-1, 1, self._number_of_boxes, self._box_code_size]) return box_encodings # TODO(b/128922690): Unify the implementations of ConvolutionalBoxHead # and WeightSharedConvolutionalBoxHead class WeightSharedConvolutionalBoxHead(head.KerasHead): """Weight shared convolutional box prediction head based on Keras. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, box_code_size, num_predictions_per_location, conv_hyperparams, kernel_size=3, use_depthwise=False, apply_conv_hyperparams_to_heads=False, box_encodings_clip_range=None, return_flat_predictions=True, name=None): """Constructor. Args: box_code_size: Size of encoding for each box. num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. kernel_size: Size of final convolution kernel. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to depthwise seperable convolution layers in the box and class heads. By default, the conv_hyperparams are only applied to layers in the predictor tower when using depthwise separable convolutions. box_encodings_clip_range: Min and max values for clipping box_encodings. 
return_flat_predictions: If true, returns flattened prediction tensor of shape [batch, height * width * num_predictions_per_location, box_coder]. Otherwise returns the prediction tensor before reshaping, whose shape is [batch, height, width, num_predictions_per_location * num_class_slots]. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(WeightSharedConvolutionalBoxHead, self).__init__(name=name) self._box_code_size = box_code_size self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._apply_conv_hyperparams_to_heads = apply_conv_hyperparams_to_heads self._box_encodings_clip_range = box_encodings_clip_range self._return_flat_predictions = return_flat_predictions self._box_encoder_layers = [] if self._use_depthwise: kwargs = conv_hyperparams.params(use_bias=True) if self._apply_conv_hyperparams_to_heads: kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['pointwise_initializer'] = kwargs['kernel_initializer'] self._box_encoder_layers.append( tf.keras.layers.SeparableConv2D( num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], padding='SAME', name='BoxPredictor', **kwargs)) else: self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], padding='SAME', name='BoxPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: box_encodings: A float tensor of shape [batch_size, num_anchors, q, code_size] representing the location of the objects, where q is 1 or the number of classes. """ box_encodings = features for layer in self._box_encoder_layers: box_encodings = layer(box_encodings) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) if self._return_flat_predictions: box_encodings = tf.reshape(box_encodings, [batch_size, -1, self._box_code_size]) return box_encodings
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_box_head.py
keras_box_head.py
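A minimal usage sketch for the ConvolutionalBoxHead above, assuming the conv hyperparams are built from a text proto in the same way the Keras box-head tests further below do. Note that box_encodings_clip_range only needs an object exposing `min` and `max` attributes; the namedtuple here is purely illustrative and not part of the library API.

import collections

import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_box_head
from object_detection.protos import hyperparams_pb2

conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  activation: NONE
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", conv_hyperparams_proto)
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
    conv_hyperparams_proto)

# Illustrative clip-range container; anything with .min/.max attributes works.
ClipRange = collections.namedtuple('ClipRange', ['min', 'max'])

box_head = keras_box_head.ConvolutionalBoxHead(
    is_training=False,
    box_code_size=4,
    kernel_size=3,
    num_predictions_per_location=1,
    conv_hyperparams=conv_hyperparams,
    freeze_batchnorm=True,
    box_encodings_clip_range=ClipRange(min=-10.0, max=10.0))

features = tf.random.uniform([2, 8, 8, 64])
box_encodings = box_head(features)  # shape [2, 8 * 8 * 1, 1, 4]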
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keypoint Head. Contains Keypoint prediction head classes for different meta architectures. All the keypoint prediction heads have a predict function that receives the `features` as the first argument and returns `keypoint_predictions`. Keypoints could be used to represent the human body joint locations as in Mask RCNN paper. Or they could be used to represent different part locations of objects. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.predictors.heads import head class MaskRCNNKeypointHead(head.Head): """Mask RCNN keypoint prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, num_keypoints=17, conv_hyperparams_fn=None, keypoint_heatmap_height=56, keypoint_heatmap_width=56, keypoint_prediction_num_conv_layers=8, keypoint_prediction_conv_depth=512): """Constructor. Args: num_keypoints: (int scalar) number of keypoints. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. keypoint_heatmap_height: Desired output mask height. The default value is 14. keypoint_heatmap_width: Desired output mask width. The default value is 14. keypoint_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. keypoint_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. """ super(MaskRCNNKeypointHead, self).__init__() self._num_keypoints = num_keypoints self._conv_hyperparams_fn = conv_hyperparams_fn self._keypoint_heatmap_height = keypoint_heatmap_height self._keypoint_heatmap_width = keypoint_heatmap_width self._keypoint_prediction_num_conv_layers = ( keypoint_prediction_num_conv_layers) self._keypoint_prediction_conv_depth = keypoint_prediction_conv_depth def predict(self, features, num_predictions_per_location=1): """Performs keypoint prediction. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: instance_masks: A float tensor of shape [batch_size, 1, num_keypoints, heatmap_height, heatmap_width]. Raises: ValueError: If num_predictions_per_location is not 1. 
""" if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') with slim.arg_scope(self._conv_hyperparams_fn()): net = slim.conv2d( features, self._keypoint_prediction_conv_depth, [3, 3], scope='conv_1') for i in range(1, self._keypoint_prediction_num_conv_layers): net = slim.conv2d( net, self._keypoint_prediction_conv_depth, [3, 3], scope='conv_%d' % (i + 1)) net = slim.conv2d_transpose( net, self._num_keypoints, [2, 2], scope='deconv1') heatmaps_mask = tf.image.resize_bilinear( net, [self._keypoint_heatmap_height, self._keypoint_heatmap_width], align_corners=True, name='upsample') return tf.expand_dims( tf.transpose(heatmaps_mask, perm=[0, 3, 1, 2]), axis=1, name='KeypointPredictor')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keypoint_head.py
keypoint_head.py
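A TF1 graph-mode sketch of the MaskRCNNKeypointHead above. The head expects conv_hyperparams_fn to return a captured tf-slim arg_scope; in the real pipeline that function comes from hyperparams_builder, so the hand-written scope below is an assumption kept deliberately minimal.

import tensorflow.compat.v1 as tf
import tf_slim as slim

from object_detection.predictors.heads import keypoint_head


def conv_hyperparams_fn():
  # Minimal hand-rolled arg_scope; normally produced by hyperparams_builder.
  with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                      activation_fn=tf.nn.relu) as scope:
    return scope


with tf.Graph().as_default():
  head = keypoint_head.MaskRCNNKeypointHead(
      num_keypoints=17, conv_hyperparams_fn=conv_hyperparams_fn)
  roi_features = tf.placeholder(tf.float32, [8, 14, 14, 256])
  # Output heatmaps: [batch, 1, num_keypoints, 56, 56] with the defaults.
  heatmaps = head.predict(roi_features, num_predictions_per_location=1)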
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.box_head.""" import unittest import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import keras_box_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ConvolutionalKerasBoxHeadTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_prediction_size_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() box_prediction_head = keras_box_head.ConvolutionalBoxHead( is_training=True, box_code_size=4, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=False) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_encodings = box_prediction_head(image_feature) return box_encodings box_encodings = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 1, 4], box_encodings.shape) def test_prediction_size_depthwise_true(self): conv_hyperparams = self._build_conv_hyperparams() box_prediction_head = keras_box_head.ConvolutionalBoxHead( is_training=True, box_code_size=4, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=True) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_encodings = box_prediction_head(image_feature) return box_encodings box_encodings = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 1, 4], box_encodings.shape) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class MaskRCNNKerasBoxHeadTest(test_case.TestCase): def _build_fc_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.KerasLayerHyperparams(hyperparams) def test_prediction_size(self): box_prediction_head = keras_box_head.MaskRCNNBoxHead( is_training=False, num_classes=20, fc_hyperparams=self._build_fc_hyperparams(), freeze_batchnorm=False, use_dropout=True, dropout_keep_prob=0.5, box_code_size=4, 
share_box_across_classes=False) def graph_fn(): roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = box_prediction_head(roi_pooled_features) return prediction prediction = self.execute(graph_fn, []) self.assertAllEqual([64, 1, 20, 4], prediction.shape) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class WeightSharedConvolutionalKerasBoxHead(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_prediction_size_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( box_code_size=4, conv_hyperparams=conv_hyperparams, num_predictions_per_location=1, use_depthwise=False) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_encodings = box_prediction_head(image_feature) return box_encodings box_encodings = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 4], box_encodings.shape) def test_prediction_size_depthwise_true(self): conv_hyperparams = self._build_conv_hyperparams() box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( box_code_size=4, conv_hyperparams=conv_hyperparams, num_predictions_per_location=1, use_depthwise=True) def graph_fn(): image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_encodings = box_prediction_head(image_feature) return box_encodings box_encodings = self.execute(graph_fn, []) self.assertAllEqual([64, 323, 4], box_encodings.shape) def test_variable_count_depth_wise_true(self): conv_hyperparams = self._build_conv_hyperparams() box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( box_code_size=4, conv_hyperparams=conv_hyperparams, num_predictions_per_location=1, use_depthwise=True) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_prediction_head(image_feature) self.assertEqual(len(box_prediction_head.variables), 3) def test_variable_count_depth_wise_False(self): conv_hyperparams = self._build_conv_hyperparams() box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( box_code_size=4, conv_hyperparams=conv_hyperparams, num_predictions_per_location=1, use_depthwise=False) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_prediction_head(image_feature) self.assertEqual(len(box_prediction_head.variables), 2) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_box_head_tf2_test.py
keras_box_head_tf2_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python binary for exporting SavedModel, tailored for TPU inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from object_detection.tpu_exporters import export_saved_model_tpu_lib flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('pipeline_config_file', None, 'A pipeline_pb2.TrainEvalPipelineConfig config file.') flags.DEFINE_string( 'ckpt_path', None, 'Path to trained checkpoint, typically of the form ' 'path/to/model.ckpt') flags.DEFINE_string('export_dir', None, 'Path to export SavedModel.') flags.DEFINE_string('input_placeholder_name', 'placeholder_tensor', 'Name of input placeholder in model\'s signature_def_map.') flags.DEFINE_string( 'input_type', 'tf_example', 'Type of input node. Can be ' 'one of [`image_tensor`, `encoded_image_string_tensor`, ' '`tf_example`]') flags.DEFINE_boolean('use_bfloat16', False, 'If true, use tf.bfloat16 on TPU.') def main(argv): if len(argv) > 1: raise tf.app.UsageError('Too many command-line arguments.') export_saved_model_tpu_lib.export(FLAGS.pipeline_config_file, FLAGS.ckpt_path, FLAGS.export_dir, FLAGS.input_placeholder_name, FLAGS.input_type, FLAGS.use_bfloat16) if __name__ == '__main__': tf.app.flags.mark_flag_as_required('pipeline_config_file') tf.app.flags.mark_flag_as_required('ckpt_path') tf.app.flags.mark_flag_as_required('export_dir') tf.app.run()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/export_saved_model_tpu.py
export_saved_model_tpu.py
# Lint as: python2, python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for Utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf from object_detection.tpu_exporters import utils class UtilsTest(tf.test.TestCase): def testBfloat16ToFloat32(self): bfloat16_tensor = tf.random.uniform([2, 3], dtype=tf.bfloat16) float32_tensor = utils.bfloat16_to_float32(bfloat16_tensor) self.assertEqual(float32_tensor.dtype, tf.float32) def testOtherDtypesNotConverted(self): int32_tensor = tf.ones([2, 3], dtype=tf.int32) converted_tensor = utils.bfloat16_to_float32(int32_tensor) self.assertEqual(converted_tensor.dtype, tf.int32) def testBfloat16ToFloat32Nested(self): tensor_dict = { 'key1': tf.random.uniform([2, 3], dtype=tf.bfloat16), 'key2': [ tf.random.uniform([1, 2], dtype=tf.bfloat16) for _ in range(3) ], 'key3': tf.ones([2, 3], dtype=tf.int32), } tensor_dict = utils.bfloat16_to_float32_nested(tensor_dict) self.assertEqual(tensor_dict['key1'].dtype, tf.float32) for t in tensor_dict['key2']: self.assertEqual(t.dtype, tf.float32) self.assertEqual(tensor_dict['key3'].dtype, tf.int32) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/utils_test.py
utils_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for TPU inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf def bfloat16_to_float32(tensor): """Converts a tensor to tf.float32 only if it is tf.bfloat16.""" if tensor.dtype == tf.bfloat16: return tf.cast(tensor, dtype=tf.float32) else: return tensor def bfloat16_to_float32_nested(bfloat16_tensor_dict): """Converts bfloat16 tensors in a nested structure to float32. Other tensors not of dtype bfloat16 will be left as is. Args: bfloat16_tensor_dict: A Python dict, values being Tensor or Python list/tuple of Tensor. Returns: A Python dict with the same structure as `bfloat16_tensor_dict`, with all bfloat16 tensors converted to float32. """ float32_tensor_dict = {} for k, v in bfloat16_tensor_dict.items(): if isinstance(v, tf.Tensor): float32_tensor_dict[k] = bfloat16_to_float32(v) elif isinstance(v, (list, tuple)): float32_tensor_dict[k] = [bfloat16_to_float32(t) for t in v] return float32_tensor_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/utils.py
utils.py
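A short sketch of the conversion helpers above: bfloat16 tensors, including those nested in lists, become float32, while other dtypes pass through untouched.

import tensorflow.compat.v1 as tf

from object_detection.tpu_exporters import utils

tensor_dict = {
    'boxes': tf.random.uniform([2, 4], dtype=tf.bfloat16),
    'scores': [tf.random.uniform([2], dtype=tf.bfloat16) for _ in range(3)],
    'classes': tf.ones([2], dtype=tf.int32),
}
converted = utils.bfloat16_to_float32_nested(tensor_dict)
# converted['boxes'].dtype == tf.float32
# all(t.dtype == tf.float32 for t in converted['scores'])
# converted['classes'].dtype == tf.int32 (left unchanged)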
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python library for ssd model, tailored for TPU inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top # Checking TF version, because this module relies on TPUPartitionedCall # in tensorflow.python.tpu, which is not available until TF r1.14. major, minor, _ = tf.__version__.split('.') # pylint: disable=protected-access if int(major) < 1 or (int(major == 1) and int(minor) < 14): raise RuntimeError( 'TensorFlow version >= 1.14 is required. Found ({}).'.format( tf.__version__)) # pylint: disable=protected-access from tensorflow.python.framework import function from tensorflow.python.tpu import functional as tpu_functional from tensorflow.python.tpu import tpu from tensorflow.python.tpu.bfloat16 import bfloat16_scope from tensorflow.python.tpu.ops import tpu_ops from object_detection import exporter from object_detection.builders import model_builder from object_detection.tpu_exporters import utils ANCHORS = 'anchors' BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' def get_prediction_tensor_shapes(pipeline_config): """Gets static shapes of tensors by building the graph on CPU. This function builds the graph on CPU and obtain static shapes of output tensors from TPUPartitionedCall. Shapes information are later used for setting shapes of tensors when TPU graphs are built. This is necessary because tensors coming out of TPUPartitionedCall lose their shape information, which are needed for a lot of CPU operations later. Args: pipeline_config: A TrainEvalPipelineConfig proto. Returns: A python dict of tensors' names and their shapes. """ detection_model = model_builder.build( pipeline_config.model, is_training=False) _, input_tensors = exporter.input_placeholder_fn_map['image_tensor']() inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) return { BOX_ENCODINGS: prediction_dict[BOX_ENCODINGS].shape.as_list(), CLASS_PREDICTIONS_WITH_BACKGROUND: prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND].shape.as_list(), ANCHORS: prediction_dict[ANCHORS].shape.as_list(), } def recover_shape(preprocessed_inputs, prediction_outputs, shapes_info): """Recovers shape from TPUPartitionedCall. Args: preprocessed_inputs: 4D tensor, shaped (batch, channels, height, width) prediction_outputs: Python list of tensors, in the following order - box_encodings - 3D tensor, shaped (code_size, batch, num_anchors); class_predictions_with_background - 3D tensor, shaped (num_classes + 1, batch, num_anchors); anchors - 2D tensor, shaped (4, num_anchors) shapes_info: Python dict of tensor shapes as lists. 
Returns: preprocessed_inputs: 4D tensor, shaped (batch, height, width, channels) box_encodings: 3D tensor, shaped (batch, num_anchors, code_size) class_predictions_with_background: 3D tensor, shaped (batch, num_anchors, num_classes + 1) anchors: 2D tensor, shaped (num_anchors, 4) """ # Dimshuffle: (b, c, h, w) -> (b, h, w, c) preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1]) box_encodings = tf.transpose(prediction_outputs[0], perm=[1, 2, 0]) # [None, None, detection_model._box_coder.code_size] box_encodings.set_shape(shapes_info[BOX_ENCODINGS]) class_predictions_with_background = tf.transpose( prediction_outputs[1], perm=[1, 2, 0]) # [None, None, num_classes + 1] class_predictions_with_background.set_shape( shapes_info[CLASS_PREDICTIONS_WITH_BACKGROUND]) anchors = tf.transpose(prediction_outputs[2], perm=[1, 0]) # [None, 4] anchors.set_shape(shapes_info[ANCHORS]) return (preprocessed_inputs, box_encodings, class_predictions_with_background, anchors) def build_graph(pipeline_config, shapes_info, input_type='encoded_image_string_tensor', use_bfloat16=False): """Builds TPU serving graph of ssd to be exported. Args: pipeline_config: A TrainEvalPipelineConfig proto. shapes_info: A python dict of tensors' names and their shapes, returned by `get_prediction_tensor_shapes()`. input_type: One of 'encoded_image_string_tensor': a 1d tensor with dtype=tf.string 'image_tensor': a 4d tensor with dtype=tf.uint8 'tf_example': a 1d tensor with dtype=tf.string use_bfloat16: If true, use tf.bfloat16 on TPU. Returns: placeholder_tensor: A placeholder tensor, type determined by `input_type`. result_tensor_dict: A python dict of tensors' names and tensors. """ detection_model = model_builder.build( pipeline_config.model, is_training=False) placeholder_tensor, input_tensors = \ exporter.input_placeholder_fn_map[input_type]() inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) # Dimshuffle: (b, h, w, c) -> (b, c, h, w) # This is to avoid extra padding due to TPU memory layout: # We swap larger dimensions in and smaller dimensions out, so that small # dimensions don't get padded tens / hundreds times of its own size. # This trick is applied to other similar tensors below. preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 3, 1, 2]) if use_bfloat16: preprocessed_inputs = tf.cast(preprocessed_inputs, dtype=tf.bfloat16) def predict_tpu_subgraph(preprocessed_inputs, true_image_shapes): """Wraps over the CPU version of `predict()`. This builds a same graph as the original `predict()`, manipulates result tensors' dimensions to be memory efficient on TPU, and returns them as list of tensors. Args: preprocessed_inputs: A 4D tensor of shape (batch, channels, height, width) true_image_shapes: True image shapes tensor. 
Returns: A Python list of tensors: box_encodings: 3D tensor of shape (code_size, batch_size, num_anchors) class_predictions_with_background: 3D tensor, shape (num_classes + 1, batch_size, num_anchors) anchors: 2D tensor of shape (4, num_anchors) """ # Dimshuffle: (b, c, h, w) -> (b, h, w, c) preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1]) if use_bfloat16: with bfloat16_scope(): prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) else: prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) # Dimshuffle: (batch, anchors, depth) -> (depth, batch, anchors) return [ tf.transpose(prediction_dict[BOX_ENCODINGS], perm=[2, 0, 1]), tf.transpose( prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND], perm=[2, 0, 1]), tf.transpose(prediction_dict[ANCHORS], perm=[1, 0]), ] @function.Defun(capture_resource_var_by_value=False) def predict_tpu(): return tpu.rewrite(predict_tpu_subgraph, [preprocessed_inputs, true_image_shapes]) prediction_outputs = tpu_functional.TPUPartitionedCall( args=predict_tpu.captured_inputs, device_ordinal=tpu_ops.tpu_ordinal_selector(), Tout=[o.type for o in predict_tpu.definition.signature.output_arg], f=predict_tpu) (preprocessed_inputs, box_encodings, class_predictions_with_background, anchors) = recover_shape(preprocessed_inputs, prediction_outputs, shapes_info) output_tensors = { 'preprocessed_inputs': preprocessed_inputs, BOX_ENCODINGS: box_encodings, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background, ANCHORS: anchors, } if use_bfloat16: output_tensors = utils.bfloat16_to_float32_nested(output_tensors) postprocessed_tensors = detection_model.postprocess(output_tensors, true_image_shapes) result_tensor_dict = exporter.add_output_tensor_nodes(postprocessed_tensors, 'inference_op') return placeholder_tensor, result_tensor_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/ssd.py
ssd.py
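A TF1 graph-mode sketch of driving the two SSD entry points above directly, assuming a valid SSD pipeline config at a placeholder path. Static output shapes are captured on CPU first and then used to rebuild the TPU-partitioned serving graph.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from object_detection.protos import pipeline_pb2
from object_detection.tpu_exporters import ssd

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
# Placeholder path; substitute a real SSD pipeline config.
with tf.gfile.GFile('path/to/ssd_pipeline.config', 'r') as f:
  text_format.Merge(f.read(), pipeline_config)

# First pass on CPU to record static output shapes.
shapes_info = ssd.get_prediction_tensor_shapes(pipeline_config)

# Fresh graph for the TPU serving version.
with tf.Graph().as_default():
  placeholder_tensor, result_tensor_dict = ssd.build_graph(
      pipeline_config, shapes_info,
      input_type='image_tensor', use_bfloat16=False)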
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python library for faster_rcnn model, tailored for TPU inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=protected-access import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top # Checking TF version, because this module relies on TPUPartitionedCall # in tensorflow.python.tpu, which is not available until TF r1.14. major, minor, _ = tf.__version__.split('.') # pylint: disable=protected-access if int(major) < 1 or (int(major == 1) and int(minor) < 14): raise RuntimeError( 'TensorFlow version >= 1.14 is required. Found ({}).'.format( tf.__version__)) from tensorflow.python.framework import function from tensorflow.python.tpu import functional as tpu_functional from tensorflow.python.tpu import tpu from tensorflow.python.tpu.bfloat16 import bfloat16_scope from tensorflow.python.tpu.ops import tpu_ops from object_detection import exporter from object_detection.builders import model_builder from object_detection.tpu_exporters import utils ANCHORS = 'anchors' BOX_CLASSIFIER_FEATURES = 'box_classifier_features' BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' IMAGE_SHAPE = 'image_shape' NUM_PROPOSALS = 'num_proposals' PROPOSAL_BOXES = 'proposal_boxes' PROPOSAL_BOXES_NORMALIZED = 'proposal_boxes_normalized' REFINED_BOX_ENCODINGS = 'refined_box_encodings' RPN_BOX_ENCODINGS = 'rpn_box_encodings' RPN_BOX_PREDICTOR_FEATURES = 'rpn_box_predictor_features' RPN_FEATURES_TO_CROP = 'rpn_features_to_crop' RPN_OBJECTNESS_PREDICTIONS_WITH_BACKGROUND = \ 'rpn_objectness_predictions_with_background' INPUT_BUILDER_UTIL_MAP = { 'model_build': model_builder.build, } def modify_config(pipeline_config): """Modifies pipeline config to build the correct graph for TPU.""" # faster_rcnn.use_static_shapes and faster_rcnn.use_static_shapes_for_eval # are set to True in order for detection_model.use_static_shapes to be True. # We need to set this so that clip_to_window in _predict_first_stage # can work on TPU. However as a side-effect, the flag forces the use of # padded version of NMS. pipeline_config.model.faster_rcnn.use_static_shapes = True pipeline_config.model.faster_rcnn.use_static_shapes_for_eval = True pipeline_config.model.faster_rcnn.use_matmul_crop_and_resize = True pipeline_config.model.faster_rcnn.clip_anchors_to_image = True return pipeline_config def get_prediction_tensor_shapes(pipeline_config): """Gets static shapes of tensors by building the graph on CPU. This function builds the graph on CPU and obtain static shapes of output tensors from TPUPartitionedCall. Shapes information are later used for setting shapes of tensors when TPU graphs are built. 
This is necessary because tensors coming out of TPUPartitionedCall lose their shape information, which are needed for a lot of CPU operations later. Args: pipeline_config: A TrainEvalPipelineConfig proto. Returns: A python dict of tensors' names and their shapes. """ pipeline_config = modify_config(pipeline_config) detection_model = INPUT_BUILDER_UTIL_MAP['model_build']( pipeline_config.model, is_training=False) _, input_tensors = exporter.input_placeholder_fn_map['image_tensor']() inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) shapes_info = {} for k, v in prediction_dict.items(): if isinstance(v, list): shapes_info[k] = [item.shape.as_list() for item in v] else: shapes_info[k] = v.shape.as_list() return shapes_info def build_graph(pipeline_config, shapes_info, input_type='encoded_image_string_tensor', use_bfloat16=True): """Builds serving graph of faster_rcnn to be exported. Args: pipeline_config: A TrainEvalPipelineConfig proto. shapes_info: A python dict of tensors' names and their shapes, returned by `get_prediction_tensor_shapes()`. input_type: One of 'encoded_image_string_tensor': a 1d tensor with dtype=tf.string 'image_tensor': a 4d tensor with dtype=tf.uint8 'tf_example': a 1d tensor with dtype=tf.string use_bfloat16: If true, use tf.bfloat16 on TPU. Returns: placeholder_tensor: A placeholder tensor, type determined by `input_type`. result_tensor_dict: A python dict of tensors' names and tensors. """ pipeline_config = modify_config(pipeline_config) detection_model = INPUT_BUILDER_UTIL_MAP['model_build']( pipeline_config.model, is_training=False) placeholder_tensor, input_tensors = \ exporter.input_placeholder_fn_map[input_type]() # CPU pre-processing inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) # Dimshuffle: [b, h, w, c] -> [b, c, h, w] preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 3, 1, 2]) if use_bfloat16: preprocessed_inputs = tf.cast(preprocessed_inputs, dtype=tf.bfloat16) # TPU feature extraction def tpu_subgraph_predict_fn(preprocessed_inputs, true_image_shapes): """Defines the first part of graph on TPU.""" # [b, c, h, w] -> [b, h, w, c] preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1]) prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) return ( # [batch, anchor, depth] -> [depth, batch, anchor] tf.transpose(prediction_dict[RPN_BOX_ENCODINGS], perm=[2, 0, 1]), # [batch, anchor, depth] -> [depth, batch, anchor] tf.transpose( prediction_dict[RPN_OBJECTNESS_PREDICTIONS_WITH_BACKGROUND], perm=[2, 0, 1]), # [anchors, depth] tf.transpose(prediction_dict[ANCHORS], perm=[1, 0]), # [num_proposals, num_classes, code_size] prediction_dict[REFINED_BOX_ENCODINGS], prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND], prediction_dict[NUM_PROPOSALS], prediction_dict[PROPOSAL_BOXES]) @function.Defun(capture_resource_var_by_value=False) def tpu_subgraph_predict(): if use_bfloat16: with bfloat16_scope(): return tpu.rewrite(tpu_subgraph_predict_fn, [preprocessed_inputs, true_image_shapes]) else: return tpu.rewrite(tpu_subgraph_predict_fn, [preprocessed_inputs, true_image_shapes]) (rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, refined_box_encodings, class_predictions_with_background, num_proposals, proposal_boxes) = tpu_functional.TPUPartitionedCall( 
args=tpu_subgraph_predict.captured_inputs, device_ordinal=tpu_ops.tpu_ordinal_selector(), Tout=[ o.type for o in tpu_subgraph_predict.definition.signature.output_arg ], f=tpu_subgraph_predict) prediction_dict = { RPN_BOX_ENCODINGS: tf.transpose(rpn_box_encodings, perm=[1, 2, 0]), RPN_OBJECTNESS_PREDICTIONS_WITH_BACKGROUND: tf.transpose( rpn_objectness_predictions_with_background, perm=[1, 2, 0]), ANCHORS: tf.transpose(anchors, perm=[1, 0]), REFINED_BOX_ENCODINGS: refined_box_encodings, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background, NUM_PROPOSALS: num_proposals, PROPOSAL_BOXES: proposal_boxes } for k in prediction_dict: if isinstance(prediction_dict[k], list): for idx in range(len(prediction_dict[k])): prediction_dict[k][idx].set_shape(shapes_info[k][idx]) else: prediction_dict[k].set_shape(shapes_info[k]) if use_bfloat16: prediction_dict = utils.bfloat16_to_float32_nested(prediction_dict) # CPU post-processing (NMS) postprocessed_tensors = detection_model.postprocess(prediction_dict, true_image_shapes) result_tensor_dict = exporter.add_output_tensor_nodes(postprocessed_tensors, 'inference_op') return placeholder_tensor, result_tensor_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/faster_rcnn.py
faster_rcnn.py
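The Faster R-CNN exporter mirrors the SSD flow, but modify_config forces static shapes, matmul crop-and-resize and anchor clipping so the first stage can run on TPU. A TF1 graph-mode sketch, again assuming a placeholder config path; modify_config is also applied inside both helpers and is called here only to make the forced flags visible.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from object_detection.protos import pipeline_pb2
from object_detection.tpu_exporters import faster_rcnn

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
# Placeholder path; substitute a real Faster R-CNN pipeline config.
with tf.gfile.GFile('path/to/faster_rcnn_pipeline.config', 'r') as f:
  text_format.Merge(f.read(), pipeline_config)

pipeline_config = faster_rcnn.modify_config(pipeline_config)
assert pipeline_config.model.faster_rcnn.use_static_shapes
assert pipeline_config.model.faster_rcnn.clip_anchors_to_image

shapes_info = faster_rcnn.get_prediction_tensor_shapes(pipeline_config)
with tf.Graph().as_default():
  placeholder_tensor, result_tensor_dict = faster_rcnn.build_graph(
      pipeline_config, shapes_info,
      input_type='image_tensor', use_bfloat16=True)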
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python library for exporting SavedModel, tailored for TPU inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from google.protobuf import text_format # pylint: disable=g-direct-tensorflow-import from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import tag_constants from tensorflow.python.tpu import tpu # pylint: enable=g-direct-tensorflow-import from object_detection.protos import pipeline_pb2 from object_detection.tpu_exporters import faster_rcnn from object_detection.tpu_exporters import ssd model_map = { 'faster_rcnn': faster_rcnn, 'ssd': ssd, } def parse_pipeline_config(pipeline_config_file): """Returns pipeline config and meta architecture name.""" with tf.gfile.GFile(pipeline_config_file, 'r') as config_file: config_str = config_file.read() pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() text_format.Merge(config_str, pipeline_config) meta_arch = pipeline_config.model.WhichOneof('model') return pipeline_config, meta_arch def export(pipeline_config_file, ckpt_path, export_dir, input_placeholder_name='placeholder_tensor', input_type='encoded_image_string_tensor', use_bfloat16=False): """Exports as SavedModel. Args: pipeline_config_file: Pipeline config file name. ckpt_path: Training checkpoint path. export_dir: Directory to export SavedModel. input_placeholder_name: input placeholder's name in SavedModel signature. input_type: One of 'encoded_image_string_tensor': a 1d tensor with dtype=tf.string 'image_tensor': a 4d tensor with dtype=tf.uint8 'tf_example': a 1d tensor with dtype=tf.string use_bfloat16: If true, use tf.bfloat16 on TPU. 
""" pipeline_config, meta_arch = parse_pipeline_config(pipeline_config_file) shapes_info = model_map[meta_arch].get_prediction_tensor_shapes( pipeline_config) with tf.Graph().as_default(), tf.Session() as sess: placeholder_tensor, result_tensor_dict = model_map[meta_arch].build_graph( pipeline_config, shapes_info, input_type, use_bfloat16) saver = tf.train.Saver() init_op = tf.global_variables_initializer() sess.run(init_op) if ckpt_path is not None: saver.restore(sess, ckpt_path) # export saved model builder = tf.saved_model.builder.SavedModelBuilder(export_dir) tensor_info_inputs = { input_placeholder_name: tf.saved_model.utils.build_tensor_info(placeholder_tensor) } tensor_info_outputs = { k: tf.saved_model.utils.build_tensor_info(v) for k, v in result_tensor_dict.items() } detection_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs=tensor_info_inputs, outputs=tensor_info_outputs, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) tf.logging.info('Inputs:\n{}\nOutputs:{}\nPredict method name:{}'.format( tensor_info_inputs, tensor_info_outputs, tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) # Graph for TPU. builder.add_meta_graph_and_variables( sess, [ tf.saved_model.tag_constants.SERVING, tf.saved_model.tag_constants.TPU ], signature_def_map={ tf.saved_model.signature_constants .DEFAULT_SERVING_SIGNATURE_DEF_KEY: detection_signature, }, strip_default_attrs=True) # Graph for CPU, this is for passing infra validation. builder.add_meta_graph( [tf.saved_model.tag_constants.SERVING], signature_def_map={ tf.saved_model.signature_constants .DEFAULT_SERVING_SIGNATURE_DEF_KEY: detection_signature, }, strip_default_attrs=True) builder.save(as_text=False) tf.logging.info('Model saved to {}'.format(export_dir)) def run_inference(inputs, pipeline_config_file, ckpt_path, input_type='encoded_image_string_tensor', use_bfloat16=False, repeat=1): """Runs inference on TPU. Args: inputs: Input image with the same type as `input_type` pipeline_config_file: Pipeline config file name. ckpt_path: Training checkpoint path. input_type: One of 'encoded_image_string_tensor': a 1d tensor with dtype=tf.string 'image_tensor': a 4d tensor with dtype=tf.uint8 'tf_example': a 1d tensor with dtype=tf.string use_bfloat16: If true, use tf.bfloat16 on TPU. repeat: Number of times to repeat running the provided input for profiling. Returns: A dict of resulting tensors. """ pipeline_config, meta_arch = parse_pipeline_config(pipeline_config_file) shapes_info = model_map[meta_arch].get_prediction_tensor_shapes( pipeline_config) with tf.Graph().as_default(), tf.Session() as sess: placeholder_tensor, result_tensor_dict = model_map[meta_arch].build_graph( pipeline_config, shapes_info, input_type, use_bfloat16) saver = tf.train.Saver() init_op = tf.global_variables_initializer() sess.run(tpu.initialize_system()) sess.run(init_op) if ckpt_path is not None: saver.restore(sess, ckpt_path) for _ in range(repeat): tensor_dict_out = sess.run( result_tensor_dict, feed_dict={placeholder_tensor: [inputs]}) sess.run(tpu.shutdown_system()) return tensor_dict_out def run_inference_from_saved_model(inputs, saved_model_dir, input_placeholder_name='placeholder_tensor', repeat=1): """Loads saved model and run inference on TPU. Args: inputs: Input image with the same type as `input_type` saved_model_dir: The directory SavedModel being exported to. input_placeholder_name: input placeholder's name in SavedModel signature. repeat: Number of times to repeat running the provided input for profiling. 
Returns: A dict of resulting tensors. """ with tf.Graph().as_default(), tf.Session() as sess: meta_graph = loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], saved_model_dir) sess.run(tpu.initialize_system()) key_prediction = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY tensor_name_input = ( meta_graph.signature_def[key_prediction].inputs[input_placeholder_name] .name) tensor_name_output = { k: v.name for k, v in (meta_graph.signature_def[key_prediction].outputs.items()) } for _ in range(repeat): tensor_dict_out = sess.run( tensor_name_output, feed_dict={tensor_name_input: [inputs]}) sess.run(tpu.shutdown_system()) return tensor_dict_out
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/export_saved_model_tpu_lib.py
export_saved_model_tpu_lib.py
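An end-to-end sketch of the library above: export a TPU-tagged SavedModel, then run it back through the loader path. Paths and the input image are placeholders.

import numpy as np

from object_detection.tpu_exporters import export_saved_model_tpu_lib

export_saved_model_tpu_lib.export(
    pipeline_config_file='path/to/pipeline.config',  # placeholder
    ckpt_path='path/to/model.ckpt',                  # placeholder
    export_dir='/tmp/tpu_saved_model',
    input_type='image_tensor',
    use_bfloat16=True)

# On a TPU host, feed a single image and get the detection dict back.
image = np.random.randint(0, 255, size=(256, 256, 3)).astype(np.uint8)
outputs = export_saved_model_tpu_lib.run_inference_from_saved_model(
    image, '/tmp/tpu_saved_model')
for name, value in outputs.items():
  print(name, value.shape)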
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/__init__.py
__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for object detection's TPU exporter.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from object_detection.tpu_exporters import export_saved_model_tpu_lib from object_detection.utils import tf_version flags = tf.app.flags FLAGS = flags.FLAGS def get_path(path_suffix): return os.path.join(tf.resource_loader.get_data_files_path(), 'testdata', path_suffix) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ExportSavedModelTPUTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters( ('ssd', get_path('ssd/ssd_pipeline.config'), 'image_tensor', True, 20), ('faster_rcnn', get_path('faster_rcnn/faster_rcnn_resnet101_atrous_coco.config'), 'image_tensor', True, 20)) def testExportAndLoad(self, pipeline_config_file, input_type='image_tensor', use_bfloat16=False, repeat=1): input_placeholder_name = 'placeholder_tensor' export_dir = os.path.join(FLAGS.test_tmpdir, 'tpu_saved_model') if tf.gfile.Exists(export_dir): tf.gfile.DeleteRecursively(export_dir) ckpt_path = None export_saved_model_tpu_lib.export(pipeline_config_file, ckpt_path, export_dir, input_placeholder_name, input_type, use_bfloat16) inputs = np.random.rand(256, 256, 3) tensor_dict_out = export_saved_model_tpu_lib.run_inference_from_saved_model( inputs, export_dir, input_placeholder_name, repeat) for k, v in tensor_dict_out.items(): tf.logging.info('{}: {}'.format(k, v)) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/export_saved_model_tpu_lib_tf1_test.py
export_saved_model_tpu_lib_tf1_test.py
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/testdata/__init__.py
__init__.py
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.models.model_builder.""" from absl.testing import parameterized from google.protobuf import text_format from object_detection.builders import model_builder from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.meta_architectures import rfcn_meta_arch from object_detection.meta_architectures import ssd_meta_arch from object_detection.protos import hyperparams_pb2 from object_detection.protos import losses_pb2 from object_detection.protos import model_pb2 from object_detection.utils import test_case class ModelBuilderTest(test_case.TestCase, parameterized.TestCase): def default_ssd_feature_extractor(self): raise NotImplementedError def default_faster_rcnn_feature_extractor(self): raise NotImplementedError def ssd_feature_extractors(self): raise NotImplementedError def get_override_base_feature_extractor_hyperparams(self, extractor_type): raise NotImplementedError def faster_rcnn_feature_extractors(self): raise NotImplementedError def create_model(self, model_config, is_training=True): """Builds a DetectionModel based on the model config. Args: model_config: A model.proto object containing the config for the desired DetectionModel. is_training: True if this model is being built for training purposes. Returns: DetectionModel based on the config. """ return model_builder.build(model_config, is_training=is_training) def create_default_ssd_model_proto(self): """Creates a DetectionModel proto with ssd model fields populated.""" model_text_proto = """ ssd { feature_extractor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model_proto.ssd.feature_extractor.type = (self. 
default_ssd_feature_extractor()) return model_proto def create_default_faster_rcnn_model_proto(self): """Creates a DetectionModel proto with FasterRCNN model fields populated.""" model_text_proto = """ faster_rcnn { inplace_batchnorm_update: false num_classes: 3 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } initial_crop_size: 14 maxpool_kernel_size: 2 maxpool_stride: 2 second_stage_box_predictor { mask_rcnn_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } fc_hyperparams { op: FC regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.01 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) (model_proto.faster_rcnn.feature_extractor.type ) = self.default_faster_rcnn_feature_extractor() return model_proto def test_create_ssd_models_from_config(self): model_proto = self.create_default_ssd_model_proto() for extractor_type, extractor_class in self.ssd_feature_extractors().items( ): model_proto.ssd.feature_extractor.type = extractor_type model_proto.ssd.feature_extractor.override_base_feature_extractor_hyperparams = ( self.get_override_base_feature_extractor_hyperparams(extractor_type)) model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, extractor_class) def test_create_ssd_fpn_model_from_config(self): model_proto = self.create_default_ssd_model_proto() model_proto.ssd.feature_extractor.fpn.min_level = 3 model_proto.ssd.feature_extractor.fpn.max_level = 7 model = model_builder.build(model_proto, is_training=True) self.assertEqual(model._feature_extractor._fpn_min_level, 3) self.assertEqual(model._feature_extractor._fpn_max_level, 7) @parameterized.named_parameters( { 'testcase_name': 'mask_rcnn_with_matmul', 'use_matmul_crop_and_resize': False, 'enable_mask_prediction': True }, { 'testcase_name': 'mask_rcnn_without_matmul', 'use_matmul_crop_and_resize': True, 'enable_mask_prediction': True }, { 'testcase_name': 'faster_rcnn_with_matmul', 'use_matmul_crop_and_resize': False, 'enable_mask_prediction': False }, { 'testcase_name': 'faster_rcnn_without_matmul', 'use_matmul_crop_and_resize': True, 'enable_mask_prediction': False }, ) def test_create_faster_rcnn_models_from_config(self, use_matmul_crop_and_resize, enable_mask_prediction): model_proto = self.create_default_faster_rcnn_model_proto() faster_rcnn_config = model_proto.faster_rcnn faster_rcnn_config.use_matmul_crop_and_resize = use_matmul_crop_and_resize if enable_mask_prediction: faster_rcnn_config.second_stage_mask_prediction_loss_weight = 3.0 mask_predictor_config = ( faster_rcnn_config.second_stage_box_predictor.mask_rcnn_box_predictor) mask_predictor_config.predict_instance_masks = True for extractor_type, extractor_class in ( self.faster_rcnn_feature_extractors().items()): faster_rcnn_config.feature_extractor.type = extractor_type model = model_builder.build(model_proto, 
is_training=True) self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch) self.assertIsInstance(model._feature_extractor, extractor_class) if enable_mask_prediction: self.assertAlmostEqual(model._second_stage_mask_loss_weight, 3.0) def test_create_faster_rcnn_model_from_config_with_example_miner(self): model_proto = self.create_default_faster_rcnn_model_proto() model_proto.faster_rcnn.hard_example_miner.num_hard_examples = 64 model = model_builder.build(model_proto, is_training=True) self.assertIsNotNone(model._hard_example_miner) def test_create_rfcn_model_from_config(self): model_proto = self.create_default_faster_rcnn_model_proto() rfcn_predictor_config = ( model_proto.faster_rcnn.second_stage_box_predictor.rfcn_box_predictor) rfcn_predictor_config.conv_hyperparams.op = hyperparams_pb2.Hyperparams.CONV for extractor_type, extractor_class in ( self.faster_rcnn_feature_extractors().items()): model_proto.faster_rcnn.feature_extractor.type = extractor_type model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, rfcn_meta_arch.RFCNMetaArch) self.assertIsInstance(model._feature_extractor, extractor_class) @parameterized.parameters(True, False) def test_create_faster_rcnn_from_config_with_crop_feature( self, output_final_box_features): model_proto = self.create_default_faster_rcnn_model_proto() model_proto.faster_rcnn.output_final_box_features = ( output_final_box_features) _ = model_builder.build(model_proto, is_training=True) def test_invalid_model_config_proto(self): model_proto = '' with self.assertRaisesRegex( ValueError, 'model_config not of type model_pb2.DetectionModel.'): model_builder.build(model_proto, is_training=True) def test_unknown_meta_architecture(self): model_proto = model_pb2.DetectionModel() with self.assertRaisesRegex(ValueError, 'Unknown meta architecture'): model_builder.build(model_proto, is_training=True) def test_unknown_ssd_feature_extractor(self): model_proto = self.create_default_ssd_model_proto() model_proto.ssd.feature_extractor.type = 'unknown_feature_extractor' with self.assertRaises(ValueError): model_builder.build(model_proto, is_training=True) def test_unknown_faster_rcnn_feature_extractor(self): model_proto = self.create_default_faster_rcnn_model_proto() model_proto.faster_rcnn.feature_extractor.type = 'unknown_feature_extractor' with self.assertRaises(ValueError): model_builder.build(model_proto, is_training=True) def test_invalid_first_stage_nms_iou_threshold(self): model_proto = self.create_default_faster_rcnn_model_proto() model_proto.faster_rcnn.first_stage_nms_iou_threshold = 1.1 with self.assertRaisesRegex(ValueError, r'iou_threshold not in \[0, 1\.0\]'): model_builder.build(model_proto, is_training=True) model_proto.faster_rcnn.first_stage_nms_iou_threshold = -0.1 with self.assertRaisesRegex(ValueError, r'iou_threshold not in \[0, 1\.0\]'): model_builder.build(model_proto, is_training=True) def test_invalid_second_stage_batch_size(self): model_proto = self.create_default_faster_rcnn_model_proto() model_proto.faster_rcnn.first_stage_max_proposals = 1 model_proto.faster_rcnn.second_stage_batch_size = 2 with self.assertRaisesRegex( ValueError, 'second_stage_batch_size should be no greater ' 'than first_stage_max_proposals.'): model_builder.build(model_proto, is_training=True) def test_invalid_faster_rcnn_batchnorm_update(self): model_proto = self.create_default_faster_rcnn_model_proto() model_proto.faster_rcnn.inplace_batchnorm_update = True with self.assertRaisesRegex(ValueError, 'inplace batchnorm 
updates not supported'): model_builder.build(model_proto, is_training=True) def test_create_experimental_model(self): model_text_proto = """ experimental_model { name: 'model42' }""" build_func = lambda *args: 42 model_builder.EXPERIMENTAL_META_ARCH_BUILDER_MAP['model42'] = build_func model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) self.assertEqual(model_builder.build(model_proto, is_training=True), 42)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/model_builder_test.py
model_builder_test.py
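A minimal sketch (not part of the original test file) of the extension hook exercised by test_create_experimental_model above: builders registered in model_builder.EXPERIMENTAL_META_ARCH_BUILDER_MAP are looked up by the name set in the experimental_model message. The toy lambda returning 42 is illustrative only; a real entry would return a DetectionModel.

from google.protobuf import text_format
from object_detection.builders import model_builder
from object_detection.protos import model_pb2

# Register a toy builder under the name 'model42' (illustrative only).
model_builder.EXPERIMENTAL_META_ARCH_BUILDER_MAP['model42'] = lambda *args: 42

model_proto = model_pb2.DetectionModel()
text_format.Merge("experimental_model { name: 'model42' }", model_proto)
# build() dispatches to the registered function instead of the SSD or
# Faster R-CNN meta-architecture builders.
assert model_builder.build(model_proto, is_training=True) == 42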
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for box_coder_builder.""" import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.box_coders import faster_rcnn_box_coder from object_detection.box_coders import keypoint_box_coder from object_detection.box_coders import mean_stddev_box_coder from object_detection.box_coders import square_box_coder from object_detection.builders import box_coder_builder from object_detection.protos import box_coder_pb2 class BoxCoderBuilderTest(tf.test.TestCase): def test_build_faster_rcnn_box_coder_with_defaults(self): box_coder_text_proto = """ faster_rcnn_box_coder { } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertIsInstance(box_coder_object, faster_rcnn_box_coder.FasterRcnnBoxCoder) self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0]) def test_build_faster_rcnn_box_coder_with_non_default_parameters(self): box_coder_text_proto = """ faster_rcnn_box_coder { y_scale: 6.0 x_scale: 3.0 height_scale: 7.0 width_scale: 8.0 } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertIsInstance(box_coder_object, faster_rcnn_box_coder.FasterRcnnBoxCoder) self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0]) def test_build_keypoint_box_coder_with_defaults(self): box_coder_text_proto = """ keypoint_box_coder { } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder) self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0]) def test_build_keypoint_box_coder_with_non_default_parameters(self): box_coder_text_proto = """ keypoint_box_coder { num_keypoints: 6 y_scale: 6.0 x_scale: 3.0 height_scale: 7.0 width_scale: 8.0 } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder) self.assertEqual(box_coder_object._num_keypoints, 6) self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0]) def test_build_mean_stddev_box_coder(self): box_coder_text_proto = """ mean_stddev_box_coder { } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertTrue( isinstance(box_coder_object, mean_stddev_box_coder.MeanStddevBoxCoder)) def test_build_square_box_coder_with_defaults(self): box_coder_text_proto = """ square_box_coder { } 
""" box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertTrue( isinstance(box_coder_object, square_box_coder.SquareBoxCoder)) self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0]) def test_build_square_box_coder_with_non_default_parameters(self): box_coder_text_proto = """ square_box_coder { y_scale: 6.0 x_scale: 3.0 length_scale: 7.0 } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertTrue( isinstance(box_coder_object, square_box_coder.SquareBoxCoder)) self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0]) def test_raise_error_on_empty_box_coder(self): box_coder_text_proto = """ """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) with self.assertRaises(ValueError): box_coder_builder.build(box_coder_proto) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/box_coder_builder_test.py
box_coder_builder_test.py
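A minimal usage sketch distilled from the tests above: box_coder_builder.build consumes a box_coder_pb2.BoxCoder proto and returns the matching coder object. The scale values below are the non-default ones used in the test.

from google.protobuf import text_format
from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2

box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge("""
  faster_rcnn_box_coder {
    y_scale: 6.0
    x_scale: 3.0
    height_scale: 7.0
    width_scale: 8.0
  }
""", box_coder_proto)
# Per the test above, the resulting FasterRcnnBoxCoder carries
# scale factors [6.0, 3.0, 7.0, 8.0].
box_coder = box_coder_builder.build(box_coder_proto)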
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A function to build an object detection matcher from configuration.""" from object_detection.matchers import argmax_matcher from object_detection.protos import matcher_pb2 from object_detection.utils import tf_version if tf_version.is_tf1(): from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top def build(matcher_config): """Builds a matcher object based on the matcher config. Args: matcher_config: A matcher.proto object containing the config for the desired Matcher. Returns: Matcher based on the config. Raises: ValueError: On empty matcher proto. """ if not isinstance(matcher_config, matcher_pb2.Matcher): raise ValueError('matcher_config not of type matcher_pb2.Matcher.') if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher': matcher = matcher_config.argmax_matcher matched_threshold = unmatched_threshold = None if not matcher.ignore_thresholds: matched_threshold = matcher.matched_threshold unmatched_threshold = matcher.unmatched_threshold return argmax_matcher.ArgMaxMatcher( matched_threshold=matched_threshold, unmatched_threshold=unmatched_threshold, negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched, force_match_for_each_row=matcher.force_match_for_each_row, use_matmul_gather=matcher.use_matmul_gather) if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher': if tf_version.is_tf2(): raise ValueError('bipartite_matcher is not supported in TF 2.X') matcher = matcher_config.bipartite_matcher return bipartite_matcher.GreedyBipartiteMatcher(matcher.use_matmul_gather) raise ValueError('Empty matcher.')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/matcher_builder.py
matcher_builder.py
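A minimal sketch of calling the builder above; it assumes the argmax_matcher proto field names mirror the attributes read in build(), and the 0.7/0.3 thresholds are illustrative values rather than proto defaults.

from google.protobuf import text_format
from object_detection.builders import matcher_builder
from object_detection.protos import matcher_pb2

matcher_proto = matcher_pb2.Matcher()
text_format.Merge("""
  argmax_matcher {
    matched_threshold: 0.7
    unmatched_threshold: 0.3
  }
""", matcher_proto)
# Returns an argmax_matcher.ArgMaxMatcher; a bipartite_matcher config would
# instead return a GreedyBipartiteMatcher and is rejected under TF 2.X.
matcher = matcher_builder.build(matcher_proto)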
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions for quantized training and evaluation.""" import tensorflow.compat.v1 as tf import tf_slim as slim # pylint: disable=g-import-not-at-top try: from tensorflow.contrib import quantize as contrib_quantize except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top def build(graph_rewriter_config, is_training): """Returns a function that modifies default graph based on options. Args: graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto. is_training: whether in training or eval mode. """ def graph_rewrite_fn(): """Function to quantize weights and activation of the default graph.""" if (graph_rewriter_config.quantization.weight_bits != 8 or graph_rewriter_config.quantization.activation_bits != 8): raise ValueError('Only 8bit quantization is supported') # Quantize the graph by inserting quantize ops for weights and activations if is_training: contrib_quantize.experimental_create_training_graph( input_graph=tf.get_default_graph(), quant_delay=graph_rewriter_config.quantization.delay ) else: contrib_quantize.experimental_create_eval_graph( input_graph=tf.get_default_graph() ) slim.summarize_collection('quant_vars') return graph_rewrite_fn
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/graph_rewriter_builder.py
graph_rewriter_builder.py
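A minimal sketch of using the builder above; it only works under TF1 because it relies on tf.contrib.quantize, and the delay value is illustrative.

from google.protobuf import text_format
from object_detection.builders import graph_rewriter_builder
from object_detection.protos import graph_rewriter_pb2

graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
text_format.Merge("""
  quantization {
    delay: 500
    weight_bits: 8
    activation_bits: 8
  }
""", graph_rewriter_proto)
graph_rewrite_fn = graph_rewriter_builder.build(
    graph_rewriter_proto, is_training=True)
# Calling the returned function inserts fake-quantization ops into the
# current default graph; it should be invoked after the model graph is built.
graph_rewrite_fn()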
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for losses_builder.""" import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import losses_builder from object_detection.core import losses from object_detection.protos import losses_pb2 from object_detection.utils import ops class LocalizationLossBuilderTest(tf.test.TestCase): def test_build_weighted_l2_localization_loss(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss) def test_build_weighted_smooth_l1_localization_loss_default_delta(self): losses_text_proto = """ localization_loss { weighted_smooth_l1 { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(localization_loss, losses.WeightedSmoothL1LocalizationLoss) self.assertAlmostEqual(localization_loss._delta, 1.0) def test_build_weighted_smooth_l1_localization_loss_non_default_delta(self): losses_text_proto = """ localization_loss { weighted_smooth_l1 { delta: 0.1 } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(localization_loss, losses.WeightedSmoothL1LocalizationLoss) self.assertAlmostEqual(localization_loss._delta, 0.1) def test_build_weighted_iou_localization_loss(self): losses_text_proto = """ localization_loss { weighted_iou { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(localization_loss, losses.WeightedIOULocalizationLoss) def test_build_weighted_giou_localization_loss(self): losses_text_proto = """ localization_loss { weighted_giou { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(localization_loss, losses.WeightedGIOULocalizationLoss) def test_anchorwise_output(self): losses_text_proto = """ localization_loss { weighted_smooth_l1 { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(localization_loss, 
losses.WeightedSmoothL1LocalizationLoss) predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]]) targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]]) weights = tf.constant([[1.0, 1.0]]) loss = localization_loss(predictions, targets, weights=weights) self.assertEqual(loss.shape, [1, 2]) def test_raise_error_on_empty_localization_config(self): losses_text_proto = """ classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) with self.assertRaises(ValueError): losses_builder._build_localization_loss(losses_proto) class ClassificationLossBuilderTest(tf.test.TestCase): def test_build_weighted_sigmoid_classification_loss(self): losses_text_proto = """ classification_loss { weighted_sigmoid { } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.WeightedSigmoidClassificationLoss) def test_build_weighted_sigmoid_focal_classification_loss(self): losses_text_proto = """ classification_loss { weighted_sigmoid_focal { } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.SigmoidFocalClassificationLoss) self.assertAlmostEqual(classification_loss._alpha, None) self.assertAlmostEqual(classification_loss._gamma, 2.0) def test_build_weighted_sigmoid_focal_loss_non_default(self): losses_text_proto = """ classification_loss { weighted_sigmoid_focal { alpha: 0.25 gamma: 3.0 } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.SigmoidFocalClassificationLoss) self.assertAlmostEqual(classification_loss._alpha, 0.25) self.assertAlmostEqual(classification_loss._gamma, 3.0) def test_build_weighted_softmax_classification_loss(self): losses_text_proto = """ classification_loss { weighted_softmax { } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.WeightedSoftmaxClassificationLoss) def test_build_weighted_logits_softmax_classification_loss(self): losses_text_proto = """ classification_loss { weighted_logits_softmax { } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance( classification_loss, losses.WeightedSoftmaxClassificationAgainstLogitsLoss) def test_build_weighted_softmax_classification_loss_with_logit_scale(self): losses_text_proto = """ classification_loss { weighted_softmax { logit_scale: 2.0 } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.WeightedSoftmaxClassificationLoss) def test_build_bootstrapped_sigmoid_classification_loss(self): 
losses_text_proto = """ classification_loss { bootstrapped_sigmoid { alpha: 0.5 } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.BootstrappedSigmoidClassificationLoss) def test_anchorwise_output(self): losses_text_proto = """ classification_loss { weighted_sigmoid { anchorwise_output: true } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.WeightedSigmoidClassificationLoss) predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]]) targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]]) weights = tf.constant([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]) loss = classification_loss(predictions, targets, weights=weights) self.assertEqual(loss.shape, [1, 2, 3]) def test_raise_error_on_empty_config(self): losses_text_proto = """ localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) with self.assertRaises(ValueError): losses_builder.build(losses_proto) def test_build_penalty_reduced_logistic_focal_loss(self): losses_text_proto = """ classification_loss { penalty_reduced_logistic_focal_loss { alpha: 2.0 beta: 4.0 } } localization_loss { l1_localization_loss { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.PenaltyReducedLogisticFocalLoss) self.assertAlmostEqual(classification_loss._alpha, 2.0) self.assertAlmostEqual(classification_loss._beta, 4.0) def test_build_dice_loss(self): losses_text_proto = """ classification_loss { weighted_dice_classification_loss { squared_normalization: true } } localization_loss { l1_localization_loss { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(classification_loss, losses.WeightedDiceClassificationLoss) assert classification_loss._squared_normalization class HardExampleMinerBuilderTest(tf.test.TestCase): def test_do_not_build_hard_example_miner_by_default(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) self.assertEqual(hard_example_miner, None) def test_build_hard_example_miner_for_classification_loss(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { loss_type: CLASSIFICATION } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) self.assertEqual(hard_example_miner._loss_type, 'cls') def test_build_hard_example_miner_for_localization_loss(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { loss_type: LOCALIZATION } """ 
losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) self.assertEqual(hard_example_miner._loss_type, 'loc') def test_build_hard_example_miner_with_non_default_values(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { num_hard_examples: 32 iou_threshold: 0.5 loss_type: LOCALIZATION max_negatives_per_positive: 10 min_negatives_per_image: 3 } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) self.assertEqual(hard_example_miner._num_hard_examples, 32) self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5) self.assertEqual(hard_example_miner._max_negatives_per_positive, 10) self.assertEqual(hard_example_miner._min_negatives_per_image, 3) class LossBuilderTest(tf.test.TestCase): def test_build_all_loss_parameters(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { } classification_weight: 0.8 localization_weight: 0.2 """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, _, _) = losses_builder.build(losses_proto) self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) self.assertIsInstance(classification_loss, losses.WeightedSoftmaxClassificationLoss) self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss) self.assertAlmostEqual(classification_weight, 0.8) self.assertAlmostEqual(localization_weight, 0.2) def test_build_expected_sampling(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { } classification_weight: 0.8 localization_weight: 0.2 """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, _, _) = losses_builder.build(losses_proto) self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) self.assertIsInstance(classification_loss, losses.WeightedSoftmaxClassificationLoss) self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss) self.assertAlmostEqual(classification_weight, 0.8) self.assertAlmostEqual(localization_weight, 0.2) def test_build_reweighting_unmatched_anchors(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { } classification_weight: 0.8 localization_weight: 0.2 """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, _, _) = losses_builder.build(losses_proto) self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) self.assertIsInstance(classification_loss, losses.WeightedSoftmaxClassificationLoss) self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss) self.assertAlmostEqual(classification_weight, 0.8) self.assertAlmostEqual(localization_weight, 0.2) def 
test_raise_error_when_both_focal_loss_and_hard_example_miner(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_sigmoid_focal { } } hard_example_miner { } classification_weight: 0.8 localization_weight: 0.2 """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) with self.assertRaises(ValueError): losses_builder.build(losses_proto) class FasterRcnnClassificationLossBuilderTest(tf.test.TestCase): def test_build_sigmoid_loss(self): losses_text_proto = """ weighted_sigmoid { } """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertIsInstance(classification_loss, losses.WeightedSigmoidClassificationLoss) def test_build_softmax_loss(self): losses_text_proto = """ weighted_softmax { } """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertIsInstance(classification_loss, losses.WeightedSoftmaxClassificationLoss) def test_build_logits_softmax_loss(self): losses_text_proto = """ weighted_logits_softmax { } """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertTrue( isinstance(classification_loss, losses.WeightedSoftmaxClassificationAgainstLogitsLoss)) def test_build_sigmoid_focal_loss(self): losses_text_proto = """ weighted_sigmoid_focal { } """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertIsInstance(classification_loss, losses.SigmoidFocalClassificationLoss) def test_build_softmax_loss_by_default(self): losses_text_proto = """ """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertIsInstance(classification_loss, losses.WeightedSoftmaxClassificationLoss) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/losses_builder_test.py
losses_builder_test.py
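A minimal sketch of the builder call the tests above exercise: losses_builder.build takes a losses_pb2.Loss proto and returns a 7-tuple whose last two members these tests ignore. The weights shown are illustrative.

from google.protobuf import text_format
from object_detection.builders import losses_builder
from object_detection.protos import losses_pb2

losses_proto = losses_pb2.Loss()
text_format.Merge("""
  localization_loss { weighted_smooth_l1 { } }
  classification_loss { weighted_sigmoid { } }
  classification_weight: 1.0
  localization_weight: 1.0
""", losses_proto)
(classification_loss, localization_loss, classification_weight,
 localization_weight, hard_example_miner, _, _) = losses_builder.build(
     losses_proto)
# With no hard_example_miner message in the config, hard_example_miner is None,
# matching test_do_not_build_hard_example_miner_by_default above.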
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Builder for preprocessing steps.""" import tensorflow.compat.v1 as tf from object_detection.core import preprocessor from object_detection.protos import preprocessor_pb2 def _get_step_config_from_proto(preprocessor_step_config, step_name): """Returns the value of a field named step_name from proto. Args: preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object. step_name: Name of the field to get value from. Returns: result_dict: a sub proto message from preprocessor_step_config which will be later converted to a dictionary. Raises: ValueError: If field does not exist in proto. """ for field, value in preprocessor_step_config.ListFields(): if field.name == step_name: return value raise ValueError('Could not get field %s from proto!' % step_name) def _get_dict_from_proto(config): """Helper function to put all proto fields into a dictionary. For many preprocessing steps, there's a trivial 1-1 mapping from proto fields to function arguments. This function automatically populates a dictionary with the arguments from the proto. Protos that CANNOT be trivially populated include: * nested messages. * steps that check if an optional field is set (i.e. where None != 0). * protos that don't map 1-1 to arguments (i.e. list should be reshaped). * fields requiring additional validation (i.e. repeated field has n elements). Args: config: A protobuf object that does not violate the conditions above. Returns: result_dict: |config| converted into a python dictionary. """ result_dict = {} for field, value in config.ListFields(): result_dict[field.name] = value return result_dict # A map from a PreprocessingStep proto config field name to the preprocessing # function that should be used. The PreprocessingStep proto should be parsable # with _get_dict_from_proto.
PREPROCESSING_FUNCTION_MAP = { 'normalize_image': preprocessor.normalize_image, 'random_pixel_value_scale': preprocessor.random_pixel_value_scale, 'random_image_scale': preprocessor.random_image_scale, 'random_rgb_to_gray': preprocessor.random_rgb_to_gray, 'random_adjust_brightness': preprocessor.random_adjust_brightness, 'random_adjust_contrast': preprocessor.random_adjust_contrast, 'random_adjust_hue': preprocessor.random_adjust_hue, 'random_adjust_saturation': preprocessor.random_adjust_saturation, 'random_distort_color': preprocessor.random_distort_color, 'random_crop_to_aspect_ratio': preprocessor.random_crop_to_aspect_ratio, 'random_black_patches': preprocessor.random_black_patches, 'random_jpeg_quality': preprocessor.random_jpeg_quality, 'random_downscale_to_target_pixels': preprocessor.random_downscale_to_target_pixels, 'random_patch_gaussian': preprocessor.random_patch_gaussian, 'rgb_to_gray': preprocessor.rgb_to_gray, 'scale_boxes_to_pixel_coordinates': (preprocessor.scale_boxes_to_pixel_coordinates), 'subtract_channel_mean': preprocessor.subtract_channel_mean, 'convert_class_logits_to_softmax': preprocessor.convert_class_logits_to_softmax, 'adjust_gamma': preprocessor.adjust_gamma, } # A map to convert from preprocessor_pb2.ResizeImage.Method enum to # tf.image.ResizeMethod. RESIZE_METHOD_MAP = { preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA, preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC, preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR, preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: ( tf.image.ResizeMethod.NEAREST_NEIGHBOR), } def get_random_jitter_kwargs(proto): return { 'ratio': proto.ratio, 'jitter_mode': preprocessor_pb2.RandomJitterBoxes.JitterMode.Name(proto.jitter_mode ).lower() } def build(preprocessor_step_config): """Builds preprocessing step based on the configuration. Args: preprocessor_step_config: PreprocessingStep configuration proto. Returns: function, argmap: A callable function and an argument map to call function with. Raises: ValueError: On invalid configuration. 
""" step_type = preprocessor_step_config.WhichOneof('preprocessing_step') if step_type in PREPROCESSING_FUNCTION_MAP: preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type] step_config = _get_step_config_from_proto(preprocessor_step_config, step_type) function_args = _get_dict_from_proto(step_config) return (preprocessing_function, function_args) if step_type == 'random_horizontal_flip': config = preprocessor_step_config.random_horizontal_flip return (preprocessor.random_horizontal_flip, { 'keypoint_flip_permutation': tuple( config.keypoint_flip_permutation) or None, 'probability': config.probability or None, }) if step_type == 'random_vertical_flip': config = preprocessor_step_config.random_vertical_flip return (preprocessor.random_vertical_flip, { 'keypoint_flip_permutation': tuple( config.keypoint_flip_permutation) or None, 'probability': config.probability or None, }) if step_type == 'random_rotation90': config = preprocessor_step_config.random_rotation90 return (preprocessor.random_rotation90, { 'keypoint_rot_permutation': tuple( config.keypoint_rot_permutation) or None, 'probability': config.probability or None, }) if step_type == 'random_crop_image': config = preprocessor_step_config.random_crop_image return (preprocessor.random_crop_image, { 'min_object_covered': config.min_object_covered, 'aspect_ratio_range': (config.min_aspect_ratio, config.max_aspect_ratio), 'area_range': (config.min_area, config.max_area), 'overlap_thresh': config.overlap_thresh, 'clip_boxes': config.clip_boxes, 'random_coef': config.random_coef, }) if step_type == 'random_pad_image': config = preprocessor_step_config.random_pad_image min_image_size = None if (config.HasField('min_image_height') != config.HasField('min_image_width')): raise ValueError('min_image_height and min_image_width should be either ' 'both set or both unset.') if config.HasField('min_image_height'): min_image_size = (config.min_image_height, config.min_image_width) max_image_size = None if (config.HasField('max_image_height') != config.HasField('max_image_width')): raise ValueError('max_image_height and max_image_width should be either ' 'both set or both unset.') if config.HasField('max_image_height'): max_image_size = (config.max_image_height, config.max_image_width) pad_color = config.pad_color or None if pad_color: if len(pad_color) != 3: tf.logging.warn('pad_color should have 3 elements (RGB) if set!') pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) return (preprocessor.random_pad_image, { 'min_image_size': min_image_size, 'max_image_size': max_image_size, 'pad_color': pad_color, }) if step_type == 'random_absolute_pad_image': config = preprocessor_step_config.random_absolute_pad_image max_height_padding = config.max_height_padding or 1 max_width_padding = config.max_width_padding or 1 pad_color = config.pad_color or None if pad_color: if len(pad_color) != 3: tf.logging.warn('pad_color should have 3 elements (RGB) if set!') pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) return (preprocessor.random_absolute_pad_image, { 'max_height_padding': max_height_padding, 'max_width_padding': max_width_padding, 'pad_color': pad_color, }) if step_type == 'random_crop_pad_image': config = preprocessor_step_config.random_crop_pad_image min_padded_size_ratio = config.min_padded_size_ratio if min_padded_size_ratio and len(min_padded_size_ratio) != 2: raise ValueError('min_padded_size_ratio should have 2 elements if set!') max_padded_size_ratio = config.max_padded_size_ratio if max_padded_size_ratio 
and len(max_padded_size_ratio) != 2: raise ValueError('max_padded_size_ratio should have 2 elements if set!') pad_color = config.pad_color or None if pad_color: if len(pad_color) != 3: tf.logging.warn('pad_color should have 3 elements (RGB) if set!') pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) kwargs = { 'min_object_covered': config.min_object_covered, 'aspect_ratio_range': (config.min_aspect_ratio, config.max_aspect_ratio), 'area_range': (config.min_area, config.max_area), 'overlap_thresh': config.overlap_thresh, 'clip_boxes': config.clip_boxes, 'random_coef': config.random_coef, 'pad_color': pad_color, } if min_padded_size_ratio: kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) if max_padded_size_ratio: kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) return (preprocessor.random_crop_pad_image, kwargs) if step_type == 'random_resize_method': config = preprocessor_step_config.random_resize_method return (preprocessor.random_resize_method, { 'target_size': [config.target_height, config.target_width], }) if step_type == 'resize_image': config = preprocessor_step_config.resize_image method = RESIZE_METHOD_MAP[config.method] return (preprocessor.resize_image, { 'new_height': config.new_height, 'new_width': config.new_width, 'method': method }) if step_type == 'random_self_concat_image': config = preprocessor_step_config.random_self_concat_image return (preprocessor.random_self_concat_image, { 'concat_vertical_probability': config.concat_vertical_probability, 'concat_horizontal_probability': config.concat_horizontal_probability }) if step_type == 'ssd_random_crop': config = preprocessor_step_config.ssd_random_crop if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop, { 'min_object_covered': min_object_covered, 'aspect_ratio_range': aspect_ratio_range, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, }) return (preprocessor.ssd_random_crop, {}) if step_type == 'autoaugment_image': config = preprocessor_step_config.autoaugment_image return (preprocessor.autoaugment_image, { 'policy_name': config.policy_name, }) if step_type == 'drop_label_probabilistically': config = preprocessor_step_config.drop_label_probabilistically return (preprocessor.drop_label_probabilistically, { 'dropped_label': config.label, 'drop_probability': config.drop_probability, }) if step_type == 'remap_labels': config = preprocessor_step_config.remap_labels return (preprocessor.remap_labels, { 'original_labels': config.original_labels, 'new_label': config.new_label }) if step_type == 'ssd_random_crop_pad': config = preprocessor_step_config.ssd_random_crop_pad if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] 
min_padded_size_ratio = [tuple(op.min_padded_size_ratio) for op in config.operations] max_padded_size_ratio = [tuple(op.max_padded_size_ratio) for op in config.operations] pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b) for op in config.operations] return (preprocessor.ssd_random_crop_pad, { 'min_object_covered': min_object_covered, 'aspect_ratio_range': aspect_ratio_range, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, 'min_padded_size_ratio': min_padded_size_ratio, 'max_padded_size_ratio': max_padded_size_ratio, 'pad_color': pad_color, }) return (preprocessor.ssd_random_crop_pad, {}) if step_type == 'ssd_random_crop_fixed_aspect_ratio': config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop_fixed_aspect_ratio, { 'min_object_covered': min_object_covered, 'aspect_ratio': config.aspect_ratio, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, }) return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {}) if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio': config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio kwargs = {} aspect_ratio = config.aspect_ratio if aspect_ratio: kwargs['aspect_ratio'] = aspect_ratio min_padded_size_ratio = config.min_padded_size_ratio if min_padded_size_ratio: if len(min_padded_size_ratio) != 2: raise ValueError('min_padded_size_ratio should have 2 elements if set!') kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) max_padded_size_ratio = config.max_padded_size_ratio if max_padded_size_ratio: if len(max_padded_size_ratio) != 2: raise ValueError('max_padded_size_ratio should have 2 elements if set!') kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) if config.operations: kwargs['min_object_covered'] = [op.min_object_covered for op in config.operations] kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] kwargs['area_range'] = [(op.min_area, op.max_area) for op in config.operations] kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations] kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations] kwargs['random_coef'] = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs) if step_type == 'random_square_crop_by_scale': config = preprocessor_step_config.random_square_crop_by_scale return preprocessor.random_square_crop_by_scale, { 'scale_min': config.scale_min, 'scale_max': config.scale_max, 'max_border': config.max_border, 'num_scales': config.num_scales } if step_type == 'random_scale_crop_and_pad_to_square': config = preprocessor_step_config.random_scale_crop_and_pad_to_square return preprocessor.random_scale_crop_and_pad_to_square, { 'scale_min': config.scale_min, 'scale_max': config.scale_max, 'output_size': config.output_size, } if step_type == 'random_jitter_boxes': config = preprocessor_step_config.random_jitter_boxes kwargs = get_random_jitter_kwargs(config) return preprocessor.random_jitter_boxes, kwargs raise ValueError('Unknown 
preprocessing step.')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/preprocessor_builder.py
preprocessor_builder.py
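A minimal sketch of calling the builder above for one of the specially-handled steps; per the docstring, build() returns the preprocessing function together with an argument map, and the probability value is illustrative.

from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.protos import preprocessor_pb2

step_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge("""
  random_horizontal_flip {
    probability: 0.5
  }
""", step_proto)
# Returns (preprocessor.random_horizontal_flip, argmap); here the argmap
# carries probability=0.5 and keypoint_flip_permutation=None.
preprocess_fn, kwargs = preprocessor_builder.build(step_proto)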
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A function to build an object detection anchor generator from config.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import zip from object_detection.anchor_generators import flexible_grid_anchor_generator from object_detection.anchor_generators import grid_anchor_generator from object_detection.anchor_generators import multiple_grid_anchor_generator from object_detection.anchor_generators import multiscale_grid_anchor_generator from object_detection.protos import anchor_generator_pb2 def build(anchor_generator_config): """Builds an anchor generator based on the config. Args: anchor_generator_config: An anchor_generator.proto object containing the config for the desired anchor generator. Returns: Anchor generator based on the config. Raises: ValueError: On empty anchor generator proto. """ if not isinstance(anchor_generator_config, anchor_generator_pb2.AnchorGenerator): raise ValueError('anchor_generator_config not of type ' 'anchor_generator_pb2.AnchorGenerator') if anchor_generator_config.WhichOneof( 'anchor_generator_oneof') == 'grid_anchor_generator': grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator return grid_anchor_generator.GridAnchorGenerator( scales=[float(scale) for scale in grid_anchor_generator_config.scales], aspect_ratios=[float(aspect_ratio) for aspect_ratio in grid_anchor_generator_config.aspect_ratios], base_anchor_size=[grid_anchor_generator_config.height, grid_anchor_generator_config.width], anchor_stride=[grid_anchor_generator_config.height_stride, grid_anchor_generator_config.width_stride], anchor_offset=[grid_anchor_generator_config.height_offset, grid_anchor_generator_config.width_offset]) elif anchor_generator_config.WhichOneof( 'anchor_generator_oneof') == 'ssd_anchor_generator': ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator anchor_strides = None if ssd_anchor_generator_config.height_stride: anchor_strides = list( zip(ssd_anchor_generator_config.height_stride, ssd_anchor_generator_config.width_stride)) anchor_offsets = None if ssd_anchor_generator_config.height_offset: anchor_offsets = list( zip(ssd_anchor_generator_config.height_offset, ssd_anchor_generator_config.width_offset)) return multiple_grid_anchor_generator.create_ssd_anchors( num_layers=ssd_anchor_generator_config.num_layers, min_scale=ssd_anchor_generator_config.min_scale, max_scale=ssd_anchor_generator_config.max_scale, scales=[float(scale) for scale in ssd_anchor_generator_config.scales], aspect_ratios=ssd_anchor_generator_config.aspect_ratios, interpolated_scale_aspect_ratio=( ssd_anchor_generator_config.interpolated_scale_aspect_ratio), base_anchor_size=[ ssd_anchor_generator_config.base_anchor_height, ssd_anchor_generator_config.base_anchor_width ], 
anchor_strides=anchor_strides, anchor_offsets=anchor_offsets, reduce_boxes_in_lowest_layer=( ssd_anchor_generator_config.reduce_boxes_in_lowest_layer)) elif anchor_generator_config.WhichOneof( 'anchor_generator_oneof') == 'multiscale_anchor_generator': cfg = anchor_generator_config.multiscale_anchor_generator return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator( cfg.min_level, cfg.max_level, cfg.anchor_scale, [float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios], cfg.scales_per_octave, cfg.normalize_coordinates ) elif anchor_generator_config.WhichOneof( 'anchor_generator_oneof') == 'flexible_grid_anchor_generator': cfg = anchor_generator_config.flexible_grid_anchor_generator base_sizes = [] aspect_ratios = [] strides = [] offsets = [] for anchor_grid in cfg.anchor_grid: base_sizes.append(tuple(anchor_grid.base_sizes)) aspect_ratios.append(tuple(anchor_grid.aspect_ratios)) strides.append((anchor_grid.height_stride, anchor_grid.width_stride)) offsets.append((anchor_grid.height_offset, anchor_grid.width_offset)) return flexible_grid_anchor_generator.FlexibleGridAnchorGenerator( base_sizes, aspect_ratios, strides, offsets, cfg.normalize_coordinates) else: raise ValueError('Empty anchor generator.')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/anchor_generator_builder.py
anchor_generator_builder.py
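A minimal sketch of the grid_anchor_generator branch above, reusing the scale, aspect-ratio, and stride settings from the Faster R-CNN test config in model_builder_test.py earlier in this dump.

from google.protobuf import text_format
from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2

anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge("""
  grid_anchor_generator {
    scales: [0.25, 0.5, 1.0, 2.0]
    aspect_ratios: [0.5, 1.0, 2.0]
    height_stride: 16
    width_stride: 16
  }
""", anchor_generator_proto)
# Returns a grid_anchor_generator.GridAnchorGenerator configured with the
# scales, aspect ratios, strides, and (default) offsets above.
anchor_generator = anchor_generator_builder.build(anchor_generator_proto)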
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for decoder_builder.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import decoder_builder from object_detection.core import standard_fields as fields from object_detection.dataset_tools import seq_example_util from object_detection.protos import input_reader_pb2 from object_detection.utils import dataset_util from object_detection.utils import test_case def _get_labelmap_path(): """Returns an absolute path to label map file.""" parent_path = os.path.dirname(tf.resource_loader.get_data_files_path()) return os.path.join(parent_path, 'data', 'pet_label_map.pbtxt') class DecoderBuilderTest(test_case.TestCase): def _make_serialized_tf_example(self, has_additional_channels=False): image_tensor_np = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) additional_channels_tensor_np = np.random.randint( 255, size=(4, 5, 1)).astype(np.uint8) flat_mask = (4 * 5) * [1.0] def graph_fn(image_tensor): encoded_jpeg = tf.image.encode_jpeg(image_tensor) return encoded_jpeg encoded_jpeg = self.execute_cpu(graph_fn, [image_tensor_np]) encoded_additional_channels_jpeg = self.execute_cpu( graph_fn, [additional_channels_tensor_np]) features = { 'image/source_id': dataset_util.bytes_feature('0'.encode()), 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), 'image/height': dataset_util.int64_feature(4), 'image/width': dataset_util.int64_feature(5), 'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]), 'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]), 'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]), 'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]), 'image/object/class/label': dataset_util.int64_list_feature([2]), 'image/object/mask': dataset_util.float_list_feature(flat_mask), 'image/object/keypoint/x': dataset_util.float_list_feature([1.0, 1.0]), 'image/object/keypoint/y': dataset_util.float_list_feature([1.0, 1.0]) } if has_additional_channels: additional_channels_key = 'image/additional_channels/encoded' features[additional_channels_key] = dataset_util.bytes_list_feature( [encoded_additional_channels_jpeg] * 2) example = tf.train.Example(features=tf.train.Features(feature=features)) return example.SerializeToString() def _make_random_serialized_jpeg_images(self, num_frames, image_height, image_width): def graph_fn(): images = tf.cast(tf.random.uniform( [num_frames, image_height, image_width, 3], maxval=256, dtype=tf.int32), dtype=tf.uint8) images_list = tf.unstack(images, axis=0) encoded_images = [tf.io.encode_jpeg(image) for image in images_list] return encoded_images 
return self.execute_cpu(graph_fn, []) def _make_serialized_tf_sequence_example(self): num_frames = 4 image_height = 20 image_width = 30 image_source_ids = [str(i) for i in range(num_frames)] encoded_images = self._make_random_serialized_jpeg_images( num_frames, image_height, image_width) sequence_example_serialized = seq_example_util.make_sequence_example( dataset_name='video_dataset', video_id='video', encoded_images=encoded_images, image_height=image_height, image_width=image_width, image_source_ids=image_source_ids, image_format='JPEG', is_annotated=[[1], [1], [1], [1]], bboxes=[ [[]], # Frame 0. [[0., 0., 1., 1.]], # Frame 1. [[0., 0., 1., 1.], [0.1, 0.1, 0.2, 0.2]], # Frame 2. [[]], # Frame 3. ], label_strings=[ [], # Frame 0. ['Abyssinian'], # Frame 1. ['Abyssinian', 'american_bulldog'], # Frame 2. [], # Frame 3 ]).SerializeToString() return sequence_example_serialized def test_build_tf_record_input_reader(self): input_reader_text_proto = 'tf_record_input_reader {}' input_reader_proto = input_reader_pb2.InputReader() text_format.Parse(input_reader_text_proto, input_reader_proto) decoder = decoder_builder.build(input_reader_proto) serialized_seq_example = self._make_serialized_tf_example() def graph_fn(): tensor_dict = decoder.decode(serialized_seq_example) return (tensor_dict[fields.InputDataFields.image], tensor_dict[fields.InputDataFields.groundtruth_classes], tensor_dict[fields.InputDataFields.groundtruth_boxes]) (image, groundtruth_classes, groundtruth_boxes) = self.execute_cpu(graph_fn, []) self.assertEqual((4, 5, 3), image.shape) self.assertAllEqual([2], groundtruth_classes) self.assertEqual((1, 4), groundtruth_boxes.shape) self.assertAllEqual([0.0, 0.0, 1.0, 1.0], groundtruth_boxes[0]) def test_build_tf_record_input_reader_sequence_example(self): label_map_path = _get_labelmap_path() input_reader_text_proto = """ input_type: TF_SEQUENCE_EXAMPLE tf_record_input_reader {} """ input_reader_proto = input_reader_pb2.InputReader() input_reader_proto.label_map_path = label_map_path text_format.Parse(input_reader_text_proto, input_reader_proto) serialized_seq_example = self._make_serialized_tf_sequence_example() def graph_fn(): decoder = decoder_builder.build(input_reader_proto) tensor_dict = decoder.decode(serialized_seq_example) return (tensor_dict[fields.InputDataFields.image], tensor_dict[fields.InputDataFields.groundtruth_classes], tensor_dict[fields.InputDataFields.groundtruth_boxes], tensor_dict[fields.InputDataFields.num_groundtruth_boxes]) (actual_image, actual_groundtruth_classes, actual_groundtruth_boxes, actual_num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]] expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]] expected_num_groundtruth_boxes = [0, 1, 2, 0] # Sequence example images are encoded. 
self.assertEqual((4,), actual_image.shape) self.assertAllEqual(expected_groundtruth_classes, actual_groundtruth_classes) self.assertAllClose(expected_groundtruth_boxes, actual_groundtruth_boxes) self.assertAllClose( expected_num_groundtruth_boxes, actual_num_groundtruth_boxes) def test_build_tf_record_input_reader_and_load_instance_masks(self): input_reader_text_proto = """ load_instance_masks: true tf_record_input_reader {} """ input_reader_proto = input_reader_pb2.InputReader() text_format.Parse(input_reader_text_proto, input_reader_proto) decoder = decoder_builder.build(input_reader_proto) serialized_seq_example = self._make_serialized_tf_example() def graph_fn(): tensor_dict = decoder.decode(serialized_seq_example) return tensor_dict[fields.InputDataFields.groundtruth_instance_masks] masks = self.execute_cpu(graph_fn, []) self.assertAllEqual((1, 4, 5), masks.shape) def test_build_tf_record_input_reader_and_load_keypoint_depth(self): input_reader_text_proto = """ load_keypoint_depth_features: true num_keypoints: 2 tf_record_input_reader {} """ input_reader_proto = input_reader_pb2.InputReader() text_format.Parse(input_reader_text_proto, input_reader_proto) decoder = decoder_builder.build(input_reader_proto) serialized_example = self._make_serialized_tf_example() def graph_fn(): tensor_dict = decoder.decode(serialized_example) return (tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths], tensor_dict[ fields.InputDataFields.groundtruth_keypoint_depth_weights]) (kpts_depths, kpts_depth_weights) = self.execute_cpu(graph_fn, []) self.assertAllEqual((1, 2), kpts_depths.shape) self.assertAllEqual((1, 2), kpts_depth_weights.shape) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/decoder_builder_test.py
decoder_builder_test.py
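A minimal sketch of the decoder construction these tests perform; actually decoding requires a serialized tf.train.Example such as the one assembled by _make_serialized_tf_example above.

from google.protobuf import text_format
from object_detection.builders import decoder_builder
from object_detection.protos import input_reader_pb2

input_reader_proto = input_reader_pb2.InputReader()
text_format.Parse('tf_record_input_reader {}', input_reader_proto)
decoder = decoder_builder.build(input_reader_proto)
# decoder.decode(serialized_example) yields a tensor dict keyed by
# fields.InputDataFields (image, groundtruth_boxes, groundtruth_classes, ...),
# as exercised in test_build_tf_record_input_reader above.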
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A function to build a DetectionModel from configuration.""" import functools import sys from absl import logging from object_detection.builders import anchor_generator_builder from object_detection.builders import box_coder_builder from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.builders import image_resizer_builder from object_detection.builders import losses_builder from object_detection.builders import matcher_builder from object_detection.builders import post_processing_builder from object_detection.builders import region_similarity_calculator_builder as sim_calc from object_detection.core import balanced_positive_negative_sampler as sampler from object_detection.core import post_processing from object_detection.core import target_assigner from object_detection.meta_architectures import center_net_meta_arch from object_detection.meta_architectures import context_rcnn_meta_arch from object_detection.meta_architectures import deepmac_meta_arch from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.meta_architectures import rfcn_meta_arch from object_detection.meta_architectures import ssd_meta_arch from object_detection.predictors.heads import mask_head from object_detection.protos import losses_pb2 from object_detection.protos import model_pb2 from object_detection.utils import label_map_util from object_detection.utils import ops from object_detection.utils import spatial_transform_ops as spatial_ops from object_detection.utils import tf_version ## Feature Extractors for TF ## This section conditionally imports different feature extractors based on the ## Tensorflow version. 
## # pylint: disable=g-import-not-at-top if tf_version.is_tf2(): from object_detection.models import center_net_hourglass_feature_extractor from object_detection.models import center_net_mobilenet_v2_feature_extractor from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor from object_detection.models import center_net_resnet_feature_extractor from object_detection.models import center_net_resnet_v1_fpn_feature_extractor from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res_keras from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_resnet_keras from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_v1_fpn_keras from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_resnet_fpn_keras from object_detection.models.ssd_mobilenet_v1_fpn_keras_feature_extractor import SSDMobileNetV1FpnKerasFeatureExtractor from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor from object_detection.models.ssd_mobilenet_v2_fpn_keras_feature_extractor import SSDMobileNetV2FpnKerasFeatureExtractor from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor from object_detection.predictors import rfcn_keras_box_predictor if sys.version_info[0] >= 3: from object_detection.models import ssd_efficientnet_bifpn_feature_extractor as ssd_efficientnet_bifpn if tf_version.is_tf1(): from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2 from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1 from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor from object_detection.models.ssd_mobilenet_v2_mnasfpn_feature_extractor import SSDMobileNetV2MnasFPNFeatureExtractor from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor from object_detection.models.ssd_mobilenet_edgetpu_feature_extractor import SSDMobileNetEdgeTPUFeatureExtractor from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3LargeFeatureExtractor from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3SmallFeatureExtractor from 
object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetCPUFeatureExtractor from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetDSPFeatureExtractor from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetEdgeTPUFeatureExtractor from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetGPUFeatureExtractor from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor from object_detection.predictors import rfcn_box_predictor # pylint: enable=g-import-not-at-top if tf_version.is_tf2(): SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { 'ssd_mobilenet_v1_keras': SSDMobileNetV1KerasFeatureExtractor, 'ssd_mobilenet_v1_fpn_keras': SSDMobileNetV1FpnKerasFeatureExtractor, 'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor, 'ssd_mobilenet_v2_fpn_keras': SSDMobileNetV2FpnKerasFeatureExtractor, 'ssd_resnet50_v1_fpn_keras': ssd_resnet_v1_fpn_keras.SSDResNet50V1FpnKerasFeatureExtractor, 'ssd_resnet101_v1_fpn_keras': ssd_resnet_v1_fpn_keras.SSDResNet101V1FpnKerasFeatureExtractor, 'ssd_resnet152_v1_fpn_keras': ssd_resnet_v1_fpn_keras.SSDResNet152V1FpnKerasFeatureExtractor, 'ssd_efficientnet-b0_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB0BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b1_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB1BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b2_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB2BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b3_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB3BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b4_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB4BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b5_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB5BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b6_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB6BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b7_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB7BiFPNKerasFeatureExtractor, } FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { 'faster_rcnn_resnet50_keras': frcnn_resnet_keras.FasterRCNNResnet50KerasFeatureExtractor, 'faster_rcnn_resnet101_keras': frcnn_resnet_keras.FasterRCNNResnet101KerasFeatureExtractor, 'faster_rcnn_resnet152_keras': frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor, 'faster_rcnn_inception_resnet_v2_keras': frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor, 'faster_rcnn_resnet50_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor, 'faster_rcnn_resnet101_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet101FpnKerasFeatureExtractor, 'faster_rcnn_resnet152_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet152FpnKerasFeatureExtractor, } CENTER_NET_EXTRACTOR_FUNCTION_MAP = { 'resnet_v2_50': center_net_resnet_feature_extractor.resnet_v2_50, 'resnet_v2_101': center_net_resnet_feature_extractor.resnet_v2_101, 'resnet_v1_18_fpn': center_net_resnet_v1_fpn_feature_extractor.resnet_v1_18_fpn, 'resnet_v1_34_fpn': center_net_resnet_v1_fpn_feature_extractor.resnet_v1_34_fpn, 'resnet_v1_50_fpn': center_net_resnet_v1_fpn_feature_extractor.resnet_v1_50_fpn, 'resnet_v1_101_fpn': center_net_resnet_v1_fpn_feature_extractor.resnet_v1_101_fpn, 'hourglass_10': center_net_hourglass_feature_extractor.hourglass_10, 'hourglass_20': center_net_hourglass_feature_extractor.hourglass_20, 'hourglass_32': center_net_hourglass_feature_extractor.hourglass_32, 'hourglass_52': center_net_hourglass_feature_extractor.hourglass_52, 
'hourglass_104': center_net_hourglass_feature_extractor.hourglass_104, 'mobilenet_v2': center_net_mobilenet_v2_feature_extractor.mobilenet_v2, 'mobilenet_v2_fpn': center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn, 'mobilenet_v2_fpn_sep_conv': center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn, } FEATURE_EXTRACTOR_MAPS = [ CENTER_NET_EXTRACTOR_FUNCTION_MAP, FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP, SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP ] if tf_version.is_tf1(): SSD_FEATURE_EXTRACTOR_CLASS_MAP = { 'ssd_inception_v2': SSDInceptionV2FeatureExtractor, 'ssd_inception_v3': SSDInceptionV3FeatureExtractor, 'ssd_mobilenet_v1': SSDMobileNetV1FeatureExtractor, 'ssd_mobilenet_v1_fpn': SSDMobileNetV1FpnFeatureExtractor, 'ssd_mobilenet_v1_ppn': SSDMobileNetV1PpnFeatureExtractor, 'ssd_mobilenet_v2': SSDMobileNetV2FeatureExtractor, 'ssd_mobilenet_v2_fpn': SSDMobileNetV2FpnFeatureExtractor, 'ssd_mobilenet_v2_mnasfpn': SSDMobileNetV2MnasFPNFeatureExtractor, 'ssd_mobilenet_v3_large': SSDMobileNetV3LargeFeatureExtractor, 'ssd_mobilenet_v3_small': SSDMobileNetV3SmallFeatureExtractor, 'ssd_mobilenet_edgetpu': SSDMobileNetEdgeTPUFeatureExtractor, 'ssd_resnet50_v1_fpn': ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor, 'ssd_resnet101_v1_fpn': ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor, 'ssd_resnet152_v1_fpn': ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor, 'ssd_resnet50_v1_ppn': ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor, 'ssd_resnet101_v1_ppn': ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor, 'ssd_resnet152_v1_ppn': ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor, 'embedded_ssd_mobilenet_v1': EmbeddedSSDMobileNetV1FeatureExtractor, 'ssd_pnasnet': SSDPNASNetFeatureExtractor, 'ssd_mobiledet_cpu': SSDMobileDetCPUFeatureExtractor, 'ssd_mobiledet_dsp': SSDMobileDetDSPFeatureExtractor, 'ssd_mobiledet_edgetpu': SSDMobileDetEdgeTPUFeatureExtractor, 'ssd_mobiledet_gpu': SSDMobileDetGPUFeatureExtractor, } FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = { 'faster_rcnn_nas': frcnn_nas.FasterRCNNNASFeatureExtractor, 'faster_rcnn_pnas': frcnn_pnas.FasterRCNNPNASFeatureExtractor, 'faster_rcnn_inception_resnet_v2': frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor, 'faster_rcnn_inception_v2': frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor, 'faster_rcnn_resnet50': frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, 'faster_rcnn_resnet101': frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, 'faster_rcnn_resnet152': frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor, } CENTER_NET_EXTRACTOR_FUNCTION_MAP = {} FEATURE_EXTRACTOR_MAPS = [ SSD_FEATURE_EXTRACTOR_CLASS_MAP, FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP, CENTER_NET_EXTRACTOR_FUNCTION_MAP ] def _check_feature_extractor_exists(feature_extractor_type): feature_extractors = set().union(*FEATURE_EXTRACTOR_MAPS) if feature_extractor_type not in feature_extractors: raise ValueError('{} is not supported. See `model_builder.py` for features ' 'extractors compatible with different versions of ' 'Tensorflow'.format(feature_extractor_type)) def _build_ssd_feature_extractor(feature_extractor_config, is_training, freeze_batchnorm, reuse_weights=None): """Builds a ssd_meta_arch.SSDFeatureExtractor based on config. Args: feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto. is_training: True if this feature extractor is being built for training. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 
1), it is desirable to freeze batch norm update and use pretrained batch norm params. reuse_weights: if the feature extractor should reuse weights. Returns: ssd_meta_arch.SSDFeatureExtractor based on config. Raises: ValueError: On invalid feature extractor type. """ feature_type = feature_extractor_config.type depth_multiplier = feature_extractor_config.depth_multiplier min_depth = feature_extractor_config.min_depth pad_to_multiple = feature_extractor_config.pad_to_multiple use_explicit_padding = feature_extractor_config.use_explicit_padding use_depthwise = feature_extractor_config.use_depthwise is_keras = tf_version.is_tf2() if is_keras: conv_hyperparams = hyperparams_builder.KerasLayerHyperparams( feature_extractor_config.conv_hyperparams) else: conv_hyperparams = hyperparams_builder.build( feature_extractor_config.conv_hyperparams, is_training) override_base_feature_extractor_hyperparams = ( feature_extractor_config.override_base_feature_extractor_hyperparams) if not is_keras and feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP: raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type)) if is_keras: feature_extractor_class = SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[ feature_type] else: feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type] kwargs = { 'is_training': is_training, 'depth_multiplier': depth_multiplier, 'min_depth': min_depth, 'pad_to_multiple': pad_to_multiple, 'use_explicit_padding': use_explicit_padding, 'use_depthwise': use_depthwise, 'override_base_feature_extractor_hyperparams': override_base_feature_extractor_hyperparams } if feature_extractor_config.HasField('replace_preprocessor_with_placeholder'): kwargs.update({ 'replace_preprocessor_with_placeholder': feature_extractor_config.replace_preprocessor_with_placeholder }) if feature_extractor_config.HasField('num_layers'): kwargs.update({'num_layers': feature_extractor_config.num_layers}) if is_keras: kwargs.update({ 'conv_hyperparams': conv_hyperparams, 'inplace_batchnorm_update': False, 'freeze_batchnorm': freeze_batchnorm }) else: kwargs.update({ 'conv_hyperparams_fn': conv_hyperparams, 'reuse_weights': reuse_weights, }) if feature_extractor_config.HasField('fpn'): kwargs.update({ 'fpn_min_level': feature_extractor_config.fpn.min_level, 'fpn_max_level': feature_extractor_config.fpn.max_level, 'additional_layer_depth': feature_extractor_config.fpn.additional_layer_depth, }) if feature_extractor_config.HasField('bifpn'): kwargs.update({ 'bifpn_min_level': feature_extractor_config.bifpn.min_level, 'bifpn_max_level': feature_extractor_config.bifpn.max_level, 'bifpn_num_iterations': feature_extractor_config.bifpn.num_iterations, 'bifpn_num_filters': feature_extractor_config.bifpn.num_filters, 'bifpn_combine_method': feature_extractor_config.bifpn.combine_method, }) return feature_extractor_class(**kwargs) def _build_ssd_model(ssd_config, is_training, add_summaries): """Builds an SSD detection model based on the model config. Args: ssd_config: A ssd.proto object containing the config for the desired SSDMetaArch. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tf summaries in the model. Returns: SSDMetaArch based on the config. Raises: ValueError: If ssd_config.type is not recognized (i.e. not registered in model_class_map). 
""" num_classes = ssd_config.num_classes _check_feature_extractor_exists(ssd_config.feature_extractor.type) # Feature extractor feature_extractor = _build_ssd_feature_extractor( feature_extractor_config=ssd_config.feature_extractor, freeze_batchnorm=ssd_config.freeze_batchnorm, is_training=is_training) box_coder = box_coder_builder.build(ssd_config.box_coder) matcher = matcher_builder.build(ssd_config.matcher) region_similarity_calculator = sim_calc.build( ssd_config.similarity_calculator) encode_background_as_zeros = ssd_config.encode_background_as_zeros negative_class_weight = ssd_config.negative_class_weight anchor_generator = anchor_generator_builder.build( ssd_config.anchor_generator) if feature_extractor.is_keras_model: ssd_box_predictor = box_predictor_builder.build_keras( hyperparams_fn=hyperparams_builder.KerasLayerHyperparams, freeze_batchnorm=ssd_config.freeze_batchnorm, inplace_batchnorm_update=False, num_predictions_per_location_list=anchor_generator .num_anchors_per_location(), box_predictor_config=ssd_config.box_predictor, is_training=is_training, num_classes=num_classes, add_background_class=ssd_config.add_background_class) else: ssd_box_predictor = box_predictor_builder.build( hyperparams_builder.build, ssd_config.box_predictor, is_training, num_classes, ssd_config.add_background_class) image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build( ssd_config.post_processing) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, random_example_sampler, expected_loss_weights_fn) = losses_builder.build(ssd_config.loss) normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize equalization_loss_config = ops.EqualizationLossConfig( weight=ssd_config.loss.equalization_loss.weight, exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes) target_assigner_instance = target_assigner.TargetAssigner( region_similarity_calculator, matcher, box_coder, negative_class_weight=negative_class_weight) ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch kwargs = {} return ssd_meta_arch_fn( is_training=is_training, anchor_generator=anchor_generator, box_predictor=ssd_box_predictor, box_coder=box_coder, feature_extractor=feature_extractor, encode_background_as_zeros=encode_background_as_zeros, image_resizer_fn=image_resizer_fn, non_max_suppression_fn=non_max_suppression_fn, score_conversion_fn=score_conversion_fn, classification_loss=classification_loss, localization_loss=localization_loss, classification_loss_weight=classification_weight, localization_loss_weight=localization_weight, normalize_loss_by_num_matches=normalize_loss_by_num_matches, hard_example_miner=hard_example_miner, target_assigner_instance=target_assigner_instance, add_summaries=add_summaries, normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, freeze_batchnorm=ssd_config.freeze_batchnorm, inplace_batchnorm_update=ssd_config.inplace_batchnorm_update, add_background_class=ssd_config.add_background_class, explicit_background_class=ssd_config.explicit_background_class, random_example_sampler=random_example_sampler, expected_loss_weights_fn=expected_loss_weights_fn, use_confidences_as_targets=ssd_config.use_confidences_as_targets, implicit_example_weight=ssd_config.implicit_example_weight, equalization_loss_config=equalization_loss_config, return_raw_detections_during_predict=( 
ssd_config.return_raw_detections_during_predict), **kwargs) def _build_faster_rcnn_feature_extractor( feature_extractor_config, is_training, reuse_weights=True, inplace_batchnorm_update=False): """Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. Args: feature_extractor_config: A FasterRcnnFeatureExtractor proto config from faster_rcnn.proto. is_training: True if this feature extractor is being built for training. reuse_weights: if the feature extractor should reuse weights. inplace_batchnorm_update: Whether to update batch_norm inplace during training. This is required for batch norm to work correctly on TPUs. When this is false, user must add a control dependency on tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch norm moving average parameters. Returns: faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. Raises: ValueError: On invalid feature extractor type. """ if inplace_batchnorm_update: raise ValueError('inplace batchnorm updates not supported.') feature_type = feature_extractor_config.type first_stage_features_stride = ( feature_extractor_config.first_stage_features_stride) batch_norm_trainable = feature_extractor_config.batch_norm_trainable if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP: raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( feature_type)) feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[ feature_type] return feature_extractor_class( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights=reuse_weights) def _build_faster_rcnn_keras_feature_extractor( feature_extractor_config, is_training, inplace_batchnorm_update=False): """Builds a faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor from config. Args: feature_extractor_config: A FasterRcnnFeatureExtractor proto config from faster_rcnn.proto. is_training: True if this feature extractor is being built for training. inplace_batchnorm_update: Whether to update batch_norm inplace during training. This is required for batch norm to work correctly on TPUs. When this is false, user must add a control dependency on tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch norm moving average parameters. Returns: faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor based on config. Raises: ValueError: On invalid feature extractor type. 
""" if inplace_batchnorm_update: raise ValueError('inplace batchnorm updates not supported.') feature_type = feature_extractor_config.type first_stage_features_stride = ( feature_extractor_config.first_stage_features_stride) batch_norm_trainable = feature_extractor_config.batch_norm_trainable if feature_type not in FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP: raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( feature_type)) feature_extractor_class = FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[ feature_type] kwargs = {} if feature_extractor_config.HasField('conv_hyperparams'): kwargs.update({ 'conv_hyperparams': hyperparams_builder.KerasLayerHyperparams( feature_extractor_config.conv_hyperparams), 'override_base_feature_extractor_hyperparams': feature_extractor_config.override_base_feature_extractor_hyperparams }) if feature_extractor_config.HasField('fpn'): kwargs.update({ 'fpn_min_level': feature_extractor_config.fpn.min_level, 'fpn_max_level': feature_extractor_config.fpn.max_level, 'additional_layer_depth': feature_extractor_config.fpn.additional_layer_depth, }) return feature_extractor_class( is_training, first_stage_features_stride, batch_norm_trainable, **kwargs) def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries): """Builds a Faster R-CNN or R-FCN detection model based on the model config. Builds R-FCN model if the second_stage_box_predictor in the config is of type `rfcn_box_predictor` else builds a Faster R-CNN model. Args: frcnn_config: A faster_rcnn.proto object containing the config for the desired FasterRCNNMetaArch or RFCNMetaArch. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tf summaries in the model. Returns: FasterRCNNMetaArch based on the config. Raises: ValueError: If frcnn_config.type is not recognized (i.e. not registered in model_class_map). 
""" num_classes = frcnn_config.num_classes image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer) _check_feature_extractor_exists(frcnn_config.feature_extractor.type) is_keras = tf_version.is_tf2() if is_keras: feature_extractor = _build_faster_rcnn_keras_feature_extractor( frcnn_config.feature_extractor, is_training, inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) else: feature_extractor = _build_faster_rcnn_feature_extractor( frcnn_config.feature_extractor, is_training, inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) number_of_stages = frcnn_config.number_of_stages first_stage_anchor_generator = anchor_generator_builder.build( frcnn_config.first_stage_anchor_generator) first_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'proposal', use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate if is_keras: first_stage_box_predictor_arg_scope_fn = ( hyperparams_builder.KerasLayerHyperparams( frcnn_config.first_stage_box_predictor_conv_hyperparams)) else: first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build( frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training) first_stage_box_predictor_kernel_size = ( frcnn_config.first_stage_box_predictor_kernel_size) first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size use_static_shapes = frcnn_config.use_static_shapes and ( frcnn_config.use_static_shapes_for_eval or is_training) first_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=frcnn_config.first_stage_positive_balance_fraction, is_static=(frcnn_config.use_static_balanced_label_sampler and use_static_shapes)) first_stage_max_proposals = frcnn_config.first_stage_max_proposals if (frcnn_config.first_stage_nms_iou_threshold < 0 or frcnn_config.first_stage_nms_iou_threshold > 1.0): raise ValueError('iou_threshold not in [0, 1.0].') if (is_training and frcnn_config.second_stage_batch_size > first_stage_max_proposals): raise ValueError('second_stage_batch_size should be no greater than ' 'first_stage_max_proposals.') first_stage_non_max_suppression_fn = functools.partial( post_processing.batch_multiclass_non_max_suppression, score_thresh=frcnn_config.first_stage_nms_score_threshold, iou_thresh=frcnn_config.first_stage_nms_iou_threshold, max_size_per_class=frcnn_config.first_stage_max_proposals, max_total_size=frcnn_config.first_stage_max_proposals, use_static_shapes=use_static_shapes, use_partitioned_nms=frcnn_config.use_partitioned_nms_in_first_stage, use_combined_nms=frcnn_config.use_combined_nms_in_first_stage) first_stage_loc_loss_weight = ( frcnn_config.first_stage_localization_loss_weight) first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight initial_crop_size = frcnn_config.initial_crop_size maxpool_kernel_size = frcnn_config.maxpool_kernel_size maxpool_stride = frcnn_config.maxpool_stride second_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'detection', use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) if is_keras: second_stage_box_predictor = box_predictor_builder.build_keras( hyperparams_builder.KerasLayerHyperparams, freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[1], box_predictor_config=frcnn_config.second_stage_box_predictor, is_training=is_training, num_classes=num_classes) else: 
second_stage_box_predictor = box_predictor_builder.build( hyperparams_builder.build, frcnn_config.second_stage_box_predictor, is_training=is_training, num_classes=num_classes) second_stage_batch_size = frcnn_config.second_stage_batch_size second_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=frcnn_config.second_stage_balance_fraction, is_static=(frcnn_config.use_static_balanced_label_sampler and use_static_shapes)) (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn ) = post_processing_builder.build(frcnn_config.second_stage_post_processing) second_stage_localization_loss_weight = ( frcnn_config.second_stage_localization_loss_weight) second_stage_classification_loss = ( losses_builder.build_faster_rcnn_classification_loss( frcnn_config.second_stage_classification_loss)) second_stage_classification_loss_weight = ( frcnn_config.second_stage_classification_loss_weight) second_stage_mask_prediction_loss_weight = ( frcnn_config.second_stage_mask_prediction_loss_weight) hard_example_miner = None if frcnn_config.HasField('hard_example_miner'): hard_example_miner = losses_builder.build_hard_example_miner( frcnn_config.hard_example_miner, second_stage_classification_loss_weight, second_stage_localization_loss_weight) crop_and_resize_fn = ( spatial_ops.multilevel_matmul_crop_and_resize if frcnn_config.use_matmul_crop_and_resize else spatial_ops.multilevel_native_crop_and_resize) clip_anchors_to_image = ( frcnn_config.clip_anchors_to_image) common_kwargs = { 'is_training': is_training, 'num_classes': num_classes, 'image_resizer_fn': image_resizer_fn, 'feature_extractor': feature_extractor, 'number_of_stages': number_of_stages, 'first_stage_anchor_generator': first_stage_anchor_generator, 'first_stage_target_assigner': first_stage_target_assigner, 'first_stage_atrous_rate': first_stage_atrous_rate, 'first_stage_box_predictor_arg_scope_fn': first_stage_box_predictor_arg_scope_fn, 'first_stage_box_predictor_kernel_size': first_stage_box_predictor_kernel_size, 'first_stage_box_predictor_depth': first_stage_box_predictor_depth, 'first_stage_minibatch_size': first_stage_minibatch_size, 'first_stage_sampler': first_stage_sampler, 'first_stage_non_max_suppression_fn': first_stage_non_max_suppression_fn, 'first_stage_max_proposals': first_stage_max_proposals, 'first_stage_localization_loss_weight': first_stage_loc_loss_weight, 'first_stage_objectness_loss_weight': first_stage_obj_loss_weight, 'second_stage_target_assigner': second_stage_target_assigner, 'second_stage_batch_size': second_stage_batch_size, 'second_stage_sampler': second_stage_sampler, 'second_stage_non_max_suppression_fn': second_stage_non_max_suppression_fn, 'second_stage_score_conversion_fn': second_stage_score_conversion_fn, 'second_stage_localization_loss_weight': second_stage_localization_loss_weight, 'second_stage_classification_loss': second_stage_classification_loss, 'second_stage_classification_loss_weight': second_stage_classification_loss_weight, 'hard_example_miner': hard_example_miner, 'add_summaries': add_summaries, 'crop_and_resize_fn': crop_and_resize_fn, 'clip_anchors_to_image': clip_anchors_to_image, 'use_static_shapes': use_static_shapes, 'resize_masks': frcnn_config.resize_masks, 'return_raw_detections_during_predict': frcnn_config.return_raw_detections_during_predict, 'output_final_box_features': frcnn_config.output_final_box_features, 'output_final_box_rpn_features': frcnn_config.output_final_box_rpn_features, } if ((not is_keras and isinstance(second_stage_box_predictor, 
                                   rfcn_box_predictor.RfcnBoxPredictor)) or
      (is_keras and
       isinstance(second_stage_box_predictor,
                  rfcn_keras_box_predictor.RfcnKerasBoxPredictor))):
    return rfcn_meta_arch.RFCNMetaArch(
        second_stage_rfcn_box_predictor=second_stage_box_predictor,
        **common_kwargs)
  elif frcnn_config.HasField('context_config'):
    context_config = frcnn_config.context_config
    common_kwargs.update({
        'attention_bottleneck_dimension':
            context_config.attention_bottleneck_dimension,
        'attention_temperature':
            context_config.attention_temperature,
        'use_self_attention':
            context_config.use_self_attention,
        'use_long_term_attention':
            context_config.use_long_term_attention,
        'self_attention_in_sequence':
            context_config.self_attention_in_sequence,
        'num_attention_heads':
            context_config.num_attention_heads,
        'num_attention_layers':
            context_config.num_attention_layers,
        'attention_position':
            context_config.attention_position
    })
    return context_rcnn_meta_arch.ContextRCNNMetaArch(
        initial_crop_size=initial_crop_size,
        maxpool_kernel_size=maxpool_kernel_size,
        maxpool_stride=maxpool_stride,
        second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
        second_stage_mask_prediction_loss_weight=(
            second_stage_mask_prediction_loss_weight),
        **common_kwargs)
  else:
    return faster_rcnn_meta_arch.FasterRCNNMetaArch(
        initial_crop_size=initial_crop_size,
        maxpool_kernel_size=maxpool_kernel_size,
        maxpool_stride=maxpool_stride,
        second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
        second_stage_mask_prediction_loss_weight=(
            second_stage_mask_prediction_loss_weight),
        **common_kwargs)


EXPERIMENTAL_META_ARCH_BUILDER_MAP = {
}


def _build_experimental_model(config, is_training, add_summaries=True):
  return EXPERIMENTAL_META_ARCH_BUILDER_MAP[config.name](
      is_training, add_summaries)


# The class ID in the groundtruth/model architecture is usually 0-based while
# the ID in the label map is 1-based. The offset is used to convert between
# the two.
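# For example (hypothetical label map entry): item { id: 1 name: 'person' }
# in the label map corresponds to class id 0 inside the model, i.e.
# model_class_id = label_map_item.id - CLASS_ID_OFFSET = 1 - 1 = 0.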
CLASS_ID_OFFSET = 1 KEYPOINT_STD_DEV_DEFAULT = 1.0 def keypoint_proto_to_params(kp_config, keypoint_map_dict): """Converts CenterNet.KeypointEstimation proto to parameter namedtuple.""" label_map_item = keypoint_map_dict[kp_config.keypoint_class_name] classification_loss, localization_loss, _, _, _, _, _ = ( losses_builder.build(kp_config.loss)) keypoint_indices = [ keypoint.id for keypoint in label_map_item.keypoints ] keypoint_labels = [ keypoint.label for keypoint in label_map_item.keypoints ] keypoint_std_dev_dict = { label: KEYPOINT_STD_DEV_DEFAULT for label in keypoint_labels } if kp_config.keypoint_label_to_std: for label, value in kp_config.keypoint_label_to_std.items(): keypoint_std_dev_dict[label] = value keypoint_std_dev = [keypoint_std_dev_dict[label] for label in keypoint_labels] if kp_config.HasField('heatmap_head_params'): heatmap_head_num_filters = list(kp_config.heatmap_head_params.num_filters) heatmap_head_kernel_sizes = list(kp_config.heatmap_head_params.kernel_sizes) else: heatmap_head_num_filters = [256] heatmap_head_kernel_sizes = [3] if kp_config.HasField('offset_head_params'): offset_head_num_filters = list(kp_config.offset_head_params.num_filters) offset_head_kernel_sizes = list(kp_config.offset_head_params.kernel_sizes) else: offset_head_num_filters = [256] offset_head_kernel_sizes = [3] if kp_config.HasField('regress_head_params'): regress_head_num_filters = list(kp_config.regress_head_params.num_filters) regress_head_kernel_sizes = list( kp_config.regress_head_params.kernel_sizes) else: regress_head_num_filters = [256] regress_head_kernel_sizes = [3] return center_net_meta_arch.KeypointEstimationParams( task_name=kp_config.task_name, class_id=label_map_item.id - CLASS_ID_OFFSET, keypoint_indices=keypoint_indices, classification_loss=classification_loss, localization_loss=localization_loss, keypoint_labels=keypoint_labels, keypoint_std_dev=keypoint_std_dev, task_loss_weight=kp_config.task_loss_weight, keypoint_regression_loss_weight=kp_config.keypoint_regression_loss_weight, keypoint_heatmap_loss_weight=kp_config.keypoint_heatmap_loss_weight, keypoint_offset_loss_weight=kp_config.keypoint_offset_loss_weight, heatmap_bias_init=kp_config.heatmap_bias_init, keypoint_candidate_score_threshold=( kp_config.keypoint_candidate_score_threshold), num_candidates_per_keypoint=kp_config.num_candidates_per_keypoint, peak_max_pool_kernel_size=kp_config.peak_max_pool_kernel_size, unmatched_keypoint_score=kp_config.unmatched_keypoint_score, box_scale=kp_config.box_scale, candidate_search_scale=kp_config.candidate_search_scale, candidate_ranking_mode=kp_config.candidate_ranking_mode, offset_peak_radius=kp_config.offset_peak_radius, per_keypoint_offset=kp_config.per_keypoint_offset, predict_depth=kp_config.predict_depth, per_keypoint_depth=kp_config.per_keypoint_depth, keypoint_depth_loss_weight=kp_config.keypoint_depth_loss_weight, score_distance_offset=kp_config.score_distance_offset, clip_out_of_frame_keypoints=kp_config.clip_out_of_frame_keypoints, rescore_instances=kp_config.rescore_instances, heatmap_head_num_filters=heatmap_head_num_filters, heatmap_head_kernel_sizes=heatmap_head_kernel_sizes, offset_head_num_filters=offset_head_num_filters, offset_head_kernel_sizes=offset_head_kernel_sizes, regress_head_num_filters=regress_head_num_filters, regress_head_kernel_sizes=regress_head_kernel_sizes) def object_detection_proto_to_params(od_config): """Converts CenterNet.ObjectDetection proto to parameter namedtuple.""" loss = losses_pb2.Loss() # Add dummy classification loss to 
avoid the loss_builder throwing error. # TODO(yuhuic): update the loss builder to take the classification loss # directly. loss.classification_loss.weighted_sigmoid.CopyFrom( losses_pb2.WeightedSigmoidClassificationLoss()) loss.localization_loss.CopyFrom(od_config.localization_loss) _, localization_loss, _, _, _, _, _ = (losses_builder.build(loss)) if od_config.HasField('scale_head_params'): scale_head_num_filters = list(od_config.scale_head_params.num_filters) scale_head_kernel_sizes = list(od_config.scale_head_params.kernel_sizes) else: scale_head_num_filters = [256] scale_head_kernel_sizes = [3] if od_config.HasField('offset_head_params'): offset_head_num_filters = list(od_config.offset_head_params.num_filters) offset_head_kernel_sizes = list(od_config.offset_head_params.kernel_sizes) else: offset_head_num_filters = [256] offset_head_kernel_sizes = [3] return center_net_meta_arch.ObjectDetectionParams( localization_loss=localization_loss, scale_loss_weight=od_config.scale_loss_weight, offset_loss_weight=od_config.offset_loss_weight, task_loss_weight=od_config.task_loss_weight, scale_head_num_filters=scale_head_num_filters, scale_head_kernel_sizes=scale_head_kernel_sizes, offset_head_num_filters=offset_head_num_filters, offset_head_kernel_sizes=offset_head_kernel_sizes) def object_center_proto_to_params(oc_config): """Converts CenterNet.ObjectCenter proto to parameter namedtuple.""" loss = losses_pb2.Loss() # Add dummy localization loss to avoid the loss_builder throwing error. # TODO(yuhuic): update the loss builder to take the localization loss # directly. loss.localization_loss.weighted_l2.CopyFrom( losses_pb2.WeightedL2LocalizationLoss()) loss.classification_loss.CopyFrom(oc_config.classification_loss) classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) keypoint_weights_for_center = [] if oc_config.keypoint_weights_for_center: keypoint_weights_for_center = list(oc_config.keypoint_weights_for_center) if oc_config.HasField('center_head_params'): center_head_num_filters = list(oc_config.center_head_params.num_filters) center_head_kernel_sizes = list(oc_config.center_head_params.kernel_sizes) else: center_head_num_filters = [256] center_head_kernel_sizes = [3] return center_net_meta_arch.ObjectCenterParams( classification_loss=classification_loss, object_center_loss_weight=oc_config.object_center_loss_weight, heatmap_bias_init=oc_config.heatmap_bias_init, min_box_overlap_iou=oc_config.min_box_overlap_iou, max_box_predictions=oc_config.max_box_predictions, use_labeled_classes=oc_config.use_labeled_classes, keypoint_weights_for_center=keypoint_weights_for_center, center_head_num_filters=center_head_num_filters, center_head_kernel_sizes=center_head_kernel_sizes) def mask_proto_to_params(mask_config): """Converts CenterNet.MaskEstimation proto to parameter namedtuple.""" loss = losses_pb2.Loss() # Add dummy localization loss to avoid the loss_builder throwing error. 
loss.localization_loss.weighted_l2.CopyFrom( losses_pb2.WeightedL2LocalizationLoss()) loss.classification_loss.CopyFrom(mask_config.classification_loss) classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) if mask_config.HasField('mask_head_params'): mask_head_num_filters = list(mask_config.mask_head_params.num_filters) mask_head_kernel_sizes = list(mask_config.mask_head_params.kernel_sizes) else: mask_head_num_filters = [256] mask_head_kernel_sizes = [3] return center_net_meta_arch.MaskParams( classification_loss=classification_loss, task_loss_weight=mask_config.task_loss_weight, mask_height=mask_config.mask_height, mask_width=mask_config.mask_width, score_threshold=mask_config.score_threshold, heatmap_bias_init=mask_config.heatmap_bias_init, mask_head_num_filters=mask_head_num_filters, mask_head_kernel_sizes=mask_head_kernel_sizes) def densepose_proto_to_params(densepose_config): """Converts CenterNet.DensePoseEstimation proto to parameter namedtuple.""" classification_loss, localization_loss, _, _, _, _, _ = ( losses_builder.build(densepose_config.loss)) return center_net_meta_arch.DensePoseParams( class_id=densepose_config.class_id, classification_loss=classification_loss, localization_loss=localization_loss, part_loss_weight=densepose_config.part_loss_weight, coordinate_loss_weight=densepose_config.coordinate_loss_weight, num_parts=densepose_config.num_parts, task_loss_weight=densepose_config.task_loss_weight, upsample_to_input_res=densepose_config.upsample_to_input_res, heatmap_bias_init=densepose_config.heatmap_bias_init) def tracking_proto_to_params(tracking_config): """Converts CenterNet.TrackEstimation proto to parameter namedtuple.""" loss = losses_pb2.Loss() # Add dummy localization loss to avoid the loss_builder throwing error. # TODO(yuhuic): update the loss builder to take the localization loss # directly. loss.localization_loss.weighted_l2.CopyFrom( losses_pb2.WeightedL2LocalizationLoss()) loss.classification_loss.CopyFrom(tracking_config.classification_loss) classification_loss, _, _, _, _, _, _ = losses_builder.build(loss) return center_net_meta_arch.TrackParams( num_track_ids=tracking_config.num_track_ids, reid_embed_size=tracking_config.reid_embed_size, classification_loss=classification_loss, num_fc_layers=tracking_config.num_fc_layers, task_loss_weight=tracking_config.task_loss_weight) def temporal_offset_proto_to_params(temporal_offset_config): """Converts CenterNet.TemporalOffsetEstimation proto to param-tuple.""" loss = losses_pb2.Loss() # Add dummy classification loss to avoid the loss_builder throwing error. # TODO(yuhuic): update the loss builder to take the classification loss # directly. loss.classification_loss.weighted_sigmoid.CopyFrom( losses_pb2.WeightedSigmoidClassificationLoss()) loss.localization_loss.CopyFrom(temporal_offset_config.localization_loss) _, localization_loss, _, _, _, _, _ = losses_builder.build(loss) return center_net_meta_arch.TemporalOffsetParams( localization_loss=localization_loss, task_loss_weight=temporal_offset_config.task_loss_weight) def _build_center_net_model(center_net_config, is_training, add_summaries): """Build a CenterNet detection model. Args: center_net_config: A CenterNet proto object with model configuration. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tf summaries in the model. Returns: CenterNetMetaArch based on the config. 
""" image_resizer_fn = image_resizer_builder.build( center_net_config.image_resizer) _check_feature_extractor_exists(center_net_config.feature_extractor.type) feature_extractor = _build_center_net_feature_extractor( center_net_config.feature_extractor, is_training) object_center_params = object_center_proto_to_params( center_net_config.object_center_params) object_detection_params = None if center_net_config.HasField('object_detection_task'): object_detection_params = object_detection_proto_to_params( center_net_config.object_detection_task) if center_net_config.HasField('deepmac_mask_estimation'): logging.warn(('Building experimental DeepMAC meta-arch.' ' Some features may be omitted.')) deepmac_params = deepmac_meta_arch.deepmac_proto_to_params( center_net_config.deepmac_mask_estimation) return deepmac_meta_arch.DeepMACMetaArch( is_training=is_training, add_summaries=add_summaries, num_classes=center_net_config.num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=object_center_params, object_detection_params=object_detection_params, deepmac_params=deepmac_params) keypoint_params_dict = None if center_net_config.keypoint_estimation_task: label_map_proto = label_map_util.load_labelmap( center_net_config.keypoint_label_map_path) keypoint_map_dict = { item.name: item for item in label_map_proto.item if item.keypoints } keypoint_params_dict = {} keypoint_class_id_set = set() all_keypoint_indices = [] for task in center_net_config.keypoint_estimation_task: kp_params = keypoint_proto_to_params(task, keypoint_map_dict) keypoint_params_dict[task.task_name] = kp_params all_keypoint_indices.extend(kp_params.keypoint_indices) if kp_params.class_id in keypoint_class_id_set: raise ValueError(('Multiple keypoint tasks map to the same class id is ' 'not allowed: %d' % kp_params.class_id)) else: keypoint_class_id_set.add(kp_params.class_id) if len(all_keypoint_indices) > len(set(all_keypoint_indices)): raise ValueError('Some keypoint indices are used more than once.') mask_params = None if center_net_config.HasField('mask_estimation_task'): mask_params = mask_proto_to_params(center_net_config.mask_estimation_task) densepose_params = None if center_net_config.HasField('densepose_estimation_task'): densepose_params = densepose_proto_to_params( center_net_config.densepose_estimation_task) track_params = None if center_net_config.HasField('track_estimation_task'): track_params = tracking_proto_to_params( center_net_config.track_estimation_task) temporal_offset_params = None if center_net_config.HasField('temporal_offset_task'): temporal_offset_params = temporal_offset_proto_to_params( center_net_config.temporal_offset_task) non_max_suppression_fn = None if center_net_config.HasField('post_processing'): non_max_suppression_fn, _ = post_processing_builder.build( center_net_config.post_processing) return center_net_meta_arch.CenterNetMetaArch( is_training=is_training, add_summaries=add_summaries, num_classes=center_net_config.num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=object_center_params, object_detection_params=object_detection_params, keypoint_params_dict=keypoint_params_dict, mask_params=mask_params, densepose_params=densepose_params, track_params=track_params, temporal_offset_params=temporal_offset_params, use_depthwise=center_net_config.use_depthwise, compute_heatmap_sparse=center_net_config.compute_heatmap_sparse, non_max_suppression_fn=non_max_suppression_fn) def 
_build_center_net_feature_extractor(feature_extractor_config, is_training): """Build a CenterNet feature extractor from the given config.""" if feature_extractor_config.type not in CENTER_NET_EXTRACTOR_FUNCTION_MAP: raise ValueError('\'{}\' is not a known CenterNet feature extractor type' .format(feature_extractor_config.type)) # For backwards compatibility: use_separable_conv = ( feature_extractor_config.use_separable_conv or feature_extractor_config.type == 'mobilenet_v2_fpn_sep_conv') kwargs = { 'channel_means': list(feature_extractor_config.channel_means), 'channel_stds': list(feature_extractor_config.channel_stds), 'bgr_ordering': feature_extractor_config.bgr_ordering, 'depth_multiplier': feature_extractor_config.depth_multiplier, 'use_separable_conv': use_separable_conv, 'upsampling_interpolation': feature_extractor_config.upsampling_interpolation, } return CENTER_NET_EXTRACTOR_FUNCTION_MAP[feature_extractor_config.type]( **kwargs) META_ARCH_BUILDER_MAP = { 'ssd': _build_ssd_model, 'faster_rcnn': _build_faster_rcnn_model, 'experimental_model': _build_experimental_model, 'center_net': _build_center_net_model } def build(model_config, is_training, add_summaries=True): """Builds a DetectionModel based on the model config. Args: model_config: A model.proto object containing the config for the desired DetectionModel. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tensorflow summaries in the model graph. Returns: DetectionModel based on the config. Raises: ValueError: On invalid meta architecture or model. """ if not isinstance(model_config, model_pb2.DetectionModel): raise ValueError('model_config not of type model_pb2.DetectionModel.') meta_architecture = model_config.WhichOneof('model') if meta_architecture not in META_ARCH_BUILDER_MAP: raise ValueError('Unknown meta architecture: {}'.format(meta_architecture)) else: build_func = META_ARCH_BUILDER_MAP[meta_architecture] return build_func(getattr(model_config, meta_architecture), is_training, add_summaries)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/model_builder.py
model_builder.py
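model_builder.build is the public entry point of the file above: it checks that the config is a model_pb2.DetectionModel, reads the meta-architecture oneof, and dispatches to the matching _build_* function. A minimal usage sketch follows; the pipeline config path is a placeholder, and it assumes config_util.get_configs_from_pipeline_file from object_detection.utils is used to load a standard pipeline.config:

from object_detection.builders import model_builder
from object_detection.utils import config_util

# Placeholder path to a pipeline.config (e.g. one shipped with a model-zoo
# checkpoint).
configs = config_util.get_configs_from_pipeline_file('/tmp/pipeline.config')

# configs['model'] is a model_pb2.DetectionModel; whichever meta architecture
# is set in its oneof ('ssd', 'faster_rcnn', 'center_net', ...) selects the
# corresponding builder from META_ARCH_BUILDER_MAP.
detection_model = model_builder.build(
    model_config=configs['model'], is_training=True, add_summaries=False)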