# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataDecoder builder.
Creates DataDecoders from InputReader configs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from object_detection.data_decoders import tf_example_decoder
from object_detection.data_decoders import tf_sequence_example_decoder
from object_detection.protos import input_reader_pb2
def build(input_reader_config):
"""Builds a DataDecoder based only on the open source config proto.
Args:
input_reader_config: An input_reader_pb2.InputReader object.
Returns:
A DataDecoder based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
label_map_proto_file = None
if input_reader_config.HasField('label_map_path'):
label_map_proto_file = input_reader_config.label_map_path
input_type = input_reader_config.input_type
if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'):
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=input_reader_config.load_instance_masks,
load_multiclass_scores=input_reader_config.load_multiclass_scores,
load_context_features=input_reader_config.load_context_features,
instance_mask_type=input_reader_config.mask_type,
label_map_proto_file=label_map_proto_file,
use_display_name=input_reader_config.use_display_name,
num_additional_channels=input_reader_config.num_additional_channels,
num_keypoints=input_reader_config.num_keypoints,
expand_hierarchy_labels=input_reader_config.expand_labels_hierarchy,
load_dense_pose=input_reader_config.load_dense_pose,
load_track_id=input_reader_config.load_track_id,
load_keypoint_depth_features=input_reader_config
.load_keypoint_depth_features)
return decoder
elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'):
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file,
load_context_features=input_reader_config.load_context_features,
load_context_image_ids=input_reader_config.load_context_image_ids)
return decoder
raise ValueError('Unsupported input_type in config.')
raise ValueError('Unsupported input_reader_config.')
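# A minimal usage sketch (hedged: `input_path` inside tf_record_input_reader
# is assumed from the shipped InputReader proto; only `label_map_path` and the
# oneof name appear in the code above):
#
#   from google.protobuf import text_format
#   reader_config = text_format.Parse("""
#       tf_record_input_reader { input_path: "/path/to/data.record" }
#       label_map_path: "/path/to/label_map.pbtxt"
#   """, input_reader_pb2.InputReader())
#   decoder = build(reader_config)  # TfExampleDecoder for TF_EXAMPLE input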
# ---- Source: object_detection/builders/decoder_builder.py (package 123-object-detection) ----
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build localization and classification losses from config."""
import functools
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import losses
from object_detection.protos import losses_pb2
from object_detection.utils import ops
def build(loss_config):
"""Build losses based on the config.
  Builds classification and localization losses and, optionally, a hard
  example miner based on the config.
Args:
loss_config: A losses_pb2.Loss object.
  Returns:
    classification_loss: Classification loss object.
    localization_loss: Localization loss object.
    classification_weight: Classification loss weight.
    localization_weight: Localization loss weight.
    hard_example_miner: Hard example miner object.
    random_example_sampler: BalancedPositiveNegativeSampler object.
    expected_loss_weights_fn: Function for computing expected classification
      loss weights, or None when expected_loss_weights is NONE.
Raises:
ValueError: If hard_example_miner is used with sigmoid_focal_loss.
    ValueError: If random_example_sampler is given a non-positive value as the
      desired positive example fraction.
"""
classification_loss = _build_classification_loss(
loss_config.classification_loss)
localization_loss = _build_localization_loss(
loss_config.localization_loss)
classification_weight = loss_config.classification_weight
localization_weight = loss_config.localization_weight
hard_example_miner = None
if loss_config.HasField('hard_example_miner'):
if (loss_config.classification_loss.WhichOneof('classification_loss') ==
'weighted_sigmoid_focal'):
raise ValueError('HardExampleMiner should not be used with sigmoid focal '
'loss')
hard_example_miner = build_hard_example_miner(
loss_config.hard_example_miner,
classification_weight,
localization_weight)
random_example_sampler = None
if loss_config.HasField('random_example_sampler'):
if loss_config.random_example_sampler.positive_sample_fraction <= 0:
      raise ValueError('RandomExampleSampler should not use non-positive '
                       'value as positive sample fraction.')
random_example_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=loss_config.random_example_sampler.
positive_sample_fraction)
if loss_config.expected_loss_weights == loss_config.NONE:
expected_loss_weights_fn = None
elif loss_config.expected_loss_weights == loss_config.EXPECTED_SAMPLING:
expected_loss_weights_fn = functools.partial(
ops.expected_classification_loss_by_expected_sampling,
min_num_negative_samples=loss_config.min_num_negative_samples,
desired_negative_sampling_ratio=loss_config
.desired_negative_sampling_ratio)
elif (loss_config.expected_loss_weights == loss_config
.REWEIGHTING_UNMATCHED_ANCHORS):
expected_loss_weights_fn = functools.partial(
ops.expected_classification_loss_by_reweighting_unmatched_anchors,
min_num_negative_samples=loss_config.min_num_negative_samples,
desired_negative_sampling_ratio=loss_config
.desired_negative_sampling_ratio)
else:
    raise ValueError('Not a valid value for expected_loss_weights.')
return (classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, random_example_sampler,
expected_loss_weights_fn)
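# A minimal usage sketch for build() (field names mirror the ones read above;
# the text-format values are illustrative):
#
#   from google.protobuf import text_format
#   loss_proto = text_format.Parse("""
#       classification_loss { weighted_sigmoid {} }
#       localization_loss { weighted_smooth_l1 {} }
#       classification_weight: 1.0
#       localization_weight: 1.0
#   """, losses_pb2.Loss())
#   (cls_loss, loc_loss, cls_weight, loc_weight,
#    miner, example_sampler, expected_weights_fn) = build(loss_proto)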
def build_hard_example_miner(config,
classification_weight,
localization_weight):
"""Builds hard example miner based on the config.
Args:
config: A losses_pb2.HardExampleMiner object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
Returns:
Hard example miner.
"""
loss_type = None
if config.loss_type == losses_pb2.HardExampleMiner.BOTH:
loss_type = 'both'
if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION:
loss_type = 'cls'
if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION:
loss_type = 'loc'
max_negatives_per_positive = None
num_hard_examples = None
if config.max_negatives_per_positive > 0:
max_negatives_per_positive = config.max_negatives_per_positive
if config.num_hard_examples > 0:
num_hard_examples = config.num_hard_examples
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=num_hard_examples,
iou_threshold=config.iou_threshold,
loss_type=loss_type,
cls_loss_weight=classification_weight,
loc_loss_weight=localization_weight,
max_negatives_per_positive=max_negatives_per_positive,
min_negatives_per_image=config.min_negatives_per_image)
return hard_example_miner
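# Driving build_hard_example_miner directly (a sketch; field names are taken
# from the losses_pb2.HardExampleMiner accesses above):
#
#   from google.protobuf import text_format
#   miner_config = text_format.Parse(
#       'loss_type: BOTH iou_threshold: 0.99 num_hard_examples: 64',
#       losses_pb2.HardExampleMiner())
#   miner = build_hard_example_miner(
#       miner_config, classification_weight=1.0, localization_weight=1.0)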
def build_faster_rcnn_classification_loss(loss_config):
"""Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
if loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_logits_softmax':
config = loss_config.weighted_logits_softmax
return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_sigmoid_focal':
config = loss_config.weighted_sigmoid_focal
alpha = None
if config.HasField('alpha'):
alpha = config.alpha
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
# By default, Faster RCNN second stage classifier uses Softmax loss
# with anchor-wise outputs.
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
def _build_localization_loss(loss_config):
"""Builds a localization loss based on the loss config.
Args:
loss_config: A losses_pb2.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.LocalizationLoss):
raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')
loss_type = loss_config.WhichOneof('localization_loss')
if loss_type == 'weighted_l2':
return losses.WeightedL2LocalizationLoss()
if loss_type == 'weighted_smooth_l1':
return losses.WeightedSmoothL1LocalizationLoss(
loss_config.weighted_smooth_l1.delta)
if loss_type == 'weighted_iou':
return losses.WeightedIOULocalizationLoss()
if loss_type == 'l1_localization_loss':
return losses.L1LocalizationLoss()
if loss_type == 'weighted_giou':
return losses.WeightedGIOULocalizationLoss()
raise ValueError('Empty loss config.')
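# Sketch: selecting a localization loss through the oneof handled above
# (`delta` is the field read from weighted_smooth_l1 in the code):
#
#   from google.protobuf import text_format
#   loc_proto = text_format.Parse(
#       'weighted_smooth_l1 { delta: 1.0 }', losses_pb2.LocalizationLoss())
#   loc_loss = _build_localization_loss(loc_proto)
#   # -> losses.WeightedSmoothL1LocalizationLoss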
def _build_classification_loss(loss_config):
"""Builds a classification loss based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
elif loss_type == 'weighted_sigmoid_focal':
config = loss_config.weighted_sigmoid_focal
alpha = None
if config.HasField('alpha'):
alpha = config.alpha
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
elif loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
elif loss_type == 'weighted_logits_softmax':
config = loss_config.weighted_logits_softmax
return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=config.logit_scale)
elif loss_type == 'bootstrapped_sigmoid':
config = loss_config.bootstrapped_sigmoid
return losses.BootstrappedSigmoidClassificationLoss(
alpha=config.alpha,
bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))
elif loss_type == 'penalty_reduced_logistic_focal_loss':
config = loss_config.penalty_reduced_logistic_focal_loss
return losses.PenaltyReducedLogisticFocalLoss(
alpha=config.alpha, beta=config.beta)
elif loss_type == 'weighted_dice_classification_loss':
config = loss_config.weighted_dice_classification_loss
return losses.WeightedDiceClassificationLoss(
squared_normalization=config.squared_normalization)
else:
raise ValueError('Empty loss config.')
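# Sketch: the classification counterpart (gamma/alpha are the fields read from
# weighted_sigmoid_focal above):
#
#   from google.protobuf import text_format
#   cls_proto = text_format.Parse(
#       'weighted_sigmoid_focal { gamma: 2.0 alpha: 0.25 }',
#       losses_pb2.ClassificationLoss())
#   cls_loss = _build_classification_loss(cls_proto)
#   # -> losses.SigmoidFocalClassificationLoss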
# ---- Source: object_detection/builders/losses_builder.py (package 123-object-detection) ----
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection box coder from configuration."""
from object_detection.builders import box_coder_builder
from object_detection.builders import matcher_builder
from object_detection.builders import region_similarity_calculator_builder
from object_detection.core import target_assigner
def build(target_assigner_config):
"""Builds a TargetAssigner object based on the config.
Args:
target_assigner_config: A target_assigner proto message containing config
for the desired target assigner.
Returns:
TargetAssigner object based on the config.
"""
matcher_instance = matcher_builder.build(target_assigner_config.matcher)
similarity_calc_instance = region_similarity_calculator_builder.build(
target_assigner_config.similarity_calculator)
box_coder = box_coder_builder.build(target_assigner_config.box_coder)
return target_assigner.TargetAssigner(
matcher=matcher_instance,
similarity_calc=similarity_calc_instance,
box_coder_instance=box_coder)
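# A minimal usage sketch (the three sub-builders consume the matcher,
# similarity_calculator and box_coder sub-messages of the proto):
#
#   assigner = build(target_assigner_config)
#   # assigner.assign(anchors, groundtruth_boxes, ...) then yields the
#   # classification/regression targets and weights for training.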
# ---- Source: object_detection/builders/target_assigner_builder.py (package 123-object-detection) ----
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
import collections
import tensorflow.compat.v1 as tf
from object_detection.predictors import convolutional_box_predictor
from object_detection.predictors import convolutional_keras_box_predictor
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.predictors import mask_rcnn_keras_box_predictor
from object_detection.predictors import rfcn_box_predictor
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.predictors.heads import box_head
from object_detection.predictors.heads import class_head
from object_detection.predictors.heads import keras_box_head
from object_detection.predictors.heads import keras_class_head
from object_detection.predictors.heads import keras_mask_head
from object_detection.predictors.heads import mask_head
from object_detection.protos import box_predictor_pb2
def build_convolutional_box_predictor(is_training,
num_classes,
conv_hyperparams_fn,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
apply_sigmoid_to_scores=False,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None):
"""Builds the ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take values
      in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the
      assigned classification targets can range from {0, ..., K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
    use_dropout: Option to use dropout or not. Note that in this predictor
      dropout is applied only in the class prediction head (see the head
      construction below).
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
apply_sigmoid_to_scores: If True, apply the sigmoid on the output
class_predictions.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: Constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
Returns:
A ConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
apply_sigmoid_to_scores=apply_sigmoid_to_scores,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise)
other_heads = {}
return convolutional_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth)
def build_convolutional_keras_box_predictor(is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None,
name='BoxPredictor'):
"""Builds the Keras ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take values
      in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the
      assigned classification targets can range from {0, ..., K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
      values inplace. When this is false, the train op must add a control
      dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
    use_dropout: Option to use dropout or not. Note that in this predictor
      dropout is applied only in the class prediction head (see the head
      construction below).
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
Returns:
A Keras ConvolutionalBoxPredictor class.
"""
box_prediction_heads = []
class_prediction_heads = []
other_heads = {}
for stack_index, num_predictions_per_location in enumerate(
num_predictions_per_location_list):
box_prediction_heads.append(
keras_box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range,
name='ConvolutionalBoxHead_%d' % stack_index))
class_prediction_heads.append(
keras_class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
name='ConvolutionalClassHead_%d' % stack_index))
return convolutional_keras_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_heads=box_prediction_heads,
class_prediction_heads=class_prediction_heads,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
name=name)
def build_weight_shared_convolutional_box_predictor(
is_training,
num_classes,
conv_hyperparams_fn,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
keyword_args=None):
"""Builds and returns a WeightSharedConvolutionalBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take values
      in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the
      assigned classification targets can range from {0, ..., K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
keyword_args: A dictionary with additional args.
Returns:
A WeightSharedConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = (
class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
kernel_size=kernel_size,
class_prediction_bias_init=class_prediction_bias_init,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
use_depthwise=use_depthwise,
score_converter_fn=score_converter_fn))
other_heads = {}
return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise)
def build_weight_shared_convolutional_keras_box_predictor(
is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
apply_conv_hyperparams_to_heads=False,
apply_conv_hyperparams_pointwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
name='WeightSharedConvolutionalBoxPredictor',
keyword_args=None):
"""Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take values
      in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the
      assigned classification targets can range from {0, ..., K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
      values inplace. When this is false, the train op must add a control
      dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to
      depthwise separable convolution layers in the box and class heads. By
default, the conv_hyperparams are only applied to layers in the predictor
tower when using depthwise separable convolutions.
apply_conv_hyperparams_pointwise: Whether to apply the conv_hyperparams to
the pointwise_initializer and pointwise_regularizer when using depthwise
separable convolutions. By default, conv_hyperparams are only applied to
the depthwise initializer and regularizer when use_depthwise is true.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
keyword_args: A dictionary with additional args.
Returns:
A Keras WeightSharedConvolutionalBoxPredictor class.
"""
if len(set(num_predictions_per_location_list)) > 1:
    raise ValueError('num predictions per location must be the same for all '
                     'feature maps, found: {}'.format(
                         num_predictions_per_location_list))
num_predictions_per_location = num_predictions_per_location_list[0]
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads,
box_encodings_clip_range=box_encodings_clip_range,
name='WeightSharedConvolutionalBoxHead')
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads,
score_converter_fn=score_converter_fn,
name='WeightSharedConvolutionalClassHead')
other_heads = {}
return (
convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise,
apply_conv_hyperparams_pointwise=apply_conv_hyperparams_pointwise,
name=name))
def build_mask_rcnn_keras_box_predictor(is_training,
num_classes,
fc_hyperparams,
freeze_batchnorm,
use_dropout,
dropout_keep_prob,
box_code_size,
add_background_class=True,
share_box_across_classes=False,
predict_instance_masks=False,
conv_hyperparams=None,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample_masks=False):
"""Builds and returns a MaskRCNNKerasBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take values
      in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the
      assigned classification targets can range from {0, ..., K}).
fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for fully connected dense ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
    use_dropout: Option to use dropout or not. Note that a single dropout
      op is applied here prior to both box and class predictions.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
predict_instance_masks: If True, will add a third stage mask prediction
to the returned class.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask
features before upsampling using nearest neighbor resizing. Otherwise,
mask features are resized to [`mask_height`, `mask_width`] using
bilinear resizing before applying convolutions.
Returns:
A MaskRCNNKerasBoxPredictor class.
"""
box_prediction_head = keras_box_head.MaskRCNNBoxHead(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
box_code_size=box_code_size,
share_box_across_classes=share_box_across_classes)
class_prediction_head = keras_class_head.MaskRCNNClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob)
third_stage_heads = {}
if predict_instance_masks:
third_stage_heads[
mask_rcnn_box_predictor.
MASK_PREDICTIONS] = keras_mask_head.MaskRCNNMaskHead(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
mask_height=mask_height,
mask_width=mask_width,
mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
mask_prediction_conv_depth=mask_prediction_conv_depth,
masks_are_class_agnostic=masks_are_class_agnostic,
convolve_then_upsample=convolve_then_upsample_masks)
return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
freeze_batchnorm=freeze_batchnorm,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
third_stage_heads=third_stage_heads)
def build_mask_rcnn_box_predictor(is_training,
num_classes,
fc_hyperparams_fn,
use_dropout,
dropout_keep_prob,
box_code_size,
add_background_class=True,
share_box_across_classes=False,
predict_instance_masks=False,
conv_hyperparams_fn=None,
mask_height=14,
mask_width=14,
mask_prediction_num_conv_layers=2,
mask_prediction_conv_depth=256,
masks_are_class_agnostic=False,
convolve_then_upsample_masks=False):
"""Builds and returns a MaskRCNNBoxPredictor class.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take values
      in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the
      assigned classification targets can range from {0, ..., K}).
fc_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for fully connected ops.
    use_dropout: Option to use dropout or not. Note that a single dropout
      op is applied here prior to both box and class predictions.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
add_background_class: Whether to add an implicit background class.
share_box_across_classes: Whether to share boxes across classes rather
than use a different box for each class.
predict_instance_masks: If True, will add a third stage mask prediction
to the returned class.
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_num_conv_layers: Number of convolution layers applied to
the image_features in mask prediction branch.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
applied to the image_features in the mask prediction branch. If set
to 0, the depth of the convolution layers will be automatically chosen
based on the number of object classes and the number of channels in the
image features.
masks_are_class_agnostic: Boolean determining if the mask-head is
class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask
features before upsampling using nearest neighbor resizing. Otherwise,
mask features are resized to [`mask_height`, `mask_width`] using
bilinear resizing before applying convolutions.
Returns:
A MaskRCNNBoxPredictor class.
"""
box_prediction_head = box_head.MaskRCNNBoxHead(
is_training=is_training,
num_classes=num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
box_code_size=box_code_size,
share_box_across_classes=share_box_across_classes)
class_prediction_head = class_head.MaskRCNNClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob)
third_stage_heads = {}
if predict_instance_masks:
third_stage_heads[
mask_rcnn_box_predictor.
MASK_PREDICTIONS] = mask_head.MaskRCNNMaskHead(
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
mask_height=mask_height,
mask_width=mask_width,
mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
mask_prediction_conv_depth=mask_prediction_conv_depth,
masks_are_class_agnostic=masks_are_class_agnostic,
convolve_then_upsample=convolve_then_upsample_masks)
return mask_rcnn_box_predictor.MaskRCNNBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
third_stage_heads=third_stage_heads)
def build_score_converter(score_converter_config, is_training):
"""Builds score converter based on the config.
Builds one of [tf.identity, tf.sigmoid] score converters based on the config
and whether the BoxPredictor is for training or inference.
Args:
score_converter_config:
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
is_training: Indicates whether the BoxPredictor is in training mode.
Returns:
Callable score converter op.
Raises:
ValueError: On unknown score converter.
"""
if score_converter_config == (
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY):
return tf.identity
if score_converter_config == (
box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID):
return tf.identity if is_training else tf.sigmoid
raise ValueError('Unknown score converter.')
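# Example behavior (per the branches above): with a SIGMOID score converter,
# training keeps raw logits while inference applies the sigmoid.
#
#   build_score_converter(
#       box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID,
#       is_training=True)    # -> tf.identity (loss is computed on logits)
#   build_score_converter(
#       box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID,
#       is_training=False)   # -> tf.sigmoid (TPU-friendly inference graph)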
BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange',
['min', 'max'])
def build(argscope_fn, box_predictor_config, is_training, num_classes,
add_background_class=True):
"""Builds box predictor based on the configuration.
Builds box predictor based on the configuration. See box_predictor.proto for
configurable options. Also, see box_predictor.py for more details.
Args:
argscope_fn: A function that takes the following inputs:
* hyperparams_pb2.Hyperparams proto
* a boolean indicating if the model is in training mode.
and returns a tf slim argscope for Conv and FC hyperparameters.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.BoxPredictor object.
Raises:
ValueError: On unknown box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams_fn=conv_hyperparams_fn,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams_fn=conv_hyperparams_fn,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams,
is_training)
conv_hyperparams_fn = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams_fn = argscope_fn(
config_box_predictor.conv_hyperparams, is_training)
return build_mask_rcnn_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams_fn=fc_hyperparams_fn,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams_fn=conv_hyperparams_fn,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
is_training)
box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams_fn=conv_hyperparams_fn,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
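# A minimal usage sketch (hedged: hyperparams_builder.build is the usual
# argscope_fn in this codebase; any callable with the documented
# (hyperparams_proto, is_training) signature works):
#
#   from object_detection.builders import hyperparams_builder
#   predictor = build(
#       argscope_fn=hyperparams_builder.build,
#       box_predictor_config=box_predictor_config,
#       is_training=True,
#       num_classes=90)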
def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update,
num_predictions_per_location_list, box_predictor_config,
is_training, num_classes, add_background_class=True):
"""Builds a Keras-based box predictor based on the configuration.
Builds Keras-based box predictor based on the configuration.
See box_predictor.proto for configurable options. Also, see box_predictor.py
for more details.
Args:
hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams
proto and returns a `hyperparams_builder.KerasLayerHyperparams`
for Conv or FC hyperparameters.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
      values inplace. When this is false, the train op must add a control
      dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.KerasBoxPredictor object.
Raises:
ValueError: On unknown box predictor, or one with no Keras box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly. This is
# required because during TPU inference, model.postprocess is not called.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
add_background_class=add_background_class,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
apply_conv_hyperparams_to_heads=(
config_box_predictor.apply_conv_hyperparams_to_heads),
apply_conv_hyperparams_pointwise=(
config_box_predictor.apply_conv_hyperparams_pointwise),
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams)
conv_hyperparams = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
return build_mask_rcnn_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams=conv_hyperparams,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError(
'Unknown box predictor for Keras: {}'.format(box_predictor_oneof))
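# The Keras-flavored counterpart (hedged: KerasLayerHyperparams is the
# conventional factory for `hyperparams_fn` in this codebase; the
# per-feature-map prediction counts are illustrative):
#
#   predictor = build_keras(
#       hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
#       freeze_batchnorm=False,
#       inplace_batchnorm_update=False,
#       num_predictions_per_location_list=[4, 4, 4, 4],
#       box_predictor_config=box_predictor_config,
#       is_training=True,
#       num_classes=90)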
# ---- Source: object_detection/builders/box_predictor_builder.py (package 123-object-detection) ----
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for calibration_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2
from object_detection.utils import test_case
class CalibrationBuilderTest(test_case.TestCase):
def test_tf_linear_interp1d_map(self):
"""Tests TF linear interpolation mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_map_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
def test_tf_linear_interp1d_interpolate(self):
"""Tests TF 1d linear interpolation not mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.])
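  # Worked value from the expectation above: new_x = 0.75 falls between
  # (0.5, 0.7) and (1.0, 1.0), so
  #   y = 0.7 + (1.0 - 0.7) * (0.75 - 0.5) / (1.0 - 0.5) = 0.85.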
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
"""Helper performing 1d linear interpolation using SciPy."""
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
def _get_tf_interp1d(self, new_x, x, y):
"""Helper performing 1d linear interpolation using Tensorflow."""
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(
tf.convert_to_tensor(new_x, dtype=tf.float32),
tf.convert_to_tensor(x, dtype=tf.float32),
tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
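  # Both helpers compute piecewise-linear interpolation: for new_x in
  # [x[i], x[i + 1]], y(new_x) = y[i] + (new_x - x[i]) *
  # (y[i + 1] - y[i]) / (x[i + 1] - x[i]).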
def test_tf_linear_interp1d_against_scipy_map(self):
"""Tests parity of TF linear interpolation with SciPy for simple mapping."""
length = 10
np_x = np.linspace(0, 1, length)
# Mapping all numbers to 0.5
np_y_map = np.repeat(0.5, length)
# Scipy and TF interpolations
test_data_np = np.linspace(0, 1, length * 10)
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
def test_tf_linear_interp1d_against_scipy_interpolate(self):
"""Tests parity of TF linear interpolation with SciPy."""
length = 10
np_x = np.linspace(0, 1, length)
    # Maps onto the non-constant [0.5, 1] range, so interpolation is required.
np_y_interp = np.linspace(0.5, 1, length)
# Scipy interpolation for comparison
test_data_np = np.linspace(0, 1, length * 10)
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x,
np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x,
np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto,
x_array, y_array,
class_id):
"""Adds a function approximation to calibration proto for a class id."""
# Per-class calibration.
if class_id is not None:
function_approximation = (
calibration_proto.class_id_function_approximations
.class_id_xy_pairs_map[class_id])
# Class-agnostic calibration.
else:
function_approximation = (
calibration_proto.function_approximation.x_y_pairs)
for x, y in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
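  # For reference, the class-agnostic branch above is equivalent to merging a
  # text-format proto such as the following (values illustrative):
  #
  #   function_approximation {
  #     x_y_pairs {
  #       x_y_pair { x: 0.0 y: 0.0 }
  #       x_y_pair { x: 0.5 y: 0.25 }
  #     }
  #   }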
def test_class_agnostic_function_approximation(self):
"""Tests that calibration produces correct class-agnostic values."""
# Generate fake calibration proto. For this interpolation, any input on
# [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have
# 0.25 subtracted from it.
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8],
[0.9, 1.0, 1.0]]], dtype=tf.float32)
      # Scores should be transformed by the class-agnostic function above.
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15],
[0.2, 0.25, 0.0]],
[[0.35, 0.45, 0.55],
[0.65, 0.75, 0.75]]])
def test_multiclass_function_approximations(self):
"""Tests that calibration produces correct multiclass values."""
# Background class (0-index) maps all predictions to 0.5.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
# Class id 1 will interpolate using these values.
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(
calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]],
[[0.5, 0.7], [0.5, 0.96]]])
def test_temperature_scaling(self):
"""Tests that calibration produces correct temperature scaling values."""
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
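    # Temperature scaling divides every input score by the scaler, so a
    # scaler of 2.0 should halve all predictions below.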
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np,
[[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
[[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
  def test_temperature_scaling_incorrect_value_error(self):
    """Tests that a zero temperature scaler raises a ValueError."""
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 0
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3]]], dtype=tf.float32)
with self.assertRaises(ValueError):
calibration_fn(class_predictions_with_background)
def test_skips_class_when_calibration_parameters_not_present(self):
"""Tests that graph fails when parameters not present for all classes."""
# Only adding calibration parameters for class id = 0, even though class id
# 1 is present in the data.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],
[[0.5, 0.4], [0.5, 0.92]]])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/calibration_builder_test.py | calibration_builder_test.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import tf_version
def _get_scope_key(op):
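  # slim arg_scope dictionaries are keyed by each op's `_key_op` attribute,
  # falling back to the op's string representation when it is absent.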
return getattr(op, '_key_op', str(op))
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests.')
class HyperparamsBuilderTest(tf.test.TestCase):
def test_default_arg_scope_has_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.conv2d), scope)
def test_default_arg_scope_has_separable_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.separable_conv2d), scope)
def test_default_arg_scope_has_conv2d_transpose_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.conv2d_transpose), scope)
def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
conv_hyperparams_text_proto = """
op: FC
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.fully_connected), scope)
def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
kwargs_1, kwargs_2, kwargs_3 = scope.values()
self.assertDictEqual(kwargs_1, kwargs_2)
self.assertDictEqual(kwargs_1, kwargs_3)
def test_return_l1_regularized_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = list(scope.values())[0]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
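      # L1 penalty: 0.5 * (1 + 1 + 4 + 2) = 4.0.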
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularizer_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
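      # L2 penalty (slim convention): 0.42 * (1 + 1 + 16 + 4) / 2 = 4.62.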
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_non_default_batch_norm_params_with_train_during_train(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertTrue(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_during_eval(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=False)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_when_train_is_false(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: false
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_do_not_use_batch_norm_if_default(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
    self.assertIsNone(conv_scope_arguments['normalizer_fn'])
def test_use_none_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
    self.assertIsNone(conv_scope_arguments['activation_fn'])
def test_use_relu_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu)
def test_use_relu_6_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6)
def test_use_swish_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: SWISH
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.swish)
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
var = tf.get_variable(
name='test',
shape=shape,
dtype=tf.float32,
initializer=initializer)
sess.run(tf.global_variables_initializer())
values = sess.run(var)
self.assertAllClose(np.var(values), variance, tol, tol)
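  # For variance_scaling_initializer the expected variance is factor / n,
  # where n is fan_in, fan_out, or (fan_in + fan_out) / 2 depending on the
  # mode, as exercised by the tests below.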
def test_variance_in_range_with_variance_scaling_initializer_fan_in(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only tests.')
class KerasHyperparamsBuilderTest(tf.test.TestCase):
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
var = tf.Variable(initializer(shape=shape, dtype=tf.float32))
self.assertAllClose(np.var(var.numpy()), variance, tol, tol)
def test_return_l1_regularized_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
result = regularizer(tf.constant(weights)).numpy()
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularized_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
result = regularizer(tf.constant(weights)).numpy()
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_l1_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
self.assertIsInstance(regularizer_weight, float)
self.assertAlmostEqual(regularizer_weight, 0.5)
def test_return_l2_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
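    # The builder halves the proto L2 weight: Keras's l2 computes
    # weight * sum(x**2), while the proto weight follows slim's
    # weight * sum(x**2) / 2 convention, hence 0.25 rather than 0.5.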
self.assertIsInstance(regularizer_weight, float)
self.assertAlmostEqual(regularizer_weight, 0.25)
def test_return_undefined_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
self.assertIsNone(regularizer_weight)
  def test_return_non_default_batch_norm_params_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertAlmostEqual(batch_norm_params['momentum'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
batch_norm_layer = keras_config.build_batch_norm()
self.assertIsInstance(batch_norm_layer,
freezable_batch_norm.FreezableBatchNorm)
  def test_return_non_default_batch_norm_params_keras_override(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params(momentum=0.4)
self.assertAlmostEqual(batch_norm_params['momentum'], 0.4)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
def test_do_not_use_batch_norm_if_default_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertFalse(keras_config.use_batch_norm())
self.assertEqual(keras_config.batch_norm_params(), {})
# The batch norm builder should build an identity Lambda layer
identity_layer = keras_config.build_batch_norm()
self.assertIsInstance(identity_layer,
tf.keras.layers.Lambda)
def test_do_not_use_bias_if_batch_norm_center_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: true
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertTrue(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
hyperparams = keras_config.params()
self.assertFalse(hyperparams['use_bias'])
def test_force_use_bias_if_batch_norm_center_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: true
scale: true
epsilon: 0.03
train: true
}
force_use_bias: true
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertTrue(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
hyperparams = keras_config.params()
self.assertTrue(hyperparams['use_bias'])
def test_use_none_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertIsNone(
keras_config.params(include_activation=True)['activation'])
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.identity)
def test_use_relu_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.relu)
def test_use_relu_6_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu6)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.relu6)
def test_use_swish_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: SWISH
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.swish)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.swish)
def test_override_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
new_params = keras_config.params(activation=tf.nn.relu)
self.assertEqual(new_params['activation'], tf.nn.relu)
def test_variance_in_range_with_variance_scaling_initializer_fan_in_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
def test_keras_initializer_by_name(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
keras_initializer_by_name: "glorot_uniform"
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer_arg = keras_config.params()['kernel_initializer']
conv_layer = tf.keras.layers.Conv2D(
filters=16, kernel_size=3, **keras_config.params())
self.assertEqual(initializer_arg, 'glorot_uniform')
self.assertIsInstance(conv_layer.kernel_initializer,
type(tf.keras.initializers.get('glorot_uniform')))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/hyperparams_builder_test.py | hyperparams_builder_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection box coder from configuration."""
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.protos import box_coder_pb2
def build(box_coder_config):
"""Builds a box coder object based on the box coder config.
Args:
box_coder_config: A box_coder.proto object containing the config for the
desired box coder.
Returns:
BoxCoder based on the config.
Raises:
ValueError: On empty box coder proto.
"""
if not isinstance(box_coder_config, box_coder_pb2.BoxCoder):
raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.')
if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder':
return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[
box_coder_config.faster_rcnn_box_coder.y_scale,
box_coder_config.faster_rcnn_box_coder.x_scale,
box_coder_config.faster_rcnn_box_coder.height_scale,
box_coder_config.faster_rcnn_box_coder.width_scale
])
if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder':
return keypoint_box_coder.KeypointBoxCoder(
box_coder_config.keypoint_box_coder.num_keypoints,
scale_factors=[
box_coder_config.keypoint_box_coder.y_scale,
box_coder_config.keypoint_box_coder.x_scale,
box_coder_config.keypoint_box_coder.height_scale,
box_coder_config.keypoint_box_coder.width_scale
])
if (box_coder_config.WhichOneof('box_coder_oneof') ==
'mean_stddev_box_coder'):
return mean_stddev_box_coder.MeanStddevBoxCoder(
stddev=box_coder_config.mean_stddev_box_coder.stddev)
if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder':
return square_box_coder.SquareBoxCoder(scale_factors=[
box_coder_config.square_box_coder.y_scale,
box_coder_config.square_box_coder.x_scale,
box_coder_config.square_box_coder.length_scale
])
raise ValueError('Empty box coder.')
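# Illustrative usage (a sketch only; the scale factors are arbitrary example
# values, not defaults mandated by this module):
#
#   from google.protobuf import text_format
#   from object_detection.protos import box_coder_pb2
#
#   proto = box_coder_pb2.BoxCoder()
#   text_format.Merge("""
#     faster_rcnn_box_coder {
#       y_scale: 10.0
#       x_scale: 10.0
#       height_scale: 5.0
#       width_scale: 5.0
#     }
#   """, proto)
#   box_coder = build(proto)  # -> faster_rcnn_box_coder.FasterRcnnBoxCoder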
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/box_coder_builder.py | box_coder_builder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
import tensorflow.compat.v1 as tf
from object_detection.utils import learning_schedules
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from official.modeling.optimization import ema_optimizer
# pylint: enable=g-import-not-at-top
try:
from tensorflow.contrib import opt as tf_opt # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
pass
def build_optimizers_tf_v1(optimizer_config, global_step=None):
"""Create a TF v1 compatible optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
summary_vars = []
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=config.decay,
momentum=config.momentum_optimizer_value,
epsilon=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=config.epsilon)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
optimizer = tf_opt.MovingAverageOptimizer(
optimizer, average_decay=optimizer_config.moving_average_decay)
return optimizer, summary_vars
def build_optimizers_tf_v2(optimizer_config, global_step=None):
"""Create a TF v2 compatible optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
summary_vars = []
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.RMSprop(
learning_rate,
decay=config.decay,
momentum=config.momentum_optimizer_value,
epsilon=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.SGD(
learning_rate,
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=config.epsilon)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
optimizer = ema_optimizer.ExponentialMovingAverage(
optimizer=optimizer,
average_decay=optimizer_config.moving_average_decay)
return optimizer, summary_vars
def build(config, global_step=None):
  """Creates an optimizer via the TF v2 builder under eager execution,
  and via the TF v1 builder otherwise."""
if tf.executing_eagerly():
return build_optimizers_tf_v2(config, global_step)
else:
return build_optimizers_tf_v1(config, global_step)
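# Illustrative usage (a sketch only; field values are arbitrary examples):
#
#   from google.protobuf import text_format
#   from object_detection.protos import optimizer_pb2
#
#   optimizer_proto = optimizer_pb2.Optimizer()
#   text_format.Merge("""
#     momentum_optimizer {
#       learning_rate {
#         constant_learning_rate { learning_rate: 0.01 }
#       }
#       momentum_optimizer_value: 0.9
#     }
#     use_moving_average: false
#   """, optimizer_proto)
#   optimizer, summary_vars = build(optimizer_proto)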
def _create_learning_rate(learning_rate_config, global_step=None):
"""Create optimizer learning rate based on config.
Args:
learning_rate_config: A LearningRate proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
A learning rate.
Raises:
ValueError: when using an unsupported input data type.
"""
if global_step is None:
global_step = tf.train.get_or_create_global_step()
learning_rate = None
learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
if learning_rate_type == 'constant_learning_rate':
config = learning_rate_config.constant_learning_rate
learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,
name='learning_rate')
if learning_rate_type == 'exponential_decay_learning_rate':
config = learning_rate_config.exponential_decay_learning_rate
learning_rate = learning_schedules.exponential_decay_with_burnin(
global_step,
config.initial_learning_rate,
config.decay_steps,
config.decay_factor,
burnin_learning_rate=config.burnin_learning_rate,
burnin_steps=config.burnin_steps,
min_learning_rate=config.min_learning_rate,
staircase=config.staircase)
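    # Informally: the rate holds at burnin_learning_rate for burnin_steps,
    # then decays as initial_learning_rate * decay_factor ** (step /
    # decay_steps), floored at min_learning_rate.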
if learning_rate_type == 'manual_step_learning_rate':
config = learning_rate_config.manual_step_learning_rate
if not config.schedule:
raise ValueError('Empty learning rate schedule.')
learning_rate_step_boundaries = [x.step for x in config.schedule]
learning_rate_sequence = [config.initial_learning_rate]
learning_rate_sequence += [x.learning_rate for x in config.schedule]
learning_rate = learning_schedules.manual_stepping(
global_step, learning_rate_step_boundaries,
learning_rate_sequence, config.warmup)
if learning_rate_type == 'cosine_decay_learning_rate':
config = learning_rate_config.cosine_decay_learning_rate
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step,
config.learning_rate_base,
config.total_steps,
config.warmup_learning_rate,
config.warmup_steps,
config.hold_base_rate_steps)
if learning_rate is None:
raise ValueError('Learning_rate %s not supported.' % learning_rate_type)
return learning_rate
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/optimizer_builder.py | optimizer_builder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for region_similarity_calculator_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import region_similarity_calculator_builder
from object_detection.core import region_similarity_calculator
from object_detection.protos import region_similarity_calculator_pb2 as sim_calc_pb2
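# For reference: IoU is intersection area over union area; IoA is
# intersection area over the area of one box only (an asymmetric measure);
# NegSqDist is the negated squared distance between boxes.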
class RegionSimilarityCalculatorBuilderTest(tf.test.TestCase):
def testBuildIoaSimilarityCalculator(self):
similarity_calc_text_proto = """
ioa_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
    self.assertIsInstance(similarity_calc,
                          region_similarity_calculator.IoaSimilarity)
def testBuildIouSimilarityCalculator(self):
similarity_calc_text_proto = """
iou_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
    self.assertIsInstance(similarity_calc,
                          region_similarity_calculator.IouSimilarity)
def testBuildNegSqDistSimilarityCalculator(self):
similarity_calc_text_proto = """
neg_sq_dist_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
    self.assertIsInstance(similarity_calc,
                          region_similarity_calculator.NegSqDistSimilarity)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/region_similarity_calculator_builder_test.py | region_similarity_calculator_builder_test.py |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_builder under TensorFlow 1.X."""
import unittest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from object_detection.builders import model_builder
from object_detection.builders import model_builder_test
from object_detection.meta_architectures import context_rcnn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.protos import losses_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class ModelBuilderTF1Test(model_builder_test.ModelBuilderTest):
def default_ssd_feature_extractor(self):
return 'ssd_resnet50_v1_fpn'
def default_faster_rcnn_feature_extractor(self):
return 'faster_rcnn_resnet101'
def ssd_feature_extractors(self):
return model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP
def get_override_base_feature_extractor_hyperparams(self, extractor_type):
return extractor_type in {'ssd_inception_v2', 'ssd_inception_v3'}
def faster_rcnn_feature_extractors(self):
return model_builder.FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP
@parameterized.parameters(True, False)
def test_create_context_rcnn_from_config_with_params(self, is_training):
model_proto = self.create_default_faster_rcnn_model_proto()
model_proto.faster_rcnn.context_config.attention_bottleneck_dimension = 10
model_proto.faster_rcnn.context_config.attention_temperature = 0.5
model = model_builder.build(model_proto, is_training=is_training)
self.assertIsInstance(model, context_rcnn_meta_arch.ContextRCNNMetaArch)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/model_builder_tf1_test.py | model_builder_tf1_test.py |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_builder under TensorFlow 2.X."""
import os
import unittest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import model_builder
from object_detection.builders import model_builder_test
from object_detection.core import losses
from object_detection.meta_architectures import deepmac_meta_arch
from object_detection.models import center_net_hourglass_feature_extractor
from object_detection.models.keras_models import hourglass_network
from object_detection.protos import center_net_pb2
from object_detection.protos import model_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ModelBuilderTF2Test(
model_builder_test.ModelBuilderTest, parameterized.TestCase):
def default_ssd_feature_extractor(self):
return 'ssd_resnet50_v1_fpn_keras'
def default_faster_rcnn_feature_extractor(self):
return 'faster_rcnn_resnet101_keras'
def ssd_feature_extractors(self):
return model_builder.SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
def get_override_base_feature_extractor_hyperparams(self, extractor_type):
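    # The empty set means no TF2 Keras extractor needs the override here.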
return extractor_type in {}
def faster_rcnn_feature_extractors(self):
return model_builder.FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
def get_fake_label_map_file_path(self):
keypoint_spec_text = """
item {
name: "/m/01g317"
id: 1
display_name: "person"
keypoints {
id: 0
label: 'nose'
}
keypoints {
id: 1
label: 'left_shoulder'
}
keypoints {
id: 2
label: 'right_shoulder'
}
keypoints {
id: 3
label: 'hip'
}
}
"""
keypoint_label_map_path = os.path.join(
self.get_temp_dir(), 'keypoint_label_map')
with tf.gfile.Open(keypoint_label_map_path, 'wb') as f:
f.write(keypoint_spec_text)
return keypoint_label_map_path
def get_fake_keypoint_proto(self, customize_head_params=False):
task_proto_txt = """
task_name: "human_pose"
task_loss_weight: 0.9
keypoint_regression_loss_weight: 1.0
keypoint_heatmap_loss_weight: 0.1
keypoint_offset_loss_weight: 0.5
heatmap_bias_init: 2.14
keypoint_class_name: "/m/01g317"
loss {
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
localization_loss {
l1_localization_loss {
}
}
}
keypoint_label_to_std {
key: "nose"
value: 0.3
}
keypoint_label_to_std {
key: "hip"
value: 0.0
}
keypoint_candidate_score_threshold: 0.3
num_candidates_per_keypoint: 12
peak_max_pool_kernel_size: 5
unmatched_keypoint_score: 0.05
box_scale: 1.7
candidate_search_scale: 0.2
candidate_ranking_mode: "score_distance_ratio"
offset_peak_radius: 3
per_keypoint_offset: true
predict_depth: true
per_keypoint_depth: true
keypoint_depth_loss_weight: 0.3
"""
if customize_head_params:
task_proto_txt += """
heatmap_head_params {
num_filters: 64
num_filters: 32
kernel_sizes: 5
kernel_sizes: 3
}
offset_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
config = text_format.Merge(task_proto_txt,
center_net_pb2.CenterNet.KeypointEstimation())
return config
def get_fake_object_center_proto(self, customize_head_params=False):
proto_txt = """
object_center_loss_weight: 0.5
heatmap_bias_init: 3.14
min_box_overlap_iou: 0.2
max_box_predictions: 15
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
"""
if customize_head_params:
proto_txt += """
center_head_params {
num_filters: 64
num_filters: 32
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectCenterParams())
def get_fake_object_center_from_keypoints_proto(self):
proto_txt = """
object_center_loss_weight: 0.5
heatmap_bias_init: 3.14
min_box_overlap_iou: 0.2
max_box_predictions: 15
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
keypoint_weights_for_center: 1.0
keypoint_weights_for_center: 0.0
keypoint_weights_for_center: 1.0
keypoint_weights_for_center: 0.0
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectCenterParams())
def get_fake_object_detection_proto(self, customize_head_params=False):
proto_txt = """
task_loss_weight: 0.5
offset_loss_weight: 0.1
scale_loss_weight: 0.2
localization_loss {
l1_localization_loss {
}
}
"""
if customize_head_params:
proto_txt += """
scale_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectDetection())
def get_fake_mask_proto(self, customize_head_params=False):
proto_txt = """
task_loss_weight: 0.7
classification_loss {
weighted_softmax {}
}
mask_height: 8
mask_width: 8
score_threshold: 0.7
heatmap_bias_init: -2.0
"""
if customize_head_params:
proto_txt += """
mask_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.MaskEstimation())
def get_fake_densepose_proto(self):
proto_txt = """
task_loss_weight: 0.5
class_id: 0
loss {
classification_loss {
weighted_softmax {}
}
localization_loss {
l1_localization_loss {
}
}
}
num_parts: 24
part_loss_weight: 1.0
coordinate_loss_weight: 2.0
upsample_to_input_res: true
heatmap_bias_init: -2.0
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.DensePoseEstimation())
@parameterized.parameters(
{'customize_head_params': True},
{'customize_head_params': False}
)
def test_create_center_net_model(self, customize_head_params):
"""Test building a CenterNet model from proto txt."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "hourglass_52"
channel_stds: [4, 5, 6]
bgr_ordering: true
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Merge(proto_txt, model_pb2.DetectionModel())
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_proto(
customize_head_params=customize_head_params))
config.center_net.object_detection_task.CopyFrom(
self.get_fake_object_detection_proto(
customize_head_params=customize_head_params))
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto(
customize_head_params=customize_head_params))
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
config.center_net.mask_estimation_task.CopyFrom(
self.get_fake_mask_proto(
customize_head_params=customize_head_params))
config.center_net.densepose_estimation_task.CopyFrom(
self.get_fake_densepose_proto())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
# Check object center related parameters.
self.assertEqual(model._num_classes, 10)
self.assertIsInstance(model._center_params.classification_loss,
losses.PenaltyReducedLogisticFocalLoss)
self.assertEqual(model._center_params.classification_loss._alpha, 3.0)
self.assertEqual(model._center_params.classification_loss._beta, 4.0)
self.assertAlmostEqual(model._center_params.min_box_overlap_iou, 0.2)
self.assertAlmostEqual(
model._center_params.heatmap_bias_init, 3.14, places=4)
self.assertEqual(model._center_params.max_box_predictions, 15)
if customize_head_params:
self.assertEqual(model._center_params.center_head_num_filters, [64, 32])
self.assertEqual(model._center_params.center_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._center_params.center_head_num_filters, [256])
self.assertEqual(model._center_params.center_head_kernel_sizes, [3])
# Check object detection related parameters.
self.assertAlmostEqual(model._od_params.offset_loss_weight, 0.1)
self.assertAlmostEqual(model._od_params.scale_loss_weight, 0.2)
self.assertAlmostEqual(model._od_params.task_loss_weight, 0.5)
self.assertIsInstance(model._od_params.localization_loss,
losses.L1LocalizationLoss)
self.assertEqual(model._od_params.offset_head_num_filters, [256])
self.assertEqual(model._od_params.offset_head_kernel_sizes, [3])
if customize_head_params:
self.assertEqual(model._od_params.scale_head_num_filters, [128, 64])
self.assertEqual(model._od_params.scale_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._od_params.scale_head_num_filters, [256])
self.assertEqual(model._od_params.scale_head_kernel_sizes, [3])
# Check keypoint estimation related parameters.
kp_params = model._kp_params_dict['human_pose']
self.assertAlmostEqual(kp_params.task_loss_weight, 0.9)
self.assertAlmostEqual(kp_params.keypoint_regression_loss_weight, 1.0)
self.assertAlmostEqual(kp_params.keypoint_offset_loss_weight, 0.5)
self.assertAlmostEqual(kp_params.heatmap_bias_init, 2.14, places=4)
self.assertEqual(kp_params.classification_loss._alpha, 3.0)
self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3])
self.assertEqual(kp_params.keypoint_labels,
['nose', 'left_shoulder', 'right_shoulder', 'hip'])
self.assertAllClose(kp_params.keypoint_std_dev, [0.3, 1.0, 1.0, 0.0])
self.assertEqual(kp_params.classification_loss._beta, 4.0)
self.assertIsInstance(kp_params.localization_loss,
losses.L1LocalizationLoss)
self.assertAlmostEqual(kp_params.keypoint_candidate_score_threshold, 0.3)
self.assertEqual(kp_params.num_candidates_per_keypoint, 12)
self.assertEqual(kp_params.peak_max_pool_kernel_size, 5)
self.assertAlmostEqual(kp_params.unmatched_keypoint_score, 0.05)
self.assertAlmostEqual(kp_params.box_scale, 1.7)
self.assertAlmostEqual(kp_params.candidate_search_scale, 0.2)
self.assertEqual(kp_params.candidate_ranking_mode, 'score_distance_ratio')
self.assertEqual(kp_params.offset_peak_radius, 3)
self.assertEqual(kp_params.per_keypoint_offset, True)
self.assertEqual(kp_params.predict_depth, True)
self.assertEqual(kp_params.per_keypoint_depth, True)
self.assertAlmostEqual(kp_params.keypoint_depth_loss_weight, 0.3)
if customize_head_params:
# Set by the config.
self.assertEqual(kp_params.heatmap_head_num_filters, [64, 32])
self.assertEqual(kp_params.heatmap_head_kernel_sizes, [5, 3])
self.assertEqual(kp_params.offset_head_num_filters, [128, 64])
self.assertEqual(kp_params.offset_head_kernel_sizes, [5, 3])
else:
# Default values:
self.assertEqual(kp_params.heatmap_head_num_filters, [256])
self.assertEqual(kp_params.heatmap_head_kernel_sizes, [3])
self.assertEqual(kp_params.offset_head_num_filters, [256])
self.assertEqual(kp_params.offset_head_kernel_sizes, [3])
# Check mask related parameters.
self.assertAlmostEqual(model._mask_params.task_loss_weight, 0.7)
self.assertIsInstance(model._mask_params.classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertEqual(model._mask_params.mask_height, 8)
self.assertEqual(model._mask_params.mask_width, 8)
self.assertAlmostEqual(model._mask_params.score_threshold, 0.7)
self.assertAlmostEqual(
model._mask_params.heatmap_bias_init, -2.0, places=4)
if customize_head_params:
self.assertEqual(model._mask_params.mask_head_num_filters, [128, 64])
self.assertEqual(model._mask_params.mask_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._mask_params.mask_head_num_filters, [256])
self.assertEqual(model._mask_params.mask_head_kernel_sizes, [3])
# Check DensePose related parameters.
self.assertEqual(model._densepose_params.class_id, 0)
self.assertIsInstance(model._densepose_params.classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertIsInstance(model._densepose_params.localization_loss,
losses.L1LocalizationLoss)
self.assertAlmostEqual(model._densepose_params.part_loss_weight, 1.0)
self.assertAlmostEqual(model._densepose_params.coordinate_loss_weight, 2.0)
self.assertEqual(model._densepose_params.num_parts, 24)
self.assertAlmostEqual(model._densepose_params.task_loss_weight, 0.5)
self.assertTrue(model._densepose_params.upsample_to_input_res)
self.assertEqual(model._densepose_params.upsample_method, 'bilinear')
self.assertAlmostEqual(
model._densepose_params.heatmap_bias_init, -2.0, places=4)
# Check feature extractor parameters.
self.assertIsInstance(
model._feature_extractor, center_net_hourglass_feature_extractor
.CenterNetHourglassFeatureExtractor)
self.assertAllClose(model._feature_extractor._channel_means, [0, 0, 0])
self.assertAllClose(model._feature_extractor._channel_stds, [4, 5, 6])
self.assertTrue(model._feature_extractor._bgr_ordering)
backbone = model._feature_extractor._network
self.assertIsInstance(backbone, hourglass_network.HourglassNetwork)
    self.assertEqual(backbone.num_hourglasses, 1)
def test_create_center_net_model_from_keypoints(self):
"""Test building a CenterNet model from proto txt."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "hourglass_52"
channel_stds: [4, 5, 6]
bgr_ordering: true
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Only add object center and keypoint estimation configs here.
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_from_keypoints_proto())
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto())
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
# Check object center related parameters.
self.assertEqual(model._num_classes, 10)
self.assertEqual(model._center_params.keypoint_weights_for_center,
[1.0, 0.0, 1.0, 0.0])
# Check keypoint estimation related parameters.
kp_params = model._kp_params_dict['human_pose']
self.assertAlmostEqual(kp_params.task_loss_weight, 0.9)
self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3])
self.assertEqual(kp_params.keypoint_labels,
['nose', 'left_shoulder', 'right_shoulder', 'hip'])
def test_create_center_net_model_mobilenet(self):
"""Test building a CenterNet model using bilinear interpolation."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "mobilenet_v2_fpn"
depth_multiplier: 1.0
use_separable_conv: true
upsampling_interpolation: "bilinear"
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Only add object center and keypoint estimation configs here.
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_from_keypoints_proto())
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto())
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
feature_extractor = model._feature_extractor
# Verify the upsampling layers in the FPN use 'bilinear' interpolation.
fpn = feature_extractor.get_layer('model_1')
num_up_sampling2d_layers = 0
for layer in fpn.layers:
if 'up_sampling2d' in layer.name:
num_up_sampling2d_layers += 1
self.assertEqual('bilinear', layer.interpolation)
# Verify that there are up_sampling2d layers.
self.assertGreater(num_up_sampling2d_layers, 0)
def test_create_center_net_deepmac(self):
"""Test building a CenterNet DeepMAC model."""
proto_txt = """
center_net {
num_classes: 90
feature_extractor {
type: "hourglass_52"
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
object_detection_task {
task_loss_weight: 1.0
offset_loss_weight: 1.0
scale_loss_weight: 0.1
localization_loss {
l1_localization_loss {
}
}
}
object_center_params {
object_center_loss_weight: 1.0
min_box_overlap_iou: 0.7
max_box_predictions: 100
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 2.0
beta: 4.0
}
}
}
deepmac_mask_estimation {
classification_loss {
weighted_sigmoid {}
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
self.assertIsInstance(model, deepmac_meta_arch.DeepMACMetaArch)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/model_builder_tf2_test.py | model_builder_tf2_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import context_manager
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.core import freezable_sync_batch_norm
# pylint: enable=g-import-not-at-top
class KerasLayerHyperparams(object):
"""
A hyperparameter configuration object for Keras layers used in
Object Detection models.
"""
def __init__(self, hyperparams_config):
"""Builds keras hyperparameter config for layers based on the proto config.
It automatically converts from Slim layer hyperparameter configs to
Keras layer hyperparameters. Namely, it:
- Builds Keras initializers/regularizers instead of Slim ones
- sets weights_regularizer/initializer to kernel_regularizer/initializer
- converts batchnorm decay to momentum
- converts Slim l2 regularizer weights to the equivalent Keras l2 weights
Contains a hyperparameter configuration for ops that specifies kernel
initializer, kernel regularizer, activation. Also contains parameters for
batch norm operators based on the configuration.
Note that if the batch_norm parameters are not specified in the config
(i.e. left to default) then batch norm is excluded from the config.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
self._batch_norm_params = None
self._use_sync_batch_norm = False
if hyperparams_config.HasField('batch_norm'):
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.batch_norm)
elif hyperparams_config.HasField('sync_batch_norm'):
self._use_sync_batch_norm = True
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.sync_batch_norm)
self._force_use_bias = hyperparams_config.force_use_bias
self._activation_fn = _build_activation_fn(hyperparams_config.activation)
# TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv
# (Those might use depthwise_* instead of kernel_*)
# We should probably switch to using build_conv2d_layer and
# build_depthwise_conv2d_layer methods instead.
self._op_params = {
'kernel_regularizer': _build_keras_regularizer(
hyperparams_config.regularizer),
'kernel_initializer': _build_initializer(
hyperparams_config.initializer, build_for_keras=True),
'activation': _build_activation_fn(hyperparams_config.activation)
}
def use_batch_norm(self):
return self._batch_norm_params is not None
def use_sync_batch_norm(self):
return self._use_sync_batch_norm
def force_use_bias(self):
return self._force_use_bias
def use_bias(self):
return (self._force_use_bias or not
(self.use_batch_norm() and self.batch_norm_params()['center']))
def batch_norm_params(self, **overrides):
"""Returns a dict containing batchnorm layer construction hyperparameters.
Optionally overrides values in the batchnorm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
**overrides: keyword arguments to override in the hyperparams dictionary
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
if self._batch_norm_params is None:
new_batch_norm_params = dict()
else:
new_batch_norm_params = self._batch_norm_params.copy()
new_batch_norm_params.update(overrides)
return new_batch_norm_params
def build_batch_norm(self, training=None, **overrides):
"""Returns a Batch Normalization layer with the appropriate hyperparams.
If the hyperparams are configured to not use batch normalization,
    this will return a Keras Lambda layer that only applies tf.identity,
without doing any normalization.
Optionally overrides values in the batch_norm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
training: if True, the normalization layer will normalize using the batch
statistics. If False, the normalization layer will be frozen and will
act as if it is being used for inference. If None, the layer
will look up the Keras learning phase at `call` time to decide what to
do.
**overrides: batch normalization construction args to override from the
batch_norm hyperparams dictionary.
Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True),
or a Keras Lambda layer that applies the identity (if use_batch_norm()
is False)
"""
if self.use_batch_norm():
if self._use_sync_batch_norm:
return freezable_sync_batch_norm.FreezableSyncBatchNorm(
training=training, **self.batch_norm_params(**overrides))
else:
return freezable_batch_norm.FreezableBatchNorm(
training=training, **self.batch_norm_params(**overrides))
else:
return tf.keras.layers.Lambda(tf.identity)
def build_activation_layer(self, name='activation'):
"""Returns a Keras layer that applies the desired activation function.
Args:
name: The name to assign the Keras layer.
Returns: A Keras lambda layer that applies the activation function
specified in the hyperparam config, or applies the identity if the
activation function is None.
"""
if self._activation_fn:
return tf.keras.layers.Lambda(self._activation_fn, name=name)
else:
return tf.keras.layers.Lambda(tf.identity, name=name)
def get_regularizer_weight(self):
"""Returns the l1 or l2 regularizer weight.
Returns: A float value corresponding to the l1 or l2 regularization weight,
    or None if neither l1 nor l2 regularization is defined.
"""
regularizer = self._op_params['kernel_regularizer']
if hasattr(regularizer, 'l1'):
return float(regularizer.l1)
elif hasattr(regularizer, 'l2'):
return float(regularizer.l2)
else:
return None
def params(self, include_activation=False, **overrides):
"""Returns a dict containing the layer construction hyperparameters to use.
Optionally overrides values in the returned dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
include_activation: If False, activation in the returned dictionary will
be set to `None`, and the activation must be applied via a separate
layer created by `build_activation_layer`. If True, `activation` in the
output param dictionary will be set to the activation function
specified in the hyperparams config.
**overrides: keyword arguments to override in the hyperparams dictionary.
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
new_params = self._op_params.copy()
new_params['activation'] = None
if include_activation:
new_params['activation'] = self._activation_fn
new_params['use_bias'] = self.use_bias()
new_params.update(**overrides)
return new_params
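# A minimal usage sketch for KerasLayerHyperparams (illustrative only; the
# text proto below is an assumed example config, not taken from a real
# pipeline):
#
#   from google.protobuf import text_format
#   proto = text_format.Parse("""
#     regularizer { l2_regularizer { weight: 0.0004 } }
#     initializer { truncated_normal_initializer { stddev: 0.03 } }
#     activation: RELU_6
#     batch_norm { decay: 0.997 epsilon: 0.001 }
#   """, hyperparams_pb2.Hyperparams())
#   hyperparams = KerasLayerHyperparams(proto)
#   conv = tf.keras.layers.Conv2D(64, 3, **hyperparams.params())
#   norm = hyperparams.build_batch_norm(training=True)
#   activation = hyperparams.build_activation_layer()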
def build(hyperparams_config, is_training):
"""Builds tf-slim arg_scope for convolution ops based on the config.
Returns an arg_scope to use for convolution ops containing weights
initializer, weights regularizer, activation function, batch norm function
and batch norm parameters based on the configuration.
Note that if no normalization parameters are specified in the config,
(i.e. left to default) then both batch norm and group norm are excluded
from the arg_scope.
The batch norm parameters are set for updates based on `is_training` argument
and conv_hyperparams_config.batch_norm.train parameter. During training, they
are updated only if batch_norm.train parameter is true. However, during eval,
no updates are made to the batch norm variables. In both cases, their current
values are used during forward pass.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
is_training: Whether the network is in training mode.
Returns:
arg_scope_fn: A function to construct tf-slim arg_scope containing
hyperparameters for ops.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
if hyperparams_config.force_use_bias:
raise ValueError('Hyperparams force_use_bias only supported by '
'KerasLayerHyperparams.')
if hyperparams_config.HasField('sync_batch_norm'):
raise ValueError('Hyperparams sync_batch_norm only supported by '
'KerasLayerHyperparams.')
normalizer_fn = None
batch_norm_params = None
if hyperparams_config.HasField('batch_norm'):
normalizer_fn = slim.batch_norm
batch_norm_params = _build_batch_norm_params(
hyperparams_config.batch_norm, is_training)
if hyperparams_config.HasField('group_norm'):
normalizer_fn = slim.group_norm
affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
if hyperparams_config.HasField('op') and (
hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
affected_ops = [slim.fully_connected]
def scope_fn():
with (slim.arg_scope([slim.batch_norm], **batch_norm_params)
if batch_norm_params is not None else
context_manager.IdentityContextManager()):
with slim.arg_scope(
affected_ops,
weights_regularizer=_build_slim_regularizer(
hyperparams_config.regularizer),
weights_initializer=_build_initializer(
hyperparams_config.initializer),
activation_fn=_build_activation_fn(hyperparams_config.activation),
normalizer_fn=normalizer_fn) as sc:
return sc
return scope_fn
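# A minimal usage sketch for the Slim arg_scope builder above (illustrative
# only; `hyperparams_config` is assumed to be a parsed
# hyperparams_pb2.Hyperparams message and `images` an input tensor):
#
#   scope_fn = build(hyperparams_config, is_training=True)
#   with slim.arg_scope(scope_fn()):
#     net = slim.conv2d(images, 32, [3, 3])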
def _build_activation_fn(activation_fn):
"""Builds a callable activation from config.
Args:
activation_fn: hyperparams_pb2.Hyperparams.activation
Returns:
Callable activation function.
Raises:
ValueError: On unknown activation function.
"""
if activation_fn == hyperparams_pb2.Hyperparams.NONE:
return None
if activation_fn == hyperparams_pb2.Hyperparams.RELU:
return tf.nn.relu
if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
return tf.nn.relu6
if activation_fn == hyperparams_pb2.Hyperparams.SWISH:
return tf.nn.swish
raise ValueError('Unknown activation function: {}'.format(activation_fn))
def _build_slim_regularizer(regularizer):
"""Builds a tf-slim regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
tf-slim regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
def _build_keras_regularizer(regularizer):
"""Builds a keras regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
Keras regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
# The Keras L2 regularizer weight differs from the Slim L2 regularizer
# weight by a factor of 2
return tf.keras.regularizers.l2(
float(regularizer.l2_regularizer.weight * 0.5))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
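# Worked example of the factor-of-2 conversion above (illustrative): Slim's
# l2_regularizer penalizes weight * sum(x**2) / 2 (it is built on
# tf.nn.l2_loss), while Keras' l2 penalizes l2 * sum(x**2). A Slim weight of
# 0.0004 therefore maps to a Keras weight of 0.0002, and both produce the
# same penalty 0.0002 * sum(x**2).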
def _build_initializer(initializer, build_for_keras=False):
"""Build a tf initializer from config.
Args:
    initializer: hyperparams_pb2.Hyperparams.initializer proto.
build_for_keras: Whether the initializers should be built for Keras
operators. If false builds for Slim.
Returns:
tf initializer or string corresponding to the tf keras initializer name.
Raises:
ValueError: On unknown initializer.
"""
initializer_oneof = initializer.WhichOneof('initializer_oneof')
if initializer_oneof == 'truncated_normal_initializer':
return tf.truncated_normal_initializer(
mean=initializer.truncated_normal_initializer.mean,
stddev=initializer.truncated_normal_initializer.stddev)
if initializer_oneof == 'random_normal_initializer':
return tf.random_normal_initializer(
mean=initializer.random_normal_initializer.mean,
stddev=initializer.random_normal_initializer.stddev)
if initializer_oneof == 'variance_scaling_initializer':
enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
DESCRIPTOR.enum_types_by_name['Mode'])
mode = enum_descriptor.values_by_number[initializer.
variance_scaling_initializer.
mode].name
if build_for_keras:
if initializer.variance_scaling_initializer.uniform:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='uniform')
else:
# In TF 1.9 release and earlier, the truncated_normal distribution was
# not supported correctly. So, in these earlier versions of tensorflow,
# the ValueError will be raised, and we manually truncate the
# distribution scale.
#
# It is insufficient to just set distribution to `normal` from the
# start, because the `normal` distribution in newer Tensorflow versions
# creates a truncated distribution, whereas it created untruncated
# distributions in older versions.
try:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='truncated_normal')
except ValueError:
truncate_constant = 0.87962566103423978
truncated_scale = initializer.variance_scaling_initializer.factor / (
truncate_constant * truncate_constant
)
return tf.variance_scaling_initializer(
scale=truncated_scale,
mode=mode.lower(),
distribution='normal')
else:
return slim.variance_scaling_initializer(
factor=initializer.variance_scaling_initializer.factor,
mode=mode,
uniform=initializer.variance_scaling_initializer.uniform)
if initializer_oneof == 'keras_initializer_by_name':
if build_for_keras:
return initializer.keras_initializer_by_name
else:
raise ValueError(
'Unsupported non-Keras usage of keras_initializer_by_name: {}'.format(
initializer.keras_initializer_by_name))
if initializer_oneof is None:
return None
raise ValueError('Unknown initializer function: {}'.format(
initializer_oneof))
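# For example (illustrative), an initializer proto of
#   truncated_normal_initializer { mean: 0.0 stddev: 0.03 }
# builds tf.truncated_normal_initializer(mean=0.0, stddev=0.03), while
#   keras_initializer_by_name: "he_normal"
# is returned as the bare string 'he_normal' when build_for_keras=True and is
# resolved later by tf.keras.initializers.get().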
def _build_batch_norm_params(batch_norm, is_training):
"""Build a dictionary of batch_norm params from config.
Args:
    batch_norm: hyperparams_pb2.Hyperparams.batch_norm proto.
    is_training: Whether the model is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params = {
'decay': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
# Remove is_training parameter from here and deprecate it in the proto
# once we refactor Faster RCNN models to set is_training through an outer
# arg_scope in the meta architecture.
'is_training': is_training and batch_norm.train,
}
return batch_norm_params
def _build_keras_batch_norm_params(batch_norm):
"""Build a dictionary of Keras BatchNormalization params from config.
Args:
    batch_norm: hyperparams_pb2.Hyperparams.batch_norm proto.
Returns:
A dictionary containing Keras BatchNormalization parameters.
"""
# Note: Although decay is defined to be 1 - momentum in batch_norm,
# decay in the slim batch_norm layers was erroneously defined and is
# actually the same as momentum in the Keras batch_norm layers.
# For context, see: github.com/keras-team/keras/issues/6839
batch_norm_params = {
'momentum': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
}
return batch_norm_params
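# For example (illustrative), a batch_norm proto of
#   decay: 0.997 center: true scale: true epsilon: 0.001
# yields {'momentum': 0.997, 'center': True, 'scale': True, 'epsilon': 0.001},
# which can be passed directly to tf.keras.layers.BatchNormalization.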
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/hyperparams_builder.py | hyperparams_builder.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generator_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.anchor_generators import flexible_grid_anchor_generator
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.anchor_generators import multiscale_grid_anchor_generator
from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2
class AnchorGeneratorBuilderTest(tf.test.TestCase):
def assert_almost_list_equal(self, expected_list, actual_list, delta=None):
self.assertEqual(len(expected_list), len(actual_list))
for expected_item, actual_item in zip(expected_list, actual_list):
self.assertAlmostEqual(expected_item, actual_item, delta=delta)
def test_build_grid_anchor_generator_with_defaults(self):
anchor_generator_text_proto = """
grid_anchor_generator {
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
grid_anchor_generator.GridAnchorGenerator)
self.assertListEqual(anchor_generator_object._scales, [])
self.assertListEqual(anchor_generator_object._aspect_ratios, [])
self.assertAllEqual(anchor_generator_object._anchor_offset, [0, 0])
self.assertAllEqual(anchor_generator_object._anchor_stride, [16, 16])
self.assertAllEqual(anchor_generator_object._base_anchor_size, [256, 256])
def test_build_grid_anchor_generator_with_non_default_parameters(self):
anchor_generator_text_proto = """
grid_anchor_generator {
height: 128
width: 512
height_stride: 10
width_stride: 20
height_offset: 30
width_offset: 40
scales: [0.4, 2.2]
aspect_ratios: [0.3, 4.5]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
grid_anchor_generator.GridAnchorGenerator)
self.assert_almost_list_equal(anchor_generator_object._scales,
[0.4, 2.2])
self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,
[0.3, 4.5])
self.assertAllEqual(anchor_generator_object._anchor_offset, [30, 40])
self.assertAllEqual(anchor_generator_object._anchor_stride, [10, 20])
self.assertAllEqual(anchor_generator_object._base_anchor_size, [128, 512])
def test_build_ssd_anchor_generator_with_defaults(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, 0.2, 0.2),
(0.35, 0.418),
(0.499, 0.570),
(0.649, 0.721),
(0.799, 0.871),
(0.949, 0.974)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
[(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])
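  # How the expected scales above arise (sketch): with the defaults
  # min_scale=0.2, max_scale=0.95 and num_layers=6, base scales are linearly
  # interpolated as 0.2, 0.35, 0.5, 0.65, 0.8, 0.95. Each layer also gets the
  # intermediate scale sqrt(scale_k * scale_{k+1}) (e.g. sqrt(0.35 * 0.499)
  # ~= 0.418, and sqrt(0.95 * 1.0) ~= 0.974 for the last layer), and the
  # lowest layer is replaced by the reduced-boxes triplet (0.1, 0.2, 0.2)
  # when reduce_boxes_in_lowest_layer is true (the default).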
def test_build_ssd_anchor_generator_with_custom_scales(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, math.sqrt(0.1 * 0.15)),
(0.15, math.sqrt(0.15 * 0.2)),
(0.2, math.sqrt(0.2 * 0.4)),
(0.4, math.sqrt(0.4 * 0.6)),
(0.6, math.sqrt(0.6 * 0.8)),
(0.8, math.sqrt(0.8 * 1.0))]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [0.5]
interpolated_scale_aspect_ratio: 0.5
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
6 * [(0.5, 0.5)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
def test_build_ssd_anchor_generator_without_reduced_boxes(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.2, 0.264),
(0.35, 0.418),
(0.499, 0.570),
(0.649, 0.721),
(0.799, 0.871),
(0.949, 0.974)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
6 * [(1.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])
def test_build_ssd_anchor_generator_with_non_default_parameters(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
num_layers: 2
min_scale: 0.3
max_scale: 0.8
aspect_ratios: [2.0]
height_stride: 16
height_stride: 32
width_stride: 20
width_stride: 30
height_offset: 8
height_offset: 16
width_offset: 0
width_offset: 10
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator)
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, 0.3, 0.3), (0.8, 0.894)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
[(1.0, 2.0, 0.5), (2.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
for actual_strides, expected_strides in zip(
list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):
self.assert_almost_list_equal(expected_strides, actual_strides)
for actual_offsets, expected_offsets in zip(
list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]):
self.assert_almost_list_equal(expected_offsets, actual_offsets)
self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])
  def test_raise_value_error_on_empty_anchor_generator(self):
anchor_generator_text_proto = """
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
with self.assertRaises(ValueError):
anchor_generator_builder.build(anchor_generator_proto)
def test_build_multiscale_anchor_generator_custom_aspect_ratios(self):
anchor_generator_text_proto = """
multiscale_anchor_generator {
aspect_ratios: [1.0]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiscale_grid_anchor_generator.
MultiscaleGridAnchorGenerator)
for level, anchor_grid_info in zip(
range(3, 8), anchor_generator_object._anchor_grid_info):
self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info']))
      self.assertEqual(level, anchor_grid_info['level'])
self.assertEqual(len(anchor_grid_info['info']), 4)
self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5])
      self.assertAllClose(anchor_grid_info['info'][1], [1.0])
self.assertAllClose(anchor_grid_info['info'][2],
[4.0 * 2**level, 4.0 * 2**level])
self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level])
self.assertTrue(anchor_generator_object._normalize_coordinates)
def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates(
self):
anchor_generator_text_proto = """
multiscale_anchor_generator {
aspect_ratios: [1.0]
normalize_coordinates: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
multiscale_grid_anchor_generator.
MultiscaleGridAnchorGenerator)
self.assertFalse(anchor_generator_object._normalize_coordinates)
def test_build_flexible_anchor_generator(self):
anchor_generator_text_proto = """
flexible_grid_anchor_generator {
anchor_grid {
base_sizes: [1.5]
aspect_ratios: [1.0]
height_stride: 16
width_stride: 20
height_offset: 8
width_offset: 9
}
anchor_grid {
base_sizes: [1.0, 2.0]
aspect_ratios: [1.0, 0.5]
height_stride: 32
width_stride: 30
height_offset: 10
width_offset: 11
}
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertIsInstance(anchor_generator_object,
flexible_grid_anchor_generator.
FlexibleGridAnchorGenerator)
for actual_base_sizes, expected_base_sizes in zip(
list(anchor_generator_object._base_sizes), [(1.5,), (1.0, 2.0)]):
self.assert_almost_list_equal(expected_base_sizes, actual_base_sizes)
for actual_aspect_ratios, expected_aspect_ratios in zip(
list(anchor_generator_object._aspect_ratios), [(1.0,), (1.0, 0.5)]):
self.assert_almost_list_equal(expected_aspect_ratios,
actual_aspect_ratios)
for actual_strides, expected_strides in zip(
list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):
self.assert_almost_list_equal(expected_strides, actual_strides)
for actual_offsets, expected_offsets in zip(
list(anchor_generator_object._anchor_offsets), [(8, 9), (10, 11)]):
self.assert_almost_list_equal(expected_offsets, actual_offsets)
self.assertTrue(anchor_generator_object._normalize_coordinates)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/anchor_generator_builder_test.py | anchor_generator_builder_test.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to use their own InputReaders with the Object Detection
configuration framework, they should define their own builder function that
wraps this build function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import tensorflow.compat.v1 as tf
from object_detection.builders import decoder_builder
from object_detection.protos import input_reader_pb2
def make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
This is useful in cases where make_one_shot_iterator wouldn't work because
the graph contains a hash table that needs to be initialized.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
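# A minimal TF1 graph-mode usage sketch (illustrative only):
#
#   iterator = make_initializable_iterator(dataset)
#   features = iterator.get_next()
#   with tf.Session() as sess:
#     sess.run(tf.tables_initializer())  # also runs iterator.initializer
#     outputs = sess.run(features)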
def _read_dataset_internal(file_read_func,
input_files,
num_readers,
config,
filename_shard_fn=None):
"""Reads a dataset, and handles repetition and shuffling.
Args:
    file_read_func: Function to use in tf.data.experimental.parallel_interleave,
      to read every individual file into a tf.data.Dataset.
input_files: A list of file paths to read.
num_readers: Number of readers to use.
    config: An input_reader_pb2.InputReader object.
filename_shard_fn: optional, A function used to shard filenames across
replicas. This function takes as input a TF dataset of filenames and is
expected to return its sharded version. It is useful when the dataset is
being loaded on one of possibly many replicas and we want to evenly shard
the files between the replicas.
Returns:
A tf.data.Dataset of (undecoded) tf-records based on config.
Raises:
RuntimeError: If no files are found at the supplied path(s).
"""
filenames = tf.gfile.Glob(input_files)
tf.logging.info('Reading record datasets for input file: %s' % input_files)
tf.logging.info('Number of filenames to read: %s' % len(filenames))
if not filenames:
raise RuntimeError('Did not find any input files matching the glob pattern '
'{}'.format(input_files))
if num_readers > len(filenames):
num_readers = len(filenames)
tf.logging.warning('num_readers has been reduced to %d to match input file '
'shards.' % num_readers)
filename_dataset = tf.data.Dataset.from_tensor_slices(filenames)
if config.shuffle:
filename_dataset = filename_dataset.shuffle(
config.filenames_shuffle_buffer_size)
elif num_readers > 1:
tf.logging.warning('`shuffle` is false, but the input data stream is '
'still slightly shuffled since `num_readers` > 1.')
if filename_shard_fn:
filename_dataset = filename_shard_fn(filename_dataset)
filename_dataset = filename_dataset.repeat(config.num_epochs or None)
records_dataset = filename_dataset.apply(
tf.data.experimental.parallel_interleave(
file_read_func,
cycle_length=num_readers,
block_length=config.read_block_length,
sloppy=config.shuffle))
if config.shuffle:
records_dataset = records_dataset.shuffle(config.shuffle_buffer_size)
return records_dataset
def read_dataset(file_read_func, input_files, config, filename_shard_fn=None):
"""Reads multiple datasets with sampling.
Args:
    file_read_func: Function to use in tf.data.experimental.parallel_interleave,
      to read every individual file into a tf.data.Dataset.
input_files: A list of file paths to read.
    config: An input_reader_pb2.InputReader object.
filename_shard_fn: optional, A function used to shard filenames across
replicas. This function takes as input a TF dataset of filenames and is
expected to return its sharded version. It is useful when the dataset is
being loaded on one of possibly many replicas and we want to evenly shard
the files between the replicas.
Returns:
A tf.data.Dataset of (undecoded) tf-records based on config.
Raises:
RuntimeError: If no files are found at the supplied path(s).
"""
if config.sample_from_datasets_weights:
tf.logging.info('Reading weighted datasets: %s' % input_files)
if len(input_files) != len(config.sample_from_datasets_weights):
raise ValueError('Expected the number of input files to be the same as '
'the number of dataset sample weights. But got '
'[input_files, sample_from_datasets_weights]: [' +
                       str(input_files) + ', ' +
str(config.sample_from_datasets_weights) + ']')
tf.logging.info('Sampling from datasets %s with weights %s' %
(input_files, config.sample_from_datasets_weights))
records_datasets = []
dataset_weights = []
for i, input_file in enumerate(input_files):
weight = config.sample_from_datasets_weights[i]
num_readers = math.ceil(config.num_readers *
weight /
sum(config.sample_from_datasets_weights))
tf.logging.info(
'Num readers for dataset [%s]: %d', input_file, num_readers)
if num_readers == 0:
tf.logging.info('Skipping dataset due to zero weights: %s', input_file)
continue
records_dataset = _read_dataset_internal(file_read_func, [input_file],
num_readers, config,
filename_shard_fn)
dataset_weights.append(weight)
records_datasets.append(records_dataset)
return tf.data.experimental.sample_from_datasets(records_datasets,
dataset_weights)
else:
tf.logging.info('Reading unweighted datasets: %s' % input_files)
return _read_dataset_internal(file_read_func, input_files,
config.num_readers, config, filename_shard_fn)
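# Worked example of the reader allocation above (illustrative): with
# config.num_readers=10 and sample_from_datasets_weights=[0.7, 0.3], the two
# datasets get ceil(10 * 0.7 / 1.0) = 7 and ceil(10 * 0.3 / 1.0) = 3 readers,
# and a dataset whose weight works out to zero readers is skipped entirely.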
def shard_function_for_context(input_context):
"""Returns a function that shards filenames based on the input context."""
if input_context is None:
return None
def shard_fn(dataset):
return dataset.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
return shard_fn
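# For example (illustrative), with input_context.num_input_pipelines = 3 and
# input_pipeline_id = 1, the returned shard_fn keeps the filenames at indices
# 1, 4, 7, ..., so each replica reads a disjoint subset of the input shards.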
def build(input_reader_config, batch_size=None, transform_input_data_fn=None,
input_context=None, reduce_to_frame_fn=None):
"""Builds a tf.data.Dataset.
Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all
records. Applies a padded batch to the resulting dataset.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
batch_size: Batch size. If batch size is None, no batching is performed.
transform_input_data_fn: Function to apply transformation to all records,
or None if no extra decoding is required.
input_context: optional, A tf.distribute.InputContext object used to
shard filenames and compute per-replica batch_size when this function
is being called per-replica.
reduce_to_frame_fn: Function that extracts frames from tf.SequenceExample
type input data.
Returns:
A tf.data.Dataset based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
decoder = decoder_builder.build(input_reader_config)
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
config = input_reader_config.tf_record_input_reader
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
def dataset_map_fn(dataset, fn_to_map, batch_size=None,
input_reader_config=None):
"""Handles whether or not to use the legacy map function.
Args:
dataset: A tf.Dataset.
fn_to_map: The function to be mapped for that dataset.
batch_size: Batch size. If batch size is None, no batching is performed.
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tf.data.Dataset mapped with fn_to_map.
"""
if hasattr(dataset, 'map_with_legacy_function'):
if batch_size:
num_parallel_calls = batch_size * (
input_reader_config.num_parallel_batches)
else:
num_parallel_calls = input_reader_config.num_parallel_map_calls
dataset = dataset.map_with_legacy_function(
fn_to_map, num_parallel_calls=num_parallel_calls)
else:
      dataset = dataset.map(
          fn_to_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
shard_fn = shard_function_for_context(input_context)
if input_context is not None:
batch_size = input_context.get_per_replica_batch_size(batch_size)
dataset = read_dataset(
functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
config.input_path[:], input_reader_config, filename_shard_fn=shard_fn)
if input_reader_config.sample_1_of_n_examples > 1:
dataset = dataset.shard(input_reader_config.sample_1_of_n_examples, 0)
# TODO(rathodv): make batch size a required argument once the old binaries
# are deleted.
dataset = dataset_map_fn(dataset, decoder.decode, batch_size,
input_reader_config)
if reduce_to_frame_fn:
dataset = reduce_to_frame_fn(dataset, dataset_map_fn, batch_size,
input_reader_config)
if transform_input_data_fn is not None:
dataset = dataset_map_fn(dataset, transform_input_data_fn,
batch_size, input_reader_config)
if batch_size:
dataset = dataset.batch(batch_size,
drop_remainder=input_reader_config.drop_remainder)
dataset = dataset.prefetch(input_reader_config.num_prefetch_batches)
return dataset
raise ValueError('Unsupported input_reader_config.')
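# A minimal usage sketch (illustrative only; the input path is a placeholder,
# not a real file pattern):
#
#   from google.protobuf import text_format
#   config = text_format.Parse("""
#     tf_record_input_reader { input_path: "/tmp/train.record-?????-of-00010" }
#     shuffle: true
#   """, input_reader_pb2.InputReader())
#   dataset = build(config, batch_size=8)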
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/dataset_builder.py | dataset_builder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocessor_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
class PreprocessorBuilderTest(tf.test.TestCase):
def assert_dictionary_close(self, dict1, dict2):
"""Helper to check if two dicts with floatst or integers are close."""
self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))
for key in dict1:
value = dict1[key]
if isinstance(value, float):
self.assertAlmostEqual(value, dict2[key])
else:
self.assertEqual(value, dict2[key])
def test_build_normalize_image(self):
preprocessor_text_proto = """
normalize_image {
original_minval: 0.0
original_maxval: 255.0
target_minval: -1.0
target_maxval: 1.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.normalize_image)
self.assertEqual(args, {
'original_minval': 0.0,
'original_maxval': 255.0,
'target_minval': -1.0,
'target_maxval': 1.0,
})
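  # The builder returns a (function, kwargs) pair rather than a closure; a
  # caller applies it along the lines of the following sketch (`image` is an
  # assumed input tensor):
  #
  #   function, args = preprocessor_builder.build(preprocessor_proto)
  #   normalized = function(image, **args)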
def test_build_random_horizontal_flip(self):
preprocessor_text_proto = """
random_horizontal_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_horizontal_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4),
'probability': 0.5})
def test_build_random_vertical_flip(self):
preprocessor_text_proto = """
random_vertical_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_vertical_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4),
'probability': 0.5})
def test_build_random_rotation90(self):
preprocessor_text_proto = """
random_rotation90 {
keypoint_rot_permutation: 3
keypoint_rot_permutation: 0
keypoint_rot_permutation: 1
keypoint_rot_permutation: 2
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rotation90)
self.assertEqual(args, {'keypoint_rot_permutation': (3, 0, 1, 2),
'probability': 0.5})
def test_build_random_pixel_value_scale(self):
preprocessor_text_proto = """
random_pixel_value_scale {
minval: 0.8
maxval: 1.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pixel_value_scale)
self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2})
def test_build_random_image_scale(self):
preprocessor_text_proto = """
random_image_scale {
min_scale_ratio: 0.8
max_scale_ratio: 2.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_image_scale)
self.assert_dictionary_close(args, {'min_scale_ratio': 0.8,
'max_scale_ratio': 2.2})
def test_build_random_rgb_to_gray(self):
preprocessor_text_proto = """
random_rgb_to_gray {
probability: 0.8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rgb_to_gray)
self.assert_dictionary_close(args, {'probability': 0.8})
def test_build_random_adjust_brightness(self):
preprocessor_text_proto = """
random_adjust_brightness {
max_delta: 0.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_brightness)
self.assert_dictionary_close(args, {'max_delta': 0.2})
def test_build_random_adjust_contrast(self):
preprocessor_text_proto = """
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_contrast)
self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1})
def test_build_random_adjust_hue(self):
preprocessor_text_proto = """
random_adjust_hue {
max_delta: 0.01
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_hue)
self.assert_dictionary_close(args, {'max_delta': 0.01})
def test_build_random_adjust_saturation(self):
preprocessor_text_proto = """
random_adjust_saturation {
min_delta: 0.75
max_delta: 1.15
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_saturation)
self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15})
def test_build_random_distort_color(self):
preprocessor_text_proto = """
random_distort_color {
color_ordering: 1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_distort_color)
self.assertEqual(args, {'color_ordering': 1})
def test_build_random_jitter_boxes(self):
preprocessor_text_proto = """
random_jitter_boxes {
ratio: 0.1
jitter_mode: SHRINK
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jitter_boxes)
self.assert_dictionary_close(args, {'ratio': 0.1, 'jitter_mode': 'shrink'})
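  # The jitter_mode enum value (SHRINK) is handed to the preprocessor as the
  # lower-cased string 'shrink', as the assertion above shows.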
def test_build_random_crop_image(self):
preprocessor_text_proto = """
random_crop_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
})
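  # The builder folds the paired min/max proto fields into the range tuples
  # (aspect_ratio_range, area_range) that preprocessor.random_crop_image
  # consumes.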
def test_build_random_pad_image(self):
preprocessor_text_proto = """
random_pad_image {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pad_image)
self.assertEqual(args, {
'min_image_size': None,
'max_image_size': None,
'pad_color': None,
})
def test_build_random_absolute_pad_image(self):
preprocessor_text_proto = """
random_absolute_pad_image {
max_height_padding: 50
max_width_padding: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_absolute_pad_image)
self.assertEqual(args, {
'max_height_padding': 50,
'max_width_padding': 100,
'pad_color': None,
})
def test_build_random_crop_pad_image(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'pad_color': None,
})
def test_build_random_crop_pad_image_with_optional_parameters(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
min_padded_size_ratio: 0.5
min_padded_size_ratio: 0.75
max_padded_size_ratio: 0.5
max_padded_size_ratio: 0.75
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'min_padded_size_ratio': (0.5, 0.75),
'max_padded_size_ratio': (0.5, 0.75),
'pad_color': None,
})
def test_build_random_crop_to_aspect_ratio(self):
preprocessor_text_proto = """
random_crop_to_aspect_ratio {
aspect_ratio: 0.85
overlap_thresh: 0.35
clip_boxes: False
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio)
self.assert_dictionary_close(args, {'aspect_ratio': 0.85,
'overlap_thresh': 0.35,
'clip_boxes': False})
def test_build_random_black_patches(self):
preprocessor_text_proto = """
random_black_patches {
max_black_patches: 20
probability: 0.95
size_to_image_ratio: 0.12
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_black_patches)
self.assert_dictionary_close(args, {'max_black_patches': 20,
'probability': 0.95,
'size_to_image_ratio': 0.12})
def test_build_random_jpeg_quality(self):
preprocessor_text_proto = """
random_jpeg_quality {
random_coef: 0.5
min_jpeg_quality: 40
max_jpeg_quality: 90
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jpeg_quality)
self.assert_dictionary_close(args, {'random_coef': 0.5,
'min_jpeg_quality': 40,
'max_jpeg_quality': 90})
def test_build_random_downscale_to_target_pixels(self):
preprocessor_text_proto = """
random_downscale_to_target_pixels {
random_coef: 0.5
min_target_pixels: 200
max_target_pixels: 900
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_downscale_to_target_pixels)
self.assert_dictionary_close(args, {
'random_coef': 0.5,
'min_target_pixels': 200,
'max_target_pixels': 900
})
def test_build_random_patch_gaussian(self):
preprocessor_text_proto = """
random_patch_gaussian {
random_coef: 0.5
min_patch_size: 10
max_patch_size: 300
min_gaussian_stddev: 0.2
max_gaussian_stddev: 1.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_patch_gaussian)
self.assert_dictionary_close(args, {
'random_coef': 0.5,
'min_patch_size': 10,
'max_patch_size': 300,
'min_gaussian_stddev': 0.2,
'max_gaussian_stddev': 1.5
})
def test_auto_augment_image(self):
preprocessor_text_proto = """
autoaugment_image {
policy_name: 'v0'
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.autoaugment_image)
self.assert_dictionary_close(args, {'policy_name': 'v0'})
def test_drop_label_probabilistically(self):
preprocessor_text_proto = """
drop_label_probabilistically{
label: 2
drop_probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.drop_label_probabilistically)
self.assert_dictionary_close(args, {
'dropped_label': 2,
'drop_probability': 0.5
})
def test_remap_labels(self):
preprocessor_text_proto = """
remap_labels{
original_labels: 1
original_labels: 2
new_label: 3
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.remap_labels)
self.assert_dictionary_close(args, {
'original_labels': [1, 2],
'new_label': 3
})
def test_build_random_resize_method(self):
preprocessor_text_proto = """
random_resize_method {
target_height: 75
target_width: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_resize_method)
self.assert_dictionary_close(args, {'target_size': [75, 100]})
def test_build_scale_boxes_to_pixel_coordinates(self):
preprocessor_text_proto = """
scale_boxes_to_pixel_coordinates {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates)
self.assertEqual(args, {})
def test_build_resize_image(self):
preprocessor_text_proto = """
resize_image {
new_height: 75
new_width: 100
method: BICUBIC
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.resize_image)
self.assertEqual(args, {'new_height': 75,
'new_width': 100,
'method': tf.image.ResizeMethod.BICUBIC})
def test_build_rgb_to_gray(self):
preprocessor_text_proto = """
rgb_to_gray {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.rgb_to_gray)
self.assertEqual(args, {})
def test_build_subtract_channel_mean(self):
preprocessor_text_proto = """
subtract_channel_mean {
means: [1.0, 2.0, 3.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.subtract_channel_mean)
self.assertEqual(args, {'means': [1.0, 2.0, 3.0]})
def test_random_self_concat_image(self):
preprocessor_text_proto = """
random_self_concat_image {
concat_vertical_probability: 0.5
concat_horizontal_probability: 0.25
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_self_concat_image)
self.assertEqual(args, {'concat_vertical_probability': 0.5,
'concat_horizontal_probability': 0.25})
def test_build_ssd_random_crop(self):
preprocessor_text_proto = """
ssd_random_crop {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_empty_operations(self):
preprocessor_text_proto = """
ssd_random_crop {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {})
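  # With no operations blocks, build returns an empty args dict, so
  # preprocessor.ssd_random_crop will run entirely on its own defaults.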
def test_build_ssd_random_crop_pad(self):
preprocessor_text_proto = """
ssd_random_crop_pad {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_pad)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': [(1.0, 1.0), (1.0, 1.0)],
'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)],
'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]})
def test_build_ssd_random_crop_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_pad_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_pad_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function,
preprocessor.ssd_random_crop_pad_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': (1.0, 1.0),
'max_padded_size_ratio': (2.0, 2.0)})
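  # Per-operation fields are collected into parallel lists (one entry per
  # operations block), while fields set once at the top level, such as
  # aspect_ratio and the padded size ratios, stay as shared scalars/tuples.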
  def test_build_convert_class_logits_to_softmax(self):
preprocessor_text_proto = """
convert_class_logits_to_softmax {
temperature: 2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.convert_class_logits_to_softmax)
self.assertEqual(args, {'temperature': 2})
def test_random_crop_by_scale(self):
preprocessor_text_proto = """
random_square_crop_by_scale {
scale_min: 0.25
scale_max: 2.0
num_scales: 8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_square_crop_by_scale)
self.assertEqual(args, {
'scale_min': 0.25,
'scale_max': 2.0,
'num_scales': 8,
'max_border': 128
})
def test_adjust_gamma(self):
preprocessor_text_proto = """
adjust_gamma {
gamma: 2.2
gain: 2.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.adjust_gamma)
self.assert_dictionary_close(args, {'gamma': 2.2, 'gain': 2.0})
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/preprocessor_builder_test.py | preprocessor_builder_test.py |
"""Tests for google3.third_party.tensorflow_models.object_detection.builders.target_assigner_builder."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import target_assigner_builder
from object_detection.core import target_assigner
from object_detection.protos import target_assigner_pb2
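# target_assigner_builder.build wires the configured matcher, similarity
# calculator and box coder into a single target_assigner.TargetAssigner,
# which the test below verifies end to end from a text proto.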
class TargetAssignerBuilderTest(tf.test.TestCase):
def test_build_a_target_assigner(self):
target_assigner_text_proto = """
matcher {
argmax_matcher {matched_threshold: 0.5}
}
similarity_calculator {
iou_similarity {}
}
box_coder {
faster_rcnn_box_coder {}
}
"""
target_assigner_proto = target_assigner_pb2.TargetAssigner()
text_format.Merge(target_assigner_text_proto, target_assigner_proto)
target_assigner_instance = target_assigner_builder.build(
target_assigner_proto)
self.assertIsInstance(target_assigner_instance,
target_assigner.TargetAssigner)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/target_assigner_builder_test.py | target_assigner_builder_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for matcher_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import matcher_builder
from object_detection.matchers import argmax_matcher
from object_detection.protos import matcher_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
class MatcherBuilderTest(test_case.TestCase):
def test_build_arg_max_matcher_with_defaults(self):
matcher_text_proto = """
argmax_matcher {
}
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
matcher_object = matcher_builder.build(matcher_proto)
self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher)
self.assertAlmostEqual(matcher_object._matched_threshold, 0.5)
self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.5)
self.assertTrue(matcher_object._negatives_lower_than_unmatched)
self.assertFalse(matcher_object._force_match_for_each_row)
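  # With an empty argmax_matcher message, the proto defaults leave both the
  # matched and unmatched thresholds at 0.5.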
def test_build_arg_max_matcher_without_thresholds(self):
matcher_text_proto = """
argmax_matcher {
ignore_thresholds: true
}
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
matcher_object = matcher_builder.build(matcher_proto)
self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher)
self.assertEqual(matcher_object._matched_threshold, None)
self.assertEqual(matcher_object._unmatched_threshold, None)
self.assertTrue(matcher_object._negatives_lower_than_unmatched)
self.assertFalse(matcher_object._force_match_for_each_row)
def test_build_arg_max_matcher_with_non_default_parameters(self):
matcher_text_proto = """
argmax_matcher {
matched_threshold: 0.7
unmatched_threshold: 0.3
negatives_lower_than_unmatched: false
force_match_for_each_row: true
use_matmul_gather: true
}
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
matcher_object = matcher_builder.build(matcher_proto)
self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher)
self.assertAlmostEqual(matcher_object._matched_threshold, 0.7)
self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.3)
self.assertFalse(matcher_object._negatives_lower_than_unmatched)
self.assertTrue(matcher_object._force_match_for_each_row)
self.assertTrue(matcher_object._use_matmul_gather)
def test_build_bipartite_matcher(self):
if tf_version.is_tf2():
self.skipTest('BipartiteMatcher unsupported in TF 2.X. Skipping.')
matcher_text_proto = """
bipartite_matcher {
}
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
matcher_object = matcher_builder.build(matcher_proto)
self.assertIsInstance(matcher_object,
bipartite_matcher.GreedyBipartiteMatcher)
def test_raise_error_on_empty_matcher(self):
matcher_text_proto = """
"""
matcher_proto = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_proto)
with self.assertRaises(ValueError):
matcher_builder.build(matcher_proto)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/matcher_builder_test.py | matcher_builder_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_rewriter_builder."""
import unittest
from unittest import mock # pylint: disable=g-importing-member
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.builders import graph_rewriter_builder
from object_detection.protos import graph_rewriter_pb2
from object_detection.utils import tf_version
if tf_version.is_tf1():
from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class QuantizationBuilderTest(tf.test.TestCase):
def testQuantizationBuilderSetsUpCorrectTrainArguments(self):
with mock.patch.object(
contrib_quantize,
'experimental_create_training_graph') as mock_quant_fn:
with mock.patch.object(slim,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewriter_proto.quantization.weight_bits = 8
graph_rewriter_proto.quantization.activation_bits = 8
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto, is_training=True)
graph_rewrite_fn()
_, kwargs = mock_quant_fn.call_args
self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
self.assertEqual(kwargs['quant_delay'], 10)
mock_summarize_col.assert_called_with('quant_vars')
def testQuantizationBuilderSetsUpCorrectEvalArguments(self):
with mock.patch.object(contrib_quantize,
'experimental_create_eval_graph') as mock_quant_fn:
with mock.patch.object(slim,
'summarize_collection') as mock_summarize_col:
graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_proto.quantization.delay = 10
graph_rewrite_fn = graph_rewriter_builder.build(
graph_rewriter_proto, is_training=False)
graph_rewrite_fn()
_, kwargs = mock_quant_fn.call_args
self.assertEqual(kwargs['input_graph'], tf.get_default_graph())
mock_summarize_col.assert_called_with('quant_vars')
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/graph_rewriter_builder_tf1_test.py | graph_rewriter_builder_tf1_test.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import unittest
from unittest import mock # pylint: disable=g-importing-member
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.')
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
min_depth: 2
max_depth: 16
num_layers_before_predictor: 2
use_dropout: false
dropout_keep_probability: 0.4
kernel_size: 3
box_code_size: 3
apply_sigmoid_to_scores: true
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._min_depth, 2)
self.assertEqual(box_predictor._max_depth, 16)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.4)
self.assertTrue(class_head._apply_sigmoid_to_scores)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(class_head._num_class_slots, 10)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertTrue(class_head._use_depthwise)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._min_depth, 0)
self.assertEqual(box_predictor._max_depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertTrue(class_head._use_dropout)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
self.assertFalse(class_head._apply_sigmoid_to_scores)
self.assertEqual(class_head._num_class_slots, 91)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertFalse(class_head._use_depthwise)
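  # With the default add_background_class (evidently True), 90 classes yield
  # 91 class slots, one extra for background; the previous test passed
  # add_background_class=False, so its slot count equaled num_classes.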
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.')
class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
(box_predictor_proto.weight_shared_convolutional_box_predictor
.conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, False)
def test_construct_non_default_depthwise_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertEqual(box_predictor._apply_batch_norm, False)
self.assertEqual(box_predictor._use_depthwise, True)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, False)
def test_construct_default_conv_box_predictor_with_batch_norm(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
batch_norm {
train: true
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, True)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.')
class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_builder_calls_fc_argscope_fn(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
op: FC
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
mock_argscope_fn.assert_called_with(hyperparams_proto, False)
self.assertEqual(box_predictor._box_prediction_head._fc_hyperparams_fn,
'arg_scope')
self.assertEqual(box_predictor._class_prediction_head._fc_hyperparams_fn,
'arg_scope')
def test_non_default_mask_rcnn_box_predictor(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
op: FC
"""
box_predictor_text_proto = """
mask_rcnn_box_predictor {
use_dropout: true
dropout_keep_probability: 0.8
box_code_size: 3
share_box_across_classes: true
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
return (fc_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_fc_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
self.assertTrue(box_head._use_dropout)
self.assertTrue(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.8)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 3)
self.assertEqual(box_head._share_box_across_classes, True)
def test_build_default_mask_rcnn_box_predictor(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor = box_predictor_builder.build(
argscope_fn=mock.Mock(return_value='arg_scope'),
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertEqual(len(box_predictor._third_stage_heads.keys()), 0)
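    # No third-stage (mask) heads are created unless predict_instance_masks
    # is enabled; the next two tests exercise that path.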
def test_build_box_predictor_with_mask_branch(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
third_stage_heads = box_predictor._third_stage_heads
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertIn(
mask_rcnn_box_predictor.MASK_PREDICTIONS, third_stage_heads)
self.assertEqual(
third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._mask_prediction_conv_depth, 512)
  def test_build_box_predictor_with_convolve_then_upsample_masks(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 24
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 24
box_predictor_proto.mask_rcnn_box_predictor.convolve_then_upsample_masks = (
True)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
third_stage_heads = box_predictor._third_stage_heads
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertIn(
mask_rcnn_box_predictor.MASK_PREDICTIONS, third_stage_heads)
self.assertEqual(
third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._mask_prediction_conv_depth, 512)
self.assertTrue(third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._convolve_then_upsample)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.')
class RfcnBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_fc_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_non_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
box_predictor_text_proto = """
rfcn_box_predictor {
num_spatial_bins_height: 4
num_spatial_bins_width: 4
depth: 4
box_code_size: 3
crop_height: 16
crop_width: 16
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 3)
self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
self.assertEqual(box_predictor._crop_size, [16, 16])
def test_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
self.assertEqual(box_predictor._crop_size, [12, 12])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/box_predictor_builder_test.py | box_predictor_builder_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_reader_builder."""
import os
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.core import standard_fields as fields
from object_detection.dataset_tools import seq_example_util
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import tf_version
def _get_labelmap_path():
"""Returns an absolute path to label map file."""
parent_path = os.path.dirname(tf.resource_loader.get_data_files_path())
return os.path.join(parent_path, 'data',
'pet_label_map.pbtxt')
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class InputReaderBuilderTest(tf.test.TestCase):
def create_tf_record(self):
path = os.path.join(self.get_temp_dir(), 'tfrecord')
writer = tf.python_io.TFRecordWriter(path)
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
flat_mask = (4 * 5) * [1.0]
with self.test_session():
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/height': dataset_util.int64_feature(4),
'image/width': dataset_util.int64_feature(5),
'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]),
'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]),
'image/object/class/label': dataset_util.int64_list_feature([2]),
'image/object/mask': dataset_util.float_list_feature(flat_mask),
}))
writer.write(example.SerializeToString())
writer.close()
return path
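  # The record above holds a single random 4x5 image with one full-image box
  # of class 2 and an all-ones instance mask flattened to 20 floats.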
def _make_random_serialized_jpeg_images(self, num_frames, image_height,
image_width):
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
with tf.Session() as sess:
encoded_images = sess.run(encoded_images_list)
return encoded_images
def create_tf_record_sequence_example(self):
path = os.path.join(self.get_temp_dir(), 'tfrecord')
writer = tf.python_io.TFRecordWriter(path)
num_frames = 4
image_height = 20
image_width = 30
image_source_ids = [str(i) for i in range(num_frames)]
with self.test_session():
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_source_ids=image_source_ids,
image_format='JPEG',
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[]], # Frame 0.
[[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.],
[0.1, 0.1, 0.2, 0.2]], # Frame 2.
[[]], # Frame 3.
],
label_strings=[
[], # Frame 0.
['Abyssinian'], # Frame 1.
['Abyssinian', 'american_bulldog'], # Frame 2.
[], # Frame 3
]).SerializeToString()
writer.write(sequence_example_serialized)
writer.close()
return path
def create_tf_record_with_context(self):
path = os.path.join(self.get_temp_dir(), 'tfrecord')
writer = tf.python_io.TFRecordWriter(path)
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
flat_mask = (4 * 5) * [1.0]
context_features = (10 * 3) * [1.0]
with self.test_session():
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/height':
dataset_util.int64_feature(4),
'image/width':
dataset_util.int64_feature(5),
'image/object/bbox/xmin':
dataset_util.float_list_feature([0.0]),
'image/object/bbox/xmax':
dataset_util.float_list_feature([1.0]),
'image/object/bbox/ymin':
dataset_util.float_list_feature([0.0]),
'image/object/bbox/ymax':
dataset_util.float_list_feature([1.0]),
'image/object/class/label':
dataset_util.int64_list_feature([2]),
'image/object/mask':
dataset_util.float_list_feature(flat_mask),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_list_feature([10]),
}))
writer.write(example.SerializeToString())
writer.close()
return path
def test_build_tf_record_input_reader(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
output_dict = sess.run(tensor_dict)
self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
output_dict)
self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
self.assertEqual([2],
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0])
def test_build_tf_record_input_reader_sequence_example(self):
tf_record_path = self.create_tf_record_sequence_example()
input_reader_text_proto = """
shuffle: false
num_readers: 1
input_type: TF_SEQUENCE_EXAMPLE
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
input_reader_proto.label_map_path = _get_labelmap_path()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
output_dict = sess.run(tensor_dict)
expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]]
expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
expected_num_groundtruth_boxes = [0, 1, 2, 0]
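    # Each frame is padded to the per-example maximum of two boxes: classes
    # are padded with -1 and boxes with all-zero coordinates, while
    # num_groundtruth_boxes records the true per-frame counts.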
self.assertNotIn(
fields.InputDataFields.groundtruth_instance_masks, output_dict)
# sequence example images are encoded
self.assertEqual((4,), output_dict[fields.InputDataFields.image].shape)
self.assertAllEqual(expected_groundtruth_classes,
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(4, 2, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllClose(expected_groundtruth_boxes,
output_dict[fields.InputDataFields.groundtruth_boxes])
self.assertAllClose(
expected_num_groundtruth_boxes,
output_dict[fields.InputDataFields.num_groundtruth_boxes])
def test_build_tf_record_input_reader_with_context(self):
tf_record_path = self.create_tf_record_with_context()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
input_reader_proto.load_context_features = True
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
output_dict = sess.run(tensor_dict)
self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
output_dict)
self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
self.assertEqual([2],
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0])
self.assertAllEqual(
(3, 10), output_dict[fields.InputDataFields.context_features].shape)
self.assertAllEqual(
(10), output_dict[fields.InputDataFields.context_feature_length])
def test_build_tf_record_input_reader_and_load_instance_masks(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
output_dict = sess.run(tensor_dict)
self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
self.assertEqual([2],
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0])
self.assertAllEqual(
(1, 4, 5),
output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
def test_raises_error_with_no_input_paths(self):
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
with self.assertRaises(ValueError):
input_reader_builder.build(input_reader_proto)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/input_reader_builder_tf1_test.py | input_reader_builder_tf1_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.builders.image_resizer_builder."""
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2
from object_detection.utils import test_case
class ImageResizerBuilderTest(test_case.TestCase):
def _shape_of_resized_random_image_given_text_proto(self, input_shape,
text_proto):
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(text_proto, image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
def graph_fn():
images = tf.cast(
tf.random_uniform(input_shape, minval=0, maxval=255, dtype=tf.int32),
dtype=tf.float32)
resized_images, _ = image_resizer_fn(images)
return resized_images
return self.execute_cpu(graph_fn, []).shape
def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 10, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_keep_aspect_ratio_resizer_grayscale(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
convert_to_grayscale: true
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 10, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_keep_aspect_ratio_resizer_with_padding(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
pad_to_max_dimension: true
per_channel_pad_value: 3
per_channel_pad_value: 4
per_channel_pad_value: 5
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_built_fixed_shape_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 10
width: 20
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (10, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_built_fixed_shape_resizer_grayscale(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 10
width: 20
convert_to_grayscale: true
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (10, 20, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_identity_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
identity_resizer {
}
"""
input_shape = (10, 20, 3)
expected_output_shape = (10, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_raises_error_on_invalid_input(self):
invalid_input = 'invalid_input'
with self.assertRaises(ValueError):
image_resizer_builder.build(invalid_input)
def _resized_image_given_text_proto(self, image, text_proto):
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(text_proto, image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
def graph_fn(image):
resized_image, _ = image_resizer_fn(image)
return resized_image
return self.execute_cpu(graph_fn, [image])
def test_fixed_shape_resizer_nearest_neighbor_method(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 1
width: 1
resize_method: NEAREST_NEIGHBOR
}
"""
image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
image = np.expand_dims(image, axis=2)
image = np.tile(image, (1, 1, 3))
image = np.expand_dims(image, axis=0)
resized_image = self._resized_image_given_text_proto(
image, image_resizer_text_proto)
vals = np.unique(resized_image).tolist()
self.assertEqual(len(vals), 1)
self.assertEqual(vals[0], 1)
def test_build_conditional_shape_resizer_greater_returns_expected_shape(self):
image_resizer_text_proto = """
conditional_shape_resizer {
condition: GREATER
size_threshold: 30
}
"""
input_shape = (60, 30, 3)
expected_output_shape = (30, 15, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_conditional_shape_resizer_same_shape_with_no_resize(self):
image_resizer_text_proto = """
conditional_shape_resizer {
condition: GREATER
size_threshold: 30
}
"""
input_shape = (15, 15, 3)
expected_output_shape = (15, 15, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_conditional_shape_resizer_smaller_returns_expected_shape(self):
image_resizer_text_proto = """
conditional_shape_resizer {
condition: SMALLER
size_threshold: 30
}
"""
input_shape = (30, 15, 3)
expected_output_shape = (60, 30, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_conditional_shape_resizer_grayscale(self):
image_resizer_text_proto = """
conditional_shape_resizer {
condition: GREATER
size_threshold: 30
convert_to_grayscale: true
}
"""
input_shape = (60, 30, 3)
expected_output_shape = (30, 15, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_conditional_shape_resizer_error_on_invalid_condition(self):
invalid_image_resizer_text_proto = """
conditional_shape_resizer {
condition: INVALID
size_threshold: 30
}
"""
with self.assertRaises(ValueError):
image_resizer_builder.build(invalid_image_resizer_text_proto)
def test_build_pad_to_multiple_resizer(self):
"""Test building a pad_to_multiple_resizer from proto."""
image_resizer_text_proto = """
pad_to_multiple_resizer {
multiple: 32
}
"""
input_shape = (60, 30, 3)
expected_output_shape = (64, 32, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_pad_to_multiple_resizer_invalid_multiple(self):
"""Test that building a pad_to_multiple_resizer errors with invalid multiple."""
image_resizer_text_proto = """
pad_to_multiple_resizer {
multiple: -10
}
"""
    image_resizer_config = image_resizer_pb2.ImageResizer()
    text_format.Merge(image_resizer_text_proto, image_resizer_config)
    with self.assertRaises(ValueError):
      image_resizer_builder.build(image_resizer_config)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/image_resizer_builder_test.py | image_resizer_builder_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow ops to calibrate class predictions and background class."""
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
def _find_interval_containing_new_value(x, new_value):
"""Find the index of x (ascending-ordered) after which new_value occurs."""
new_value_shape = shape_utils.combined_static_and_dynamic_shape(new_value)[0]
x_shape = shape_utils.combined_static_and_dynamic_shape(x)[0]
compare = tf.cast(tf.reshape(new_value, shape=(new_value_shape, 1)) >=
tf.reshape(x, shape=(1, x_shape)),
dtype=tf.int32)
diff = compare[:, 1:] - compare[:, :-1]
interval_idx = tf.argmin(diff, axis=1)
return interval_idx
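# Worked example (editorial illustration, not part of the original module):
# with x = [0.0, 1.0, 2.0] and new_value = [0.5, 1.5], the rows of `compare`
# are [1, 0, 0] and [1, 1, 0], so `diff` is [-1, 0] and [0, -1], and tf.argmin
# yields interval indices [0, 1].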
def _tf_linear_interp1d(x_to_interpolate, fn_x, fn_y):
"""Tensorflow implementation of 1d linear interpolation.
Args:
x_to_interpolate: tf.float32 Tensor of shape (num_examples,) over which 1d
linear interpolation is performed.
fn_x: Monotonically-increasing, non-repeating tf.float32 Tensor of shape
(length,) used as the domain to approximate a function.
fn_y: tf.float32 Tensor of shape (length,) used as the range to approximate
a function.
Returns:
tf.float32 Tensor of shape (num_examples,)
"""
x_pad = tf.concat([fn_x[:1] - 1, fn_x, fn_x[-1:] + 1], axis=0)
y_pad = tf.concat([fn_y[:1], fn_y, fn_y[-1:]], axis=0)
interval_idx = _find_interval_containing_new_value(x_pad, x_to_interpolate)
# Interpolate
alpha = (
(x_to_interpolate - tf.gather(x_pad, interval_idx)) /
(tf.gather(x_pad, interval_idx + 1) - tf.gather(x_pad, interval_idx)))
interpolation = ((1 - alpha) * tf.gather(y_pad, interval_idx) +
alpha * tf.gather(y_pad, interval_idx + 1))
return interpolation
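# Hedged usage sketch (editorial addition; eager-mode values for illustration):
#   x_to_interpolate = tf.constant([0.25, 1.5])
#   fn_x = tf.constant([0.0, 1.0])
#   fn_y = tf.constant([0.2, 0.8])
#   _tf_linear_interp1d(x_to_interpolate, fn_x, fn_y)  # ~[0.35, 0.8]
# Inputs outside [fn_x[0], fn_x[-1]] clamp to the endpoints of fn_y because of
# the padding above.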
def _function_approximation_proto_to_tf_tensors(x_y_pairs_message):
"""Extracts (x,y) pairs from a XYPairs message.
Args:
    x_y_pairs_message: calibration_pb2.XYPairs proto
Returns:
tf_x: tf.float32 tensor of shape (number_xy_pairs,) for function domain.
tf_y: tf.float32 tensor of shape (number_xy_pairs,) for function range.
"""
tf_x = tf.convert_to_tensor([x_y_pair.x
for x_y_pair
in x_y_pairs_message.x_y_pair],
dtype=tf.float32)
tf_y = tf.convert_to_tensor([x_y_pair.y
for x_y_pair
in x_y_pairs_message.x_y_pair],
dtype=tf.float32)
return tf_x, tf_y
def _get_class_id_function_dict(calibration_config):
"""Create a dictionary mapping class id to function approximations.
Args:
calibration_config: calibration_pb2 proto containing
id_function_approximations.
Returns:
Dictionary mapping a class id to a tuple of TF tensors to be used for
function approximation.
"""
class_id_function_dict = {}
class_id_xy_pairs_map = (
calibration_config.class_id_function_approximations.class_id_xy_pairs_map)
for class_id in class_id_xy_pairs_map:
class_id_function_dict[class_id] = (
_function_approximation_proto_to_tf_tensors(
class_id_xy_pairs_map[class_id]))
return class_id_function_dict
def build(calibration_config):
"""Returns a function that calibrates Tensorflow model scores.
All returned functions are expected to apply positive monotonic
transformations to inputs (i.e. score ordering is strictly preserved or
adjacent scores are mapped to the same score, but an input of lower value
  should never exceed an input of higher value after transformation). For
class-agnostic calibration, positive monotonicity should hold across all
scores. In class-specific cases, positive monotonicity should hold within each
class.
Args:
calibration_config: calibration_pb2.CalibrationConfig proto.
Returns:
    Function that accepts class_predictions_with_background and calibrates
the output based on calibration_config's parameters.
Raises:
ValueError: No calibration builder defined for "Oneof" in
calibration_config.
"""
# Linear Interpolation (usually used as a result of calibration via
# isotonic regression).
if calibration_config.WhichOneof('calibrator') == 'function_approximation':
def calibration_fn(class_predictions_with_background):
"""Calibrate predictions via 1-d linear interpolation.
Predictions scores are linearly interpolated based on a class-agnostic
function approximation. Note that the 0-indexed background class is also
transformed.
Args:
class_predictions_with_background: tf.float32 tensor of shape
[batch_size, num_anchors, num_classes + 1] containing scores on the
interval [0,1]. This is usually produced by a sigmoid or softmax layer
and the result of calling the `predict` method of a detection model.
Returns:
tf.float32 tensor of the same shape as the input with values on the
interval [0, 1].
"""
# Flattening Tensors and then reshaping at the end.
flat_class_predictions_with_background = tf.reshape(
class_predictions_with_background, shape=[-1])
fn_x, fn_y = _function_approximation_proto_to_tf_tensors(
calibration_config.function_approximation.x_y_pairs)
updated_scores = _tf_linear_interp1d(
flat_class_predictions_with_background, fn_x, fn_y)
# Un-flatten the scores
original_detections_shape = shape_utils.combined_static_and_dynamic_shape(
class_predictions_with_background)
calibrated_class_predictions_with_background = tf.reshape(
updated_scores,
shape=original_detections_shape,
name='calibrate_scores')
return calibrated_class_predictions_with_background
elif (calibration_config.WhichOneof('calibrator') ==
'class_id_function_approximations'):
def calibration_fn(class_predictions_with_background):
"""Calibrate predictions per class via 1-d linear interpolation.
Prediction scores are linearly interpolated with class-specific function
approximations. Note that after calibration, an anchor's class scores will
not necessarily sum to 1, and score ordering may change, depending on each
class' calibration parameters.
Args:
class_predictions_with_background: tf.float32 tensor of shape
[batch_size, num_anchors, num_classes + 1] containing scores on the
interval [0,1]. This is usually produced by a sigmoid or softmax layer
and the result of calling the `predict` method of a detection model.
Returns:
tf.float32 tensor of the same shape as the input with values on the
interval [0, 1].
Raises:
KeyError: Calibration parameters are not present for a class.
"""
class_id_function_dict = _get_class_id_function_dict(calibration_config)
# Tensors are split by class and then recombined at the end to recover
# the input's original shape. If a class id does not have calibration
# parameters, it is left unchanged.
class_tensors = tf.unstack(class_predictions_with_background, axis=-1)
calibrated_class_tensors = []
for class_id, class_tensor in enumerate(class_tensors):
flat_class_tensor = tf.reshape(class_tensor, shape=[-1])
if class_id in class_id_function_dict:
output_tensor = _tf_linear_interp1d(
x_to_interpolate=flat_class_tensor,
fn_x=class_id_function_dict[class_id][0],
fn_y=class_id_function_dict[class_id][1])
else:
tf.logging.info(
              'Calibration parameters for class id `%d` not found',
class_id)
output_tensor = flat_class_tensor
calibrated_class_tensors.append(output_tensor)
combined_calibrated_tensor = tf.stack(calibrated_class_tensors, axis=1)
input_shape = shape_utils.combined_static_and_dynamic_shape(
class_predictions_with_background)
calibrated_class_predictions_with_background = tf.reshape(
combined_calibrated_tensor,
shape=input_shape,
name='calibrate_scores')
return calibrated_class_predictions_with_background
elif (calibration_config.WhichOneof('calibrator') ==
'temperature_scaling_calibration'):
def calibration_fn(class_predictions_with_background):
"""Calibrate predictions via temperature scaling.
Predictions logits scores are scaled by the temperature scaler. Note that
the 0-indexed background class is also transformed.
Args:
class_predictions_with_background: tf.float32 tensor of shape
[batch_size, num_anchors, num_classes + 1] containing logits scores.
This is usually produced before a sigmoid or softmax layer.
Returns:
tf.float32 tensor of the same shape as the input.
Raises:
ValueError: If temperature scaler is of incorrect value.
"""
scaler = calibration_config.temperature_scaling_calibration.scaler
if scaler <= 0:
raise ValueError('The scaler in temperature scaling must be positive.')
calibrated_class_predictions_with_background = tf.math.divide(
class_predictions_with_background,
scaler,
name='calibrate_score')
return calibrated_class_predictions_with_background
# TODO(zbeaver): Add sigmoid calibration.
else:
raise ValueError('No calibration builder defined for "Oneof" in '
'calibration_config.')
return calibration_fn
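# Usage sketch (editorial addition; a minimal example assuming the standard
# protobuf text_format import):
#   from google.protobuf import text_format
#   from object_detection.protos import calibration_pb2
#   config = calibration_pb2.CalibrationConfig()
#   text_format.Merge("""
#     function_approximation {
#       x_y_pairs {
#         x_y_pair { x: 0.0 y: 0.0 }
#         x_y_pair { x: 1.0 y: 0.5 }
#       }
#     }""", config)
#   calibration_fn = build(config)
#   calibrated = calibration_fn(class_predictions_with_background)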
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/calibration_builder.py | calibration_builder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for region similarity calculators."""
from object_detection.core import region_similarity_calculator
from object_detection.protos import region_similarity_calculator_pb2
def build(region_similarity_calculator_config):
"""Builds region similarity calculator based on the configuration.
  Builds one of [IouSimilarity, IoaSimilarity, NegSqDistSimilarity,
  ThresholdedIouSimilarity] objects. See
  protos/region_similarity_calculator.proto for details.
Args:
region_similarity_calculator_config: RegionSimilarityCalculator
configuration proto.
Returns:
region_similarity_calculator: RegionSimilarityCalculator object.
Raises:
ValueError: On unknown region similarity calculator.
"""
if not isinstance(
region_similarity_calculator_config,
region_similarity_calculator_pb2.RegionSimilarityCalculator):
raise ValueError(
'region_similarity_calculator_config not of type '
        'region_similarity_calculator_pb2.RegionSimilarityCalculator')
similarity_calculator = region_similarity_calculator_config.WhichOneof(
'region_similarity')
if similarity_calculator == 'iou_similarity':
return region_similarity_calculator.IouSimilarity()
if similarity_calculator == 'ioa_similarity':
return region_similarity_calculator.IoaSimilarity()
if similarity_calculator == 'neg_sq_dist_similarity':
return region_similarity_calculator.NegSqDistSimilarity()
if similarity_calculator == 'thresholded_iou_similarity':
return region_similarity_calculator.ThresholdedIouSimilarity(
region_similarity_calculator_config.thresholded_iou_similarity
.iou_threshold)
raise ValueError('Unknown region similarity calculator.')
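# Usage sketch (editorial addition): building an IoU similarity calculator.
#   from google.protobuf import text_format
#   from object_detection.protos import region_similarity_calculator_pb2
#   config = region_similarity_calculator_pb2.RegionSimilarityCalculator()
#   text_format.Merge('iou_similarity {}', config)
#   similarity_calc = build(config)  # region_similarity_calculator.IouSimilarity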
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/region_similarity_calculator_builder.py | region_similarity_calculator_builder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import functools
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2
def _tf_resize_method(resize_method):
"""Maps image resize method from enumeration type to TensorFlow.
Args:
resize_method: The resize_method attribute of keep_aspect_ratio_resizer or
fixed_shape_resizer.
Returns:
method: The corresponding TensorFlow ResizeMethod.
Raises:
ValueError: if `resize_method` is of unknown type.
"""
dict_method = {
image_resizer_pb2.BILINEAR:
tf.image.ResizeMethod.BILINEAR,
image_resizer_pb2.NEAREST_NEIGHBOR:
tf.image.ResizeMethod.NEAREST_NEIGHBOR,
image_resizer_pb2.BICUBIC:
tf.image.ResizeMethod.BICUBIC,
image_resizer_pb2.AREA:
tf.image.ResizeMethod.AREA
}
if resize_method in dict_method:
return dict_method[resize_method]
else:
raise ValueError('Unknown resize_method')
def build(image_resizer_config):
"""Builds callable for image resizing operations.
Args:
image_resizer_config: image_resizer.proto object containing parameters for
an image resizing operation.
Returns:
image_resizer_fn: Callable for image resizing. This callable always takes
a rank-3 image tensor (corresponding to a single image) and returns a
rank-3 image tensor, possibly with new spatial dimensions.
Raises:
ValueError: if `image_resizer_config` is of incorrect type.
    ValueError: if `image_resizer_config.image_resizer_oneof` is of unexpected
      type.
ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
is used.
"""
if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
raise ValueError('image_resizer_config not of type '
'image_resizer_pb2.ImageResizer.')
image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
if image_resizer_oneof == 'keep_aspect_ratio_resizer':
keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
if not (keep_aspect_ratio_config.min_dimension <=
keep_aspect_ratio_config.max_dimension):
raise ValueError('min_dimension > max_dimension')
method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
per_channel_pad_value = (0, 0, 0)
if keep_aspect_ratio_config.per_channel_pad_value:
per_channel_pad_value = tuple(keep_aspect_ratio_config.
per_channel_pad_value)
image_resizer_fn = functools.partial(
preprocessor.resize_to_range,
min_dimension=keep_aspect_ratio_config.min_dimension,
max_dimension=keep_aspect_ratio_config.max_dimension,
method=method,
pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension,
per_channel_pad_value=per_channel_pad_value)
if not keep_aspect_ratio_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'fixed_shape_resizer':
fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
image_resizer_fn = functools.partial(
preprocessor.resize_image,
new_height=fixed_shape_resizer_config.height,
new_width=fixed_shape_resizer_config.width,
method=method)
if not fixed_shape_resizer_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'identity_resizer':
def image_resizer_fn(image, masks=None, **kwargs):
del kwargs
if masks is None:
return [image, tf.shape(image)]
else:
return [image, masks, tf.shape(image)]
return image_resizer_fn
elif image_resizer_oneof == 'conditional_shape_resizer':
conditional_shape_resize_config = (
image_resizer_config.conditional_shape_resizer)
method = _tf_resize_method(conditional_shape_resize_config.resize_method)
if conditional_shape_resize_config.condition == (
image_resizer_pb2.ConditionalShapeResizer.GREATER):
image_resizer_fn = functools.partial(
preprocessor.resize_to_max_dimension,
max_dimension=conditional_shape_resize_config.size_threshold,
method=method)
elif conditional_shape_resize_config.condition == (
image_resizer_pb2.ConditionalShapeResizer.SMALLER):
image_resizer_fn = functools.partial(
preprocessor.resize_to_min_dimension,
min_dimension=conditional_shape_resize_config.size_threshold,
method=method)
else:
raise ValueError(
'Invalid image resizer condition option for '
'ConditionalShapeResizer: \'%s\'.'
% conditional_shape_resize_config.condition)
if not conditional_shape_resize_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'pad_to_multiple_resizer':
pad_to_multiple_resizer_config = (
image_resizer_config.pad_to_multiple_resizer)
    if pad_to_multiple_resizer_config.multiple <= 0:
      raise ValueError('`multiple` for pad_to_multiple_resizer should be > 0.')
else:
image_resizer_fn = functools.partial(
preprocessor.resize_pad_to_multiple,
multiple=pad_to_multiple_resizer_config.multiple)
if not pad_to_multiple_resizer_config.convert_to_grayscale:
return image_resizer_fn
else:
raise ValueError(
'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)
def grayscale_image_resizer(image, masks=None):
"""Convert to grayscale before applying image_resizer_fn.
Args:
image: A 3D tensor of shape [height, width, 3]
masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
width] containing instance masks.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, 1],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
"""
# image_resizer_fn returns [resized_image, resized_image_shape] if
# mask==None, otherwise it returns
# [resized_image, resized_mask, resized_image_shape]. In either case, we
# only deal with first and last element of the returned list.
retval = image_resizer_fn(image, masks)
resized_image = retval[0]
resized_image_shape = retval[-1]
retval[0] = preprocessor.rgb_to_gray(resized_image)
retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0)
return retval
return functools.partial(grayscale_image_resizer)
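# Usage sketch (editorial addition; mirrors the patterns in the unit tests):
#   from google.protobuf import text_format
#   config = image_resizer_pb2.ImageResizer()
#   text_format.Merge("""
#     keep_aspect_ratio_resizer {
#       min_dimension: 10
#       max_dimension: 20
#     }""", config)
#   image_resizer_fn = build(config)
#   resized_image, resized_image_shape = image_resizer_fn(image)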
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/image_resizer_builder.py | image_resizer_builder.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input reader builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.data_decoders import tf_example_decoder
from object_detection.data_decoders import tf_sequence_example_decoder
from object_detection.protos import input_reader_pb2
parallel_reader = slim.parallel_reader
def build(input_reader_config):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
config = input_reader_config.tf_record_input_reader
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
_, string_tensor = parallel_reader.parallel_read(
config.input_path[:], # Convert `RepeatedScalarContainer` to list.
reader_class=tf.TFRecordReader,
num_epochs=(input_reader_config.num_epochs
if input_reader_config.num_epochs else None),
num_readers=input_reader_config.num_readers,
shuffle=input_reader_config.shuffle,
dtypes=[tf.string, tf.string],
capacity=input_reader_config.queue_capacity,
min_after_dequeue=input_reader_config.min_after_dequeue)
label_map_proto_file = None
if input_reader_config.HasField('label_map_path'):
label_map_proto_file = input_reader_config.label_map_path
input_type = input_reader_config.input_type
if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'):
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=input_reader_config.load_instance_masks,
instance_mask_type=input_reader_config.mask_type,
label_map_proto_file=label_map_proto_file,
load_context_features=input_reader_config.load_context_features)
return decoder.decode(string_tensor)
elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'):
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file,
load_context_features=input_reader_config.load_context_features,
load_context_image_ids=input_reader_config.load_context_image_ids)
return decoder.decode(string_tensor)
raise ValueError('Unsupported input_type.')
raise ValueError('Unsupported input_reader_config.')
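# Usage sketch (editorial addition; TF1-style, since this reader depends on
# slim's parallel_reader and queue runners):
#   from google.protobuf import text_format
#   config = input_reader_pb2.InputReader()
#   text_format.Merge("""
#     shuffle: false
#     num_readers: 1
#     tf_record_input_reader { input_path: '/path/to/data.tfrecord' }
#   """, config)
#   tensor_dict = build(config)
#   with tf.train.MonitoredSession() as sess:
#     output_dict = sess.run(tensor_dict)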
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/input_reader_builder.py | input_reader_builder.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer_builder."""
import unittest
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class OptimizerBuilderV2Test(tf.test.TestCase):
"""Test building optimizers in V2 mode."""
def testBuildRMSPropOptimizer(self):
optimizer_text_proto = """
rms_prop_optimizer: {
learning_rate: {
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 800720
decay_factor: 0.95
}
}
momentum_optimizer_value: 0.9
decay: 0.9
epsilon: 1.0
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.RMSprop)
def testBuildMomentumOptimizer(self):
optimizer_text_proto = """
momentum_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.001
}
}
momentum_optimizer_value: 0.99
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.SGD)
def testBuildAdamOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.Adam)
def testBuildMovingAverageOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.keras.optimizers.Optimizer)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/optimizer_builder_tf2_test.py | optimizer_builder_tf2_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for post processing operations."""
import functools
import tensorflow.compat.v1 as tf
from object_detection.builders import calibration_builder
from object_detection.core import post_processing
from object_detection.protos import post_processing_pb2
def build(post_processing_config):
"""Builds callables for post-processing operations.
Builds callables for non-max suppression, score conversion, and (optionally)
calibration based on the configuration.
Non-max suppression callable takes `boxes`, `scores`, and optionally
  `clip_window`, `parallel_iterations`, `masks`, and `scope` as inputs. It
  returns `nms_boxes`, `nms_scores`, `nms_classes`, `nms_masks`, and
  `num_detections`. See
post_processing.batch_multiclass_non_max_suppression for the type and shape
of these tensors.
Score converter callable should be called with `input` tensor. The callable
returns the output from one of 3 tf operations based on the configuration -
tf.identity, tf.sigmoid or tf.nn.softmax. If a calibration config is provided,
score_converter also applies calibration transformations, as defined in
calibration_builder.py. See tensorflow documentation for argument and return
value descriptions.
Args:
post_processing_config: post_processing.proto object containing the
parameters for the post-processing operations.
Returns:
non_max_suppressor_fn: Callable for non-max suppression.
score_converter_fn: Callable for score conversion.
Raises:
ValueError: if the post_processing_config is of incorrect type.
"""
if not isinstance(post_processing_config, post_processing_pb2.PostProcessing):
raise ValueError('post_processing_config not of type '
                     'post_processing_pb2.PostProcessing.')
non_max_suppressor_fn = _build_non_max_suppressor(
post_processing_config.batch_non_max_suppression)
score_converter_fn = _build_score_converter(
post_processing_config.score_converter,
post_processing_config.logit_scale)
if post_processing_config.HasField('calibration_config'):
score_converter_fn = _build_calibrated_score_converter(
score_converter_fn,
post_processing_config.calibration_config)
return non_max_suppressor_fn, score_converter_fn
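# Usage sketch (editorial addition; see the unit tests for fuller examples):
#   config = post_processing_pb2.PostProcessing()
#   text_format.Merge("""
#     score_converter: SIGMOID
#     batch_non_max_suppression {
#       score_threshold: 0.5
#       iou_threshold: 0.6
#       max_detections_per_class: 100
#       max_total_detections: 300
#     }""", config)
#   nms_fn, score_converter_fn = build(config)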
def _build_non_max_suppressor(nms_config):
"""Builds non-max suppresson based on the nms config.
Args:
nms_config: post_processing_pb2.PostProcessing.BatchNonMaxSuppression proto.
Returns:
non_max_suppressor_fn: Callable non-max suppressor.
Raises:
ValueError: On incorrect iou_threshold or on incompatible values of
max_total_detections and max_detections_per_class or on negative
soft_nms_sigma.
"""
if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0:
raise ValueError('iou_threshold not in [0, 1.0].')
if nms_config.max_detections_per_class > nms_config.max_total_detections:
raise ValueError('max_detections_per_class should be no greater than '
'max_total_detections.')
if nms_config.soft_nms_sigma < 0.0:
raise ValueError('soft_nms_sigma should be non-negative.')
if nms_config.use_combined_nms and nms_config.use_class_agnostic_nms:
raise ValueError('combined_nms does not support class_agnostic_nms.')
non_max_suppressor_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=nms_config.score_threshold,
iou_thresh=nms_config.iou_threshold,
max_size_per_class=nms_config.max_detections_per_class,
max_total_size=nms_config.max_total_detections,
use_static_shapes=nms_config.use_static_shapes,
use_class_agnostic_nms=nms_config.use_class_agnostic_nms,
max_classes_per_detection=nms_config.max_classes_per_detection,
soft_nms_sigma=nms_config.soft_nms_sigma,
use_partitioned_nms=nms_config.use_partitioned_nms,
use_combined_nms=nms_config.use_combined_nms,
change_coordinate_frame=nms_config.change_coordinate_frame,
use_hard_nms=nms_config.use_hard_nms,
use_cpu_nms=nms_config.use_cpu_nms)
return non_max_suppressor_fn
def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale):
"""Create a function to scale logits then apply a Tensorflow function."""
def score_converter_fn(logits):
scaled_logits = tf.multiply(logits, 1.0 / logit_scale, name='scale_logits')
return tf_score_converter_fn(scaled_logits, name='convert_scores')
score_converter_fn.__name__ = '%s_with_logit_scale' % (
tf_score_converter_fn.__name__)
return score_converter_fn
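# For example (editorial note): with logit_scale = 2.0, logits of [1.0, 1.0]
# become [0.5, 0.5] before tf_score_converter_fn runs, which is what the
# logit_scale unit tests verify.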
def _build_score_converter(score_converter_config, logit_scale):
"""Builds score converter based on the config.
Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on
the config.
Args:
score_converter_config: post_processing_pb2.PostProcessing.score_converter.
logit_scale: temperature to use for SOFTMAX score_converter.
Returns:
Callable score converter op.
Raises:
ValueError: On unknown score converter.
"""
if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
return _score_converter_fn_with_logit_scale(tf.identity, logit_scale)
if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale)
if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale)
raise ValueError('Unknown score converter.')
def _build_calibrated_score_converter(score_converter_fn, calibration_config):
"""Wraps a score_converter_fn, adding a calibration step.
Builds a score converter function with a calibration transformation according
to calibration_builder.py. The score conversion function may be applied before
or after the calibration transformation, depending on the calibration method.
If the method is temperature scaling, the score conversion is
after the calibration transformation. Otherwise, the score conversion is
before the calibration transformation. Calibration applies positive monotonic
transformations to inputs (i.e. score ordering is strictly preserved or
adjacent scores are mapped to the same score). When calibration is
class-agnostic, the highest-scoring class remains unchanged, unless two
adjacent scores are mapped to the same value and one class arbitrarily
selected to break the tie. In per-class calibration, it's possible (though
rare in practice) that the highest-scoring class will change, since positive
monotonicity is only required to hold within each class.
Args:
score_converter_fn: callable that takes logit scores as input.
calibration_config: post_processing_pb2.PostProcessing.calibration_config.
Returns:
    Callable calibrated score converter op.
"""
calibration_fn = calibration_builder.build(calibration_config)
def calibrated_score_converter_fn(logits):
if (calibration_config.WhichOneof('calibrator') ==
'temperature_scaling_calibration'):
calibrated_logits = calibration_fn(logits)
return score_converter_fn(calibrated_logits)
else:
converted_logits = score_converter_fn(logits)
return calibration_fn(converted_logits)
calibrated_score_converter_fn.__name__ = (
'calibrate_with_%s' % calibration_config.WhichOneof('calibrator'))
return calibrated_score_converter_fn
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/post_processing_builder.py | post_processing_builder.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import six
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf1():
from tensorflow.contrib import opt as contrib_opt
# pylint: enable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class LearningRateBuilderTest(tf.test.TestCase):
def testBuildConstantLearningRate(self):
learning_rate_text_proto = """
constant_learning_rate {
learning_rate: 0.004
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertTrue(
six.ensure_str(learning_rate.op.name).endswith('learning_rate'))
with self.test_session():
learning_rate_out = learning_rate.eval()
self.assertAlmostEqual(learning_rate_out, 0.004)
def testBuildExponentialDecayLearningRate(self):
learning_rate_text_proto = """
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 99999
decay_factor: 0.85
staircase: false
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertTrue(
six.ensure_str(learning_rate.op.name).endswith('learning_rate'))
self.assertIsInstance(learning_rate, tf.Tensor)
def testBuildManualStepLearningRate(self):
learning_rate_text_proto = """
manual_step_learning_rate {
initial_learning_rate: 0.002
schedule {
step: 100
learning_rate: 0.006
}
schedule {
step: 90000
learning_rate: 0.00006
}
warmup: true
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertIsInstance(learning_rate, tf.Tensor)
def testBuildCosineDecayLearningRate(self):
learning_rate_text_proto = """
cosine_decay_learning_rate {
learning_rate_base: 0.002
total_steps: 20000
warmup_learning_rate: 0.0001
warmup_steps: 1000
hold_base_rate_steps: 20000
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertIsInstance(learning_rate, tf.Tensor)
def testRaiseErrorOnEmptyLearningRate(self):
learning_rate_text_proto = """
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
with self.assertRaises(ValueError):
optimizer_builder._create_learning_rate(learning_rate_proto)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class OptimizerBuilderTest(tf.test.TestCase):
def testBuildRMSPropOptimizer(self):
optimizer_text_proto = """
rms_prop_optimizer: {
learning_rate: {
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 800720
decay_factor: 0.95
}
}
momentum_optimizer_value: 0.9
decay: 0.9
epsilon: 1.0
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.train.RMSPropOptimizer)
def testBuildMomentumOptimizer(self):
optimizer_text_proto = """
momentum_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.001
}
}
momentum_optimizer_value: 0.99
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.train.MomentumOptimizer)
def testBuildAdamOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
epsilon: 1e-6
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, tf.train.AdamOptimizer)
def testBuildMovingAverageOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer)
def testBuildMovingAverageOptimizerWithNonDefaultDecay(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
moving_average_decay: 0.2
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer)
# TODO(rathodv): Find a way to not depend on the private members.
self.assertAlmostEqual(optimizer._ema._decay, 0.2)
def testBuildEmptyOptimizer(self):
optimizer_text_proto = """
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
with self.assertRaises(ValueError):
optimizer_builder.build(optimizer_proto)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/optimizer_builder_tf1_test.py | optimizer_builder_tf1_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for post_processing_builder."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.protos import post_processing_pb2
from object_detection.utils import test_case
class PostProcessingBuilderTest(test_case.TestCase):
def test_build_non_max_suppressor_with_correct_parameters(self):
post_processing_text_proto = """
batch_non_max_suppression {
score_threshold: 0.7
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
soft_nms_sigma: 0.4
}
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
non_max_suppressor, _ = post_processing_builder.build(
post_processing_config)
self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 100)
self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300)
self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7)
self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6)
self.assertAlmostEqual(non_max_suppressor.keywords['soft_nms_sigma'], 0.4)
def test_build_non_max_suppressor_with_correct_parameters_classagnostic_nms(
self):
post_processing_text_proto = """
batch_non_max_suppression {
score_threshold: 0.7
iou_threshold: 0.6
max_detections_per_class: 10
max_total_detections: 300
use_class_agnostic_nms: True
max_classes_per_detection: 1
}
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
non_max_suppressor, _ = post_processing_builder.build(
post_processing_config)
self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 10)
self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300)
self.assertEqual(non_max_suppressor.keywords['max_classes_per_detection'],
1)
self.assertEqual(non_max_suppressor.keywords['use_class_agnostic_nms'],
True)
self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7)
self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6)
def test_build_identity_score_converter(self):
post_processing_text_proto = """
score_converter: IDENTITY
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(
post_processing_config)
self.assertEqual(score_converter.__name__, 'identity_with_logit_scale')
def graph_fn():
inputs = tf.constant([1, 1], tf.float32)
outputs = score_converter(inputs)
return outputs
converted_scores = self.execute_cpu(graph_fn, [])
self.assertAllClose(converted_scores, [1, 1])
def test_build_identity_score_converter_with_logit_scale(self):
post_processing_text_proto = """
score_converter: IDENTITY
logit_scale: 2.0
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'identity_with_logit_scale')
def graph_fn():
inputs = tf.constant([1, 1], tf.float32)
outputs = score_converter(inputs)
return outputs
converted_scores = self.execute_cpu(graph_fn, [])
self.assertAllClose(converted_scores, [.5, .5])
def test_build_sigmoid_score_converter(self):
post_processing_text_proto = """
score_converter: SIGMOID
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'sigmoid_with_logit_scale')
def test_build_softmax_score_converter(self):
post_processing_text_proto = """
score_converter: SOFTMAX
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale')
def test_build_softmax_score_converter_with_temperature(self):
post_processing_text_proto = """
score_converter: SOFTMAX
logit_scale: 2.0
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale')
def test_build_calibrator_with_nonempty_config(self):
"""Test that identity function used when no calibration_config specified."""
# Calibration config maps all scores to 0.5.
post_processing_text_proto = """
score_converter: SOFTMAX
calibration_config {
function_approximation {
x_y_pairs {
x_y_pair {
x: 0.0
y: 0.5
}
x_y_pair {
x: 1.0
y: 0.5
}}}}"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, calibrated_score_conversion_fn = post_processing_builder.build(
post_processing_config)
self.assertEqual(calibrated_score_conversion_fn.__name__,
'calibrate_with_function_approximation')
def graph_fn():
input_scores = tf.constant([1, 1], tf.float32)
outputs = calibrated_score_conversion_fn(input_scores)
return outputs
calibrated_scores = self.execute_cpu(graph_fn, [])
self.assertAllClose(calibrated_scores, [0.5, 0.5])
def test_build_temperature_scaling_calibrator(self):
post_processing_text_proto = """
score_converter: SOFTMAX
calibration_config {
temperature_scaling_calibration {
scaler: 2.0
}}"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, calibrated_score_conversion_fn = post_processing_builder.build(
post_processing_config)
self.assertEqual(calibrated_score_conversion_fn.__name__,
'calibrate_with_temperature_scaling_calibration')
def graph_fn():
input_scores = tf.constant([1, 1], tf.float32)
outputs = calibrated_score_conversion_fn(input_scores)
return outputs
calibrated_scores = self.execute_cpu(graph_fn, [])
self.assertAllClose(calibrated_scores, [0.5, 0.5])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/post_processing_builder_test.py | post_processing_builder_test.py |
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import dataset_builder
from object_detection.core import standard_fields as fields
from object_detection.dataset_tools import seq_example_util
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import test_case
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import lookup as contrib_lookup
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
def get_iterator_next_for_testing(dataset, is_tf2):
iterator = dataset.make_initializable_iterator()
if not is_tf2:
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator.get_next()
def _get_labelmap_path():
"""Returns an absolute path to label map file."""
parent_path = os.path.dirname(tf.resource_loader.get_data_files_path())
return os.path.join(parent_path, 'data',
'pet_label_map.pbtxt')
class DatasetBuilderTest(test_case.TestCase):
def create_tf_record(self, has_additional_channels=False, num_shards=1,
num_examples_per_shard=1):
def dummy_jpeg_fn():
image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
additional_channels_tensor = np.random.randint(
255, size=(4, 5, 1)).astype(np.uint8)
encoded_jpeg = tf.image.encode_jpeg(image_tensor)
encoded_additional_channels_jpeg = tf.image.encode_jpeg(
additional_channels_tensor)
return encoded_jpeg, encoded_additional_channels_jpeg
encoded_jpeg, encoded_additional_channels_jpeg = self.execute(
dummy_jpeg_fn, [])
tmp_dir = self.get_temp_dir()
flat_mask = (4 * 5) * [1.0]
for i in range(num_shards):
path = os.path.join(tmp_dir, '%05d.tfrecord' % i)
writer = tf.python_io.TFRecordWriter(path)
for j in range(num_examples_per_shard):
if num_shards > 1:
source_id = (str(i) + '_' + str(j)).encode()
else:
source_id = str(j).encode()
features = {
'image/source_id': dataset_util.bytes_feature(source_id),
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/height': dataset_util.int64_feature(4),
'image/width': dataset_util.int64_feature(5),
'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]),
'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]),
'image/object/class/label': dataset_util.int64_list_feature([2]),
'image/object/mask': dataset_util.float_list_feature(flat_mask),
}
if has_additional_channels:
additional_channels_key = 'image/additional_channels/encoded'
features[additional_channels_key] = dataset_util.bytes_list_feature(
[encoded_additional_channels_jpeg] * 2)
example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(example.SerializeToString())
writer.close()
return os.path.join(self.get_temp_dir(), '?????.tfrecord')
def _make_random_serialized_jpeg_images(self, num_frames, image_height,
image_width):
def graph_fn():
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list]
return encoded_images_list
encoded_images = self.execute(graph_fn, [])
return encoded_images
def create_tf_record_sequence_example(self):
path = os.path.join(self.get_temp_dir(), 'seq_tfrecord')
writer = tf.python_io.TFRecordWriter(path)
num_frames = 4
image_height = 4
image_width = 5
image_source_ids = [str(i) for i in range(num_frames)]
with self.test_session():
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_source_ids=image_source_ids,
image_format='JPEG',
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[]], # Frame 0.
[[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.],
[0.1, 0.1, 0.2, 0.2]], # Frame 2.
[[]], # Frame 3.
],
label_strings=[
[], # Frame 0.
['Abyssinian'], # Frame 1.
['Abyssinian', 'american_bulldog'], # Frame 2.
[], # Frame 3
]).SerializeToString()
writer.write(sequence_example_serialized)
writer.close()
return path
def test_build_tf_record_input_reader(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def graph_fn():
return get_iterator_next_for_testing(
dataset_builder.build(input_reader_proto, batch_size=1),
self.is_tf2())
output_dict = self.execute(graph_fn, [])
self.assertNotIn(
fields.InputDataFields.groundtruth_instance_masks, output_dict)
self.assertEqual((1, 4, 5, 3),
output_dict[fields.InputDataFields.image].shape)
self.assertAllEqual([[2]],
output_dict[fields.InputDataFields.groundtruth_classes])
self.assertEqual(
(1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual(
[0.0, 0.0, 1.0, 1.0],
output_dict[fields.InputDataFields.groundtruth_boxes][0][0])
def get_mock_reduce_to_frame_fn(self):
def mock_reduce_to_frame_fn(dataset, dataset_map_fn, batch_size, config):
def get_frame(tensor_dict):
out_tensor_dict = {}
out_tensor_dict[fields.InputDataFields.source_id] = (
tensor_dict[fields.InputDataFields.source_id][0])
return out_tensor_dict
return dataset_map_fn(dataset, get_frame, batch_size, config)
return mock_reduce_to_frame_fn
def test_build_tf_record_input_reader_sequence_example_train(self):
tf_record_path = self.create_tf_record_sequence_example()
label_map_path = _get_labelmap_path()
input_type = 'TF_SEQUENCE_EXAMPLE'
input_reader_text_proto = """
shuffle: false
num_readers: 1
input_type: {1}
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path, input_type)
input_reader_proto = input_reader_pb2.InputReader()
input_reader_proto.label_map_path = label_map_path
text_format.Merge(input_reader_text_proto, input_reader_proto)
reduce_to_frame_fn = self.get_mock_reduce_to_frame_fn()
def graph_fn():
return get_iterator_next_for_testing(
dataset_builder.build(input_reader_proto, batch_size=1,
reduce_to_frame_fn=reduce_to_frame_fn),
self.is_tf2())
output_dict = self.execute(graph_fn, [])
self.assertEqual((1,),
output_dict[fields.InputDataFields.source_id].shape)
def test_build_tf_record_input_reader_sequence_example_test(self):
tf_record_path = self.create_tf_record_sequence_example()
input_type = 'TF_SEQUENCE_EXAMPLE'
label_map_path = _get_labelmap_path()
input_reader_text_proto = """
shuffle: false
num_readers: 1
input_type: {1}
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path, input_type)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
input_reader_proto.label_map_path = label_map_path
reduce_to_frame_fn = self.get_mock_reduce_to_frame_fn()
def graph_fn():
return get_iterator_next_for_testing(
dataset_builder.build(input_reader_proto, batch_size=1,
reduce_to_frame_fn=reduce_to_frame_fn),
self.is_tf2())
output_dict = self.execute(graph_fn, [])
self.assertEqual((1,),
output_dict[fields.InputDataFields.source_id].shape)
def test_build_tf_record_input_reader_and_load_instance_masks(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def graph_fn():
return get_iterator_next_for_testing(
dataset_builder.build(input_reader_proto, batch_size=1),
self.is_tf2()
)
output_dict = self.execute(graph_fn, [])
self.assertAllEqual(
(1, 1, 4, 5),
output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
def test_build_tf_record_input_reader_with_batch_size_two(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def one_hot_class_encoding_fn(tensor_dict):
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
return tensor_dict
def graph_fn():
return dataset_builder.make_initializable_iterator(
dataset_builder.build(
input_reader_proto,
transform_input_data_fn=one_hot_class_encoding_fn,
batch_size=2)).get_next()
output_dict = self.execute(graph_fn, [])
self.assertAllEqual([2, 4, 5, 3],
output_dict[fields.InputDataFields.image].shape)
self.assertAllEqual(
[2, 1, 3],
output_dict[fields.InputDataFields.groundtruth_classes].shape)
self.assertAllEqual(
[2, 1, 4], output_dict[fields.InputDataFields.groundtruth_boxes].shape)
self.assertAllEqual([[[0.0, 0.0, 1.0, 1.0]], [[0.0, 0.0, 1.0, 1.0]]],
output_dict[fields.InputDataFields.groundtruth_boxes])
def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self):
tf_record_path = self.create_tf_record()
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def one_hot_class_encoding_fn(tensor_dict):
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3)
return tensor_dict
def graph_fn():
return dataset_builder.make_initializable_iterator(
dataset_builder.build(
input_reader_proto,
transform_input_data_fn=one_hot_class_encoding_fn,
batch_size=2)).get_next()
output_dict = self.execute(graph_fn, [])
self.assertAllEqual(
[2, 1, 4, 5],
output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)
def test_raises_error_with_no_input_paths(self):
input_reader_text_proto = """
shuffle: false
num_readers: 1
load_instance_masks: true
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
with self.assertRaises(ValueError):
dataset_builder.build(input_reader_proto, batch_size=1)
def test_sample_all_data(self):
tf_record_path = self.create_tf_record(num_examples_per_shard=2)
input_reader_text_proto = """
shuffle: false
num_readers: 1
sample_1_of_n_examples: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def graph_fn():
dataset = dataset_builder.build(input_reader_proto, batch_size=1)
sample1_ds = dataset.take(1)
sample2_ds = dataset.skip(1)
iter1 = dataset_builder.make_initializable_iterator(sample1_ds)
iter2 = dataset_builder.make_initializable_iterator(sample2_ds)
return iter1.get_next(), iter2.get_next()
output_dict1, output_dict2 = self.execute(graph_fn, [])
self.assertAllEqual([b'0'], output_dict1[fields.InputDataFields.source_id])
self.assertEqual([b'1'], output_dict2[fields.InputDataFields.source_id])
def test_sample_one_of_n_shards(self):
tf_record_path = self.create_tf_record(num_examples_per_shard=4)
input_reader_text_proto = """
shuffle: false
num_readers: 1
sample_1_of_n_examples: 2
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
def graph_fn():
dataset = dataset_builder.build(input_reader_proto, batch_size=1)
sample1_ds = dataset.take(1)
sample2_ds = dataset.skip(1)
iter1 = dataset_builder.make_initializable_iterator(sample1_ds)
iter2 = dataset_builder.make_initializable_iterator(sample2_ds)
return iter1.get_next(), iter2.get_next()
output_dict1, output_dict2 = self.execute(graph_fn, [])
self.assertAllEqual([b'0'], output_dict1[fields.InputDataFields.source_id])
self.assertEqual([b'2'], output_dict2[fields.InputDataFields.source_id])
def test_no_input_context(self):
"""Test that all samples are read with no input context given."""
tf_record_path = self.create_tf_record(num_examples_per_shard=16,
num_shards=2)
input_reader_text_proto = """
shuffle: false
num_readers: 1
num_epochs: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
for i in range(4):
# pylint:disable=cell-var-from-loop
def graph_fn():
dataset = dataset_builder.build(input_reader_proto, batch_size=8)
dataset = dataset.skip(i)
return get_iterator_next_for_testing(dataset, self.is_tf2())
batch = self.execute(graph_fn, [])
self.assertEqual(batch['image'].shape, (8, 4, 5, 3))
def graph_fn_last_batch():
dataset = dataset_builder.build(input_reader_proto, batch_size=8)
dataset = dataset.skip(4)
return get_iterator_next_for_testing(dataset, self.is_tf2())
self.assertRaises(tf.errors.OutOfRangeError, self.execute,
compute_fn=graph_fn_last_batch, inputs=[])
def test_with_input_context(self):
"""Test that a subset is read with input context given."""
tf_record_path = self.create_tf_record(num_examples_per_shard=16,
num_shards=2)
input_reader_text_proto = """
shuffle: false
num_readers: 1
num_epochs: 1
tf_record_input_reader {{
input_path: '{0}'
}}
""".format(tf_record_path)
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)
input_context = tf.distribute.InputContext(
num_input_pipelines=2, input_pipeline_id=0, num_replicas_in_sync=4
)
for i in range(8):
# pylint:disable=cell-var-from-loop
def graph_fn():
dataset = dataset_builder.build(input_reader_proto, batch_size=8,
input_context=input_context)
dataset = dataset.skip(i)
return get_iterator_next_for_testing(dataset, self.is_tf2())
batch = self.execute(graph_fn, [])
self.assertEqual(batch['image'].shape, (2, 4, 5, 3))
def graph_fn_last_batch():
dataset = dataset_builder.build(input_reader_proto, batch_size=8,
input_context=input_context)
dataset = dataset.skip(8)
return get_iterator_next_for_testing(dataset, self.is_tf2())
self.assertRaises(tf.errors.OutOfRangeError, self.execute,
compute_fn=graph_fn_last_batch, inputs=[])
class ReadDatasetTest(test_case.TestCase):
def setUp(self):
self._path_template = os.path.join(self.get_temp_dir(), 'examples_%s.txt')
for i in range(5):
path = self._path_template % i
with tf.gfile.Open(path, 'wb') as f:
f.write('\n'.join([str(i + 1), str((i + 1) * 10)]))
self._shuffle_path_template = os.path.join(self.get_temp_dir(),
'shuffle_%s.txt')
for i in range(2):
path = self._shuffle_path_template % i
with tf.gfile.Open(path, 'wb') as f:
f.write('\n'.join([str(i)] * 5))
super(ReadDatasetTest, self).setUp()
def _get_dataset_next(self, files, config, batch_size, num_batches_skip=0):
def decode_func(value):
return [tf.string_to_number(value, out_type=tf.int32)]
dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
config)
dataset = dataset.map(decode_func)
dataset = dataset.batch(batch_size)
if num_batches_skip > 0:
dataset = dataset.skip(num_batches_skip)
return get_iterator_next_for_testing(dataset, self.is_tf2())
def _assert_item_count(self, data, item, percentage):
self.assertAlmostEqual(data.count(item)/len(data), percentage, places=1)
def test_make_initializable_iterator_with_hashTable(self):
def graph_fn():
keys = [1, 0, -1]
dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
try:
# Dynamically try to load the tf v2 lookup, falling back to contrib
lookup = tf.compat.v2.lookup
hash_table_class = tf.compat.v2.lookup.StaticHashTable
except AttributeError:
lookup = contrib_lookup
hash_table_class = contrib_lookup.HashTable
table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=keys, values=list(reversed(keys))),
default_value=100)
dataset = dataset.map(table.lookup)
return dataset_builder.make_initializable_iterator(dataset).get_next()
result = self.execute(graph_fn, [])
self.assertAllEqual(result, [-1, 100, 1, 100])
def test_read_dataset_sample_from_datasets_weights_equal_weight(self):
"""Ensure that the files' values are equally-weighted."""
config = input_reader_pb2.InputReader()
config.num_readers = 2
config.shuffle = False
config.sample_from_datasets_weights.extend([0.5, 0.5])
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0', self._path_template % '1'],
config,
batch_size=1000)
data = list(self.execute(graph_fn, []))
self.assertEqual(len(data), 1000)
self._assert_item_count(data, 1, 0.25)
self._assert_item_count(data, 10, 0.25)
self._assert_item_count(data, 2, 0.25)
self._assert_item_count(data, 20, 0.25)
def test_read_dataset_sample_from_datasets_weights_non_normalized(self):
"""Ensure that the values are equally-weighted when not normalized."""
config = input_reader_pb2.InputReader()
config.num_readers = 2
config.shuffle = False
# Values are not normalized to sum to 1. In this case, it's a 50/50 split
# with each dataset having weight of 1.
config.sample_from_datasets_weights.extend([1, 1])
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0', self._path_template % '1'],
config,
batch_size=1000)
data = list(self.execute(graph_fn, []))
self.assertEqual(len(data), 1000)
self._assert_item_count(data, 1, 0.25)
self._assert_item_count(data, 10, 0.25)
self._assert_item_count(data, 2, 0.25)
self._assert_item_count(data, 20, 0.25)
def test_read_dataset_sample_from_datasets_weights_zero_weight(self):
"""Ensure that the files' values are equally-weighted."""
config = input_reader_pb2.InputReader()
config.num_readers = 2
config.shuffle = False
config.sample_from_datasets_weights.extend([1.0, 0.0])
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0', self._path_template % '1'],
config,
batch_size=1000)
data = list(self.execute(graph_fn, []))
self.assertEqual(len(data), 1000)
self._assert_item_count(data, 1, 0.5)
self._assert_item_count(data, 10, 0.5)
self._assert_item_count(data, 2, 0.0)
self._assert_item_count(data, 20, 0.0)
def test_read_dataset_sample_from_datasets_weights_unbalanced(self):
"""Ensure that the files' values are equally-weighted."""
config = input_reader_pb2.InputReader()
config.num_readers = 2
config.shuffle = False
config.sample_from_datasets_weights.extend([0.1, 0.9])
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0', self._path_template % '1'],
config,
batch_size=1000)
data = list(self.execute(graph_fn, []))
self.assertEqual(len(data), 1000)
self._assert_item_count(data, 1, 0.05)
self._assert_item_count(data, 10, 0.05)
self._assert_item_count(data, 2, 0.45)
self._assert_item_count(data, 20, 0.45)
def test_read_dataset(self):
config = input_reader_pb2.InputReader()
config.num_readers = 1
config.shuffle = False
def graph_fn():
return self._get_dataset_next(
[self._path_template % '*'], config, batch_size=20)
data = self.execute(graph_fn, [])
# Note that the execute function extracts single outputs if the return
# value is of size 1.
self.assertCountEqual(
data, [
1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40, 5,
50
])
def test_reduce_num_reader(self):
config = input_reader_pb2.InputReader()
config.num_readers = 10
config.shuffle = False
def graph_fn():
return self._get_dataset_next(
[self._path_template % '*'], config, batch_size=20)
data = self.execute(graph_fn, [])
# Note that the execute function extracts single outputs if the return
# value is of size 1.
self.assertCountEqual(
data, [
1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40, 5,
50
])
def test_enable_shuffle(self):
config = input_reader_pb2.InputReader()
config.num_readers = 1
config.shuffle = True
tf.set_random_seed(1) # Set graph level seed.
def graph_fn():
return self._get_dataset_next(
[self._shuffle_path_template % '*'], config, batch_size=10)
expected_non_shuffle_output = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
data = self.execute(graph_fn, [])
self.assertTrue(
np.any(np.not_equal(data, expected_non_shuffle_output)))
  def test_disable_shuffle(self):
config = input_reader_pb2.InputReader()
config.num_readers = 1
config.shuffle = False
def graph_fn():
return self._get_dataset_next(
[self._shuffle_path_template % '*'], config, batch_size=10)
expected_non_shuffle_output1 = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
expected_non_shuffle_output2 = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
# Note that the execute function extracts single outputs if the return
# value is of size 1.
data = self.execute(graph_fn, [])
self.assertTrue(all(data == expected_non_shuffle_output1) or
all(data == expected_non_shuffle_output2))
def test_read_dataset_single_epoch(self):
config = input_reader_pb2.InputReader()
config.num_epochs = 1
config.num_readers = 1
config.shuffle = False
def graph_fn():
return self._get_dataset_next(
[self._path_template % '0'], config, batch_size=30)
data = self.execute(graph_fn, [])
# Note that the execute function extracts single outputs if the return
# value is of size 1.
self.assertAllEqual(data, [1, 10])
# First batch will retrieve as much as it can, second batch will fail.
def graph_fn_second_batch():
return self._get_dataset_next(
[self._path_template % '0'], config, batch_size=30,
num_batches_skip=1)
self.assertRaises(tf.errors.OutOfRangeError, self.execute,
compute_fn=graph_fn_second_batch, inputs=[])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/dataset_builder_test.py | dataset_builder_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Box predictor for object detectors.
Box predictors are classes that take a high level
image feature map as input and produce two predictions,
(1) a tensor encoding box locations, and
(2) a tensor encoding classes for each box.
These components are passed directly to loss functions
in our detection models.
These modules are separated from the main model since the same
few box predictor architectures are shared across many models.
"""
from abc import abstractmethod
import tensorflow.compat.v1 as tf
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
MASK_PREDICTIONS = 'mask_predictions'
class BoxPredictor(object):
"""BoxPredictor."""
def __init__(self, is_training, num_classes):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
        in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the
        assigned classification targets can range from {0, ..., K}).
"""
self._is_training = is_training
self._num_classes = num_classes
@property
def is_keras_model(self):
return False
@property
def num_classes(self):
return self._num_classes
def predict(self, image_features, num_predictions_per_location,
scope=None, **params):
"""Computes encoded object locations and corresponding confidences.
Takes a list of high level image feature maps as input and produces a list
of box encodings and a list of class scores where each element in the output
    lists corresponds to a feature map in the input list.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location: A list of integers representing the number
of box predictions to be made per spatial location for each feature map.
scope: Variable and Op scope name.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
Raises:
ValueError: If length of `image_features` is not equal to length of
`num_predictions_per_location`.
"""
if len(image_features) != len(num_predictions_per_location):
      raise ValueError('image_features and num_predictions_per_location must '
'be of same length, found: {} vs {}'.
format(len(image_features),
len(num_predictions_per_location)))
if scope is not None:
with tf.variable_scope(scope):
return self._predict(image_features, num_predictions_per_location,
**params)
return self._predict(image_features, num_predictions_per_location,
**params)
# TODO(rathodv): num_predictions_per_location could be moved to constructor.
# This is currently only used by ConvolutionalBoxPredictor.
@abstractmethod
def _predict(self, image_features, num_predictions_per_location, **params):
"""Implementations must override this method.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location: A list of integers representing the number
of box predictions to be made per spatial location for each feature map.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
pass
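# --- Illustrative sketch (added; not part of the original API) ---
# A minimal concrete BoxPredictor showing one way `_predict` can satisfy the
# contract above: a 1x1 convolution per feature map predicts 4 encoded box
# coordinates and num_classes + 1 scores per anchor. The class name, layer
# choices and intermediate shapes are hypothetical and chosen purely for
# illustration.
class _ExampleConvBoxPredictor(BoxPredictor):
  """Toy BoxPredictor used only to illustrate the `_predict` contract."""
  def _predict(self, image_features, num_predictions_per_location, **params):
    box_encodings = []
    class_predictions = []
    for features, num_per_location in zip(image_features,
                                          num_predictions_per_location):
      batch_size = tf.shape(features)[0]
      # 4 encoded coordinates per anchor: shape (b) from the docstring above.
      box_output = tf.layers.conv2d(features, num_per_location * 4, [1, 1])
      box_encodings.append(tf.reshape(box_output, [batch_size, -1, 4]))
      # num_classes + 1 scores per anchor, the +1 being the background class.
      class_output = tf.layers.conv2d(
          features, num_per_location * (self.num_classes + 1), [1, 1])
      class_predictions.append(
          tf.reshape(class_output, [batch_size, -1, self.num_classes + 1]))
    return {BOX_ENCODINGS: box_encodings,
            CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions}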
class KerasBoxPredictor(tf.keras.layers.Layer):
"""Keras-based BoxPredictor."""
def __init__(self, is_training, num_classes, freeze_batchnorm,
inplace_batchnorm_update, name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
        in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the
        assigned classification targets can range from {0, ..., K}).
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
        values in place. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update
batch norm statistics.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
"""
super(KerasBoxPredictor, self).__init__(name=name)
self._is_training = is_training
self._num_classes = num_classes
self._freeze_batchnorm = freeze_batchnorm
self._inplace_batchnorm_update = inplace_batchnorm_update
@property
def is_keras_model(self):
return True
@property
def num_classes(self):
return self._num_classes
def call(self, image_features, **kwargs):
"""Computes encoded object locations and corresponding confidences.
Takes a list of high level image feature maps as input and produces a list
of box encodings and a list of class scores where each element in the output
    lists corresponds to a feature map in the input list.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
**kwargs: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
return self._predict(image_features, **kwargs)
@abstractmethod
def _predict(self, image_features, **kwargs):
"""Implementations must override this method.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
**kwargs: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
raise NotImplementedError
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/box_predictor.py | box_predictor.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.freezable_batch_norm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import parameterized
import numpy as np
from six.moves import zip
import tensorflow as tf
from object_detection.core import freezable_batch_norm
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.core import freezable_sync_batch_norm
# pylint: enable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FreezableBatchNormTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for FreezableBatchNorm operations."""
def _build_model(self, use_sync_batch_norm, training=None):
model = tf.keras.models.Sequential()
norm = None
if use_sync_batch_norm:
norm = freezable_sync_batch_norm.FreezableSyncBatchNorm(training=training,
input_shape=(10,),
momentum=0.8)
else:
norm = freezable_batch_norm.FreezableBatchNorm(training=training,
input_shape=(10,),
momentum=0.8)
model.add(norm)
return model, norm
def _copy_weights(self, source_weights, target_weights):
for source, target in zip(source_weights, target_weights):
target.assign(source)
def _train_freezable_batch_norm(self, training_mean, training_var,
use_sync_batch_norm):
model, _ = self._build_model(use_sync_batch_norm=use_sync_batch_norm)
model.compile(loss='mse', optimizer='sgd')
# centered on training_mean, variance training_var
train_data = np.random.normal(
loc=training_mean,
scale=training_var,
size=(1000, 10))
model.fit(train_data, train_data, epochs=4, verbose=0)
return model.weights
def _test_batchnorm_layer(
self, norm, should_be_training, test_data,
testing_mean, testing_var, training_arg, training_mean, training_var):
out_tensor = norm(tf.convert_to_tensor(test_data, dtype=tf.float32),
training=training_arg)
out = out_tensor
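    # Undo the layer's learned affine transform (shift by beta, scale by
    # gamma) so that `out` is just the normalized activation.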
out -= norm.beta
out /= norm.gamma
if not should_be_training:
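      # A frozen layer normalizes with the stored *training* statistics, so
      # re-map to the test data's statistics before checking that the result
      # is approximately standard normal.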
out *= training_var
out += (training_mean - testing_mean)
out /= testing_var
np.testing.assert_allclose(out.numpy().mean(), 0.0, atol=1.5e-1)
np.testing.assert_allclose(out.numpy().std(), 1.0, atol=1.5e-1)
@parameterized.parameters(True, False)
def test_batchnorm_freezing_training_none(self, use_sync_batch_norm):
training_mean = 5.0
training_var = 10.0
testing_mean = -10.0
testing_var = 5.0
# Initially train the batch norm, and save the weights
trained_weights = self._train_freezable_batch_norm(training_mean,
training_var,
use_sync_batch_norm)
# Load the batch norm weights, freezing training to True.
# Apply the batch norm layer to testing data and ensure it is normalized
# according to the batch statistics.
model, norm = self._build_model(use_sync_batch_norm, training=True)
self._copy_weights(trained_weights, model.weights)
# centered on testing_mean, variance testing_var
test_data = np.random.normal(
loc=testing_mean,
scale=testing_var,
size=(1000, 10))
# Test with training=True passed to the call method:
training_arg = True
should_be_training = True
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Reset the weights, because they may have been updating by
# running with training=True
self._copy_weights(trained_weights, model.weights)
# Test with training=False passed to the call method:
training_arg = False
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test the layer in various Keras learning phase scopes:
training_arg = None
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(True)
should_be_training = True
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Reset the weights, because they may have been updating by
# running with training=True
self._copy_weights(trained_weights, model.weights)
tf.keras.backend.set_learning_phase(False)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
@parameterized.parameters(True, False)
def test_batchnorm_freezing_training_false(self, use_sync_batch_norm):
training_mean = 5.0
training_var = 10.0
testing_mean = -10.0
testing_var = 5.0
# Initially train the batch norm, and save the weights
trained_weights = self._train_freezable_batch_norm(training_mean,
training_var,
use_sync_batch_norm)
# Load the batch norm back up, freezing training to False.
# Apply the batch norm layer to testing data and ensure it is normalized
# according to the training data's statistics.
model, norm = self._build_model(use_sync_batch_norm, training=False)
self._copy_weights(trained_weights, model.weights)
# centered on testing_mean, variance testing_var
test_data = np.random.normal(
loc=testing_mean,
scale=testing_var,
size=(1000, 10))
# Make sure that the layer is never training
# Test with training=True passed to the call method:
training_arg = True
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test with training=False passed to the call method:
training_arg = False
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
# Test the layer in various Keras learning phase scopes:
training_arg = None
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(True)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
tf.keras.backend.set_learning_phase(False)
should_be_training = False
self._test_batchnorm_layer(norm, should_be_training, test_data,
testing_mean, testing_var, training_arg,
training_mean, training_var)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/freezable_batch_norm_tf2_test.py | freezable_batch_norm_tf2_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The preprocess function receives a tensor_dict which is a dictionary that maps
different field names to their tensors. For example,
tensor_dict[fields.InputDataFields.image] holds the image tensor.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1].
To preprocess multiple images with the same operations in cases where
nondeterministic operations are used, a preprocessor_cache.PreprocessorCache
object can be passed into the preprocess function or individual operations.
All nondeterministic operations except random_jitter_boxes support caching.
E.g.
Let tensor_dict{1,2,3,4,5} be copies of the same inputs.
Let preprocess_options contain nondeterministic operation(s) excluding
random_jitter_boxes.
cache1 = preprocessor_cache.PreprocessorCache()
cache2 = preprocessor_cache.PreprocessorCache()
a = preprocess(tensor_dict1, preprocess_options, preprocess_vars_cache=cache1)
b = preprocess(tensor_dict2, preprocess_options, preprocess_vars_cache=cache1)
c = preprocess(tensor_dict3, preprocess_options, preprocess_vars_cache=cache2)
d = preprocess(tensor_dict4, preprocess_options, preprocess_vars_cache=cache2)
e = preprocess(tensor_dict5, preprocess_options)
Then the corresponding tensors of object pairs (a,b) and (c,d)
are guaranteed to be equal element-wise, but the equality of any other object
pair cannot be determined.
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import math
import sys
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import densepose_ops
from object_detection.core import keypoint_ops
from object_detection.core import preprocessor_cache
from object_detection.core import standard_fields as fields
from object_detection.utils import autoaugment_utils
from object_detection.utils import ops
from object_detection.utils import patch_ops
from object_detection.utils import shape_utils
def _apply_with_random_selector(x,
func,
num_cases,
preprocess_vars_cache=None,
key=''):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
If both preprocess_vars_cache AND key are the same between two calls, sel will
be the same value in both calls.
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
key: variable identifier for preprocess_vars_cache.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
generator_func = functools.partial(
tf.random_uniform, [], maxval=num_cases, dtype=tf.int32)
rand_sel = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.SELECTOR,
preprocess_vars_cache, key)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([func(
control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case)
for case in range(num_cases)])[0]
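# Illustrative usage sketch (added; not part of the original module): picking
# one of two toy distortions at random. With a shared cache and key, repeated
# calls replay the same branch; the distortions themselves are hypothetical
# examples.
def _example_random_distortion(image, preprocess_vars_cache=None):
  """Applies a randomly selected toy distortion; for illustration only."""
  def distort(x, ordering):
    # `ordering` arrives as a python int identifying the sampled case.
    if ordering == 0:
      return tf.image.random_hue(x, max_delta=0.1)
    return tf.image.random_contrast(x, lower=0.5, upper=1.5)
  return _apply_with_random_selector(
      image, distort, num_cases=2,
      preprocess_vars_cache=preprocess_vars_cache, key='example_distortion')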
def _apply_with_random_selector_tuples(x,
func,
num_cases,
preprocess_vars_cache=None,
key=''):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
If both preprocess_vars_cache AND key are the same between two calls, sel will
be the same value in both calls.
Args:
x: A tuple of input tensors.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
key: variable identifier for preprocess_vars_cache.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
num_inputs = len(x)
generator_func = functools.partial(
tf.random_uniform, [], maxval=num_cases, dtype=tf.int32)
rand_sel = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.SELECTOR_TUPLES,
preprocess_vars_cache, key)
# Pass the real x only to one of the func calls.
tuples = [list() for t in x]
for case in range(num_cases):
new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x]
output = func(tuple(new_x), case)
for j in range(num_inputs):
tuples[j].append(output[j])
for i in range(num_inputs):
tuples[i] = control_flow_ops.merge(tuples[i])[0]
return tuple(tuples)
def _get_or_create_preprocess_rand_vars(generator_func,
function_id,
preprocess_vars_cache,
key=''):
"""Returns a tensor stored in preprocess_vars_cache or using generator_func.
If the tensor was previously generated and appears in the PreprocessorCache,
the previously generated tensor will be returned. Otherwise, a new tensor
is generated using generator_func and stored in the cache.
Args:
generator_func: A 0-argument function that generates a tensor.
function_id: identifier for the preprocessing function used.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
key: identifier for the variable stored.
Returns:
The generated tensor.
"""
if preprocess_vars_cache is not None:
var = preprocess_vars_cache.get(function_id, key)
if var is None:
var = generator_func()
preprocess_vars_cache.update(function_id, key, var)
else:
var = generator_func()
return var
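# Illustrative note (added): with a shared non-null cache, two calls that use
# the same (function_id, key) pair return the stored tensor instead of
# invoking generator_func again; the identifiers below are hypothetical:
#   v1 = _get_or_create_preprocess_rand_vars(gen, 'fn_id', cache, key='k')
#   v2 = _get_or_create_preprocess_rand_vars(gen, 'fn_id', cache, key='k')
#   # v1 is v2, and gen() ran only once.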
def _random_integer(minval, maxval, seed):
"""Returns a random 0-D tensor between minval and maxval.
Args:
minval: minimum value of the random tensor.
maxval: maximum value of the random tensor.
seed: random seed.
Returns:
A random 0-D tensor between minval and maxval.
"""
return tf.random_uniform(
[], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed)
# TODO(mttang): This method is needed because the current
# tf.image.rgb_to_grayscale method does not support quantization. Replace with
# tf.image.rgb_to_grayscale after quantization support is added.
def _rgb_to_grayscale(images, name=None):
"""Converts one or more images from RGB to Grayscale.
Outputs a tensor of the same `DType` and rank as `images`. The size of the
last dimension of the output is 1, containing the Grayscale value of the
pixels.
Args:
images: The RGB tensor to convert. Last dimension must have size 3 and
should contain RGB values.
name: A name for the operation (optional).
Returns:
The converted grayscale image(s).
"""
with tf.name_scope(name, 'rgb_to_grayscale', [images]) as name:
images = tf.convert_to_tensor(images, name='images')
    # Remember the original dtype so we can convert back if needed.
orig_dtype = images.dtype
flt_image = tf.image.convert_image_dtype(images, tf.float32)
# Reference for converting between RGB and grayscale.
# https://en.wikipedia.org/wiki/Luma_%28video%29
rgb_weights = [0.2989, 0.5870, 0.1140]
rank_1 = tf.expand_dims(tf.rank(images) - 1, 0)
gray_float = tf.reduce_sum(
flt_image * rgb_weights, rank_1, keep_dims=True)
gray_float.set_shape(images.get_shape()[:-1].concatenate([1]))
return tf.image.convert_image_dtype(gray_float, orig_dtype, name=name)
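# Worked example (added for illustration): with the luma weights above, a
# pure-green float pixel (R, G, B) = (0.0, 1.0, 0.0) maps to
# 0.2989 * 0 + 0.5870 * 1 + 0.1140 * 0 = 0.5870 in the single output channel.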
def normalize_image(image, original_minval, original_maxval, target_minval,
target_maxval):
"""Normalizes pixel values in the image.
Moves the pixel values from the current [original_minval, original_maxval]
  range to the [target_minval, target_maxval] range.
Args:
image: rank 3 float32 tensor containing 1
image -> [height, width, channels].
original_minval: current image minimum value.
original_maxval: current image maximum value.
target_minval: target image minimum value.
target_maxval: target image maximum value.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('NormalizeImage', values=[image]):
original_minval = float(original_minval)
original_maxval = float(original_maxval)
target_minval = float(target_minval)
target_maxval = float(target_maxval)
image = tf.cast(image, dtype=tf.float32)
image = tf.subtract(image, original_minval)
image = tf.multiply(image, (target_maxval - target_minval) /
(original_maxval - original_minval))
image = tf.add(image, target_minval)
return image
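# Worked example (added for illustration): mapping 8-bit pixels from
# [0, 255] to [-1, 1], a pixel value p becomes
#   (p - 0) * (1 - (-1)) / (255 - 0) + (-1) = 2p / 255 - 1,
# so p = 0 -> -1.0 and p = 255 -> 1.0:
#   normalized = normalize_image(image, 0, 255, -1.0, 1.0)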
def retain_boxes_above_threshold(boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
threshold=0.0):
"""Retains boxes whose label weight is above a given threshold.
If the label weight for a box is missing (represented by NaN), the box is
retained. The boxes that don't pass the threshold will not appear in the
returned tensor.
Args:
boxes: float32 tensor of shape [num_instance, 4] representing boxes
location in normalized coordinates.
labels: rank 1 int32 tensor of shape [num_instance] containing the object
classes.
label_weights: float32 tensor of shape [num_instance] representing the
weight for each box.
label_confidences: float32 tensor of shape [num_instance] representing the
confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks are of
the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
threshold: scalar python float.
Returns:
retained_boxes: [num_retained_instance, 4]
    retained_labels: [num_retained_instance]
retained_label_weights: [num_retained_instance]
    If label_confidences, multiclass_scores, masks, or keypoints are not None,
    the function also returns:
      retained_label_confidences: [num_retained_instance]
retained_multiclass_scores: [num_retained_instance, num_classes]
retained_masks: [num_retained_instance, height, width]
retained_keypoints: [num_retained_instance, num_keypoints, 2]
"""
with tf.name_scope('RetainBoxesAboveThreshold',
values=[boxes, labels, label_weights]):
indices = tf.where(
tf.logical_or(label_weights > threshold, tf.is_nan(label_weights)))
indices = tf.squeeze(indices, axis=1)
retained_boxes = tf.gather(boxes, indices)
retained_labels = tf.gather(labels, indices)
retained_label_weights = tf.gather(label_weights, indices)
result = [retained_boxes, retained_labels, retained_label_weights]
if label_confidences is not None:
retained_label_confidences = tf.gather(label_confidences, indices)
result.append(retained_label_confidences)
if multiclass_scores is not None:
retained_multiclass_scores = tf.gather(multiclass_scores, indices)
result.append(retained_multiclass_scores)
if masks is not None:
retained_masks = tf.gather(masks, indices)
result.append(retained_masks)
if keypoints is not None:
retained_keypoints = tf.gather(keypoints, indices)
result.append(retained_keypoints)
return result
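# Worked example (added for illustration): with label_weights
# [0.4, NaN, 0.9] and threshold=0.5, the first box is dropped, while the
# NaN-weighted box (missing weight) and the 0.9-weighted box are retained.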
def drop_label_probabilistically(boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
dropped_label=None,
drop_probability=0.0,
seed=None):
"""Drops boxes of a certain label with probability drop_probability.
Boxes of the label dropped_label will not appear in the returned tensor.
Args:
boxes: float32 tensor of shape [num_instance, 4] representing boxes
location in normalized coordinates.
labels: rank 1 int32 tensor of shape [num_instance] containing the object
classes.
label_weights: float32 tensor of shape [num_instance] representing the
weight for each box.
label_confidences: float32 tensor of shape [num_instance] representing the
confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks are of
the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
dropped_label: int32 id of label to drop.
drop_probability: float32 probability of dropping a label.
seed: random seed.
Returns:
retained_boxes: [num_retained_instance, 4]
    retained_labels: [num_retained_instance]
retained_label_weights: [num_retained_instance]
    If label_confidences, multiclass_scores, masks, or keypoints are not None,
    the function also returns:
      retained_label_confidences: [num_retained_instance]
retained_multiclass_scores: [num_retained_instance, num_classes]
retained_masks: [num_retained_instance, height, width]
retained_keypoints: [num_retained_instance, num_keypoints, 2]
"""
with tf.name_scope('DropLabelProbabilistically',
values=[boxes, labels]):
indices = tf.where(
tf.logical_or(
tf.random_uniform(tf.shape(labels), seed=seed) > drop_probability,
tf.not_equal(labels, dropped_label)))
indices = tf.squeeze(indices, axis=1)
retained_boxes = tf.gather(boxes, indices)
retained_labels = tf.gather(labels, indices)
retained_label_weights = tf.gather(label_weights, indices)
result = [retained_boxes, retained_labels, retained_label_weights]
if label_confidences is not None:
retained_label_confidences = tf.gather(label_confidences, indices)
result.append(retained_label_confidences)
if multiclass_scores is not None:
retained_multiclass_scores = tf.gather(multiclass_scores, indices)
result.append(retained_multiclass_scores)
if masks is not None:
retained_masks = tf.gather(masks, indices)
result.append(retained_masks)
if keypoints is not None:
retained_keypoints = tf.gather(keypoints, indices)
result.append(retained_keypoints)
return result
def remap_labels(labels,
original_labels=None,
new_label=None):
"""Remaps labels that have an id in original_labels to new_label.
Args:
labels: rank 1 int32 tensor of shape [num_instance] containing the object
classes.
original_labels: int list of original labels that should be mapped from.
    new_label: int label to map to.
Returns:
Remapped labels
"""
new_labels = labels
for original_label in original_labels:
change = tf.where(
tf.equal(new_labels, original_label),
tf.add(tf.zeros_like(new_labels), new_label - original_label),
tf.zeros_like(new_labels))
new_labels = tf.add(
new_labels,
change)
new_labels = tf.reshape(new_labels, tf.shape(labels))
return new_labels
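# Worked example (added for illustration): with labels = [1, 2, 3, 1],
# original_labels = [1, 2] and new_label = 5, every occurrence of 1 or 2 is
# shifted by (5 - original_label), giving [5, 5, 3, 5].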
def _flip_boxes_left_right(boxes):
"""Left-right flip the boxes.
Args:
boxes: Float32 tensor containing the bounding boxes -> [..., 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each last dimension is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=-1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], axis=-1)
return flipped_boxes
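# Worked example (added for illustration): flipping
# [ymin, xmin, ymax, xmax] = [0.1, 0.2, 0.5, 0.6] left-right gives
# [0.1, 1 - 0.6, 0.5, 1 - 0.2] = [0.1, 0.4, 0.5, 0.8]; the y-coordinates
# are unchanged.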
def _flip_boxes_up_down(boxes):
"""Up-down flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_ymin = tf.subtract(1.0, ymax)
flipped_ymax = tf.subtract(1.0, ymin)
flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)
return flipped_boxes
def _rot90_boxes(boxes):
"""Rotate boxes counter-clockwise by 90 degrees.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Rotated boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
rotated_ymin = tf.subtract(1.0, xmax)
rotated_ymax = tf.subtract(1.0, xmin)
rotated_xmin = ymin
rotated_xmax = ymax
rotated_boxes = tf.concat(
[rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1)
return rotated_boxes
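# Worked example (added for illustration): rotating
# [ymin, xmin, ymax, xmax] = [0.1, 0.2, 0.5, 0.6] counter-clockwise by 90
# degrees gives [1 - 0.6, 0.1, 1 - 0.2, 0.5] = [0.4, 0.1, 0.8, 0.5].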
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def _flip_masks_up_down(masks):
"""Up-down flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, ::-1, :]
def _rot90_masks(masks):
"""Rotate masks counter-clockwise by 90 degrees.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
rotated masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
masks = tf.transpose(masks, [0, 2, 1])
return masks[:, ::-1, :]
def random_horizontal_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_visibilities=None,
densepose_part_ids=None,
densepose_surface_coords=None,
keypoint_depths=None,
keypoint_depth_weights=None,
keypoint_flip_permutation=None,
probability=0.5,
seed=None,
preprocess_vars_cache=None):
"""Randomly flips the image and detections horizontally.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_visibilities: (optional) rank 2 bool tensor with shape
[num_instances, num_keypoints].
densepose_part_ids: (optional) rank 2 int32 tensor with shape
[num_instances, num_points] holding the part id for each
sampled point. These part_ids are 0-indexed, where the
first non-background part has index 0.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[num_instances, num_points, 4]. The DensePose
coordinates are of the form (y, x, v, u) where
(y, x) are the normalized image coordinates for a
sampled point, and (v, u) is the surface
coordinate for the part.
keypoint_depths: (optional) rank 2 float32 tensor with shape [num_instances,
num_keypoints] representing the relative depth of the
keypoints.
keypoint_depth_weights: (optional) rank 2 float32 tensor with shape
[num_instances, num_keypoints] representing the
weights of the relative depth of the keypoints.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
probability: the probability of performing this augmentation.
seed: random seed
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, keypoint_visibilities,
keypoint_flip_permutation, densepose_part_ids, or densepose_surface_coords
  are not None, the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
keypoint_visibilities: rank 2 bool tensor with shape
[num_instances, num_keypoints].
densepose_part_ids: rank 2 int32 tensor with shape
[num_instances, num_points].
densepose_surface_coords: rank 3 float32 tensor with shape
[num_instances, num_points, 4].
keypoint_depths: rank 2 float32 tensor with shape [num_instances,
num_keypoints]
keypoint_depth_weights: rank 2 float32 tensor with shape [num_instances,
num_keypoints].
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
    ValueError: if only one of densepose_part_ids and densepose_surface_coords
      is provided; both must be provided together.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
        'keypoints are provided but keypoint_flip_permutation is not provided')
if ((densepose_part_ids is not None and densepose_surface_coords is None) or
(densepose_part_ids is None and densepose_surface_coords is not None)):
raise ValueError(
'Must provide both `densepose_part_ids` and `densepose_surface_coords`')
with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_a_flip_random = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.HORIZONTAL_FLIP,
preprocess_vars_cache)
do_a_flip_random = tf.less(do_a_flip_random, probability)
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes),
lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks),
lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation),
lambda: keypoints)
result.append(keypoints)
# flip keypoint visibilities
if (keypoint_visibilities is not None and
keypoint_flip_permutation is not None):
kpt_flip_perm = keypoint_flip_permutation
keypoint_visibilities = tf.cond(
do_a_flip_random,
lambda: tf.gather(keypoint_visibilities, kpt_flip_perm, axis=1),
lambda: keypoint_visibilities)
result.append(keypoint_visibilities)
# flip DensePose parts and coordinates
if densepose_part_ids is not None:
flip_densepose_fn = functools.partial(
densepose_ops.flip_horizontal, densepose_part_ids,
densepose_surface_coords)
densepose_tensors = tf.cond(
do_a_flip_random,
flip_densepose_fn,
lambda: (densepose_part_ids, densepose_surface_coords))
result.extend(densepose_tensors)
# flip keypoint depths and weights.
if (keypoint_depths is not None and
keypoint_flip_permutation is not None):
kpt_flip_perm = keypoint_flip_permutation
keypoint_depths = tf.cond(
do_a_flip_random,
lambda: tf.gather(keypoint_depths, kpt_flip_perm, axis=1),
lambda: keypoint_depths)
keypoint_depth_weights = tf.cond(
do_a_flip_random,
lambda: tf.gather(keypoint_depth_weights, kpt_flip_perm, axis=1),
lambda: keypoint_depth_weights)
result.append(keypoint_depths)
result.append(keypoint_depth_weights)
return tuple(result)
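# Example usage (a minimal sketch; `image`, `boxes`, and `keypoints` are
# assumed to be tensors in the formats documented above, and the permutation
# assumes a hypothetical 4-keypoint skeleton whose left/right pairs are
# (0, 1) and (2, 3)):
#   flip_perm = (1, 0, 3, 2)
#   image, boxes, keypoints = random_horizontal_flip(
#       image, boxes=boxes, keypoints=keypoints,
#       keypoint_flip_permutation=flip_perm)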
def random_vertical_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
probability=0.5,
seed=None,
preprocess_vars_cache=None):
"""Randomly flips the image and detections vertically.
  The probability of flipping the image is given by the `probability` argument
  (default 0.5).
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
probability: the probability of performing this augmentation.
seed: random seed
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_up_down(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
        'keypoints are provided but keypoint_flip_permutation is not provided')
with tf.name_scope('RandomVerticalFlip', values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_a_flip_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.VERTICAL_FLIP,
preprocess_vars_cache)
do_a_flip_random = tf.less(do_a_flip_random, probability)
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes),
lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks),
lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation),
lambda: keypoints)
result.append(keypoints)
return tuple(result)
def random_rotation90(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_rot_permutation=None,
probability=0.5,
seed=None,
preprocess_vars_cache=None):
"""Randomly rotates the image and detections 90 degrees counter-clockwise.
  The probability of rotating the image is given by the `probability` argument
  (default 0.5). This can be combined with
random_horizontal_flip and random_vertical_flip to produce an output with a
uniform distribution of the eight possible 90 degree rotation / reflection
combinations.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
    keypoint_rot_permutation: rank 1 int32 tensor containing the keypoint
      rotation permutation.
probability: the probability of performing this augmentation.
seed: random seed
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
    If boxes, masks, and keypoints are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
def _rot90_image(image):
    # rotate image 90 degrees counter-clockwise
image_rotated = tf.image.rot90(image)
return image_rotated
with tf.name_scope('RandomRotation90', values=[image, boxes]):
result = []
# random variable defining whether to rotate by 90 degrees or not
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_a_rot90_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.ROTATION90,
preprocess_vars_cache)
do_a_rot90_random = tf.less(do_a_rot90_random, probability)
    # rotate image
image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image),
lambda: image)
result.append(image)
    # rotate boxes
if boxes is not None:
boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes),
lambda: boxes)
result.append(boxes)
    # rotate masks
if masks is not None:
masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks),
lambda: masks)
result.append(masks)
    # rotate keypoints
if keypoints is not None:
keypoints = tf.cond(
do_a_rot90_random,
lambda: keypoint_ops.rot90(keypoints, keypoint_rot_permutation),
lambda: keypoints)
result.append(keypoints)
return tuple(result)
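# Example (illustrative sketch): chaining the three ops below produces a
# uniform distribution over the eight 90-degree rotation / reflection
# combinations of the input, as noted in the docstring above.
#   image, boxes = random_rotation90(image, boxes=boxes)
#   image, boxes = random_horizontal_flip(image, boxes=boxes)
#   image, boxes = random_vertical_flip(image, boxes=boxes)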
def random_pixel_value_scale(image,
minval=0.9,
maxval=1.1,
seed=None,
preprocess_vars_cache=None):
"""Scales each value in the pixels of the image.
  This function scales each pixel independently of the others.
  For each value in the image tensor, it draws a random number between
  minval and maxval and multiplies the value by it.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
minval: lower ratio of scaling pixel values.
maxval: upper ratio of scaling pixel values.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomPixelValueScale', values=[image]):
generator_func = functools.partial(
tf.random_uniform, tf.shape(image),
minval=minval, maxval=maxval,
dtype=tf.float32, seed=seed)
color_coef = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.PIXEL_VALUE_SCALE,
preprocess_vars_cache)
image = tf.multiply(image, color_coef)
image = tf.clip_by_value(image, 0.0, 255.0)
return image
def random_image_scale(image,
masks=None,
min_scale_ratio=0.5,
max_scale_ratio=2.0,
seed=None,
preprocess_vars_cache=None):
"""Scales the image size.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels].
masks: (optional) rank 3 float32 tensor containing masks with
size [height, width, num_masks]. The value is set to None if there are no
masks.
min_scale_ratio: minimum scaling ratio.
max_scale_ratio: maximum scaling ratio.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
    masks: If masks is not None, resized masks of the same rank as the input
      masks will be returned.
"""
with tf.name_scope('RandomImageScale', values=[image]):
result = []
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
generator_func = functools.partial(
tf.random_uniform, [],
minval=min_scale_ratio, maxval=max_scale_ratio,
dtype=tf.float32, seed=seed)
size_coef = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.IMAGE_SCALE,
preprocess_vars_cache)
image_newysize = tf.cast(
tf.multiply(tf.cast(image_height, dtype=tf.float32), size_coef),
dtype=tf.int32)
image_newxsize = tf.cast(
tf.multiply(tf.cast(image_width, dtype=tf.float32), size_coef),
dtype=tf.int32)
image = tf.image.resize_images(
image, [image_newysize, image_newxsize], align_corners=True)
result.append(image)
if masks is not None:
masks = tf.image.resize_images(
masks, [image_newysize, image_newxsize],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
result.append(masks)
return tuple(result)
def _augment_only_rgb_channels(image, augment_function):
"""Augments only the RGB slice of an image with additional channels."""
rgb_slice = image[:, :, :3]
augmented_rgb_slice = augment_function(rgb_slice)
image = tf.concat([augmented_rgb_slice, image[:, :, 3:]], -1)
return image
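# Example (illustrative sketch): the color ops below use this helper so that
# images with extra channels (e.g. depth or motion appended after RGB) are
# only color-distorted on their first three channels; channels 3 and beyond
# pass through untouched.
#   brightened = _augment_only_rgb_channels(
#       multi_channel_image,
#       lambda rgb: tf.clip_by_value(rgb + 10., 0., 255.))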
def random_rgb_to_gray(image,
probability=0.1,
seed=None,
preprocess_vars_cache=None):
"""Changes the image from RGB to Grayscale with the given probability.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
probability: the probability of returning a grayscale image.
The probability should be a number between [0, 1].
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
def _image_to_gray(image):
image_gray1 = _rgb_to_grayscale(image)
image_gray3 = tf.image.grayscale_to_rgb(image_gray1)
return image_gray3
with tf.name_scope('RandomRGBtoGray', values=[image]):
# random variable defining whether to change to grayscale or not
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_gray_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.RGB_TO_GRAY,
preprocess_vars_cache)
image = tf.cond(
tf.greater(do_gray_random, probability), lambda: image,
lambda: _augment_only_rgb_channels(image, _image_to_gray))
return image
def adjust_gamma(image, gamma=1.0, gain=1.0):
"""Adjusts the gamma.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
gamma: the gamma value. Must be a non-negative real number.
gain: a constant multiplier.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('AdjustGamma', values=[image]):
def _adjust_gamma(image):
image = tf.image.adjust_gamma(image / 255, gamma, gain) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_gamma)
return image
def random_adjust_brightness(image,
max_delta=0.2,
seed=None,
preprocess_vars_cache=None):
"""Randomly adjusts brightness.
Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
max_delta: how much to change the brightness. A value between [0, 1).
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustBrightness', values=[image]):
generator_func = functools.partial(tf.random_uniform, [],
-max_delta, max_delta, seed=seed)
delta = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADJUST_BRIGHTNESS,
preprocess_vars_cache)
def _adjust_brightness(image):
image = tf.image.adjust_brightness(image / 255, delta) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_brightness)
return image
def random_adjust_contrast(image,
min_delta=0.8,
max_delta=1.25,
seed=None,
preprocess_vars_cache=None):
"""Randomly adjusts contrast.
Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
min_delta: see max_delta.
max_delta: how much to change the contrast. Contrast will change with a
value between min_delta and max_delta. This value will be
multiplied to the current contrast of the image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustContrast', values=[image]):
generator_func = functools.partial(tf.random_uniform, [],
min_delta, max_delta, seed=seed)
contrast_factor = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADJUST_CONTRAST,
preprocess_vars_cache)
def _adjust_contrast(image):
image = tf.image.adjust_contrast(image / 255, contrast_factor) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_contrast)
return image
def random_adjust_hue(image,
max_delta=0.02,
seed=None,
preprocess_vars_cache=None):
"""Randomly adjusts hue.
Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
    max_delta: change hue randomly with a value between -max_delta and
      max_delta.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustHue', values=[image]):
generator_func = functools.partial(tf.random_uniform, [],
-max_delta, max_delta, seed=seed)
delta = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.ADJUST_HUE,
preprocess_vars_cache)
def _adjust_hue(image):
image = tf.image.adjust_hue(image / 255, delta) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_hue)
return image
def random_adjust_saturation(image,
min_delta=0.8,
max_delta=1.25,
seed=None,
preprocess_vars_cache=None):
"""Randomly adjusts saturation.
Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
min_delta: see max_delta.
max_delta: how much to change the saturation. Saturation will change with a
value between min_delta and max_delta. This value will be
multiplied to the current saturation of the image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustSaturation', values=[image]):
generator_func = functools.partial(tf.random_uniform, [],
min_delta, max_delta, seed=seed)
saturation_factor = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADJUST_SATURATION,
preprocess_vars_cache)
def _adjust_saturation(image):
image = tf.image.adjust_saturation(image / 255, saturation_factor) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_saturation)
return image
def random_distort_color(image, color_ordering=0, preprocess_vars_cache=None):
"""Randomly distorts color.
Randomly distorts color using a combination of brightness, hue, contrast and
saturation changes. Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
color_ordering: Python int, a type of distortion (valid values: 0, 1).
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
Raises:
ValueError: if color_ordering is not in {0, 1}.
"""
with tf.name_scope('RandomDistortColor', values=[image]):
if color_ordering == 0:
image = random_adjust_brightness(
image, max_delta=32. / 255.,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_saturation(
image, min_delta=0.5, max_delta=1.5,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_hue(
image, max_delta=0.2,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_contrast(
image, min_delta=0.5, max_delta=1.5,
preprocess_vars_cache=preprocess_vars_cache)
elif color_ordering == 1:
image = random_adjust_brightness(
image, max_delta=32. / 255.,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_contrast(
image, min_delta=0.5, max_delta=1.5,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_saturation(
image, min_delta=0.5, max_delta=1.5,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_hue(
image, max_delta=0.2,
preprocess_vars_cache=preprocess_vars_cache)
else:
raise ValueError('color_ordering must be in {0, 1}')
return image
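# Example (illustrative sketch): passing the same PreprocessorCache to two
# calls replays identical random draws, so paired inputs can be distorted
# consistently.
#   cache = preprocessor_cache.PreprocessorCache()
#   image_a = random_distort_color(image_a, preprocess_vars_cache=cache)
#   image_b = random_distort_color(image_b, preprocess_vars_cache=cache)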
def random_jitter_boxes(boxes, ratio=0.05, jitter_mode='default', seed=None):
"""Randomly jitters boxes in image.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
ratio: The ratio of the box width and height that the corners can jitter.
For example if the width is 100 pixels and ratio is 0.05,
the corners can jitter up to 5 pixels in the x direction.
jitter_mode: One of
shrink - Only shrinks boxes.
expand - Only expands boxes.
expand_symmetric - Expands the boxes symmetrically along height and width
dimensions without changing the box center. The ratios of expansion
along X, Y dimensions are independent
shrink_symmetric - Shrinks the boxes symmetrically along height and width
dimensions without changing the box center. The ratios of shrinking
along X, Y dimensions are independent
      expand_symmetric_xy - Expands the boxes symmetrically along height and
        width dimensions and the ratio of expansion is the same for both.
      shrink_symmetric_xy - Shrinks the boxes symmetrically along height and
        width dimensions and the ratio of shrinking is the same for both.
default - Randomly and independently perturbs each box boundary.
seed: random seed.
Returns:
boxes: boxes which is the same shape as input boxes.
"""
with tf.name_scope('RandomJitterBoxes'):
ymin, xmin, ymax, xmax = (boxes[:, i] for i in range(4))
blist = box_list.BoxList(boxes)
ycenter, xcenter, height, width = blist.get_center_coordinates_and_sizes()
height = tf.maximum(tf.abs(height), 1e-6)
width = tf.maximum(tf.abs(width), 1e-6)
if jitter_mode in ['shrink', 'shrink_symmetric', 'shrink_symmetric_xy']:
min_ratio, max_ratio = -ratio, 0
elif jitter_mode in ['expand', 'expand_symmetric', 'expand_symmetric_xy']:
min_ratio, max_ratio = 0, ratio
elif jitter_mode == 'default':
min_ratio, max_ratio = -ratio, ratio
else:
raise ValueError('Unknown jitter mode - %s' % jitter_mode)
num_boxes = tf.shape(boxes)[0]
if jitter_mode in ['expand_symmetric', 'shrink_symmetric',
'expand_symmetric_xy', 'shrink_symmetric_xy']:
distortion = 1.0 + tf.random.uniform(
[num_boxes, 2], minval=min_ratio, maxval=max_ratio, dtype=tf.float32,
seed=seed)
height_distortion = distortion[:, 0]
width_distortion = distortion[:, 1]
# This is to ensure that all boxes are augmented symmetrically. We clip
# each boundary to lie within the image, and when doing so, we also
# adjust its symmetric counterpart.
max_height_distortion = tf.abs(tf.minimum(
(2.0 * ycenter) / height, 2.0 * (1 - ycenter) / height))
max_width_distortion = tf.abs(tf.minimum(
(2.0 * xcenter) / width, 2.0 * (1 - xcenter) / width))
if jitter_mode in ['expand_symmetric_xy', 'shrink_symmetric_xy']:
height_distortion = width_distortion = distortion[:, 0]
max_height_distortion = max_width_distortion = (
tf.minimum(max_width_distortion, max_height_distortion))
height_distortion = tf.clip_by_value(
height_distortion, -max_height_distortion, max_height_distortion)
width_distortion = tf.clip_by_value(
width_distortion, -max_width_distortion, max_width_distortion)
ymin = ycenter - (height * height_distortion / 2.0)
ymax = ycenter + (height * height_distortion / 2.0)
xmin = xcenter - (width * width_distortion / 2.0)
xmax = xcenter + (width * width_distortion / 2.0)
elif jitter_mode in ['expand', 'shrink', 'default']:
distortion = 1.0 + tf.random.uniform(
[num_boxes, 4], minval=min_ratio, maxval=max_ratio, dtype=tf.float32,
seed=seed)
ymin_jitter = height * distortion[:, 0]
xmin_jitter = width * distortion[:, 1]
ymax_jitter = height * distortion[:, 2]
xmax_jitter = width * distortion[:, 3]
ymin, ymax = ycenter - (ymin_jitter / 2.0), ycenter + (ymax_jitter / 2.0)
xmin, xmax = xcenter - (xmin_jitter / 2.0), xcenter + (xmax_jitter / 2.0)
else:
raise ValueError('Unknown jitter mode - %s' % jitter_mode)
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1)
return tf.clip_by_value(boxes, 0.0, 1.0)
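# Example usage (illustrative sketch): shrink boxes by up to 5% of their
# size, symmetrically about their centers, with the same ratio applied to
# height and width.
#   jittered_boxes = random_jitter_boxes(
#       boxes, ratio=0.05, jitter_mode='shrink_symmetric_xy')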
def _strict_random_crop_image(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
mask_weights=None,
keypoints=None,
keypoint_visibilities=None,
densepose_num_points=None,
densepose_part_ids=None,
densepose_surface_coords=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3,
clip_boxes=True,
preprocess_vars_cache=None):
"""Performs random crop.
Note: Keypoint coordinates that are outside the crop will be set to NaN, which
is consistent with the original keypoint encoding for non-existing keypoints.
This function always crops the image and is supposed to be used by
  the `random_crop_image` function, which sometimes returns the image
  unchanged.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
mask_weights: (optional) rank 1 float32 tensor with shape [num_instances]
with instance masks weights.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_visibilities: (optional) rank 2 bool tensor with shape
[num_instances, num_keypoints].
densepose_num_points: (optional) rank 1 int32 tensor with shape
[num_instances] with the number of sampled points per
instance.
densepose_part_ids: (optional) rank 2 int32 tensor with shape
[num_instances, num_points] holding the part id for each
sampled point. These part_ids are 0-indexed, where the
first non-background part has index 0.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[num_instances, num_points, 4]. The DensePose
coordinates are of the form (y, x, v, u) where
(y, x) are the normalized image coordinates for a
sampled point, and (v, u) is the surface
coordinate for the part.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If label_weights, multiclass_scores, masks, mask_weights, keypoints,
keypoint_visibilities, densepose_num_points, densepose_part_ids, or
densepose_surface_coords is not None, the function also returns:
label_weights: rank 1 float32 tensor with shape [num_instances].
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
mask_weights: rank 1 float32 tensor with shape [num_instances] with mask
weights.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
keypoint_visibilities: rank 2 bool tensor with shape
[num_instances, num_keypoints]
densepose_num_points: rank 1 int32 tensor with shape [num_instances].
densepose_part_ids: rank 2 int32 tensor with shape
[num_instances, num_points].
densepose_surface_coords: rank 3 float32 tensor with shape
[num_instances, num_points, 4].
Raises:
ValueError: If some but not all of the DensePose tensors are provided.
"""
with tf.name_scope('RandomCropImage', values=[image, boxes]):
densepose_tensors = [densepose_num_points, densepose_part_ids,
densepose_surface_coords]
if (any(t is not None for t in densepose_tensors) and
not all(t is not None for t in densepose_tensors)):
raise ValueError('If cropping DensePose labels, must provide '
'`densepose_num_points`, `densepose_part_ids`, and '
'`densepose_surface_coords`')
image_shape = tf.shape(image)
    # boxes are [N, 4]. Let's first make them [N, 1, 4].
boxes_expanded = tf.expand_dims(
tf.clip_by_value(
boxes, clip_value_min=0.0, clip_value_max=1.0), 1)
generator_func = functools.partial(
tf.image.sample_distorted_bounding_box,
image_shape,
bounding_boxes=boxes_expanded,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=100,
use_image_if_no_bounding_boxes=True)
# for ssd cropping, each value of min_object_covered has its own
# cached random variable
sample_distorted_bounding_box = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.STRICT_CROP_IMAGE,
preprocess_vars_cache, key=min_object_covered)
im_box_begin, im_box_size, im_box = sample_distorted_bounding_box
im_box_end = im_box_begin + im_box_size
new_image = image[im_box_begin[0]:im_box_end[0],
im_box_begin[1]:im_box_end[1], :]
new_image.set_shape([None, None, image.get_shape()[2]])
# [1, 4]
im_box_rank2 = tf.squeeze(im_box, axis=[0])
# [4]
im_box_rank1 = tf.squeeze(im_box)
boxlist = box_list.BoxList(boxes)
boxlist.add_field('labels', labels)
if label_weights is not None:
boxlist.add_field('label_weights', label_weights)
if label_confidences is not None:
boxlist.add_field('label_confidences', label_confidences)
if multiclass_scores is not None:
boxlist.add_field('multiclass_scores', multiclass_scores)
im_boxlist = box_list.BoxList(im_box_rank2)
# remove boxes that are outside cropped image
boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(
boxlist, im_box_rank1)
    # remove boxes whose overlap with the cropped window is below
    # overlap_thresh
overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
boxlist, im_boxlist, overlap_thresh)
# change the coordinate of the remaining boxes
new_labels = overlapping_boxlist.get_field('labels')
new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
im_box_rank1)
new_boxes = new_boxlist.get()
if clip_boxes:
new_boxes = tf.clip_by_value(
new_boxes, clip_value_min=0.0, clip_value_max=1.0)
result = [new_image, new_boxes, new_labels]
if label_weights is not None:
new_label_weights = overlapping_boxlist.get_field('label_weights')
result.append(new_label_weights)
if label_confidences is not None:
new_label_confidences = overlapping_boxlist.get_field('label_confidences')
result.append(new_label_confidences)
if multiclass_scores is not None:
new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores')
result.append(new_multiclass_scores)
if masks is not None:
masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)
masks_of_boxes_completely_inside_window = tf.gather(
masks_of_boxes_inside_window, keep_ids)
      new_masks = masks_of_boxes_completely_inside_window[
          :, im_box_begin[0]:im_box_end[0], im_box_begin[1]:im_box_end[1]]
result.append(new_masks)
if mask_weights is not None:
mask_weights_inside_window = tf.gather(mask_weights, inside_window_ids)
mask_weights_completely_inside_window = tf.gather(
mask_weights_inside_window, keep_ids)
result.append(mask_weights_completely_inside_window)
if keypoints is not None:
keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids)
keypoints_of_boxes_completely_inside_window = tf.gather(
keypoints_of_boxes_inside_window, keep_ids)
new_keypoints = keypoint_ops.change_coordinate_frame(
keypoints_of_boxes_completely_inside_window, im_box_rank1)
if clip_boxes:
new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,
[0.0, 0.0, 1.0, 1.0])
result.append(new_keypoints)
if keypoint_visibilities is not None:
kpt_vis_of_boxes_inside_window = tf.gather(keypoint_visibilities,
inside_window_ids)
kpt_vis_of_boxes_completely_inside_window = tf.gather(
kpt_vis_of_boxes_inside_window, keep_ids)
      if clip_boxes:
        # Set any keypoints with NaN coordinates to invisible.
        new_kpt_visibilities = keypoint_ops.set_keypoint_visibilities(
            new_keypoints, kpt_vis_of_boxes_completely_inside_window)
      else:
        new_kpt_visibilities = kpt_vis_of_boxes_completely_inside_window
      result.append(new_kpt_visibilities)
if densepose_num_points is not None:
filtered_dp_tensors = []
for dp_tensor in densepose_tensors:
dp_tensor_inside_window = tf.gather(dp_tensor, inside_window_ids)
dp_tensor_completely_inside_window = tf.gather(dp_tensor_inside_window,
keep_ids)
filtered_dp_tensors.append(dp_tensor_completely_inside_window)
new_dp_num_points = filtered_dp_tensors[0]
new_dp_point_ids = filtered_dp_tensors[1]
new_dp_surf_coords = densepose_ops.change_coordinate_frame(
filtered_dp_tensors[2], im_box_rank1)
if clip_boxes:
new_dp_num_points, new_dp_point_ids, new_dp_surf_coords = (
densepose_ops.prune_outside_window(
new_dp_num_points, new_dp_point_ids, new_dp_surf_coords,
window=[0.0, 0.0, 1.0, 1.0]))
result.extend([new_dp_num_points, new_dp_point_ids, new_dp_surf_coords])
return tuple(result)
def random_crop_image(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
mask_weights=None,
keypoints=None,
keypoint_visibilities=None,
densepose_num_points=None,
densepose_part_ids=None,
densepose_surface_coords=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3,
clip_boxes=True,
random_coef=0.0,
seed=None,
preprocess_vars_cache=None):
"""Randomly crops the image.
Given the input image and its bounding boxes, this op randomly
crops a subimage. Given a user-provided set of input constraints,
the crop window is resampled until it satisfies these constraints.
If within 100 trials it is unable to find a valid crop, the original
image is returned. See the Args section for a description of the input
  constraints. Both input boxes and returned boxes are in normalized
form (e.g., lie in the unit square [0, 1]).
This function will return the original image with probability random_coef.
Note: Keypoint coordinates that are outside the crop will be set to NaN, which
is consistent with the original keypoint encoding for non-existing keypoints.
Also, the keypoint visibility will be set to False.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
    label_confidences: (optional) float32 tensor of shape [num_instances]
      representing the confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
mask_weights: (optional) rank 1 float32 tensor with shape [num_instances]
containing weights for each instance mask.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_visibilities: (optional) rank 2 bool tensor with shape
[num_instances, num_keypoints].
densepose_num_points: (optional) rank 1 int32 tensor with shape
[num_instances] with the number of sampled points per
instance.
densepose_part_ids: (optional) rank 2 int32 tensor with shape
[num_instances, num_points] holding the part id for each
sampled point. These part_ids are 0-indexed, where the
first non-background part has index 0.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[num_instances, num_points, 4]. The DensePose
coordinates are of the form (y, x, v, u) where
(y, x) are the normalized image coordinates for a
sampled point, and (v, u) is the surface
coordinate for the part.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
labels: new labels.
If label_weights, multiclass_scores, masks, keypoints,
    keypoint_visibilities, densepose_num_points, densepose_part_ids, or
    densepose_surface_coords is not None, the function also returns:
label_weights: rank 1 float32 tensor with shape [num_instances].
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
mask_weights: rank 1 float32 tensor with shape [num_instances].
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
keypoint_visibilities: rank 2 bool tensor with shape
[num_instances, num_keypoints]
densepose_num_points: rank 1 int32 tensor with shape [num_instances].
densepose_part_ids: rank 2 int32 tensor with shape
[num_instances, num_points].
densepose_surface_coords: rank 3 float32 tensor with shape
[num_instances, num_points, 4].
"""
def strict_random_crop_image_fn():
return _strict_random_crop_image(
image,
boxes,
labels,
label_weights,
label_confidences=label_confidences,
multiclass_scores=multiclass_scores,
masks=masks,
mask_weights=mask_weights,
keypoints=keypoints,
keypoint_visibilities=keypoint_visibilities,
densepose_num_points=densepose_num_points,
densepose_part_ids=densepose_part_ids,
densepose_surface_coords=densepose_surface_coords,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
clip_boxes=clip_boxes,
preprocess_vars_cache=preprocess_vars_cache)
# avoids tf.cond to make faster RCNN training on borg. See b/140057645.
if random_coef < sys.float_info.min:
result = strict_random_crop_image_fn()
else:
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_a_crop_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.CROP_IMAGE,
preprocess_vars_cache)
do_a_crop_random = tf.greater(do_a_crop_random, random_coef)
outputs = [image, boxes, labels]
if label_weights is not None:
outputs.append(label_weights)
if label_confidences is not None:
outputs.append(label_confidences)
if multiclass_scores is not None:
outputs.append(multiclass_scores)
if masks is not None:
outputs.append(masks)
if mask_weights is not None:
outputs.append(mask_weights)
if keypoints is not None:
outputs.append(keypoints)
if keypoint_visibilities is not None:
outputs.append(keypoint_visibilities)
if densepose_num_points is not None:
outputs.extend([densepose_num_points, densepose_part_ids,
densepose_surface_coords])
result = tf.cond(do_a_crop_random, strict_random_crop_image_fn,
lambda: tuple(outputs))
return result
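# Example usage (illustrative sketch): crop so that at least half of some
# ground-truth box is covered, keeping the original image 30% of the time.
#   image, boxes, labels, label_weights = random_crop_image(
#       image, boxes, labels, label_weights,
#       min_object_covered=0.5, random_coef=0.3)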
def random_pad_image(image,
boxes,
masks=None,
keypoints=None,
densepose_surface_coords=None,
min_image_size=None,
max_image_size=None,
pad_color=None,
center_pad=False,
seed=None,
preprocess_vars_cache=None):
"""Randomly pads the image.
This function randomly pads the image with zeros. The final size of the
padded image will be between min_image_size and max_image_size.
  If min_image_size is smaller than the input image size, min_image_size will
be set to the input image size. The same for max_image_size. The input image
will be located at a uniformly random location inside the padded image.
The relative location of the boxes to the original image will remain the same.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[N, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[N, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[N, num_points, 4]. The DensePose coordinates are
of the form (y, x, v, u) where (y, x) are the
normalized image coordinates for a sampled point,
and (v, u) is the surface coordinate for the part.
min_image_size: a tensor of size [min_height, min_width], type tf.int32.
If passed as None, will be set to image size
[height, width].
max_image_size: a tensor of size [max_height, max_width], type tf.int32.
                    If passed as None, will be set to twice the input image
                    size, i.e. [height * 2, width * 2].
pad_color: padding color. A rank 1 tensor of [channels] with dtype=
               tf.float32. If set as None, it will be set to the average
               color of
the input image.
center_pad: whether the original image will be padded to the center, or
randomly padded (which is default).
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
if masks is not None, the function also returns:
masks: rank 3 float32 tensor with shape [N, new_height, new_width]
if keypoints is not None, the function also returns:
keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2]
if densepose_surface_coords is not None, the function also returns:
densepose_surface_coords: rank 3 float32 tensor with shape
[num_instances, num_points, 4]
"""
if pad_color is None:
pad_color = tf.reduce_mean(image, axis=[0, 1])
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
if max_image_size is None:
max_image_size = tf.stack([image_height * 2, image_width * 2])
max_image_size = tf.maximum(max_image_size,
tf.stack([image_height, image_width]))
if min_image_size is None:
min_image_size = tf.stack([image_height, image_width])
min_image_size = tf.maximum(min_image_size,
tf.stack([image_height, image_width]))
target_height = tf.cond(
max_image_size[0] > min_image_size[0],
lambda: _random_integer(min_image_size[0], max_image_size[0], seed),
lambda: max_image_size[0])
target_width = tf.cond(
max_image_size[1] > min_image_size[1],
lambda: _random_integer(min_image_size[1], max_image_size[1], seed),
lambda: max_image_size[1])
offset_height = tf.cond(
target_height > image_height,
lambda: _random_integer(0, target_height - image_height, seed),
lambda: tf.constant(0, dtype=tf.int32))
offset_width = tf.cond(
target_width > image_width,
lambda: _random_integer(0, target_width - image_width, seed),
lambda: tf.constant(0, dtype=tf.int32))
if center_pad:
offset_height = tf.cast(tf.floor((target_height - image_height) / 2),
tf.int32)
offset_width = tf.cast(tf.floor((target_width - image_width) / 2),
tf.int32)
gen_func = lambda: (target_height, target_width, offset_height, offset_width)
params = _get_or_create_preprocess_rand_vars(
gen_func, preprocessor_cache.PreprocessorCache.PAD_IMAGE,
preprocess_vars_cache)
target_height, target_width, offset_height, offset_width = params
new_image = tf.image.pad_to_bounding_box(
image,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
# Setting color of the padded pixels
image_ones = tf.ones_like(image)
image_ones_padded = tf.image.pad_to_bounding_box(
image_ones,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
image_color_padded = (1.0 - image_ones_padded) * pad_color
new_image += image_color_padded
# setting boxes
new_window = tf.cast(
tf.stack([
-offset_height, -offset_width, target_height - offset_height,
target_width - offset_width
]),
dtype=tf.float32)
new_window /= tf.cast(
tf.stack([image_height, image_width, image_height, image_width]),
dtype=tf.float32)
boxlist = box_list.BoxList(boxes)
new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window)
new_boxes = new_boxlist.get()
result = [new_image, new_boxes]
if masks is not None:
new_masks = tf.image.pad_to_bounding_box(
masks[:, :, :, tf.newaxis],
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)[:, :, :, 0]
result.append(new_masks)
if keypoints is not None:
new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window)
result.append(new_keypoints)
if densepose_surface_coords is not None:
new_densepose_surface_coords = densepose_ops.change_coordinate_frame(
densepose_surface_coords, new_window)
result.append(new_densepose_surface_coords)
return tuple(result)
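# Example usage (illustrative sketch; assumes pixel values in [0, 1] as
# documented above): pad to at most twice the input size (the default),
# filling the new pixels with mid-gray instead of the mean image color.
#   image, boxes = random_pad_image(
#       image, boxes, pad_color=tf.constant([0.5, 0.5, 0.5]))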
def random_absolute_pad_image(image,
boxes,
masks=None,
keypoints=None,
densepose_surface_coords=None,
max_height_padding=None,
max_width_padding=None,
pad_color=None,
seed=None,
preprocess_vars_cache=None):
"""Randomly pads the image by small absolute amounts.
  Like random_pad_image above, but the amount of height and width padding is
  drawn uniformly at random from [0, max_height_padding) and
  [0, max_width_padding) per image, instead of padding every image to a fixed
  target size.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[N, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[N, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[N, num_points, 4]. The DensePose coordinates are
of the form (y, x, v, u) where (y, x) are the
normalized image coordinates for a sampled point,
and (v, u) is the surface coordinate for the part.
max_height_padding: a scalar tf.int32 tensor denoting the maximum amount of
height padding. The padding will be chosen uniformly at
random from [0, max_height_padding).
max_width_padding: a scalar tf.int32 tensor denoting the maximum amount of
width padding. The padding will be chosen uniformly at
random from [0, max_width_padding).
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
               If set as None, it will be set to the average color of the input
image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
if masks is not None, the function also returns:
masks: rank 3 float32 tensor with shape [N, new_height, new_width]
if keypoints is not None, the function also returns:
keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2]
"""
min_image_size = tf.shape(image)[:2]
max_image_size = min_image_size + tf.cast(
[max_height_padding, max_width_padding], dtype=tf.int32)
return random_pad_image(
image,
boxes,
masks=masks,
keypoints=keypoints,
densepose_surface_coords=densepose_surface_coords,
min_image_size=min_image_size,
max_image_size=max_image_size,
pad_color=pad_color,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
def random_crop_pad_image(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3,
clip_boxes=True,
random_coef=0.0,
min_padded_size_ratio=(1.0, 1.0),
max_padded_size_ratio=(2.0, 2.0),
pad_color=None,
seed=None,
preprocess_vars_cache=None):
"""Randomly crops and pads the image.
Given an input image and its bounding boxes, this op first randomly crops
the image and then randomly pads the image with background values. Parameters
  min_padded_size_ratio and max_padded_size_ratio determine the range of the
final output image size. Specifically, the final image size will have a size
in the range of min_padded_size_ratio * tf.shape(image) and
max_padded_size_ratio * tf.shape(image). Note that these ratios are with
respect to the size of the original image, so we can't capture the same
effect easily by independently applying RandomCropImage
followed by RandomPadImage.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: rank 1 float32 containing the label weights.
label_confidences: rank 1 float32 containing the label confidences.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width.
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width.
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
               if set to None, it will be set to the average color of the
               randomly cropped image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
padded_image: padded image.
padded_boxes: boxes which is the same rank as input boxes. Boxes are in
normalized form.
cropped_labels: cropped labels.
if label_weights is not None also returns:
cropped_label_weights: cropped label weights.
if multiclass_scores is not None also returns:
cropped_multiclass_scores: cropped_multiclass_scores.
"""
image_size = tf.shape(image)
image_height = image_size[0]
image_width = image_size[1]
result = random_crop_image(
image=image,
boxes=boxes,
labels=labels,
label_weights=label_weights,
label_confidences=label_confidences,
multiclass_scores=multiclass_scores,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
clip_boxes=clip_boxes,
random_coef=random_coef,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
cropped_image, cropped_boxes, cropped_labels = result[:3]
min_image_size = tf.cast(
tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) *
min_padded_size_ratio,
dtype=tf.int32)
max_image_size = tf.cast(
tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) *
max_padded_size_ratio,
dtype=tf.int32)
padded_image, padded_boxes = random_pad_image( # pylint: disable=unbalanced-tuple-unpacking
cropped_image,
cropped_boxes,
min_image_size=min_image_size,
max_image_size=max_image_size,
pad_color=pad_color,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
cropped_padded_output = (padded_image, padded_boxes, cropped_labels)
index = 3
if label_weights is not None:
cropped_label_weights = result[index]
cropped_padded_output += (cropped_label_weights,)
index += 1
if label_confidences is not None:
cropped_label_confidences = result[index]
cropped_padded_output += (cropped_label_confidences,)
index += 1
if multiclass_scores is not None:
cropped_multiclass_scores = result[index]
cropped_padded_output += (cropped_multiclass_scores,)
return cropped_padded_output
def random_crop_to_aspect_ratio(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
aspect_ratio=1.0,
overlap_thresh=0.3,
clip_boxes=True,
center_crop=False,
seed=None,
preprocess_vars_cache=None):
"""Randomly crops an image to the specified aspect ratio.
  Randomly crops a portion of the image such that the crop is of the
specified aspect ratio, and the crop is as large as possible. If the specified
aspect ratio is larger than the aspect ratio of the image, this op will
randomly remove rows from the top and bottom of the image. If the specified
aspect ratio is less than the aspect ratio of the image, this op will randomly
remove cols from the left and right of the image. If the specified aspect
ratio is the same as the aspect ratio of the image, this op will return the
image.
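  Example usage (an illustrative sketch; assumes inputs in the formats
  described in Args below):
    (new_image, new_boxes, new_labels,
     new_label_weights) = random_crop_to_aspect_ratio(
         image, boxes, labels, label_weights, aspect_ratio=0.75)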
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
aspect_ratio: the aspect ratio of cropped image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
center_crop: whether to take the center crop or a random crop.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
    If label_weights, label_confidences, masks, keypoints, or
    multiclass_scores is not None, the function also returns:
    label_weights: rank 1 float32 tensor with shape [num_instances].
    label_confidences: rank 1 float32 tensor with shape [num_instances].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
Raises:
ValueError: If image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('RandomCropToAspectRatio', values=[image]):
image_shape = tf.shape(image)
orig_height = image_shape[0]
orig_width = image_shape[1]
orig_aspect_ratio = tf.cast(
orig_width, dtype=tf.float32) / tf.cast(
orig_height, dtype=tf.float32)
new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)
def target_height_fn():
return tf.cast(
tf.round(tf.cast(orig_width, dtype=tf.float32) / new_aspect_ratio),
dtype=tf.int32)
target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio,
lambda: orig_height, target_height_fn)
def target_width_fn():
return tf.cast(
tf.round(tf.cast(orig_height, dtype=tf.float32) * new_aspect_ratio),
dtype=tf.int32)
target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio,
lambda: orig_width, target_width_fn)
    # either offset_height = 0 and offset_width is randomly chosen from
    # [0, orig_width - target_width], or else offset_width = 0 and
    # offset_height is randomly chosen from [0, orig_height - target_height]
if center_crop:
offset_height = tf.cast(tf.math.floor((orig_height - target_height) / 2),
tf.int32)
offset_width = tf.cast(tf.math.floor((orig_width - target_width) / 2),
tf.int32)
else:
offset_height = _random_integer(0, orig_height - target_height + 1, seed)
offset_width = _random_integer(0, orig_width - target_width + 1, seed)
generator_func = lambda: (offset_height, offset_width)
offset_height, offset_width = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.CROP_TO_ASPECT_RATIO,
preprocess_vars_cache)
new_image = tf.image.crop_to_bounding_box(
image, offset_height, offset_width, target_height, target_width)
im_box = tf.stack([
tf.cast(offset_height, dtype=tf.float32) /
tf.cast(orig_height, dtype=tf.float32),
tf.cast(offset_width, dtype=tf.float32) /
tf.cast(orig_width, dtype=tf.float32),
tf.cast(offset_height + target_height, dtype=tf.float32) /
tf.cast(orig_height, dtype=tf.float32),
tf.cast(offset_width + target_width, dtype=tf.float32) /
tf.cast(orig_width, dtype=tf.float32)
])
boxlist = box_list.BoxList(boxes)
boxlist.add_field('labels', labels)
boxlist.add_field('label_weights', label_weights)
if label_confidences is not None:
boxlist.add_field('label_confidences', label_confidences)
if multiclass_scores is not None:
boxlist.add_field('multiclass_scores', multiclass_scores)
im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0))
# remove boxes whose overlap with the image is less than overlap_thresh
overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
boxlist, im_boxlist, overlap_thresh)
# change the coordinate of the remaining boxes
new_labels = overlapping_boxlist.get_field('labels')
new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
im_box)
if clip_boxes:
new_boxlist = box_list_ops.clip_to_window(
new_boxlist, tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32))
new_boxes = new_boxlist.get()
result = [new_image, new_boxes, new_labels]
new_label_weights = overlapping_boxlist.get_field('label_weights')
result.append(new_label_weights)
if label_confidences is not None:
new_label_confidences = (
overlapping_boxlist.get_field('label_confidences'))
result.append(new_label_confidences)
if multiclass_scores is not None:
new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores')
result.append(new_multiclass_scores)
if masks is not None:
masks_inside_window = tf.gather(masks, keep_ids)
masks_box_begin = tf.stack([0, offset_height, offset_width])
masks_box_size = tf.stack([-1, target_height, target_width])
new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size)
result.append(new_masks)
if keypoints is not None:
keypoints_inside_window = tf.gather(keypoints, keep_ids)
new_keypoints = keypoint_ops.change_coordinate_frame(
keypoints_inside_window, im_box)
if clip_boxes:
new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,
[0.0, 0.0, 1.0, 1.0])
result.append(new_keypoints)
return tuple(result)
def random_pad_to_aspect_ratio(image,
boxes,
masks=None,
keypoints=None,
aspect_ratio=1.0,
min_padded_size_ratio=(1.0, 1.0),
max_padded_size_ratio=(2.0, 2.0),
seed=None,
preprocess_vars_cache=None):
"""Randomly zero pads an image to the specified aspect ratio.
  Pads the image so that the resulting image has the specified aspect ratio,
  while padding no less than min_padded_size_ratio and no more than
  max_padded_size_ratio times the input size. If these ratios do not allow
  enough padding to reach the aspect ratio, this method uses the least
  padding that achieves the specified aspect ratio.
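  Example usage (an illustrative sketch; assumes `image` and `boxes` follow
  the formats described in Args below):
    padded_image, padded_boxes = random_pad_to_aspect_ratio(
        image, boxes, aspect_ratio=4.0 / 3.0)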
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
aspect_ratio: aspect ratio of the final image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width.
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
    If masks or keypoints is not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: If image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('RandomPadToAspectRatio', values=[image]):
image_shape = tf.shape(image)
image_height = tf.cast(image_shape[0], dtype=tf.float32)
image_width = tf.cast(image_shape[1], dtype=tf.float32)
image_aspect_ratio = image_width / image_height
new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)
target_height = tf.cond(
image_aspect_ratio <= new_aspect_ratio,
lambda: image_height,
lambda: image_width / new_aspect_ratio)
target_width = tf.cond(
image_aspect_ratio >= new_aspect_ratio,
lambda: image_width,
lambda: image_height * new_aspect_ratio)
min_height = tf.maximum(
min_padded_size_ratio[0] * image_height, target_height)
min_width = tf.maximum(
min_padded_size_ratio[1] * image_width, target_width)
max_height = tf.maximum(
max_padded_size_ratio[0] * image_height, target_height)
max_width = tf.maximum(
max_padded_size_ratio[1] * image_width, target_width)
max_scale = tf.minimum(max_height / target_height, max_width / target_width)
min_scale = tf.minimum(
max_scale,
tf.maximum(min_height / target_height, min_width / target_width))
generator_func = functools.partial(tf.random_uniform, [],
min_scale, max_scale, seed=seed)
scale = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.PAD_TO_ASPECT_RATIO,
preprocess_vars_cache)
target_height = tf.round(scale * target_height)
target_width = tf.round(scale * target_width)
new_image = tf.image.pad_to_bounding_box(
image, 0, 0, tf.cast(target_height, dtype=tf.int32),
tf.cast(target_width, dtype=tf.int32))
im_box = tf.stack([
0.0,
0.0,
target_height / image_height,
target_width / image_width
])
boxlist = box_list.BoxList(boxes)
new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box)
new_boxes = new_boxlist.get()
result = [new_image, new_boxes]
if masks is not None:
new_masks = tf.expand_dims(masks, -1)
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, tf.cast(target_height, dtype=tf.int32),
tf.cast(target_width, dtype=tf.int32))
new_masks = tf.squeeze(new_masks, [-1])
result.append(new_masks)
if keypoints is not None:
new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box)
result.append(new_keypoints)
return tuple(result)
def random_black_patches(image,
max_black_patches=10,
probability=0.5,
size_to_image_ratio=0.1,
random_seed=None,
preprocess_vars_cache=None):
"""Randomly adds some black patches to the image.
  This op adds up to max_black_patches square black patches of a fixed size
  to the image, where the size is specified via the size_to_image_ratio
  parameter.
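  Example usage (an illustrative sketch; assumes `image` is a rank 3 float32
  tensor with values in [0, 1]):
    patched_image = random_black_patches(
        image, max_black_patches=5, probability=0.5, size_to_image_ratio=0.1)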
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
max_black_patches: number of times that the function tries to add a
black box to the image.
    probability: at each try, the probability of adding a box.
size_to_image_ratio: Determines the ratio of the size of the black patches
to the size of the image.
box_size = size_to_image_ratio *
min(image_width, image_height)
random_seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image
"""
def add_black_patch_to_image(image, idx):
"""Function for adding one patch to the image.
Args:
image: image
idx: counter for number of patches that could have been added
Returns:
image with a randomly added black box
"""
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
box_size = tf.cast(
tf.multiply(
tf.minimum(
tf.cast(image_height, dtype=tf.float32),
tf.cast(image_width, dtype=tf.float32)), size_to_image_ratio),
dtype=tf.int32)
generator_func = functools.partial(tf.random_uniform, [], minval=0.0,
maxval=(1.0 - size_to_image_ratio),
seed=random_seed)
normalized_y_min = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH,
preprocess_vars_cache, key=str(idx) + 'y')
normalized_x_min = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH,
preprocess_vars_cache, key=str(idx) + 'x')
y_min = tf.cast(
normalized_y_min * tf.cast(image_height, dtype=tf.float32),
dtype=tf.int32)
x_min = tf.cast(
normalized_x_min * tf.cast(image_width, dtype=tf.float32),
dtype=tf.int32)
black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32)
mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min,
image_height, image_width)
image = tf.multiply(image, mask)
return image
with tf.name_scope('RandomBlackPatchInImage', values=[image]):
for idx in range(max_black_patches):
generator_func = functools.partial(tf.random_uniform, [],
minval=0.0, maxval=1.0,
dtype=tf.float32, seed=random_seed)
random_prob = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.BLACK_PATCHES,
preprocess_vars_cache, key=idx)
image = tf.cond(
tf.greater(random_prob, probability), lambda: image,
functools.partial(add_black_patch_to_image, image=image, idx=idx))
return image
def random_jpeg_quality(image,
min_jpeg_quality=0,
max_jpeg_quality=100,
random_coef=0.0,
seed=None,
preprocess_vars_cache=None):
"""Randomly encode the image to a random JPEG quality level.
Args:
image: rank 3 float32 tensor with shape [height, width, channels] and
values in the range [0, 255].
min_jpeg_quality: An int for the lower bound for selecting a random jpeg
quality level.
max_jpeg_quality: An int for the upper bound for selecting a random jpeg
quality level.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the encoded image,
and if it is 1.0, we will always get the original image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this function is called
multiple times with the same non-null cache, it will perform
deterministically.
Returns:
image: image which is the same shape as input image.
"""
def _adjust_jpeg_quality():
"""Encodes the image as jpeg with a random quality and then decodes."""
generator_func = functools.partial(
tf.random_uniform, [],
minval=min_jpeg_quality,
maxval=max_jpeg_quality,
dtype=tf.int32,
seed=seed)
quality = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY,
preprocess_vars_cache, key='quality')
    # Need to convert to uint8 before calling adjust_jpeg_quality since it
    # assumes that float features are in the range [0, 1], whereas here the
    # range is [0, 255].
image_uint8 = tf.cast(image, tf.uint8)
adjusted_image = tf.image.adjust_jpeg_quality(image_uint8, quality)
return tf.cast(adjusted_image, tf.float32)
with tf.name_scope('RandomJpegQuality', values=[image]):
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_encoding_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY,
preprocess_vars_cache)
do_encoding_random = tf.greater_equal(do_encoding_random, random_coef)
image = tf.cond(do_encoding_random, _adjust_jpeg_quality,
lambda: tf.cast(image, tf.float32))
return image
def random_downscale_to_target_pixels(image,
masks=None,
min_target_pixels=300000,
max_target_pixels=800000,
random_coef=0.0,
seed=None,
preprocess_vars_cache=None):
"""Randomly downscales the image to a target number of pixels.
  If the image contains fewer than the chosen target number of pixels, it
  will not be downscaled.
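  Example usage (an illustrative sketch; assumes `image` is a rank 3 float32
  tensor with values in [0, 255] and no masks are passed, so a 1-tuple is
  returned):
    (downscaled_image,) = random_downscale_to_target_pixels(
        image, min_target_pixels=200000, max_target_pixels=500000)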
Args:
image: Rank 3 float32 tensor with shape [height, width, channels] and
values in the range [0, 255].
masks: (optional) Rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks are of
the same height, width as the input `image`.
    min_target_pixels: Integer. An inclusive lower bound for the target
      number of pixels.
    max_target_pixels: Integer. An exclusive upper bound for the target
number of pixels.
random_coef: Float. Random coefficient that defines the chance of getting
the original image. If random_coef is 0, we will always apply downscaling,
and if it is 1.0, we will always get the original image.
seed: (optional) Integer. Random seed.
preprocess_vars_cache: (optional) PreprocessorCache object that records
previously performed augmentations. Updated in-place. If this function is
called multiple times with the same non-null cache, it will perform
deterministically.
Returns:
Tuple with elements:
image: Resized image which is the same rank as input image.
masks: If masks is not None, resized masks which are the same rank as
the input masks.
Raises:
ValueError: If min_target_pixels or max_target_pixels are not positive.
"""
if min_target_pixels <= 0:
raise ValueError('Minimum target pixels must be positive')
if max_target_pixels <= 0:
raise ValueError('Maximum target pixels must be positive')
def _resize_image_to_target(target_height, target_width):
# pylint: disable=unbalanced-tuple-unpacking
new_image, _ = resize_image(image, None, target_height, target_width)
return (new_image,)
def _resize_image_and_masks_to_target(target_height, target_width):
# pylint: disable=unbalanced-tuple-unpacking
new_image, new_masks, _ = resize_image(image, masks, target_height,
target_width)
return new_image, new_masks
with tf.name_scope('RandomDownscaleToTargetPixels', values=[image]):
generator_fn = functools.partial(tf.random_uniform, [], seed=seed)
do_downscale_random = _get_or_create_preprocess_rand_vars(
generator_fn,
preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS,
preprocess_vars_cache)
do_downscale_random = tf.greater_equal(do_downscale_random, random_coef)
generator_fn = functools.partial(
tf.random_uniform, [],
minval=min_target_pixels,
maxval=max_target_pixels,
dtype=tf.int32,
seed=seed)
target_pixels = _get_or_create_preprocess_rand_vars(
generator_fn,
preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS,
preprocess_vars_cache,
key='target_pixels')
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
image_pixels = image_height * image_width
scale_factor = tf.sqrt(
tf.cast(target_pixels, dtype=tf.float32) /
tf.cast(image_pixels, dtype=tf.float32))
target_height = tf.cast(
scale_factor * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32)
target_width = tf.cast(
scale_factor * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32)
image_larger_than_target = tf.greater(image_pixels, target_pixels)
should_apply_resize = tf.logical_and(do_downscale_random,
image_larger_than_target)
if masks is not None:
resize_fn = functools.partial(_resize_image_and_masks_to_target,
target_height, target_width)
return tf.cond(should_apply_resize, resize_fn,
lambda: (tf.cast(image, dtype=tf.float32), masks))
else:
resize_fn = lambda: _resize_image_to_target(target_height, target_width)
return tf.cond(should_apply_resize, resize_fn,
lambda: (tf.cast(image, dtype=tf.float32),))
def random_patch_gaussian(image,
min_patch_size=1,
max_patch_size=250,
min_gaussian_stddev=0.0,
max_gaussian_stddev=1.0,
random_coef=0.0,
seed=None,
preprocess_vars_cache=None):
"""Randomly applies gaussian noise to a random patch on the image.
The gaussian noise is applied to the image with values scaled to the range
[0.0, 1.0]. The result of applying gaussian noise to the scaled image is
clipped to be within the range [0.0, 1.0], equivalent to the range
[0.0, 255.0] after rescaling the image back.
See "Improving Robustness Without Sacrificing Accuracy with Patch Gaussian
Augmentation " by Lopes et al., 2019, for further details.
https://arxiv.org/abs/1906.02611
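  Example usage (an illustrative sketch; assumes `image` is a rank 3 float32
  tensor with values in [0.0, 255.0]):
    noisy_image = random_patch_gaussian(
        image, min_patch_size=1, max_patch_size=100,
        min_gaussian_stddev=0.0, max_gaussian_stddev=0.5)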
Args:
image: Rank 3 float32 tensor with shape [height, width, channels] and
values in the range [0.0, 255.0].
min_patch_size: Integer. An inclusive lower bound for the patch size.
max_patch_size: Integer. An exclusive upper bound for the patch size.
min_gaussian_stddev: Float. An inclusive lower bound for the standard
deviation of the gaussian noise.
max_gaussian_stddev: Float. An exclusive upper bound for the standard
deviation of the gaussian noise.
random_coef: Float. Random coefficient that defines the chance of getting
      the original image. If random_coef is 0.0, we will always apply the
      patch gaussian noise, and if it is 1.0, we will always get the
      original image.
seed: (optional) Integer. Random seed.
preprocess_vars_cache: (optional) PreprocessorCache object that records
previously performed augmentations. Updated in-place. If this function is
called multiple times with the same non-null cache, it will perform
deterministically.
Returns:
Rank 3 float32 tensor with same shape as the input image and with gaussian
noise applied within a random patch.
Raises:
ValueError: If min_patch_size is < 1.
"""
if min_patch_size < 1:
raise ValueError('Minimum patch size must be >= 1.')
get_or_create_rand_vars_fn = functools.partial(
_get_or_create_preprocess_rand_vars,
function_id=preprocessor_cache.PreprocessorCache.PATCH_GAUSSIAN,
preprocess_vars_cache=preprocess_vars_cache)
def _apply_patch_gaussian(image):
"""Applies a patch gaussian with random size, location, and stddev."""
patch_size = get_or_create_rand_vars_fn(
functools.partial(
tf.random_uniform, [],
minval=min_patch_size,
maxval=max_patch_size,
dtype=tf.int32,
seed=seed),
key='patch_size')
gaussian_stddev = get_or_create_rand_vars_fn(
functools.partial(
tf.random_uniform, [],
minval=min_gaussian_stddev,
maxval=max_gaussian_stddev,
dtype=tf.float32,
seed=seed),
key='gaussian_stddev')
image_shape = tf.shape(image)
y = get_or_create_rand_vars_fn(
functools.partial(
tf.random_uniform, [],
minval=0,
maxval=image_shape[0],
dtype=tf.int32,
seed=seed),
key='y')
x = get_or_create_rand_vars_fn(
functools.partial(
tf.random_uniform, [],
minval=0,
maxval=image_shape[1],
dtype=tf.int32,
seed=seed),
key='x')
gaussian = get_or_create_rand_vars_fn(
functools.partial(
tf.random.normal,
image_shape,
stddev=gaussian_stddev,
dtype=tf.float32,
seed=seed),
key='gaussian')
scaled_image = image / 255.0
image_plus_gaussian = tf.clip_by_value(scaled_image + gaussian, 0.0, 1.0)
patch_mask = patch_ops.get_patch_mask(y, x, patch_size, image_shape)
patch_mask = tf.expand_dims(patch_mask, -1)
patch_mask = tf.tile(patch_mask, [1, 1, image_shape[2]])
patched_image = tf.where(patch_mask, image_plus_gaussian, scaled_image)
return patched_image * 255.0
with tf.name_scope('RandomPatchGaussian', values=[image]):
image = tf.cast(image, tf.float32)
patch_gaussian_random = get_or_create_rand_vars_fn(
functools.partial(tf.random_uniform, [], seed=seed))
do_patch_gaussian = tf.greater_equal(patch_gaussian_random, random_coef)
image = tf.cond(do_patch_gaussian,
lambda: _apply_patch_gaussian(image),
lambda: image)
return image
def autoaugment_image(image, boxes, policy_name='v0'):
"""Apply an autoaugment policy to the image and boxes.
See "AutoAugment: Learning Augmentation Policies from Data" by Cubuk et al.,
2018, for further details. https://arxiv.org/abs/1805.09501
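  Example usage (an illustrative sketch; assumes `image` has values in
  [0, 255] and `boxes` is in normalized form):
    augmented_image, augmented_boxes = autoaugment_image(
        image, boxes, policy_name='v0')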
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
policy_name: The name of the AutoAugment policy to use. The available
options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
all of the results in the paper and was found to achieve the best results
on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
found on the COCO dataset that have slight variation in what operations
were used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
Returns:
image: the augmented image.
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form. boxes will have been augmented along with image.
"""
return autoaugment_utils.distort_image_with_autoaugment(
image, boxes, policy_name)
def image_to_float(image):
"""Used in Faster R-CNN. Casts image pixel values to float.
Args:
    image: input image, which may be tf.uint8 or another dtype.
Returns:
image: image in tf.float32 format.
"""
with tf.name_scope('ImageToFloat', values=[image]):
image = tf.cast(image, dtype=tf.float32)
return image
def random_resize_method(image, target_size, preprocess_vars_cache=None):
"""Uses a random resize method to resize the image to target size.
Args:
image: a rank 3 tensor.
target_size: a list of [target_height, target_width]
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
resized image.
"""
resized_image = _apply_with_random_selector(
image,
lambda x, method: tf.image.resize_images(x, target_size, method),
num_cases=4,
preprocess_vars_cache=preprocess_vars_cache,
key=preprocessor_cache.PreprocessorCache.RESIZE_METHOD)
return resized_image
def resize_to_range(image,
masks=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False,
pad_to_max_dimension=False,
per_channel_pad_value=(0, 0, 0)):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
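  Example usage (an illustrative sketch; assumes `image` is a rank 3 tensor
  and no masks are passed, so only the image and its new shape are returned):
    resized_image, resized_image_shape = resize_to_range(
        image, min_dimension=600, max_dimension=1024)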
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size
of the larger image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
pad_to_max_dimension: Whether to resize the image and pad it with zeros
so the resulting image is of the spatial size
[max_dimension, max_dimension]. If masks are included they are padded
similarly.
per_channel_pad_value: A tuple of per-channel scalar value to use for
padding. By default pads zeros.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, channels],
      where the image has been resized (with the provided method) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
def _resize_landscape_image(image):
# resize a landscape image
return tf.image.resize_images(
image, tf.stack([min_dimension, max_dimension]), method=method,
align_corners=align_corners, preserve_aspect_ratio=True)
def _resize_portrait_image(image):
# resize a portrait image
return tf.image.resize_images(
image, tf.stack([max_dimension, min_dimension]), method=method,
align_corners=align_corners, preserve_aspect_ratio=True)
with tf.name_scope('ResizeToRange', values=[image, min_dimension]):
if image.get_shape().is_fully_defined():
if image.get_shape()[0] < image.get_shape()[1]:
new_image = _resize_landscape_image(image)
else:
new_image = _resize_portrait_image(image)
new_size = tf.constant(new_image.get_shape().as_list())
else:
new_image = tf.cond(
tf.less(tf.shape(image)[0], tf.shape(image)[1]),
lambda: _resize_landscape_image(image),
lambda: _resize_portrait_image(image))
new_size = tf.shape(new_image)
if pad_to_max_dimension:
channels = tf.unstack(new_image, axis=2)
if len(channels) != len(per_channel_pad_value):
raise ValueError('Number of channels must be equal to the length of '
'per-channel pad value.')
new_image = tf.stack(
[
tf.pad( # pylint: disable=g-complex-comprehension
channels[i], [[0, max_dimension - new_size[0]],
[0, max_dimension - new_size[1]]],
constant_values=per_channel_pad_value[i])
for i in range(len(channels))
],
axis=2)
new_image.set_shape([max_dimension, max_dimension, len(channels)])
result = [new_image]
if masks is not None:
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_images(
new_masks,
new_size[:-1],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
if pad_to_max_dimension:
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, max_dimension, max_dimension)
new_masks = tf.squeeze(new_masks, 3)
result.append(new_masks)
result.append(new_size)
return result
def _get_image_info(image):
"""Returns the height, width and number of channels in the image."""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
num_channels = tf.shape(image)[2]
return (image_height, image_width, num_channels)
# TODO(alirezafathi): Make sure the static shapes are preserved.
def resize_to_min_dimension(image, masks=None, min_dimension=600,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes image and masks given the min size maintaining the aspect ratio.
If one of the image dimensions is smaller than min_dimension, it will scale
the image such that its smallest dimension is equal to min_dimension.
  Otherwise, it will keep the image size as is.
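  Example usage (an illustrative sketch; assumes `image` is a rank 3 tensor
  and no masks are passed):
    resized_image, resized_image_shape = resize_to_min_dimension(
        image, min_dimension=600)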
Args:
image: a tensor of size [height, width, channels].
masks: (optional) a tensors of size [num_instances, height, width].
min_dimension: minimum image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
Returns:
An array containing resized_image, resized_masks, and resized_image_shape.
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A tensor of size [new_height, new_width, channels].
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width]
resized_image_shape: A 1D tensor of shape [3] containing the shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]):
(image_height, image_width, num_channels) = _get_image_info(image)
min_image_dimension = tf.minimum(image_height, image_width)
min_target_dimension = tf.maximum(min_image_dimension, min_dimension)
target_ratio = tf.cast(min_target_dimension, dtype=tf.float32) / tf.cast(
min_image_dimension, dtype=tf.float32)
target_height = tf.cast(
tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32)
target_width = tf.cast(
tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32)
image = tf.image.resize_images(
tf.expand_dims(image, axis=0), size=[target_height, target_width],
method=method,
align_corners=True)
result = [tf.squeeze(image, axis=0)]
if masks is not None:
masks = tf.image.resize_nearest_neighbor(
tf.expand_dims(masks, axis=3),
size=[target_height, target_width],
align_corners=True)
result.append(tf.squeeze(masks, axis=3))
result.append(tf.stack([target_height, target_width, num_channels]))
return result
def resize_to_max_dimension(image, masks=None, max_dimension=600,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes image and masks given the max size maintaining the aspect ratio.
If one of the image dimensions is greater than max_dimension, it will scale
the image such that its largest dimension is equal to max_dimension.
  Otherwise, it will keep the image size as is.
Args:
image: a tensor of size [height, width, channels].
masks: (optional) a tensors of size [num_instances, height, width].
max_dimension: maximum image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
Returns:
An array containing resized_image, resized_masks, and resized_image_shape.
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A tensor of size [new_height, new_width, channels].
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width]
resized_image_shape: A 1D tensor of shape [3] containing the shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeGivenMaxDimension', values=[image, max_dimension]):
(image_height, image_width, num_channels) = _get_image_info(image)
max_image_dimension = tf.maximum(image_height, image_width)
max_target_dimension = tf.minimum(max_image_dimension, max_dimension)
target_ratio = tf.cast(max_target_dimension, dtype=tf.float32) / tf.cast(
max_image_dimension, dtype=tf.float32)
target_height = tf.cast(
tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32)
target_width = tf.cast(
tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32)
image = tf.image.resize_images(
tf.expand_dims(image, axis=0), size=[target_height, target_width],
method=method,
align_corners=True)
result = [tf.squeeze(image, axis=0)]
if masks is not None:
masks = tf.image.resize_nearest_neighbor(
tf.expand_dims(masks, axis=3),
size=[target_height, target_width],
align_corners=True)
result.append(tf.squeeze(masks, axis=3))
result.append(tf.stack([target_height, target_width, num_channels]))
return result
def resize_pad_to_multiple(image, masks=None, multiple=1):
"""Resize an image by zero padding it to the specified multiple.
For example, with an image of size (101, 199, 3) and multiple=4,
the returned image will have shape (104, 200, 3).
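  Example usage (an illustrative sketch; assumes `image` is a rank 3 tensor
  and no masks are passed):
    padded_image, input_shape = resize_pad_to_multiple(image, multiple=32)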
Args:
image: a tensor of shape [height, width, channels]
masks: (optional) a tensor of shape [num_instances, height, width]
multiple: int, the multiple to which the height and width of the input
will be padded.
Returns:
    resized_image: The image with 0 padding applied, such that output
      dimensions are divisible by `multiple`.
resized_masks: If masks are given, they are resized to the same
spatial dimensions as the image.
    resized_image_shape: An integer tensor of shape [3] which holds
      the shape of the input image.
  Raises:
    ValueError: if the image is not a 3D tensor.
  """
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizePadToMultiple', values=[image, multiple]):
image_height, image_width, num_channels = _get_image_info(image)
image = image[tf.newaxis, :, :, :]
image = ops.pad_to_multiple(image, multiple)[0, :, :, :]
result = [image]
if masks is not None:
masks = tf.transpose(masks, (1, 2, 0))
masks = masks[tf.newaxis, :, :, :]
masks = ops.pad_to_multiple(masks, multiple)[0, :, :, :]
masks = tf.transpose(masks, (2, 0, 1))
result.append(masks)
result.append(tf.stack([image_height, image_width, num_channels]))
return result
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form
[ymin, xmin, ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
# TODO(alirezafathi): Investigate if instead the function should return None if
# masks is None.
# pylint: disable=g-doc-return-or-yield
def resize_image(image,
masks=None,
new_height=600,
new_width=1024,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False):
"""Resizes images to the given height and width.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
new_height: (optional) (scalar) desired height of the image.
new_width: (optional) (scalar) desired width of the image.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A tensor of size [new_height, new_width, channels].
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width]
resized_image_shape: A 1D tensor of shape [3] containing the shape of the
resized image.
"""
with tf.name_scope(
'ResizeImage',
values=[image, new_height, new_width, method, align_corners]):
new_image = tf.image.resize_images(
image, tf.stack([new_height, new_width]),
method=method,
align_corners=align_corners)
image_shape = shape_utils.combined_static_and_dynamic_shape(image)
result = [new_image]
if masks is not None:
num_instances = tf.shape(masks)[0]
new_size = tf.stack([new_height, new_width])
def resize_masks_branch():
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_nearest_neighbor(
new_masks, new_size, align_corners=align_corners)
new_masks = tf.squeeze(new_masks, axis=3)
return new_masks
def reshape_masks_branch():
# The shape function will be computed for both branches of the
# condition, regardless of which branch is actually taken. Make sure
# that we don't trigger an assertion in the shape function when trying
# to reshape a non empty tensor into an empty one.
new_masks = tf.reshape(masks, [-1, new_size[0], new_size[1]])
return new_masks
masks = tf.cond(num_instances > 0, resize_masks_branch,
reshape_masks_branch)
result.append(masks)
result.append(tf.stack([new_height, new_width, image_shape[2]]))
return result
def subtract_channel_mean(image, means=None):
"""Normalizes an image by subtracting a mean from each channel.
Args:
image: A 3D tensor of shape [height, width, channels]
means: float list containing a mean for each channel
Returns:
normalized_images: a tensor of shape [height, width, channels]
Raises:
    ValueError: if image is not a 3D tensor or if the number of means is not
equal to the number of channels.
"""
with tf.name_scope('SubtractChannelMean', values=[image, means]):
if len(image.get_shape()) != 3:
raise ValueError('Input must be of size [height, width, channels]')
if len(means) != image.get_shape()[-1]:
raise ValueError('len(means) must match the number of channels')
return image - [[means]]
def one_hot_encoding(labels, num_classes=None):
"""One-hot encodes the multiclass labels.
Example usage:
labels = tf.constant([1, 4], dtype=tf.int32)
    one_hot = one_hot_encoding(labels, num_classes=5)
one_hot.eval() # evaluates to [0, 1, 0, 0, 1]
Args:
labels: A tensor of shape [None] corresponding to the labels.
num_classes: Number of classes in the dataset.
Returns:
onehot_labels: a tensor of shape [num_classes] corresponding to the one hot
encoding of the labels.
Raises:
ValueError: if num_classes is not specified.
"""
with tf.name_scope('OneHotEncoding', values=[labels]):
if num_classes is None:
raise ValueError('num_classes must be specified')
labels = tf.one_hot(labels, num_classes, 1, 0)
return tf.reduce_max(labels, 0)
def rgb_to_gray(image):
"""Converts a 3 channel RGB image to a 1 channel grayscale image.
Args:
image: Rank 3 float32 tensor containing 1 image -> [height, width, 3]
with pixel values varying between [0, 1].
Returns:
    image: A single channel grayscale image -> [height, width, 1].
"""
return _rgb_to_grayscale(image)
def random_self_concat_image(
image, boxes, labels, label_weights, label_confidences=None,
multiclass_scores=None, concat_vertical_probability=0.1,
concat_horizontal_probability=0.1, seed=None,
preprocess_vars_cache=None):
"""Randomly concatenates the image with itself.
This function randomly concatenates the image with itself; the random
variables for vertical and horizontal concatenation are independent.
Afterwards, we adjust the old bounding boxes, and add new bounding boxes
for the new objects.
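  Example usage (an illustrative sketch; assumes inputs in the formats
  described in Args below):
    (concat_image, concat_boxes, concat_labels,
     concat_label_weights) = random_self_concat_image(
         image, boxes, labels, label_weights,
         concat_vertical_probability=0.5, concat_horizontal_probability=0.5)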
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: rank 1 float32 containing the label weights.
label_confidences: (optional) rank 1 float32 containing the label
confidences.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for
each box for each class.
concat_vertical_probability: (optional) a tf.float32 scalar denoting the
probability of a vertical concatenation.
concat_horizontal_probability: (optional) a tf.float32 scalar denoting the
probability of a horizontal concatenation.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
if label_confidences is not None also returns:
    maybe_concat_label_confidences: concatenated label confidences.
if multiclass_scores is not None also returns:
    maybe_concat_multiclass_scores: concatenated multiclass scores.
"""
concat_vertical = (tf.random_uniform([], seed=seed) <
concat_vertical_probability)
# Note the seed + 1 so we get some semblance of independence even with
# fixed seeds.
concat_horizontal = (tf.random_uniform([], seed=seed + 1 if seed else None)
< concat_horizontal_probability)
gen_func = lambda: (concat_vertical, concat_horizontal)
params = _get_or_create_preprocess_rand_vars(
gen_func, preprocessor_cache.PreprocessorCache.SELF_CONCAT_IMAGE,
preprocess_vars_cache)
concat_vertical, concat_horizontal = params
def _concat_image(image, boxes, labels, label_weights, axis):
"""Concats the image to itself on `axis`."""
output_images = tf.concat([image, image], axis=axis)
if axis == 0:
# Concat vertically, so need to reduce the y coordinates.
old_scaling = tf.constant([0.5, 1.0, 0.5, 1.0])
new_translation = tf.constant([0.5, 0.0, 0.5, 0.0])
elif axis == 1:
old_scaling = tf.constant([1.0, 0.5, 1.0, 0.5])
new_translation = tf.constant([0.0, 0.5, 0.0, 0.5])
old_boxes = old_scaling * boxes
new_boxes = old_boxes + new_translation
all_boxes = tf.concat([old_boxes, new_boxes], axis=0)
return [output_images, all_boxes, tf.tile(labels, [2]), tf.tile(
label_weights, [2])]
image, boxes, labels, label_weights = tf.cond(
concat_vertical,
lambda: _concat_image(image, boxes, labels, label_weights, axis=0),
lambda: [image, boxes, labels, label_weights],
strict=True)
outputs = tf.cond(
concat_horizontal,
lambda: _concat_image(image, boxes, labels, label_weights, axis=1),
lambda: [image, boxes, labels, label_weights],
strict=True)
if label_confidences is not None:
label_confidences = tf.cond(concat_vertical,
lambda: tf.tile(label_confidences, [2]),
lambda: label_confidences)
outputs.append(tf.cond(concat_horizontal,
lambda: tf.tile(label_confidences, [2]),
lambda: label_confidences))
if multiclass_scores is not None:
multiclass_scores = tf.cond(concat_vertical,
lambda: tf.tile(multiclass_scores, [2, 1]),
lambda: multiclass_scores)
outputs.append(tf.cond(concat_horizontal,
lambda: tf.tile(multiclass_scores, [2, 1]),
lambda: multiclass_scores))
return outputs
def ssd_random_crop(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio_range=((0.5, 2.0),) * 7,
area_range=((0.1, 1.0),) * 7,
overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
clip_boxes=(True,) * 7,
random_coef=(0.15,) * 7,
seed=None,
preprocess_vars_cache=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
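  Example usage (an illustrative sketch; assumes inputs in the formats
  described in Args below; one of the seven parameter tuples is selected at
  random on each call):
    (cropped_image, cropped_boxes, cropped_labels,
     cropped_label_weights) = ssd_random_crop(
         image, boxes, labels, label_weights)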
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: rank 1 float32 tensor containing the weights.
label_confidences: rank 1 float32 tensor containing the confidences.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
    If label_weights, label_confidences, multiclass_scores, masks, or
    keypoints is not None, the function also returns:
    label_weights: rank 1 float32 tensor with shape [num_instances].
    label_confidences: rank 1 float32 tensor with shape [num_instances].
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
def random_crop_selector(selected_result, index):
"""Applies random_crop_image to selected result.
Args:
      selected_result: A tuple containing image, boxes, labels, and any of
        the optional inputs (label_weights, label_confidences,
        multiclass_scores, masks, keypoints) that are not None.
      index: The index that was randomly selected.
    Returns: A tuple of the same structure as selected_result, after random
      cropping has been applied.
"""
i = 3
image, boxes, labels = selected_result[:i]
selected_label_weights = None
selected_label_confidences = None
selected_multiclass_scores = None
selected_masks = None
selected_keypoints = None
if label_weights is not None:
selected_label_weights = selected_result[i]
i += 1
if label_confidences is not None:
selected_label_confidences = selected_result[i]
i += 1
if multiclass_scores is not None:
selected_multiclass_scores = selected_result[i]
i += 1
if masks is not None:
selected_masks = selected_result[i]
i += 1
if keypoints is not None:
selected_keypoints = selected_result[i]
return random_crop_image(
image=image,
boxes=boxes,
labels=labels,
label_weights=selected_label_weights,
label_confidences=selected_label_confidences,
multiclass_scores=selected_multiclass_scores,
masks=selected_masks,
keypoints=selected_keypoints,
min_object_covered=min_object_covered[index],
aspect_ratio_range=aspect_ratio_range[index],
area_range=area_range[index],
overlap_thresh=overlap_thresh[index],
clip_boxes=clip_boxes[index],
random_coef=random_coef[index],
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
result = _apply_with_random_selector_tuples(
tuple(
t for t in (image, boxes, labels, label_weights, label_confidences,
multiclass_scores, masks, keypoints) if t is not None),
random_crop_selector,
num_cases=len(min_object_covered),
preprocess_vars_cache=preprocess_vars_cache,
key=preprocessor_cache.PreprocessorCache.SSD_CROP_SELECTOR_ID)
return result
def ssd_random_crop_pad(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio_range=((0.5, 2.0),) * 6,
area_range=((0.1, 1.0),) * 6,
overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
clip_boxes=(True,) * 6,
random_coef=(0.15,) * 6,
min_padded_size_ratio=((1.0, 1.0),) * 6,
max_padded_size_ratio=((2.0, 2.0),) * 6,
pad_color=(None,) * 6,
seed=None,
preprocess_vars_cache=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: float32 tensor of shape [num_instances] representing the
confidences for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width.
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width.
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
               if set to None, it will be set to the average color of the
               randomly cropped image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
new_labels: new labels.
new_label_weights: new label weights.
"""
def random_crop_pad_selector(image_boxes_labels, index):
"""Random crop preprocessing helper."""
i = 3
image, boxes, labels = image_boxes_labels[:i]
selected_label_weights = None
selected_label_confidences = None
selected_multiclass_scores = None
if label_weights is not None:
selected_label_weights = image_boxes_labels[i]
i += 1
if label_confidences is not None:
selected_label_confidences = image_boxes_labels[i]
i += 1
if multiclass_scores is not None:
selected_multiclass_scores = image_boxes_labels[i]
return random_crop_pad_image(
image,
boxes,
labels,
label_weights=selected_label_weights,
label_confidences=selected_label_confidences,
multiclass_scores=selected_multiclass_scores,
min_object_covered=min_object_covered[index],
aspect_ratio_range=aspect_ratio_range[index],
area_range=area_range[index],
overlap_thresh=overlap_thresh[index],
clip_boxes=clip_boxes[index],
random_coef=random_coef[index],
min_padded_size_ratio=min_padded_size_ratio[index],
max_padded_size_ratio=max_padded_size_ratio[index],
pad_color=pad_color[index],
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
return _apply_with_random_selector_tuples(
tuple(t for t in (image, boxes, labels, label_weights, label_confidences,
multiclass_scores) if t is not None),
random_crop_pad_selector,
num_cases=len(min_object_covered),
preprocess_vars_cache=preprocess_vars_cache,
key=preprocessor_cache.PreprocessorCache.SSD_CROP_PAD_SELECTOR_ID)
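# --- Illustrative usage sketch (not part of the original library) ---
# Each tuple-valued argument of ssd_random_crop_pad supplies one option per
# sampled case; with the defaults there are 6 cases and one is drawn at random
# per call. Shapes below are placeholders chosen for this example; the
# module's existing `tf` (tensorflow.compat.v1) import is assumed.
def _example_ssd_random_crop_pad():
  image = tf.random_uniform([300, 300, 3])     # one image with values in [0, 1]
  boxes = tf.constant([[0.2, 0.2, 0.7, 0.7]])  # normalized [ymin, xmin, ymax, xmax]
  labels = tf.constant([1], dtype=tf.int32)
  label_weights = tf.constant([1.0])
  return ssd_random_crop_pad(image, boxes, labels,
                             label_weights=label_weights)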
def ssd_random_crop_fixed_aspect_ratio(
image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio=1.0,
area_range=((0.1, 1.0),) * 7,
overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
clip_boxes=(True,) * 7,
random_coef=(0.15,) * 7,
seed=None,
preprocess_vars_cache=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
The only difference is that the aspect ratio of the crops is fixed.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidences for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio: aspect ratio of the cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap threshold with the new cropped
image required to keep a box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image of the same rank as the input image.
boxes: boxes of the same rank as the input boxes, in normalized form.
labels: new labels.
If multiclass_scores, masks, or keypoints is not None, the function also
returns:
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range)
crop_result = ssd_random_crop(
image,
boxes,
labels,
label_weights=label_weights,
label_confidences=label_confidences,
multiclass_scores=multiclass_scores,
masks=masks,
keypoints=keypoints,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
clip_boxes=clip_boxes,
random_coef=random_coef,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
i = 3
new_image, new_boxes, new_labels = crop_result[:i]
new_label_weights = None
new_label_confidences = None
new_multiclass_scores = None
new_masks = None
new_keypoints = None
if label_weights is not None:
new_label_weights = crop_result[i]
i += 1
if label_confidences is not None:
new_label_confidences = crop_result[i]
i += 1
if multiclass_scores is not None:
new_multiclass_scores = crop_result[i]
i += 1
if masks is not None:
new_masks = crop_result[i]
i += 1
if keypoints is not None:
new_keypoints = crop_result[i]
result = random_crop_to_aspect_ratio(
new_image,
new_boxes,
new_labels,
label_weights=new_label_weights,
label_confidences=new_label_confidences,
multiclass_scores=new_multiclass_scores,
masks=new_masks,
keypoints=new_keypoints,
aspect_ratio=aspect_ratio,
clip_boxes=clip_boxes,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
return result
def ssd_random_crop_pad_fixed_aspect_ratio(
image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio=1.0,
aspect_ratio_range=((0.5, 2.0),) * 7,
area_range=((0.1, 1.0),) * 7,
overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
clip_boxes=(True,) * 7,
random_coef=(0.15,) * 7,
min_padded_size_ratio=(1.0, 1.0),
max_padded_size_ratio=(2.0, 2.0),
seed=None,
preprocess_vars_cache=None):
"""Random crop and pad preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
The only difference is that after the initial crop, images are zero-padded
to a fixed aspect ratio instead of being resized to that aspect ratio.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio: the final aspect ratio to pad to.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap threshold with the new cropped
image required to keep a box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width.
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image of the same rank as the input image.
boxes: boxes of the same rank as the input boxes, in normalized form.
labels: new labels.
If multiclass_scores, masks, or keypoints is not None, the function also
returns:
multiclass_scores: rank 2 with shape [num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
crop_result = ssd_random_crop(
image,
boxes,
labels,
label_weights=label_weights,
label_confidences=label_confidences,
multiclass_scores=multiclass_scores,
masks=masks,
keypoints=keypoints,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
clip_boxes=clip_boxes,
random_coef=random_coef,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
i = 3
new_image, new_boxes, new_labels = crop_result[:i]
new_label_weights = None
new_label_confidences = None
new_multiclass_scores = None
new_masks = None
new_keypoints = None
if label_weights is not None:
new_label_weights = crop_result[i]
i += 1
if label_confidences is not None:
new_label_confidences = crop_result[i]
i += 1
if multiclass_scores is not None:
new_multiclass_scores = crop_result[i]
i += 1
if masks is not None:
new_masks = crop_result[i]
i += 1
if keypoints is not None:
new_keypoints = crop_result[i]
result = random_pad_to_aspect_ratio(
new_image,
new_boxes,
masks=new_masks,
keypoints=new_keypoints,
aspect_ratio=aspect_ratio,
min_padded_size_ratio=min_padded_size_ratio,
max_padded_size_ratio=max_padded_size_ratio,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
result = list(result)
i = 3
result.insert(2, new_labels)
if new_label_weights is not None:
result.insert(i, new_label_weights)
i += 1
if new_label_confidences is not None:
result.insert(i, new_label_confidences)
i += 1
if multiclass_scores is not None:
result.insert(i, new_multiclass_scores)
result = tuple(result)
return result
def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0):
"""Converts multiclass logits to softmax scores after applying temperature.
Args:
multiclass_scores: float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
temperature: Scale factor to use prior to applying softmax. Larger
temperatures give more uniform distributions after softmax.
Returns:
multiclass_scores: float32 tensor of shape
[num_instances, num_classes] with scaling and softmax applied.
"""
# Multiclass scores must be stored as logits. Apply temp and softmax.
multiclass_scores_scaled = tf.multiply(
multiclass_scores, 1.0 / temperature, name='scale_logits')
multiclass_scores = tf.nn.softmax(multiclass_scores_scaled, name='softmax')
return multiclass_scores
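# --- Illustrative usage sketch (not part of the original library) ---
# Temperature scaling before softmax: temperature > 1 flattens the output
# distribution, temperature < 1 sharpens it. For logits [2, 1, 0],
# temperature=1.0 yields roughly [0.665, 0.245, 0.090], while temperature=10.0
# yields roughly [0.367, 0.332, 0.301].
def _example_convert_class_logits_to_softmax():
  logits = tf.constant([[2.0, 1.0, 0.0]])
  standard = convert_class_logits_to_softmax(logits, temperature=1.0)
  flattened = convert_class_logits_to_softmax(logits, temperature=10.0)
  return standard, flattened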
def _get_crop_border(border, size):
border = tf.cast(border, tf.float32)
size = tf.cast(size, tf.float32)
i = tf.ceil(tf.log(2.0 * border / size) / tf.log(2.0))
divisor = tf.pow(2.0, i)
divisor = tf.clip_by_value(divisor, 1, border)
divisor = tf.cast(divisor, tf.int32)
return tf.cast(border, tf.int32) // divisor
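# --- Worked example (a sketch, not library code) ---
# For max_border=128 and an image side of 100 pixels:
#   i = ceil(log2(2 * 128 / 100)) = ceil(log2(2.56)) = 2
#   divisor = 2**2 = 4 (already within [1, 128])
#   returned border = 128 // 4 = 32, which satisfies 32 < 100 / 2.
def _example_get_crop_border():
  return _get_crop_border(border=128, size=tf.constant(100))  # evaluates to 32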
def random_square_crop_by_scale(image, boxes, labels, label_weights,
label_confidences=None, masks=None,
keypoints=None, max_border=128, scale_min=0.6,
scale_max=1.3, num_scales=8, seed=None,
preprocess_vars_cache=None):
"""Randomly crop a square in proportion to scale and image size.
Extract a square sized crop from an image whose side length is sampled by
randomly scaling the maximum spatial dimension of the image. If part of
the crop falls outside the image, it is filled with zeros.
The augmentation is borrowed from [1]
[1]: https://arxiv.org/abs/1904.07850
Args:
image: rank 3 float32 tensor containing 1 image ->
[height, width, channels].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax].
Boxes on the crop boundary are clipped to the boundary and boxes
falling outside the crop are ignored.
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
max_border: The maximum size of the border. The border defines distance in
pixels to the image boundaries that will not be considered as a center of
a crop. To make sure that the border does not go over the center of the
image, we choose the border value by computing the minimum k such that
(max_border / (2**k)) < image_dimension / 2.
scale_min: float, the minimum value for scale.
scale_max: float, the maximum value for scale.
num_scales: int, the number of discrete scale values to sample between
[scale_min, scale_max]
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image of the same rank as the input image.
boxes: boxes of the same rank as the input boxes, in normalized form.
labels: new labels.
label_weights: rank 1 float32 tensor with shape [num_instances].
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
"""
img_shape = tf.shape(image)
height, width = img_shape[0], img_shape[1]
scales = tf.linspace(scale_min, scale_max, num_scales)
scale = _get_or_create_preprocess_rand_vars(
lambda: scales[_random_integer(0, num_scales, seed)],
preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE,
preprocess_vars_cache, 'scale')
image_size = scale * tf.cast(tf.maximum(height, width), tf.float32)
image_size = tf.cast(image_size, tf.int32)
h_border = _get_crop_border(max_border, height)
w_border = _get_crop_border(max_border, width)
def y_function():
y = _random_integer(h_border,
tf.cast(height, tf.int32) - h_border + 1,
seed)
return y
def x_function():
x = _random_integer(w_border,
tf.cast(width, tf.int32) - w_border + 1,
seed)
return x
y_center = _get_or_create_preprocess_rand_vars(
y_function,
preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE,
preprocess_vars_cache, 'y_center')
x_center = _get_or_create_preprocess_rand_vars(
x_function,
preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE,
preprocess_vars_cache, 'x_center')
half_size = tf.cast(image_size / 2, tf.int32)
crop_ymin, crop_ymax = y_center - half_size, y_center + half_size
crop_xmin, crop_xmax = x_center - half_size, x_center + half_size
ymin = tf.maximum(crop_ymin, 0)
xmin = tf.maximum(crop_xmin, 0)
ymax = tf.minimum(crop_ymax, height - 1)
xmax = tf.minimum(crop_xmax, width - 1)
cropped_image = image[ymin:ymax, xmin:xmax]
offset_y = tf.maximum(0, ymin - crop_ymin)
offset_x = tf.maximum(0, xmin - crop_xmin)
oy_i = offset_y
ox_i = offset_x
output_image = tf.image.pad_to_bounding_box(
cropped_image, offset_height=oy_i, offset_width=ox_i,
target_height=image_size, target_width=image_size)
if ymin == 0:
# We might be padding the image.
box_ymin = -offset_y
else:
box_ymin = crop_ymin
if xmin == 0:
# We might be padding the image.
box_xmin = -offset_x
else:
box_xmin = crop_xmin
box_ymax = box_ymin + image_size
box_xmax = box_xmin + image_size
image_box = [box_ymin / height, box_xmin / width,
box_ymax / height, box_xmax / width]
boxlist = box_list.BoxList(boxes)
boxlist = box_list_ops.change_coordinate_frame(boxlist, image_box)
boxlist, indices = box_list_ops.prune_completely_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0])
boxlist = box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0],
filter_nonoverlapping=False)
return_values = [output_image, boxlist.get(),
tf.gather(labels, indices),
tf.gather(label_weights, indices)]
if label_confidences is not None:
return_values.append(tf.gather(label_confidences, indices))
if masks is not None:
new_masks = tf.expand_dims(masks, -1)
new_masks = new_masks[:, ymin:ymax, xmin:xmax]
new_masks = tf.image.pad_to_bounding_box(
new_masks, oy_i, ox_i, image_size, image_size)
new_masks = tf.squeeze(new_masks, [-1])
return_values.append(tf.gather(new_masks, indices))
if keypoints is not None:
keypoints = tf.gather(keypoints, indices)
keypoints = keypoint_ops.change_coordinate_frame(keypoints, image_box)
keypoints = keypoint_ops.prune_outside_window(keypoints,
[0.0, 0.0, 1.0, 1.0])
return_values.append(keypoints)
return return_values
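# --- Illustrative usage sketch (not part of the original library) ---
# Extracts a square crop whose side is a randomly scaled version of the larger
# image dimension; regions falling outside the image are zero-filled. All
# values below are placeholders chosen for the example.
def _example_random_square_crop_by_scale():
  image = tf.random_uniform([240, 320, 3])
  boxes = tf.constant([[0.1, 0.1, 0.6, 0.5]])
  labels = tf.constant([1], dtype=tf.int32)
  label_weights = tf.constant([1.0])
  return random_square_crop_by_scale(
      image, boxes, labels, label_weights,
      max_border=32, scale_min=0.6, scale_max=1.3, num_scales=8)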
def random_scale_crop_and_pad_to_square(
image,
boxes,
labels,
label_weights,
masks=None,
keypoints=None,
label_confidences=None,
scale_min=0.1,
scale_max=2.0,
output_size=512,
resize_method=tf.image.ResizeMethod.BILINEAR,
seed=None):
"""Randomly scale, crop, and then pad an image to fixed square dimensions.
Randomly scale, crop, and then pad an image to the desired square output
dimensions. Specifically, this method first samples a random_scale factor
from a uniform distribution between scale_min and scale_max, and then resizes
the image such that its maximum dimension is (output_size * random_scale).
Secondly, a square output_size crop is extracted from the resized image
(note, this will only occur when random_scale > 1.0). Lastly, the cropped
region is padded to the desired square output_size, by filling with zeros.
The augmentation is borrowed from [1]
[1]: https://arxiv.org/abs/1911.09070
Args:
image: rank 3 float32 tensor containing 1 image ->
[height, width, channels].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes
are in normalized form meaning their coordinates vary between [0, 1]. Each
row is in the form of [ymin, xmin, ymax, xmax]. Boxes on the crop boundary
are clipped to the boundary and boxes falling outside the crop are
ignored.
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
width] containing instance masks. The masks are of the same height, width
as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape [num_instances,
num_keypoints, 2]. The keypoints are in y-x normalized coordinates.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
scale_min: float, the minimum value for the random scale factor.
scale_max: float, the maximum value for the random scale factor.
output_size: int, the desired (square) output image size.
resize_method: tf.image.ResizeMethod, resize method to use when scaling the
input images.
seed: random seed.
Returns:
image: image of the same rank as the input image.
boxes: boxes of the same rank as the input boxes, in normalized form.
labels: new labels.
label_weights: rank 1 float32 tensor with shape [num_instances].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
label_confidences: confidences for retained boxes.
"""
img_shape = tf.shape(image)
input_height, input_width = img_shape[0], img_shape[1]
random_scale = tf.random_uniform([], scale_min, scale_max, seed=seed)
# Compute the scaled height and width from the random scale.
max_input_dim = tf.cast(tf.maximum(input_height, input_width), tf.float32)
input_ar_y = tf.cast(input_height, tf.float32) / max_input_dim
input_ar_x = tf.cast(input_width, tf.float32) / max_input_dim
scaled_height = tf.cast(random_scale * output_size * input_ar_y, tf.int32)
scaled_width = tf.cast(random_scale * output_size * input_ar_x, tf.int32)
# Compute the offsets:
offset_y = tf.cast(scaled_height - output_size, tf.float32)
offset_x = tf.cast(scaled_width - output_size, tf.float32)
offset_y = tf.maximum(0.0, offset_y) * tf.random_uniform([], 0, 1, seed=seed)
offset_x = tf.maximum(0.0, offset_x) * tf.random_uniform([], 0, 1, seed=seed)
offset_y = tf.cast(offset_y, tf.int32)
offset_x = tf.cast(offset_x, tf.int32)
# Scale, crop, and pad the input image.
scaled_image = tf.image.resize_images(
image, [scaled_height, scaled_width], method=resize_method)
scaled_image = scaled_image[offset_y:offset_y + output_size,
offset_x:offset_x + output_size, :]
output_image = tf.image.pad_to_bounding_box(scaled_image, 0, 0, output_size,
output_size)
# Update the boxes.
new_window = tf.cast(
tf.stack([offset_y, offset_x,
offset_y + output_size, offset_x + output_size]),
dtype=tf.float32)
new_window /= tf.cast(
tf.stack([scaled_height, scaled_width, scaled_height, scaled_width]),
dtype=tf.float32)
boxlist = box_list.BoxList(boxes)
boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window)
boxlist, indices = box_list_ops.prune_completely_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0])
boxlist = box_list_ops.clip_to_window(
boxlist, [0.0, 0.0, 1.0, 1.0], filter_nonoverlapping=False)
return_values = [output_image, boxlist.get(),
tf.gather(labels, indices),
tf.gather(label_weights, indices)]
if masks is not None:
new_masks = tf.expand_dims(masks, -1)
new_masks = tf.image.resize_images(
new_masks, [scaled_height, scaled_width], method=resize_method)
new_masks = new_masks[:, offset_y:offset_y + output_size,
offset_x:offset_x + output_size, :]
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, output_size, output_size)
new_masks = tf.squeeze(new_masks, [-1])
return_values.append(tf.gather(new_masks, indices))
if keypoints is not None:
keypoints = tf.gather(keypoints, indices)
keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window)
keypoints = keypoint_ops.prune_outside_window(
keypoints, [0.0, 0.0, 1.0, 1.0])
return_values.append(keypoints)
if label_confidences is not None:
return_values.append(tf.gather(label_confidences, indices))
return return_values
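# --- Worked example (a sketch, not library code) ---
# For a 200x400 input, output_size=512 and a sampled random_scale of 0.5:
# max_input_dim = 400, so the image is resized to
# (0.5 * 512 * 200/400, 0.5 * 512 * 400/400) = (128, 256). Both dimensions are
# below 512, so no crop is taken and the result is zero-padded to 512x512.
def _example_random_scale_crop_and_pad_to_square():
  image = tf.random_uniform([200, 400, 3])
  boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]])
  labels = tf.constant([1], dtype=tf.int32)
  label_weights = tf.constant([1.0])
  return random_scale_crop_and_pad_to_square(
      image, boxes, labels, label_weights,
      scale_min=0.1, scale_max=2.0, output_size=512)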
def get_default_func_arg_map(include_label_weights=True,
include_label_confidences=False,
include_multiclass_scores=False,
include_instance_masks=False,
include_instance_mask_weights=False,
include_keypoints=False,
include_keypoint_visibilities=False,
include_dense_pose=False,
include_keypoint_depths=False):
"""Returns the default mapping from a preprocessor function to its args.
Args:
include_label_weights: If True, preprocessing functions will modify the
label weights, too.
include_label_confidences: If True, preprocessing functions will modify the
label confidences, too.
include_multiclass_scores: If True, preprocessing functions will modify the
multiclass scores, too.
include_instance_masks: If True, preprocessing functions will modify the
instance masks, too.
include_instance_mask_weights: If True, preprocessing functions will modify
the instance mask weights.
include_keypoints: If True, preprocessing functions will modify the
keypoints, too.
include_keypoint_visibilities: If True, preprocessing functions will modify
the keypoint visibilities, too.
include_dense_pose: If True, preprocessing functions will modify the
DensePose labels, too.
include_keypoint_depths: If True, preprocessing functions will modify the
keypoint depth labels, too.
Returns:
A map from preprocessing functions to the arguments they receive.
"""
groundtruth_label_weights = None
if include_label_weights:
groundtruth_label_weights = (
fields.InputDataFields.groundtruth_weights)
groundtruth_label_confidences = None
if include_label_confidences:
groundtruth_label_confidences = (
fields.InputDataFields.groundtruth_confidences)
multiclass_scores = None
if include_multiclass_scores:
multiclass_scores = (fields.InputDataFields.multiclass_scores)
groundtruth_instance_masks = None
if include_instance_masks:
groundtruth_instance_masks = (
fields.InputDataFields.groundtruth_instance_masks)
groundtruth_instance_mask_weights = None
if include_instance_mask_weights:
groundtruth_instance_mask_weights = (
fields.InputDataFields.groundtruth_instance_mask_weights)
groundtruth_keypoints = None
if include_keypoints:
groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints
groundtruth_keypoint_visibilities = None
if include_keypoint_visibilities:
groundtruth_keypoint_visibilities = (
fields.InputDataFields.groundtruth_keypoint_visibilities)
groundtruth_dp_num_points = None
groundtruth_dp_part_ids = None
groundtruth_dp_surface_coords = None
if include_dense_pose:
groundtruth_dp_num_points = (
fields.InputDataFields.groundtruth_dp_num_points)
groundtruth_dp_part_ids = (
fields.InputDataFields.groundtruth_dp_part_ids)
groundtruth_dp_surface_coords = (
fields.InputDataFields.groundtruth_dp_surface_coords)
groundtruth_keypoint_depths = None
groundtruth_keypoint_depth_weights = None
if include_keypoint_depths:
groundtruth_keypoint_depths = (
fields.InputDataFields.groundtruth_keypoint_depths)
groundtruth_keypoint_depth_weights = (
fields.InputDataFields.groundtruth_keypoint_depth_weights)
prep_func_arg_map = {
normalize_image: (fields.InputDataFields.image,),
random_horizontal_flip: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,
groundtruth_keypoint_visibilities,
groundtruth_dp_part_ids,
groundtruth_dp_surface_coords,
groundtruth_keypoint_depths,
groundtruth_keypoint_depth_weights,
),
random_vertical_flip: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,
),
random_rotation90: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,
),
random_pixel_value_scale: (fields.InputDataFields.image,),
random_image_scale: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
random_rgb_to_gray: (fields.InputDataFields.image,),
random_adjust_brightness: (fields.InputDataFields.image,),
random_adjust_contrast: (fields.InputDataFields.image,),
random_adjust_hue: (fields.InputDataFields.image,),
random_adjust_saturation: (fields.InputDataFields.image,),
random_distort_color: (fields.InputDataFields.image,),
random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,),
random_crop_image:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_label_confidences,
multiclass_scores, groundtruth_instance_masks,
groundtruth_instance_mask_weights, groundtruth_keypoints,
groundtruth_keypoint_visibilities, groundtruth_dp_num_points,
groundtruth_dp_part_ids, groundtruth_dp_surface_coords),
random_pad_image:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks,
groundtruth_keypoints, groundtruth_dp_surface_coords),
random_absolute_pad_image:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks,
groundtruth_keypoints, groundtruth_dp_surface_coords),
random_crop_pad_image: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences, multiclass_scores),
random_crop_to_aspect_ratio: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences,
multiclass_scores,
groundtruth_instance_masks,
groundtruth_keypoints,
),
random_pad_to_aspect_ratio: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,
),
random_black_patches: (fields.InputDataFields.image,),
random_jpeg_quality: (fields.InputDataFields.image,),
random_downscale_to_target_pixels: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
random_patch_gaussian: (fields.InputDataFields.image,),
autoaugment_image: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
),
retain_boxes_above_threshold: (
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences,
multiclass_scores,
groundtruth_instance_masks,
groundtruth_keypoints,
),
drop_label_probabilistically: (
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences,
multiclass_scores,
groundtruth_instance_masks,
groundtruth_keypoints,
),
remap_labels: (fields.InputDataFields.groundtruth_classes,),
image_to_float: (fields.InputDataFields.image,),
random_resize_method: (fields.InputDataFields.image,),
resize_to_range: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
resize_to_min_dimension: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
scale_boxes_to_pixel_coordinates: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_keypoints,
),
resize_image: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
subtract_channel_mean: (fields.InputDataFields.image,),
one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,),
rgb_to_gray: (fields.InputDataFields.image,),
random_self_concat_image:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_label_confidences,
multiclass_scores),
ssd_random_crop: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences, multiclass_scores,
groundtruth_instance_masks, groundtruth_keypoints),
ssd_random_crop_pad: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences, multiclass_scores),
ssd_random_crop_fixed_aspect_ratio:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_label_confidences,
multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints
),
ssd_random_crop_pad_fixed_aspect_ratio: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences,
multiclass_scores,
groundtruth_instance_masks,
groundtruth_keypoints,
),
convert_class_logits_to_softmax: (multiclass_scores,),
random_square_crop_by_scale:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_label_confidences,
groundtruth_instance_masks, groundtruth_keypoints),
random_scale_crop_and_pad_to_square:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_instance_masks,
groundtruth_keypoints, groundtruth_label_confidences),
adjust_gamma: (fields.InputDataFields.image,),
}
return prep_func_arg_map
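# --- Illustrative usage sketch (not part of the original library) ---
# The arg map ties each preprocessing function to the tensor_dict keys it
# consumes; enabling an optional field swaps the corresponding None entries
# for real field names.
def _example_func_arg_map_with_masks():
  arg_map = get_default_func_arg_map(include_instance_masks=True)
  # random_horizontal_flip's tuple now names groundtruth_instance_masks
  # instead of carrying None in that slot.
  return arg_map[random_horizontal_flip]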
def preprocess(tensor_dict,
preprocess_options,
func_arg_map=None,
preprocess_vars_cache=None):
"""Preprocess images and bounding boxes.
Applies the preprocessing operations listed in preprocess_options, e.g.
"crop image" (affects the image and possibly boxes) or "white balance
image" (affects only the image). If preprocess_options is empty, no
preprocessing is done.
Args:
tensor_dict: dictionary that contains images, boxes, and can contain other
things as well.
images-> rank 4 float32 tensor containing
1 image -> [1, height, width, 3]
with pixel values varying between [0, 1].
boxes-> rank 2 float32 tensor containing
the bounding boxes -> [N, 4].
Boxes are in normalized form meaning
their coordinates vary between [0, 1].
Each row is in the form
of [ymin, xmin, ymax, xmax].
preprocess_options: It is a list of tuples, where each tuple contains a
function and a dictionary that contains arguments and
their values.
func_arg_map: mapping from preprocessing functions to arguments that they
expect to receive and return.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
tensor_dict: which contains the preprocessed images, bounding boxes, etc.
Raises:
ValueError: (a) If the functions passed to Preprocess
are not in func_arg_map.
(b) If the arguments that a function needs
do not exist in tensor_dict.
(c) If image in tensor_dict is not rank 4
"""
if func_arg_map is None:
func_arg_map = get_default_func_arg_map()
# changes the images to image (rank 4 to rank 3) since the functions
# receive a rank 3 tensor for image
if fields.InputDataFields.image in tensor_dict:
images = tensor_dict[fields.InputDataFields.image]
if len(images.get_shape()) != 4:
raise ValueError('images in tensor_dict should be rank 4')
image = tf.squeeze(images, axis=0)
tensor_dict[fields.InputDataFields.image] = image
# Preprocess inputs based on preprocess_options
for option in preprocess_options:
func, params = option
if func not in func_arg_map:
raise ValueError('The function %s does not exist in func_arg_map' %
(func.__name__))
arg_names = func_arg_map[func]
for a in arg_names:
if a is not None and a not in tensor_dict:
raise ValueError('The function %s requires argument %s' %
(func.__name__, a))
def get_arg(key):
return tensor_dict[key] if key is not None else None
args = [get_arg(a) for a in arg_names]
if preprocess_vars_cache is not None:
if six.PY2:
# pylint: disable=deprecated-method
arg_spec = inspect.getargspec(func)
# pylint: enable=deprecated-method
else:
arg_spec = inspect.getfullargspec(func)
if 'preprocess_vars_cache' in arg_spec.args:
params['preprocess_vars_cache'] = preprocess_vars_cache
results = func(*args, **params)
if not isinstance(results, (list, tuple)):
results = (results,)
# Removes None args since the return values will not contain those.
arg_names = [arg_name for arg_name in arg_names if arg_name is not None]
for res, arg_name in zip(results, arg_names):
tensor_dict[arg_name] = res
# changes the image to images (rank 3 to rank 4) to be compatible with what
# we received in the first place
if fields.InputDataFields.image in tensor_dict:
image = tensor_dict[fields.InputDataFields.image]
images = tf.expand_dims(image, 0)
tensor_dict[fields.InputDataFields.image] = images
return tensor_dict
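# --- Illustrative usage sketch (not part of the original library) ---
# Note that preprocess() expects a rank 4 image ([1, height, width, 3]) and
# returns it as rank 4 as well. The option dicts hold per-function kwargs.
def _example_preprocess():
  tensor_dict = {
      fields.InputDataFields.image:
          tf.random_uniform([1, 100, 200, 3]),
      fields.InputDataFields.groundtruth_boxes:
          tf.constant([[0.1, 0.1, 0.8, 0.9]]),
  }
  preprocess_options = [
      (random_horizontal_flip, {}),
      (random_adjust_brightness, {'max_delta': 0.2}),
  ]
  return preprocess(tensor_dict, preprocess_options)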
# ============================================================================
# End of file: object_detection/core/preprocessor.py
# ============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides functions to batch a dictionary of input tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.core import prefetcher
rt_shape_str = '_runtime_shapes'
class BatchQueue(object):
"""BatchQueue class.
This class creates a batch queue to asynchronously enqueue tensors_dict.
It also adds a FIFO prefetcher so that the batches are readily available
for the consumers. Dequeue ops for a BatchQueue object can be created via
the Dequeue method which evaluates to a batch of tensor_dict.
Example input pipeline with batching:
------------------------------------
key, string_tensor = slim.parallel_reader.parallel_read(...)
tensor_dict = decoder.decode(string_tensor)
tensor_dict = preprocessor.preprocess(tensor_dict, ...)
batch_queue = batcher.BatchQueue(tensor_dict,
batch_size=32,
batch_queue_capacity=2000,
num_batch_queue_threads=8,
prefetch_queue_capacity=20)
tensor_dict = batch_queue.dequeue()
outputs = Model(tensor_dict)
...
-----------------------------------
Notes:
-----
This class batches tensors of unequal sizes by zero padding and unpadding
them after generating a batch. This can be computationally expensive when
batching tensors (such as images) that are of vastly different sizes. So it is
recommended that the shapes of such tensors be fully defined in tensor_dict
while other lightweight tensors such as bounding box corners and class labels
can be of varying sizes. Use either crop or resize operations to fully define
the shape of an image in tensor_dict.
It is also recommended to perform any preprocessing operations on tensors
before passing to BatchQueue and subsequently calling the Dequeue method.
Another caveat is that this class does not read the last batch if it is not
full. The current implementation makes it hard to support that use case. So,
for evaluation, when it is critical to run all the examples through your
network, use the input pipeline example mentioned in core/prefetcher.py.
"""
def __init__(self, tensor_dict, batch_size, batch_queue_capacity,
num_batch_queue_threads, prefetch_queue_capacity):
"""Constructs a batch queue holding tensor_dict.
Args:
tensor_dict: dictionary of tensors to batch.
batch_size: batch size.
batch_queue_capacity: max capacity of the queue from which the tensors are
batched.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: max capacity of the queue used to prefetch
assembled batches.
"""
# Remember static shapes to set shapes of batched tensors.
static_shapes = collections.OrderedDict(
{key: tensor.get_shape() for key, tensor in tensor_dict.items()})
# Remember runtime shapes to unpad tensors after batching.
runtime_shapes = collections.OrderedDict(
{(key + rt_shape_str): tf.shape(tensor)
for key, tensor in tensor_dict.items()})
all_tensors = tensor_dict
all_tensors.update(runtime_shapes)
batched_tensors = tf.train.batch(
all_tensors,
capacity=batch_queue_capacity,
batch_size=batch_size,
dynamic_pad=True,
num_threads=num_batch_queue_threads)
self._queue = prefetcher.prefetch(batched_tensors,
prefetch_queue_capacity)
self._static_shapes = static_shapes
self._batch_size = batch_size
def dequeue(self):
"""Dequeues a batch of tensor_dict from the BatchQueue.
TODO: use allow_smaller_final_batch to allow running over the whole eval set
Returns:
A list of tensor_dicts of the requested batch_size.
"""
batched_tensors = self._queue.dequeue()
# Separate input tensors from tensors containing their runtime shapes.
tensors = {}
shapes = {}
for key, batched_tensor in batched_tensors.items():
unbatched_tensor_list = tf.unstack(batched_tensor)
for i, unbatched_tensor in enumerate(unbatched_tensor_list):
if rt_shape_str in key:
shapes[(key[:-len(rt_shape_str)], i)] = unbatched_tensor
else:
tensors[(key, i)] = unbatched_tensor
# Undo that padding using shapes and create a list of size `batch_size` that
# contains tensor dictionaries.
tensor_dict_list = []
batch_size = self._batch_size
for batch_id in range(batch_size):
tensor_dict = {}
for key in self._static_shapes:
tensor_dict[key] = tf.slice(tensors[(key, batch_id)],
tf.zeros_like(shapes[(key, batch_id)]),
shapes[(key, batch_id)])
tensor_dict[key].set_shape(self._static_shapes[key])
tensor_dict_list.append(tensor_dict)
return tensor_dict_list
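# --- Illustrative usage sketch (not part of the original library) ---
# The pad/unpad round trip: tf.train.batch(dynamic_pad=True) zero-pads tensors
# of unequal size, and dequeue() slices each one back to its recorded runtime
# shape, returning `batch_size` unpadded tensor_dicts.
def _example_batch_queue(tensor_dict):
  batch_queue = BatchQueue(tensor_dict,
                           batch_size=4,
                           batch_queue_capacity=200,
                           num_batch_queue_threads=4,
                           prefetch_queue_capacity=10)
  return batch_queue.dequeue()  # list of 4 unpadded tensor_dicts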
# ============================================================================
# End of file: object_detection/core/batcher.py
# ============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification and regression loss functions for object detection.
Localization losses:
* WeightedL2LocalizationLoss
* WeightedSmoothL1LocalizationLoss
* WeightedIOULocalizationLoss
Classification losses:
* WeightedSigmoidClassificationLoss
* WeightedSoftmaxClassificationLoss
* WeightedSoftmaxClassificationAgainstLogitsLoss
* BootstrappedSigmoidClassificationLoss
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.utils import ops
from object_detection.utils import shape_utils
class Loss(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class for loss functions."""
def __call__(self,
prediction_tensor,
target_tensor,
ignore_nan_targets=False,
losses_mask=None,
scope=None,
**params):
"""Call the loss function.
Args:
prediction_tensor: an N-d tensor of shape [batch, anchors, ...]
representing predicted quantities.
target_tensor: an N-d tensor of shape [batch, anchors, ...] representing
regression or classification targets.
ignore_nan_targets: whether to ignore nan targets in the loss computation.
E.g. can be used if the target tensor is missing groundtruth data that
shouldn't be factored into the loss.
losses_mask: A [batch] boolean tensor that indicates whether losses should
be applied to individual images in the batch. For elements that
are False, corresponding prediction, target, and weight tensors will not
contribute to loss computation. If None, no filtering will take place
prior to loss computation.
scope: Op scope name. Defaults to 'Loss' if None.
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: a tensor representing the value of the loss function.
"""
with tf.name_scope(scope, 'Loss',
[prediction_tensor, target_tensor, params]) as scope:
if ignore_nan_targets:
target_tensor = tf.where(tf.is_nan(target_tensor),
prediction_tensor,
target_tensor)
if losses_mask is not None:
tensor_multiplier = self._get_loss_multiplier_for_tensor(
prediction_tensor,
losses_mask)
prediction_tensor *= tensor_multiplier
target_tensor *= tensor_multiplier
if 'weights' in params:
params['weights'] = tf.convert_to_tensor(params['weights'])
weights_multiplier = self._get_loss_multiplier_for_tensor(
params['weights'],
losses_mask)
params['weights'] *= weights_multiplier
return self._compute_loss(prediction_tensor, target_tensor, **params)
def _get_loss_multiplier_for_tensor(self, tensor, losses_mask):
loss_multiplier_shape = tf.stack([-1] + [1] * (len(tensor.shape) - 1))
return tf.cast(tf.reshape(losses_mask, loss_multiplier_shape), tf.float32)
@abc.abstractmethod
def _compute_loss(self, prediction_tensor, target_tensor, **params):
"""Method to be overridden by implementations.
Args:
prediction_tensor: a tensor representing predicted quantities
target_tensor: a tensor representing regression or classification targets
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per
anchor
"""
pass
class WeightedL2LocalizationLoss(Loss):
"""L2 localization loss function with anchorwise output support.
Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2
"""
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims(
weights, 2)
square_diff = 0.5 * tf.square(weighted_diff)
return tf.reduce_sum(square_diff, 2)
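# --- Worked example (a sketch, not library code) ---
# With one anchor, prediction [1, 1], target [0, 0] and weight 1.0, the loss
# is 0.5 * (1^2 + 1^2) = 1.0.
def _example_weighted_l2_loss():
  loss_fn = WeightedL2LocalizationLoss()
  prediction = tf.constant([[[1.0, 1.0]]])  # [batch=1, anchors=1, code_size=2]
  target = tf.constant([[[0.0, 0.0]]])
  weights = tf.constant([[1.0]])            # [batch=1, anchors=1]
  return loss_fn(prediction, target, weights=weights)  # -> [[1.0]]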
class WeightedSmoothL1LocalizationLoss(Loss):
"""Smooth L1 localization loss function aka Huber Loss..
The smooth L1_loss is defined elementwise as .5 x^2 if |x| <= delta and
delta * (|x|- 0.5*delta) otherwise, where x is the difference between
predictions and target.
See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
"""
def __init__(self, delta=1.0):
"""Constructor.
Args:
delta: delta for smooth L1 loss.
"""
super(WeightedSmoothL1LocalizationLoss, self).__init__()
self._delta = delta
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
return tf.reduce_sum(tf.losses.huber_loss(
target_tensor,
prediction_tensor,
delta=self._delta,
weights=tf.expand_dims(weights, axis=2),
loss_collection=None,
reduction=tf.losses.Reduction.NONE
), axis=2)
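# --- Worked example (a sketch, not library code) ---
# With delta=1.0, a residual of 0.5 falls in the quadratic regime and costs
# 0.5 * 0.5^2 = 0.125; a residual of 2.0 falls in the linear regime and costs
# 1.0 * (2.0 - 0.5) = 1.5. Summed over the code dimension: 1.625.
def _example_smooth_l1_loss():
  loss_fn = WeightedSmoothL1LocalizationLoss(delta=1.0)
  prediction = tf.constant([[[0.5, 2.0]]])
  target = tf.constant([[[0.0, 0.0]]])
  weights = tf.constant([[1.0]])
  return loss_fn(prediction, target, weights=weights)  # -> [[1.625]]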
class WeightedIOULocalizationLoss(Loss):
"""IOU localization loss function.
Sums the IOU for corresponding pairs of predicted/groundtruth boxes
and for each pair assigns a loss of 1 - IOU. We then compute a weighted
sum over all pairs which is returned as the total loss.
"""
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4]
representing the decoded predicted boxes
target_tensor: A float tensor of shape [batch_size, num_anchors, 4]
representing the decoded target boxes
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [-1, 4]))
target_boxes = box_list.BoxList(tf.reshape(target_tensor, [-1, 4]))
per_anchor_iou_loss = 1.0 - box_list_ops.matched_iou(predicted_boxes,
target_boxes)
return tf.reshape(weights, [-1]) * per_anchor_iou_loss
class WeightedGIOULocalizationLoss(Loss):
"""GIOU localization loss function.
Sums the GIOU loss for corresponding pairs of predicted/groundtruth boxes
and for each pair assigns a loss of 1 - GIOU. We then compute a weighted
sum over all pairs which is returned as the total loss.
"""
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4]
representing the decoded predicted boxes
target_tensor: A float tensor of shape [batch_size, num_anchors, 4]
representing the decoded target boxes
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
batch_size, num_anchors, _ = shape_utils.combined_static_and_dynamic_shape(
prediction_tensor)
predicted_boxes = tf.reshape(prediction_tensor, [-1, 4])
target_boxes = tf.reshape(target_tensor, [-1, 4])
per_anchor_iou_loss = 1 - ops.giou(predicted_boxes, target_boxes)
return tf.reshape(tf.reshape(weights, [-1]) * per_anchor_iou_loss,
[batch_size, num_anchors])
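# --- Worked example (a sketch, not library code) ---
# Identical boxes give GIOU = 1 and loss 0. The disjoint pair [0, 0, 1, 1] and
# [1, 1, 2, 2] has IoU 0, union area 2 and enclosing-box area 4, so
# GIOU = 0 - (4 - 2) / 4 = -0.5 and the loss is 1 - (-0.5) = 1.5.
def _example_giou_loss():
  loss_fn = WeightedGIOULocalizationLoss()
  prediction = tf.constant([[[0.0, 0.0, 1.0, 1.0]]])
  target = tf.constant([[[1.0, 1.0, 2.0, 2.0]]])
  weights = tf.constant([[1.0]])
  return loss_fn(prediction, target, weights=weights)  # -> [[1.5]]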
class WeightedSigmoidClassificationLoss(Loss):
"""Sigmoid cross entropy classification loss function."""
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape either [batch_size, num_anchors,
num_classes] or [batch_size, num_anchors, 1]. If the shape is
[batch_size, num_anchors, 1], all the classes are equally weighted.
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
if class_indices is not None:
weights *= tf.reshape(
ops.indices_to_dense_vector(class_indices,
tf.shape(prediction_tensor)[2]),
[1, 1, -1])
per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
return per_entry_cross_ent * weights
class WeightedDiceClassificationLoss(Loss):
"""Dice loss for classification [1][2].
[1]: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
[2]: https://arxiv.org/abs/1606.04797
"""
def __init__(self, squared_normalization):
"""Initializes the loss object.
Args:
squared_normalization: boolean, if set, we square the probabilities in the
denominator term used for normalization.
"""
self._squared_normalization = squared_normalization
super(WeightedDiceClassificationLoss, self).__init__()
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Computes the loss value.
Dice loss uses the area of the ground truth and prediction tensors for
normalization. We compute area by summing along the anchors (2nd) dimension.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_pixels,
num_classes] representing the predicted logits for each class.
num_pixels denotes the total number of pixels in the spatial dimensions
of the mask after flattening.
target_tensor: A float tensor of shape [batch_size, num_pixels,
num_classes] representing one-hot encoded classification targets.
num_pixels denotes the total number of pixels in the spatial dimensions
of the mask after flattening.
weights: a float tensor of shape either [batch_size, num_anchors,
num_classes] or [batch_size, num_anchors, 1]. If the shape is
[batch_size, num_anchors, 1], all the classes are equally weighted.
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_classes]
representing the value of the loss function.
"""
if class_indices is not None:
weights *= tf.reshape(
ops.indices_to_dense_vector(class_indices,
tf.shape(prediction_tensor)[2]),
[1, 1, -1])
prob_tensor = tf.nn.sigmoid(prediction_tensor)
if self._squared_normalization:
prob_tensor = tf.pow(prob_tensor, 2)
target_tensor = tf.pow(target_tensor, 2)
prob_tensor *= weights
target_tensor *= weights
prediction_area = tf.reduce_sum(prob_tensor, axis=1)
gt_area = tf.reduce_sum(target_tensor, axis=1)
intersection = tf.reduce_sum(prob_tensor * target_tensor, axis=1)
dice_coeff = 2 * intersection / tf.maximum(gt_area + prediction_area, 1.0)
dice_loss = 1 - dice_coeff
return dice_loss
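# --- Worked example (a sketch, not library code) ---
# With predicted probabilities ~[1, 0] (approximated by large +/- logits) and
# targets [1, 1]: intersection = 1, prediction area = 1, ground-truth area = 2,
# so dice = 2 * 1 / (1 + 2) and the loss is 1 - 2/3 = 1/3.
def _example_dice_loss():
  loss_fn = WeightedDiceClassificationLoss(squared_normalization=False)
  logits = tf.constant([[[100.0], [-100.0]]])  # [batch=1, pixels=2, classes=1]
  targets = tf.constant([[[1.0], [1.0]]])
  weights = tf.constant([[[1.0], [1.0]]])
  return loss_fn(logits, targets, weights=weights)  # -> [[~0.3333]]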
class SigmoidFocalClassificationLoss(Loss):
"""Sigmoid focal cross entropy loss.
Focal loss down-weights well-classified examples and focuses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
"""
super(SigmoidFocalClassificationLoss, self).__init__()
self._alpha = alpha
self._gamma = gamma
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape either [batch_size, num_anchors,
num_classes] or [batch_size, num_anchors, 1]. If the shape is
[batch_size, num_anchors, 1], all the classes are equally weighted.
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
if class_indices is not None:
weights *= tf.reshape(
ops.indices_to_dense_vector(class_indices,
tf.shape(prediction_tensor)[2]),
[1, 1, -1])
per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
prediction_probabilities = tf.sigmoid(prediction_tensor)
p_t = ((target_tensor * prediction_probabilities) +
((1 - target_tensor) * (1 - prediction_probabilities)))
modulating_factor = 1.0
if self._gamma:
modulating_factor = tf.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = (target_tensor * self._alpha +
(1 - target_tensor) * (1 - self._alpha))
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
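# --- Worked example (a sketch, not library code) ---
# For a positive anchor predicted with probability 0.9 (an easy example),
# p_t = 0.9 and the modulating factor is (1 - 0.9)^2 = 0.01, so its cross
# entropy of -log(0.9) ~= 0.105 is scaled by alpha * 0.01. A hard positive
# with probability 0.1 keeps a factor of (1 - 0.1)^2 = 0.81 instead.
def _example_sigmoid_focal_loss():
  loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
  logits = tf.constant([[[2.197]]])  # sigmoid(2.197) ~= 0.9
  targets = tf.constant([[[1.0]]])
  weights = tf.constant([[[1.0]]])
  return loss_fn(logits, targets, weights=weights)  # -> ~[[2.6e-4]]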
class WeightedSoftmaxClassificationLoss(Loss):
"""Softmax loss function."""
def __init__(self, logit_scale=1.0):
"""Constructor.
Args:
logit_scale: When this value is high, the prediction is "diffused" and
when this value is low, the prediction is made peakier.
(default 1.0)
"""
super(WeightedSoftmaxClassificationLoss, self).__init__()
self._logit_scale = logit_scale
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape either [batch_size, num_anchors,
num_classes] or [batch_size, num_anchors, 1]. If the shape is
[batch_size, num_anchors, 1], all the classes are equally weighted.
Returns:
loss: a float tensor of shape [batch_size, num_anchors]
representing the value of the loss function.
"""
weights = tf.reduce_mean(weights, axis=2)
num_classes = prediction_tensor.get_shape().as_list()[-1]
prediction_tensor = tf.divide(
prediction_tensor, self._logit_scale, name='scale_logit')
per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
labels=tf.reshape(target_tensor, [-1, num_classes]),
logits=tf.reshape(prediction_tensor, [-1, num_classes])))
return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights
class WeightedSoftmaxClassificationAgainstLogitsLoss(Loss):
"""Softmax loss function against logits.
Targets are expected to be provided in logits space instead of "one hot" or
"probability distribution" space.
"""
def __init__(self, logit_scale=1.0):
"""Constructor.
Args:
logit_scale: When this value is high, the target is "diffused" and
when this value is low, the target is made peakier.
(default 1.0)
"""
super(WeightedSoftmaxClassificationAgainstLogitsLoss, self).__init__()
self._logit_scale = logit_scale
def _scale_and_softmax_logits(self, logits):
"""Scale logits then apply softmax."""
scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits')
return tf.nn.softmax(scaled_logits, name='convert_scores')
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing logit classification targets
weights: a float tensor of shape either [batch_size, num_anchors,
num_classes] or [batch_size, num_anchors, 1]. If the shape is
[batch_size, num_anchors, 1], all the classes are equally weighted.
Returns:
loss: a float tensor of shape [batch_size, num_anchors]
representing the value of the loss function.
"""
weights = tf.reduce_mean(weights, axis=2)
num_classes = prediction_tensor.get_shape().as_list()[-1]
target_tensor = self._scale_and_softmax_logits(target_tensor)
prediction_tensor = tf.divide(prediction_tensor, self._logit_scale,
name='scale_logits')
per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
labels=tf.reshape(target_tensor, [-1, num_classes]),
logits=tf.reshape(prediction_tensor, [-1, num_classes])))
return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights
class BootstrappedSigmoidClassificationLoss(Loss):
"""Bootstrapped sigmoid cross entropy classification loss function.
This loss uses a convex combination of training labels and the current model's
predictions as training targets in the classification loss. The idea is that
as the model improves over time, its predictions can be trusted more and we
can use these predictions to mitigate the damage of noisy/incorrect labels,
because incorrect labels are likely to be eventually highly inconsistent with
other stimuli predicted to have the same label by the model.
In "soft" bootstrapping, we use all predicted class probabilities, whereas in
"hard" bootstrapping, we use the single class favored by the model.
See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by
Reed et al. (ICLR 2015).
"""
def __init__(self, alpha, bootstrap_type='soft'):
"""Constructor.
Args:
alpha: a float32 scalar tensor between 0 and 1 representing interpolation
weight
bootstrap_type: set to either 'hard' or 'soft' (default)
Raises:
ValueError: if bootstrap_type is not either 'hard' or 'soft'
"""
super(BootstrappedSigmoidClassificationLoss, self).__init__()
if bootstrap_type != 'hard' and bootstrap_type != 'soft':
raise ValueError('Unrecognized bootstrap_type: must be one of '
                       '\'hard\' or \'soft\'.')
self._alpha = alpha
self._bootstrap_type = bootstrap_type
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
if self._bootstrap_type == 'soft':
bootstrap_target_tensor = self._alpha * target_tensor + (
1.0 - self._alpha) * tf.sigmoid(prediction_tensor)
else:
bootstrap_target_tensor = self._alpha * target_tensor + (
1.0 - self._alpha) * tf.cast(
tf.sigmoid(prediction_tensor) > 0.5, tf.float32)
per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
labels=bootstrap_target_tensor, logits=prediction_tensor))
return per_entry_cross_ent * weights
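# The bootstrap targets above are a convex combination of labels and model
# predictions:
#
#   soft: target' = alpha * target + (1 - alpha) * sigmoid(logits)
#   hard: target' = alpha * target + (1 - alpha) * 1[sigmoid(logits) > 0.5]
#
# Hedged usage sketch (illustrative only): alpha close to 1.0 trusts the
# labels; lowering it shifts trust towards the model's own predictions.
#
#   loss_fn = BootstrappedSigmoidClassificationLoss(
#       alpha=0.95, bootstrap_type='soft')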
class HardExampleMiner(object):
"""Hard example mining for regions in a list of images.
Implements hard example mining to select a subset of regions to be
back-propagated. For each image, selects the regions with highest losses,
subject to the condition that a newly selected region cannot have
an IOU > iou_threshold with any of the previously selected regions.
This can be achieved by re-using a greedy non-maximum suppression algorithm.
A constraint on the number of negatives mined per positive region can also be
enforced.
Reference papers: "Training Region-based Object Detectors with Online
Hard Example Mining" (CVPR 2016) by Srivastava et al., and
"SSD: Single Shot MultiBox Detector" (ECCV 2016) by Liu et al.
"""
def __init__(self,
num_hard_examples=64,
iou_threshold=0.7,
loss_type='both',
cls_loss_weight=0.05,
loc_loss_weight=0.06,
max_negatives_per_positive=None,
min_negatives_per_image=0):
"""Constructor.
    The hard example mining implemented by this class can replicate the behavior
    in the two aforementioned papers (Shrivastava et al. and Liu et al.).
    To replicate the OHEM paper (Shrivastava et al.), num_hard_examples is set
    to a fixed parameter (64 by default) and iou_threshold is set to .7 for
    running non-max-suppression on the predicted boxes prior to hard mining.
    In order to replicate the SSD paper (Liu et al.), num_hard_examples should
    be set to None, max_negatives_per_positive should be 3 and iou_threshold
    should be 1.0 (in order to effectively turn off NMS).
Args:
num_hard_examples: maximum number of hard examples to be
selected per image (prior to enforcing max negative to positive ratio
constraint). If set to None, all examples obtained after NMS are
considered.
iou_threshold: minimum intersection over union for an example
to be discarded during NMS.
      loss_type: use only classification losses ('cls'), only localization
        losses ('loc'), or both losses ('both', default).
        In the last case, cls_loss_weight and loc_loss_weight are used to
        compute weighted sum of the two losses.
cls_loss_weight: weight for classification loss.
loc_loss_weight: weight for location loss.
      max_negatives_per_positive: maximum number of negatives to retain for
        each positive anchor. By default, max_negatives_per_positive is None,
        which means that we do not enforce a prespecified negative:positive
        ratio. Note also that max_negatives_per_positive can be a float
        (and will be converted to a float even if it is passed in otherwise).
      min_negatives_per_image: minimum number of negative anchors to sample for
        a given image. Setting this to a positive number allows sampling
        negatives in an image without any positive anchors, and thus avoids
        biasing training towards images with at least one detection.
"""
self._num_hard_examples = num_hard_examples
self._iou_threshold = iou_threshold
self._loss_type = loss_type
self._cls_loss_weight = cls_loss_weight
self._loc_loss_weight = loc_loss_weight
self._max_negatives_per_positive = max_negatives_per_positive
self._min_negatives_per_image = min_negatives_per_image
if self._max_negatives_per_positive is not None:
self._max_negatives_per_positive = float(self._max_negatives_per_positive)
self._num_positives_list = None
self._num_negatives_list = None
def __call__(self,
location_losses,
cls_losses,
decoded_boxlist_list,
match_list=None):
"""Computes localization and classification losses after hard mining.
Args:
location_losses: a float tensor of shape [num_images, num_anchors]
representing anchorwise localization losses.
cls_losses: a float tensor of shape [num_images, num_anchors]
representing anchorwise classification losses.
decoded_boxlist_list: a list of decoded BoxList representing location
predictions for each image.
match_list: an optional list of matcher.Match objects encoding the match
between anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors. Match objects in match_list are
used to reference which anchors are positive, negative or ignored. If
self._max_negatives_per_positive exists, these are then used to enforce
a prespecified negative to positive ratio.
Returns:
mined_location_loss: a float scalar with sum of localization losses from
selected hard examples.
mined_cls_loss: a float scalar with sum of classification losses from
selected hard examples.
Raises:
ValueError: if location_losses, cls_losses and decoded_boxlist_list do
not have compatible shapes (i.e., they must correspond to the same
number of images).
ValueError: if match_list is specified but its length does not match
len(decoded_boxlist_list).
"""
mined_location_losses = []
mined_cls_losses = []
location_losses = tf.unstack(location_losses)
cls_losses = tf.unstack(cls_losses)
num_images = len(decoded_boxlist_list)
if not match_list:
match_list = num_images * [None]
if not len(location_losses) == len(decoded_boxlist_list) == len(cls_losses):
raise ValueError('location_losses, cls_losses and decoded_boxlist_list '
'do not have compatible shapes.')
if not isinstance(match_list, list):
raise ValueError('match_list must be a list.')
if len(match_list) != len(decoded_boxlist_list):
raise ValueError('match_list must either be None or have '
'length=len(decoded_boxlist_list).')
num_positives_list = []
num_negatives_list = []
for ind, detection_boxlist in enumerate(decoded_boxlist_list):
box_locations = detection_boxlist.get()
match = match_list[ind]
image_losses = cls_losses[ind]
if self._loss_type == 'loc':
image_losses = location_losses[ind]
elif self._loss_type == 'both':
image_losses *= self._cls_loss_weight
image_losses += location_losses[ind] * self._loc_loss_weight
if self._num_hard_examples is not None:
num_hard_examples = self._num_hard_examples
else:
num_hard_examples = detection_boxlist.num_boxes()
selected_indices = tf.image.non_max_suppression(
box_locations, image_losses, num_hard_examples, self._iou_threshold)
if self._max_negatives_per_positive is not None and match:
(selected_indices, num_positives,
num_negatives) = self._subsample_selection_to_desired_neg_pos_ratio(
selected_indices, match, self._max_negatives_per_positive,
self._min_negatives_per_image)
num_positives_list.append(num_positives)
num_negatives_list.append(num_negatives)
mined_location_losses.append(
tf.reduce_sum(tf.gather(location_losses[ind], selected_indices)))
mined_cls_losses.append(
tf.reduce_sum(tf.gather(cls_losses[ind], selected_indices)))
location_loss = tf.reduce_sum(tf.stack(mined_location_losses))
cls_loss = tf.reduce_sum(tf.stack(mined_cls_losses))
if match and self._max_negatives_per_positive:
self._num_positives_list = num_positives_list
self._num_negatives_list = num_negatives_list
return (location_loss, cls_loss)
def summarize(self):
"""Summarize the number of positives and negatives after mining."""
if self._num_positives_list and self._num_negatives_list:
avg_num_positives = tf.reduce_mean(
tf.cast(self._num_positives_list, dtype=tf.float32))
avg_num_negatives = tf.reduce_mean(
tf.cast(self._num_negatives_list, dtype=tf.float32))
tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives)
tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives)
def _subsample_selection_to_desired_neg_pos_ratio(self,
indices,
match,
max_negatives_per_positive,
min_negatives_per_image=0):
"""Subsample a collection of selected indices to a desired neg:pos ratio.
This function takes a subset of M indices (indexing into a large anchor
collection of N anchors where M<N) which are labeled as positive/negative
via a Match object (matched indices are positive, unmatched indices
are negative). It returns a subset of the provided indices retaining all
positives as well as up to the first K negatives, where:
      K = floor(max_negatives_per_positive * num_positives).
For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors),
with positives=[2, 5] and negatives=[4, 7, 9, 10] and
    max_negatives_per_positive=1, then the returned subset of indices
is [2, 4, 5, 7].
Args:
indices: An integer tensor of shape [M] representing a collection
of selected anchor indices
match: A matcher.Match object encoding the match between anchors and
groundtruth boxes for a given image, with rows of the Match objects
corresponding to groundtruth boxes and columns corresponding to anchors.
max_negatives_per_positive: (float) maximum number of negatives for
each positive anchor.
min_negatives_per_image: minimum number of negative anchors for a given
        image. Allows sampling negatives in images without any positive anchors.
Returns:
selected_indices: An integer tensor of shape [M'] representing a
collection of selected anchor indices with M' <= M.
num_positives: An integer tensor representing the number of positive
examples in selected set of indices.
num_negatives: An integer tensor representing the number of negative
examples in selected set of indices.
"""
positives_indicator = tf.gather(match.matched_column_indicator(), indices)
negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices)
num_positives = tf.reduce_sum(tf.cast(positives_indicator, dtype=tf.int32))
max_negatives = tf.maximum(
min_negatives_per_image,
tf.cast(max_negatives_per_positive *
tf.cast(num_positives, dtype=tf.float32), dtype=tf.int32))
topk_negatives_indicator = tf.less_equal(
tf.cumsum(tf.cast(negatives_indicator, dtype=tf.int32)), max_negatives)
subsampled_selection_indices = tf.where(
tf.logical_or(positives_indicator, topk_negatives_indicator))
num_negatives = tf.size(subsampled_selection_indices) - num_positives
return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]),
num_positives, num_negatives)
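# Hedged configuration sketches for HardExampleMiner (illustrative only),
# mirroring the two regimes described in the constructor docstring:
#
#   # OHEM-style mining (Shrivastava et al.): fixed budget, NMS at IOU 0.7.
#   ohem_miner = HardExampleMiner(num_hard_examples=64, iou_threshold=0.7)
#
#   # SSD-style mining (Liu et al.): no fixed budget, NMS effectively off,
#   # at most 3 negatives retained per positive anchor.
#   ssd_miner = HardExampleMiner(num_hard_examples=None, iou_threshold=1.0,
#                                max_negatives_per_positive=3)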
class PenaltyReducedLogisticFocalLoss(Loss):
"""Penalty-reduced pixelwise logistic regression with focal loss.
The loss is defined in Equation (1) of the Objects as Points[1] paper.
Although the loss is defined per-pixel in the output space, this class
assumes that each pixel is an anchor to be compatible with the base class.
[1]: https://arxiv.org/abs/1904.07850
"""
def __init__(self, alpha=2.0, beta=4.0, sigmoid_clip_value=1e-4):
"""Constructor.
Args:
      alpha: Focusing parameter of the focal loss. Increasing this will
        decrease the loss contribution of the well-classified examples.
      beta: The local penalty reduction factor. Increasing this will decrease
        the contribution of loss due to negative pixels near the keypoint.
      sigmoid_clip_value: The sigmoid operation used internally will be clipped
        to the range [sigmoid_clip_value, 1 - sigmoid_clip_value].
"""
self._alpha = alpha
self._beta = beta
self._sigmoid_clip_value = sigmoid_clip_value
super(PenaltyReducedLogisticFocalLoss, self).__init__()
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
In all input tensors, `num_anchors` is the total number of pixels in the
    output space.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted unscaled logits for each class.
The function will compute sigmoid on this tensor internally.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing a tensor with the 'splatted' keypoints,
        possibly rendered with a Gaussian kernel. This function assumes that
        the target values lie in [0, 1].
      weights: a float tensor of shape [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
is_present_tensor = tf.math.equal(target_tensor, 1.0)
prediction_tensor = tf.clip_by_value(tf.sigmoid(prediction_tensor),
self._sigmoid_clip_value,
1 - self._sigmoid_clip_value)
positive_loss = (tf.math.pow((1 - prediction_tensor), self._alpha)*
tf.math.log(prediction_tensor))
negative_loss = (tf.math.pow((1 - target_tensor), self._beta)*
tf.math.pow(prediction_tensor, self._alpha)*
tf.math.log(1 - prediction_tensor))
loss = -tf.where(is_present_tensor, positive_loss, negative_loss)
return loss * weights
class L1LocalizationLoss(Loss):
"""L1 loss or absolute difference.
  When used in a per-pixel manner, each pixel should be treated as an anchor.
"""
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors]
representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors]
representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
return tf.losses.absolute_difference(
target_tensor,
prediction_tensor,
weights=weights,
loss_collection=None,
reduction=tf.losses.Reduction.NONE
)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/losses.py | losses.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor 'no_match'.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one wants to treat the
anchor neither as a positive example (match) nor as a negative example
(no_match).
The Match class is used to store the match results and it provides simple APIs
to query the results.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow.compat.v1 as tf
from object_detection.utils import ops
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results, use_matmul_gather=False):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
use_matmul_gather: Use matrix multiplication based gather instead of
standard tf.gather. (Default: False).
Raises:
      ValueError: if match_results does not have rank 1 or is not an
        int32 tensor
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
    if match_results.dtype != tf.int32:
      raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
self._gather_op = tf.gather
if use_matmul_gather:
self._gather_op = ops.matmul_gather_on_zeroth_axis
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(self.matched_column_indices())
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.size(self.unmatched_column_indices())
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the colum is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(self.ignored_column_indices())
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
    The indices returned by this op are ordered so as to be in correspondence
    with the output of matched_column_indices(). For example if
    self.matched_column_indices() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
self._gather_op(tf.cast(self._match_results, dtype=tf.float32),
self.matched_column_indices()))
def num_matched_rows(self):
"""Returns number (int32 scalar tensor) of matched rows."""
unique_rows, _ = tf.unique(self.matched_row_indices())
return tf.size(unique_rows)
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value,
ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
    Note that input_tensor.shape[1:] must match unmatched_value.shape
    and ignored_value.shape.
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat(
[tf.stack([ignored_value, unmatched_value]),
input_tensor],
axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = self._gather_op(input_tensor, gather_indices)
return gathered_tensor
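# Worked example of gather_based_on_match semantics (hedged, toy values):
# with match_results = [1, -2, -1], input_tensor = [[10.], [20.]],
# unmatched_value = [0.] and ignored_value = [-1.], gather_indices becomes
# [3, 0, 1] into the concatenated tensor [ignored, unmatched, inputs...],
# so the result is [[20.], [-1.], [0.]]: column 0 takes row 1's value,
# column 1 is ignored, and column 2 is unmatched.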
class Matcher(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class for matcher.
"""
def __init__(self, use_matmul_gather=False):
"""Constructs a Matcher.
Args:
use_matmul_gather: Force constructed match objects to use matrix
multiplication based gather instead of standard tf.gather.
(Default: False).
"""
self._use_matmul_gather = use_matmul_gather
def match(self, similarity_matrix, valid_rows=None, scope=None):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
valid_rows: A boolean tensor of shape [N] indicating the rows that are
valid for matching.
scope: Op scope name. Defaults to 'Match' if None.
Returns:
A Match object with the results of matching.
"""
with tf.name_scope(scope, 'Match') as scope:
if valid_rows is None:
valid_rows = tf.ones(tf.shape(similarity_matrix)[0], dtype=tf.bool)
return Match(self._match(similarity_matrix, valid_rows),
self._use_matmul_gather)
@abc.abstractmethod
def _match(self, similarity_matrix, valid_rows):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
valid_rows: A boolean tensor of shape [N] indicating the rows that are
valid for matching.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
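# A minimal, hedged sketch of a concrete Matcher (illustrative only; the
# class below is hypothetical and not part of the original module):
class _ExampleArgMaxMatcher(Matcher):
  """Matches every column to its highest-similarity valid row.
  This toy implementation never emits -1 (unmatched) or -2 (ignored); real
  matchers in this codebase additionally apply similarity thresholds to
  produce those labels.
  """
  def _match(self, similarity_matrix, valid_rows):
    # Push invalid rows to a very low similarity so they can never win the
    # column-wise argmax.
    num_columns = tf.shape(similarity_matrix)[1]
    valid_mask = tf.tile(tf.expand_dims(valid_rows, 1), [1, num_columns])
    masked_similarity = tf.where(
        valid_mask, similarity_matrix,
        -1e9 * tf.ones_like(similarity_matrix))
    return tf.cast(tf.argmax(masked_similarity, axis=0), tf.int32)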
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/matcher.py | matcher.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.third_party.tensorflow_models.object_detection.core.batch_multiclass_nms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.core import post_processing
from object_detection.utils import test_case
class BatchMulticlassNonMaxSuppressionTest(test_case.TestCase,
parameterized.TestCase):
def test_batch_multiclass_nms_with_batch_size_1(self):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]],
[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]], np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = [[[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 999, 2, 1004],
[0, 100, 1, 101]]]
exp_nms_scores = [[.95, .9, .85, .3]]
exp_nms_classes = [[0, 0, 1, 0]]
def graph_fn(boxes, scores):
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size,
max_total_size=max_output_size)
self.assertIsNone(nmsed_masks)
self.assertIsNone(nmsed_additional_fields)
return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections)
(nmsed_boxes, nmsed_scores, nmsed_classes,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertEqual(num_detections, [4])
def test_batch_iou_with_negative_data(self):
def graph_fn():
boxes = tf.constant([[[0, -0.01, 0.1, 1.1], [0, 0.2, 0.2, 5.0],
[0, -0.01, 0.1, 1.], [-1, -1, -1, -1]]], tf.float32)
iou = post_processing.batch_iou(boxes, boxes)
return iou
iou = self.execute_cpu(graph_fn, [])
expected_iou = [[[0.99999994, 0.0917431, 0.9099099, -1.],
[0.0917431, 1., 0.08154944, -1.],
[0.9099099, 0.08154944, 1., -1.], [-1., -1., -1., -1.]]]
self.assertAllClose(iou, expected_iou)
@parameterized.parameters(False, True)
def test_batch_multiclass_nms_with_batch_size_2(self, use_dynamic_map_fn):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]]],
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0]],
[[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]], np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = np.array([[[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 999, 2, 1004],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101],
[0, 0, 0, 0]]])
exp_nms_scores = np.array([[.95, .9, 0, 0],
[.85, .5, .3, 0]])
exp_nms_classes = np.array([[0, 0, 0, 0],
[1, 0, 0, 0]])
def graph_fn(boxes, scores):
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size,
max_total_size=max_output_size,
use_dynamic_map_fn=use_dynamic_map_fn)
self.assertIsNone(nmsed_masks)
self.assertIsNone(nmsed_additional_fields)
# Check static shapes
self.assertAllEqual(nmsed_boxes.shape.as_list(),
exp_nms_corners.shape)
self.assertAllEqual(nmsed_scores.shape.as_list(),
exp_nms_scores.shape)
self.assertAllEqual(nmsed_classes.shape.as_list(),
exp_nms_classes.shape)
self.assertEqual(num_detections.shape.as_list(), [2])
return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections)
(nmsed_boxes, nmsed_scores, nmsed_classes,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertAllClose(num_detections, [2, 3])
def test_batch_multiclass_nms_with_per_batch_clip_window(self):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]]],
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0]],
[[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]], np.float32)
clip_window = np.array([0., 0., 200., 200.], np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = np.array([[[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 10.1, 1, 11.1],
[0, 100, 1, 101],
[0, 0, 0, 0],
[0, 0, 0, 0]]])
exp_nms_scores = np.array([[.95, .9, 0, 0],
[.5, .3, 0, 0]])
exp_nms_classes = np.array([[0, 0, 0, 0],
[0, 0, 0, 0]])
def graph_fn(boxes, scores, clip_window):
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size, max_total_size=max_output_size,
clip_window=clip_window)
self.assertIsNone(nmsed_masks)
self.assertIsNone(nmsed_additional_fields)
# Check static shapes
self.assertAllEqual(nmsed_boxes.shape.as_list(),
exp_nms_corners.shape)
self.assertAllEqual(nmsed_scores.shape.as_list(),
exp_nms_scores.shape)
self.assertAllEqual(nmsed_classes.shape.as_list(),
exp_nms_classes.shape)
self.assertEqual(num_detections.shape.as_list(), [2])
return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections
(nmsed_boxes, nmsed_scores, nmsed_classes,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores, clip_window])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertAllClose(num_detections, [2, 2])
def test_batch_multiclass_nms_with_per_image_clip_window(self):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]]],
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0]],
[[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]], np.float32)
clip_window = np.array([[0., 0., 5., 5.],
[0., 0., 200., 200.]], np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = np.array([[[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 10.1, 1, 11.1],
[0, 100, 1, 101],
[0, 0, 0, 0],
[0, 0, 0, 0]]])
exp_nms_scores = np.array([[.9, 0., 0., 0.],
[.5, .3, 0, 0]])
exp_nms_classes = np.array([[0, 0, 0, 0],
[0, 0, 0, 0]])
def graph_fn(boxes, scores, clip_window):
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size, max_total_size=max_output_size,
clip_window=clip_window)
self.assertIsNone(nmsed_masks)
self.assertIsNone(nmsed_additional_fields)
# Check static shapes
self.assertAllEqual(nmsed_boxes.shape.as_list(),
exp_nms_corners.shape)
self.assertAllEqual(nmsed_scores.shape.as_list(),
exp_nms_scores.shape)
self.assertAllEqual(nmsed_classes.shape.as_list(),
exp_nms_classes.shape)
self.assertEqual(num_detections.shape.as_list(), [2])
return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections
(nmsed_boxes, nmsed_scores, nmsed_classes,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores, clip_window])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertAllClose(num_detections, [1, 2])
def test_batch_multiclass_nms_with_masks(self):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]]],
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0]],
[[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]], np.float32)
masks = np.array([[[[[0, 1], [2, 3]], [[1, 2], [3, 4]]],
[[[2, 3], [4, 5]], [[3, 4], [5, 6]]],
[[[4, 5], [6, 7]], [[5, 6], [7, 8]]],
[[[6, 7], [8, 9]], [[7, 8], [9, 10]]]],
[[[[8, 9], [10, 11]], [[9, 10], [11, 12]]],
[[[10, 11], [12, 13]], [[11, 12], [13, 14]]],
[[[12, 13], [14, 15]], [[13, 14], [15, 16]]],
[[[14, 15], [16, 17]], [[15, 16], [17, 18]]]]],
np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = np.array([[[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 999, 2, 1004],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101],
[0, 0, 0, 0]]])
exp_nms_scores = np.array([[.95, .9, 0, 0],
[.85, .5, .3, 0]])
exp_nms_classes = np.array([[0, 0, 0, 0],
[1, 0, 0, 0]])
exp_nms_masks = np.array([[[[6, 7], [8, 9]],
[[0, 1], [2, 3]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]],
[[[13, 14], [15, 16]],
[[8, 9], [10, 11]],
[[10, 11], [12, 13]],
[[0, 0], [0, 0]]]])
def graph_fn(boxes, scores, masks):
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size, max_total_size=max_output_size,
masks=masks)
self.assertIsNone(nmsed_additional_fields)
# Check static shapes
self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape)
self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape)
self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape)
self.assertAllEqual(nmsed_masks.shape.as_list(), exp_nms_masks.shape)
self.assertEqual(num_detections.shape.as_list(), [2])
return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
num_detections)
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores, masks])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertAllClose(num_detections, [2, 3])
self.assertAllClose(nmsed_masks, exp_nms_masks)
def test_batch_multiclass_nms_with_additional_fields(self):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]]],
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0]],
[[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]], np.float32)
keypoints = np.array(
[[[[6, 7], [8, 9]],
[[0, 1], [2, 3]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]],
[[[13, 14], [15, 16]],
[[8, 9], [10, 11]],
[[10, 11], [12, 13]],
[[0, 0], [0, 0]]]],
np.float32)
size = np.array(
[[[[6], [8]], [[0], [2]], [[0], [0]], [[0], [0]]],
[[[13], [15]], [[8], [10]], [[10], [12]], [[0], [0]]]], np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = np.array([[[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 999, 2, 1004],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101],
[0, 0, 0, 0]]])
exp_nms_scores = np.array([[.95, .9, 0, 0],
[.85, .5, .3, 0]])
exp_nms_classes = np.array([[0, 0, 0, 0],
[1, 0, 0, 0]])
exp_nms_additional_fields = {
'keypoints': np.array([[[[0, 0], [0, 0]],
[[6, 7], [8, 9]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]],
[[[10, 11], [12, 13]],
[[13, 14], [15, 16]],
[[8, 9], [10, 11]],
[[0, 0], [0, 0]]]])
}
exp_nms_additional_fields['size'] = np.array([[[[0], [0]], [[6], [8]],
[[0], [0]], [[0], [0]]],
[[[10], [12]], [[13], [15]],
[[8], [10]], [[0], [0]]]])
def graph_fn(boxes, scores, keypoints, size):
additional_fields = {'keypoints': keypoints, 'size': size}
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size, max_total_size=max_output_size,
additional_fields=additional_fields)
self.assertIsNone(nmsed_masks)
# Check static shapes
self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape)
self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape)
self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape)
self.assertEqual(len(nmsed_additional_fields),
len(exp_nms_additional_fields))
for key in exp_nms_additional_fields:
self.assertAllEqual(nmsed_additional_fields[key].shape.as_list(),
exp_nms_additional_fields[key].shape)
self.assertEqual(num_detections.shape.as_list(), [2])
return (nmsed_boxes, nmsed_scores, nmsed_classes,
nmsed_additional_fields['keypoints'],
nmsed_additional_fields['size'],
num_detections)
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_keypoints, nmsed_size,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores, keypoints,
size])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertAllClose(nmsed_keypoints,
exp_nms_additional_fields['keypoints'])
self.assertAllClose(nmsed_size,
exp_nms_additional_fields['size'])
self.assertAllClose(num_detections, [2, 3])
def test_batch_multiclass_nms_with_masks_and_num_valid_boxes(self):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]]],
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0]],
[[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]], np.float32)
masks = np.array([[[[[0, 1], [2, 3]], [[1, 2], [3, 4]]],
[[[2, 3], [4, 5]], [[3, 4], [5, 6]]],
[[[4, 5], [6, 7]], [[5, 6], [7, 8]]],
[[[6, 7], [8, 9]], [[7, 8], [9, 10]]]],
[[[[8, 9], [10, 11]], [[9, 10], [11, 12]]],
[[[10, 11], [12, 13]], [[11, 12], [13, 14]]],
[[[12, 13], [14, 15]], [[13, 14], [15, 16]]],
[[[14, 15], [16, 17]], [[15, 16], [17, 18]]]]],
np.float32)
num_valid_boxes = np.array([1, 1], np.int32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = [[[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 10.1, 1, 11.1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_nms_scores = [[.9, 0, 0, 0],
[.5, 0, 0, 0]]
exp_nms_classes = [[0, 0, 0, 0],
[0, 0, 0, 0]]
exp_nms_masks = [[[[0, 1], [2, 3]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]],
[[[8, 9], [10, 11]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]]]
def graph_fn(boxes, scores, masks, num_valid_boxes):
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size, max_total_size=max_output_size,
masks=masks, num_valid_boxes=num_valid_boxes)
self.assertIsNone(nmsed_additional_fields)
return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
num_detections)
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores, masks,
num_valid_boxes])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertAllClose(num_detections, [1, 1])
self.assertAllClose(nmsed_masks, exp_nms_masks)
def test_batch_multiclass_nms_with_additional_fields_and_num_valid_boxes(
self):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]]],
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0]],
[[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]], np.float32)
keypoints = np.array(
[[[[6, 7], [8, 9]],
[[0, 1], [2, 3]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]],
[[[13, 14], [15, 16]],
[[8, 9], [10, 11]],
[[10, 11], [12, 13]],
[[0, 0], [0, 0]]]],
np.float32)
size = np.array(
[[[[7], [9]], [[1], [3]], [[0], [0]], [[0], [0]]],
[[[14], [16]], [[9], [11]], [[11], [13]], [[0], [0]]]], np.float32)
num_valid_boxes = np.array([1, 1], np.int32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = [[[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 10.1, 1, 11.1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_nms_scores = [[.9, 0, 0, 0],
[.5, 0, 0, 0]]
exp_nms_classes = [[0, 0, 0, 0],
[0, 0, 0, 0]]
exp_nms_additional_fields = {
'keypoints': np.array([[[[6, 7], [8, 9]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]],
[[[13, 14], [15, 16]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]]])
}
exp_nms_additional_fields['size'] = np.array([[[[7], [9]], [[0], [0]],
[[0], [0]], [[0], [0]]],
[[[14], [16]], [[0], [0]],
[[0], [0]], [[0], [0]]]])
def graph_fn(boxes, scores, keypoints, size, num_valid_boxes):
additional_fields = {'keypoints': keypoints, 'size': size}
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size, max_total_size=max_output_size,
num_valid_boxes=num_valid_boxes,
additional_fields=additional_fields)
self.assertIsNone(nmsed_masks)
return (nmsed_boxes, nmsed_scores, nmsed_classes,
nmsed_additional_fields['keypoints'],
nmsed_additional_fields['size'], num_detections)
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_keypoints, nmsed_size,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores, keypoints,
size, num_valid_boxes])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertAllClose(nmsed_keypoints,
exp_nms_additional_fields['keypoints'])
self.assertAllClose(nmsed_size,
exp_nms_additional_fields['size'])
self.assertAllClose(num_detections, [1, 1])
def test_combined_nms_with_batch_size_2(self):
"""Test use_combined_nms."""
boxes = np.array([[[[0, 0, 0.1, 0.1], [0, 0, 0.1, 0.1]],
[[0, 0.01, 1, 0.11], [0, 0.6, 0.1, 0.7]],
[[0, -0.01, 0.1, 0.09], [0, -0.1, 0.1, 0.09]],
[[0, 0.11, 0.1, 0.2], [0, 0.11, 0.1, 0.2]]],
[[[0, 0, 0.2, 0.2], [0, 0, 0.2, 0.2]],
[[0, 0.02, 0.2, 0.22], [0, 0.02, 0.2, 0.22]],
[[0, -0.02, 0.2, 0.19], [0, -0.02, 0.2, 0.19]],
[[0, 0.21, 0.2, 0.3], [0, 0.21, 0.2, 0.3]]]],
np.float32)
scores = np.array([[[.1, 0.9], [.75, 0.8],
[.6, 0.3], [0.95, 0.1]],
[[.1, 0.9], [.75, 0.8],
[.6, .3], [.95, .1]]], np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 3
exp_nms_corners = np.array([[[0, 0.11, 0.1, 0.2],
[0, 0, 0.1, 0.1],
[0, 0.6, 0.1, 0.7]],
[[0, 0.21, 0.2, 0.3],
[0, 0, 0.2, 0.2],
[0, 0.02, 0.2, 0.22]]])
exp_nms_scores = np.array([[.95, .9, 0.8],
[.95, .9, .75]])
exp_nms_classes = np.array([[0, 1, 1],
[0, 1, 0]])
def graph_fn(boxes, scores):
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size, max_total_size=max_output_size,
use_static_shapes=True,
use_combined_nms=True)
self.assertIsNone(nmsed_masks)
self.assertIsNone(nmsed_additional_fields)
return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections)
(nmsed_boxes, nmsed_scores, nmsed_classes,
num_detections) = self.execute_cpu(graph_fn, [boxes, scores])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertListEqual(num_detections.tolist(), [3, 3])
def test_batch_multiclass_nms_with_use_static_shapes(self):
boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]],
[[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]],
[[0, 10, 1, 11], [0, 10, 1, 11]]],
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]],
np.float32)
scores = np.array([[[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0]],
[[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]]],
np.float32)
clip_window = np.array([[0., 0., 5., 5.],
[0., 0., 200., 200.]],
np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = np.array([[[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 10.1, 1, 11.1],
[0, 100, 1, 101],
[0, 0, 0, 0],
[0, 0, 0, 0]]])
exp_nms_scores = np.array([[.9, 0., 0., 0.],
[.5, .3, 0, 0]])
exp_nms_classes = np.array([[0, 0, 0, 0],
[0, 0, 0, 0]])
def graph_fn(boxes, scores, clip_window):
(nmsed_boxes, nmsed_scores, nmsed_classes, _, _, num_detections
) = post_processing.batch_multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh,
max_size_per_class=max_output_size, clip_window=clip_window,
use_static_shapes=True)
return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections
(nmsed_boxes, nmsed_scores, nmsed_classes,
num_detections) = self.execute(graph_fn, [boxes, scores, clip_window])
for i in range(len(num_detections)):
self.assertAllClose(nmsed_boxes[i, 0:num_detections[i]],
exp_nms_corners[i, 0:num_detections[i]])
self.assertAllClose(nmsed_scores[i, 0:num_detections[i]],
exp_nms_scores[i, 0:num_detections[i]])
self.assertAllClose(nmsed_classes[i, 0:num_detections[i]],
exp_nms_classes[i, 0:num_detections[i]])
self.assertAllClose(num_detections, [1, 2])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/batch_multiclass_nms_test.py | batch_multiclass_nms_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.prefetcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import prefetcher
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class PrefetcherTest(tf.test.TestCase):
"""Test class for prefetcher."""
def test_prefetch_tensors_with_fully_defined_shapes(self):
with self.test_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
examples = tf.Variable(tf.constant(0, dtype=tf.int64))
counter = examples.count_up_to(num_batches)
image = tf.random_normal([batch_size, image_size,
image_size, 3],
dtype=tf.float32,
name='images')
label = tf.random_uniform([batch_size, 1], 0, 10,
dtype=tf.int32, name='labels')
prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter,
'image': image,
'label': label},
capacity=100)
tensor_dict = prefetch_queue.dequeue()
self.assertAllEqual(tensor_dict['image'].get_shape().as_list(),
[batch_size, image_size, image_size, 3])
self.assertAllEqual(tensor_dict['label'].get_shape().as_list(),
[batch_size, 1])
      tf.global_variables_initializer().run()
with slim.queues.QueueRunners(sess):
for _ in range(num_batches):
results = sess.run(tensor_dict)
          self.assertEqual(results['image'].shape,
                           (batch_size, image_size, image_size, 3))
          self.assertEqual(results['label'].shape, (batch_size, 1))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(tensor_dict)
def test_prefetch_tensors_with_partially_defined_shapes(self):
with self.test_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
examples = tf.Variable(tf.constant(0, dtype=tf.int64))
counter = examples.count_up_to(num_batches)
image = tf.random_normal([batch_size,
tf.Variable(image_size),
tf.Variable(image_size), 3],
dtype=tf.float32,
name='image')
image.set_shape([batch_size, None, None, 3])
label = tf.random_uniform([batch_size, tf.Variable(1)], 0,
10, dtype=tf.int32, name='label')
label.set_shape([batch_size, None])
prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter,
'image': image,
'label': label},
capacity=100)
tensor_dict = prefetch_queue.dequeue()
self.assertAllEqual(tensor_dict['image'].get_shape().as_list(),
[batch_size, None, None, 3])
self.assertAllEqual(tensor_dict['label'].get_shape().as_list(),
[batch_size, None])
      tf.global_variables_initializer().run()
with slim.queues.QueueRunners(sess):
for _ in range(num_batches):
results = sess.run(tensor_dict)
          self.assertEqual(results['image'].shape,
                           (batch_size, image_size, image_size, 3))
          self.assertEqual(results['label'].shape, (batch_size, 1))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(tensor_dict)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/prefetcher_tf1_test.py | prefetcher_tf1_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.densepose_ops."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import densepose_ops
from object_detection.utils import test_case
class DensePoseOpsTest(test_case.TestCase):
"""Tests for common DensePose operations."""
def test_scale(self):
def graph_fn():
dp_surface_coords = tf.constant([
[[0.0, 0.0, 0.1, 0.2], [100.0, 200.0, 0.3, 0.4]],
[[50.0, 120.0, 0.5, 0.6], [100.0, 140.0, 0.7, 0.8]]
])
y_scale = tf.constant(1.0 / 100)
x_scale = tf.constant(1.0 / 200)
output = densepose_ops.scale(dp_surface_coords, y_scale, x_scale)
return output
output = self.execute(graph_fn, [])
expected_dp_surface_coords = np.array([
[[0., 0., 0.1, 0.2], [1.0, 1.0, 0.3, 0.4]],
[[0.5, 0.6, 0.5, 0.6], [1.0, 0.7, 0.7, 0.8]]
])
self.assertAllClose(output, expected_dp_surface_coords)
def test_clip_to_window(self):
def graph_fn():
dp_surface_coords = tf.constant([
[[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]],
[[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]]
])
window = tf.constant([0.25, 0.25, 0.75, 0.75])
output = densepose_ops.clip_to_window(dp_surface_coords, window)
return output
output = self.execute(graph_fn, [])
expected_dp_surface_coords = np.array([
[[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]],
[[0.5, 0.25, 0.5, 0.6], [0.75, 0.75, 0.7, 0.8]]
])
self.assertAllClose(output, expected_dp_surface_coords)
def test_prune_outside_window(self):
def graph_fn():
dp_num_points = tf.constant([2, 0, 1])
dp_part_ids = tf.constant([[1, 1], [0, 0], [16, 0]])
dp_surface_coords = tf.constant([
[[0.9, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[0.8, 0.5, 0.6, 0.6], [0.5, 0.5, 0.7, 0.7]]
])
window = tf.constant([0.25, 0.25, 0.75, 0.75])
new_dp_num_points, new_dp_part_ids, new_dp_surface_coords = (
densepose_ops.prune_outside_window(dp_num_points, dp_part_ids,
dp_surface_coords, window))
return new_dp_num_points, new_dp_part_ids, new_dp_surface_coords
new_dp_num_points, new_dp_part_ids, new_dp_surface_coords = (
self.execute_cpu(graph_fn, []))
expected_dp_num_points = np.array([1, 0, 0])
expected_dp_part_ids = np.array([[1], [0], [0]])
expected_dp_surface_coords = np.array([
[[0.75, 0.75, 0.3, 0.4]],
[[0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0]]
])
self.assertAllEqual(new_dp_num_points, expected_dp_num_points)
self.assertAllEqual(new_dp_part_ids, expected_dp_part_ids)
self.assertAllClose(new_dp_surface_coords, expected_dp_surface_coords)
def test_change_coordinate_frame(self):
def graph_fn():
dp_surface_coords = tf.constant([
[[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]],
[[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]]
])
window = tf.constant([0.25, 0.25, 0.75, 0.75])
output = densepose_ops.change_coordinate_frame(dp_surface_coords, window)
return output
output = self.execute(graph_fn, [])
expected_dp_surface_coords = np.array([
[[0, 0.5, 0.1, 0.2], [1.0, 1.0, 0.3, 0.4]],
[[0.5, -0.5, 0.5, 0.6], [1.5, 1.5, 0.7, 0.8]]
])
self.assertAllClose(output, expected_dp_surface_coords)
def test_to_normalized_coordinates(self):
def graph_fn():
dp_surface_coords = tf.constant([
[[10., 30., 0.1, 0.2], [30., 45., 0.3, 0.4]],
[[20., 0., 0.5, 0.6], [40., 60., 0.7, 0.8]]
])
output = densepose_ops.to_normalized_coordinates(
dp_surface_coords, 40, 60)
return output
output = self.execute(graph_fn, [])
expected_dp_surface_coords = np.array([
[[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]],
[[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]]
])
self.assertAllClose(output, expected_dp_surface_coords)
def test_to_absolute_coordinates(self):
def graph_fn():
dp_surface_coords = tf.constant([
[[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]],
[[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]]
])
output = densepose_ops.to_absolute_coordinates(
dp_surface_coords, 40, 60)
return output
output = self.execute(graph_fn, [])
expected_dp_surface_coords = np.array([
[[10., 30., 0.1, 0.2], [30., 45., 0.3, 0.4]],
[[20., 0., 0.5, 0.6], [40., 60., 0.7, 0.8]]
])
self.assertAllClose(output, expected_dp_surface_coords)
def test_horizontal_flip(self):
part_ids_np = np.array([[1, 4], [0, 8]], dtype=np.int32)
surf_coords_np = np.array([
[[0.1, 0.7, 0.2, 0.4], [0.3, 0.8, 0.2, 0.4]],
[[0.0, 0.5, 0.8, 0.7], [0.6, 1.0, 0.7, 0.9]],
], dtype=np.float32)
def graph_fn():
part_ids = tf.constant(part_ids_np, dtype=tf.int32)
surf_coords = tf.constant(surf_coords_np, dtype=tf.float32)
flipped_part_ids, flipped_surf_coords = densepose_ops.flip_horizontal(
part_ids, surf_coords)
flipped_twice_part_ids, flipped_twice_surf_coords = (
densepose_ops.flip_horizontal(flipped_part_ids, flipped_surf_coords))
return (flipped_part_ids, flipped_surf_coords,
flipped_twice_part_ids, flipped_twice_surf_coords)
(flipped_part_ids, flipped_surf_coords, flipped_twice_part_ids,
flipped_twice_surf_coords) = self.execute(graph_fn, [])
expected_flipped_part_ids = [[1, 5], # 1->1, 4->5
[0, 9]] # 0->0, 8->9
expected_flipped_surf_coords_yx = np.array([
[[0.1, 1.0-0.7], [0.3, 1.0-0.8]],
[[0.0, 1.0-0.5], [0.6, 1.0-1.0]],
], dtype=np.float32)
self.assertAllEqual(expected_flipped_part_ids, flipped_part_ids)
self.assertAllClose(expected_flipped_surf_coords_yx,
flipped_surf_coords[:, :, 0:2])
self.assertAllEqual(part_ids_np, flipped_twice_part_ids)
self.assertAllClose(surf_coords_np, flipped_twice_surf_coords, rtol=1e-2,
atol=1e-2)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/densepose_ops_test.py | densepose_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
   provided RegionSimilarityCalculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import densepose_ops
from object_detection.core import keypoint_ops
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import hungarian_matcher
from object_detection.utils import shape_utils
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
ResizeMethod = tf2.image.ResizeMethod
_DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder_instance,
negative_class_weight=1.0):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder_instance: an object_detection.core.BoxCoder used to encode
matching groundtruth boxes with respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
        if matcher is not a Matcher or if box_coder_instance is not a BoxCoder
"""
if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if not isinstance(matcher, mat.Matcher):
raise ValueError('matcher must be a Matcher')
if not isinstance(box_coder_instance, box_coder.BoxCoder):
      raise ValueError('box_coder_instance must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder_instance
self._negative_class_weight = negative_class_weight
@property
def box_coder(self):
return self._box_coder
# TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
unmatched_class_label=None,
groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
    Anchors that are not matched to anything are given the classification
    target specified by the unmatched_class_label argument (a zero vector by
    default).
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1. Generally no
groundtruth boxes with zero weight match to any anchors as matchers are
aware of groundtruth weights. Additionally, `cls_weights` and
`reg_weights` are calculated using groundtruth weights as an added
safety.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: an int32 tensor of shape [num_anchors] containing result of anchor
groundtruth matching. Each position in the tensor indicates an anchor
and holds the following meaning:
(1) if match[i] >= 0, anchor i is matched with groundtruth match[i].
        (2) if match[i]=-1, anchor i is marked to be background.
(3) if match[i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be a BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be a BoxList')
if unmatched_class_label is None:
unmatched_class_label = tf.constant([0], tf.float32)
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
# set scores on the gt boxes
scores = 1 - groundtruth_labels[:, 0]
groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(groundtruth_weights, 0))
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
unmatched_class_label,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# convert cls_weights from per-anchor to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_shape = tf.shape(cls_weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), class_label_shape],
axis=0)
for _ in range(len(cls_targets.get_shape()[1:])):
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return (cls_targets, cls_weights, reg_targets, reg_weights,
match.match_results)
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
groundtruth_keypoints = groundtruth_boxes.get_field(
fields.BoxListFields.keypoints)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels,
unmatched_class_label, match):
"""Create classification targets for each anchor.
    Assigns to each anchor the classification target of the groundtruth label
    matched to it, as provided by match. Anchors that are not matched to
    anything are given the target unmatched_class_label.
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
    Positive (matched) anchors are associated with the weight of their matched
    groundtruth box, negative (unmatched) anchors are associated with
    negative_class_weight, and ignored anchors get weight zero. By default the
    negative class weight is 1.0, but it can be adjusted to handle class
    imbalance (which is almost always the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore its best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
negative_class_weight=1.0, use_matmul_gather=False):
"""Factory function for creating standard target assigners.
Args:
reference: string referencing the type of TargetAssigner.
stage: string denoting stage: {proposal, detection}.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0)
use_matmul_gather: whether to use matrix multiplication based gather which
are better suited for TPUs.
Returns:
TargetAssigner: desired target assigner.
Raises:
ValueError: if combination reference+stage is invalid.
"""
if reference == 'Multibox' and stage == 'proposal':
if tf_version.is_tf2():
raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.')
similarity_calc = sim_calc.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder()
elif reference == 'FasterRCNN' and stage == 'proposal':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
unmatched_threshold=0.3,
force_match_for_each_row=True,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FasterRCNN' and stage == 'detection':
similarity_calc = sim_calc.IouSimilarity()
# Uses all proposals with IOU < 0.5 as candidate negatives.
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
negatives_lower_than_unmatched=True,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FastRCNN':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.1,
force_match_for_each_row=False,
negatives_lower_than_unmatched=False,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder()
else:
raise ValueError('No valid combination of reference and stage.')
return TargetAssigner(similarity_calc, matcher, box_coder_instance,
negative_class_weight=negative_class_weight)
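# A usage sketch for the factory above (illustrative; the reference/stage
# strings must match one of the combinations handled in the function):
#
#   proposal_assigner = create_target_assigner('FasterRCNN', stage='proposal')
#   detection_assigner = create_target_assigner('FasterRCNN',
#                                               stage='detection')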
def batch_assign(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_targets_batch,
unmatched_class_label=None,
gt_weights_batch=None):
"""Batched assignment of classification and regression targets.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
      (2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_targets_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_targets_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
(cls_targets, cls_weights,
reg_targets, reg_weights, match) = target_assigner.assign(
anchors, gt_boxes, gt_class_targets, unmatched_class_label,
gt_weights)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
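# Illustrative call pattern for batch_assign (assumes `assigner` was built as
# in the sketches above, that all groundtruth lists have length batch_size,
# and that `anchors` may be a single BoxList shared across the batch):
#
#   (batch_cls_targets, batch_cls_weights, batch_reg_targets,
#    batch_reg_weights, batch_match) = batch_assign(
#        assigner, anchors, gt_box_batch, gt_class_targets_batch)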
# Assign an alias to avoid large refactor of existing users.
batch_assign_targets = batch_assign
def batch_get_targets(batch_match, groundtruth_tensor_list,
groundtruth_weights_list, unmatched_value,
unmatched_weight):
"""Returns targets based on anchor-groundtruth box matching results.
Args:
batch_match: An int32 tensor of shape [batch, num_anchors] containing the
result of target assignment returned by TargetAssigner.assign(..).
groundtruth_tensor_list: A list of groundtruth tensors of shape
[num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type.
groundtruth_weights_list: A list of weights, one per groundtruth tensor, of
shape [num_groundtruth].
unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as
groundtruth tensor containing target value for anchors that remain
unmatched.
unmatched_weight: Scalar weight to assign to anchors that remain unmatched.
Returns:
targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k]
containing targets for anchors.
weights: A float tensor of shape [batch, num_anchors] containing the weights
to assign to each target.
"""
match_list = tf.unstack(batch_match)
targets_list = []
weights_list = []
for match_tensor, groundtruth_tensor, groundtruth_weight in zip(
match_list, groundtruth_tensor_list, groundtruth_weights_list):
match_object = mat.Match(match_tensor)
targets = match_object.gather_based_on_match(
groundtruth_tensor,
unmatched_value=unmatched_value,
ignored_value=unmatched_value)
targets_list.append(targets)
weights = match_object.gather_based_on_match(
groundtruth_weight,
unmatched_value=unmatched_weight,
ignored_value=tf.zeros_like(unmatched_weight))
weights_list.append(weights)
return tf.stack(targets_list), tf.stack(weights_list)
def batch_assign_confidences(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_confidences_batch,
gt_weights_batch=None,
unmatched_class_label=None,
include_background_class=True,
implicit_class_weight=1.0):
"""Batched assignment of classification and regression targets.
  The differences between batch_assign_confidences and batch_assign_targets:
   - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
     tensor (high-dimensional) targets. 'batch_assign_confidences' only
     supports scalar (agnostic) and vector (multiclass) targets.
   - 'batch_assign_targets' assumes the input class tensor uses a binary
     one/K-hot encoding. 'batch_assign_confidences' takes class confidence
     scores as input, where 1 means an explicit positive class, 0 means an
     implicit negative class, and -1 means an explicit negative class.
   - 'batch_assign_confidences' assigns targets in the same way as
     'batch_assign_targets' except that it gives different weights to implicit
     and explicit classes. This allows the user to push negative gradients
     differently for implicit and explicit examples during training.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_confidences_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch. Note that in this tensor, 1 means explicit positive class,
-1 means explicit negative class, and 0 means implicit negative class.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_gt_boxes_i] containing weights for groundtruth boxes.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
include_background_class: whether or not gt_class_confidences_batch includes
the background class.
implicit_class_weight: the weight assigned to implicit examples.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
      (2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList, or if any element in gt_class_confidences_batch has rank > 2.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_confidences_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_confidences_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_confidences_batch)
for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_confidences_batch,
gt_weights_batch):
if (gt_class_confidences is not None and
len(gt_class_confidences.get_shape().as_list()) > 2):
      raise ValueError('The shape of the class target is not supported: %s' %
                       gt_class_confidences.get_shape())
cls_targets, _, reg_targets, _, match = target_assigner.assign(
anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
groundtruth_weights=gt_weights)
if include_background_class:
cls_targets_without_background = tf.slice(
cls_targets, [0, 1], [-1, -1])
else:
cls_targets_without_background = cls_targets
positive_mask = tf.greater(cls_targets_without_background, 0.0)
negative_mask = tf.less(cls_targets_without_background, 0.0)
explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
positive_anchors = tf.reduce_any(positive_mask, axis=-1)
regression_weights = tf.cast(positive_anchors, dtype=tf.float32)
regression_targets = (
reg_targets * tf.expand_dims(regression_weights, axis=-1))
regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
cls_targets_without_background = (
cls_targets_without_background *
(1 - tf.cast(negative_mask, dtype=tf.float32)))
cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast(
explicit_example_mask, dtype=tf.float32) + implicit_class_weight)
if include_background_class:
cls_weights_background = (
(1 - implicit_class_weight) * regression_weights_expanded
+ implicit_class_weight)
classification_weights = tf.concat(
[cls_weights_background, cls_weights_without_background], axis=-1)
cls_targets_background = 1 - regression_weights_expanded
classification_targets = tf.concat(
[cls_targets_background, cls_targets_without_background], axis=-1)
else:
classification_targets = cls_targets_without_background
classification_weights = cls_weights_without_background
cls_targets_list.append(classification_targets)
cls_weights_list.append(classification_weights)
reg_targets_list.append(regression_targets)
reg_weights_list.append(regression_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
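# Weight intuition for the confidence path above (illustrative numbers): with
# implicit_class_weight = 0.5, an explicitly labeled entry (confidence 1 or
# -1) receives weight (1 - 0.5) * 1 + 0.5 = 1.0, while an implicit negative
# (confidence 0) receives weight 0.5, so implicit negatives contribute half
# as much gradient as explicit labels.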
def _smallest_positive_root(a, b, c):
"""Returns the smallest positive root of a quadratic equation."""
discriminant = tf.sqrt(b ** 2 - 4 * a * c)
# TODO(vighneshb) We are currently using the slightly incorrect
# CenterNet implementation. The commented lines implement the fixed version
# in https://github.com/princeton-vl/CornerNet. Change the implementation
# after verifying it has no negative impact.
# root1 = (-b - discriminant) / (2 * a)
# root2 = (-b + discriminant) / (2 * a)
# return tf.where(tf.less(root1, 0), root2, root1)
return (-b + discriminant) / (2.0)
def max_distance_for_overlap(height, width, min_iou):
"""Computes how far apart bbox corners can lie while maintaining the iou.
Given a bounding box size, this function returns a lower bound on how far
apart the corners of another box can lie while still maintaining the given
IoU. The implementation is based on the `gaussian_radius` function in the
Objects as Points github repo: https://github.com/xingyizhou/CenterNet
Args:
height: A 1-D float Tensor representing height of the ground truth boxes.
width: A 1-D float Tensor representing width of the ground truth boxes.
min_iou: A float representing the minimum IoU desired.
Returns:
distance: A 1-D Tensor of distances, of the same length as the input
height and width tensors.
"""
# Given that the detected box is displaced at a distance `d`, the exact
# IoU value will depend on the angle at which each corner is displaced.
# We simplify our computation by assuming that each corner is displaced by
# a distance `d` in both x and y direction. This gives us a lower IoU than
# what is actually realizable and ensures that any box with corners less
# than `d` distance apart will always have an IoU greater than or equal
  # to `min_iou`.
# The following 3 cases can be worked on geometrically and come down to
# solving a quadratic inequality. In each case, to ensure `min_iou` we use
# the smallest positive root of the equation.
# Case where detected box is offset from ground truth and no box completely
# contains the other.
distance_detection_offset = _smallest_positive_root(
a=1, b=-(height + width),
c=width * height * ((1 - min_iou) / (1 + min_iou))
)
# Case where detection is smaller than ground truth and completely contained
# in it.
distance_detection_in_gt = _smallest_positive_root(
a=4, b=-2 * (height + width),
c=(1 - min_iou) * width * height
)
# Case where ground truth is smaller than detection and completely contained
# in it.
distance_gt_in_detection = _smallest_positive_root(
a=4 * min_iou, b=(2 * min_iou) * (width + height),
c=(min_iou - 1) * width * height
)
return tf.reduce_min([distance_detection_offset,
distance_gt_in_detection,
distance_detection_in_gt], axis=0)
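# Worked example (illustrative): for a groundtruth box with
# height = width = 10 and min_iou = 0.7, the "offset" case above amounts to
# solving d**2 - 20 * d + 100 * (1 - 0.7) / (1 + 0.7) = 0 for d; the two
# containment cases yield analogous quadratics, and the minimum of the three
# candidate distances is returned.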
def get_batch_predictions_from_indices(batch_predictions, indices):
"""Gets the values of predictions in a batch at the given indices.
The indices are expected to come from the offset targets generation functions
in this library. The returned value is intended to be used inside a loss
function.
Args:
batch_predictions: A tensor of shape [batch_size, height, width, channels]
or [batch_size, height, width, class, channels] for class-specific
features (e.g. keypoint joint offsets).
indices: A tensor of shape [num_instances, 3] for single class features or
[num_instances, 4] for multiple classes features.
Returns:
values: A tensor of shape [num_instances, channels] holding the predicted
values at the given indices.
"""
# Note, gather_nd (and its gradient scatter_nd) runs significantly slower (on
# TPU) than gather with flattened inputs, so reshape the tensor, flatten the
# indices, and run gather.
shape = shape_utils.combined_static_and_dynamic_shape(batch_predictions)
# [B, H, W, C] -> [H*W, W, 1] or [B, H, W, N, C] -> [H*W*N, W*N, N, 1]
rev_cum_interior_indices = tf.reverse(tf.math.cumprod(shape[-2:0:-1]), [0])
rev_cum_interior_indices = tf.concat([rev_cum_interior_indices, [1]], axis=0)
# Compute flattened indices and gather.
flattened_inds = tf.linalg.matmul(
indices, rev_cum_interior_indices[:, tf.newaxis])[:, 0]
batch_predictions_2d = tf.reshape(batch_predictions, [-1, shape[-1]])
return tf.gather(batch_predictions_2d, flattened_inds, axis=0)
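# Shape walk-through (illustrative): for batch_predictions of shape
# [B, H, W, C], rev_cum_interior_indices is [H*W, W, 1], so an index triple
# (b, y, x) flattens to b*H*W + y*W + x, which addresses the matching row of
# the reshaped [B*H*W, C] tensor gathered above.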
def _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap):
"""Computes the standard deviation of the Gaussian kernel from box size.
Args:
boxes_height: A 1D tensor with shape [num_instances] representing the height
of each box.
boxes_width: A 1D tensor with shape [num_instances] representing the width
of each box.
min_overlap: The minimum IOU overlap that boxes need to have to not be
penalized.
Returns:
A 1D tensor with shape [num_instances] representing the computed Gaussian
sigma for each of the box.
"""
  # The odd-sized diameter computed below is divided by 6 (i.e. the radius by
  # 3) so that points farther than the computed distance lie beyond ~3 sigma
  # of the Gaussian, i.e. past the >99% CDF mark.
sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap)
sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0
return sigma
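# Numeric example (illustrative): a radius of 8.0 returned by
# max_distance_for_overlap gives sigma = (2 * 8 + 1) / 6 = 17 / 6 ~= 2.83,
# i.e. the 17-pixel window spans roughly +/-3 sigma of the Gaussian.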
def _preprocess_keypoints_and_weights(out_height, out_width, keypoints,
class_onehot, class_weights,
keypoint_weights, class_id,
keypoint_indices):
"""Preprocesses the keypoints and the corresponding keypoint weights.
This function performs several common steps to preprocess the keypoints and
keypoint weights features, including:
1) Select the subset of keypoints based on the keypoint indices, fill the
keypoint NaN values with zeros and convert to absolute coordinates.
2) Generate the weights of the keypoint using the following information:
a. The class of the instance.
b. The NaN value of the keypoint coordinates.
c. The provided keypoint weights.
Args:
out_height: An integer or an integer tensor indicating the output height
of the model.
out_width: An integer or an integer tensor indicating the output width of
the model.
keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2]
      representing the original groundtruth keypoint coordinates.
class_onehot: A float tensor of shape [num_instances, num_classes]
containing the class targets with the 0th index assumed to map to the
first non-background class.
class_weights: A float tensor of shape [num_instances] containing weights
for groundtruth instances.
keypoint_weights: A float tensor of shape
[num_instances, num_total_keypoints] representing the weights of each
keypoints.
class_id: int, the ID of the class (0-indexed) that contains the target
keypoints to consider in this task.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints that should be considered in this task.
Returns:
A tuple of two tensors:
    keypoints_absolute: A float tensor of shape
[num_instances, num_keypoints, 2] which is the selected and updated
keypoint coordinates.
keypoint_weights: A float tensor of shape [num_instances, num_keypoints]
representing the updated weight of each keypoint.
"""
# Select the targets keypoints by their type ids and generate the mask
# of valid elements.
valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoints,
class_id=class_id,
class_onehot=class_onehot,
class_weights=class_weights,
keypoint_indices=keypoint_indices)
# Keypoint coordinates in absolute coordinate system.
# The shape of the tensors: [num_instances, num_keypoints, 2].
keypoints_absolute = keypoint_ops.to_absolute_coordinates(
keypoints, out_height, out_width)
# Assign default weights for the keypoints.
if keypoint_weights is None:
keypoint_weights = tf.ones_like(keypoints[:, :, 0])
else:
keypoint_weights = tf.gather(
keypoint_weights, indices=keypoint_indices, axis=1)
keypoint_weights = keypoint_weights * valid_mask
return keypoints_absolute, keypoint_weights
class CenterNetCenterHeatmapTargetAssigner(object):
"""Wrapper to compute the object center heatmap."""
def __init__(self,
stride,
min_overlap=0.7,
compute_heatmap_sparse=False,
keypoint_class_id=None,
keypoint_indices=None,
keypoint_weights_for_center=None):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
min_overlap: The minimum IOU overlap that boxes need to have to not be
penalized.
compute_heatmap_sparse: bool, indicating whether or not to use the sparse
version of the Op that computes the heatmap. The sparse version scales
better with number of classes, but in some cases is known to cause
OOM error. See (b/170989061).
keypoint_class_id: int, the ID of the class (0-indexed) that contains the
target keypoints to consider in this task.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
      keypoint_weights_for_center: The keypoint weights used for calculating
        the location of the object center. The number of weights needs to be
        the same as the number of keypoints. The object center is calculated as
        the weighted mean of the keypoint locations. If not provided, the
        object center is determined by the center of the bounding box (default
        behavior).
"""
self._stride = stride
self._min_overlap = min_overlap
self._compute_heatmap_sparse = compute_heatmap_sparse
self._keypoint_class_id = keypoint_class_id
self._keypoint_indices = keypoint_indices
self._keypoint_weights_for_center = keypoint_weights_for_center
def assign_center_targets_from_boxes(self,
height,
width,
gt_boxes_list,
gt_classes_list,
gt_weights_list=None):
"""Computes the object center heatmap target.
Args:
height: int, height of input to the model. This is used to
determine the height of the output.
width: int, width of the input to the model. This is used to
determine the width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The box coordinates are expected in normalized coordinates.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_weights_list: A list of float tensors with shape [num_boxes]
representing the weight of each groundtruth detection box.
Returns:
heatmap: A Tensor of size [batch_size, output_height, output_width,
num_classes] representing the per class center heatmap. output_height
and output_width are computed by dividing the input height and width by
the stride specified during initialization.
"""
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)
heatmaps = []
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
# TODO(vighneshb) Replace the for loop with a batch version.
for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list,
gt_weights_list):
boxes = box_list.BoxList(boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has shape
      # [num_instances].
(y_center, x_center, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
# Compute the sigma from box size. The tensor shape: [num_instances].
sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width,
self._min_overlap)
# Apply the Gaussian kernel to the center coordinates. Returned heatmap
# has shape of [out_height, out_width, num_classes]
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=y_center,
x_coordinates=x_center,
sigma=sigma,
channel_onehot=class_targets,
channel_weights=weights,
sparse=self._compute_heatmap_sparse)
heatmaps.append(heatmap)
# Return the stacked heatmaps over the batch.
return tf.stack(heatmaps, axis=0)
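  # Illustrative call (assumes normalized groundtruth boxes and one-hot class
  # tensors; the sizes are only an example):
  #
  #   heatmap_assigner = CenterNetCenterHeatmapTargetAssigner(stride=4)
  #   center_heatmap = heatmap_assigner.assign_center_targets_from_boxes(
  #       height=512, width=512, gt_boxes_list=gt_boxes_list,
  #       gt_classes_list=gt_classes_list)
  #   # center_heatmap has shape [batch_size, 128, 128, num_classes].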
def assign_center_targets_from_keypoints(self,
height,
width,
gt_classes_list,
gt_keypoints_list,
gt_weights_list=None,
gt_keypoints_weights_list=None):
"""Computes the object center heatmap target using keypoint locations.
Args:
height: int, height of input to the model. This is used to
determine the height of the output.
width: int, width of the input to the model. This is used to
determine the width of the output.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
      gt_keypoints_list: A list of float tensors with shape [num_instances,
        num_total_keypoints, 2] representing the groundtruth keypoint
        coordinates for each sample in the batch, in normalized coordinates.
gt_weights_list: A list of float tensors with shape [num_boxes]
representing the weight of each groundtruth detection box.
      gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
        shape [num_instances, num_total_keypoints] representing the weights of
        each keypoint. If not provided, then all non-NaN keypoints will be
        equally weighted.
Returns:
heatmap: A Tensor of size [batch_size, output_height, output_width,
num_classes] representing the per class center heatmap. output_height
and output_width are computed by dividing the input height and width by
the stride specified during initialization.
"""
assert (self._keypoint_weights_for_center is not None and
self._keypoint_class_id is not None and
self._keypoint_indices is not None)
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)
heatmaps = []
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
for keypoints, classes, kp_weights, weights in zip(
gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=out_height,
out_width=out_width,
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._keypoint_class_id,
keypoint_indices=self._keypoint_indices)
# Update the keypoint weights by the specified keypoints weights.
kp_loc_weights = tf.constant(
self._keypoint_weights_for_center, dtype=tf.float32)
updated_kp_weights = kp_weights * kp_loc_weights[tf.newaxis, :]
# Obtain the sum of the weights for each instance.
# instance_weight_sum has shape: [num_instance].
instance_weight_sum = tf.reduce_sum(updated_kp_weights, axis=1)
# Weight the keypoint coordinates by updated_kp_weights.
# weighted_keypoints has shape: [num_instance, num_keypoints, 2]
weighted_keypoints = keypoints_absolute * tf.expand_dims(
updated_kp_weights, axis=2)
# Compute the mean of the keypoint coordinates over the weighted
# keypoints.
# keypoint_mean has shape: [num_instance, 2]
keypoint_mean = tf.math.divide(
tf.reduce_sum(weighted_keypoints, axis=1),
tf.expand_dims(instance_weight_sum, axis=-1))
# Replace the NaN values (due to divided by zeros in the above operation)
# by 0.0 where the sum of instance weight is zero.
# keypoint_mean has shape: [num_instance, 2]
keypoint_mean = tf.where(
tf.stack([instance_weight_sum, instance_weight_sum], axis=1) > 0.0,
keypoint_mean, tf.zeros_like(keypoint_mean))
# Compute the distance from each keypoint to the mean location using
# broadcasting and weighted by updated_kp_weights.
# keypoint_dist has shape: [num_instance, num_keypoints]
keypoint_mean = tf.expand_dims(keypoint_mean, axis=1)
keypoint_dist = tf.math.sqrt(
tf.reduce_sum(
tf.math.square(keypoints_absolute - keypoint_mean), axis=2))
keypoint_dist = keypoint_dist * updated_kp_weights
# Compute the average of the distances from each keypoint to the mean
# location and update the average value by zero when the instance weight
# is zero.
# avg_radius has shape: [num_instance]
avg_radius = tf.math.divide(
tf.reduce_sum(keypoint_dist, axis=1), instance_weight_sum)
avg_radius = tf.where(
instance_weight_sum > 0.0, avg_radius, tf.zeros_like(avg_radius))
# Update the class instance weight. If the instance doesn't contain enough
# valid keypoint values (i.e. instance_weight_sum == 0.0), then set the
# instance weight to zero.
# updated_class_weights has shape: [num_instance]
updated_class_weights = tf.where(
instance_weight_sum > 0.0, weights, tf.zeros_like(weights))
      # Compute the sigma from the average distance. We use 2 * average
      # distance to approximate the width/height of the bounding box.
# sigma has shape: [num_instances].
sigma = _compute_std_dev_from_box_size(2 * avg_radius, 2 * avg_radius,
self._min_overlap)
# Apply the Gaussian kernel to the center coordinates. Returned heatmap
# has shape of [out_height, out_width, num_classes]
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=keypoint_mean[:, 0, 0],
x_coordinates=keypoint_mean[:, 0, 1],
sigma=sigma,
channel_onehot=classes,
channel_weights=updated_class_weights,
sparse=self._compute_heatmap_sparse)
heatmaps.append(heatmap)
# Return the stacked heatmaps over the batch.
return tf.stack(heatmaps, axis=0)
class CenterNetBoxTargetAssigner(object):
"""Wrapper to compute target tensors for the object detection task.
This class has methods that take as input a batch of ground truth tensors
(in the form of a list) and return the targets required to train the object
detection task.
"""
def __init__(self, stride):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
"""
self._stride = stride
def assign_size_and_offset_targets(self,
height,
width,
gt_boxes_list,
gt_weights_list=None):
"""Returns the box height/width and center offset targets and their indices.
The returned values are expected to be used with predicted tensors
of size (batch_size, height//self._stride, width//self._stride, 2). The
predicted values at the relevant indices can be retrieved with the
get_batch_predictions_from_indices function.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_weights_list: A list of tensors with shape [num_boxes] corresponding to
the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_boxes, 3] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively.
batch_box_height_width: a float tensor of shape [num_boxes, 2] holding
expected height and width of each box in the output space.
batch_offsets: a float tensor of shape [num_boxes, 2] holding the
expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_boxes] indicating the
weight of each prediction.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_box_height_width = []
batch_weights = []
batch_offsets = []
for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has shape
      # [num_boxes].
(y_center, x_center, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_boxes, 2]
# indices: [num_boxes, 2]
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_box_height_width.append(
tf.stack([boxes_height, boxes_width], axis=1))
batch_weights.append(weights)
batch_offsets.append(offsets)
batch_indices = tf.concat(batch_indices, axis=0)
batch_box_height_width = tf.concat(batch_box_height_width, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_box_height_width, batch_offsets, batch_weights)
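  # Illustrative pairing with get_batch_predictions_from_indices (assumes
  # `size_pred` and `offset_pred` are [batch, out_h, out_w, 2] model outputs):
  #
  #   (indices, hw_targets, offset_targets, weights) = (
  #       box_assigner.assign_size_and_offset_targets(h, w, gt_boxes_list))
  #   size_preds = get_batch_predictions_from_indices(size_pred, indices)
  #   offset_preds = get_batch_predictions_from_indices(offset_pred, indices)
  #   # size_preds/offset_preds have shape [num_boxes, 2] and can be compared
  #   # against hw_targets/offset_targets in a loss weighted by `weights`.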
# TODO(yuhuic): Update this class to handle the instance/keypoint weights.
# Currently those weights are used as "mask" to indicate whether an
# instance/keypoint should be considered or not (expecting only either 0 or 1
# value). In reality, the weights can be any value and this class should handle
# those values properly.
class CenterNetKeypointTargetAssigner(object):
"""Wrapper to compute target tensors for the CenterNet keypoint estimation.
This class has methods that take as input a batch of groundtruth tensors
(in the form of a list) and returns the targets required to train the
CenterNet model for keypoint estimation. Specifically, the class methods
expect the groundtruth in the following formats (consistent with the
standard Object Detection API). Note that usually the groundtruth tensors are
packed with a list which represents the batch dimension:
gt_classes_list: [Required] a list of 2D tf.float32 one-hot
(or k-hot) tensors of shape [num_instances, num_classes] containing the
class targets with the 0th index assumed to map to the first non-background
class.
gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of
shape [num_instances, num_total_keypoints, 2] containing keypoint
coordinates. Note that the "num_total_keypoints" should be the sum of the
num_keypoints over all possible keypoint types, e.g. human pose, face.
For example, if a dataset contains both 17 human pose keypoints and 5 face
keypoints, then num_total_keypoints = 17 + 5 = 22.
    If an instance contains only a subset of keypoints (e.g. human pose
    keypoints but not face keypoints), the face keypoints will be filled with
    zeros.
Also note that keypoints are assumed to be provided in normalized
coordinates and missing keypoints should be encoded as NaN.
  gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
    shape [num_instances, num_total_keypoints] representing the weights of each
    keypoint. If not provided, then all non-NaN keypoints will be equally
    weighted.
gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
[num_instances, 4] containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
assumed to be normalized and clipped relative to the image window with
y_min <= y_max and x_min <= x_max.
Note that the boxes are only used to compute the center targets but are not
considered as required output of the keypoint task. If the boxes were not
provided, the center targets will be inferred from the keypoints
[not implemented yet].
gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
[num_instances] containing weights for groundtruth boxes. Only useful when
gt_boxes_list is also provided.
"""
def __init__(self,
stride,
class_id,
keypoint_indices,
keypoint_std_dev=None,
per_keypoint_offset=False,
peak_radius=0,
compute_heatmap_sparse=False,
per_keypoint_depth=False):
"""Initializes a CenterNet keypoints target assigner.
Args:
stride: int, the stride of the network in output pixels.
class_id: int, the ID of the class (0-indexed) that contains the target
keypoints to consider in this task. For example, if the task is human
pose estimation, the class id should correspond to the "human" class.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
keypoint_std_dev: A list of floats represent the standard deviation of the
Gaussian kernel used to generate the keypoint heatmap (in the unit of
output pixels). It is to provide the flexibility of using different
        sizes of Gaussian kernel for each keypoint type. If not provided, all
        standard deviations default to _DEFAULT_KEYPOINT_OFFSET_STD_DEV (in
        the output pixel space). If provided, the length of keypoint_std_dev
needs to be the same as the length of keypoint_indices, indicating the
standard deviation of each keypoint type.
per_keypoint_offset: boolean, indicating whether to assign offset for
each keypoint channel. If set False, the output offset target will have
the shape [batch_size, out_height, out_width, 2]. If set True, the
output offset target will have the shape [batch_size, out_height,
out_width, 2 * num_keypoints].
peak_radius: int, the radius (in the unit of output pixel) around heatmap
peak to assign the offset targets.
compute_heatmap_sparse: bool, indicating whether or not to use the sparse
version of the Op that computes the heatmap. The sparse version scales
better with number of keypoint types, but in some cases is known to
cause an OOM error. See (b/170989061).
      per_keypoint_depth: A bool indicating whether the model predicts the
        depth of each keypoint in independent channels. Similar to
        per_keypoint_offset but for the keypoint depth.
"""
self._stride = stride
self._class_id = class_id
self._keypoint_indices = keypoint_indices
self._per_keypoint_offset = per_keypoint_offset
self._per_keypoint_depth = per_keypoint_depth
self._peak_radius = peak_radius
self._compute_heatmap_sparse = compute_heatmap_sparse
if keypoint_std_dev is None:
self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] *
len(keypoint_indices))
else:
assert len(keypoint_indices) == len(keypoint_std_dev)
self._keypoint_std_dev = keypoint_std_dev
def assign_keypoint_heatmap_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoints_weights_list=None,
gt_weights_list=None,
gt_boxes_list=None):
"""Returns the keypoint heatmap targets for the CenterNet model.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of float tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of float tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
class-level description for more detail. If provided, the keypoint
standard deviations will be scaled based on the box sizes.
Returns:
heatmap: A float tensor of shape [batch_size, output_height, output_width,
num_keypoints] representing the per keypoint type center heatmap.
output_height and output_width are computed by dividing the input height
and width by the stride specified during initialization. Note that the
"num_keypoints" is defined by the length of keypoint_indices, which is
not necessarily equal to "num_total_keypoints".
      num_instances_batch: A 2D int tensor of shape
        [batch_size, num_keypoints] representing the number of instances for
        each keypoint type.
      valid_mask: A float tensor with shape [batch_size, output_height,
        output_width, num_keypoints] whose values are 0.0 within the regions
        of the blackout boxes and 1.0 elsewhere. Note that the blackout boxes
        are per keypoint type and are blacked out only if the keypoint
        visibility/weight (of the corresponding keypoint type) is zero.
"""
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_boxes_list is None:
gt_boxes_list = [None] * len(gt_keypoints_list)
heatmaps = []
num_instances_list = []
valid_mask_list = []
for keypoints, classes, kp_weights, weights, boxes in zip(
gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list, gt_boxes_list):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=out_height,
out_width=out_width,
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# A tensor of shape [num_instances, num_keypoints] with
# each element representing the type dimension for each corresponding
# keypoint:
# [[0, 1, ..., k-1],
# [0, 1, ..., k-1],
# :
# [0, 1, ..., k-1]]
keypoint_types = tf.tile(
input=tf.expand_dims(tf.range(num_keypoints), axis=0),
multiples=[num_instances, 1])
# A tensor of shape [num_instances, num_keypoints] with
# each element representing the sigma of the Gaussian kernel for each
# keypoint.
keypoint_std_dev = tf.tile(
input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0),
multiples=[num_instances, 1])
# If boxes is not None, then scale the standard deviation based on the
# size of the object bounding boxes similar to object center heatmap.
if boxes is not None:
boxes = box_list.BoxList(boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
        # Get the box height and width. Each returned tensor has shape
        # [num_instances].
(_, _, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
# Compute the sigma from box size. The tensor shape: [num_instances].
sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7)
keypoint_std_dev = keypoint_std_dev * tf.stack(
[sigma] * num_keypoints, axis=1)
# Generate the per-keypoint type valid region mask to ignore regions
# with keypoint weights equal to zeros (e.g. visibility is 0).
# shape of valid_mask: [out_height, out_width, num_keypoints]
kp_weight_list = tf.unstack(kp_weights, axis=1)
valid_mask_channel_list = []
for kp_weight in kp_weight_list:
blackout = kp_weight < 1e-3
valid_mask_channel_list.append(
ta_utils.blackout_pixel_weights_by_box_regions(
out_height, out_width, boxes.get(), blackout))
valid_mask = tf.stack(valid_mask_channel_list, axis=2)
valid_mask_list.append(valid_mask)
# Apply the Gaussian kernel to the keypoint coordinates. Returned heatmap
# has shape of [out_height, out_width, num_keypoints].
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),
x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]),
sigma=tf.keras.backend.flatten(keypoint_std_dev),
channel_onehot=tf.one_hot(
tf.keras.backend.flatten(keypoint_types), depth=num_keypoints),
channel_weights=tf.keras.backend.flatten(kp_weights))
num_instances_list.append(
tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32))
heatmaps.append(heatmap)
return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0),
tf.stack(valid_mask_list, axis=0))
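  # A minimal usage sketch (hypothetical shapes and tensors, not part of the
  # API): `keypoints` is a [num_instances, num_total_keypoints, 2] tensor in
  # normalized coordinates and `classes` is a one-hot [num_instances,
  # num_classes] tensor.
  #   assigner = CenterNetKeypointTargetAssigner(
  #       stride=4, class_id=0, keypoint_indices=[0, 1, 2])
  #   heatmap, num_instances, valid_mask = (
  #       assigner.assign_keypoint_heatmap_targets(
  #           height=128, width=128,
  #           gt_keypoints_list=[keypoints],
  #           gt_classes_list=[classes]))
  #   # heatmap has shape [1, 32, 32, 3]: 128 // 4 = 32 and 3 keypoint types.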
def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors):
"""Gets keypoint type index tensor.
The function prepares the tensor of keypoint indices with shape
[num_instances, num_keypoints, num_neighbors]. Each element represents the
keypoint type index for each corresponding keypoint and tiled along the 3rd
axis:
[[0, 1, ..., num_keypoints - 1],
[0, 1, ..., num_keypoints - 1],
:
[0, 1, ..., num_keypoints - 1]]
Args:
num_instances: int, the number of instances, used to define the 1st
dimension.
num_keypoints: int, the number of keypoint types, used to define the 2nd
dimension.
num_neighbors: int, the number of neighborhood pixels to consider for each
keypoint, used to define the 3rd dimension.
Returns:
A integer tensor of shape [num_instances, num_keypoints, num_neighbors].
"""
keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis]
tiled_keypoint_types = tf.tile(keypoint_types,
multiples=[num_instances, 1, num_neighbors])
return tiled_keypoint_types
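  # For example (a sketch), _get_keypoint_types(num_instances=2,
  # num_keypoints=3, num_neighbors=1) yields the [2, 3, 1] tensor:
  #   [[[0], [1], [2]],
  #    [[0], [1], [2]]]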
def assign_keypoints_offset_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the offsets and indices of the keypoints for location refinement.
The returned values are used to refine the location of each keypoints in the
heatmap. The predicted values at the relevant indices can be retrieved with
the get_batch_predictions_from_indices function.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
      gt_keypoints_list: A list of tensors with shape [num_instances,
        num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 3] (or
[num_total_instances, 4] if 'per_keypoint_offset' is set True) holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column corresponds to the channel
dimension (if 'per_keypoint_offset' is set True).
batch_offsets: a float tensor of shape [num_total_instances, 2] holding
the expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_total_instances] indicating
the weight of each prediction.
Note that num_total_instances = batch_size * num_instances *
num_keypoints * num_neighbors
"""
batch_indices = []
batch_offsets = []
batch_weights = []
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, kp_weights, weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# [num_instances * num_keypoints]
y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])
x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])
      # All keypoint coordinates and their neighbors:
      # [num_instances * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
y_source, x_source,
self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
      # Update the valid keypoint weights.
      # [num_instances * num_keypoints, num_neighbors]
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_instances * num_keypoints, num_neighbors, 2]
# indices: [num_instances * num_keypoints, num_neighbors, 2]
offsets, indices = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=y_source,
x_target=x_source)
# Reshape to:
# offsets: [num_instances * num_keypoints * num_neighbors, 2]
# indices: [num_instances * num_keypoints * num_neighbors, 2]
offsets = tf.reshape(offsets, [-1, 2])
indices = tf.reshape(indices, [-1, 2])
# Prepare the batch indices to be prepended.
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
if self._per_keypoint_offset:
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
else:
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_offsets.append(offsets)
batch_weights.append(tf.keras.backend.flatten(valid_keypoints))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or
# [batch_size * num_instances * num_keypoints * num_neighbors, 4] if
# 'per_keypoint_offset' is set to True.
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2]
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_offsets, batch_weights)
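  # A minimal sketch of how these targets are typically consumed (assuming a
  # hypothetical predicted offset tensor `offset_pred` of shape
  # [batch_size, out_height, out_width, 2] and the
  # get_batch_predictions_from_indices helper mentioned in the docstring):
  #   indices, offsets, weights = assigner.assign_keypoints_offset_targets(
  #       height=128, width=128,
  #       gt_keypoints_list=[keypoints], gt_classes_list=[classes])
  #   preds = get_batch_predictions_from_indices(offset_pred, indices)
  #   # e.g. a weighted L1 loss over the selected locations:
  #   loss = tf.reduce_sum(
  #       weights * tf.reduce_sum(tf.abs(preds - offsets), axis=-1))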
def assign_keypoints_depth_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoint_depths_list,
gt_keypoint_depth_weights_list,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the target depths of the keypoints.
    The returned values are the relative depth information of each keypoint.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoint_depths_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the relative depth of the
keypoints.
gt_keypoint_depth_weights_list: A list of tensors with shape
[num_instances, num_total_keypoints] corresponding to the weights of
the relative depth.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 3] (or
[num_total_instances, 4] if 'per_keypoint_depth' is set True) holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
        dimensions respectively. The fourth column corresponds to the channel
        dimension (if 'per_keypoint_depth' is set True).
batch_depths: a float tensor of shape [num_total_instances, 1] (or
[num_total_instances, num_keypoints] if per_keypoint_depth is set True)
indicating the target depth of each keypoint.
batch_weights: a float tensor of shape [num_total_instances] indicating
the weight of each prediction.
Note that num_total_instances = batch_size * num_instances *
num_keypoints * num_neighbors
"""
batch_indices = []
batch_weights = []
batch_depths = []
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_keypoint_depths_list is None:
gt_keypoint_depths_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, kp_weights, weights,
keypoint_depths, keypoint_depth_weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list,
gt_keypoints_weights_list, gt_weights_list,
gt_keypoint_depths_list, gt_keypoint_depth_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# [num_instances * num_keypoints]
y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])
x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])
      # All keypoint coordinates and their neighbors:
      # [num_instances * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
y_source, x_source,
self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
      # Update the valid keypoint weights.
      # [num_instances * num_keypoints, num_neighbors]
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# indices: [num_instances * num_keypoints, num_neighbors, 2]
_, indices = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=y_source,
x_target=x_source)
# Reshape to:
# indices: [num_instances * num_keypoints * num_neighbors, 2]
indices = tf.reshape(indices, [-1, 2])
# Gather the keypoint depth from corresponding keypoint indices:
# [num_instances, num_keypoints]
keypoint_depths = tf.gather(
keypoint_depths, self._keypoint_indices, axis=1)
# Tile the depth target to surrounding pixels.
# [num_instances, num_keypoints, num_neighbors]
tiled_keypoint_depths = tf.tile(
tf.expand_dims(keypoint_depths, axis=-1),
multiples=[1, 1, num_neighbors])
# [num_instances, num_keypoints]
keypoint_depth_weights = tf.gather(
keypoint_depth_weights, self._keypoint_indices, axis=1)
# [num_instances, num_keypoints, num_neighbors]
keypoint_depth_weights = tf.tile(
tf.expand_dims(keypoint_depth_weights, axis=-1),
multiples=[1, 1, num_neighbors])
# Update the weights of keypoint depth by the weights of the keypoints.
# A keypoint depth target is valid only if its corresponding keypoint
# target is also valid.
# [num_instances, num_keypoints, num_neighbors]
tiled_depth_weights = (
tf.reshape(valid_keypoints,
[num_instances, num_keypoints, num_neighbors]) *
keypoint_depth_weights)
invalid_depths = tf.logical_or(
tf.math.is_nan(tiled_depth_weights),
tf.math.is_nan(tiled_keypoint_depths))
# Assign zero values and weights to NaN values.
final_keypoint_depths = tf.where(invalid_depths,
tf.zeros_like(tiled_keypoint_depths),
tiled_keypoint_depths)
final_keypoint_depth_weights = tf.where(
invalid_depths,
tf.zeros_like(tiled_depth_weights),
tiled_depth_weights)
# [num_instances * num_keypoints * num_neighbors, 1]
batch_depths.append(tf.reshape(final_keypoint_depths, [-1, 1]))
# Prepare the batch indices to be prepended.
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
if self._per_keypoint_depth:
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
else:
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(
tf.keras.backend.flatten(final_keypoint_depth_weights))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or
    # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if
    # 'per_keypoint_depth' is set to True.
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 1]
batch_depths = tf.concat(batch_depths, axis=0)
return (batch_indices, batch_depths, batch_weights)
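  # For example (a sketch), a keypoint whose groundtruth depth is NaN
  # contributes a zero target with zero weight, so it is effectively excluded
  # from the depth loss:
  #   gt_keypoint_depths_list = [tf.constant([[1.5, float('nan')]])]
  #   # -> the corresponding rows of batch_depths are [1.5] and [0.0], with
  #   #    the second row's entry in batch_weights forced to 0.0.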
def assign_joint_regression_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_boxes_list=None,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the joint regression from center grid to keypoints.
The joint regression is used as the grouping cue from the estimated
keypoints to instance center. The offsets are the vectors from the floored
object center coordinates to the keypoint coordinates.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
      gt_keypoints_list: A list of float tensors with shape [num_instances,
        num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of float tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
class-level description for more detail. If provided, then the center
targets will be computed based on the center of the boxes.
      gt_keypoints_weights_list: A list of float tensors with shape
        [num_instances, num_total_keypoints] representing the weight of each
        keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
      batch_indices: an integer tensor of shape [num_total_instances, 4]
        holding the indices inside the predicted tensor which should be
        penalized. The first column indicates the index along the batch
        dimension and the second and third columns indicate the index along
        the y and x dimensions respectively; the last column refers to the
        keypoint type dimension.
      batch_offsets: a float tensor of shape [num_total_instances, 2] holding
        the expected y and x offset of each box in the output space.
      batch_weights: a float tensor of shape [num_total_instances] indicating
        the weight of each prediction.
      Note that num_total_instances = batch_size * num_instances *
        num_keypoints * num_neighbors.
Raises:
NotImplementedError: currently the object center coordinates need to be
computed from groundtruth bounding boxes. The functionality of
generating the object center coordinates from keypoints is not
implemented yet.
"""
batch_indices = []
batch_offsets = []
batch_weights = []
batch_size = len(gt_keypoints_list)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * batch_size
if gt_boxes_list is None:
gt_boxes_list = [None] * batch_size
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list,
gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
      # If boxes are provided, compute the joint center from them.
if boxes is not None:
# Compute joint center from boxes.
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes()
else:
# TODO(yuhuic): Add the logic to generate object centers from keypoints.
raise NotImplementedError((
'The functionality of generating object centers from keypoints is'
' not implemented yet. Please provide groundtruth bounding boxes.'
))
# Tile the yx center coordinates to be the same shape as keypoints.
y_center_tiled = tf.tile(
tf.reshape(y_center, shape=[num_instances, 1]),
multiples=[1, num_keypoints])
x_center_tiled = tf.tile(
tf.reshape(x_center, shape=[num_instances, 1]),
multiples=[1, num_keypoints])
      # [num_instances * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
tf.keras.backend.flatten(y_center_tiled),
tf.keras.backend.flatten(x_center_tiled), self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_instances * num_keypoints, 2]
# indices: [num_instances * num_keypoints, 2]
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),
x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]))
# Reshape to:
# offsets: [num_instances * num_keypoints * num_neighbors, 2]
# indices: [num_instances * num_keypoints * num_neighbors, 2]
offsets = tf.reshape(offsets, [-1, 2])
indices = tf.reshape(indices, [-1, 2])
# keypoint type tensor: [num_instances, num_keypoints, num_neighbors].
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
batch_offsets.append(offsets)
batch_weights.append(tf.keras.backend.flatten(valid_keypoints))
    # Concatenate the tensors in the batch in the first dimension:
    # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 4]
    batch_indices = tf.concat(batch_indices, axis=0)
    # shape: [batch_size * num_instances * num_keypoints * num_neighbors]
    batch_weights = tf.concat(batch_weights, axis=0)
    # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2]
    batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_offsets, batch_weights)
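  # Sketch: for an object whose center falls at output-space pixel (cy, cx)
  # and a keypoint of type t at (ky, kx), the regression target stored at
  # index [batch, cy, cx, t] is the vector (ky - cy, kx - cx), i.e. the offset
  # from the floored object center to the keypoint.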
def _resize_masks(masks, height, width, method):
# Resize segmentation masks to conform to output dimensions. Use TF2
# image resize because TF1's version is buggy:
# https://yaqs.corp.google.com/eng/q/4970450458378240
masks = tf2.image.resize(
masks[:, :, :, tf.newaxis],
size=(height, width),
method=method)
return masks[:, :, :, 0]
class CenterNetMaskTargetAssigner(object):
"""Wrapper to compute targets for segmentation masks."""
def __init__(self, stride):
self._stride = stride
def assign_segmentation_targets(
self, gt_masks_list, gt_classes_list, gt_boxes_list=None,
gt_mask_weights_list=None, mask_resize_method=ResizeMethod.BILINEAR):
"""Computes the segmentation targets.
This utility produces a semantic segmentation mask for each class, starting
with whole image instance segmentation masks. Effectively, each per-class
segmentation target is the union of all masks from that class.
Args:
gt_masks_list: A list of float tensors with shape [num_boxes,
input_height, input_width] with values in {0, 1} representing instance
masks for each object.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_boxes_list: An optional list of float tensors with shape [num_boxes, 4]
with normalized boxes corresponding to each mask. The boxes are used to
spatially allocate mask weights.
gt_mask_weights_list: An optional list of float tensors with shape
[num_boxes] with weights for each mask. If a mask has a zero weight, it
indicates that the box region associated with the mask should not
contribute to the loss. If not provided, will use a per-pixel weight of
1.
mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use
when resizing masks from input resolution to output resolution.
Returns:
segmentation_targets: An int32 tensor of size [batch_size, output_height,
output_width, num_classes] representing the class of each location in
the output space.
segmentation_weight: A float32 tensor of size [batch_size, output_height,
output_width] indicating the loss weight to apply at each location.
"""
_, num_classes = shape_utils.combined_static_and_dynamic_shape(
gt_classes_list[0])
_, input_height, input_width = (
shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))
output_height = tf.maximum(input_height // self._stride, 1)
output_width = tf.maximum(input_width // self._stride, 1)
if gt_boxes_list is None:
gt_boxes_list = [None] * len(gt_masks_list)
if gt_mask_weights_list is None:
gt_mask_weights_list = [None] * len(gt_masks_list)
segmentation_targets_list = []
segmentation_weights_list = []
for gt_boxes, gt_masks, gt_mask_weights, gt_classes in zip(
gt_boxes_list, gt_masks_list, gt_mask_weights_list, gt_classes_list):
if gt_boxes is not None and gt_mask_weights is not None:
boxes = box_list.BoxList(gt_boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes_absolute = box_list_ops.to_absolute_coordinates(
boxes, output_height, output_width)
# Generate a segmentation weight that applies mask weights in object
# regions.
blackout = gt_mask_weights <= 0
segmentation_weight_for_image = (
ta_utils.blackout_pixel_weights_by_box_regions(
output_height, output_width, boxes_absolute.get(), blackout,
weights=gt_mask_weights))
segmentation_weights_list.append(segmentation_weight_for_image)
else:
segmentation_weights_list.append(tf.ones((output_height, output_width),
dtype=tf.float32))
gt_masks = _resize_masks(gt_masks, output_height, output_width,
mask_resize_method)
gt_masks = gt_masks[:, :, :, tf.newaxis]
gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes])
# Shape: [h, w, num_classes].
segmentations_for_image = tf.reduce_max(
gt_masks * gt_classes_reshaped, axis=0)
# Avoid the case where max of an empty array is -inf.
segmentations_for_image = tf.maximum(segmentations_for_image, 0.0)
segmentation_targets_list.append(segmentations_for_image)
segmentation_target = tf.stack(segmentation_targets_list, axis=0)
segmentation_weight = tf.stack(segmentation_weights_list, axis=0)
return segmentation_target, segmentation_weight
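  # A minimal usage sketch (hypothetical mask and class tensors):
  #   assigner = CenterNetMaskTargetAssigner(stride=4)
  #   targets, weights = assigner.assign_segmentation_targets(
  #       gt_masks_list=[masks],      # [num_boxes, in_h, in_w] in {0, 1}
  #       gt_classes_list=[classes])  # [num_boxes, num_classes] one-hot
  #   # targets: [1, in_h // 4, in_w // 4, num_classes], the per-class union
  #   # of the (resized) instance masks.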
class CenterNetDensePoseTargetAssigner(object):
"""Wrapper to compute targets for DensePose task."""
def __init__(self, stride, num_parts=24):
self._stride = stride
self._num_parts = num_parts
def assign_part_and_coordinate_targets(self,
height,
width,
gt_dp_num_points_list,
gt_dp_part_ids_list,
gt_dp_surface_coords_list,
gt_weights_list=None):
"""Returns the DensePose part_id and coordinate targets and their indices.
The returned values are expected to be used with predicted tensors
of size (batch_size, height//self._stride, width//self._stride, 2). The
predicted values at the relevant indices can be retrieved with the
get_batch_predictions_from_indices function.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes]
containing the number of DensePose sampled points per box.
gt_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape
[num_boxes, max_sampled_points] containing the DensePose part ids
(0-indexed) for each sampled point. Note that there may be padding, as
boxes may contain a different number of sampled points.
gt_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape
[num_boxes, max_sampled_points, 4] containing the DensePose surface
coordinates (normalized) for each sampled point. Note that there may be
padding.
gt_weights_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_total_points, 4] holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column is the part index.
batch_part_ids: an int tensor of shape [num_total_points, num_parts]
holding 1-hot encodings of parts for each sampled point.
batch_surface_coords: a float tensor of shape [num_total_points, 2]
holding the expected (v, u) coordinates for each sampled point.
batch_weights: a float tensor of shape [num_total_points] indicating the
weight of each prediction.
Note that num_total_points = batch_size * num_boxes * max_sampled_points.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_dp_num_points_list)
batch_indices = []
batch_part_ids = []
batch_surface_coords = []
batch_weights = []
for i, (num_points, part_ids, surface_coords, weights) in enumerate(
zip(gt_dp_num_points_list, gt_dp_part_ids_list,
gt_dp_surface_coords_list, gt_weights_list)):
num_boxes, max_sampled_points = (
shape_utils.combined_static_and_dynamic_shape(part_ids))
part_ids_flattened = tf.reshape(part_ids, [-1])
part_ids_one_hot = tf.one_hot(part_ids_flattened, depth=self._num_parts)
# Get DensePose coordinates in the output space.
surface_coords_abs = densepose_ops.to_absolute_coordinates(
surface_coords,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
surface_coords_abs = tf.reshape(surface_coords_abs, [-1, 4])
# Each tensor has shape [num_boxes * max_sampled_points].
yabs, xabs, v, u = tf.unstack(surface_coords_abs, axis=-1)
# Get the indices (in output space) for the DensePose coordinates. Note
# that if self._stride is larger than 1, this will have the effect of
# reducing spatial resolution of the groundtruth points.
indices_y = tf.cast(yabs, tf.int32)
indices_x = tf.cast(xabs, tf.int32)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Create per-point weights.
weights_per_point = tf.reshape(
tf.tile(weights[:, tf.newaxis], multiples=[1, max_sampled_points]),
shape=[-1])
# Mask out invalid (i.e. padded) DensePose points.
num_points_tiled = tf.tile(num_points[:, tf.newaxis],
multiples=[1, max_sampled_points])
range_tiled = tf.tile(tf.range(max_sampled_points)[tf.newaxis, :],
multiples=[num_boxes, 1])
valid_points = tf.math.less(range_tiled, num_points_tiled)
valid_points = tf.cast(tf.reshape(valid_points, [-1]), dtype=tf.float32)
weights_per_point = weights_per_point * valid_points
# Shape of [num_boxes * max_sampled_points] integer tensor filled with
# current batch index.
batch_index = i * tf.ones_like(indices_y, dtype=tf.int32)
batch_indices.append(
tf.stack([batch_index, indices_y, indices_x, part_ids_flattened],
axis=1))
batch_part_ids.append(part_ids_one_hot)
batch_surface_coords.append(tf.stack([v, u], axis=1))
batch_weights.append(weights_per_point)
batch_indices = tf.concat(batch_indices, axis=0)
batch_part_ids = tf.concat(batch_part_ids, axis=0)
batch_surface_coords = tf.concat(batch_surface_coords, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
return batch_indices, batch_part_ids, batch_surface_coords, batch_weights
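  # Sketch: a sampled DensePose point in batch element 0 whose absolute
  # output-space location is (5.7, 3.2) with part id 8 contributes the row
  # [0, 5, 3, 8] to batch_indices (coordinates are truncated to integers),
  # its one-hot part vector to batch_part_ids, and its (v, u) surface
  # coordinates to batch_surface_coords.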
class CenterNetTrackTargetAssigner(object):
"""Wrapper to compute targets for tracking task.
Reference paper: A Simple Baseline for Multi-Object Tracking [1]
[1]: https://arxiv.org/abs/2004.01888
"""
def __init__(self, stride, num_track_ids):
self._stride = stride
self._num_track_ids = num_track_ids
def assign_track_targets(self,
height,
width,
gt_track_ids_list,
gt_boxes_list,
gt_weights_list=None):
"""Computes the track ID targets.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_track_ids_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the track ID of each groundtruth detection box.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_weights_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [batch_size, num_boxes, 3]
holding the indices inside the predicted tensor which should be
penalized. The first column indicates the index along the batch
dimension and the second and third columns indicate the index
along the y and x dimensions respectively.
batch_weights: a float tensor of shape [batch_size, num_boxes] indicating
the weight of each prediction.
track_id_targets: An int32 tensor of size [batch_size, num_boxes,
num_track_ids] containing the one-hot track ID vector of each
groundtruth detection box.
"""
track_id_targets = tf.one_hot(
gt_track_ids_list, depth=self._num_track_ids, axis=-1)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_weights = []
for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has shape
      # [num_boxes].
(y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the indices of the box centers. Shape:
# indices: [num_boxes, 2]
(_, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(weights)
batch_indices = tf.stack(batch_indices, axis=0)
batch_weights = tf.stack(batch_weights, axis=0)
return batch_indices, batch_weights, track_id_targets
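  # A minimal usage sketch (hypothetical inputs; `boxes` is a [2, 4] tensor of
  # normalized box coordinates):
  #   assigner = CenterNetTrackTargetAssigner(stride=4, num_track_ids=100)
  #   indices, weights, targets = assigner.assign_track_targets(
  #       height=128, width=128,
  #       gt_track_ids_list=[tf.constant([7, 42])],
  #       gt_boxes_list=[boxes])
  #   # targets: [1, 2, 100] one-hot track ID vectors.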
def filter_mask_overlap_min_area(masks):
"""If a pixel belongs to 2 instances, remove it from the larger instance."""
num_instances = tf.shape(masks)[0]
def _filter_min_area():
"""Helper function to filter non empty masks."""
areas = tf.reduce_sum(masks, axis=[1, 2], keepdims=True)
per_pixel_area = masks * areas
# Make sure background is ignored in argmin.
per_pixel_area = (masks * per_pixel_area +
(1 - masks) * per_pixel_area.dtype.max)
min_index = tf.cast(tf.argmin(per_pixel_area, axis=0), tf.int32)
filtered_masks = (
tf.range(num_instances)[:, tf.newaxis, tf.newaxis]
==
min_index[tf.newaxis, :, :]
)
return tf.cast(filtered_masks, tf.float32) * masks
return tf.cond(num_instances > 0, _filter_min_area,
lambda: masks)
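# For example (a sketch with two 1x2 masks): if
#   masks = [[[1., 1.]],
#            [[0., 1.]]]
# the shared pixel (0, 1) belongs to both instances and the second instance
# has the smaller area, so the result is
#   [[[1., 0.]],
#    [[0., 1.]]]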
def filter_mask_overlap(masks, method='min_area'):
if method == 'min_area':
return filter_mask_overlap_min_area(masks)
else:
raise ValueError('Unknown mask overlap filter type - {}'.format(method))
class CenterNetCornerOffsetTargetAssigner(object):
"""Wrapper to compute corner offsets for boxes using masks."""
def __init__(self, stride, overlap_resolution='min_area'):
"""Initializes the corner offset target assigner.
Args:
stride: int, the stride of the network in output pixels.
overlap_resolution: string, specifies how we handle overlapping
instance masks. Currently only 'min_area' is supported which assigns
overlapping pixels to the instance with the minimum area.
"""
self._stride = stride
self._overlap_resolution = overlap_resolution
def assign_corner_offset_targets(
self, gt_boxes_list, gt_masks_list):
"""Computes the corner offset targets and foreground map.
For each pixel that is part of any object's foreground, this function
computes the relative offsets to the top-left and bottom-right corners of
that instance's bounding box. It also returns a foreground map to indicate
which pixels contain valid corner offsets.
Args:
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_masks_list: A list of float tensors with shape [num_boxes,
input_height, input_width] with values in {0, 1} representing instance
masks for each object.
Returns:
corner_offsets: A float tensor of shape [batch_size, height, width, 4]
containing, in order, the (y, x) offsets to the top left corner and
        the (y, x) offsets to the bottom right corner for each foreground
        pixel.
foreground: A float tensor of shape [batch_size, height, width] in which
each pixel is set to 1 if it is a part of any instance's foreground
(and thus contains valid corner offsets) and 0 otherwise.
"""
_, input_height, input_width = (
shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))
output_height = tf.maximum(input_height // self._stride, 1)
output_width = tf.maximum(input_width // self._stride, 1)
y_grid, x_grid = tf.meshgrid(
tf.range(output_height), tf.range(output_width),
indexing='ij')
y_grid, x_grid = tf.cast(y_grid, tf.float32), tf.cast(x_grid, tf.float32)
corner_targets = []
foreground_targets = []
for gt_masks, gt_boxes in zip(gt_masks_list, gt_boxes_list):
gt_masks = _resize_masks(gt_masks, output_height, output_width,
method=ResizeMethod.NEAREST_NEIGHBOR)
gt_masks = filter_mask_overlap(gt_masks, self._overlap_resolution)
      # Cast to float in local variables so that output_height/output_width
      # remain integer tensors for the resize on the next loop iteration.
      output_height_f = tf.cast(output_height, tf.float32)
      output_width_f = tf.cast(output_width, tf.float32)
      ymin, xmin, ymax, xmax = tf.unstack(gt_boxes, axis=1)
      ymin, ymax = ymin * output_height_f, ymax * output_height_f
      xmin, xmax = xmin * output_width_f, xmax * output_width_f
top_y = ymin[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]
left_x = xmin[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]
bottom_y = ymax[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]
right_x = xmax[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]
foreground_target = tf.cast(tf.reduce_sum(gt_masks, axis=0) > 0.5,
tf.float32)
foreground_targets.append(foreground_target)
corner_target = tf.stack([
tf.reduce_sum(top_y * gt_masks, axis=0),
tf.reduce_sum(left_x * gt_masks, axis=0),
tf.reduce_sum(bottom_y * gt_masks, axis=0),
tf.reduce_sum(right_x * gt_masks, axis=0),
], axis=2)
corner_targets.append(corner_target)
return (tf.stack(corner_targets, axis=0),
tf.stack(foreground_targets, axis=0))
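  # Sketch: a foreground pixel at output-space location (y, x) belonging to an
  # instance whose box is (ymin, xmin, ymax, xmax) in output space receives
  # the four offsets (ymin - y, xmin - x, ymax - y, xmax - x); pixels outside
  # every instance mask receive zeros, since the offsets are summed against
  # the {0, 1} masks (overlapping pixels are first resolved to one instance).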
class CenterNetTemporalOffsetTargetAssigner(object):
"""Wrapper to compute target tensors for the temporal offset task.
This class has methods that take as input a batch of ground truth tensors
  (in the form of a list) and return the targets required to train the
temporal offset task.
"""
def __init__(self, stride):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
"""
self._stride = stride
def assign_temporal_offset_targets(self,
height,
width,
gt_boxes_list,
gt_offsets_list,
gt_match_list,
gt_weights_list=None):
"""Returns the temporal offset targets and their indices.
For each ground truth box, this function assigns it the corresponding
temporal offset to train the model.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_offsets_list: A list of 2-D tf.float32 tensors of shape [num_boxes, 2]
containing the spatial offsets of objects' centers compared with the
previous frame.
gt_match_list: A list of 1-D tf.float32 tensors of shape [num_boxes]
containing flags that indicate if an object has existed in the
previous frame.
gt_weights_list: A list of tensors with shape [num_boxes] corresponding to
the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_boxes, 3] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively.
batch_temporal_offsets: a float tensor of shape [num_boxes, 2] of the
expected y and x temporal offset of each object center in the
output space.
batch_weights: a float tensor of shape [num_boxes] indicating the
weight of each prediction.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_weights = []
batch_temporal_offsets = []
for i, (boxes, offsets, match_flags, weights) in enumerate(zip(
gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has shape
      # [num_boxes].
(y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_boxes, 2]
# indices: [num_boxes, 2]
(_, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
      # Assign ones if weights are not provided. If an object is not matched,
      # its weight becomes zero.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
weights *= match_flags
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(weights)
batch_temporal_offsets.append(offsets)
batch_indices = tf.concat(batch_indices, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
batch_temporal_offsets = tf.concat(batch_temporal_offsets, axis=0)
return (batch_indices, batch_temporal_offsets, batch_weights)
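  # Sketch: an object with match flag 1.0 whose center falls at output-space
  # pixel (cy, cx) contributes the row [i, cy, cx] to batch_indices and its
  # groundtruth (dy, dx) motion to batch_temporal_offsets; objects with match
  # flag 0.0 (no correspondence in the previous frame) receive zero weight.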
class DETRTargetAssigner(object):
"""Target assigner for DETR (https://arxiv.org/abs/2005.12872).
Detection Transformer (DETR) matches predicted boxes to groundtruth directly
to determine targets instead of matching anchors to groundtruth. Hence, the
new target assigner.
"""
def __init__(self):
"""Construct Object Detection Target Assigner."""
self._similarity_calc = sim_calc.DETRSimilarity()
self._matcher = hungarian_matcher.HungarianBipartiteMatcher()
def batch_assign(self,
pred_box_batch,
gt_box_batch,
pred_class_batch,
gt_class_targets_batch,
gt_weights_batch=None,
unmatched_class_label_batch=None):
"""Batched assignment of classification and regression targets.
Args:
pred_box_batch: a tensor of shape [batch_size, num_queries, 4]
representing predicted bounding boxes.
gt_box_batch: a tensor of shape [batch_size, num_queries, 4]
representing groundtruth bounding boxes.
      pred_class_batch: A tensor of shape [batch_size, num_queries,
        num_classes] with predicted class scores, to be used by certain
        similarity calculators.
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, num_classes] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
      unmatched_class_label_batch: a list of float32 tensors with length
        batch_size, where each tensor has shape [d_1, d_2, ..., d_k] and is
        consistent with the classification target for each anchor (and can be
        empty for scalar targets). This shape must thus be compatible with
        `gt_class_targets_batch`.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_pred_boxes,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_pred_boxes,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_pred_boxes,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_pred_boxes].
"""
pred_box_batch = [
box_list.BoxList(pred_box)
for pred_box in tf.unstack(pred_box_batch)]
gt_box_batch = [
box_list.BoxList(gt_box)
for gt_box in tf.unstack(gt_box_batch)]
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
if unmatched_class_label_batch is None:
unmatched_class_label_batch = [None] * len(gt_class_targets_batch)
pred_class_batch = tf.unstack(pred_class_batch)
    # Use a distinct loop variable so the unstacked pred_class_batch list is
    # not shadowed by its own elements.
    for (pred_boxes, gt_boxes, pred_classes, gt_class_targets, gt_weights,
         unmatched_class_label) in zip(pred_box_batch, gt_box_batch,
                                       pred_class_batch, gt_class_targets_batch,
                                       gt_weights_batch,
                                       unmatched_class_label_batch):
      (cls_targets, cls_weights, reg_targets,
       reg_weights) = self.assign(pred_boxes, gt_boxes, pred_classes,
                                  gt_class_targets, gt_weights,
                                  unmatched_class_label)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights)
def assign(self,
pred_boxes,
gt_boxes,
pred_classes,
gt_labels,
gt_weights=None,
unmatched_class_label=None):
"""Assign classification and regression targets to each box_pred.
For a given set of pred_boxes and groundtruth detections, match pred_boxes
to gt_boxes and assign classification and regression targets to
each box_pred as well as weights based on the resulting match (specifying,
e.g., which pred_boxes should not contribute to training loss).
pred_boxes that are not matched to anything are given a classification
    target of `unmatched_class_label`.
Args:
pred_boxes: a BoxList representing N pred_boxes
gt_boxes: a BoxList representing M groundtruth boxes
pred_classes: A tensor with shape [max_num_boxes, num_classes]
to be used by certain similarity calculators.
gt_labels: a tensor of shape [M, num_classes]
with labels for each of the ground_truth boxes. The subshape
[num_classes] can be empty (corresponding to scalar inputs). When set
to None, gt_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
gt_weights: a float tensor of shape [M] indicating the weight to
assign to all pred_boxes match to a particular groundtruth box. The
weights must be in [0., 1.]. If None, all weights are set to 1.
Generally no groundtruth boxes with zero weight match to any pred_boxes
as matchers are aware of groundtruth weights. Additionally,
`cls_weights` and `reg_weights` are calculated using groundtruth
weights as an added safety.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
Returns:
cls_targets: a float32 tensor with shape [num_pred_boxes, num_classes],
where the subshape [num_classes] is compatible with gt_labels
which has shape [num_gt_boxes, num_classes].
cls_weights: a float32 tensor with shape [num_pred_boxes, num_classes],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_pred_boxes,
box_code_dimension]
reg_weights: a float32 tensor with shape [num_pred_boxes]
"""
    if unmatched_class_label is None:
      unmatched_class_label = tf.constant(
          [1] + [0] * (gt_labels.shape[1] - 1), tf.float32)
if gt_weights is None:
num_gt_boxes = gt_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = gt_boxes.num_boxes()
gt_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
gt_boxes.add_field(fields.BoxListFields.classes, gt_labels)
pred_boxes.add_field(fields.BoxListFields.classes, pred_classes)
match_quality_matrix = self._similarity_calc.compare(
gt_boxes,
pred_boxes)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(gt_weights, 0))
matched_gt_boxes = match.gather_based_on_match(
gt_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
ty, tx, th, tw = matched_gt_boxlist.get_center_coordinates_and_sizes()
reg_targets = tf.transpose(tf.stack([ty, tx, th, tw]))
cls_targets = match.gather_based_on_match(
gt_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
reg_weights = match.gather_based_on_match(
gt_weights,
ignored_value=0.,
unmatched_value=0.)
    cls_weights = match.gather_based_on_match(
        gt_weights,
        ignored_value=0.,
        unmatched_value=1.)
    # Convert cls_weights from per-box_pred to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_multiple = tf.concat(
[tf.constant([1]), class_label_shape],
axis=0)
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
return (cls_targets, cls_weights, reg_targets, reg_weights)
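  # A minimal usage sketch (hypothetical tensors):
  #   assigner = DETRTargetAssigner()
  #   cls_t, cls_w, reg_t, reg_w = assigner.batch_assign(
  #       pred_box_batch,          # [batch_size, num_queries, 4]
  #       gt_box_batch,            # [batch_size, num_queries, 4]
  #       pred_class_batch,        # [batch_size, num_queries, num_classes]
  #       gt_class_targets_batch)  # list of [num_gt_i, num_classes] tensors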
# ==== End of file: object_detection/core/target_assigner.py ====
# Lint as: python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import six
import tensorflow.compat.v1 as tf
from object_detection.core import box_list_ops
from object_detection.core import standard_fields as fields
class RegionSimilarityCalculator(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for region similarity calculator."""
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return box_list_ops.iou(boxlist1, boxlist2)
class DETRSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity for the Detection Transformer model.
This class computes pairwise DETR similarity between two BoxLists using a
weighted combination of GIOU, classification scores, and the L1 loss.
"""
def __init__(self, l1_weight=5, giou_weight=2):
super().__init__()
self.l1_weight = l1_weight
self.giou_weight = giou_weight
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise DETR similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N groundtruth boxes.
boxlist2: BoxList holding M predicted boxes.
Returns:
A tensor with shape [N, M] representing pairwise DETR similarity scores.
"""
groundtruth_labels = boxlist1.get_field(fields.BoxListFields.classes)
predicted_labels = boxlist2.get_field(fields.BoxListFields.classes)
classification_scores = tf.matmul(groundtruth_labels,
predicted_labels,
transpose_b=True)
loss = self.l1_weight * box_list_ops.l1(
boxlist1, boxlist2) + self.giou_weight * (1 - box_list_ops.giou(
boxlist1, boxlist2)) - classification_scores
return -loss
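  # For example (a sketch with the default weights), the pairwise cost between
  # a groundtruth box g and a predicted box p is
  #   5 * L1(g, p) + 2 * (1 - GIoU(g, p)) - <class(g), class(p)>,
  # and _compare returns its negation so that larger values mean more similar.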
class NegSqDistSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on the squared distance metric.
This class computes pairwise similarity between two BoxLists based on the
negative squared distance metric.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
"""
return -1 * box_list_ops.sq_dist(boxlist1, boxlist2)
class IoaSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Area (IOA) metric.
This class computes pairwise similarity between two BoxLists based on their
pairwise intersections divided by the areas of the boxes in the second
BoxList.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOA similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise IOA scores.
"""
return box_list_ops.ioa(boxlist1, boxlist2)
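# Example usage (an illustrative sketch, not part of the original module):
# unlike IOU, IOA is asymmetric because it normalizes only by the areas of
# the boxes in the second BoxList.
#
#   from object_detection.core import box_list
#   boxes1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
#   boxes2 = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 1.0]]))
#   similarity = IoaSimilarity().compare(boxes1, boxes2)
#   # intersection (0.5) / area of the second box (0.5) = 1.0, so the result
#   # evaluates to [[1.0]].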
class ThresholdedIouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on thresholded IOU and score.
This class computes pairwise similarity between two BoxLists based on IOU and
a 'score' present in boxlist1. If IOU > threshold, then the entry in the
output pairwise tensor will contain `score`, otherwise 0.
"""
def __init__(self, iou_threshold=0):
"""Initialize the ThresholdedIouSimilarity.
Args:
iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold,
then the comparison result will be the foreground probability of
the first box, otherwise it will be zero.
"""
super(ThresholdedIouSimilarity, self).__init__()
self._iou_threshold = iou_threshold
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists and score.
Args:
boxlist1: BoxList holding N boxes. Must have a score field.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing scores thresholded by pairwise
iou scores.
"""
ious = box_list_ops.iou(boxlist1, boxlist2)
scores = boxlist1.get_field(fields.BoxListFields.scores)
scores = tf.expand_dims(scores, axis=1)
row_replicated_scores = tf.tile(scores, [1, tf.shape(ious)[-1]])
thresholded_ious = tf.where(ious > self._iou_threshold,
row_replicated_scores, tf.zeros_like(ious))
return thresholded_ious
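# Example usage (an illustrative sketch, not part of the original module; it
# assumes box_list.BoxList and a 'scores' field on the first BoxList):
#
#   from object_detection.core import box_list
#   boxes1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
#   boxes1.add_field(fields.BoxListFields.scores, tf.constant([0.9]))
#   boxes2 = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 1.0],
#                                          [0.9, 0.9, 1.0, 1.0]]))
#   sim = ThresholdedIouSimilarity(iou_threshold=0.3).compare(boxes1, boxes2)
#   # Pairwise IOUs are [0.5, 0.01], so `sim` evaluates to [[0.9, 0.0]]: the
#   # first entry keeps the score, the second falls below the threshold.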
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/region_similarity_calculator.py | region_similarity_calculator.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides functions to prefetch tensors to feed into models."""
import tensorflow.compat.v1 as tf
def prefetch(tensor_dict, capacity):
"""Creates a prefetch queue for tensors.
Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a
dequeue op that evaluates to a tensor_dict. This function is useful in
prefetching preprocessed tensors so that the data is readily available for
consumers.
Example input pipeline when you don't need batching:
----------------------------------------------------
key, string_tensor = slim.parallel_reader.parallel_read(...)
tensor_dict = decoder.decode(string_tensor)
tensor_dict = preprocessor.preprocess(tensor_dict, ...)
prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
tensor_dict = prefetch_queue.dequeue()
outputs = Model(tensor_dict)
...
----------------------------------------------------
For input pipelines with batching, refer to core/batcher.py
Args:
tensor_dict: a dictionary of tensors to prefetch.
capacity: the size of the prefetch queue.
Returns:
a FIFO prefetcher queue
"""
names = list(tensor_dict.keys())
dtypes = [t.dtype for t in tensor_dict.values()]
shapes = [t.get_shape() for t in tensor_dict.values()]
prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes,
shapes=shapes,
names=names,
name='prefetch_queue')
enqueue_op = prefetch_queue.enqueue(tensor_dict)
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
prefetch_queue, [enqueue_op]))
tf.summary.scalar(
'queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity),
tf.cast(prefetch_queue.size(), dtype=tf.float32) * (1. / capacity))
return prefetch_queue
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/prefetcher.py | prefetcher.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keypoint operations.
Keypoints are represented as tensors of shape [num_instances, num_keypoints, 2],
where the last dimension holds the [y, x] coordinates of each keypoint.
"""
import numpy as np
import tensorflow.compat.v1 as tf
def scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
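# Example (an illustrative sketch, not part of the original module): mapping
# normalized keypoints into a 200x100 (height x width) pixel frame:
#
#   keypoints = tf.constant([[[0.5, 0.25]]])            # [y, x], normalized.
#   pixel_keypoints = scale(keypoints, y_scale=200.0, x_scale=100.0)
#   # pixel_keypoints evaluates to [[[100.0, 25.0]]].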
def clip_to_window(keypoints, window, scope=None):
"""Clips keypoints to a window.
This op clips any input keypoints to a window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip the keypoints.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
keypoints.get_shape().assert_has_rank(3)
with tf.name_scope(scope, 'ClipToWindow'):
y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
y = tf.maximum(tf.minimum(y, win_y_max), win_y_min)
x = tf.maximum(tf.minimum(x, win_x_max), win_x_min)
new_keypoints = tf.concat([y, x], 2)
return new_keypoints
def prune_outside_window(keypoints, window, scope=None):
"""Prunes keypoints that fall outside a given window.
This function replaces keypoints that fall outside the given window with nan.
See also clip_to_window which clips any keypoints that fall outside the given
window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window outside of which the op should prune the keypoints.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
keypoints.get_shape().assert_has_rank(3)
with tf.name_scope(scope, 'PruneOutsideWindow'):
y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
valid_indices = tf.logical_and(
tf.logical_and(y >= win_y_min, y <= win_y_max),
tf.logical_and(x >= win_x_min, x <= win_x_max))
new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
new_keypoints = tf.concat([new_y, new_x], 2)
return new_keypoints
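# Example (an illustrative sketch, not part of the original module)
# contrasting clip_to_window with prune_outside_window for a keypoint that
# falls outside the window:
#
#   keypoints = tf.constant([[[0.5, 0.5], [0.9, 0.9]]])
#   window = tf.constant([0.0, 0.0, 0.75, 0.75])
#   clip_to_window(keypoints, window)        # -> [[[0.5, 0.5], [0.75, 0.75]]]
#   prune_outside_window(keypoints, window)  # -> [[[0.5, 0.5], [nan, nan]]]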
def change_coordinate_frame(keypoints, window, scope=None):
"""Changes coordinate frame of the keypoints to be relative to window's frame.
Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
coordinates from keypoints of shape [num_instances, num_keypoints, 2]
to be relative to this window.
An example use case is data augmentation, where we are given groundtruth
keypoints and would like to randomly crop the image to some window. In this
case we need to change the coordinate frame of each groundtruth keypoint to be
relative to this new window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window we should change the coordinate frame to.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height,
1.0 / win_width)
return new_keypoints
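# Example (an illustrative sketch, not part of the original module): after
# cropping to the window [0.25, 0.25, 0.75, 0.75], a keypoint on the window's
# right edge maps to x = 1.0 in the new frame:
#
#   keypoints = tf.constant([[[0.25, 0.75]]])
#   window = tf.constant([0.25, 0.25, 0.75, 0.75])
#   change_coordinate_frame(keypoints, window)  # -> [[[0.0, 1.0]]]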
def keypoints_to_enclosing_bounding_boxes(keypoints, keypoints_axis=1):
"""Creates enclosing bounding boxes from keypoints.
Args:
keypoints: a [num_instances, num_keypoints, 2] float32 tensor with keypoints
in [y, x] format.
keypoints_axis: An integer indicating the axis that correspond to the
keypoint dimension.
Returns:
A [num_instances, 4] float32 tensor that tightly covers all the keypoints
for each instance.
"""
ymin = tf.math.reduce_min(keypoints[..., 0], axis=keypoints_axis)
xmin = tf.math.reduce_min(keypoints[..., 1], axis=keypoints_axis)
ymax = tf.math.reduce_max(keypoints[..., 0], axis=keypoints_axis)
xmax = tf.math.reduce_max(keypoints[..., 1], axis=keypoints_axis)
return tf.stack([ymin, xmin, ymax, xmax], axis=keypoints_axis)
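# Example (an illustrative sketch, not part of the original module):
#
#   keypoints = tf.constant([[[0.2, 0.3], [0.4, 0.1], [0.3, 0.5]]])
#   keypoints_to_enclosing_bounding_boxes(keypoints)
#   # -> [[0.2, 0.1, 0.4, 0.5]], the tightest [ymin, xmin, ymax, xmax] box
#   # covering all three keypoints of the single instance.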
def to_normalized_coordinates(keypoints, height, width,
check_range=True, scope=None):
"""Converts absolute keypoint coordinates to normalized coordinates in [0, 1].
Usually one uses the dynamic shape of the image or conv-layer tensor:
keypoints = keypoint_ops.to_normalized_coordinates(keypoints,
tf.shape(images)[1],
tf.shape(images)[2])
This function raises an assertion failure at graph execution time when the
maximum coordinate value is smaller than 1.01 (which means that the
coordinates are already normalized). The value 1.01 is used to tolerate small
rounding errors.
Args:
keypoints: A tensor of shape [num_instances, num_keypoints, 2].
height: Maximum value for y coordinate of absolute keypoint coordinates.
width: Maximum value for x coordinate of absolute keypoint coordinates.
check_range: If True, checks if the coordinates are normalized.
scope: name scope.
Returns:
tensor of shape [num_instances, num_keypoints, 2] with normalized
coordinates in [0, 1].
"""
with tf.name_scope(scope, 'ToNormalizedCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(keypoints)
max_assert = tf.Assert(tf.greater(max_val, 1.01),
['max value is lower than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(keypoints, 1.0 / height, 1.0 / width)
def to_absolute_coordinates(keypoints, height, width,
check_range=True, scope=None):
"""Converts normalized keypoint coordinates to absolute pixel coordinates.
This function raises an assertion failure when the maximum keypoint
coordinate value is larger than 1.01 (in which case the coordinates are
already absolute).
Args:
keypoints: A tensor of shape [num_instances, num_keypoints, 2]
height: Maximum value for y coordinate of absolute keypoint coordinates.
width: Maximum value for x coordinate of absolute keypoint coordinates.
check_range: If True, checks if the coordinates are normalized or not.
scope: name scope.
Returns:
tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates
in terms of the image size.
"""
with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
# Ensure range of input keypoints is correct.
if check_range:
max_val = tf.reduce_max(keypoints)
max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
['maximum keypoint coordinate value is larger '
'than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(keypoints, height, width)
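# Example (an illustrative sketch, not part of the original module): for a
# fixed image size the two conversions are inverses of each other:
#
#   keypoints = tf.constant([[[0.5, 0.5]]])                   # normalized.
#   absolute = to_absolute_coordinates(keypoints, 100, 200)   # [[[50., 100.]]]
#   restored = to_normalized_coordinates(absolute, 100, 200)  # [[[0.5, 0.5]]]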
def flip_horizontal(keypoints, flip_point, flip_permutation=None, scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: integer list or rank 1 int32 tensor containing the
keypoint flip permutation. This specifies the mapping from original
keypoint indices to the flipped keypoint indices. This is used primarily
for keypoints that are not reflection invariant. E.g. Suppose there are 3
keypoints representing ['head', 'right_eye', 'left_eye'], then a logical
choice for flip_permutation might be [0, 2, 1] since we want to swap the
'left_eye' and 'right_eye' after a horizontal flip.
Defaults to None; None or an empty list keeps the original keypoint order
after the flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
keypoints.get_shape().assert_has_rank(3)
with tf.name_scope(scope, 'FlipHorizontal'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
if flip_permutation:
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
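# Example (an illustrative sketch, not part of the original module): flipping
# around the vertical line x = 0.5 with the ['head', 'right_eye', 'left_eye']
# permutation from the docstring above:
#
#   keypoints = tf.constant([[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]])
#   flip_horizontal(keypoints, 0.5, flip_permutation=[0, 2, 1])
#   # -> [[[0.1, 0.8], [0.5, 0.4], [0.3, 0.6]]]: the eye keypoints swap
#   # places and every x becomes 2 * 0.5 - x.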
def flip_vertical(keypoints, flip_point, flip_permutation=None, scope=None):
"""Flips the keypoints vertically around the flip_point.
This operation flips the y coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the y coordinate to flip the
keypoints around.
flip_permutation: integer list or rank 1 int32 tensor containing the
keypoint flip permutation. This specifies the mapping from original
keypoint indices to the flipped keypoint indices. This is used primarily
for keypoints that are not reflection invariant. E.g. Suppose there are 3
keypoints representing ['head', 'right_eye', 'left_eye'], then a logical
choice for flip_permutation might be [0, 2, 1] since we want to swap the
'left_eye' and 'right_eye' after the flip.
Defaults to None; None or an empty list keeps the original keypoint order
after the flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
keypoints.get_shape().assert_has_rank(3)
with tf.name_scope(scope, 'FlipVertical'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
if flip_permutation:
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
v = flip_point * 2.0 - v
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def rot90(keypoints, rotation_permutation=None, scope=None):
"""Rotates the keypoints counter-clockwise by 90 degrees.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
rotation_permutation: integer list or rank 1 int32 tensor containing the
keypoint flip permutation. This specifies the mapping from original
keypoint indices to the rotated keypoint indices. This is used primarily
for keypoints that are not rotation invariant.
Defaults to None; None or an empty list keeps the original keypoint order
after the rotation.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
keypoints.get_shape().assert_has_rank(3)
with tf.name_scope(scope, 'Rot90'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
if rotation_permutation:
keypoints = tf.gather(keypoints, rotation_permutation)
v, u = tf.split(value=keypoints[:, :, ::-1], num_or_size_splits=2, axis=2)
v = 1.0 - v
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
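# Example (an illustrative sketch, not part of the original module): in
# normalized coordinates a counter-clockwise 90-degree rotation maps
# [y, x] to [1 - x, y]:
#
#   keypoints = tf.constant([[[0.2, 0.7]]])
#   rot90(keypoints)  # -> [[[0.3, 0.2]]]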
def keypoint_weights_from_visibilities(keypoint_visibilities,
per_keypoint_weights=None):
"""Returns a keypoint weights tensor.
During training, it is often beneficial to consider only those keypoints that
are labeled. This function returns a weights tensor that combines default
per-keypoint weights, as well as the visibilities of individual keypoints.
The returned tensor satisfies:
keypoint_weights[i, k] = per_keypoint_weights[k] * keypoint_visibilities[i, k]
where per_keypoint_weights[k] is set to 1 if not provided.
Args:
keypoint_visibilities: A [num_instances, num_keypoints] boolean tensor
indicating whether a keypoint is labeled (and perhaps even visible).
per_keypoint_weights: A list or 1-d tensor of length `num_keypoints` with
per-keypoint weights. If None, will use 1 for each visible keypoint
weight.
Returns:
A [num_instances, num_keypoints] float32 tensor with keypoint weights. Those
keypoints deemed visible will have the provided per-keypoint weight, and
all others will be set to zero.
"""
keypoint_visibilities.get_shape().assert_has_rank(2)
if per_keypoint_weights is None:
num_keypoints = keypoint_visibilities.shape.as_list()[1]
per_keypoint_weight_mult = tf.ones((1, num_keypoints,), dtype=tf.float32)
else:
per_keypoint_weight_mult = tf.expand_dims(per_keypoint_weights, axis=0)
return per_keypoint_weight_mult * tf.cast(keypoint_visibilities, tf.float32)
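# Example (an illustrative sketch, not part of the original module):
#
#   visibilities = tf.constant([[True, False, True]])
#   keypoint_weights_from_visibilities(visibilities,
#                                      per_keypoint_weights=[1.0, 0.5, 0.2])
#   # -> [[1.0, 0.0, 0.2]]: the hidden keypoint gets weight zero.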
def set_keypoint_visibilities(keypoints, initial_keypoint_visibilities=None):
"""Sets keypoint visibilities based on valid/invalid keypoints.
Some keypoint operations set invisible keypoints (e.g. cropped keypoints) to
NaN, without affecting any keypoint "visibility" variables. This function is
used to update (or create) keypoint visibilities to agree with visible /
invisible keypoint coordinates.
Args:
keypoints: a float32 tensor of shape [num_instances, num_keypoints, 2].
initial_keypoint_visibilities: a boolean tensor of shape
[num_instances, num_keypoints]. If provided, will maintain the visibility
designation of a keypoint, so long as the corresponding coordinates are
not NaN. If not provided, will create keypoint visibilities directly from
the values in `keypoints` (i.e. NaN coordinates map to False, otherwise
they map to True).
Returns:
keypoint_visibilities: a bool tensor of shape [num_instances, num_keypoints]
indicating whether a keypoint is visible or not.
"""
keypoints.get_shape().assert_has_rank(3)
if initial_keypoint_visibilities is not None:
keypoint_visibilities = tf.cast(initial_keypoint_visibilities, tf.bool)
else:
keypoint_visibilities = tf.ones_like(keypoints[:, :, 0], dtype=tf.bool)
keypoints_with_nan = tf.math.reduce_any(tf.math.is_nan(keypoints), axis=2)
keypoint_visibilities = tf.where(
keypoints_with_nan,
tf.zeros_like(keypoint_visibilities, dtype=tf.bool),
keypoint_visibilities)
return keypoint_visibilities
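# Example (an illustrative sketch, not part of the original module): NaN
# coordinates force a keypoint to be marked invisible:
#
#   keypoints = tf.constant([[[0.1, 0.2], [float('nan'), 0.4]]])
#   set_keypoint_visibilities(keypoints)  # -> [[True, False]]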
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/keypoint_ops.py | keypoint_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.balanced_positive_negative_sampler."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import balanced_positive_negative_sampler
from object_detection.utils import test_case
class BalancedPositiveNegativeSamplerTest(test_case.TestCase):
def test_subsample_all_examples(self):
if self.has_tpu(): return
numpy_labels = np.random.permutation(300)
indicator = np.array(np.ones(300) == 1, dtype=bool)
numpy_labels = (numpy_labels - 200) > 0
labels = np.array(numpy_labels, dtype=bool)
def graph_fn(indicator, labels):
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler())
return sampler.subsample(indicator, 64, labels)
is_sampled = self.execute_cpu(graph_fn, [indicator, labels])
self.assertEqual(sum(is_sampled), 64)
self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 32)
self.assertEqual(sum(np.logical_and(
np.logical_not(numpy_labels), is_sampled)), 32)
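# The 32/32 positive/negative split asserted above follows from the sampler's
# default positive_fraction of 0.5 applied to the requested batch size of 64.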
def test_subsample_all_examples_static(self):
if not self.has_tpu(): return
numpy_labels = np.random.permutation(300)
indicator = np.array(np.ones(300) == 1, dtype=bool)
numpy_labels = (numpy_labels - 200) > 0
labels = np.array(numpy_labels, dtype=bool)
def graph_fn(indicator, labels):
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
is_static=True))
return sampler.subsample(indicator, 64, labels)
is_sampled = self.execute_tpu(graph_fn, [indicator, labels])
self.assertEqual(sum(is_sampled), 64)
self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 32)
self.assertEqual(sum(np.logical_and(
np.logical_not(numpy_labels), is_sampled)), 32)
def test_subsample_selection(self):
if self.has_tpu(): return
# Test random sampling when only some examples can be sampled:
# 100 samples, 20 positives, 10 positives cannot be sampled.
numpy_labels = np.arange(100)
numpy_indicator = numpy_labels < 90
indicator = np.array(numpy_indicator, dtype=bool)
numpy_labels = (numpy_labels - 80) >= 0
labels = np.array(numpy_labels, dtype=bool)
def graph_fn(indicator, labels):
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler())
return sampler.subsample(indicator, 64, labels)
is_sampled = self.execute_cpu(graph_fn, [indicator, labels])
self.assertEqual(sum(is_sampled), 64)
self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10)
self.assertEqual(sum(np.logical_and(
np.logical_not(numpy_labels), is_sampled)), 54)
self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator))
def test_subsample_selection_static(self):
if not self.has_tpu(): return
# Test random sampling when only some examples can be sampled:
# 100 samples, 20 positives, 10 positives cannot be sampled.
numpy_labels = np.arange(100)
numpy_indicator = numpy_labels < 90
indicator = np.array(numpy_indicator, dtype=bool)
numpy_labels = (numpy_labels - 80) >= 0
labels = np.array(numpy_labels, dtype=bool)
def graph_fn(indicator, labels):
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
is_static=True))
return sampler.subsample(indicator, 64, labels)
is_sampled = self.execute_tpu(graph_fn, [indicator, labels])
self.assertEqual(sum(is_sampled), 64)
self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10)
self.assertEqual(sum(np.logical_and(
np.logical_not(numpy_labels), is_sampled)), 54)
self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator))
def test_subsample_selection_larger_batch_size(self):
if self.has_tpu(): return
# Test random sampling when the total number of examples that can be sampled
# is less than the batch size:
# 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64.
# The non-static sampler returns only examples allowed by the indicator, so
# 60 samples are expected here (matching the assertions below).
numpy_labels = np.arange(100)
numpy_indicator = numpy_labels < 60
indicator = np.array(numpy_indicator, dtype=bool)
numpy_labels = (numpy_labels - 50) >= 0
labels = np.array(numpy_labels, dtype=bool)
def graph_fn(indicator, labels):
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler())
return sampler.subsample(indicator, 64, labels)
is_sampled = self.execute_cpu(graph_fn, [indicator, labels])
self.assertEqual(sum(is_sampled), 60)
self.assertGreaterEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10)
self.assertGreaterEqual(
sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)), 50)
self.assertEqual(sum(np.logical_and(is_sampled, numpy_indicator)), 60)
def test_subsample_selection_larger_batch_size_static(self):
if not self.has_tpu(): return
# Test random sampling when the total number of examples that can be sampled
# is less than the batch size:
# 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64.
# The static sampler should still return 64 samples, 4 of which fall outside
# the indicator.
numpy_labels = np.arange(100)
numpy_indicator = numpy_labels < 60
indicator = np.array(numpy_indicator, dtype=bool)
numpy_labels = (numpy_labels - 50) >= 0
labels = np.array(numpy_labels, dtype=bool)
def graph_fn(indicator, labels):
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
is_static=True))
return sampler.subsample(indicator, 64, labels)
is_sampled = self.execute_tpu(graph_fn, [indicator, labels])
self.assertEqual(sum(is_sampled), 64)
self.assertGreaterEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10)
self.assertGreaterEqual(
sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)), 50)
self.assertEqual(sum(np.logical_and(is_sampled, numpy_indicator)), 60)
def test_subsample_selection_no_batch_size(self):
if self.has_tpu(): return
# Test random sampling when only some examples can be sampled:
# 1000 samples, 6 positives (5 can be sampled).
numpy_labels = np.arange(1000)
numpy_indicator = numpy_labels < 999
numpy_labels = (numpy_labels - 994) >= 0
def graph_fn(indicator, labels):
sampler = (balanced_positive_negative_sampler.
BalancedPositiveNegativeSampler(0.01))
is_sampled = sampler.subsample(indicator, None, labels)
return is_sampled
is_sampled_out = self.execute_cpu(graph_fn, [numpy_indicator, numpy_labels])
self.assertEqual(sum(is_sampled_out), 500)
self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled_out)), 5)
self.assertEqual(sum(np.logical_and(
np.logical_not(numpy_labels), is_sampled_out)), 495)
self.assertAllEqual(is_sampled_out, np.logical_and(is_sampled_out,
numpy_indicator))
def test_subsample_selection_no_batch_size_static(self):
labels = tf.constant([[True, False, False]])
indicator = tf.constant([True, False, True])
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler())
with self.assertRaises(ValueError):
sampler.subsample(indicator, None, labels)
def test_raises_error_with_incorrect_label_shape(self):
labels = tf.constant([[True, False, False]])
indicator = tf.constant([True, False, True])
sampler = (balanced_positive_negative_sampler.
BalancedPositiveNegativeSampler())
with self.assertRaises(ValueError):
sampler.subsample(indicator, 64, labels)
def test_raises_error_with_incorrect_indicator_shape(self):
labels = tf.constant([True, False, False])
indicator = tf.constant([[True, False, True]])
sampler = (balanced_positive_negative_sampler.
BalancedPositiveNegativeSampler())
with self.assertRaises(ValueError):
sampler.subsample(indicator, 64, labels)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/balanced_positive_negative_sampler_test.py | balanced_positive_negative_sampler_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.preprocessor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.core import preprocessor_cache
from object_detection.core import standard_fields as fields
from object_detection.utils import test_case
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock # pylint: disable=g-import-not-at-top
class PreprocessorTest(test_case.TestCase, parameterized.TestCase):
def createColorfulTestImage(self):
ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8))
ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8))
ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8))
imr = tf.concat([ch255, ch0, ch0], 3)
img = tf.concat([ch255, ch255, ch0], 3)
imb = tf.concat([ch255, ch0, ch255], 3)
imw = tf.concat([ch128, ch128, ch128], 3)
imu = tf.concat([imr, img], 2)
imd = tf.concat([imb, imw], 2)
im = tf.concat([imu, imd], 1)
return im
def createTestImages(self):
images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128],
[0, 128, 128, 128], [192, 192, 128, 128]]],
dtype=tf.uint8)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128],
[0, 128, 192, 192], [192, 192, 128, 192]]],
dtype=tf.uint8)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192],
[0, 128, 128, 0], [192, 192, 192, 128]]],
dtype=tf.uint8)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def createEmptyTestBoxes(self):
boxes = tf.constant([[]], dtype=tf.float32)
return boxes
def createTestBoxes(self):
boxes = tf.constant(
[[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
return boxes
def createRandomTextBoxes(self):
random_boxes = tf.concat([tf.random.uniform([100, 2], 0.0, 0.5, seed=1),
tf.random.uniform([100, 2], 0.5, 1.0, seed=2)],
axis=1)
fixed_boxes = tf.constant(
[[0.0, 0.25, 0.75, 1.0],
[0.25, 0.5, 0.75, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.1, 0.2, 0.3, 0.4]], dtype=tf.float32)
zero_boxes = tf.zeros((50, 4))
return tf.concat([random_boxes, fixed_boxes, zero_boxes], axis=0)
def createTestGroundtruthWeights(self):
return tf.constant([1.0, 0.5], dtype=tf.float32)
def createZeroBoxes(self):
return tf.zeros((100, 4))
def createTestMasks(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]],
[[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def createTestKeypoints(self):
keypoints_np = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
])
keypoints = tf.constant(keypoints_np, dtype=tf.float32)
keypoint_visibilities = tf.constant(
[
[True, True, False],
[False, True, True]
])
return keypoints, keypoint_visibilities
def createTestKeypointDepths(self):
keypoint_depths = tf.constant([
[1.0, 0.9, 0.8],
[0.7, 0.6, 0.5]
], dtype=tf.float32)
keypoint_depth_weights = tf.constant([
[0.5, 0.6, 0.7],
[0.8, 0.9, 1.0]
], dtype=tf.float32)
return keypoint_depths, keypoint_depth_weights
def createTestKeypointsInsideCrop(self):
keypoints = np.array([
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
])
return tf.constant(keypoints, dtype=tf.float32)
def createTestKeypointsOutsideCrop(self):
keypoints = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
])
return tf.constant(keypoints, dtype=tf.float32)
def createTestDensePose(self):
dp_num_points = tf.constant([1, 3], dtype=tf.int32)
dp_part_ids = tf.constant(
[[4, 0, 0],
[1, 0, 5]], dtype=tf.int32)
dp_surface_coords = tf.constant(
[
# Instance 0.
[[0.1, 0.2, 0.6, 0.7],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]],
# Instance 1.
[[0.8, 0.9, 0.2, 0.4],
[0.1, 0.3, 0.2, 0.8],
[0.6, 1.0, 0.3, 0.4]],
], dtype=tf.float32)
return dp_num_points, dp_part_ids, dp_surface_coords
def createKeypointFlipPermutation(self):
return [0, 2, 1]
def createKeypointRotPermutation(self):
return [0, 2, 1]
def createTestLabels(self):
labels = tf.constant([1, 2], dtype=tf.int32)
return labels
def createTestLabelsLong(self):
labels = tf.constant([1, 2, 4], dtype=tf.int32)
return labels
def createTestBoxesOutOfImage(self):
boxes = tf.constant(
[[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32)
return boxes
def createTestMultiClassScores(self):
return tf.constant([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32)
def expectedImagesAfterNormalization(self):
images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0],
[-1, 0, 0, 0], [0.5, 0.5, 0, 0]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0],
[-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5],
[-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedMaxImageAfterColorScale(self):
images_r = tf.constant([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1],
[-0.9, 0.1, 0.1, 0.1], [0.6, 0.6, 0.1, 0.1]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1],
[-0.9, 0.1, 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6],
[-0.9, 0.1, 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedMinImageAfterColorScale(self):
images_r = tf.constant([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1],
[-1, -0.1, -0.1, -0.1], [0.4, 0.4, -0.1, -0.1]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1],
[-1, -0.1, 0.4, 0.4], [0.4, 0.4, -0.1, 0.4]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4],
[-1, -0.1, -0.1, -1], [0.4, 0.4, 0.4, -0.1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterLeftRightFlip(self):
images_r = tf.constant([[[0, 0, 0, 0], [0, 0, -1, -1],
[0, 0, 0, -1], [0, 0, 0.5, 0.5]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, -1, -1], [0, 0, -1, -1],
[0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1],
[-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterUpDownFlip(self):
images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0],
[-1, -1, 0, 0], [0, 0, 0, 0]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5],
[-1, -1, 0, 0], [-1, -1, 0, 0]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1],
[-1, -1, 0, 0.5], [0, 0, 0.5, -1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterRot90(self):
images_r = tf.constant([[[0, 0, 0, 0], [0, 0, 0, 0],
[0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0],
[-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5],
[0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedBoxesAfterLeftRightFlip(self):
boxes = tf.constant([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]],
dtype=tf.float32)
return boxes
def expectedBoxesAfterUpDownFlip(self):
boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]],
dtype=tf.float32)
return boxes
def expectedBoxesAfterRot90(self):
boxes = tf.constant(
[[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32)
return boxes
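# The expected boxes above follow directly from the [ymin, xmin, ymax, xmax]
# flip algebra: a left-right flip maps xmin' = 1 - xmax and xmax' = 1 - xmin;
# an up-down flip maps ymin' = 1 - ymax and ymax' = 1 - ymin; and a
# counter-clockwise 90-degree rotation maps ymin' = 1 - xmax,
# ymax' = 1 - xmin, xmin' = ymin and xmax' = ymax.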
def expectedMasksAfterLeftRightFlip(self):
mask = np.array([
[[0.0, 0.0, 255.0],
[0.0, 0.0, 255.0],
[0.0, 0.0, 255.0]],
[[0.0, 255.0, 255.0],
[0.0, 255.0, 255.0],
[0.0, 255.0, 255.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedMasksAfterUpDownFlip(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]],
[[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedMasksAfterRot90(self):
mask = np.array([
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[255.0, 255.0, 255.0]],
[[0.0, 0.0, 0.0],
[255.0, 255.0, 255.0],
[255.0, 255.0, 255.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedLabelScoresAfterThresholding(self):
return tf.constant([1.0], dtype=tf.float32)
def expectedBoxesAfterThresholding(self):
return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32)
def expectedLabelsAfterThresholding(self):
return tf.constant([1], dtype=tf.float32)
def expectedMultiClassScoresAfterThresholding(self):
return tf.constant([[1.0, 0.0]], dtype=tf.float32)
def expectedMasksAfterThresholding(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedKeypointsAfterThresholding(self):
keypoints = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]
])
return tf.constant(keypoints, dtype=tf.float32)
def expectedLabelScoresAfterThresholdingWithMissingScore(self):
return tf.constant([np.nan], dtype=tf.float32)
def expectedBoxesAfterThresholdingWithMissingScore(self):
return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32)
def expectedLabelsAfterThresholdingWithMissingScore(self):
return tf.constant([2], dtype=tf.float32)
def expectedLabelScoresAfterDropping(self):
return tf.constant([0.5], dtype=tf.float32)
def expectedBoxesAfterDropping(self):
return tf.constant([[0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
def expectedLabelsAfterDropping(self):
return tf.constant([2], dtype=tf.float32)
def expectedMultiClassScoresAfterDropping(self):
return tf.constant([[0.5, 0.5]], dtype=tf.float32)
def expectedMasksAfterDropping(self):
masks = np.array([[[255.0, 255.0, 0.0], [255.0, 255.0, 0.0],
[255.0, 255.0, 0.0]]])
return tf.constant(masks, dtype=tf.float32)
def expectedKeypointsAfterDropping(self):
keypoints = np.array([[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]])
return tf.constant(keypoints, dtype=tf.float32)
def expectedLabelsAfterRemapping(self):
return tf.constant([3, 3, 4], dtype=tf.float32)
def testRgbToGrayscale(self):
def graph_fn():
images = self.createTestImages()
grayscale_images = preprocessor._rgb_to_grayscale(images)
expected_images = tf.image.rgb_to_grayscale(images)
return grayscale_images, expected_images
(grayscale_images, expected_images) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(expected_images, grayscale_images)
def testNormalizeImage(self):
def graph_fn():
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 256,
'target_minval': -1,
'target_maxval': 1
})]
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
images_expected = self.expectedImagesAfterNormalization()
return images, images_expected
images_, images_expected_ = self.execute_cpu(graph_fn, [])
images_shape_ = images_.shape
images_expected_shape_ = images_expected_.shape
expected_shape = [1, 4, 4, 3]
self.assertAllEqual(images_expected_shape_, images_shape_)
self.assertAllEqual(images_shape_, expected_shape)
self.assertAllClose(images_, images_expected_)
def testRetainBoxesAboveThreshold(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
(retained_boxes, retained_labels,
retained_weights) = preprocessor.retain_boxes_above_threshold(
boxes, labels, weights, threshold=0.6)
return [
retained_boxes, retained_labels, retained_weights,
self.expectedBoxesAfterThresholding(),
self.expectedLabelsAfterThresholding(),
self.expectedLabelScoresAfterThresholding()
]
(retained_boxes_, retained_labels_, retained_weights_,
expected_retained_boxes_, expected_retained_labels_,
expected_retained_weights_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(
retained_boxes_, expected_retained_boxes_)
self.assertAllClose(
retained_labels_, expected_retained_labels_)
self.assertAllClose(
retained_weights_, expected_retained_weights_)
def testRetainBoxesAboveThresholdWithMultiClassScores(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
multiclass_scores = self.createTestMultiClassScores()
(_, _, _,
retained_multiclass_scores) = preprocessor.retain_boxes_above_threshold(
boxes,
labels,
weights,
multiclass_scores=multiclass_scores,
threshold=0.6)
return [
retained_multiclass_scores,
self.expectedMultiClassScoresAfterThresholding()
]
(retained_multiclass_scores_,
expected_retained_multiclass_scores_) = self.execute(graph_fn, [])
self.assertAllClose(retained_multiclass_scores_,
expected_retained_multiclass_scores_)
def testRetainBoxesAboveThresholdWithMasks(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
masks = self.createTestMasks()
_, _, _, retained_masks = preprocessor.retain_boxes_above_threshold(
boxes, labels, weights, masks, threshold=0.6)
return [
retained_masks, self.expectedMasksAfterThresholding()]
retained_masks_, expected_retained_masks_ = self.execute_cpu(graph_fn, [])
self.assertAllClose(
retained_masks_, expected_retained_masks_)
def testRetainBoxesAboveThresholdWithKeypoints(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
keypoints, _ = self.createTestKeypoints()
(_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold(
boxes, labels, weights, keypoints=keypoints, threshold=0.6)
return [retained_keypoints, self.expectedKeypointsAfterThresholding()]
(retained_keypoints_,
expected_retained_keypoints_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(retained_keypoints_, expected_retained_keypoints_)
def testDropLabelProbabilistically(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
(retained_boxes, retained_labels,
retained_weights) = preprocessor.drop_label_probabilistically(
boxes, labels, weights, dropped_label=1, drop_probability=1.0)
return [
retained_boxes, retained_labels, retained_weights,
self.expectedBoxesAfterDropping(),
self.expectedLabelsAfterDropping(),
self.expectedLabelScoresAfterDropping()
]
(retained_boxes_, retained_labels_, retained_weights_,
expected_retained_boxes_, expected_retained_labels_,
expected_retained_weights_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(retained_boxes_, expected_retained_boxes_)
self.assertAllClose(retained_labels_, expected_retained_labels_)
self.assertAllClose(retained_weights_, expected_retained_weights_)
def testDropLabelProbabilisticallyWithMultiClassScores(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
multiclass_scores = self.createTestMultiClassScores()
(_, _, _,
retained_multiclass_scores) = preprocessor.drop_label_probabilistically(
boxes,
labels,
weights,
multiclass_scores=multiclass_scores,
dropped_label=1,
drop_probability=1.0)
return [retained_multiclass_scores,
self.expectedMultiClassScoresAfterDropping()]
(retained_multiclass_scores_,
expected_retained_multiclass_scores_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(retained_multiclass_scores_,
expected_retained_multiclass_scores_)
def testDropLabelProbabilisticallyWithMasks(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
masks = self.createTestMasks()
(_, _, _, retained_masks) = preprocessor.drop_label_probabilistically(
boxes,
labels,
weights,
masks=masks,
dropped_label=1,
drop_probability=1.0)
return [retained_masks, self.expectedMasksAfterDropping()]
(retained_masks_, expected_retained_masks_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(retained_masks_, expected_retained_masks_)
def testDropLabelProbabilisticallyWithKeypoints(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
keypoints, _ = self.createTestKeypoints()
(_, _, _, retained_keypoints) = preprocessor.drop_label_probabilistically(
boxes,
labels,
weights,
keypoints=keypoints,
dropped_label=1,
drop_probability=1.0)
return [retained_keypoints, self.expectedKeypointsAfterDropping()]
(retained_keypoints_,
expected_retained_keypoints_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(retained_keypoints_, expected_retained_keypoints_)
def testRemapLabels(self):
def graph_fn():
labels = self.createTestLabelsLong()
remapped_labels = preprocessor.remap_labels(labels, [1, 2], 3)
return [remapped_labels, self.expectedLabelsAfterRemapping()]
(remapped_labels_, expected_remapped_labels_) = self.execute_cpu(graph_fn,
[])
self.assertAllClose(remapped_labels_, expected_remapped_labels_)
def testFlipBoxesLeftRight(self):
def graph_fn():
boxes = self.createTestBoxes()
flipped_boxes = preprocessor._flip_boxes_left_right(boxes)
expected_boxes = self.expectedBoxesAfterLeftRightFlip()
return flipped_boxes, expected_boxes
flipped_boxes, expected_boxes = self.execute_cpu(graph_fn, [])
self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
def testFlipBoxesUpDown(self):
def graph_fn():
boxes = self.createTestBoxes()
flipped_boxes = preprocessor._flip_boxes_up_down(boxes)
expected_boxes = self.expectedBoxesAfterUpDownFlip()
return flipped_boxes, expected_boxes
flipped_boxes, expected_boxes = self.execute_cpu(graph_fn, [])
self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
def testRot90Boxes(self):
def graph_fn():
boxes = self.createTestBoxes()
rotated_boxes = preprocessor._rot90_boxes(boxes)
expected_boxes = self.expectedBoxesAfterRot90()
return rotated_boxes, expected_boxes
rotated_boxes, expected_boxes = self.execute_cpu(graph_fn, [])
self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten())
def testFlipMasksLeftRight(self):
def graph_fn():
test_mask = self.createTestMasks()
flipped_mask = preprocessor._flip_masks_left_right(test_mask)
expected_mask = self.expectedMasksAfterLeftRightFlip()
return flipped_mask, expected_mask
flipped_mask, expected_mask = self.execute_cpu(graph_fn, [])
self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
def testFlipMasksUpDown(self):
def graph_fn():
test_mask = self.createTestMasks()
flipped_mask = preprocessor._flip_masks_up_down(test_mask)
expected_mask = self.expectedMasksAfterUpDownFlip()
return flipped_mask, expected_mask
flipped_mask, expected_mask = self.execute_cpu(graph_fn, [])
self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
def testRot90Masks(self):
def graph_fn():
test_mask = self.createTestMasks()
rotated_mask = preprocessor._rot90_masks(test_mask)
expected_mask = self.expectedMasksAfterRot90()
return [rotated_mask, expected_mask]
rotated_mask, expected_mask = self.execute(graph_fn, [])
self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten())
def _testPreprocessorCache(self,
preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False):
if self.is_tf2(): return
def graph_fn():
cache = preprocessor_cache.PreprocessorCache()
images = self.createTestImages()
boxes = self.createTestBoxes()
weights = self.createTestGroundtruthWeights()
classes = self.createTestLabels()
masks = self.createTestMasks()
keypoints, _ = self.createTestKeypoints()
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=test_masks, include_keypoints=test_keypoints)
out = []
for _ in range(2):
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_weights: weights
}
if test_boxes:
tensor_dict[fields.InputDataFields.groundtruth_boxes] = boxes
tensor_dict[fields.InputDataFields.groundtruth_classes] = classes
if test_masks:
tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks
if test_keypoints:
tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints
out.append(
preprocessor.preprocess(tensor_dict, preprocess_options,
preprocessor_arg_map, cache))
return out
out1, out2 = self.execute_cpu_tf1(graph_fn, [])
for (_, v1), (_, v2) in zip(out1.items(), out2.items()):
self.assertAllClose(v1, v2)
def testRandomHorizontalFlip(self):
def graph_fn():
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
images_expected1 = self.expectedImagesAfterLeftRightFlip()
boxes_expected1 = self.expectedBoxesAfterLeftRightFlip()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
return [images_diff, images_diff_expected, boxes_diff,
boxes_diff_expected]
(images_diff_, images_diff_expected_, boxes_diff_,
boxes_diff_expected_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomHorizontalFlipWithEmptyBoxes(self):
def graph_fn():
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
images_expected1 = self.expectedImagesAfterLeftRightFlip()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
return [images_diff, images_diff_expected, boxes, boxes_expected]
(images_diff_, images_diff_expected_, boxes_,
boxes_expected_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomHorizontalFlipWithCache(self):
keypoint_flip_permutation = self.createKeypointFlipPermutation()
preprocess_options = [
(preprocessor.random_horizontal_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRunRandomHorizontalFlipWithKeypointDepth(self):
def graph_fn():
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints, keypoint_visibilities = self.createTestKeypoints()
keypoint_depths, keypoint_depth_weights = self.createTestKeypointDepths()
keypoint_flip_permutation = self.createKeypointFlipPermutation()
tensor_dict = {
fields.InputDataFields.image:
images,
fields.InputDataFields.groundtruth_boxes:
boxes,
fields.InputDataFields.groundtruth_instance_masks:
masks,
fields.InputDataFields.groundtruth_keypoints:
keypoints,
fields.InputDataFields.groundtruth_keypoint_visibilities:
keypoint_visibilities,
fields.InputDataFields.groundtruth_keypoint_depths:
keypoint_depths,
fields.InputDataFields.groundtruth_keypoint_depth_weights:
keypoint_depth_weights,
}
preprocess_options = [(preprocessor.random_horizontal_flip, {
'keypoint_flip_permutation': keypoint_flip_permutation,
'probability': 1.0
})]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True,
include_keypoints=True,
include_keypoint_visibilities=True,
include_dense_pose=False,
include_keypoint_depths=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
keypoint_depths = tensor_dict[
fields.InputDataFields.groundtruth_keypoint_depths]
keypoint_depth_weights = tensor_dict[
fields.InputDataFields.groundtruth_keypoint_depth_weights]
output_tensors = [keypoint_depths, keypoint_depth_weights]
return output_tensors
output_tensors = self.execute_cpu(graph_fn, [])
expected_keypoint_depths = [[1.0, 0.8, 0.9], [0.7, 0.5, 0.6]]
expected_keypoint_depth_weights = [[0.5, 0.7, 0.6], [0.8, 1.0, 0.9]]
self.assertAllClose(expected_keypoint_depths, output_tensors[0])
self.assertAllClose(expected_keypoint_depth_weights, output_tensors[1])
def testRandomVerticalFlip(self):
def graph_fn():
preprocess_options = [(preprocessor.random_vertical_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes
}
images_expected1 = self.expectedImagesAfterUpDownFlip()
boxes_expected1 = self.expectedBoxesAfterUpDownFlip()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
return [
images_diff, images_diff_expected, boxes_diff, boxes_diff_expected
]
(images_diff_, images_diff_expected_, boxes_diff_,
boxes_diff_expected_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomVerticalFlipWithEmptyBoxes(self):
def graph_fn():
preprocess_options = [(preprocessor.random_vertical_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes
}
images_expected1 = self.expectedImagesAfterUpDownFlip()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
return [images_diff, images_diff_expected, boxes, boxes_expected]
(images_diff_, images_diff_expected_, boxes_,
boxes_expected_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomVerticalFlipWithCache(self):
keypoint_flip_permutation = self.createKeypointFlipPermutation()
preprocess_options = [
(preprocessor.random_vertical_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
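  # A note on the *WithCache tests: _testPreprocessorCache (defined earlier
  # in this file) runs the same preprocessing options twice against a shared
  # preprocessor cache, which is expected to replay identical random draws so
  # that paired inputs (image, boxes, masks, keypoints) receive the same
  # geometric transform on each run.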
def testRunRandomVerticalFlipWithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_vertical_flip, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints, _ = self.createTestKeypoints()
keypoint_flip_permutation = self.createKeypointFlipPermutation()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocess_options = [
(preprocessor.random_vertical_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
self.assertIsNotNone(boxes)
self.assertIsNotNone(masks)
self.assertIsNotNone(keypoints)
def testRandomRotation90(self):
def graph_fn():
preprocess_options = [(preprocessor.random_rotation90, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes
}
images_expected1 = self.expectedImagesAfterRot90()
boxes_expected1 = self.expectedBoxesAfterRot90()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
return [
images_diff, images_diff_expected, boxes_diff, boxes_diff_expected
]
(images_diff_, images_diff_expected_, boxes_diff_,
boxes_diff_expected_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomRotation90WithEmptyBoxes(self):
def graph_fn():
preprocess_options = [(preprocessor.random_rotation90, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes
}
images_expected1 = self.expectedImagesAfterRot90()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
return [images_diff, images_diff_expected, boxes, boxes_expected]
(images_diff_, images_diff_expected_, boxes_,
boxes_expected_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomRotation90WithCache(self):
preprocess_options = [(preprocessor.random_rotation90, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRunRandomRotation90WithMaskAndKeypoints(self):
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints, _ = self.createTestKeypoints()
keypoint_rot_permutation = self.createKeypointRotPermutation()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocess_options = [(preprocessor.random_rotation90, {
'keypoint_rot_permutation': keypoint_rot_permutation
})]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
self.assertIsNotNone(boxes)
self.assertIsNotNone(masks)
self.assertIsNotNone(keypoints)
def testRandomPixelValueScale(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_min = tf.cast(images, dtype=tf.float32) * 0.9 / 255.0
images_max = tf.cast(images, dtype=tf.float32) * 1.1 / 255.0
images = tensor_dict[fields.InputDataFields.image]
values_greater = tf.greater_equal(images, images_min)
values_less = tf.less_equal(images, images_max)
values_true = tf.fill([1, 4, 4, 3], True)
return [values_greater, values_less, values_true]
(values_greater_, values_less_,
values_true_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(values_greater_, values_true_)
self.assertAllClose(values_less_, values_true_)
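  # random_pixel_value_scale multiplies each pixel by an independent random
  # factor; the bounds asserted above (0.9x to 1.1x of the normalized input)
  # mirror the op's default minval/maxval of 0.9 and 1.1 -- an assumption
  # here, since the test passes an empty kwargs dict and relies on defaults.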
def testRandomPixelValueScaleWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_pixel_value_scale, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testRandomImageScale(self):
def graph_fn():
preprocess_options = [(preprocessor.random_image_scale, {})]
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images_scaled = tensor_dict[fields.InputDataFields.image]
images_original_shape = tf.shape(images_original)
images_scaled_shape = tf.shape(images_scaled)
return [images_original_shape, images_scaled_shape]
(images_original_shape_,
images_scaled_shape_) = self.execute_cpu(graph_fn, [])
self.assertLessEqual(images_original_shape_[1] * 0.5,
images_scaled_shape_[1])
self.assertGreaterEqual(images_original_shape_[1] * 2.0,
images_scaled_shape_[1])
self.assertLessEqual(images_original_shape_[2] * 0.5,
images_scaled_shape_[2])
self.assertGreaterEqual(images_original_shape_[2] * 2.0,
images_scaled_shape_[2])
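  # The bounds checked above (0.5x to 2.0x of the original spatial dims)
  # mirror the op's default scale range of [0.5, 2.0] -- an assumption, as
  # the test relies on defaults. The op draws one ratio and resizes both
  # dimensions by it, so only the shape, not the content, is asserted here.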
def testRandomImageScaleWithCache(self):
preprocess_options = [(preprocessor.random_image_scale, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomRGBtoGray(self):
def graph_fn():
preprocess_options = [(preprocessor.random_rgb_to_gray, {})]
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images_gray = tensor_dict[fields.InputDataFields.image]
images_gray_r, images_gray_g, images_gray_b = tf.split(
value=images_gray, num_or_size_splits=3, axis=3)
images_r, images_g, images_b = tf.split(
value=images_original, num_or_size_splits=3, axis=3)
images_r_diff1 = tf.squared_difference(
tf.cast(images_r, dtype=tf.float32),
tf.cast(images_gray_r, dtype=tf.float32))
images_r_diff2 = tf.squared_difference(
tf.cast(images_gray_r, dtype=tf.float32),
tf.cast(images_gray_g, dtype=tf.float32))
images_r_diff = tf.multiply(images_r_diff1, images_r_diff2)
images_g_diff1 = tf.squared_difference(
tf.cast(images_g, dtype=tf.float32),
tf.cast(images_gray_g, dtype=tf.float32))
images_g_diff2 = tf.squared_difference(
tf.cast(images_gray_g, dtype=tf.float32),
tf.cast(images_gray_b, dtype=tf.float32))
images_g_diff = tf.multiply(images_g_diff1, images_g_diff2)
images_b_diff1 = tf.squared_difference(
tf.cast(images_b, dtype=tf.float32),
tf.cast(images_gray_b, dtype=tf.float32))
images_b_diff2 = tf.squared_difference(
tf.cast(images_gray_b, dtype=tf.float32),
tf.cast(images_gray_r, dtype=tf.float32))
images_b_diff = tf.multiply(images_b_diff1, images_b_diff2)
image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1])
return [images_r_diff, images_g_diff, images_b_diff, image_zero1]
(images_r_diff_, images_g_diff_, images_b_diff_,
image_zero1_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(images_r_diff_, image_zero1_)
self.assertAllClose(images_g_diff_, image_zero1_)
self.assertAllClose(images_b_diff_, image_zero1_)
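  # random_rgb_to_gray converts only with some probability, so each channel
  # check above multiplies two squared differences that cannot both be
  # nonzero: if no conversion happened, the output equals the input and
  # terms like (r - gray_r)^2 vanish; if it did, the three output channels
  # are identical and cross-channel terms like (gray_r - gray_g)^2 vanish
  # instead. Either way the product is zero.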
def testRandomRGBtoGrayWithCache(self):
preprocess_options = [(
preprocessor.random_rgb_to_gray, {'probability': 0.5})]
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomAdjustBrightness(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_brightness, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_bright = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_bright_shape = tf.shape(images_bright)
return [image_original_shape, image_bright_shape]
(image_original_shape_,
image_bright_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image_original_shape_, image_bright_shape_)
def testRandomAdjustBrightnessWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_brightness, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomAdjustContrast(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_contrast, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_contrast = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_contrast_shape = tf.shape(images_contrast)
return [image_original_shape, image_contrast_shape]
(image_original_shape_,
image_contrast_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image_original_shape_, image_contrast_shape_)
def testRandomAdjustContrastWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_contrast, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomAdjustHue(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_hue, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_hue = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_hue_shape = tf.shape(images_hue)
return [image_original_shape, image_hue_shape]
(image_original_shape_, image_hue_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image_original_shape_, image_hue_shape_)
def testRandomAdjustHueWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_hue, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomDistortColor(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_distort_color, {}))
images_original = self.createTestImages()
images_original_shape = tf.shape(images_original)
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_distorted_color = tensor_dict[fields.InputDataFields.image]
images_distorted_color_shape = tf.shape(images_distorted_color)
return [images_original_shape, images_distorted_color_shape]
(images_original_shape_,
images_distorted_color_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(images_original_shape_, images_distorted_color_shape_)
def testRandomDistortColorWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_distort_color, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomJitterBoxes(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes, {}))
boxes = self.createRandomTextBoxes()
boxes_shape = tf.shape(boxes)
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
distorted_boxes_shape = tf.shape(distorted_boxes)
return [boxes_shape, distorted_boxes_shape]
(boxes_shape_, distorted_boxes_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
@parameterized.parameters(
['expand', 'shrink', 'expand_symmetric', 'shrink_symmetric',
'expand_symmetric_xy', 'shrink_symmetric_xy']
)
def testRandomJitterBoxesZeroRatio(self, jitter_mode):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes,
                                    {'ratio': 0.0, 'jitter_mode': jitter_mode}))
boxes = self.createRandomTextBoxes()
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
return [boxes, distorted_boxes]
(boxes, distorted_boxes) = self.execute_cpu(graph_fn, [])
self.assertAllClose(boxes, distorted_boxes)
def testRandomJitterBoxesExpand(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes,
{'jitter_mode': 'expand'}))
boxes = self.createRandomTextBoxes()
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
return [boxes, distorted_boxes]
boxes, distorted_boxes = self.execute_cpu(graph_fn, [])
ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = (
distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2],
distorted_boxes[:, 3])
self.assertTrue(np.all(distorted_ymin <= ymin))
self.assertTrue(np.all(distorted_xmin <= xmin))
self.assertTrue(np.all(distorted_ymax >= ymax))
self.assertTrue(np.all(distorted_xmax >= xmax))
def testRandomJitterBoxesExpandSymmetric(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes,
{'jitter_mode': 'expand_symmetric'}))
boxes = self.createRandomTextBoxes()
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
return [boxes, distorted_boxes]
boxes, distorted_boxes = self.execute_cpu(graph_fn, [])
ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = (
distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2],
distorted_boxes[:, 3])
self.assertTrue(np.all(distorted_ymin <= ymin))
self.assertTrue(np.all(distorted_xmin <= xmin))
self.assertTrue(np.all(distorted_ymax >= ymax))
self.assertTrue(np.all(distorted_xmax >= xmax))
self.assertAllClose(ymin - distorted_ymin, distorted_ymax - ymax, rtol=1e-5)
self.assertAllClose(xmin - distorted_xmin, distorted_xmax - xmax, rtol=1e-5)
def testRandomJitterBoxesExpandSymmetricXY(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes,
{'jitter_mode': 'expand_symmetric_xy'}))
boxes = self.createRandomTextBoxes()
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
return [boxes, distorted_boxes]
boxes, distorted_boxes = self.execute_cpu(graph_fn, [])
ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = (
distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2],
distorted_boxes[:, 3])
self.assertTrue(np.all(distorted_ymin <= ymin))
self.assertTrue(np.all(distorted_xmin <= xmin))
self.assertTrue(np.all(distorted_ymax >= ymax))
self.assertTrue(np.all(distorted_xmax >= xmax))
self.assertAllClose(ymin - distorted_ymin, distorted_ymax - ymax, rtol=1e-5)
self.assertAllClose(xmin - distorted_xmin, distorted_xmax - xmax, rtol=1e-5)
height, width = tf.maximum(1e-6, ymax - ymin), tf.maximum(1e-6, xmax - xmin)
self.assertAllClose((distorted_ymax - ymax) / height,
(distorted_xmax - xmax) / width, rtol=1e-5)
self.assertAllLessEqual((distorted_ymax - ymax) / height, 0.05)
self.assertAllGreaterEqual((distorted_ymax - ymax) / width, 0.00)
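  # In the *_symmetric_xy modes a single jitter ratio is drawn per box and
  # applied to both axes, scaled by the box height and width respectively --
  # hence the assertAllClose between the per-axis displacement ratios above.
  # The 0.05 upper bound matches random_jitter_boxes' default 'ratio'
  # argument (an assumption here, since the test does not pass one).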
def testRandomJitterBoxesShrink(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes,
{'jitter_mode': 'shrink'}))
boxes = self.createTestBoxes()
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
return [boxes, distorted_boxes]
boxes, distorted_boxes = self.execute_cpu(graph_fn, [])
ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = (
distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2],
distorted_boxes[:, 3])
self.assertTrue(np.all(distorted_ymin >= ymin))
self.assertTrue(np.all(distorted_xmin >= xmin))
self.assertTrue(np.all(distorted_ymax <= ymax))
self.assertTrue(np.all(distorted_xmax <= xmax))
def testRandomJitterBoxesShrinkSymmetric(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes,
{'jitter_mode': 'shrink_symmetric'}))
boxes = self.createTestBoxes()
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
return [boxes, distorted_boxes]
boxes, distorted_boxes = self.execute_cpu(graph_fn, [])
ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = (
distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2],
distorted_boxes[:, 3])
self.assertTrue(np.all(distorted_ymin >= ymin))
self.assertTrue(np.all(distorted_xmin >= xmin))
self.assertTrue(np.all(distorted_ymax <= ymax))
self.assertTrue(np.all(distorted_xmax <= xmax))
self.assertAllClose(ymin - distorted_ymin, distorted_ymax - ymax, rtol=1e-5)
self.assertAllClose(xmin - distorted_xmin, distorted_xmax - xmax, rtol=1e-5)
def testRandomJitterBoxesShrinkSymmetricXY(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes,
{'jitter_mode': 'shrink_symmetric_xy'}))
boxes = self.createTestBoxes()
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
return [boxes, distorted_boxes]
boxes, distorted_boxes = self.execute_cpu(graph_fn, [])
ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = (
distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2],
distorted_boxes[:, 3])
self.assertTrue(np.all(distorted_ymin >= ymin))
self.assertTrue(np.all(distorted_xmin >= xmin))
self.assertTrue(np.all(distorted_ymax <= ymax))
self.assertTrue(np.all(distorted_xmax <= xmax))
self.assertAllClose(ymin - distorted_ymin, distorted_ymax - ymax, rtol=1e-5)
self.assertAllClose(xmin - distorted_xmin, distorted_xmax - xmax, rtol=1e-5)
height, width = tf.maximum(1e-6, ymax - ymin), tf.maximum(1e-6, xmax - xmin)
self.assertAllClose((ymax - distorted_ymax) / height,
(xmax - distorted_xmax) / width, rtol=1e-5)
self.assertAllLessEqual((ymax - distorted_ymax) / height, 0.05)
    self.assertAllGreaterEqual((ymax - distorted_ymax) / width, 0.00)
def testRandomCropImage(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
return [
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
]
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithCache(self):
preprocess_options = [(preprocessor.random_rgb_to_gray,
{'probability': 0.5}),
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,
}),
(preprocessor.random_crop_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testRandomCropImageGrayscale(self):
def graph_fn():
preprocessing_options = [(preprocessor.rgb_to_gray, {}),
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,
}), (preprocessor.random_crop_image, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
return [
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
]
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithBoxOutOfImage(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxesOutOfImage()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
return [
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
]
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithRandomCoefOne(self):
def graph_fn():
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_image, {
'random_coef': 1.0
})]
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_weights = distorted_tensor_dict[
fields.InputDataFields.groundtruth_weights]
boxes_shape = tf.shape(boxes)
distorted_boxes_shape = tf.shape(distorted_boxes)
images_shape = tf.shape(images)
distorted_images_shape = tf.shape(distorted_images)
return [
boxes_shape, distorted_boxes_shape, images_shape,
distorted_images_shape, images, distorted_images, boxes,
distorted_boxes, labels, distorted_labels, weights, distorted_weights
]
(boxes_shape_, distorted_boxes_shape_, images_shape_,
distorted_images_shape_, images_, distorted_images_, boxes_,
distorted_boxes_, labels_, distorted_labels_, weights_,
distorted_weights_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
self.assertAllEqual(images_shape_, distorted_images_shape_)
self.assertAllClose(images_, distorted_images_)
self.assertAllClose(boxes_, distorted_boxes_)
self.assertAllEqual(labels_, distorted_labels_)
self.assertAllEqual(weights_, distorted_weights_)
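  # random_coef is the probability of skipping the crop and returning the
  # input unchanged; with random_coef=1.0 the crop is never applied, so every
  # output tensor must be identical to its input, as the assertions verify.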
def testRandomCropWithMockSampleDistortedBoundingBox(self):
def graph_fn():
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createColorfulTestImage()
boxes = tf.constant([[0.1, 0.1, 0.8, 0.3], [0.2, 0.4, 0.75, 0.75],
[0.3, 0.1, 0.4, 0.7]],
dtype=tf.float32)
labels = tf.constant([1, 7, 11], dtype=tf.int32)
weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(tf.image, 'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (tf.constant(
[6, 143, 0], dtype=tf.int32), tf.constant(
[190, 237, -1], dtype=tf.int32), tf.constant(
[[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_weights = distorted_tensor_dict[
fields.InputDataFields.groundtruth_weights]
expected_boxes = tf.constant(
[[0.178947, 0.07173, 0.75789469, 0.66244733],
[0.28421, 0.0, 0.38947365, 0.57805908]],
dtype=tf.float32)
expected_labels = tf.constant([7, 11], dtype=tf.int32)
expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32)
return [
distorted_boxes, distorted_labels, distorted_weights,
expected_boxes, expected_labels, expected_weights
]
(distorted_boxes_, distorted_labels_, distorted_weights_, expected_boxes_,
expected_labels_, expected_weights_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(distorted_boxes_, expected_boxes_)
self.assertAllEqual(distorted_labels_, expected_labels_)
self.assertAllEqual(distorted_weights_, expected_weights_)
def testRandomCropWithoutClipBoxes(self):
def graph_fn():
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createColorfulTestImage()
boxes = tf.constant([[0.1, 0.1, 0.8, 0.3],
[0.2, 0.4, 0.75, 0.75],
[0.3, 0.1, 0.4, 0.7]], dtype=tf.float32)
keypoints = tf.constant([
[[0.1, 0.1], [0.8, 0.3]],
[[0.2, 0.4], [0.75, 0.75]],
[[0.3, 0.1], [0.4, 0.7]],
], dtype=tf.float32)
labels = tf.constant([1, 7, 11], dtype=tf.int32)
weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_keypoints: keypoints,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
preprocessing_options = [(preprocessor.random_crop_image, {
'clip_boxes': False,
})]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
with mock.patch.object(tf.image, 'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (tf.constant(
[6, 143, 0], dtype=tf.int32), tf.constant(
[190, 237, -1], dtype=tf.int32), tf.constant(
[[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options,
func_arg_map=preprocessor_arg_map)
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_weights = distorted_tensor_dict[
fields.InputDataFields.groundtruth_weights]
expected_boxes = tf.constant(
[[0.178947, 0.07173, 0.75789469, 0.66244733],
[0.28421, -0.434599, 0.38947365, 0.57805908]],
dtype=tf.float32)
expected_keypoints = tf.constant(
[[[0.178947, 0.07173], [0.75789469, 0.66244733]],
[[0.28421, -0.434599], [0.38947365, 0.57805908]]],
dtype=tf.float32)
expected_labels = tf.constant([7, 11], dtype=tf.int32)
expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32)
return [distorted_boxes, distorted_keypoints, distorted_labels,
distorted_weights, expected_boxes, expected_keypoints,
expected_labels, expected_weights]
(distorted_boxes_, distorted_keypoints_, distorted_labels_,
distorted_weights_, expected_boxes_, expected_keypoints_, expected_labels_,
expected_weights_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(distorted_boxes_, expected_boxes_)
self.assertAllClose(distorted_keypoints_, expected_keypoints_)
self.assertAllEqual(distorted_labels_, expected_labels_)
self.assertAllEqual(distorted_weights_, expected_weights_)
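  # With clip_boxes=False the same window arithmetic applies, but coordinates
  # outside the crop are kept as-is: the third input box maps to an xmin of
  # (0.1 - 0.3575) / 0.5925 = -0.434599, which survives unclipped in both the
  # box and keypoint expectations above.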
def testRandomCropImageWithMultiClassScores(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
multiclass_scores = self.createTestMultiClassScores()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.multiclass_scores: multiclass_scores
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_multiclass_scores = distorted_tensor_dict[
fields.InputDataFields.multiclass_scores]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
multiclass_scores_rank = tf.rank(multiclass_scores)
distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
return [
boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
distorted_images_rank, multiclass_scores_rank,
distorted_multiclass_scores_rank, distorted_multiclass_scores
]
(boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_, multiclass_scores_rank_,
distorted_multiclass_scores_rank_,
distorted_multiclass_scores_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
self.assertAllEqual(multiclass_scores_rank_,
distorted_multiclass_scores_rank_)
self.assertAllEqual(distorted_boxes_.shape[0],
distorted_multiclass_scores_.shape[0])
def testStrictRandomCropImageWithGroundtruthWeights(self):
def graph_fn():
image = self.createColorfulTestImage()[0]
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
new_image, new_boxes, new_labels, new_groundtruth_weights = (
preprocessor._strict_random_crop_image(
image, boxes, labels, weights))
return [new_image, new_boxes, new_labels, new_groundtruth_weights]
(new_image, new_boxes, _,
new_groundtruth_weights) = self.execute_cpu(graph_fn, [])
expected_boxes = np.array(
[[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
self.assertAllEqual(new_image.shape, [190, 237, 3])
self.assertAllEqual(new_groundtruth_weights, [1.0, 0.5])
self.assertAllClose(
new_boxes.flatten(), expected_boxes.flatten())
def testStrictRandomCropImageWithMasks(self):
def graph_fn():
image = self.createColorfulTestImage()[0]
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
new_image, new_boxes, new_labels, new_weights, new_masks = (
preprocessor._strict_random_crop_image(
image, boxes, labels, weights, masks=masks))
return [new_image, new_boxes, new_labels, new_weights, new_masks]
(new_image, new_boxes, _, _,
new_masks) = self.execute_cpu(graph_fn, [])
expected_boxes = np.array(
[[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
self.assertAllEqual(new_image.shape, [190, 237, 3])
self.assertAllEqual(new_masks.shape, [2, 190, 237])
self.assertAllClose(
new_boxes.flatten(), expected_boxes.flatten())
def testStrictRandomCropImageWithMaskWeights(self):
def graph_fn():
image = self.createColorfulTestImage()[0]
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
mask_weights = tf.constant([1.0, 0.0], dtype=tf.float32)
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
results = preprocessor._strict_random_crop_image(
image, boxes, labels, weights, masks=masks,
mask_weights=mask_weights)
return results
(new_image, new_boxes, _, _,
new_masks, new_mask_weights) = self.execute_cpu(graph_fn, [])
expected_boxes = np.array(
[[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
self.assertAllEqual(new_image.shape, [190, 237, 3])
self.assertAllEqual(new_masks.shape, [2, 190, 237])
self.assertAllClose(new_mask_weights, [1.0, 0.0])
self.assertAllClose(
new_boxes.flatten(), expected_boxes.flatten())
def testStrictRandomCropImageWithKeypoints(self):
def graph_fn():
image = self.createColorfulTestImage()[0]
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
keypoints, keypoint_visibilities = self.createTestKeypoints()
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
(new_image, new_boxes, new_labels, new_weights, new_keypoints,
new_keypoint_visibilities) = preprocessor._strict_random_crop_image(
image, boxes, labels, weights, keypoints=keypoints,
keypoint_visibilities=keypoint_visibilities)
return [new_image, new_boxes, new_labels, new_weights, new_keypoints,
new_keypoint_visibilities]
(new_image, new_boxes, _, _, new_keypoints,
new_keypoint_visibilities) = self.execute_cpu(graph_fn, [])
expected_boxes = np.array([
[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0],], dtype=np.float32)
expected_keypoints = np.array([
[[np.nan, np.nan],
[np.nan, np.nan],
[np.nan, np.nan]],
[[0.38947368, 0.07173],
[0.49473682, 0.24050637],
[0.60000002, 0.40928277]]
], dtype=np.float32)
expected_keypoint_visibilities = [
[False, False, False],
[False, True, True]
]
self.assertAllEqual(new_image.shape, [190, 237, 3])
self.assertAllClose(
new_boxes, expected_boxes)
self.assertAllClose(
new_keypoints, expected_keypoints)
self.assertAllEqual(
new_keypoint_visibilities, expected_keypoint_visibilities)
def testRunRandomCropImageWithMasks(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
mask_weights = tf.constant([1.0, 0.0], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_instance_mask_weights:
mask_weights
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_instance_mask_weights=True)
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict,
preprocessing_options,
func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_masks = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
distorted_mask_weights = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_mask_weights]
return [distorted_image, distorted_boxes, distorted_labels,
distorted_masks, distorted_mask_weights]
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_masks_, distorted_mask_weights_) = self.execute_cpu(graph_fn, [])
expected_boxes = np.array([
[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0],
], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
self.assertAllEqual(distorted_masks_.shape, [2, 190, 237])
self.assertAllClose(distorted_mask_weights_, [1.0, 0.0])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(
distorted_boxes_.flatten(), expected_boxes.flatten())
def testRunRandomCropImageWithKeypointsInsideCrop(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
keypoints = self.createTestKeypointsInsideCrop()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_keypoints: keypoints,
fields.InputDataFields.groundtruth_weights: weights
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict,
preprocessing_options,
func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
return [distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints]
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = self.execute_cpu(graph_fn, [])
expected_boxes = np.array([
[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0],
], dtype=np.float32)
expected_keypoints = np.array([
[[0.38947368, 0.07173],
[0.49473682, 0.24050637],
[0.60000002, 0.40928277]],
[[0.38947368, 0.07173],
[0.49473682, 0.24050637],
[0.60000002, 0.40928277]]
])
self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(
distorted_boxes_.flatten(), expected_boxes.flatten())
self.assertAllClose(
distorted_keypoints_.flatten(), expected_keypoints.flatten())
def testRunRandomCropImageWithKeypointsOutsideCrop(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
keypoints = self.createTestKeypointsOutsideCrop()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict,
preprocessing_options,
func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
return [distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints]
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = self.execute_cpu(graph_fn, [])
expected_boxes = np.array([
[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0],
], dtype=np.float32)
expected_keypoints = np.array([
[[np.nan, np.nan],
[np.nan, np.nan],
[np.nan, np.nan]],
[[np.nan, np.nan],
[np.nan, np.nan],
[np.nan, np.nan]],
])
self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(
distorted_boxes_.flatten(), expected_boxes.flatten())
self.assertAllClose(
distorted_keypoints_.flatten(), expected_keypoints.flatten())
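  # Unlike boxes, keypoints falling outside the crop window are not clipped:
  # they are invalidated by setting their coordinates to NaN, which is why
  # expected_keypoints above is all-NaN while the boxes are still recovered.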
def testRunRandomCropImageWithDensePose(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
dp_num_points, dp_part_ids, dp_surface_coords = self.createTestDensePose()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_dp_num_points: dp_num_points,
fields.InputDataFields.groundtruth_dp_part_ids: dp_part_ids,
fields.InputDataFields.groundtruth_dp_surface_coords:
dp_surface_coords
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_dense_pose=True)
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 40, 0], dtype=tf.int32),
tf.constant([134, 340, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.1, 0.7, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict,
preprocessing_options,
func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_dp_num_points = distorted_tensor_dict[
fields.InputDataFields.groundtruth_dp_num_points]
distorted_dp_part_ids = distorted_tensor_dict[
fields.InputDataFields.groundtruth_dp_part_ids]
distorted_dp_surface_coords = distorted_tensor_dict[
fields.InputDataFields.groundtruth_dp_surface_coords]
return [distorted_image, distorted_dp_num_points, distorted_dp_part_ids,
distorted_dp_surface_coords]
(distorted_image_, distorted_dp_num_points_, distorted_dp_part_ids_,
distorted_dp_surface_coords_) = self.execute_cpu(graph_fn, [])
expected_dp_num_points = np.array([1, 1])
expected_dp_part_ids = np.array([[4], [0]])
expected_dp_surface_coords = np.array([
[[0.10447761, 0.1176470, 0.6, 0.7]],
[[0.10447761, 0.2352941, 0.2, 0.8]],
])
self.assertAllEqual(distorted_image_.shape, [1, 134, 340, 3])
self.assertAllEqual(distorted_dp_num_points_, expected_dp_num_points)
self.assertAllEqual(distorted_dp_part_ids_, expected_dp_part_ids)
self.assertAllClose(distorted_dp_surface_coords_,
expected_dp_surface_coords)
def testRunRetainBoxesAboveThreshold(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
preprocessor_arg_map = preprocessor.get_default_func_arg_map()
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_boxes = retained_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
retained_labels = retained_tensor_dict[
fields.InputDataFields.groundtruth_classes]
retained_weights = retained_tensor_dict[
fields.InputDataFields.groundtruth_weights]
return [retained_boxes, retained_labels, retained_weights,
self.expectedBoxesAfterThresholding(),
self.expectedLabelsAfterThresholding(),
self.expectedLabelScoresAfterThresholding()]
(retained_boxes_, retained_labels_, retained_weights_,
expected_retained_boxes_, expected_retained_labels_,
expected_retained_weights_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(retained_boxes_, expected_retained_boxes_)
self.assertAllClose(retained_labels_, expected_retained_labels_)
self.assertAllClose(
retained_weights_, expected_retained_weights_)
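  # retain_boxes_above_threshold filters detections by groundtruth weight:
  # with weights [1.0, 0.5] (see the strict-crop test above) and a threshold
  # of 0.6, only the first box and its label/weight should survive, which is
  # what the expected*AfterThresholding helpers encode. Whether the comparison
  # is strict or >= is immaterial for these values.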
def testRunRetainBoxesAboveThresholdWithMasks(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
masks = self.createTestMasks()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_weights=True,
include_instance_masks=True)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_masks = retained_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
return [retained_masks, self.expectedMasksAfterThresholding()]
(retained_masks_, expected_masks_) = self.execute(graph_fn, [])
self.assertAllClose(retained_masks_, expected_masks_)
def testRunRetainBoxesAboveThresholdWithKeypoints(self):
def graph_fn():
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
keypoints, _ = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_keypoints = retained_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
return [retained_keypoints, self.expectedKeypointsAfterThresholding()]
(retained_keypoints_, expected_keypoints_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(retained_keypoints_, expected_keypoints_)
def testRandomCropToAspectRatioWithCache(self):
preprocess_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testRunRandomCropToAspectRatioWithMasks(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True)
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
with mock.patch.object(preprocessor,
'_random_integer') as mock_random_integer:
mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict,
preprocessing_options,
func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_masks = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
return [
distorted_image, distorted_boxes, distorted_labels, distorted_masks
]
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_masks_) = self.execute_cpu(graph_fn, [])
expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
self.assertAllEqual(distorted_labels_, [1])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllEqual(distorted_masks_.shape, [1, 200, 200])
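  # Worked expectation for this crop: the 200x400 image is cropped to 200x200
  # (random_crop_to_aspect_ratio's aspect_ratio defaults to 1.0 -- an
  # assumption, since the test passes no explicit value), and the mocked
  # _random_integer pins the offset to 0, i.e. the leftmost 200 columns. The
  # first box spans x = [100, 400] px, so its intersection with [0, 200]
  # renormalizes to x = [0.5, 1.0], giving [0.0, 0.5, 0.75, 1.0]; the second
  # box starts at x = 200 px, has no overlap, and is dropped together with
  # its label and mask.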
def testRunRandomCropToAspectRatioCenterCrop(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True)
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {
'center_crop': True
})]
with mock.patch.object(preprocessor,
'_random_integer') as mock_random_integer:
mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict,
preprocessing_options,
func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
return [
distorted_image, distorted_boxes, distorted_labels
]
(distorted_image_, distorted_boxes_, distorted_labels_) = self.execute_cpu(
graph_fn, [])
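    # Expected values: with center_crop=True the middle 200x200 window
    # (x in [0.25, 0.75]) is kept, so both boxes survive; their x-ranges clip
    # to [0.25, 0.75] and [0.5, 0.75], which renormalize to [0.0, 1.0] and
    # [0.5, 1.0] respectively.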
expected_boxes = np.array([[0.0, 0.0, 0.75, 1.0],
[0.25, 0.5, 0.75, 1.0]], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
def testRunRandomCropToAspectRatioWithKeypoints(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
keypoints, _ = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
with mock.patch.object(preprocessor,
'_random_integer') as mock_random_integer:
mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict,
preprocessing_options,
func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
return [distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints]
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = self.execute_cpu(graph_fn, [])
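    # Expected values: the mocked offset keeps the left half (x in [0, 0.5]),
    # so only the first instance survives; its keypoint x-coordinates
    # [0.1, 0.2, 0.3] double while the y-coordinates stay put.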
expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
expected_keypoints = np.array(
[[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
self.assertAllEqual(distorted_labels_, [1])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllClose(distorted_keypoints_.flatten(),
expected_keypoints.flatten())
def testRandomPadToAspectRatioWithCache(self):
preprocess_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRunRandomPadToAspectRatioWithMinMaxPaddedSizeRatios(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map()
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio,
{'min_padded_size_ratio': (4.0, 4.0),
'max_padded_size_ratio': (4.0, 4.0)})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
return [distorted_image, distorted_boxes, distorted_labels]
distorted_image_, distorted_boxes_, distorted_labels_ = self.execute_cpu(
graph_fn, [])
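    # Expected values (a sketch of the derivation): with aspect_ratio left at
    # its default of 1.0, the 200x400 image first maps to a 400x400 target;
    # pinning the padded-size ratios to (4.0, 4.0) then scales that target by
    # 2 to an 800x800 canvas (the 4x height cap of 800 pixels binds). Content
    # stays anchored at the top-left, so box coordinates rescale by
    # 200/800 = 0.25 in y and 400/800 = 0.5 in x.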
expected_boxes = np.array(
[[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]],
dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 800, 800, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
def testRunRandomPadToAspectRatioWithMasks(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True)
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_masks = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
return [
distorted_image, distorted_boxes, distorted_labels, distorted_masks
]
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_masks_) = self.execute_cpu(graph_fn, [])
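    # Expected values: padding the 200x400 image to the default aspect ratio
    # of 1.0 yields a 400x400 canvas, so y-coordinates scale by 200/400 = 0.5
    # while x-coordinates are unchanged; masks are padded to the same size.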
expected_boxes = np.array(
[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllEqual(distorted_masks_.shape, [2, 400, 400])
def testRunRandomPadToAspectRatioWithKeypoints(self):
def graph_fn():
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
keypoints, _ = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
return [
distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints
]
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = self.execute_cpu(graph_fn, [])
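    # Expected values: as above, padding to a 400x400 canvas halves all
    # y-coordinates (for boxes and keypoints alike) and leaves x untouched.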
expected_boxes = np.array(
[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)
expected_keypoints = np.array([
[[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]],
[[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]],
], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllClose(distorted_keypoints_.flatten(),
expected_keypoints.flatten())
def testRandomPadImageWithCache(self):
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,}), (preprocessor.random_pad_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomPadImage(self):
def graph_fn():
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_pad_image, {})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
return [boxes_shape, padded_boxes_shape, images_shape,
padded_images_shape, boxes, padded_boxes]
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_, boxes_, padded_boxes_) = self.execute_cpu(graph_fn,
[])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
    self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
    self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
    self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
    self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
padded_boxes_[:, 2] - padded_boxes_[:, 0])))
self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
padded_boxes_[:, 3] - padded_boxes_[:, 1])))
def testRandomPadImageCenterPad(self):
def graph_fn():
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_pad_image, {
'center_pad': True,
'min_image_size': [400, 400],
'max_image_size': [400, 400],
})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
padded_labels = padded_tensor_dict[
fields.InputDataFields.groundtruth_classes]
return [padded_images, padded_boxes, padded_labels]
(padded_images_, padded_boxes_, padded_labels_) = self.execute_cpu(
graph_fn, [])
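    # Expected values: with center_pad=True the 1x200x400 image is padded
    # symmetrically in height to 400x400 (100 rows above and below), so
    # y' = (200 * y + 100) / 400 and x is unchanged.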
    expected_boxes = np.array([[0.25, 0.25, 0.625, 1.0],
                               [0.375, 0.5, 0.625, 1.0]], dtype=np.float32)
self.assertAllEqual(padded_images_.shape, [1, 400, 400, 3])
self.assertAllEqual(padded_labels_, [1, 2])
self.assertAllClose(padded_boxes_.flatten(),
expected_boxes.flatten())
@parameterized.parameters(
{'include_dense_pose': False},
)
def testRandomPadImageWithKeypointsAndMasks(self, include_dense_pose):
def graph_fn():
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
masks = self.createTestMasks()
keypoints, _ = self.createTestKeypoints()
_, _, dp_surface_coords = self.createTestDensePose()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints,
fields.InputDataFields.groundtruth_dp_surface_coords:
dp_surface_coords
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_pad_image, {})]
func_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True,
include_keypoints=True,
include_keypoint_visibilities=True,
include_dense_pose=include_dense_pose)
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options,
func_arg_map=func_arg_map)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
padded_masks = padded_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
padded_keypoints = padded_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
padded_masks_shape = tf.shape(padded_masks)
keypoints_shape = tf.shape(keypoints)
padded_keypoints_shape = tf.shape(padded_keypoints)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
outputs = [boxes_shape, padded_boxes_shape, padded_masks_shape,
keypoints_shape, padded_keypoints_shape, images_shape,
padded_images_shape, boxes, padded_boxes, keypoints,
padded_keypoints]
if include_dense_pose:
padded_dp_surface_coords = padded_tensor_dict[
fields.InputDataFields.groundtruth_dp_surface_coords]
outputs.extend([dp_surface_coords, padded_dp_surface_coords])
return outputs
outputs = self.execute_cpu(graph_fn, [])
boxes_shape_ = outputs[0]
padded_boxes_shape_ = outputs[1]
padded_masks_shape_ = outputs[2]
keypoints_shape_ = outputs[3]
padded_keypoints_shape_ = outputs[4]
images_shape_ = outputs[5]
padded_images_shape_ = outputs[6]
boxes_ = outputs[7]
padded_boxes_ = outputs[8]
keypoints_ = outputs[9]
padded_keypoints_ = outputs[10]
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
self.assertAllEqual(keypoints_shape_, padded_keypoints_shape_)
    self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
    self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
    self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
    self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
self.assertAllEqual(padded_masks_shape_[1:3], padded_images_shape_[1:3])
self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
padded_boxes_[:, 2] - padded_boxes_[:, 0])))
self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
padded_boxes_[:, 3] - padded_boxes_[:, 1])))
self.assertTrue(np.all((keypoints_[1, :, 0] - keypoints_[0, :, 0]) >= (
padded_keypoints_[1, :, 0] - padded_keypoints_[0, :, 0])))
self.assertTrue(np.all((keypoints_[1, :, 1] - keypoints_[0, :, 1]) >= (
padded_keypoints_[1, :, 1] - padded_keypoints_[0, :, 1])))
if include_dense_pose:
dp_surface_coords = outputs[11]
padded_dp_surface_coords = outputs[12]
self.assertAllClose(padded_dp_surface_coords[:, :, 2:],
dp_surface_coords[:, :, 2:])
def testRandomAbsolutePadImage(self):
height_padding = 10
width_padding = 20
def graph_fn():
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: tf.cast(images, dtype=tf.float32),
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
preprocessing_options = [(preprocessor.random_absolute_pad_image, {
'max_height_padding': height_padding,
'max_width_padding': width_padding})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
original_shape = tf.shape(images)
final_shape = tf.shape(padded_tensor_dict[fields.InputDataFields.image])
return original_shape, final_shape
for _ in range(100):
original_shape, output_shape = self.execute_cpu(graph_fn, [])
_, height, width, _ = original_shape
self.assertGreaterEqual(output_shape[1], height)
self.assertLess(output_shape[1], height + height_padding)
self.assertGreaterEqual(output_shape[2], width)
self.assertLess(output_shape[2], width + width_padding)
def testRandomAbsolutePadImageWithKeypoints(self):
height_padding = 10
width_padding = 20
def graph_fn():
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
keypoints, _ = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: tf.cast(images, dtype=tf.float32),
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_keypoints: keypoints,
}
preprocessing_options = [(preprocessor.random_absolute_pad_image, {
'max_height_padding': height_padding,
'max_width_padding': width_padding
})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
original_shape = tf.shape(images)
final_shape = tf.shape(padded_tensor_dict[fields.InputDataFields.image])
padded_keypoints = padded_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
return (original_shape, final_shape, padded_keypoints)
for _ in range(100):
original_shape, output_shape, padded_keypoints_ = self.execute_cpu(
graph_fn, [])
_, height, width, _ = original_shape
self.assertGreaterEqual(output_shape[1], height)
self.assertLess(output_shape[1], height + height_padding)
self.assertGreaterEqual(output_shape[2], width)
self.assertLess(output_shape[2], width + width_padding)
      # Verify that the keypoints are populated. The correctness of the
      # keypoint coordinates is already covered by the random_pad_image tests.
self.assertEqual(padded_keypoints_.shape, (2, 3, 2))
def testRandomCropPadImageWithCache(self):
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,}), (preprocessor.random_crop_pad_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomCropPadImageWithRandomCoefOne(self):
def graph_fn():
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_pad_image, {
'random_coef': 1.0
})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
return [boxes_shape, padded_boxes_shape, images_shape,
padded_images_shape, boxes, padded_boxes]
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_, boxes_, padded_boxes_) = self.execute_cpu(graph_fn,
[])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
    self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
    self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
    self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
    self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
padded_boxes_[:, 2] - padded_boxes_[:, 0])))
self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
padded_boxes_[:, 3] - padded_boxes_[:, 1])))
def testRandomCropToAspectRatio(self):
def graph_fn():
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
tensor_dict = preprocessor.preprocess(tensor_dict, [])
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {
'aspect_ratio': 2.0
})]
cropped_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
cropped_images = cropped_tensor_dict[fields.InputDataFields.image]
cropped_boxes = cropped_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
cropped_boxes_shape = tf.shape(cropped_boxes)
images_shape = tf.shape(images)
cropped_images_shape = tf.shape(cropped_images)
return [
boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape
]
(boxes_shape_, cropped_boxes_shape_, images_shape_,
cropped_images_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
self.assertEqual(images_shape_[2], cropped_images_shape_[2])
def testRandomPadToAspectRatio(self):
def graph_fn():
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, [])
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {
'aspect_ratio': 2.0
})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
return [
boxes_shape, padded_boxes_shape, images_shape, padded_images_shape
]
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
self.assertEqual(images_shape_[1], padded_images_shape_[1])
self.assertEqual(2 * images_shape_[2], padded_images_shape_[2])
def testRandomBlackPatchesWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_black_patches, {
'size_to_image_ratio': 0.5
}))
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomBlackPatches(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_black_patches, {
'size_to_image_ratio': 0.5
}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
blacked_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
blacked_images = blacked_tensor_dict[fields.InputDataFields.image]
images_shape = tf.shape(images)
blacked_images_shape = tf.shape(blacked_images)
return [images_shape, blacked_images_shape]
(images_shape_, blacked_images_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(images_shape_, blacked_images_shape_)
def testRandomJpegQuality(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_jpeg_quality, {
'min_jpeg_quality': 0,
'max_jpeg_quality': 100
})]
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
encoded_images = processed_tensor_dict[fields.InputDataFields.image]
images_shape = tf.shape(images)
encoded_images_shape = tf.shape(encoded_images)
return [images_shape, encoded_images_shape]
images_shape_out, encoded_images_shape_out = self.execute_cpu(graph_fn, [])
self.assertAllEqual(images_shape_out, encoded_images_shape_out)
def testRandomJpegQualityKeepsStaticChannelShape(self):
    # The date below is at least three weeks past the forward-compatibility
    # horizon of TF 1.14 (2019/11/01); see
    # https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/compat/compat.py#L30
if not tf.compat.forward_compatible(year=2019, month=12, day=1):
self.skipTest('Skipping test for future functionality.')
preprocessing_options = [(preprocessor.random_jpeg_quality, {
'min_jpeg_quality': 0,
'max_jpeg_quality': 100
})]
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
encoded_images = processed_tensor_dict[fields.InputDataFields.image]
images_static_channels = images.shape[-1]
encoded_images_static_channels = encoded_images.shape[-1]
self.assertEqual(images_static_channels, encoded_images_static_channels)
def testRandomJpegQualityWithCache(self):
preprocessing_options = [(preprocessor.random_jpeg_quality, {
'min_jpeg_quality': 0,
'max_jpeg_quality': 100
})]
self._testPreprocessorCache(preprocessing_options)
def testRandomJpegQualityWithRandomCoefOne(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_jpeg_quality, {
'random_coef': 1.0
})]
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
encoded_images = processed_tensor_dict[fields.InputDataFields.image]
images_shape = tf.shape(images)
encoded_images_shape = tf.shape(encoded_images)
return [images, encoded_images, images_shape, encoded_images_shape]
(images_out, encoded_images_out, images_shape_out,
encoded_images_shape_out) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(images_shape_out, encoded_images_shape_out)
self.assertAllEqual(images_out, encoded_images_out)
def testRandomDownscaleToTargetPixels(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_downscale_to_target_pixels,
{
'min_target_pixels': 100,
'max_target_pixels': 101
})]
images = tf.random_uniform([1, 25, 100, 3])
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
downscaled_images = processed_tensor_dict[fields.InputDataFields.image]
downscaled_shape = tf.shape(downscaled_images)
return downscaled_shape
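    # Expected shape: the 25x100 input has 2500 pixels; downscaling to ~100
    # target pixels scales each side by sqrt(100 / 2500) = 0.2, giving 5x20.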
expected_shape = [1, 5, 20, 3]
downscaled_shape_out = self.execute_cpu(graph_fn, [])
self.assertAllEqual(downscaled_shape_out, expected_shape)
def testRandomDownscaleToTargetPixelsWithMasks(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_downscale_to_target_pixels,
{
'min_target_pixels': 100,
'max_target_pixels': 101
})]
images = tf.random_uniform([1, 25, 100, 3])
masks = tf.random_uniform([10, 25, 100])
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True)
processed_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
downscaled_images = processed_tensor_dict[fields.InputDataFields.image]
downscaled_masks = processed_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
downscaled_images_shape = tf.shape(downscaled_images)
downscaled_masks_shape = tf.shape(downscaled_masks)
return [downscaled_images_shape, downscaled_masks_shape]
expected_images_shape = [1, 5, 20, 3]
expected_masks_shape = [10, 5, 20]
(downscaled_images_shape_out,
downscaled_masks_shape_out) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(downscaled_images_shape_out, expected_images_shape)
self.assertAllEqual(downscaled_masks_shape_out, expected_masks_shape)
@parameterized.parameters(
{'test_masks': False},
{'test_masks': True}
)
def testRandomDownscaleToTargetPixelsWithCache(self, test_masks):
preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, {
'min_target_pixels': 100,
'max_target_pixels': 999
})]
self._testPreprocessorCache(preprocessing_options, test_masks=test_masks)
def testRandomDownscaleToTargetPixelsWithRandomCoefOne(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_downscale_to_target_pixels,
{
'random_coef': 1.0,
'min_target_pixels': 10,
'max_target_pixels': 20,
})]
images = tf.random_uniform([1, 25, 100, 3])
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
downscaled_images = processed_tensor_dict[fields.InputDataFields.image]
images_shape = tf.shape(images)
downscaled_images_shape = tf.shape(downscaled_images)
return [images, downscaled_images, images_shape, downscaled_images_shape]
(images_out, downscaled_images_out, images_shape_out,
downscaled_images_shape_out) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(images_shape_out, downscaled_images_shape_out)
self.assertAllEqual(images_out, downscaled_images_out)
def testRandomDownscaleToTargetPixelsIgnoresSmallImages(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_downscale_to_target_pixels,
{
'min_target_pixels': 1000,
'max_target_pixels': 1001
})]
images = tf.random_uniform([1, 10, 10, 3])
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
downscaled_images = processed_tensor_dict[fields.InputDataFields.image]
images_shape = tf.shape(images)
downscaled_images_shape = tf.shape(downscaled_images)
return [images, downscaled_images, images_shape, downscaled_images_shape]
(images_out, downscaled_images_out, images_shape_out,
downscaled_images_shape_out) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(images_shape_out, downscaled_images_shape_out)
self.assertAllEqual(images_out, downscaled_images_out)
  def testRandomPatchGaussianShape(self):
    def graph_fn():
      preprocessing_options = [(preprocessor.random_patch_gaussian, {
          'min_patch_size': 1,
          'max_patch_size': 200,
          'min_gaussian_stddev': 0.0,
          'max_gaussian_stddev': 2.0
      })]
      images = self.createTestImages()
      tensor_dict = {fields.InputDataFields.image: images}
      processed_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                      preprocessing_options)
      patched_images = processed_tensor_dict[fields.InputDataFields.image]
      images_shape = tf.shape(images)
      patched_images_shape = tf.shape(patched_images)
      return [images_shape, patched_images_shape]
    (images_shape_, patched_images_shape_) = self.execute_cpu(graph_fn, [])
    self.assertAllEqual(images_shape_, patched_images_shape_)
def testRandomPatchGaussianClippedToLowerBound(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_patch_gaussian, {
'min_patch_size': 20,
'max_patch_size': 40,
'min_gaussian_stddev': 50,
'max_gaussian_stddev': 100
})]
images = tf.zeros([1, 5, 4, 3])
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
patched_images = processed_tensor_dict[fields.InputDataFields.image]
return patched_images
patched_images = self.execute_cpu(graph_fn, [])
self.assertAllGreaterEqual(patched_images, 0.0)
def testRandomPatchGaussianClippedToUpperBound(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_patch_gaussian, {
'min_patch_size': 20,
'max_patch_size': 40,
'min_gaussian_stddev': 50,
'max_gaussian_stddev': 100
})]
images = tf.constant(255.0, shape=[1, 5, 4, 3])
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
patched_images = processed_tensor_dict[fields.InputDataFields.image]
return patched_images
patched_images = self.execute_cpu(graph_fn, [])
self.assertAllLessEqual(patched_images, 255.0)
def testRandomPatchGaussianWithCache(self):
preprocessing_options = [(preprocessor.random_patch_gaussian, {
'min_patch_size': 1,
'max_patch_size': 200,
'min_gaussian_stddev': 0.0,
'max_gaussian_stddev': 2.0
})]
self._testPreprocessorCache(preprocessing_options)
def testRandomPatchGaussianWithRandomCoefOne(self):
def graph_fn():
preprocessing_options = [(preprocessor.random_patch_gaussian, {
'random_coef': 1.0
})]
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
processed_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
patched_images = processed_tensor_dict[fields.InputDataFields.image]
images_shape = tf.shape(images)
patched_images_shape = tf.shape(patched_images)
return patched_images_shape, patched_images, images_shape, images
(patched_images_shape, patched_images, images_shape,
images) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(images_shape, patched_images_shape)
self.assertAllEqual(images, patched_images)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
def testAutoAugmentImage(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.autoaugment_image, {
'policy_name': 'v1'
}))
images = self.createTestImages()
boxes = self.createTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
autoaugment_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options)
augmented_images = autoaugment_tensor_dict[fields.InputDataFields.image]
augmented_boxes = autoaugment_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_shape = tf.shape(images)
boxes_shape = tf.shape(boxes)
augmented_images_shape = tf.shape(augmented_images)
augmented_boxes_shape = tf.shape(augmented_boxes)
return [images_shape, boxes_shape, augmented_images_shape,
augmented_boxes_shape]
(images_shape_, boxes_shape_, augmented_images_shape_,
augmented_boxes_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(images_shape_, augmented_images_shape_)
self.assertAllEqual(boxes_shape_, augmented_boxes_shape_)
def testRandomResizeMethodWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_resize_method, {
'target_size': (75, 150)
}))
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomResizeMethod(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_resize_method, {
'target_size': (75, 150)
}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
resized_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
resized_images = resized_tensor_dict[fields.InputDataFields.image]
resized_images_shape = tf.shape(resized_images)
expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)
return [expected_images_shape, resized_images_shape]
(expected_images_shape_, resized_images_shape_) = self.execute_cpu(graph_fn,
[])
self.assertAllEqual(expected_images_shape_,
resized_images_shape_)
def testResizeImageWithMasks(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
def graph_fn(in_image_shape, in_masks_shape):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return out_image_shape, out_masks_shape
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
(out_image_shape,
out_masks_shape) = self.execute_cpu(graph_fn, [
np.array(in_image_shape, np.int32),
np.array(in_masks_shape, np.int32)
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeImageWithMasksTensorInputHeightAndWidth(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
def graph_fn(in_image_shape, in_masks_shape):
height = tf.constant(50, dtype=tf.int32)
width = tf.constant(100, dtype=tf.int32)
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return out_image_shape, out_masks_shape
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
(out_image_shape,
out_masks_shape) = self.execute_cpu(graph_fn, [
np.array(in_image_shape, np.int32),
np.array(in_masks_shape, np.int32)
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeImageWithNoInstanceMask(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]]
def graph_fn(in_image_shape, in_masks_shape):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return out_image_shape, out_masks_shape
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
(out_image_shape,
out_masks_shape) = self.execute_cpu(graph_fn, [
np.array(in_image_shape, np.int32),
np.array(in_masks_shape, np.int32)
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangePreservesStaticSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
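    # Expected shapes: resize_to_range scales the smaller side up to min_dim
    # (40->50 gives 75x50, 15->50 gives 50x100) unless that would push the
    # larger side past max_dim, in which case the larger side is capped
    # instead (15x50 would need a 10/3 scale, so it becomes 30x100).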
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
in_image = tf.random_uniform(in_shape)
out_image, _ = preprocessor.resize_to_range(
in_image, min_dimension=min_dim, max_dimension=max_dim)
self.assertAllEqual(out_image.get_shape().as_list(), expected_shape)
def testResizeToRangeWithDynamicSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
def graph_fn(in_image_shape):
in_image = tf.random_uniform(in_image_shape)
out_image, _ = preprocessor.resize_to_range(
in_image, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
return out_image_shape
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
out_image_shape = self.execute_cpu(graph_fn, [np.array(in_shape,
np.int32)])
self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self):
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]]
def graph_fn(in_image):
out_image, _ = preprocessor.resize_to_range(
in_image,
min_dimension=min_dim,
max_dimension=max_dim,
pad_to_max_dimension=True)
return tf.shape(out_image)
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
out_image_shape = self.execute_cpu(
graph_fn, [np.random.rand(*in_shape).astype('f')])
self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self):
in_image_np = np.array([[[0, 1, 2]]], np.float32)
ex_image_np = np.array(
[[[0, 1, 2], [123.68, 116.779, 103.939]],
[[123.68, 116.779, 103.939], [123.68, 116.779, 103.939]]], np.float32)
min_dim = 1
max_dim = 2
def graph_fn(in_image):
out_image, _ = preprocessor.resize_to_range(
in_image,
min_dimension=min_dim,
max_dimension=max_dim,
pad_to_max_dimension=True,
per_channel_pad_value=(123.68, 116.779, 103.939))
return out_image
out_image_np = self.execute_cpu(graph_fn, [in_image_np])
self.assertAllClose(ex_image_np, out_image_np)
def testResizeToRangeWithMasksPreservesStaticSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
self.assertAllEqual(out_masks.get_shape().as_list(), expected_mask_shape)
self.assertAllEqual(out_image.get_shape().as_list(), expected_image_shape)
def testResizeToRangeWithMasksAndPadToMaxDimension(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[100, 100, 3], [100, 100, 3]]
expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]]
def graph_fn(in_image, in_masks):
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim,
max_dimension=max_dim, pad_to_max_dimension=True)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return [out_image_shape, out_masks_shape]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
out_image_shape, out_masks_shape = self.execute_cpu(
graph_fn, [
np.random.rand(*in_image_shape).astype('f'),
np.random.rand(*in_masks_shape).astype('f'),
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithMasksAndDynamicSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
def graph_fn(in_image, in_masks):
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return [out_image_shape, out_masks_shape]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
out_image_shape, out_masks_shape = self.execute_cpu(
graph_fn, [
np.random.rand(*in_image_shape).astype('f'),
np.random.rand(*in_masks_shape).astype('f'),
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithInstanceMasksTensorOfSizeZero(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
def graph_fn(in_image, in_masks):
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return [out_image_shape, out_masks_shape]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
out_image_shape, out_masks_shape = self.execute_cpu(
graph_fn, [
np.random.rand(*in_image_shape).astype('f'),
np.random.rand(*in_masks_shape).astype('f'),
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRange4DImageTensor(self):
image = tf.random_uniform([1, 200, 300, 3])
with self.assertRaises(ValueError):
preprocessor.resize_to_range(image, 500, 600)
def testResizeToRangeSameMinMax(self):
"""Tests image resizing, checking output sizes."""
in_shape_list = [[312, 312, 3], [299, 299, 3]]
min_dim = 320
max_dim = 320
expected_shape_list = [[320, 320, 3], [320, 320, 3]]
def graph_fn(in_shape):
in_image = tf.random_uniform(in_shape)
out_image, _ = preprocessor.resize_to_range(
in_image, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
return out_image_shape
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
out_image_shape = self.execute_cpu(graph_fn, [np.array(in_shape,
np.int32)])
self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToMaxDimensionTensorShapes(self):
"""Tests both cases where image should and shouldn't be resized."""
in_image_shape_list = [[100, 50, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 100, 50], [10, 15, 30]]
max_dim = 50
expected_image_shape_list = [[50, 25, 3], [15, 30, 3]]
expected_masks_shape_list = [[15, 50, 25], [10, 15, 30]]
def graph_fn(in_image_shape, in_masks_shape):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_max_dimension(
in_image, in_masks, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return [out_image_shape, out_masks_shape]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
out_image_shape, out_masks_shape = self.execute_cpu(
graph_fn, [
np.array(in_image_shape, np.int32),
np.array(in_masks_shape, np.int32)
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMaxDimensionWithInstanceMasksTensorOfSizeZero(self):
"""Tests both cases where image should and shouldn't be resized."""
in_image_shape_list = [[100, 50, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 100, 50], [0, 15, 30]]
max_dim = 50
expected_image_shape_list = [[50, 25, 3], [15, 30, 3]]
expected_masks_shape_list = [[0, 50, 25], [0, 15, 30]]
def graph_fn(in_image_shape, in_masks_shape):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_max_dimension(
in_image, in_masks, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return [out_image_shape, out_masks_shape]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
out_image_shape, out_masks_shape = self.execute_cpu(
graph_fn, [
np.array(in_image_shape, np.int32),
np.array(in_masks_shape, np.int32)
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMaxDimensionRaisesErrorOn4DImage(self):
image = tf.random_uniform([1, 200, 300, 3])
with self.assertRaises(ValueError):
preprocessor.resize_to_max_dimension(image, 500)
  def testResizeToMinDimensionTensorShapes(self):
    """Tests both cases where image should and shouldn't be resized."""
in_image_shape_list = [[60, 55, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 55], [10, 15, 30]]
min_dim = 50
expected_image_shape_list = [[60, 55, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]]
def graph_fn(in_image_shape, in_masks_shape):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_min_dimension(
in_image, in_masks, min_dimension=min_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return [out_image_shape, out_masks_shape]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
out_image_shape, out_masks_shape = self.execute_cpu(
graph_fn, [
np.array(in_image_shape, np.int32),
np.array(in_masks_shape, np.int32)
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
min_dim = 50
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
def graph_fn(in_image_shape, in_masks_shape):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_min_dimension(
in_image, in_masks, min_dimension=min_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
return [out_image_shape, out_masks_shape]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
out_image_shape, out_masks_shape = self.execute_cpu(
graph_fn, [
np.array(in_image_shape, np.int32),
np.array(in_masks_shape, np.int32)
])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMinDimensionRaisesErrorOn4DImage(self):
image = tf.random_uniform([1, 200, 300, 3])
with self.assertRaises(ValueError):
preprocessor.resize_to_min_dimension(image, 500)
def testResizePadToMultipleNoMasks(self):
"""Tests resizing when padding to multiple without masks."""
def graph_fn():
image = tf.ones((200, 100, 3), dtype=tf.float32)
out_image, out_shape = preprocessor.resize_pad_to_multiple(
image, multiple=32)
return out_image, out_shape
out_image, out_shape = self.execute_cpu(graph_fn, [])
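    # Expected values: padding 200x100 up to multiples of 32 gives a 224x128
    # canvas (ceil(200/32) * 32 and ceil(100/32) * 32); the zero padding
    # leaves the sum of the all-ones image unchanged.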
self.assertAllClose(out_image.sum(), 200 * 100 * 3)
self.assertAllEqual(out_shape, (200, 100, 3))
self.assertAllEqual(out_image.shape, (224, 128, 3))
def testResizePadToMultipleWithMasks(self):
"""Tests resizing when padding to multiple with masks."""
def graph_fn():
image = tf.ones((200, 100, 3), dtype=tf.float32)
masks = tf.ones((10, 200, 100), dtype=tf.float32)
_, out_masks, out_shape = preprocessor.resize_pad_to_multiple(
image, multiple=32, masks=masks)
return [out_masks, out_shape]
out_masks, out_shape = self.execute_cpu(graph_fn, [])
self.assertAllClose(out_masks.sum(), 200 * 100 * 10)
self.assertAllEqual(out_shape, (200, 100, 3))
self.assertAllEqual(out_masks.shape, (10, 224, 128))
def testResizePadToMultipleEmptyMasks(self):
"""Tests resizing when padding to multiple with an empty mask."""
def graph_fn():
image = tf.ones((200, 100, 3), dtype=tf.float32)
masks = tf.ones((0, 200, 100), dtype=tf.float32)
_, out_masks, out_shape = preprocessor.resize_pad_to_multiple(
image, multiple=32, masks=masks)
return [out_masks, out_shape]
out_masks, out_shape = self.execute_cpu(graph_fn, [])
self.assertAllEqual(out_shape, (200, 100, 3))
self.assertAllEqual(out_masks.shape, (0, 224, 128))
def testScaleBoxesToPixelCoordinates(self):
"""Tests box scaling, checking scaled values."""
def graph_fn():
in_shape = [60, 40, 3]
in_boxes = [[0.1, 0.2, 0.4, 0.6],
[0.5, 0.3, 0.9, 0.7]]
in_image = tf.random_uniform(in_shape)
in_boxes = tf.constant(in_boxes)
_, out_boxes = preprocessor.scale_boxes_to_pixel_coordinates(
in_image, boxes=in_boxes)
return out_boxes
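    # Expected values: scale_boxes_to_pixel_coordinates multiplies the
    # normalized [ymin, xmin, ymax, xmax] entries by height 60 and width 40.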
expected_boxes = [[6., 8., 24., 24.],
[30., 12., 54., 28.]]
out_boxes = self.execute_cpu(graph_fn, [])
self.assertAllClose(out_boxes, expected_boxes)
def testScaleBoxesToPixelCoordinatesWithKeypoints(self):
"""Tests box and keypoint scaling, checking scaled values."""
def graph_fn():
in_shape = [60, 40, 3]
in_boxes = self.createTestBoxes()
in_keypoints, _ = self.createTestKeypoints()
in_image = tf.random_uniform(in_shape)
(_, out_boxes,
out_keypoints) = preprocessor.scale_boxes_to_pixel_coordinates(
in_image, boxes=in_boxes, keypoints=in_keypoints)
return out_boxes, out_keypoints
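    # Expected values: boxes and [y, x] keypoints both scale by height 60 and
    # width 40, e.g. keypoint [0.1, 0.1] maps to [6., 4.].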
expected_boxes = [[0., 10., 45., 40.],
[15., 20., 45., 40.]]
expected_keypoints = [
[[6., 4.], [12., 8.], [18., 12.]],
[[24., 16.], [30., 20.], [36., 24.]],
]
out_boxes_, out_keypoints_ = self.execute_cpu(graph_fn, [])
self.assertAllClose(out_boxes_, expected_boxes)
self.assertAllClose(out_keypoints_, expected_keypoints)
def testSubtractChannelMean(self):
"""Tests whether channel means have been subtracted."""
def graph_fn():
image = tf.zeros((240, 320, 3))
means = [1, 2, 3]
actual = preprocessor.subtract_channel_mean(image, means=means)
return actual
actual = self.execute_cpu(graph_fn, [])
    self.assertAllClose(actual[:, :, 0], -1 * np.ones((240, 320)))
    self.assertAllClose(actual[:, :, 1], -2 * np.ones((240, 320)))
    self.assertAllClose(actual[:, :, 2], -3 * np.ones((240, 320)))
def testOneHotEncoding(self):
"""Tests one hot encoding of multiclass labels."""
def graph_fn():
labels = tf.constant([1, 4, 2], dtype=tf.int32)
one_hot = preprocessor.one_hot_encoding(labels, num_classes=5)
return one_hot
one_hot = self.execute_cpu(graph_fn, [])
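    # one_hot_encoding folds the label list into a single multi-hot vector of
    # length num_classes, so labels [1, 4, 2] set indices 1, 2 and 4.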
self.assertAllEqual([0, 1, 1, 0, 1], one_hot)
def testRandomSelfConcatImageVertically(self):
def graph_fn():
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
confidences = weights
scores = self.createTestMultiClassScores()
tensor_dict = {
fields.InputDataFields.image: tf.cast(images, dtype=tf.float32),
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_confidences: confidences,
fields.InputDataFields.multiclass_scores: scores,
}
preprocessing_options = [(preprocessor.random_self_concat_image, {
'concat_vertical_probability': 1.0,
'concat_horizontal_probability': 0.0,
})]
      func_arg_map = preprocessor.get_default_func_arg_map(
          include_label_weights=True,
          include_label_confidences=True,
          include_multiclass_scores=True)
output_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=func_arg_map)
original_shape = tf.shape(images)[1:3]
final_shape = tf.shape(output_tensor_dict[fields.InputDataFields.image])[
1:3]
return [
original_shape,
boxes,
labels,
confidences,
scores,
final_shape,
output_tensor_dict[fields.InputDataFields.groundtruth_boxes],
output_tensor_dict[fields.InputDataFields.groundtruth_classes],
output_tensor_dict[fields.InputDataFields.groundtruth_confidences],
output_tensor_dict[fields.InputDataFields.multiclass_scores],
]
(original_shape, boxes, labels, confidences, scores, final_shape, new_boxes,
new_labels, new_confidences, new_scores) = self.execute(graph_fn, [])
self.assertAllEqual(final_shape, original_shape * np.array([2, 1]))
self.assertAllEqual(2 * boxes.size, new_boxes.size)
self.assertAllEqual(2 * labels.size, new_labels.size)
self.assertAllEqual(2 * confidences.size, new_confidences.size)
self.assertAllEqual(2 * scores.size, new_scores.size)
def testRandomSelfConcatImageHorizontally(self):
def graph_fn():
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
confidences = weights
scores = self.createTestMultiClassScores()
tensor_dict = {
fields.InputDataFields.image: tf.cast(images, dtype=tf.float32),
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
fields.InputDataFields.groundtruth_confidences: confidences,
fields.InputDataFields.multiclass_scores: scores,
}
preprocessing_options = [(preprocessor.random_self_concat_image, {
'concat_vertical_probability': 0.0,
'concat_horizontal_probability': 1.0,
})]
      func_arg_map = preprocessor.get_default_func_arg_map(
          include_label_weights=True,
          include_label_confidences=True,
          include_multiclass_scores=True)
output_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=func_arg_map)
original_shape = tf.shape(images)[1:3]
final_shape = tf.shape(output_tensor_dict[fields.InputDataFields.image])[
1:3]
return [
original_shape,
boxes,
labels,
confidences,
scores,
final_shape,
output_tensor_dict[fields.InputDataFields.groundtruth_boxes],
output_tensor_dict[fields.InputDataFields.groundtruth_classes],
output_tensor_dict[fields.InputDataFields.groundtruth_confidences],
output_tensor_dict[fields.InputDataFields.multiclass_scores],
]
(original_shape, boxes, labels, confidences, scores, final_shape, new_boxes,
new_labels, new_confidences, new_scores) = self.execute(graph_fn, [])
self.assertAllEqual(final_shape, original_shape * np.array([1, 2]))
self.assertAllEqual(2 * boxes.size, new_boxes.size)
self.assertAllEqual(2 * labels.size, new_labels.size)
self.assertAllEqual(2 * confidences.size, new_confidences.size)
self.assertAllEqual(2 * scores.size, new_scores.size)
def testSSDRandomCropWithCache(self):
preprocess_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testSSDRandomCrop(self):
def graph_fn():
preprocessing_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
return [boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank]
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropWithMultiClassScores(self):
def graph_fn():
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}), (preprocessor.ssd_random_crop, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
multiclass_scores = self.createTestMultiClassScores()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.multiclass_scores: multiclass_scores,
fields.InputDataFields.groundtruth_weights: weights,
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_multiclass_scores=True)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_multiclass_scores = distorted_tensor_dict[
fields.InputDataFields.multiclass_scores]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
multiclass_scores_rank = tf.rank(multiclass_scores)
distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
return [
boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
distorted_images_rank, multiclass_scores_rank,
distorted_multiclass_scores, distorted_multiclass_scores_rank
]
(boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_, multiclass_scores_rank_,
distorted_multiclass_scores_,
distorted_multiclass_scores_rank_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
self.assertAllEqual(multiclass_scores_rank_,
distorted_multiclass_scores_rank_)
self.assertAllEqual(distorted_boxes_.shape[0],
distorted_multiclass_scores_.shape[0])
def testSSDRandomCropPad(self):
def graph_fn():
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
preprocessing_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop_pad, {})]
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
return [
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
]
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropFixedAspectRatioWithCache(self):
preprocess_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop_fixed_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def _testSSDRandomCropFixedAspectRatio(self,
include_multiclass_scores,
include_instance_masks,
include_keypoints):
def graph_fn():
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
weights = self.createTestGroundtruthWeights()
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})]
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_weights: weights
}
if include_multiclass_scores:
multiclass_scores = self.createTestMultiClassScores()
tensor_dict[fields.InputDataFields.multiclass_scores] = (
multiclass_scores)
if include_instance_masks:
masks = self.createTestMasks()
tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks
if include_keypoints:
keypoints, _ = self.createTestKeypoints()
tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
return [boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank]
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropFixedAspectRatio(self):
self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False,
include_instance_masks=False,
include_keypoints=False)
def testSSDRandomCropFixedAspectRatioWithMultiClassScores(self):
self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=True,
include_instance_masks=False,
include_keypoints=False)
def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self):
self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False,
include_instance_masks=True,
include_keypoints=True)
  def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self):
    self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=True,
                                            include_instance_masks=True,
                                            include_keypoints=True)
def testConvertClassLogitsToSoftmax(self):
def graph_fn():
multiclass_scores = tf.constant(
[[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32)
temperature = 2.0
converted_multiclass_scores = (
preprocessor.convert_class_logits_to_softmax(
multiclass_scores=multiclass_scores, temperature=temperature))
return converted_multiclass_scores
converted_multiclass_scores_ = self.execute_cpu(graph_fn, [])
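    # Hand-computed expectation: with temperature 2.0,
    # softmax([1.0, 0.0] / 2) = [e**0.5, 1] / (e**0.5 + 1) ~= [0.622, 0.378];
    # [0.5, 0.5] stays uniform, and the huge logit gap in [1000, 1] saturates
    # to [1, 0].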
expected_converted_multiclass_scores = [[0.62245935, 0.37754068],
[0.5, 0.5],
[1, 0]]
self.assertAllClose(converted_multiclass_scores_,
expected_converted_multiclass_scores)
@parameterized.named_parameters(
('scale_1', 1.0),
('scale_1.5', 1.5),
('scale_0.5', 0.5)
)
def test_square_crop_by_scale(self, scale):
def graph_fn():
image = np.random.randn(256, 256, 1)
masks = tf.constant(image[:, :, 0].reshape(1, 256, 256))
image = tf.constant(image)
keypoints = tf.constant([[[0.25, 0.25], [0.75, 0.75]]])
boxes = tf.constant([[0.25, .25, .75, .75]])
labels = tf.constant([[1]])
label_confidences = tf.constant([0.75])
label_weights = tf.constant([[1.]])
(new_image, new_boxes, _, _, new_confidences, new_masks,
new_keypoints) = preprocessor.random_square_crop_by_scale(
image,
boxes,
labels,
label_weights,
label_confidences,
masks=masks,
keypoints=keypoints,
max_border=256,
scale_min=scale,
scale_max=scale)
return new_image, new_boxes, new_confidences, new_masks, new_keypoints
image, boxes, confidences, masks, keypoints = self.execute_cpu(graph_fn, [])
ymin, xmin, ymax, xmax = boxes[0]
self.assertAlmostEqual(ymax - ymin, 0.5 / scale)
self.assertAlmostEqual(xmax - xmin, 0.5 / scale)
k1 = keypoints[0, 0]
k2 = keypoints[0, 1]
self.assertAlmostEqual(k2[0] - k1[0], 0.5 / scale)
self.assertAlmostEqual(k2[1] - k1[1], 0.5 / scale)
size = max(image.shape)
self.assertAlmostEqual(scale * 256.0, size)
self.assertAllClose(image[:, :, 0], masks[0, :, :])
self.assertAllClose(confidences, [0.75])
@parameterized.named_parameters(('scale_0_1', 0.1), ('scale_1_0', 1.0),
('scale_2_0', 2.0))
def test_random_scale_crop_and_pad_to_square(self, scale):
def graph_fn():
image = np.random.randn(512, 256, 1)
box_centers = [0.25, 0.5, 0.75]
box_size = 0.1
box_corners = []
box_labels = []
box_label_weights = []
keypoints = []
masks = []
for center_y in box_centers:
for center_x in box_centers:
box_corners.append(
[center_y - box_size / 2.0, center_x - box_size / 2.0,
center_y + box_size / 2.0, center_x + box_size / 2.0])
box_labels.append([1])
box_label_weights.append([1.])
keypoints.append(
[[center_y - box_size / 2.0, center_x - box_size / 2.0],
[center_y + box_size / 2.0, center_x + box_size / 2.0]])
masks.append(image[:, :, 0].reshape(512, 256))
image = tf.constant(image)
boxes = tf.constant(box_corners)
labels = tf.constant(box_labels)
label_weights = tf.constant(box_label_weights)
keypoints = tf.constant(keypoints)
masks = tf.constant(np.stack(masks))
(new_image, new_boxes, _, _, new_masks,
new_keypoints) = preprocessor.random_scale_crop_and_pad_to_square(
image,
boxes,
labels,
label_weights,
masks=masks,
keypoints=keypoints,
scale_min=scale,
scale_max=scale,
output_size=512)
return new_image, new_boxes, new_masks, new_keypoints
image, boxes, masks, keypoints = self.execute_cpu(graph_fn, [])
# Since random_scale_crop_and_pad_to_square may prune and clip boxes,
# we only need to find one of the boxes that was not clipped and check
# that it matches the expected dimensions. Note, assertAlmostEqual(a, b)
# is equivalent to round(a-b, 7) == 0.
any_box_has_correct_size = False
effective_scale_y = int(scale * 512) / 512.0
effective_scale_x = int(scale * 256) / 512.0
expected_size_y = 0.1 * effective_scale_y
expected_size_x = 0.1 * effective_scale_x
for box in boxes:
ymin, xmin, ymax, xmax = box
any_box_has_correct_size |= (
(round(ymin, 7) != 0.0) and (round(xmin, 7) != 0.0) and
(round(ymax, 7) != 1.0) and (round(xmax, 7) != 1.0) and
(round((ymax - ymin) - expected_size_y, 7) == 0.0) and
(round((xmax - xmin) - expected_size_x, 7) == 0.0))
self.assertTrue(any_box_has_correct_size)
# Similar to the approach above where we check for at least one box with the
# expected dimensions, we check for at least one pair of keypoints whose
# distance matches the expected dimensions.
any_keypoint_pair_has_correct_dist = False
for keypoint_pair in keypoints:
ymin, xmin = keypoint_pair[0]
ymax, xmax = keypoint_pair[1]
any_keypoint_pair_has_correct_dist |= (
(round(ymin, 7) != 0.0) and (round(xmin, 7) != 0.0) and
(round(ymax, 7) != 1.0) and (round(xmax, 7) != 1.0) and
(round((ymax - ymin) - expected_size_y, 7) == 0.0) and
(round((xmax - xmin) - expected_size_x, 7) == 0.0))
self.assertTrue(any_keypoint_pair_has_correct_dist)
self.assertAlmostEqual(512.0, image.shape[0])
self.assertAlmostEqual(512.0, image.shape[1])
self.assertAllClose(image[:, :, 0],
masks[0, :, :])
def test_random_scale_crop_and_pad_to_square_handles_confidences(self):
def graph_fn():
image = tf.zeros([10, 10, 1])
boxes = tf.constant([[0, 0, 0.5, 0.5], [0.5, 0.5, 0.75, 0.75]])
label_weights = tf.constant([1.0, 1.0])
box_labels = tf.constant([0, 1])
box_confidences = tf.constant([-1.0, 1.0])
(_, new_boxes, _, _,
new_confidences) = preprocessor.random_scale_crop_and_pad_to_square(
image,
boxes,
box_labels,
label_weights,
label_confidences=box_confidences,
scale_min=0.8,
scale_max=0.9,
output_size=10)
return new_boxes, new_confidences
boxes, confidences = self.execute_cpu(graph_fn, [])
self.assertLen(boxes, 2)
self.assertAllEqual(confidences, [-1.0, 1.0])
def testAdjustGamma(self):
def graph_fn():
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.adjust_gamma, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_gamma = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_gamma_shape = tf.shape(images_gamma)
return [image_original_shape, image_gamma_shape]
(image_original_shape_, image_gamma_shape_) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image_original_shape_, image_gamma_shape_)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/preprocessor_test.py | preprocessor_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Post-processing operations on detected boxes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
_NMS_TILE_SIZE = 512
def batch_iou(boxes1, boxes2):
"""Calculates the overlap between proposal and ground truth boxes.
Some `boxes2` may have been padded. The returned `iou` tensor for these
boxes will be -1.
Args:
boxes1: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment. The last dimension is the pixel
coordinates in [ymin, xmin, ymax, xmax] form.
boxes2: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a negative value.
Returns:
    iou: a tensor with a shape of [batch_size, N, MAX_NUM_INSTANCES].
"""
with tf.name_scope('BatchIOU'):
y1_min, x1_min, y1_max, x1_max = tf.split(
value=boxes1, num_or_size_splits=4, axis=2)
y2_min, x2_min, y2_max, x2_max = tf.split(
value=boxes2, num_or_size_splits=4, axis=2)
# Calculates the intersection area.
intersection_xmin = tf.maximum(x1_min, tf.transpose(x2_min, [0, 2, 1]))
intersection_xmax = tf.minimum(x1_max, tf.transpose(x2_max, [0, 2, 1]))
intersection_ymin = tf.maximum(y1_min, tf.transpose(y2_min, [0, 2, 1]))
intersection_ymax = tf.minimum(y1_max, tf.transpose(y2_max, [0, 2, 1]))
intersection_area = tf.maximum(
(intersection_xmax - intersection_xmin), 0) * tf.maximum(
(intersection_ymax - intersection_ymin), 0)
# Calculates the union area.
area1 = (y1_max - y1_min) * (x1_max - x1_min)
area2 = (y2_max - y2_min) * (x2_max - x2_min)
# Adds a small epsilon to avoid divide-by-zero.
union_area = area1 + tf.transpose(area2,
[0, 2, 1]) - intersection_area + 1e-8
# Calculates IoU.
iou = intersection_area / union_area
# Fills -1 for padded ground truth boxes.
padding_mask = tf.logical_and(
tf.less(intersection_xmax, 0), tf.less(intersection_ymax, 0))
iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
return iou
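# Minimal usage sketch for `batch_iou` (illustrative only; the helper below is
# not part of the library and the numbers are hand-computed).
def _batch_iou_example():  # pragma: no cover
  """Two partially overlapping boxes: intersection 1, union 4 + 4 - 1 = 7."""
  boxes1 = tf.constant([[[0., 0., 2., 2.]]])  # [batch=1, N=1, 4]
  boxes2 = tf.constant([[[1., 1., 3., 3.]]])  # [batch=1, M=1, 4]
  return batch_iou(boxes1, boxes2)  # ~= [[[1. / 7.]]]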
def _self_suppression(iou, iou_threshold, loop_condition, iou_sum):
"""Bounding-boxes self-suppression loop body.
Args:
iou: A float Tensor with shape [1, num_boxes, max_num_instance]: IOUs.
iou_threshold: A scalar, representing IOU threshold.
loop_condition: The loop condition returned from last iteration.
iou_sum: iou_sum_new returned from last iteration.
Returns:
iou_suppressed: A float Tensor with shape [1, num_boxes, max_num_instance],
IOU after suppression.
iou_threshold: A scalar, representing IOU threshold.
loop_condition: Bool Tensor of shape [], the loop condition.
iou_sum_new: The new IOU sum.
"""
del loop_condition
can_suppress_others = tf.cast(
tf.reshape(tf.reduce_max(iou, 1) <= iou_threshold, [1, -1, 1]), iou.dtype)
iou_suppressed = tf.reshape(
tf.cast(
tf.reduce_max(can_suppress_others * iou, 1) <= iou_threshold,
iou.dtype), [1, -1, 1]) * iou
iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2])
return [
iou_suppressed, iou_threshold,
tf.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new
]
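# Worked micro-trace of the fixed point above (an illustrative sketch with
# hand-computed values): with three identical boxes and the upper-triangular
# IOU mask built in `_suppression_loop_body`, box 0 has no incoming overlap
# and remains a suppressor, while boxes 1 and 2 exceed the threshold and have
# their rows zeroed; on the next iteration nothing changes, `iou_sum`
# stabilizes, and the loop condition becomes False.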
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx):
"""Bounding-boxes cross-suppression loop body.
Args:
boxes: A float Tensor of shape [1, anchors, 4], representing boxes.
    box_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile
      returned from the last iteration.
iou_threshold: A scalar, representing IOU threshold.
inner_idx: A scalar, representing inner index.
Returns:
boxes: A float Tensor of shape [1, anchors, 4], representing boxes.
    ret_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile
      after suppression.
iou_threshold: A scalar, representing IOU threshold.
inner_idx: A scalar, inner index incremented.
"""
new_slice = tf.slice(boxes, [0, inner_idx * _NMS_TILE_SIZE, 0],
[1, _NMS_TILE_SIZE, 4])
iou = batch_iou(new_slice, box_slice)
ret_slice = tf.expand_dims(
tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype),
2) * box_slice
return boxes, ret_slice, iou_threshold, inner_idx + 1
def _suppression_loop_body(boxes, iou_threshold, output_size, idx):
"""Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE).
Args:
boxes: a tensor with a shape of [1, anchors, 4].
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
output_size: an int32 tensor of size [1]. Representing the number of
selected boxes.
    idx: an integer scalar representing the induction variable.
Returns:
boxes: updated boxes.
iou_threshold: pass down iou_threshold to the next iteration.
output_size: the updated output_size.
idx: the updated induction variable.
"""
num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE
# Iterates over tiles that can possibly suppress the current tile.
box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0],
[1, _NMS_TILE_SIZE, 4])
_, box_slice, _, _ = tf.while_loop(
lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
_cross_suppression, [boxes, box_slice, iou_threshold,
tf.constant(0)])
# Iterates over the current tile to compute self-suppression.
iou = batch_iou(box_slice, box_slice)
mask = tf.expand_dims(
tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape(
tf.range(_NMS_TILE_SIZE), [-1, 1]), 0)
iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)
suppressed_iou, _, _, _ = tf.while_loop(
lambda _iou, _threshold, loop_condition, _iou_sum: loop_condition,
_self_suppression,
[iou, iou_threshold,
tf.constant(True),
tf.reduce_sum(iou, [1, 2])])
suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0
box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)
# Uses box_slice to update the input boxes.
mask = tf.reshape(
tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])
boxes = tf.tile(tf.expand_dims(box_slice, [1]),
[1, num_tiles, 1, 1]) * mask + tf.reshape(
boxes, [1, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask)
boxes = tf.reshape(boxes, [1, -1, 4])
# Updates output_size.
output_size += tf.reduce_sum(
tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1])
return boxes, iou_threshold, output_size, idx + 1
def partitioned_non_max_suppression_padded(boxes,
scores,
max_output_size,
iou_threshold=0.5,
score_threshold=float('-inf')):
"""A tiled version of [`tf.image.non_max_suppression_padded`](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression_padded).
The overall design of the algorithm is to handle boxes tile-by-tile:
boxes = boxes.pad_to_multiple_of(tile_size)
num_tiles = len(boxes) // tile_size
output_boxes = []
for i in range(num_tiles):
box_tile = boxes[i*tile_size : (i+1)*tile_size]
    for j in range(i):
suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
iou = batch_iou(box_tile, suppressing_tile)
      # if the box is suppressed by an earlier tile, zero it out
box_tile *= _update_boxes(iou)
# Iteratively handle the diagonal tile.
    iou = batch_iou(box_tile, box_tile)
iou_changed = True
while iou_changed:
# boxes that are not suppressed by anything else
suppressing_boxes = _get_suppressing_boxes(iou)
# boxes that are suppressed by suppressing_boxes
suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
# clear iou to 0 for boxes that are suppressed, as they cannot be used
# to suppress other boxes any more
new_iou = _clear_iou(iou, suppressed_boxes)
iou_changed = (new_iou != iou)
iou = new_iou
# remaining boxes that can still suppress others, are selected boxes.
output_boxes.append(_get_suppressing_boxes(iou))
if len(output_boxes) >= max_output_size:
break
Args:
boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: a scalar integer `Tensor` representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
score_threshold: A float representing the threshold for deciding when to
remove boxes based on score.
Returns:
selected_indices: a tensor of shape [anchors].
num_valid_boxes: a scalar int tensor.
nms_proposals: a tensor with a shape of [anchors, 4]. It has
same dtype as input boxes.
nms_scores: a tensor with a shape of [anchors]. It has same
dtype as input scores.
argsort_ids: a tensor of shape [anchors], mapping from input order of boxes
to output order of boxes.
"""
num_boxes = tf.shape(boxes)[0]
pad = tf.cast(
tf.ceil(tf.cast(num_boxes, tf.float32) / _NMS_TILE_SIZE),
tf.int32) * _NMS_TILE_SIZE - num_boxes
scores, argsort_ids = tf.nn.top_k(scores, k=num_boxes, sorted=True)
boxes = tf.gather(boxes, argsort_ids)
num_boxes = tf.shape(boxes)[0]
num_boxes += pad
boxes = tf.pad(
tf.cast(boxes, tf.float32), [[0, pad], [0, 0]], constant_values=-1)
scores = tf.pad(tf.cast(scores, tf.float32), [[0, pad]])
# mask boxes to -1 by score threshold
scores_mask = tf.expand_dims(
tf.cast(scores > score_threshold, boxes.dtype), axis=1)
boxes = ((boxes + 1.) * scores_mask) - 1.
boxes = tf.expand_dims(boxes, axis=0)
scores = tf.expand_dims(scores, axis=0)
def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
return tf.logical_and(
tf.reduce_min(output_size) < max_output_size,
idx < num_boxes // _NMS_TILE_SIZE)
selected_boxes, _, output_size, _ = tf.while_loop(
_loop_cond, _suppression_loop_body,
[boxes, iou_threshold,
tf.zeros([1], tf.int32),
tf.constant(0)])
idx = num_boxes - tf.cast(
tf.nn.top_k(
tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) *
tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],
tf.int32)
idx = tf.minimum(idx, num_boxes - 1 - pad)
idx = tf.reshape(idx + tf.reshape(tf.range(1) * num_boxes, [-1, 1]), [-1])
num_valid_boxes = tf.reduce_sum(output_size)
return (idx, num_valid_boxes, tf.reshape(boxes, [-1, 4]),
tf.reshape(scores, [-1]), argsort_ids)
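# Minimal usage sketch for the tiled NMS above (illustrative only; the helper
# below is not part of the library).
def _partitioned_nms_example():  # pragma: no cover
  """A duplicate box is suppressed while a disjoint box survives."""
  boxes = tf.constant([[0., 0., 1., 1.],
                       [0., 0., 1., 1.],   # exact duplicate, IOU = 1.0
                       [0., 2., 1., 3.]])  # disjoint, IOU = 0.0
  scores = tf.constant([0.9, 0.8, 0.7])
  idx, num_valid, _, _, _ = partitioned_non_max_suppression_padded(
      boxes, scores, max_output_size=3, iou_threshold=0.5)
  return idx, num_valid  # num_valid evaluates to 2.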
def _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh,
change_coordinate_frame, clip_window):
"""Validates boxes, scores and iou_thresh.
This function validates the boxes, scores, iou_thresh
and if change_coordinate_frame is True, clip_window must be specified.
Args:
boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either
number of classes or 1 depending on whether a separate box is predicted
per class.
scores: A [k, num_classes] float32 tensor containing the scores for each of
the k detections. The scores have to be non-negative when
pad_to_max_output_size is True.
iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
with previously selected boxes are removed).
change_coordinate_frame: Whether to normalize coordinates after clipping
relative to clip_window (this can only be set to True if a clip_window is
provided)
clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]
representing the window to clip and normalize boxes to before performing
non-max suppression.
Raises:
ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not
have a valid scores field.
"""
if not 0 <= iou_thresh <= 1.0:
raise ValueError('iou_thresh must be between 0 and 1')
if scores.shape.ndims != 2:
raise ValueError('scores field must be of rank 2')
if shape_utils.get_dim_as_int(scores.shape[1]) is None:
    raise ValueError('scores must have statically defined second dimension')
if boxes.shape.ndims != 3:
raise ValueError('boxes must be of rank 3.')
if not (shape_utils.get_dim_as_int(
boxes.shape[1]) == shape_utils.get_dim_as_int(scores.shape[1]) or
shape_utils.get_dim_as_int(boxes.shape[1]) == 1):
raise ValueError('second dimension of boxes must be either 1 or equal '
'to the second dimension of scores')
if shape_utils.get_dim_as_int(boxes.shape[2]) != 4:
raise ValueError('last dimension of boxes must be of size 4.')
if change_coordinate_frame and clip_window is None:
    raise ValueError('if change_coordinate_frame is True, then a clip_window '
                     'must be specified.')
def _clip_window_prune_boxes(sorted_boxes, clip_window, pad_to_max_output_size,
change_coordinate_frame):
"""Prune boxes with zero area.
Args:
sorted_boxes: A BoxList containing k detections.
clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]
representing the window to clip and normalize boxes to before performing
non-max suppression.
pad_to_max_output_size: flag indicating whether to pad to max output size or
not.
change_coordinate_frame: Whether to normalize coordinates after clipping
relative to clip_window (this can only be set to True if a clip_window is
provided).
Returns:
sorted_boxes: A BoxList containing k detections after pruning.
num_valid_nms_boxes_cumulative: Number of valid NMS boxes
"""
sorted_boxes = box_list_ops.clip_to_window(
sorted_boxes,
clip_window,
filter_nonoverlapping=not pad_to_max_output_size)
# Set the scores of boxes with zero area to -1 to keep the default
# behaviour of pruning out zero area boxes.
sorted_boxes_size = tf.shape(sorted_boxes.get())[0]
non_zero_box_area = tf.cast(box_list_ops.area(sorted_boxes), tf.bool)
sorted_boxes_scores = tf.where(
non_zero_box_area, sorted_boxes.get_field(fields.BoxListFields.scores),
-1 * tf.ones(sorted_boxes_size))
sorted_boxes.add_field(fields.BoxListFields.scores, sorted_boxes_scores)
num_valid_nms_boxes_cumulative = tf.reduce_sum(
tf.cast(tf.greater_equal(sorted_boxes_scores, 0), tf.int32))
sorted_boxes = box_list_ops.sort_by_field(sorted_boxes,
fields.BoxListFields.scores)
if change_coordinate_frame:
sorted_boxes = box_list_ops.change_coordinate_frame(sorted_boxes,
clip_window)
if sorted_boxes.has_field(fields.BoxListFields.keypoints):
sorted_keypoints = sorted_boxes.get_field(fields.BoxListFields.keypoints)
sorted_keypoints = keypoint_ops.change_coordinate_frame(sorted_keypoints,
clip_window)
sorted_boxes.set_field(fields.BoxListFields.keypoints, sorted_keypoints)
return sorted_boxes, num_valid_nms_boxes_cumulative
class NullContextmanager(object):
  """A no-op context manager, used in place of `tf.device` when NMS should
  not be pinned to a particular device."""
  def __enter__(self):
    pass
  def __exit__(self, type_arg, value_arg, traceback_arg):
    return False  # Do not suppress exceptions raised inside the block.
def multiclass_non_max_suppression(boxes,
scores,
score_thresh,
iou_thresh,
max_size_per_class,
max_total_size=0,
clip_window=None,
change_coordinate_frame=False,
masks=None,
boundaries=None,
pad_to_max_output_size=False,
use_partitioned_nms=False,
additional_fields=None,
soft_nms_sigma=0.0,
use_hard_nms=False,
use_cpu_nms=False,
scope=None):
"""Multi-class version of non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. It operates independently for each class for
which scores are provided (via the scores field of the input box_list),
pruning boxes with score less than a provided threshold prior to
applying NMS.
Please note that this operation is performed on *all* classes, therefore any
background classes should be removed prior to calling this function.
Selected boxes are guaranteed to be sorted in decreasing order by score (but
the sort is not guaranteed to be stable).
Args:
boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either
number of classes or 1 depending on whether a separate box is predicted
per class.
scores: A [k, num_classes] float32 tensor containing the scores for each of
the k detections. The scores have to be non-negative when
pad_to_max_output_size is True.
score_thresh: scalar threshold for score (low scoring boxes are removed).
iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
with previously selected boxes are removed).
max_size_per_class: maximum number of retained boxes per class.
max_total_size: maximum number of boxes retained over all classes. By
default returns all boxes retained after capping boxes per class.
clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]
representing the window to clip and normalize boxes to before performing
non-max suppression.
change_coordinate_frame: Whether to normalize coordinates after clipping
relative to clip_window (this can only be set to True if a clip_window
is provided)
masks: (optional) a [k, q, mask_height, mask_width] float32 tensor
containing box masks. `q` can be either number of classes or 1 depending
on whether a separate mask is predicted per class.
boundaries: (optional) a [k, q, boundary_height, boundary_width] float32
tensor containing box boundaries. `q` can be either number of classes or 1
depending on whether a separate boundary is predicted per class.
pad_to_max_output_size: If true, the output nmsed boxes are padded to be of
length `max_size_per_class`. Defaults to false.
use_partitioned_nms: If true, use partitioned version of
non_max_suppression.
additional_fields: (optional) If not None, a dictionary that maps keys to
tensors whose first dimensions are all of size `k`. After non-maximum
suppression, all tensors corresponding to the selected boxes will be
added to resulting BoxList.
    soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter
      (see Bodla et al., https://arxiv.org/abs/1704.04503). When
`soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)
NMS. Soft NMS is currently only supported when pad_to_max_output_size is
False.
use_hard_nms: Enforce the usage of hard NMS.
use_cpu_nms: Enforce NMS to run on CPU.
scope: name scope.
Returns:
A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a
      BoxList holding M boxes with a rank-1 scores field representing
corresponding scores for each box with scores sorted in decreasing order
and a rank-1 classes field representing a class label for each box. The
num_valid_nms_boxes is a 0-D integer tensor representing the number of
valid elements in `BoxList`, with the valid elements appearing first.
Raises:
ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have
a valid scores field.
ValueError: if Soft NMS (tf.image.non_max_suppression_with_scores) is not
supported in the current TF version and `soft_nms_sigma` is nonzero.
"""
_validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh,
change_coordinate_frame, clip_window)
if pad_to_max_output_size and soft_nms_sigma != 0.0:
raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not '
'supported when pad_to_max_output_size is True.')
with tf.name_scope(scope, 'MultiClassNonMaxSuppression'), tf.device(
'cpu:0') if use_cpu_nms else NullContextmanager():
num_scores = tf.shape(scores)[0]
num_classes = shape_utils.get_dim_as_int(scores.get_shape()[1])
selected_boxes_list = []
num_valid_nms_boxes_cumulative = tf.constant(0)
per_class_boxes_list = tf.unstack(boxes, axis=1)
if masks is not None:
per_class_masks_list = tf.unstack(masks, axis=1)
if boundaries is not None:
per_class_boundaries_list = tf.unstack(boundaries, axis=1)
boxes_ids = (range(num_classes) if len(per_class_boxes_list) > 1
else [0] * num_classes)
for class_idx, boxes_idx in zip(range(num_classes), boxes_ids):
per_class_boxes = per_class_boxes_list[boxes_idx]
boxlist_and_class_scores = box_list.BoxList(per_class_boxes)
class_scores = tf.reshape(
tf.slice(scores, [0, class_idx], tf.stack([num_scores, 1])), [-1])
boxlist_and_class_scores.add_field(fields.BoxListFields.scores,
class_scores)
if masks is not None:
per_class_masks = per_class_masks_list[boxes_idx]
boxlist_and_class_scores.add_field(fields.BoxListFields.masks,
per_class_masks)
if boundaries is not None:
per_class_boundaries = per_class_boundaries_list[boxes_idx]
boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries,
per_class_boundaries)
if additional_fields is not None:
for key, tensor in additional_fields.items():
boxlist_and_class_scores.add_field(key, tensor)
nms_result = None
selected_scores = None
if pad_to_max_output_size:
max_selection_size = max_size_per_class
if use_partitioned_nms:
(selected_indices, num_valid_nms_boxes,
boxlist_and_class_scores.data['boxes'],
boxlist_and_class_scores.data['scores'],
_) = partitioned_non_max_suppression_padded(
boxlist_and_class_scores.get(),
boxlist_and_class_scores.get_field(fields.BoxListFields.scores),
max_selection_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh)
else:
selected_indices, num_valid_nms_boxes = (
tf.image.non_max_suppression_padded(
boxlist_and_class_scores.get(),
boxlist_and_class_scores.get_field(
fields.BoxListFields.scores),
max_selection_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
pad_to_max_output_size=True))
nms_result = box_list_ops.gather(boxlist_and_class_scores,
selected_indices)
selected_scores = nms_result.get_field(fields.BoxListFields.scores)
else:
max_selection_size = tf.minimum(max_size_per_class,
boxlist_and_class_scores.num_boxes())
if (hasattr(tf.image, 'non_max_suppression_with_scores') and
tf.compat.forward_compatible(2019, 6, 6) and not use_hard_nms):
(selected_indices, selected_scores
) = tf.image.non_max_suppression_with_scores(
boxlist_and_class_scores.get(),
boxlist_and_class_scores.get_field(fields.BoxListFields.scores),
max_selection_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
soft_nms_sigma=soft_nms_sigma)
num_valid_nms_boxes = tf.shape(selected_indices)[0]
selected_indices = tf.concat(
[selected_indices,
tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0)
selected_scores = tf.concat(
[selected_scores,
tf.zeros(max_selection_size-num_valid_nms_boxes,
tf.float32)], -1)
nms_result = box_list_ops.gather(boxlist_and_class_scores,
selected_indices)
else:
if soft_nms_sigma != 0:
raise ValueError('Soft NMS not supported in current TF version!')
selected_indices = tf.image.non_max_suppression(
boxlist_and_class_scores.get(),
boxlist_and_class_scores.get_field(fields.BoxListFields.scores),
max_selection_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh)
num_valid_nms_boxes = tf.shape(selected_indices)[0]
selected_indices = tf.concat(
[selected_indices,
tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0)
nms_result = box_list_ops.gather(boxlist_and_class_scores,
selected_indices)
selected_scores = nms_result.get_field(fields.BoxListFields.scores)
# Make the scores -1 for invalid boxes.
valid_nms_boxes_indices = tf.less(
tf.range(max_selection_size), num_valid_nms_boxes)
nms_result.add_field(
fields.BoxListFields.scores,
tf.where(valid_nms_boxes_indices,
selected_scores, -1*tf.ones(max_selection_size)))
num_valid_nms_boxes_cumulative += num_valid_nms_boxes
nms_result.add_field(
fields.BoxListFields.classes, (tf.zeros_like(
nms_result.get_field(fields.BoxListFields.scores)) + class_idx))
selected_boxes_list.append(nms_result)
selected_boxes = box_list_ops.concatenate(selected_boxes_list)
sorted_boxes = box_list_ops.sort_by_field(selected_boxes,
fields.BoxListFields.scores)
if clip_window is not None:
# When pad_to_max_output_size is False, it prunes the boxes with zero
# area.
sorted_boxes, num_valid_nms_boxes_cumulative = _clip_window_prune_boxes(
sorted_boxes, clip_window, pad_to_max_output_size,
change_coordinate_frame)
if max_total_size:
max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes())
sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size))
num_valid_nms_boxes_cumulative = tf.where(
max_total_size > num_valid_nms_boxes_cumulative,
num_valid_nms_boxes_cumulative, max_total_size)
# Select only the valid boxes if pad_to_max_output_size is False.
if not pad_to_max_output_size:
sorted_boxes = box_list_ops.gather(
sorted_boxes, tf.range(num_valid_nms_boxes_cumulative))
return sorted_boxes, num_valid_nms_boxes_cumulative
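# Minimal usage sketch for `multiclass_non_max_suppression` (illustrative
# only; the helper below is not part of the library).
def _multiclass_nms_example():  # pragma: no cover
  """Class-1 scores fall below score_thresh and are pruned; the two identical
  class-0 boxes overlap completely, so only the 0.9 detection is kept."""
  boxes = tf.constant([[[0., 0., 1., 1.]],
                       [[0., 0., 1., 1.]]])  # [k=2, q=1, 4], shared boxes
  scores = tf.constant([[0.9, 0.1],
                        [0.8, 0.2]])         # [k=2, num_classes=2]
  nmsed_boxlist, num_valid = multiclass_non_max_suppression(
      boxes, scores, score_thresh=0.3, iou_thresh=0.5, max_size_per_class=10)
  return nmsed_boxlist, num_valid  # num_valid evaluates to 1.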
def class_agnostic_non_max_suppression(boxes,
scores,
score_thresh,
iou_thresh,
max_classes_per_detection=1,
max_total_size=0,
clip_window=None,
change_coordinate_frame=False,
masks=None,
boundaries=None,
pad_to_max_output_size=False,
use_partitioned_nms=False,
additional_fields=None,
soft_nms_sigma=0.0,
scope=None):
"""Class-agnostic version of non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. It operates on all the boxes using
max scores across all classes for which scores are provided (via the scores
field of the input box_list), pruning boxes with score less than a provided
threshold prior to applying NMS.
Please note that this operation is performed in a class-agnostic way,
therefore any background classes should be removed prior to calling this
function.
Selected boxes are guaranteed to be sorted in decreasing order by score (but
the sort is not guaranteed to be stable).
Args:
boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either
number of classes or 1 depending on whether a separate box is predicted
per class.
scores: A [k, num_classes] float32 tensor containing the scores for each of
the k detections. The scores have to be non-negative when
pad_to_max_output_size is True.
score_thresh: scalar threshold for score (low scoring boxes are removed).
iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
with previously selected boxes are removed).
max_classes_per_detection: maximum number of retained classes per detection
box in class-agnostic NMS.
max_total_size: maximum number of boxes retained over all classes. By
default returns all boxes retained after capping boxes per class.
clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]
representing the window to clip and normalize boxes to before performing
non-max suppression.
change_coordinate_frame: Whether to normalize coordinates after clipping
relative to clip_window (this can only be set to True if a clip_window is
provided)
masks: (optional) a [k, q, mask_height, mask_width] float32 tensor
containing box masks. `q` can be either number of classes or 1 depending
on whether a separate mask is predicted per class.
boundaries: (optional) a [k, q, boundary_height, boundary_width] float32
tensor containing box boundaries. `q` can be either number of classes or 1
depending on whether a separate boundary is predicted per class.
    pad_to_max_output_size: If true, the output nmsed boxes are padded to be of
      length `max_total_size`. Defaults to false.
use_partitioned_nms: If true, use partitioned version of
non_max_suppression.
additional_fields: (optional) If not None, a dictionary that maps keys to
tensors whose first dimensions are all of size `k`. After non-maximum
suppression, all tensors corresponding to the selected boxes will be added
to resulting BoxList.
    soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter
      (see Bodla et al., https://arxiv.org/abs/1704.04503). When
`soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)
NMS. Soft NMS is currently only supported when pad_to_max_output_size is
False.
scope: name scope.
Returns:
A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a
      BoxList holding M boxes with a rank-1 scores field representing
corresponding scores for each box with scores sorted in decreasing order
and a rank-1 classes field representing a class label for each box. The
num_valid_nms_boxes is a 0-D integer tensor representing the number of
valid elements in `BoxList`, with the valid elements appearing first.
Raises:
ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have
a valid scores field or if non-zero soft_nms_sigma is provided when
pad_to_max_output_size is True.
"""
_validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh,
change_coordinate_frame, clip_window)
if pad_to_max_output_size and soft_nms_sigma != 0.0:
raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not '
'supported when pad_to_max_output_size is True.')
if max_classes_per_detection > 1:
raise ValueError('Max classes per detection box >1 not supported.')
q = shape_utils.get_dim_as_int(boxes.shape[1])
if q > 1:
class_ids = tf.expand_dims(
tf.argmax(scores, axis=1, output_type=tf.int32), axis=1)
boxes = tf.batch_gather(boxes, class_ids)
if masks is not None:
masks = tf.batch_gather(masks, class_ids)
if boundaries is not None:
boundaries = tf.batch_gather(boundaries, class_ids)
boxes = tf.squeeze(boxes, axis=[1])
if masks is not None:
masks = tf.squeeze(masks, axis=[1])
if boundaries is not None:
boundaries = tf.squeeze(boundaries, axis=[1])
with tf.name_scope(scope, 'ClassAgnosticNonMaxSuppression'):
boxlist_and_class_scores = box_list.BoxList(boxes)
max_scores = tf.reduce_max(scores, axis=-1)
classes_with_max_scores = tf.argmax(scores, axis=-1)
boxlist_and_class_scores.add_field(fields.BoxListFields.scores, max_scores)
if masks is not None:
boxlist_and_class_scores.add_field(fields.BoxListFields.masks, masks)
if boundaries is not None:
boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries,
boundaries)
if additional_fields is not None:
for key, tensor in additional_fields.items():
boxlist_and_class_scores.add_field(key, tensor)
nms_result = None
selected_scores = None
if pad_to_max_output_size:
max_selection_size = max_total_size
if use_partitioned_nms:
(selected_indices, num_valid_nms_boxes,
boxlist_and_class_scores.data['boxes'],
boxlist_and_class_scores.data['scores'],
argsort_ids) = partitioned_non_max_suppression_padded(
boxlist_and_class_scores.get(),
boxlist_and_class_scores.get_field(fields.BoxListFields.scores),
max_selection_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh)
classes_with_max_scores = tf.gather(classes_with_max_scores,
argsort_ids)
else:
selected_indices, num_valid_nms_boxes = (
tf.image.non_max_suppression_padded(
boxlist_and_class_scores.get(),
boxlist_and_class_scores.get_field(fields.BoxListFields.scores),
max_selection_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
pad_to_max_output_size=True))
nms_result = box_list_ops.gather(boxlist_and_class_scores,
selected_indices)
selected_scores = nms_result.get_field(fields.BoxListFields.scores)
else:
max_selection_size = tf.minimum(max_total_size,
boxlist_and_class_scores.num_boxes())
if (hasattr(tf.image, 'non_max_suppression_with_scores') and
tf.compat.forward_compatible(2019, 6, 6)):
(selected_indices, selected_scores
) = tf.image.non_max_suppression_with_scores(
boxlist_and_class_scores.get(),
boxlist_and_class_scores.get_field(fields.BoxListFields.scores),
max_selection_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
soft_nms_sigma=soft_nms_sigma)
num_valid_nms_boxes = tf.shape(selected_indices)[0]
selected_indices = tf.concat([
selected_indices,
tf.zeros(max_selection_size - num_valid_nms_boxes, tf.int32)
], 0)
selected_scores = tf.concat(
[selected_scores,
tf.zeros(max_selection_size-num_valid_nms_boxes, tf.float32)], -1)
nms_result = box_list_ops.gather(boxlist_and_class_scores,
selected_indices)
else:
if soft_nms_sigma != 0:
raise ValueError('Soft NMS not supported in current TF version!')
selected_indices = tf.image.non_max_suppression(
boxlist_and_class_scores.get(),
boxlist_and_class_scores.get_field(fields.BoxListFields.scores),
max_selection_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh)
num_valid_nms_boxes = tf.shape(selected_indices)[0]
selected_indices = tf.concat(
[selected_indices,
tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0)
nms_result = box_list_ops.gather(boxlist_and_class_scores,
selected_indices)
selected_scores = nms_result.get_field(fields.BoxListFields.scores)
valid_nms_boxes_indices = tf.less(
tf.range(max_selection_size), num_valid_nms_boxes)
nms_result.add_field(
fields.BoxListFields.scores,
tf.where(valid_nms_boxes_indices,
selected_scores, -1*tf.ones(max_selection_size)))
selected_classes = tf.gather(classes_with_max_scores, selected_indices)
selected_classes = tf.cast(selected_classes, tf.float32)
nms_result.add_field(fields.BoxListFields.classes, selected_classes)
selected_boxes = nms_result
sorted_boxes = box_list_ops.sort_by_field(selected_boxes,
fields.BoxListFields.scores)
if clip_window is not None:
# When pad_to_max_output_size is False, it prunes the boxes with zero
# area.
sorted_boxes, num_valid_nms_boxes = _clip_window_prune_boxes(
sorted_boxes, clip_window, pad_to_max_output_size,
change_coordinate_frame)
if max_total_size:
max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes())
sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size))
num_valid_nms_boxes = tf.where(max_total_size > num_valid_nms_boxes,
num_valid_nms_boxes, max_total_size)
# Select only the valid boxes if pad_to_max_output_size is False.
if not pad_to_max_output_size:
sorted_boxes = box_list_ops.gather(sorted_boxes,
tf.range(num_valid_nms_boxes))
return sorted_boxes, num_valid_nms_boxes
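# Minimal usage sketch for `class_agnostic_non_max_suppression` (illustrative
# only; the helper below is not part of the library).
def _class_agnostic_nms_example():  # pragma: no cover
  """The per-anchor max score drives suppression regardless of class: the two
  anchors prefer different classes, but their boxes coincide, so the
  lower-scoring anchor is still suppressed."""
  boxes = tf.constant([[[0., 0., 1., 1.]],
                       [[0., 0., 1., 1.]]])  # [k=2, q=1, 4]
  scores = tf.constant([[0.9, 0.1],
                        [0.1, 0.8]])         # argmax classes 0 and 1
  nmsed_boxlist, num_valid = class_agnostic_non_max_suppression(
      boxes, scores, score_thresh=0.3, iou_thresh=0.5, max_total_size=10)
  return nmsed_boxlist, num_valid  # num_valid evaluates to 1.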
def batch_multiclass_non_max_suppression(boxes,
scores,
score_thresh,
iou_thresh,
max_size_per_class,
max_total_size=0,
clip_window=None,
change_coordinate_frame=False,
num_valid_boxes=None,
masks=None,
additional_fields=None,
soft_nms_sigma=0.0,
scope=None,
use_static_shapes=False,
use_partitioned_nms=False,
parallel_iterations=32,
use_class_agnostic_nms=False,
max_classes_per_detection=1,
use_dynamic_map_fn=False,
use_combined_nms=False,
use_hard_nms=False,
use_cpu_nms=False):
"""Multi-class version of non maximum suppression that operates on a batch.
This op is similar to `multiclass_non_max_suppression` but operates on a batch
of boxes and scores. See documentation for `multiclass_non_max_suppression`
for details.
Args:
boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing
detections. If `q` is 1 then same boxes are used for all classes
otherwise, if `q` is equal to number of classes, class-specific boxes are
used.
scores: A [batch_size, num_anchors, num_classes] float32 tensor containing
the scores for each of the `num_anchors` detections. The scores have to be
non-negative when use_static_shapes is set True.
score_thresh: scalar threshold for score (low scoring boxes are removed).
iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
with previously selected boxes are removed).
max_size_per_class: maximum number of retained boxes per class.
max_total_size: maximum number of boxes retained over all classes. By
default returns all boxes retained after capping boxes per class.
clip_window: A float32 tensor of shape [batch_size, 4] where each entry is
of the form [y_min, x_min, y_max, x_max] representing the window to clip
boxes to before performing non-max suppression. This argument can also be
a tensor of shape [4] in which case, the same clip window is applied to
      all images in the batch. If clip_window is None, all boxes are used to
perform non-max suppression.
change_coordinate_frame: Whether to normalize coordinates after clipping
relative to clip_window (this can only be set to True if a clip_window is
provided)
num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape
[batch_size] representing the number of valid boxes to be considered for
each image in the batch. This parameter allows for ignoring zero
paddings.
masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width]
float32 tensor containing box masks. `q` can be either number of classes
or 1 depending on whether a separate mask is predicted per class.
additional_fields: (optional) If not None, a dictionary that maps keys to
tensors whose dimensions are [batch_size, num_anchors, ...].
    soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter
      (see Bodla et al., https://arxiv.org/abs/1704.04503). When
`soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)
NMS. Soft NMS is currently only supported when pad_to_max_output_size is
False.
scope: tf scope name.
use_static_shapes: If true, the output nmsed boxes are padded to be of
length `max_size_per_class` and it doesn't clip boxes to max_total_size.
Defaults to false.
use_partitioned_nms: If true, use partitioned version of
non_max_suppression.
parallel_iterations: (optional) number of batch items to process in
parallel.
use_class_agnostic_nms: If true, this uses class-agnostic non max
suppression
max_classes_per_detection: Maximum number of retained classes per detection
box in class-agnostic NMS.
use_dynamic_map_fn: If true, images in the batch will be processed within a
dynamic loop. Otherwise, a static loop will be used if possible.
use_combined_nms: If true, it uses tf.image.combined_non_max_suppression (
multi-class version of NMS that operates on a batch).
It greedily selects a subset of detection bounding boxes, pruning away
boxes that have high IOU (intersection over union) overlap (> thresh) with
      already selected boxes. It operates independently for each batch item.
      Within each batch item, it operates independently for each class for
      which scores are provided (via the scores field of the input box_list),
pruning boxes with score less than a provided threshold prior to applying
NMS. This operation is performed on *all* batches and *all* classes
in the batch, therefore any background classes should be removed prior to
calling this function.
Masks and additional fields are not supported.
See argument checks in the code below for unsupported arguments.
use_hard_nms: Enforce the usage of hard NMS.
use_cpu_nms: Enforce NMS to run on CPU.
Returns:
'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor
containing the non-max suppressed boxes.
'nmsed_scores': A [batch_size, max_detections] float32 tensor containing
the scores for the boxes.
'nmsed_classes': A [batch_size, max_detections] float32 tensor
containing the class for boxes.
'nmsed_masks': (optional) a
[batch_size, max_detections, mask_height, mask_width] float32 tensor
containing masks for each selected box. This is set to None if input
`masks` is None.
'nmsed_additional_fields': (optional) a dictionary of
[batch_size, max_detections, ...] float32 tensors corresponding to the
tensors specified in the input `additional_fields`. This is not returned
if input `additional_fields` is None.
'num_detections': A [batch_size] int32 tensor indicating the number of
valid detections per batch item. Only the top num_detections[i] entries in
nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
entries are zero paddings.
Raises:
ValueError: if `q` in boxes.shape is not 1 or not equal to number of
classes as inferred from scores.shape.
"""
if use_combined_nms:
if change_coordinate_frame:
raise ValueError(
'change_coordinate_frame (normalizing coordinates'
' relative to clip_window) is not supported by combined_nms.')
if num_valid_boxes is not None:
raise ValueError('num_valid_boxes is not supported by combined_nms.')
if masks is not None:
raise ValueError('masks is not supported by combined_nms.')
if soft_nms_sigma != 0.0:
raise ValueError('Soft NMS is not supported by combined_nms.')
if use_class_agnostic_nms:
raise ValueError('class-agnostic NMS is not supported by combined_nms.')
if clip_window is not None:
tf.logging.warning(
'clip_window is not supported by combined_nms unless it is'
' [0. 0. 1. 1.] for each image.')
if additional_fields is not None:
tf.logging.warning('additional_fields is not supported by combined_nms.')
if parallel_iterations != 32:
tf.logging.warning('Number of batch items to be processed in parallel is'
' not configurable by combined_nms.')
if max_classes_per_detection > 1:
tf.logging.warning(
'max_classes_per_detection is not configurable by combined_nms.')
with tf.name_scope(scope, 'CombinedNonMaxSuppression'):
(batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes,
batch_num_detections) = tf.image.combined_non_max_suppression(
boxes=boxes,
scores=scores,
max_output_size_per_class=max_size_per_class,
max_total_size=max_total_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
pad_per_class=use_static_shapes)
# Not supported by combined_non_max_suppression.
batch_nmsed_masks = None
# Not supported by combined_non_max_suppression.
batch_nmsed_additional_fields = None
return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes,
batch_nmsed_masks, batch_nmsed_additional_fields,
batch_num_detections)
q = shape_utils.get_dim_as_int(boxes.shape[2])
num_classes = shape_utils.get_dim_as_int(scores.shape[2])
if q != 1 and q != num_classes:
raise ValueError('third dimension of boxes must be either 1 or equal '
'to the third dimension of scores.')
if change_coordinate_frame and clip_window is None:
    raise ValueError('if change_coordinate_frame is True, then a clip_window '
                     'must be specified.')
original_masks = masks
  # Create an ordered dictionary using the sorted keys from
  # additional_fields to ensure we get the same key-value assignment
  # in _single_image_nms_fn(). The dictionary is thus a sorted version of
  # additional_fields.
if additional_fields is None:
ordered_additional_fields = collections.OrderedDict()
else:
ordered_additional_fields = collections.OrderedDict(
sorted(additional_fields.items(), key=lambda item: item[0]))
with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'):
boxes_shape = boxes.shape
batch_size = shape_utils.get_dim_as_int(boxes_shape[0])
num_anchors = shape_utils.get_dim_as_int(boxes_shape[1])
if batch_size is None:
batch_size = tf.shape(boxes)[0]
if num_anchors is None:
num_anchors = tf.shape(boxes)[1]
    # If num_valid_boxes isn't provided, create it and mark all boxes as
    # valid.
if num_valid_boxes is None:
num_valid_boxes = tf.ones([batch_size], dtype=tf.int32) * num_anchors
    # If masks aren't provided, create dummy masks so that we only need one
    # copy of _single_image_nms_fn, and discard the dummy masks after map_fn.
if masks is None:
masks_shape = tf.stack([batch_size, num_anchors, q, 1, 1])
masks = tf.zeros(masks_shape)
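    # If no clip_window is provided, use the smallest window that spans all
    # boxes, which makes the subsequent clipping a no-op.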
if clip_window is None:
clip_window = tf.stack([
tf.reduce_min(boxes[:, :, :, 0]),
tf.reduce_min(boxes[:, :, :, 1]),
tf.reduce_max(boxes[:, :, :, 2]),
tf.reduce_max(boxes[:, :, :, 3])
])
if clip_window.shape.ndims == 1:
clip_window = tf.tile(tf.expand_dims(clip_window, 0), [batch_size, 1])
def _single_image_nms_fn(args):
"""Runs NMS on a single image and returns padded output.
Args:
args: A list of tensors consisting of the following:
per_image_boxes - A [num_anchors, q, 4] float32 tensor containing
detections. If `q` is 1 then same boxes are used for all classes
otherwise, if `q` is equal to number of classes, class-specific
boxes are used.
per_image_scores - A [num_anchors, num_classes] float32 tensor
containing the scores for each of the `num_anchors` detections.
per_image_masks - A [num_anchors, q, mask_height, mask_width] float32
tensor containing box masks. `q` can be either number of classes
or 1 depending on whether a separate mask is predicted per class.
per_image_clip_window - A 1D float32 tensor of the form
[ymin, xmin, ymax, xmax] representing the window to clip the boxes
to.
per_image_additional_fields - (optional) A variable number of float32
tensors each with size [num_anchors, ...].
        per_image_num_valid_boxes - A scalar tensor of type `int32`
          representing the number of valid boxes in this image. This
          parameter allows for ignoring zero paddings.
Returns:
'nmsed_boxes': A [max_detections, 4] float32 tensor containing the
non-max suppressed boxes.
'nmsed_scores': A [max_detections] float32 tensor containing the scores
for the boxes.
      'nmsed_classes': A [max_detections] float32 tensor containing the
        classes for the boxes.
'nmsed_masks': (optional) a [max_detections, mask_height, mask_width]
float32 tensor containing masks for each selected box. This is set to
None if input `masks` is None.
'nmsed_additional_fields': (optional) A variable number of float32
tensors each with size [max_detections, ...] corresponding to the
input `per_image_additional_fields`.
      'num_detections': An int32 scalar indicating the number of valid
        detections. Only the top num_detections entries in nmsed_boxes,
        nmsed_scores and nmsed_classes are valid; the rest of the entries are
        zero paddings.
"""
per_image_boxes = args[0]
per_image_scores = args[1]
per_image_masks = args[2]
per_image_clip_window = args[3]
      # Make sure that the order of elements passed in args is aligned with
      # the iteration order of ordered_additional_fields.
per_image_additional_fields = {
key: value
for key, value in zip(ordered_additional_fields, args[4:-1])
}
per_image_num_valid_boxes = args[-1]
if use_static_shapes:
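        # Rather than slicing out the valid boxes (which would produce a
        # dynamic shape), mask out the padded proposals by setting their
        # scores to the lowest representable float32 value, so they fall
        # below the score threshold.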
total_proposals = tf.shape(per_image_scores)
per_image_scores = tf.where(
tf.less(tf.range(total_proposals[0]), per_image_num_valid_boxes),
per_image_scores,
tf.fill(total_proposals, np.finfo('float32').min))
else:
per_image_boxes = tf.reshape(
tf.slice(per_image_boxes, 3 * [0],
tf.stack([per_image_num_valid_boxes, -1, -1])), [-1, q, 4])
per_image_scores = tf.reshape(
tf.slice(per_image_scores, [0, 0],
tf.stack([per_image_num_valid_boxes, -1])),
[-1, num_classes])
per_image_masks = tf.reshape(
tf.slice(per_image_masks, 4 * [0],
tf.stack([per_image_num_valid_boxes, -1, -1, -1])),
[-1, q, shape_utils.get_dim_as_int(per_image_masks.shape[2]),
shape_utils.get_dim_as_int(per_image_masks.shape[3])])
if per_image_additional_fields is not None:
for key, tensor in per_image_additional_fields.items():
additional_field_shape = tensor.get_shape()
additional_field_dim = len(additional_field_shape)
per_image_additional_fields[key] = tf.reshape(
tf.slice(
per_image_additional_fields[key],
additional_field_dim * [0],
tf.stack([per_image_num_valid_boxes] +
(additional_field_dim - 1) * [-1])), [-1] + [
shape_utils.get_dim_as_int(dim)
for dim in additional_field_shape[1:]
])
if use_class_agnostic_nms:
nmsed_boxlist, num_valid_nms_boxes = class_agnostic_non_max_suppression(
per_image_boxes,
per_image_scores,
score_thresh,
iou_thresh,
max_classes_per_detection,
max_total_size,
clip_window=per_image_clip_window,
change_coordinate_frame=change_coordinate_frame,
masks=per_image_masks,
pad_to_max_output_size=use_static_shapes,
use_partitioned_nms=use_partitioned_nms,
additional_fields=per_image_additional_fields,
soft_nms_sigma=soft_nms_sigma)
else:
nmsed_boxlist, num_valid_nms_boxes = multiclass_non_max_suppression(
per_image_boxes,
per_image_scores,
score_thresh,
iou_thresh,
max_size_per_class,
max_total_size,
clip_window=per_image_clip_window,
change_coordinate_frame=change_coordinate_frame,
masks=per_image_masks,
pad_to_max_output_size=use_static_shapes,
use_partitioned_nms=use_partitioned_nms,
additional_fields=per_image_additional_fields,
soft_nms_sigma=soft_nms_sigma,
use_hard_nms=use_hard_nms,
use_cpu_nms=use_cpu_nms)
if not use_static_shapes:
nmsed_boxlist = box_list_ops.pad_or_clip_box_list(
nmsed_boxlist, max_total_size)
num_detections = num_valid_nms_boxes
nmsed_boxes = nmsed_boxlist.get()
nmsed_scores = nmsed_boxlist.get_field(fields.BoxListFields.scores)
nmsed_classes = nmsed_boxlist.get_field(fields.BoxListFields.classes)
nmsed_masks = nmsed_boxlist.get_field(fields.BoxListFields.masks)
nmsed_additional_fields = []
# Sorting is needed here to ensure that the values stored in
# nmsed_additional_fields are always kept in the same order
# across different execution runs.
for key in sorted(per_image_additional_fields.keys()):
nmsed_additional_fields.append(nmsed_boxlist.get_field(key))
return ([nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks] +
nmsed_additional_fields + [num_detections])
num_additional_fields = 0
if ordered_additional_fields:
num_additional_fields = len(ordered_additional_fields)
num_nmsed_outputs = 4 + num_additional_fields
if use_dynamic_map_fn:
map_fn = tf.map_fn
else:
map_fn = shape_utils.static_or_dynamic_map_fn
batch_outputs = map_fn(
_single_image_nms_fn,
elems=([boxes, scores, masks, clip_window] +
list(ordered_additional_fields.values()) + [num_valid_boxes]),
dtype=(num_nmsed_outputs * [tf.float32] + [tf.int32]),
parallel_iterations=parallel_iterations)
batch_nmsed_boxes = batch_outputs[0]
batch_nmsed_scores = batch_outputs[1]
batch_nmsed_classes = batch_outputs[2]
batch_nmsed_masks = batch_outputs[3]
batch_nmsed_values = batch_outputs[4:-1]
batch_nmsed_additional_fields = {}
if num_additional_fields > 0:
# Sort the keys to ensure arranging elements in same order as
# in _single_image_nms_fn.
batch_nmsed_keys = list(ordered_additional_fields.keys())
for i in range(len(batch_nmsed_keys)):
batch_nmsed_additional_fields[
batch_nmsed_keys[i]] = batch_nmsed_values[i]
batch_num_detections = batch_outputs[-1]
if original_masks is None:
batch_nmsed_masks = None
if not ordered_additional_fields:
batch_nmsed_additional_fields = None
return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes,
batch_nmsed_masks, batch_nmsed_additional_fields,
batch_num_detections)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/post_processing.py | post_processing.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A freezable batch norm layer that uses Keras sync batch normalization."""
import tensorflow as tf
class FreezableSyncBatchNorm(
    tf.keras.layers.experimental.SyncBatchNormalization):
"""Sync Batch normalization layer (Ioffe and Szegedy, 2014).
  This is a `freezable` batch norm layer that supports setting the `training`
  parameter in the __init__ method rather than having to set it either via
  the Keras learning phase or via the `call` method parameter. This layer will
  forward all other parameters to the Keras `SyncBatchNormalization` layer.
  This class is necessary because Object Detection model training sometimes
  requires batch normalization layers to be `frozen` and used as if it were
  evaluation time, despite still training (and potentially using dropout
  layers).
  Like the default Keras SyncBatchNormalization layer, this will normalize the
  activations of the previous layer at each batch, i.e., it applies a
  transformation that maintains the mean activation close to 0 and the
  activation standard deviation close to 1.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
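  Example:
    A minimal usage sketch (illustrative, not from the original docs).
    Constructing the layer with `training=False` freezes it, so it always
    normalizes with its moving statistics, even while the surrounding model
    is training:
      frozen_bn = FreezableSyncBatchNorm(training=False)
      inputs = tf.keras.Input(shape=(8,))
      outputs = frozen_bn(inputs)
      model = tf.keras.Model(inputs, outputs)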
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
def __init__(self, training=None, **kwargs):
"""Constructor.
Args:
      training: If False, the layer will normalize using the moving average and
        standard deviation, without updating the learned average and standard
        deviation. If None or True, the layer will follow the Keras
        SyncBatchNormalization layer strategy of checking the Keras learning
        phase at `call` time to decide what to do.
**kwargs: The keyword arguments to forward to the keras
SyncBatchNormalization layer constructor.
"""
super(FreezableSyncBatchNorm, self).__init__(**kwargs)
self._training = training
def call(self, inputs, training=None):
# Override the call arg only if the batchnorm is frozen. (Ignore None)
if self._training is False: # pylint: disable=g-bool-id-comparison
training = self._training
return super(FreezableSyncBatchNorm, self).call(inputs, training=training)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/freezable_sync_batch_norm.py | freezable_sync_batch_norm.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A freezable batch norm layer that uses Keras batch normalization."""
import tensorflow.compat.v1 as tf
class FreezableBatchNorm(tf.keras.layers.BatchNormalization):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
  This is a `freezable` batch norm layer that supports setting the `training`
  parameter in the __init__ method rather than having to set it either via
  the Keras learning phase or via the `call` method parameter. This layer will
  forward all other parameters to the default Keras `BatchNormalization`
  layer.
  This class is necessary because Object Detection model training sometimes
  requires batch normalization layers to be `frozen` and used as if it were
  evaluation time, despite still training (and potentially using dropout
  layers).
  Like the default Keras BatchNormalization layer, this will normalize the
  activations of the previous layer at each batch, i.e., it applies a
  transformation that maintains the mean activation close to 0 and the
  activation standard deviation close to 1.
Args:
    training: If False, the layer will normalize using the moving average and
      standard deviation, without updating the learned average and standard
      deviation. If None or True, the layer will follow the Keras
      BatchNormalization layer strategy of checking the Keras learning phase
      at `call` time to decide what to do.
**kwargs: The keyword arguments to forward to the keras BatchNormalization
layer constructor.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
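  Example:
    A minimal usage sketch (illustrative, not from the original docs). A
    frozen instance ignores the `training` argument passed at call time,
    whereas an unfrozen one follows it:
      frozen_bn = FreezableBatchNorm(training=False)
      x = tf.ones((2, 4))
      y = frozen_bn(x, training=True)  # Still uses the moving statistics.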
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
def __init__(self, training=None, **kwargs):
super(FreezableBatchNorm, self).__init__(**kwargs)
self._training = training
def call(self, inputs, training=None):
# Override the call arg only if the batchnorm is frozen. (Ignore None)
if self._training is False: # pylint: disable=g-bool-id-comparison
training = self._training
return super(FreezableBatchNorm, self).call(inputs, training=training)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/freezable_batch_norm.py | freezable_batch_norm.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.target_assigner."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
from object_detection.core import region_similarity_calculator
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as targetassigner
from object_detection.matchers import argmax_matcher
from object_detection.utils import np_box_ops
from object_detection.utils import test_case
from object_detection.utils import tf_version
class TargetAssignerTest(test_case.TestCase):
def test_assign_agnostic(self):
def graph_fn(anchor_means, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_ignored_matches(self):
    # Note: this test is very similar to the one above. The third box matches
    # with an IOU of 0.35, which is between the matched and unmatched
    # thresholds. This means that, as above, the expected classification
    # targets are [1, 1, 0]. Unlike above, the third match is ignored, and
    # therefore the expected classification weights are [1, 1, 0].
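    # (A worked check of that overlap: the anchor [0, 0.5, .9, 1.0] and the
    # groundtruth box [0.5, 0.5, 0.9, 0.9] intersect in a 0.4 x 0.4 region,
    # so IOU = 0.16 / (0.45 + 0.16 - 0.16) ~= 0.35.)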
def graph_fn(anchor_means, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.3)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [0]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_agnostic_with_keypoints(self):
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self):
    # Note: this test is very similar to the one above. However, the matched
    # and unmatched thresholds are both 0.5 here, so the third box (with an
    # IOU of roughly 0.35) falls below the unmatched threshold and is treated
    # as background rather than ignored. The expected classification targets
    # are therefore [1, 1, 0] with classification weights [1, 1, 1].
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_multiclass(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]]
exp_cls_weights = [[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_multiclass_with_groundtruth_weights(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels,
groundtruth_weights):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label,
groundtruth_weights=groundtruth_weights)
(_, cls_weights, _, reg_weights, _) = result
return (cls_weights, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32)
# background class gets weight of 1.
exp_cls_weights = [[0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]
exp_reg_weights = [0.3, 0., 0., 0.5] # background class gets weight of 0.
(cls_weights_out, reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_box_corners, groundtruth_labels,
groundtruth_weights
])
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_assign_multidimensional_class_targets(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([[0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 1], [1, .5]]], np.float32)
exp_cls_targets = [[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 0], [0, 0]],
[[0, 1], [1, .5]]]
exp_cls_weights = [[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([0, 0, 0], tf.float32)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
groundtruth_labels = np.zeros((0, 3), dtype=np.float32)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]],
dtype=np.float32)
exp_cls_targets = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
exp_cls_weights = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
exp_reg_weights = [0, 0, 0, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]])
priors = box_list.BoxList(prior_means)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.5, 0.8],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], tf.float32)
with self.assertRaisesRegexp(ValueError, 'Unequal shapes'):
target_assigner.assign(
priors,
boxes,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
def test_raises_error_on_invalid_groundtruth_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=1.0)
unmatched_class_label = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]])
priors = box_list.BoxList(prior_means)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32)
with self.assertRaises(ValueError):
target_assigner.assign(
priors,
boxes,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
class BatchTargetAssignerTest(test_case.TestCase):
def _get_target_assigner(self):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)
def test_batch_assign_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [None, None]
anchors_boxlist = box_list.BoxList(anchor_means)
agnostic_target_assigner = self._get_target_assigner()
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
agnostic_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1], [0], [0], [0]],
[[0], [1], [1], [0]]]
exp_cls_weights = [[[1], [1], [1], [1]],
[[1], [1], [1], [1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets_with_padded_groundtruth(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
gt_weights = [groundtruth_weights1, groundtruth_weights2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label, gt_weights)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
[0., 0., 0., 0.]], dtype=np.float32)
groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842],
[0, 0, 0, 0]],
dtype=np.float32)
groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multidimensional_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
target_dimensions = (2, 3)
unmatched_class_label = tf.constant(np.zeros(target_dimensions),
tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[[0, 1, 1],
[1, 1, 0]]], dtype=np.float32)
class_targets2 = np.array([[[0, 1, 1],
[1, 1, 0]],
[[0, 0, 1],
[0, 0, 1]]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]]],
[[[0., 0., 0.],
[0., 0., 0.]],
[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 1.],
[0., 0., 1.]],
[[0., 0., 0.],
[0., 0., 0.]]]]
exp_cls_weights = [[[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]]],
[[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, gt_class_targets):
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
gt_box_batch = [groundtruth_boxlist]
gt_class_targets_batch = [gt_class_targets]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist,
gt_box_batch, gt_class_targets_batch, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[0, 0]]
num_classes = 3
pad = 1
gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32)
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, gt_class_targets])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
class BatchGetTargetsTest(test_case.TestCase):
def test_scalar_targets(self):
batch_match = np.array([[1, 0, 1],
[-2, -1, 1]], dtype=np.int32)
groundtruth_tensors_list = np.array([[11, 12], [13, 14]], dtype=np.int32)
groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]],
dtype=np.float32)
unmatched_value = np.array(99, dtype=np.int32)
unmatched_weight = np.array(0.0, dtype=np.float32)
def graph_fn(batch_match, groundtruth_tensors_list,
groundtruth_weights_list, unmatched_value, unmatched_weight):
targets, weights = targetassigner.batch_get_targets(
batch_match, tf.unstack(groundtruth_tensors_list),
tf.unstack(groundtruth_weights_list),
unmatched_value, unmatched_weight)
return (targets, weights)
(targets_np, weights_np) = self.execute(graph_fn, [
batch_match, groundtruth_tensors_list, groundtruth_weights_list,
unmatched_value, unmatched_weight
])
self.assertAllEqual([[12, 11, 12],
[99, 99, 14]], targets_np)
self.assertAllClose([[1.0, 1.0, 1.0],
[0.0, 0.0, 0.5]], weights_np)
def test_1d_targets(self):
batch_match = np.array([[1, 0, 1],
[-2, -1, 1]], dtype=np.int32)
groundtruth_tensors_list = np.array([[[11, 12], [12, 13]],
[[13, 14], [14, 15]]],
dtype=np.float32)
groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]],
dtype=np.float32)
unmatched_value = np.array([99, 99], dtype=np.float32)
unmatched_weight = np.array(0.0, dtype=np.float32)
def graph_fn(batch_match, groundtruth_tensors_list,
groundtruth_weights_list, unmatched_value, unmatched_weight):
targets, weights = targetassigner.batch_get_targets(
batch_match, tf.unstack(groundtruth_tensors_list),
tf.unstack(groundtruth_weights_list),
unmatched_value, unmatched_weight)
return (targets, weights)
(targets_np, weights_np) = self.execute(graph_fn, [
batch_match, groundtruth_tensors_list, groundtruth_weights_list,
unmatched_value, unmatched_weight
])
self.assertAllClose([[[12, 13], [11, 12], [12, 13]],
[[99, 99], [99, 99], [14, 15]]], targets_np)
self.assertAllClose([[1.0, 1.0, 1.0],
[0.0, 0.0, 0.5]], weights_np)
class BatchTargetAssignConfidencesTest(test_case.TestCase):
def _get_target_assigner(self):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)
def test_batch_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, gt_class_confidences):
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
gt_box_batch = [groundtruth_boxlist]
gt_class_confidences_batch = [gt_class_confidences]
anchors_boxlist = box_list.BoxList(anchor_means)
num_classes = 3
implicit_class_weight = 0.5
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
multiclass_target_assigner = self._get_target_assigner()
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1]], dtype=np.float32)
num_classes = 3
pad = 1
gt_class_confidences = np.zeros((0, num_classes + pad), dtype=np.float32)
exp_cls_targets = [[[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[0, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn,
[anchor_means, groundtruth_box_corners, gt_class_confidences])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_agnostic(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [None, None]
anchors_boxlist = box_list.BoxList(anchor_means)
agnostic_target_assigner = self._get_target_assigner()
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
agnostic_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
include_background_class=False,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1], [0], [0], [0]],
[[0], [1], [1], [0]]]
exp_cls_weights = [[[1], [0.5], [0.5], [0.5]],
[[0.5], [1], [1], [0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multiclass(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
implicit_class_weight = 0.5
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, -1, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5, 0.5],
[1, 0.5, 0.5, 1],
[0.5, 0.5, 1, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 0, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multiclass_with_padded_groundtruth(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
gt_weights = [groundtruth_weights1, groundtruth_weights2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
gt_weights,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
[0., 0., 0., 0.]], dtype=np.float32)
groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842],
[0, 0, 0, 0]],
dtype=np.float32)
groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, -1, 0],
[0, 0, 0, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5, 0.5],
[1, 0.5, 0.5, 1],
[0.5, 0.5, 1, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 0, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multidimensional(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
target_dimensions = (2, 3)
unmatched_class_label = tf.constant(np.zeros(target_dimensions),
tf.float32)
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[[0, 1, 1],
[1, 1, 0]]], dtype=np.float32)
class_targets2 = np.array([[[0, 1, 1],
[1, 1, 0]],
[[0, 0, 1],
[0, 0, 1]]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
with self.assertRaises(ValueError):
_, _, _, _ = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
class CreateTargetAssignerTest(test_case.TestCase):
def test_create_target_assigner(self):
"""Tests that named constructor gives working target assigners.
TODO(rathodv): Make this test more general.
"""
corners = [[0.0, 0.0, 1.0, 1.0]]
groundtruth = box_list.BoxList(tf.constant(corners))
priors = box_list.BoxList(tf.constant(corners))
if tf_version.is_tf1():
multibox_ta = (targetassigner
.create_target_assigner('Multibox', stage='proposal'))
multibox_ta.assign(priors, groundtruth)
# No tests on output, as that may vary arbitrarily as new target assigners
# are added. As long as it is constructed correctly and runs without errors,
# tests on the individual assigners cover correctness of the assignments.
anchors = box_list.BoxList(tf.constant(corners))
faster_rcnn_proposals_ta = (targetassigner
.create_target_assigner('FasterRCNN',
stage='proposal'))
faster_rcnn_proposals_ta.assign(anchors, groundtruth)
fast_rcnn_ta = (targetassigner
.create_target_assigner('FastRCNN'))
fast_rcnn_ta.assign(anchors, groundtruth)
faster_rcnn_detection_ta = (targetassigner
.create_target_assigner('FasterRCNN',
stage='detection'))
faster_rcnn_detection_ta.assign(anchors, groundtruth)
with self.assertRaises(ValueError):
targetassigner.create_target_assigner('InvalidDetector',
stage='invalid_stage')
def _array_argmax(array):
return np.unravel_index(np.argmax(array), array.shape)
class CenterNetCenterHeatmapTargetAssignerTest(test_case.TestCase,
parameterized.TestCase):
def setUp(self):
super(CenterNetCenterHeatmapTargetAssignerTest, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
def test_center_location(self):
"""Test that the centers are at the correct location."""
def graph_fn():
box_batch = [tf.constant([self._box_center, self._box_lower_left])]
classes = [
tf.one_hot([0, 1], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
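    # With stride 4 and an 80x80 input, the heatmap is 20x20. The first box's
    # center (0.5, 0.5) maps to (0.5 * 80 / 4, 0.5 * 80 / 4) = (10, 10); the
    # lower-left box's center (0.75, 0.25) maps to (15, 5).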
self.assertEqual((10, 10), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 10, 10, 0])
self.assertEqual((15, 5), _array_argmax(targets[0, :, :, 1]))
self.assertAlmostEqual(1.0, targets[0, 15, 5, 1])
@parameterized.parameters(
{'keypoint_weights_for_center': [1.0, 1.0, 1.0, 1.0]},
{'keypoint_weights_for_center': [0.0, 0.0, 1.0, 1.0]},
)
def test_center_location_by_keypoints(self, keypoint_weights_for_center):
"""Test that the centers are at the correct location."""
kpts_y = [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.0, 0.0, 0.0, 0.0]]
kpts_x = [[0.5, 0.6, 0.7, 0.8], [0.1, 0.2, 0.3, 0.4], [0.0, 0.0, 0.0, 0.0]]
gt_keypoints_list = [
tf.stack([tf.constant(kpts_y), tf.constant(kpts_x)], axis=2)
]
kpts_weight = [[1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0]]
gt_keypoints_weights_list = [tf.constant(kpts_weight)]
gt_classes_list = [
tf.one_hot([0, 0, 0], depth=1),
]
gt_weights_list = [tf.constant([1.0, 1.0, 0.0])]
def graph_fn():
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4,
keypoint_class_id=0,
keypoint_indices=[0, 1, 2, 3],
keypoint_weights_for_center=keypoint_weights_for_center)
targets = assigner.assign_center_targets_from_keypoints(
80,
80,
gt_classes_list=gt_classes_list,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_keypoints_weights_list=gt_keypoints_weights_list)
return targets
targets = self.execute(graph_fn, [])
if sum(keypoint_weights_for_center) == 4.0:
      # There should be two peaks, at locations (5, 13) and (12, 4).
# (5, 13) = ((0.1 + 0.2 + 0.3 + 0.4) / 4 * 80 / 4,
# (0.5 + 0.6 + 0.7 + 0.8) / 4 * 80 / 4)
# (12, 4) = ((0.5 + 0.7) / 2 * 80 / 4,
# (0.1 + 0.3) / 2 * 80 / 4)
self.assertEqual((5, 13), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 5, 13, 0])
self.assertEqual((1, 20, 20, 1), targets.shape)
targets[0, 5, 13, 0] = 0.0
self.assertEqual((12, 4), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 12, 4, 0])
else:
      # There should be two peaks, at locations (7, 15) and (14, 6).
# (7, 15) = ((0.3 + 0.4) / 2 * 80 / 4,
# (0.7 + 0.8) / 2 * 80 / 4)
# (14, 6) = (0.7 * 80 / 4, 0.3 * 80 / 4)
self.assertEqual((7, 15), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 7, 15, 0])
self.assertEqual((1, 20, 20, 1), targets.shape)
targets[0, 7, 15, 0] = 0.0
self.assertEqual((14, 6), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 14, 6, 0])
def test_center_batch_shape(self):
"""Test that the shape of the target for a batch is correct."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
self.assertEqual((3, 20, 20, 4), targets.shape)
def test_center_overlap_maximum(self):
"""Test that when boxes overlap we, are computing the maximum."""
def graph_fn():
box_batch = [
tf.constant([
self._box_center, self._box_center_offset, self._box_center,
self._box_center_offset
])
]
classes = [
tf.one_hot([0, 0, 1, 2], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
class0_targets = targets[0, :, :, 0]
class1_targets = targets[0, :, :, 1]
class2_targets = targets[0, :, :, 2]
np.testing.assert_allclose(class0_targets,
np.maximum(class1_targets, class2_targets))
def test_size_blur(self):
"""Test that the heatmap of a larger box is more blurred."""
def graph_fn():
box_batch = [tf.constant([self._box_center, self._box_center_small])]
classes = [
tf.one_hot([0, 1], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
self.assertGreater(
np.count_nonzero(targets[:, :, :, 0]),
np.count_nonzero(targets[:, :, :, 1]))
def test_weights(self):
"""Test that the weights correctly ignore ground truth."""
def graph1_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph1_fn, [])
self.assertAlmostEqual(1.0, targets[0, :, :, 0].max())
self.assertAlmostEqual(1.0, targets[0, :, :, 1].max())
self.assertAlmostEqual(1.0, targets[1, :, :, 2].max())
self.assertAlmostEqual(1.0, targets[2, :, :, 3].max())
self.assertAlmostEqual(0.0, targets[0, :, :, [2, 3]].max())
self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max())
self.assertAlmostEqual(0.0, targets[2, :, :, :3].max())
def graph2_fn():
weights = [
tf.constant([0., 1.]),
tf.constant([1.]),
tf.constant([1.]),
]
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes,
weights)
return targets
targets = self.execute(graph2_fn, [])
self.assertAlmostEqual(1.0, targets[0, :, :, 1].max())
self.assertAlmostEqual(1.0, targets[1, :, :, 2].max())
self.assertAlmostEqual(1.0, targets[2, :, :, 3].max())
self.assertAlmostEqual(0.0, targets[0, :, :, [0, 2, 3]].max())
self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max())
self.assertAlmostEqual(0.0, targets[2, :, :, :3].max())
def test_low_overlap(self):
def graph1_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.1)
targets_low_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_low_overlap
targets_low_overlap = self.execute(graph1_fn, [])
self.assertLess(1, np.count_nonzero(targets_low_overlap))
def graph2_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.6)
targets_medium_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_medium_overlap
targets_medium_overlap = self.execute(graph2_fn, [])
self.assertLess(1, np.count_nonzero(targets_medium_overlap))
def graph3_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.99)
targets_high_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_high_overlap
targets_high_overlap = self.execute(graph3_fn, [])
self.assertTrue(np.all(targets_low_overlap >= targets_medium_overlap))
self.assertTrue(np.all(targets_medium_overlap >= targets_high_overlap))
def test_empty_box_list(self):
"""Test that an empty box list gives an all 0 heatmap."""
def graph_fn():
box_batch = [
tf.zeros((0, 4), dtype=tf.float32),
]
classes = [
tf.zeros((0, 5), dtype=tf.float32),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.1)
targets = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets
targets = self.execute(graph_fn, [])
np.testing.assert_allclose(targets, 0.)
class CenterNetBoxTargetAssignerTest(test_case.TestCase):
def setUp(self):
super(CenterNetBoxTargetAssignerTest, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
def test_max_distance_for_overlap(self):
"""Test that the distance ensures the IoU with random boxes."""
# TODO(vighneshb) remove this after the `_smallest_positive_root`
    # function is fixed.
    self.skipTest(('Skipping test because we are using an incorrect version '
                   'of the `max_distance_for_overlap` function to reproduce '
                   'results.'))
rng = np.random.RandomState(0)
n_samples = 100
width = rng.uniform(1, 100, size=n_samples)
height = rng.uniform(1, 100, size=n_samples)
min_iou = rng.uniform(0.1, 1.0, size=n_samples)
def graph_fn():
max_dist = targetassigner.max_distance_for_overlap(height, width, min_iou)
return max_dist
max_dist = self.execute(graph_fn, [])
xmin1 = np.zeros(n_samples)
ymin1 = np.zeros(n_samples)
xmax1 = np.zeros(n_samples) + width
ymax1 = np.zeros(n_samples) + height
xmin2 = max_dist * np.cos(rng.uniform(0, 2 * np.pi))
ymin2 = max_dist * np.sin(rng.uniform(0, 2 * np.pi))
xmax2 = width + max_dist * np.cos(rng.uniform(0, 2 * np.pi))
ymax2 = height + max_dist * np.sin(rng.uniform(0, 2 * np.pi))
boxes1 = np.vstack([ymin1, xmin1, ymax1, xmax1]).T
boxes2 = np.vstack([ymin2, xmin2, ymax2, xmax2]).T
iou = np.diag(np_box_ops.iou(boxes1, boxes2))
self.assertTrue(np.all(iou >= min_iou))
def test_max_distance_for_overlap_centernet(self):
"""Test the version of the function used in the CenterNet paper."""
def graph_fn():
distance = targetassigner.max_distance_for_overlap(10, 5, 0.5)
return distance
distance = self.execute(graph_fn, [])
self.assertAlmostEqual(2.807764064, distance)
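    # A sketch of where 2.807764064 comes from, assuming the quadratic
    # coefficients of the CornerNet/CenterNet gaussian-radius derivation: in
    # the "both corners shifted" case, a = 4 * min_iou = 2,
    # b = 2 * min_iou * (height + width) = 15 and
    # c = (min_iou - 1) * height * width = -25, so the (uncorrected) root
    # (-b + sqrt(b**2 - 4 * a * c)) / 2 = (-15 + sqrt(425)) / 2 ~= 2.8078,
    # which is the minimum over the three cases.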
def test_assign_size_and_offset_targets(self):
"""Test the assign_size_and_offset_targets function."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center_offset]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
assigner = targetassigner.CenterNetBoxTargetAssigner(4)
indices, hw, yx_offset, weights = assigner.assign_size_and_offset_targets(
80, 80, box_batch)
return indices, hw, yx_offset, weights
indices, hw, yx_offset, weights = self.execute(graph_fn, [])
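    # Sanity check of the expectations below: the odd-coordinates box has
    # center (0.3625, 0.5875), which in the stride-4 output space
    # (80 / 4 = 20 cells) lands at (7.25, 11.75). Flooring gives the index
    # (2, 7, 11), the fractional parts give the offset (0.25, 0.75), and its
    # normalized height/width (0.4, 0.75) scale to (8, 15).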
self.assertEqual(indices.shape, (5, 3))
self.assertEqual(hw.shape, (5, 2))
self.assertEqual(yx_offset.shape, (5, 2))
self.assertEqual(weights.shape, (5,))
np.testing.assert_array_equal(
indices,
[[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_equal(
hw, [[20, 20], [10, 10], [18, 19], [10, 10], [8, 15]])
np.testing.assert_array_equal(
yx_offset, [[0, 0], [0, 0], [0, 0.5], [0, 0], [0.25, 0.75]])
np.testing.assert_array_equal(weights, 1)
def test_assign_size_and_offset_targets_weights(self):
"""Test the assign_size_and_offset_targets function with box weights."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_lower_left, self._box_center_small]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
cn_assigner = targetassigner.CenterNetBoxTargetAssigner(4)
weights_batch = [
tf.constant([0.0, 1.0]),
tf.constant([1.0, 1.0]),
tf.constant([0.0, 0.0])
]
indices, hw, yx_offset, weights = cn_assigner.assign_size_and_offset_targets(
80, 80, box_batch, weights_batch)
return indices, hw, yx_offset, weights
indices, hw, yx_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (6, 3))
self.assertEqual(hw.shape, (6, 2))
self.assertEqual(yx_offset.shape, (6, 2))
self.assertEqual(weights.shape, (6,))
np.testing.assert_array_equal(indices,
[[0, 10, 10], [0, 15, 5], [1, 15, 5],
[1, 10, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_equal(
hw, [[20, 20], [10, 10], [10, 10], [10, 10], [10, 10], [8, 15]])
np.testing.assert_array_equal(
yx_offset, [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0.25, 0.75]])
np.testing.assert_array_equal(weights, [0, 1, 1, 1, 0, 0])
def test_get_batch_predictions_from_indices(self):
"""Test the get_batch_predictions_from_indices function.
    This test verifies that the indices returned by the
    assign_size_and_offset_targets function work as expected with a predicted
tensor.
"""
def graph_fn():
pred_array = np.ones((2, 40, 20, 2), dtype=np.int32) * -1000
pred_array[0, 20, 10] = [1, 2]
pred_array[0, 30, 5] = [3, 4]
pred_array[1, 20, 10] = [5, 6]
pred_array[1, 14, 11] = [7, 8]
pred_tensor = tf.constant(pred_array)
indices = tf.constant([
[0, 20, 10],
[0, 30, 5],
[1, 20, 10],
[1, 14, 11]
], dtype=tf.int32)
preds = targetassigner.get_batch_predictions_from_indices(
pred_tensor, indices)
return preds
preds = self.execute(graph_fn, [])
np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]])
def test_get_batch_predictions_from_indices_with_class(self):
"""Test the get_batch_predictions_from_indices function with class axis.
    This test verifies that the indices returned by the
    assign_size_and_offset_targets function work as expected with a predicted
tensor.
"""
def graph_fn():
pred_array = np.ones((2, 40, 20, 5, 2), dtype=np.int32) * -1000
pred_array[0, 20, 10, 0] = [1, 2]
pred_array[0, 30, 5, 2] = [3, 4]
pred_array[1, 20, 10, 1] = [5, 6]
pred_array[1, 14, 11, 4] = [7, 8]
pred_tensor = tf.constant(pred_array)
indices = tf.constant([
[0, 20, 10, 0],
[0, 30, 5, 2],
[1, 20, 10, 1],
[1, 14, 11, 4]
], dtype=tf.int32)
preds = targetassigner.get_batch_predictions_from_indices(
pred_tensor, indices)
return preds
preds = self.execute(graph_fn, [])
np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]])
class CenterNetKeypointTargetAssignerTest(test_case.TestCase):
def test_keypoint_heatmap_targets(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 1.0],
[0.4, 0.1, 0.4, 0.2, 0.1],
[float('nan'), 0.1, 0.5, 0.7, 0.6]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
gt_boxes_list = [
tf.constant(
np.array([[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0]]),
dtype=tf.float32)
]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2])
(targets, num_instances_batch,
valid_mask) = cn_assigner.assign_keypoint_heatmap_targets(
120,
80,
gt_keypoints_list,
gt_classes_list,
gt_boxes_list=gt_boxes_list)
return targets, num_instances_batch, valid_mask
targets, num_instances_batch, valid_mask = self.execute(graph_fn, [])
# keypoint (0.5, 0.5) is selected. The peak is expected to appear at the
# center of the image.
self.assertEqual((15, 10), _array_argmax(targets[0, :, :, 1]))
self.assertAlmostEqual(1.0, targets[0, 15, 10, 1])
# No peak for the first class since NaN is selected.
self.assertAlmostEqual(0.0, targets[0, 15, 10, 0])
# Verify the output heatmap shape.
self.assertAllEqual([1, 30, 20, 2], targets.shape)
# Verify the number of instances is correct.
np.testing.assert_array_almost_equal([[0, 1]],
num_instances_batch)
self.assertAllEqual([1, 30, 20, 2], valid_mask.shape)
    # When calling the function, we specify the class id to be 1 (matching the
    # 2nd and 4th instances) and the keypoint indices to be [0, 2]. The 2nd
    # instance belongs to the target class but has no valid keypoints, so the
    # region covered by both keypoint types of its bounding box
    # (0.0, 0.0, 0.5, 0.5) should be blacked out, which maps to
    # (0, 0, 15, 10) in absolute output space.
    self.assertAlmostEqual(np.sum(valid_mask[:, 0:15, 0:10, 0:2]), 0.0)
    # For the 4th instance (the other target-class instance), only the 1st
    # keypoint is invisible (NaN), so only the corresponding region of the
    # valid mask's first channel contains zeros.
    self.assertAlmostEqual(np.sum(valid_mask[:, 15:30, 10:20, 0]), 0.0)
# All other values are 1.0 so the sum is:
# 30 * 20 * 2 - 15 * 10 * 2 - 15 * 10 * 1 = 750.
self.assertAlmostEqual(np.sum(valid_mask), 750.0)
def test_assign_keypoints_offset_targets(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2])
(indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list)
return indices, weights, offsets
indices, weights, offsets = self.execute(graph_fn, [])
# Only the last element has positive weight.
np.testing.assert_array_almost_equal(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights)
# Validate the last element's indices and offsets.
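    # The only valid keypoint of the last target-class instance is
    # (0.12, 0.12), which maps to (0.12 * 120 / 4, 0.12 * 80 / 4) = (3.6, 2.4);
    # flooring yields the index (3, 2) and the fractional parts the offset
    # (0.6, 0.4).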
np.testing.assert_array_equal([0, 3, 2], indices[7, :])
np.testing.assert_array_almost_equal([0.6, 0.4], offsets[7, :])
def test_assign_keypoint_depths_target(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, 0.7, 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
depths = tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[0.5, 0.0, 7.0, 0.7, 0.4]]),
dtype=tf.float32)
gt_keypoint_depths_list = [depths]
gt_keypoint_depth_weights = tf.constant(
np.array([[1.0, 1.0, 1.0, 1.0, 1.0],
[float('nan'), 0.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.5, 1.0, 1.0]]),
dtype=tf.float32)
gt_keypoint_depth_weights_list = [gt_keypoint_depth_weights]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1)
(indices, depths, weights) = cn_assigner.assign_keypoints_depth_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_keypoint_depths_list=gt_keypoint_depths_list,
gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)
return indices, depths, weights
indices, depths, weights = self.execute(graph_fn, [])
    # Only the last 5 elements have positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5
], weights)
    # Validate the depth values of the last 5 elements.
np.testing.assert_array_almost_equal(
[7.0, 7.0, 7.0, 7.0, 7.0], depths[35:, 0])
self.assertEqual((40, 3), indices.shape)
np.testing.assert_array_equal([0, 2, 2], indices[35, :])
def test_assign_keypoint_depths_per_keypoints(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, 0.7, 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
depths = tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[0.5, 0.0, 7.0, 0.7, 0.4]]),
dtype=tf.float32)
gt_keypoint_depths_list = [depths]
gt_keypoint_depth_weights = tf.constant(
np.array([[1.0, 1.0, 1.0, 1.0, 1.0],
[float('nan'), 0.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.5, 1.0, 1.0]]),
dtype=tf.float32)
gt_keypoint_depth_weights_list = [gt_keypoint_depth_weights]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1,
per_keypoint_depth=True)
(indices, depths, weights) = cn_assigner.assign_keypoints_depth_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_keypoint_depths_list=gt_keypoint_depths_list,
gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)
return indices, depths, weights
indices, depths, weights = self.execute(graph_fn, [])
    # Only the last 5 elements have positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5
], weights)
    # Validate the depth values of the last 5 elements.
np.testing.assert_array_almost_equal(
[7.0, 7.0, 7.0, 7.0, 7.0], depths[35:, 0])
self.assertEqual((40, 4), indices.shape)
np.testing.assert_array_equal([0, 2, 2, 1], indices[35, :])
def test_assign_keypoints_offset_targets_radius(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1,
per_keypoint_offset=True)
(indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list)
return indices, weights, offsets
indices, weights, offsets = self.execute(graph_fn, [])
    # In total there are 8 * 5 (with neighbors) = 40 targets.
self.assertAllEqual(indices.shape, [40, 4])
self.assertAllEqual(offsets.shape, [40, 2])
self.assertAllEqual(weights.shape, [40])
    # Only the last 5 elements (a radius of 1 generates 5 valid points) have
    # positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0
], weights)
    # Validate the indices and offsets of the last element and its neighbors.
np.testing.assert_array_equal([0, 2, 2, 1], indices[35, :])
np.testing.assert_array_equal([0, 3, 1, 1], indices[36, :])
np.testing.assert_array_equal([0, 3, 2, 1], indices[37, :])
np.testing.assert_array_equal([0, 3, 3, 1], indices[38, :])
np.testing.assert_array_equal([0, 4, 2, 1], indices[39, :])
np.testing.assert_array_almost_equal([1.6, 0.4], offsets[35, :])
np.testing.assert_array_almost_equal([0.6, 1.4], offsets[36, :])
np.testing.assert_array_almost_equal([0.6, 0.4], offsets[37, :])
np.testing.assert_array_almost_equal([0.6, -0.6], offsets[38, :])
np.testing.assert_array_almost_equal([-0.4, 0.4], offsets[39, :])
def test_assign_joint_regression_targets(self):
def graph_fn():
gt_boxes_list = [
tf.constant(
np.array([[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0]]),
dtype=tf.float32)
]
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2])
(indices, offsets, weights) = cn_assigner.assign_joint_regression_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
return indices, offsets, weights
indices, offsets, weights = self.execute(graph_fn, [])
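    # The last instance's box [0, 0, 1, 1] has its center at (15, 10) in the
    # 30x20 output space, and its valid keypoint (0.12, 0.12) maps to
    # (3.6, 2.4), so the expected joint offset is (3.6 - 15, 2.4 - 10)
    # = (-11.4, -7.6).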
np.testing.assert_array_almost_equal(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights)
np.testing.assert_array_equal([0, 15, 10, 1], indices[7, :])
np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[7, :])
def test_assign_joint_regression_targets_radius(self):
def graph_fn():
gt_boxes_list = [
tf.constant(
np.array([[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0]]),
dtype=tf.float32)
]
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1)
(indices, offsets, weights) = cn_assigner.assign_joint_regression_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
return indices, offsets, weights
indices, offsets, weights = self.execute(graph_fn, [])
    # In total there are 8 * 5 (with neighbors) = 40 targets.
self.assertAllEqual(indices.shape, [40, 4])
self.assertAllEqual(offsets.shape, [40, 2])
self.assertAllEqual(weights.shape, [40])
    # Only the last 5 elements (a radius of 1 generates 5 valid points) have
    # positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0
], weights)
# Test the values of the indices and offsets of the last 5 elements.
np.testing.assert_array_equal([0, 14, 10, 1], indices[35, :])
np.testing.assert_array_equal([0, 15, 9, 1], indices[36, :])
np.testing.assert_array_equal([0, 15, 10, 1], indices[37, :])
np.testing.assert_array_equal([0, 15, 11, 1], indices[38, :])
np.testing.assert_array_equal([0, 16, 10, 1], indices[39, :])
np.testing.assert_array_almost_equal([-10.4, -7.6], offsets[35, :])
np.testing.assert_array_almost_equal([-11.4, -6.6], offsets[36, :])
np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[37, :])
np.testing.assert_array_almost_equal([-11.4, -8.6], offsets[38, :])
np.testing.assert_array_almost_equal([-12.4, -7.6], offsets[39, :])
class CenterNetMaskTargetAssignerTest(test_case.TestCase):
def test_assign_segmentation_targets(self):
def graph_fn():
gt_masks_list = [
# Example 0.
tf.constant([
[
[1., 0., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
],
[
[0., 0., 0., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
],
[
[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.],
]
], dtype=tf.float32),
# Example 1.
tf.constant([
[
[1., 1., 0., 1.],
[1., 1., 1., 1.],
[0., 0., 1., 1.],
[0., 0., 0., 1.],
],
[
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[1., 1., 0., 0.],
[1., 1., 0., 0.],
],
], dtype=tf.float32),
]
gt_classes_list = [
# Example 0.
tf.constant([[1., 0., 0.],
[0., 1., 0.],
[1., 0., 0.]], dtype=tf.float32),
# Example 1.
tf.constant([[0., 1., 0.],
[0., 1., 0.]], dtype=tf.float32)
]
gt_boxes_list = [
# Example 0.
tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.0, 0.5, 0.5, 1.0],
[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32),
# Example 1.
tf.constant([[0.0, 0.0, 1.0, 1.0],
[0.5, 0.0, 1.0, 0.5]], dtype=tf.float32)
]
gt_mask_weights_list = [
# Example 0.
tf.constant([0.0, 1.0, 1.0], dtype=tf.float32),
# Example 1.
tf.constant([1.0, 1.0], dtype=tf.float32)
]
cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=2)
segmentation_target, segmentation_weight = (
cn_assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list,
gt_mask_weights_list=gt_mask_weights_list,
mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR))
return segmentation_target, segmentation_weight
segmentation_target, segmentation_weight = self.execute(graph_fn, [])
expected_seg_target = np.array([
# Example 0 [[class 0, class 1], [background, class 0]]
[[[1, 0, 0], [0, 1, 0]],
[[0, 0, 0], [1, 0, 0]]],
# Example 1 [[class 1, class 1], [class 1, class 1]]
[[[0, 1, 0], [0, 1, 0]],
[[0, 1, 0], [0, 1, 0]]],
], dtype=np.float32)
np.testing.assert_array_almost_equal(
expected_seg_target, segmentation_target)
expected_seg_weight = np.array([
[[0, 1], [1, 1]],
[[1, 1], [1, 1]]], dtype=np.float32)
np.testing.assert_array_almost_equal(
expected_seg_weight, segmentation_weight)
def test_assign_segmentation_targets_no_objects(self):
def graph_fn():
gt_masks_list = [tf.zeros((0, 5, 5))]
gt_classes_list = [tf.zeros((0, 10))]
cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=1)
segmentation_target, _ = cn_assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list,
mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR)
return segmentation_target
segmentation_target = self.execute(graph_fn, [])
expected_seg_target = np.zeros((1, 5, 5, 10))
np.testing.assert_array_almost_equal(
expected_seg_target, segmentation_target)
class CenterNetDensePoseTargetAssignerTest(test_case.TestCase):
def test_assign_part_and_coordinate_targets(self):
def graph_fn():
gt_dp_num_points_list = [
# Example 0.
tf.constant([2, 0, 3], dtype=tf.int32),
# Example 1.
tf.constant([1, 1], dtype=tf.int32),
]
gt_dp_part_ids_list = [
# Example 0.
tf.constant([[1, 6, 0],
[0, 0, 0],
[0, 2, 3]], dtype=tf.int32),
# Example 1.
tf.constant([[7, 0, 0],
[0, 0, 0]], dtype=tf.int32),
]
gt_dp_surface_coords_list = [
# Example 0.
tf.constant(
[[[0.11, 0.2, 0.3, 0.4], # Box 0.
[0.6, 0.4, 0.1, 0.0],
[0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0], # Box 1.
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]],
[[0.22, 0.1, 0.6, 0.8], # Box 2.
[0.0, 0.4, 0.5, 1.0],
[0.3, 0.2, 0.4, 0.1]]],
dtype=tf.float32),
# Example 1.
tf.constant(
[[[0.5, 0.5, 0.3, 1.0], # Box 0.
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]],
[[0.2, 0.2, 0.5, 0.8], # Box 1.
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]]],
dtype=tf.float32),
]
gt_weights_list = [
# Example 0.
tf.constant([1.0, 1.0, 0.5], dtype=tf.float32),
# Example 1.
tf.constant([0.0, 1.0], dtype=tf.float32),
]
cn_assigner = targetassigner.CenterNetDensePoseTargetAssigner(stride=4)
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
cn_assigner.assign_part_and_coordinate_targets(
height=120,
width=80,
gt_dp_num_points_list=gt_dp_num_points_list,
gt_dp_part_ids_list=gt_dp_part_ids_list,
gt_dp_surface_coords_list=gt_dp_surface_coords_list,
gt_weights_list=gt_weights_list))
return batch_indices, batch_part_ids, batch_surface_coords, batch_weights
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
self.execute(graph_fn, []))
expected_batch_indices = np.array([
        # Example 0. The first set of indices is calculated as follows:
        # floor(0.11 * 120 / 4) = 3, floor(0.2 * 80 / 4) = 4.
[0, 3, 4, 1], [0, 18, 8, 6], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
[0, 0, 0, 0], [0, 6, 2, 0], [0, 0, 8, 2], [0, 9, 4, 3],
# Example 1.
[1, 15, 10, 7], [1, 0, 0, 0], [1, 0, 0, 0], [1, 6, 4, 0], [1, 0, 0, 0],
[1, 0, 0, 0]
], dtype=np.int32)
expected_batch_part_ids = tf.one_hot(
[1, 6, 0, 0, 0, 0, 0, 2, 3, 7, 0, 0, 0, 0, 0], depth=24).numpy()
    expected_batch_surface_coords = np.array([
        # Example 0.
        [0.3, 0.4], [0.1, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0],
        [0.6, 0.8], [0.5, 1.0], [0.4, 0.1],
        # Example 1.
        [0.3, 1.0], [0.0, 0.0], [0.0, 0.0], [0.5, 0.8], [0.0, 0.0], [0.0, 0.0],
    ], np.float32)
    expected_batch_weights = np.array([
        # Example 0.
        1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5,
        # Example 1.
        0.0, 0.0, 0.0, 1.0, 0.0, 0.0
    ], dtype=np.float32)
self.assertAllEqual(expected_batch_indices, batch_indices)
self.assertAllEqual(expected_batch_part_ids, batch_part_ids)
self.assertAllClose(expected_batch_surface_coords, batch_surface_coords)
self.assertAllClose(expected_batch_weights, batch_weights)
class CenterNetTrackTargetAssignerTest(test_case.TestCase):
def setUp(self):
super(CenterNetTrackTargetAssignerTest, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
def test_assign_track_targets(self):
"""Test the assign_track_targets function."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_lower_left, self._box_center_small]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
track_id_batch = [
tf.constant([0, 1]),
tf.constant([1, 0]),
tf.constant([0, 2]),
]
assigner = targetassigner.CenterNetTrackTargetAssigner(
stride=4, num_track_ids=3)
(batch_indices, batch_weights,
track_targets) = assigner.assign_track_targets(
height=80,
width=80,
gt_track_ids_list=track_id_batch,
gt_boxes_list=box_batch)
return batch_indices, batch_weights, track_targets
indices, weights, track_ids = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (3, 2, 3))
self.assertEqual(track_ids.shape, (3, 2, 3))
self.assertEqual(weights.shape, (3, 2))
np.testing.assert_array_equal(indices,
[[[0, 10, 10], [0, 15, 5]],
[[1, 15, 5], [1, 10, 10]],
[[2, 10, 10], [2, 7, 11]]])
np.testing.assert_array_equal(track_ids,
[[[1, 0, 0], [0, 1, 0]],
[[0, 1, 0], [1, 0, 0]],
[[1, 0, 0], [0, 0, 1]]])
np.testing.assert_array_equal(weights, [[1, 1], [1, 1], [1, 1]])
def test_assign_track_targets_weights(self):
"""Test the assign_track_targets function with box weights."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_lower_left, self._box_center_small]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
track_id_batch = [
tf.constant([0, 1]),
tf.constant([1, 0]),
tf.constant([0, 2]),
]
weights_batch = [
tf.constant([0.0, 1.0]),
tf.constant([1.0, 1.0]),
tf.constant([0.0, 0.0])
]
assigner = targetassigner.CenterNetTrackTargetAssigner(
stride=4, num_track_ids=3)
(batch_indices, batch_weights,
track_targets) = assigner.assign_track_targets(
height=80,
width=80,
gt_track_ids_list=track_id_batch,
gt_boxes_list=box_batch,
gt_weights_list=weights_batch)
return batch_indices, batch_weights, track_targets
indices, weights, track_ids = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (3, 2, 3))
self.assertEqual(track_ids.shape, (3, 2, 3))
self.assertEqual(weights.shape, (3, 2))
np.testing.assert_array_equal(indices,
[[[0, 10, 10], [0, 15, 5]],
[[1, 15, 5], [1, 10, 10]],
[[2, 10, 10], [2, 7, 11]]])
np.testing.assert_array_equal(track_ids,
[[[1, 0, 0], [0, 1, 0]],
[[0, 1, 0], [1, 0, 0]],
[[1, 0, 0], [0, 0, 1]]])
np.testing.assert_array_equal(weights, [[0, 1], [1, 1], [0, 0]])
# TODO(xwwang): Add a test for the case when no objects are detected.
class CornerOffsetTargetAssignerTest(test_case.TestCase):
def test_filter_overlap_min_area_empty(self):
"""Test that empty masks work on CPU."""
def graph_fn(masks):
return targetassigner.filter_mask_overlap_min_area(masks)
masks = self.execute_cpu(graph_fn, [np.zeros((0, 5, 5), dtype=np.float32)])
self.assertEqual(masks.shape, (0, 5, 5))
def test_filter_overlap_min_area(self):
"""Test the object with min. area is selected instead of overlap."""
def graph_fn(masks):
return targetassigner.filter_mask_overlap_min_area(masks)
masks = np.zeros((3, 4, 4), dtype=np.float32)
masks[0, :2, :2] = 1.0
masks[1, :3, :3] = 1.0
masks[2, 3, 3] = 1.0
masks = self.execute(graph_fn, [masks])
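    # Pixels covered by several masks are kept only in the mask with the
    # smallest area: mask 0 (area 4) keeps its 2x2 block, so mask 1 retains
    # just the L-shaped remainder of its original 3x3 block.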
self.assertAllClose(masks[0],
[[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
self.assertAllClose(masks[1],
[[0, 0, 1, 0],
[0, 0, 1, 0],
[1, 1, 1, 0],
[0, 0, 0, 0]])
self.assertAllClose(masks[2],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
def test_assign_corner_offset_single_object(self):
"""Test that corner offsets are correct with a single object."""
assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1)
def graph_fn():
boxes = [
tf.constant([[0., 0., 1., 1.]])
]
mask = np.zeros((1, 4, 4), dtype=np.float32)
mask[0, 1:3, 1:3] = 1.0
masks = [tf.constant(mask)]
return assigner.assign_corner_offset_targets(boxes, masks)
corner_offsets, foreground = self.execute(graph_fn, [])
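    # For each foreground pixel (y, x), the four channels below hold the
    # offsets to the box corners: (ymin - y, xmin - x, ymax - y, xmax - x),
    # with the box [0., 0., 1., 1.] spanning (0, 0, 4, 4) at stride 1.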
self.assertAllClose(foreground[0],
[[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]])
self.assertAllClose(corner_offsets[0, :, :, 0],
[[0, 0, 0, 0],
[0, -1, -1, 0],
[0, -2, -2, 0],
[0, 0, 0, 0]])
self.assertAllClose(corner_offsets[0, :, :, 1],
[[0, 0, 0, 0],
[0, -1, -2, 0],
[0, -1, -2, 0],
[0, 0, 0, 0]])
self.assertAllClose(corner_offsets[0, :, :, 2],
[[0, 0, 0, 0],
[0, 3, 3, 0],
[0, 2, 2, 0],
[0, 0, 0, 0]])
self.assertAllClose(corner_offsets[0, :, :, 3],
[[0, 0, 0, 0],
[0, 3, 2, 0],
[0, 3, 2, 0],
[0, 0, 0, 0]])
def test_assign_corner_offset_multiple_objects(self):
"""Test corner offsets are correct with multiple objects."""
assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1)
def graph_fn():
boxes = [
tf.constant([[0., 0., 1., 1.], [0., 0., 0., 0.]]),
tf.constant([[0., 0., .25, .25], [.25, .25, 1., 1.]])
]
mask1 = np.zeros((2, 4, 4), dtype=np.float32)
mask1[0, 0, 0] = 1.0
mask1[0, 3, 3] = 1.0
mask2 = np.zeros((2, 4, 4), dtype=np.float32)
mask2[0, :2, :2] = 1.0
mask2[1, 1:, 1:] = 1.0
masks = [tf.constant(mask1), tf.constant(mask2)]
return assigner.assign_corner_offset_targets(boxes, masks)
corner_offsets, foreground = self.execute(graph_fn, [])
self.assertEqual(corner_offsets.shape, (2, 4, 4, 4))
self.assertEqual(foreground.shape, (2, 4, 4))
self.assertAllClose(foreground[0],
[[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
self.assertAllClose(corner_offsets[0, :, :, 0],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, -3]])
self.assertAllClose(corner_offsets[0, :, :, 1],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, -3]])
self.assertAllClose(corner_offsets[0, :, :, 2],
[[4, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
self.assertAllClose(corner_offsets[0, :, :, 3],
[[4, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
self.assertAllClose(foreground[1],
[[1, 1, 0, 0],
[1, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
self.assertAllClose(corner_offsets[1, :, :, 0],
[[0, 0, 0, 0],
[-1, -1, 0, 0],
[0, -1, -1, -1],
[0, -2, -2, -2]])
self.assertAllClose(corner_offsets[1, :, :, 1],
[[0, -1, 0, 0],
[0, -1, -1, -2],
[0, 0, -1, -2],
[0, 0, -1, -2]])
self.assertAllClose(corner_offsets[1, :, :, 2],
[[1, 1, 0, 0],
[0, 0, 3, 3],
[0, 2, 2, 2],
[0, 1, 1, 1]])
self.assertAllClose(corner_offsets[1, :, :, 3],
[[1, 0, 0, 0],
[1, 0, 2, 1],
[0, 3, 2, 1],
[0, 3, 2, 1]])
def test_assign_corner_offsets_no_objects(self):
"""Test assignment works with empty input on cpu."""
assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1)
def graph_fn():
boxes = [
tf.zeros((0, 4), dtype=tf.float32)
]
masks = [tf.zeros((0, 5, 5), dtype=tf.float32)]
return assigner.assign_corner_offset_targets(boxes, masks)
corner_offsets, foreground = self.execute_cpu(graph_fn, [])
self.assertAllClose(corner_offsets, np.zeros((1, 5, 5, 4)))
self.assertAllClose(foreground, np.zeros((1, 5, 5)))
class CenterNetTemporalOffsetTargetAssignerTest(test_case.TestCase):
  def setUp(self):
    super(CenterNetTemporalOffsetTargetAssignerTest, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
self._offset_center = [0.5, 0.4]
self._offset_center_small = [0.1, 0.1]
self._offset_lower_left = [-0.1, 0.1]
self._offset_center_offset = [0.4, 0.3]
self._offset_odd_coord = [0.125, -0.125]
def test_assign_empty_groundtruths(self):
"""Tests the assign_offset_targets function with empty inputs."""
def graph_fn():
box_batch = [
tf.zeros((0, 4), dtype=tf.float32),
]
offset_batch = [
tf.zeros((0, 2), dtype=tf.float32),
]
match_flag_batch = [
tf.zeros((0), dtype=tf.float32),
]
assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4)
indices, temporal_offset, weights = assigner.assign_temporal_offset_targets(
80, 80, box_batch, offset_batch, match_flag_batch)
return indices, temporal_offset, weights
indices, temporal_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (0, 3))
self.assertEqual(temporal_offset.shape, (0, 2))
self.assertEqual(weights.shape, (0,))
def test_assign_offset_targets(self):
"""Tests the assign_offset_targets function."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center_offset]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
offset_batch = [
tf.constant([self._offset_center, self._offset_lower_left]),
tf.constant([self._offset_center_offset]),
tf.constant([self._offset_center_small, self._offset_odd_coord]),
]
match_flag_batch = [
tf.constant([1.0, 1.0]),
tf.constant([1.0]),
tf.constant([1.0, 1.0]),
]
assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4)
indices, temporal_offset, weights = assigner.assign_temporal_offset_targets(
80, 80, box_batch, offset_batch, match_flag_batch)
return indices, temporal_offset, weights
indices, temporal_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (5, 3))
self.assertEqual(temporal_offset.shape, (5, 2))
self.assertEqual(weights.shape, (5,))
np.testing.assert_array_equal(
indices,
[[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_almost_equal(
temporal_offset,
[[0.5, 0.4], [-0.1, 0.1], [0.4, 0.3], [0.1, 0.1], [0.125, -0.125]])
np.testing.assert_array_equal(weights, 1)
def test_assign_offset_targets_with_match_flags(self):
"""Tests the assign_offset_targets function with match flags."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center_offset]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
offset_batch = [
tf.constant([self._offset_center, self._offset_lower_left]),
tf.constant([self._offset_center_offset]),
tf.constant([self._offset_center_small, self._offset_odd_coord]),
]
match_flag_batch = [
tf.constant([0.0, 1.0]),
tf.constant([1.0]),
tf.constant([1.0, 1.0]),
]
cn_assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4)
weights_batch = [
tf.constant([1.0, 0.0]),
tf.constant([1.0]),
tf.constant([1.0, 1.0])
]
indices, temporal_offset, weights = cn_assigner.assign_temporal_offset_targets(
80, 80, box_batch, offset_batch, match_flag_batch, weights_batch)
return indices, temporal_offset, weights
indices, temporal_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (5, 3))
self.assertEqual(temporal_offset.shape, (5, 2))
self.assertEqual(weights.shape, (5,))
np.testing.assert_array_equal(
indices,
[[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_almost_equal(
temporal_offset,
[[0.5, 0.4], [-0.1, 0.1], [0.4, 0.3], [0.1, 0.1], [0.125, -0.125]])
np.testing.assert_array_equal(weights, [0, 0, 1, 1, 1])
class DETRTargetAssignerTest(test_case.TestCase):
def test_assign_detr(self):
def graph_fn(pred_corners, groundtruth_box_corners,
groundtruth_labels, predicted_labels):
detr_target_assigner = targetassigner.DETRTargetAssigner()
pred_boxlist = box_list.BoxList(pred_corners)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = detr_target_assigner.assign(
pred_boxlist, groundtruth_boxlist,
predicted_labels, groundtruth_labels)
(cls_targets, cls_weights, reg_targets, reg_weights) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
pred_corners = np.array([[0.25, 0.25, 0.4, 0.2],
[0.5, 0.8, 1.0, 0.8],
[0.9, 0.5, 0.1, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]],
dtype=np.float32)
predicted_labels = np.array([[-3.0, 3.0], [2.0, 9.4], [5.0, 1.0]],
dtype=np.float32)
groundtruth_labels = np.array([[0.0, 1.0], [0.0, 1.0]],
dtype=np.float32)
exp_cls_targets = [[0, 1], [0, 1], [1, 0]]
exp_cls_weights = [[1, 1], [1, 1], [1, 1]]
exp_reg_targets = [[0.25, 0.25, 0.5, 0.5],
[0.7, 0.7, 0.4, 0.4],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
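    # Note that the expected regression targets are the matched groundtruth
    # boxes rewritten in (cy, cx, h, w) form, e.g. [0.0, 0.0, 0.5, 0.5] becomes
    # [0.25, 0.25, 0.5, 0.5]; the unmatched prediction gets all-zero targets
    # and zero regression weight.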
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute_cpu(
graph_fn, [pred_corners, groundtruth_box_corners,
groundtruth_labels, predicted_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_batch_assign_detr(self):
def graph_fn(pred_corners, groundtruth_box_corners,
groundtruth_labels, predicted_labels):
detr_target_assigner = targetassigner.DETRTargetAssigner()
result = detr_target_assigner.batch_assign(
pred_corners, groundtruth_box_corners,
[predicted_labels], [groundtruth_labels])
(cls_targets, cls_weights, reg_targets, reg_weights) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
pred_corners = np.array([[[0.25, 0.25, 0.4, 0.2],
[0.5, 0.8, 1.0, 0.8],
[0.9, 0.5, 0.1, 1.0]]], dtype=np.float32)
groundtruth_box_corners = np.array([[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]]],
dtype=np.float32)
predicted_labels = np.array([[-3.0, 3.0], [2.0, 9.4], [5.0, 1.0]],
dtype=np.float32)
groundtruth_labels = np.array([[0.0, 1.0], [0.0, 1.0]],
dtype=np.float32)
exp_cls_targets = [[[0, 1], [0, 1], [1, 0]]]
exp_cls_weights = [[[1, 1], [1, 1], [1, 1]]]
exp_reg_targets = [[[0.25, 0.25, 0.5, 0.5],
[0.7, 0.7, 0.4, 0.4],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 1, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute_cpu(
graph_fn, [pred_corners, groundtruth_box_corners,
groundtruth_labels, predicted_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/target_assigner_test.py | target_assigner_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of the image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import six
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for box coder."""
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overriden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
pass
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overriden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
pass
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
      code_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
encoded_boxes.get_shape().assert_has_rank(3)
if (shape_utils.get_dim_as_int(encoded_boxes.get_shape()[1])
!= anchors.num_boxes_static()):
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(shape_utils.get_dim_as_int(encoded_boxes.get_shape()[1]),
anchors.num_boxes_static()))
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
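# A minimal usage sketch (names below are hypothetical): given a concrete
# BoxCoder subclass instance `my_coder`, a float32 tensor `encoded` of shape
# [batch_size, num_anchors, 4], and a BoxList `anchors` holding num_anchors
# boxes,
#
#   decoded = batch_decode(encoded, my_coder, anchors)
#
# unstacks the batch, decodes each [num_anchors, 4] slice with my_coder.decode,
# and restacks the results to shape [batch_size, num_anchors, 4].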
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/box_coder.py | box_coder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_coder."""
import tensorflow.compat.v1 as tf
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.utils import test_case
class MockBoxCoder(box_coder.BoxCoder):
"""Test BoxCoder that encodes/decodes using the multiply-by-two function."""
def code_size(self):
return 4
def _encode(self, boxes, anchors):
return 2.0 * boxes.get()
def _decode(self, rel_codes, anchors):
return box_list.BoxList(rel_codes / 2.0)
class BoxCoderTest(test_case.TestCase):
def test_batch_decode(self):
expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
[[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]
def graph_fn():
mock_anchor_corners = tf.constant(
[[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
mock_anchors = box_list.BoxList(mock_anchor_corners)
mock_box_coder = MockBoxCoder()
encoded_boxes_list = [mock_box_coder.encode(
box_list.BoxList(tf.constant(boxes)), mock_anchors)
for boxes in expected_boxes]
encoded_boxes = tf.stack(encoded_boxes_list)
decoded_boxes = box_coder.batch_decode(
encoded_boxes, mock_box_coder, mock_anchors)
return decoded_boxes
decoded_boxes_result = self.execute(graph_fn, [])
self.assertAllClose(expected_boxes, decoded_boxes_result)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/box_coder_test.py | box_coder_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List operations.
Example box operations that are supported:
* areas: compute bounding box areas
* iou: pairwise intersection-over-union scores
* sq_dist: pairwise distances between bounding boxes
Whenever box_list_ops functions output a BoxList, the fields of the incoming
BoxList are retained unless documented otherwise.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.utils import ops
from object_detection.utils import shape_utils
class SortOrder(object):
"""Enum class for sort order.
Attributes:
ascend: ascend order.
descend: descend order.
"""
ascend = 1
descend = 2
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def height_width(boxlist, scope=None):
"""Computes height and width of boxes in boxlist.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
Height: A tensor with shape [N] representing box heights.
Width: A tensor with shape [N] representing box widths.
"""
with tf.name_scope(scope, 'HeightWidth'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1])
def scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def scale_height_width(boxlist, y_scale, x_scale, scope=None):
"""Scale the height and width of boxes, leaving centers unchanged.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'ScaleHeightWidth'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
yc, xc, height_orig, width_orig = boxlist.get_center_coordinates_and_sizes()
y_min = yc - 0.5 * y_scale * height_orig
y_max = yc + 0.5 * y_scale * height_orig
x_min = xc - 0.5 * x_scale * width_orig
x_max = xc + 0.5 * x_scale * width_orig
scaled_boxlist = box_list.BoxList(
tf.stack([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):
"""Clip bounding boxes to a window.
This op clips any input bounding boxes (represented by bounding box
corners) to a window, optionally filtering out boxes that do not
overlap at all with the window.
Args:
boxlist: BoxList holding M_in boxes
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip boxes.
filter_nonoverlapping: whether to filter out boxes that do not overlap at
all with the window.
scope: name scope.
Returns:
a BoxList holding M_out boxes where M_out <= M_in
"""
with tf.name_scope(scope, 'ClipToWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min = window[0]
win_x_min = window[1]
win_y_max = window[2]
win_x_max = window[3]
y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)
y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)
x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)
x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)
clipped = box_list.BoxList(
tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],
1))
clipped = _copy_extra_fields(clipped, boxlist)
if filter_nonoverlapping:
areas = area(clipped)
nonzero_area_indices = tf.cast(
tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
clipped = gather(clipped, nonzero_area_indices)
return clipped
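# --- Editor's illustrative example (not part of the original library) ---
# A minimal sketch of clip_to_window: the first box partially overflows the
# unit window and is clipped; the second lies fully outside, is clipped to
# zero area, and is filtered out. The `_example_*` helper is hypothetical.
def _example_clip_to_window():
  boxes = box_list.BoxList(
      tf.constant([[-0.1, -0.1, 0.5, 0.5],
                   [1.2, 1.2, 1.5, 1.5]]))
  window = tf.constant([0.0, 0.0, 1.0, 1.0])
  return clip_to_window(boxes, window)  # BoxList holding [[0., 0., .5, .5]].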
def prune_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall outside a given window.
This function prunes bounding boxes that even partially fall outside the given
window. See also clip_to_window which only prunes bounding boxes that fall
completely outside the window, and clips any bounding boxes that partially
overflow.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_boxlist: a BoxList holding M_out boxes, where M_out <= M_in.
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope, 'PruneOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
def prune_completely_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall completely outside of the given window.
The function clip_to_window prunes bounding boxes that fall
completely outside the window, but also clips any bounding boxes that
partially overflow. This function does not clip partially overflowing boxes.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
the window.
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope, 'PruneCompletelyOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def matched_intersection(boxlist1, boxlist2, scope=None):
"""Compute intersection areas between corresponding boxes in two boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing pairwise intersections
"""
with tf.name_scope(scope, 'MatchedIntersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
min_ymax = tf.minimum(y_max1, y_max2)
max_ymin = tf.maximum(y_min1, y_min2)
intersect_heights = tf.maximum(0.0, min_ymax - max_ymin)
min_xmax = tf.minimum(x_max1, x_max2)
max_xmin = tf.maximum(x_min1, x_min2)
intersect_widths = tf.maximum(0.0, min_xmax - max_xmin)
return tf.reshape(intersect_heights * intersect_widths, [-1])
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
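# --- Editor's illustrative example (not part of the original library) ---
# A worked IoU computation: boxes2 holds the lower-right quadrant of the
# unit box in boxes1, so intersection = 0.25, union = 1.0, and the single
# pairwise score is 0.25. The `_example_*` helper is hypothetical.
def _example_iou():
  boxes1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
  boxes2 = box_list.BoxList(tf.constant([[0.5, 0.5, 1.0, 1.0]]))
  return iou(boxes1, boxes2)  # [1, 1] tensor containing 0.25.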
def l1(boxlist1, boxlist2, scope=None):
"""Computes l1 loss (pairwise) between two boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing the pairwise L1 loss.
"""
with tf.name_scope(scope, 'PairwiseL1'):
ycenter1, xcenter1, h1, w1 = boxlist1.get_center_coordinates_and_sizes()
ycenter2, xcenter2, h2, w2 = boxlist2.get_center_coordinates_and_sizes()
ycenters = tf.abs(tf.expand_dims(ycenter2, axis=0) - tf.expand_dims(
tf.transpose(ycenter1), axis=1))
xcenters = tf.abs(tf.expand_dims(xcenter2, axis=0) - tf.expand_dims(
tf.transpose(xcenter1), axis=1))
heights = tf.abs(tf.expand_dims(h2, axis=0) - tf.expand_dims(
tf.transpose(h1), axis=1))
widths = tf.abs(tf.expand_dims(w2, axis=0) - tf.expand_dims(
tf.transpose(w1), axis=1))
return ycenters + xcenters + heights + widths
def giou(boxlist1, boxlist2, scope=None):
"""Computes pairwise generalized IOU between two boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing the pairwise GIoU loss.
"""
with tf.name_scope(scope, 'PairwiseGIoU'):
n = boxlist1.num_boxes()
m = boxlist2.num_boxes()
boxes1 = tf.repeat(boxlist1.get(), repeats=m, axis=0)
boxes2 = tf.tile(boxlist2.get(), multiples=[n, 1])
return tf.reshape(ops.giou(boxes1, boxes2), [n, m])
def matched_iou(boxlist1, boxlist2, scope=None):
"""Compute intersection-over-union between corresponding boxes in boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'MatchedIOU'):
intersections = matched_intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = areas1 + areas2 - intersections
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
def ioa(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-area between box collections.
intersection-over-area (IOA) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, ioa(box1, box2) != ioa(box2, box1).
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise ioa scores.
"""
with tf.name_scope(scope, 'IOA'):
intersections = intersection(boxlist1, boxlist2)
areas = tf.expand_dims(area(boxlist2), 0)
return tf.truediv(intersections, areas)
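# --- Editor's illustrative example (not part of the original library) ---
# A sketch of IOA's asymmetry: intersections are normalized by the *second*
# argument's areas, so the small box covers 25% of the large one while the
# large box covers 100% of the small one. `_example_*` is hypothetical.
def _example_ioa_asymmetry():
  large = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
  small = box_list.BoxList(tf.constant([[0.5, 0.5, 1.0, 1.0]]))
  return ioa(small, large), ioa(large, small)  # [[0.25]] and [[1.0]].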
def prune_non_overlapping_boxes(
boxlist1, boxlist2, min_overlap=0.0, scope=None):
"""Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.
For each box in boxlist1, we want its IOA to be more than min_overlap with
at least one of the boxes in boxlist2. If no such box exists, we remove it.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
min_overlap: Minimum required overlap between boxes, to count them as
overlapping.
scope: name scope.
Returns:
new_boxlist1: A pruned boxlist with size [N', 4].
keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the
first input BoxList `boxlist1`.
"""
with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):
ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor
ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor
keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1])
new_boxlist1 = gather(boxlist1, keep_inds)
return new_boxlist1, keep_inds
def prune_small_boxes(boxlist, min_side, scope=None):
"""Prunes small boxes in the boxlist which have a side smaller than min_side.
Args:
boxlist: BoxList holding N boxes.
min_side: Minimum width AND height of box to survive pruning.
scope: name scope.
Returns:
A pruned boxlist.
"""
with tf.name_scope(scope, 'PruneSmallBoxes'):
height, width = height_width(boxlist)
is_valid = tf.logical_and(tf.greater_equal(width, min_side),
tf.greater_equal(height, min_side))
return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
def change_coordinate_frame(boxlist, window, scope=None):
"""Change coordinate frame of the boxlist to be relative to window's frame.
Given a window of the form [ymin, xmin, ymax, xmax],
changes bounding box coordinates from boxlist to be relative to this window
(e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).
An example use case is data augmentation, where we are given groundtruth
boxes (boxlist) and would like to randomly crop the image to some
window (window). In this case we need to change the coordinate frame of
each groundtruth box to be relative to this new window.
Args:
boxlist: A BoxList object holding N boxes.
window: A rank 1 tensor [4].
scope: name scope.
Returns:
Returns a BoxList object with N boxes.
"""
with tf.name_scope(scope, 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
boxlist_new = scale(box_list.BoxList(
boxlist.get() - [window[0], window[1], window[0], window[1]]),
1.0 / win_height, 1.0 / win_width)
boxlist_new = _copy_extra_fields(boxlist_new, boxlist)
return boxlist_new
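# --- Editor's illustrative example (not part of the original library) ---
# A worked coordinate-frame change for the crop use case above: a box in
# the right half of the image, re-expressed relative to the crop window
# [0, 0.5, 1, 1]. `_example_*` is a hypothetical helper.
def _example_change_coordinate_frame():
  boxes = box_list.BoxList(tf.constant([[0.25, 0.625, 0.75, 0.875]]))
  window = tf.constant([0.0, 0.5, 1.0, 1.0])
  new_boxes = change_coordinate_frame(boxes, window)
  return new_boxes  # Corners become [[0.25, 0.25, 0.75, 0.75]].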
def sq_dist(boxlist1, boxlist2, scope=None):
"""Computes the pairwise squared distances between box corners.
This op treats each box as if it were a point in a 4d Euclidean space and
computes pairwise squared distances.
Mathematically, we are given two matrices of box coordinates X and Y,
where X(i,:) is the i'th row of X, containing the 4 numbers defining the
corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to
boxlist2. We compute
Z(i,j) = ||X(i,:) - Y(j,:)||^2
= ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:),
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise distances
"""
with tf.name_scope(scope, 'SqDist'):
sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True)
sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True)
innerprod = tf.matmul(boxlist1.get(), boxlist2.get(),
transpose_a=False, transpose_b=True)
return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod
def boolean_mask(boxlist, indicator, fields=None, scope=None,
use_static_shapes=False, indicator_sum=None):
"""Select boxes from BoxList according to indicator and return new BoxList.
`boolean_mask` returns the subset of boxes that are marked as "True" by the
indicator tensor. By default, `boolean_mask` also returns all additional
fields stored in the boxlist (indexing into the first dimension). However,
one can optionally draw from only a subset of fields.
Args:
boxlist: BoxList holding N boxes
indicator: a rank-1 boolean tensor
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
guarantees.
indicator_sum: An integer containing the sum of `indicator` vector. Only
required if `use_static_shapes` is True.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indicator
Raises:
ValueError: if `indicator` is not a rank-1 boolean tensor.
"""
with tf.name_scope(scope, 'BooleanMask'):
if indicator.shape.ndims != 1:
raise ValueError('indicator should have rank 1')
if indicator.dtype != tf.bool:
raise ValueError('indicator should be a boolean tensor')
if use_static_shapes:
if not (indicator_sum and isinstance(indicator_sum, int)):
raise ValueError('`indicator_sum` must be of type int')
selected_positions = tf.cast(indicator, dtype=tf.float32)
indexed_positions = tf.cast(
tf.multiply(
tf.cumsum(selected_positions), selected_positions),
dtype=tf.int32)
one_hot_selector = tf.one_hot(
indexed_positions - 1, indicator_sum, dtype=tf.float32)
sampled_indices = tf.cast(
tf.tensordot(
tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32),
one_hot_selector,
axes=[0, 0]),
dtype=tf.int32)
return gather(boxlist, sampled_indices, use_static_shapes=True)
else:
subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
if fields is None:
fields = boxlist.get_extra_fields()
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
subboxlist.add_field(field, subfieldlist)
return subboxlist
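# --- Editor's illustrative example (not part of the original library) ---
# A minimal boolean_mask sketch (dynamic-shape path): the indicator keeps
# the first and third boxes along with their scores. `_example_*` is a
# hypothetical helper.
def _example_boolean_mask():
  boxes = box_list.BoxList(tf.constant(
      [[0.0, 0.0, 0.1, 0.1], [0.0, 0.0, 0.2, 0.2], [0.0, 0.0, 0.3, 0.3]]))
  boxes.add_field('scores', tf.constant([0.9, 0.1, 0.8]))
  indicator = tf.constant([True, False, True])
  return boolean_mask(boxes, indicator)  # 2 boxes with scores [0.9, 0.8].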
def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False):
"""Gather boxes from BoxList according to indices and return new BoxList.
By default, `gather` returns boxes corresponding to the input index list, as
well as all additional fields stored in the boxlist (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
boxlist: BoxList holding N boxes
indices: a rank-1 tensor of type int32 / int64
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
guarantees.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indices
Raises:
ValueError: if specified field is not contained in boxlist or if the
indices are not of type int32
"""
with tf.name_scope(scope, 'Gather'):
if len(indices.shape.as_list()) != 1:
raise ValueError('indices should have rank 1')
if indices.dtype != tf.int32 and indices.dtype != tf.int64:
raise ValueError('indices should be an int32 / int64 tensor')
gather_op = tf.gather
if use_static_shapes:
gather_op = ops.matmul_gather_on_zeroth_axis
subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices))
if fields is None:
fields = boxlist.get_extra_fields()
fields = fields + ['boxes']  # Avoid mutating a caller-provided list.
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = gather_op(boxlist.get_field(field), indices)
subboxlist.add_field(field, subfieldlist)
return subboxlist
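# --- Editor's illustrative example (not part of the original library) ---
# A minimal gather sketch: an int32 index tensor reorders the boxes and,
# by default, every extra field as well. `_example_*` is a hypothetical
# helper.
def _example_gather():
  boxes = box_list.BoxList(tf.constant(
      [[0.0, 0.0, 0.1, 0.1], [0.0, 0.0, 0.2, 0.2]]))
  boxes.add_field('scores', tf.constant([0.1, 0.9]))
  indices = tf.constant([1, 0], dtype=tf.int32)
  return gather(boxes, indices)  # Boxes and scores in reversed order.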
def concatenate(boxlists, fields=None, scope=None):
"""Concatenate list of BoxLists.
This op concatenates a list of input BoxLists into a larger BoxList. It also
handles concatenation of BoxList fields as long as the field tensor shapes
are equal except for the first dimension.
Args:
boxlists: list of BoxList objects
fields: optional list of fields to also concatenate. By default, all
fields from the first BoxList in the list are included in the
concatenation.
scope: name scope.
Returns:
a BoxList with number of boxes equal to
sum([boxlist.num_boxes() for boxlist in boxlists])
Raises:
ValueError: if boxlists is invalid (i.e., is not a list, is empty, or
contains non BoxList objects), or if requested fields are not contained in
all boxlists
"""
with tf.name_scope(scope, 'Concatenate'):
if not isinstance(boxlists, list):
raise ValueError('boxlists should be a list')
if not boxlists:
raise ValueError('boxlists should have nonzero length')
for boxlist in boxlists:
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('all elements of boxlists should be BoxList objects')
concatenated = box_list.BoxList(
tf.concat([boxlist.get() for boxlist in boxlists], 0))
if fields is None:
fields = boxlists[0].get_extra_fields()
for field in fields:
first_field_shape = boxlists[0].get_field(field).get_shape().as_list()
first_field_shape[0] = -1
if None in first_field_shape:
raise ValueError('field %s must have fully defined shape except for the'
' 0th dimension.' % field)
for boxlist in boxlists:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all requested fields')
field_shape = boxlist.get_field(field).get_shape().as_list()
field_shape[0] = -1
if field_shape != first_field_shape:
raise ValueError('field %s must have same shape for all boxlists '
'except for the 0th dimension.' % field)
concatenated_field = tf.concat(
[boxlist.get_field(field) for boxlist in boxlists], 0)
concatenated.add_field(field, concatenated_field)
return concatenated
def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None):
"""Sort boxes and associated fields according to a scalar field.
A common use case is reordering the boxes according to descending scores.
Args:
boxlist: BoxList holding N boxes.
field: A BoxList field for sorting and reordering the BoxList.
order: (Optional) descend or ascend. Default is descend.
scope: name scope.
Returns:
sorted_boxlist: A sorted BoxList with the field in the specified order.
Raises:
ValueError: if specified field does not exist
ValueError: if the order is not either descend or ascend
"""
with tf.name_scope(scope, 'SortByField'):
if order != SortOrder.descend and order != SortOrder.ascend:
raise ValueError('Invalid sort order')
field_to_sort = boxlist.get_field(field)
if len(field_to_sort.shape.as_list()) != 1:
raise ValueError('Field should have rank 1')
num_boxes = boxlist.num_boxes()
num_entries = tf.size(field_to_sort)
length_assert = tf.Assert(
tf.equal(num_boxes, num_entries),
['Incorrect field size: actual vs expected.', num_entries, num_boxes])
with tf.control_dependencies([length_assert]):
_, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True)
if order == SortOrder.ascend:
sorted_indices = tf.reverse_v2(sorted_indices, [0])
return gather(boxlist, sorted_indices)
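# --- Editor's illustrative example (not part of the original library) ---
# The common use case named above: reordering detections by descending
# score. `_example_*` is a hypothetical helper.
def _example_sort_by_scores():
  boxes = box_list.BoxList(tf.constant(
      [[0.0, 0.0, 0.1, 0.1], [0.0, 0.0, 0.2, 0.2]]))
  boxes.add_field('scores', tf.constant([0.2, 0.7]))
  return sort_by_field(boxes, 'scores')  # The second box now comes first.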
def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None):
"""Overlay bounding box list on image.
Currently this visualization plots a 1 pixel thick red bounding box on top
of the image. Note that tf.image.draw_bounding_boxes is essentially
1-indexed.
Args:
image: an image tensor with shape [height, width, 3]
boxlist: a BoxList
normalized: (boolean) specify whether corners are to be interpreted
as absolute coordinates in image space or normalized with respect to the
image size.
scope: name scope.
Returns:
image_and_boxes: an image tensor with shape [height, width, 3]
"""
with tf.name_scope(scope, 'VisualizeBoxesInImage'):
if not normalized:
height, width, _ = tf.unstack(tf.shape(image))
boxlist = scale(boxlist,
1.0 / tf.cast(height, tf.float32),
1.0 / tf.cast(width, tf.float32))
corners = tf.expand_dims(boxlist.get(), 0)
image = tf.expand_dims(image, 0)
return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0])
def filter_field_value_equals(boxlist, field, value, scope=None):
"""Filter to keep only boxes with field entries equal to the given value.
Args:
boxlist: BoxList holding N boxes.
field: field name for filtering.
value: scalar value.
scope: name scope.
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not have
the specified field.
"""
with tf.name_scope(scope, 'FilterFieldValueEquals'):
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field(field):
raise ValueError('boxlist must contain the specified field')
filter_field = boxlist.get_field(field)
gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1])
return gather(boxlist, gather_index)
def filter_greater_than(boxlist, thresh, scope=None):
"""Filter to keep only boxes with score exceeding a given threshold.
This op keeps the collection of boxes whose corresponding scores are
greater than the input threshold.
TODO(jonathanhuang): Change function name to filter_scores_greater_than
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
scope: name scope.
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not
have a scores field
"""
with tf.name_scope(scope, 'FilterGreaterThan'):
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
scores = boxlist.get_field('scores')
if len(scores.shape.as_list()) > 2:
raise ValueError('Scores should have rank 1 or 2')
if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1:
raise ValueError('Scores should have rank 1 or have shape '
'consistent with [None, 1]')
high_score_indices = tf.cast(tf.reshape(
tf.where(tf.greater(scores, thresh)),
[-1]), tf.int32)
return gather(boxlist, high_score_indices)
def non_max_suppression(boxlist, thresh, max_output_size, scope=None):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. Note that this only works for a single class ---
to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
max_output_size: maximum number of retained boxes
scope: name scope.
Returns:
a BoxList holding M boxes where M <= max_output_size
Raises:
ValueError: if thresh is not in [0, 1]
"""
with tf.name_scope(scope, 'NonMaxSuppression'):
if not 0 <= thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
selected_indices = tf.image.non_max_suppression(
boxlist.get(), boxlist.get_field('scores'),
max_output_size, iou_threshold=thresh)
return gather(boxlist, selected_indices)
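# --- Editor's illustrative example (not part of the original library) ---
# A single-class NMS sketch: the two heavily overlapping boxes (IoU ~0.9)
# collapse into the higher-scoring one, while the disjoint box survives.
# `_example_*` is a hypothetical helper.
def _example_non_max_suppression():
  boxes = box_list.BoxList(tf.constant(
      [[0.0, 0.0, 1.0, 1.0],
       [0.05, 0.05, 1.0, 1.0],
       [2.0, 2.0, 3.0, 3.0]]))
  boxes.add_field('scores', tf.constant([0.9, 0.8, 0.7]))
  return non_max_suppression(boxes, thresh=0.5, max_output_size=10)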
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def to_normalized_coordinates(boxlist, height, width,
check_range=True, scope=None):
"""Converts absolute box coordinates to normalized coordinates in [0, 1].
Usually one uses the dynamic shape of the image or conv-layer tensor:
boxlist = box_list_ops.to_normalized_coordinates(boxlist,
tf.shape(images)[1],
tf.shape(images)[2])
This function raises an assertion failed error at graph execution time when
the maximum coordinate is smaller than 1.01 (which means that coordinates are
already normalized). The value 1.01 is to deal with small rounding errors.
Args:
boxlist: BoxList with coordinates in terms of pixel-locations.
height: Maximum value for height of absolute box coordinates.
width: Maximum value for width of absolute box coordinates.
check_range: If True, checks if the coordinates are normalized or not.
scope: name scope.
Returns:
boxlist with normalized coordinates in [0, 1].
"""
with tf.name_scope(scope, 'ToNormalizedCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(boxlist.get())
max_assert = tf.Assert(tf.greater(max_val, 1.01),
['max value is lower than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(boxlist, 1 / height, 1 / width)
def to_absolute_coordinates(boxlist,
height,
width,
check_range=True,
maximum_normalized_coordinate=1.1,
scope=None):
"""Converts normalized box coordinates to absolute pixel coordinates.
This function raises an assertion failed error when the maximum box coordinate
value is larger than maximum_normalized_coordinate (in which case coordinates
are already absolute).
Args:
boxlist: BoxList with coordinates in range [0, 1].
height: Maximum value for height of absolute box coordinates.
width: Maximum value for width of absolute box coordinates.
check_range: If True, checks if the coordinates are normalized or not.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, defaults to 1.1.
scope: name scope.
Returns:
boxlist with absolute coordinates in terms of the image size.
"""
with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
# Ensure range of input boxes is correct.
if check_range:
box_maximum = tf.reduce_max(boxlist.get())
max_assert = tf.Assert(
tf.greater_equal(maximum_normalized_coordinate, box_maximum),
['maximum box coordinate value is larger '
'than %f: ' % maximum_normalized_coordinate, box_maximum])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(boxlist, height, width)
def refine_boxes_multi_class(pool_boxes,
num_classes,
nms_iou_thresh,
nms_max_detections,
voting_iou_thresh=0.5):
"""Refines a pool of boxes using non max suppression and box voting.
Box refinement is done independently for each class.
Args:
pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
have a rank 1 'scores' field and a rank 1 'classes' field.
num_classes: (int scalar) Number of classes.
nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
nms_max_detections: (int scalar) maximum output size for NMS.
voting_iou_thresh: (float scalar) iou threshold for box voting.
Returns:
BoxList of refined boxes.
Raises:
ValueError: if
a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
b) pool_boxes is not a BoxList.
c) pool_boxes does not have a scores and classes field.
"""
if not 0.0 <= nms_iou_thresh <= 1.0:
raise ValueError('nms_iou_thresh must be between 0 and 1')
if not 0.0 <= voting_iou_thresh <= 1.0:
raise ValueError('voting_iou_thresh must be between 0 and 1')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
if not pool_boxes.has_field('classes'):
raise ValueError('pool_boxes must have a \'classes\' field')
refined_boxes = []
for i in range(num_classes):
boxes_class = filter_field_value_equals(pool_boxes, 'classes', i)
refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh,
nms_max_detections, voting_iou_thresh)
refined_boxes.append(refined_boxes_class)
return sort_by_field(concatenate(refined_boxes), 'scores')
def refine_boxes(pool_boxes,
nms_iou_thresh,
nms_max_detections,
voting_iou_thresh=0.5):
"""Refines a pool of boxes using non max suppression and box voting.
Args:
pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
have a rank 1 'scores' field.
nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
nms_max_detections: (int scalar) maximum output size for NMS.
voting_iou_thresh: (float scalar) iou threshold for box voting.
Returns:
BoxList of refined boxes.
Raises:
ValueError: if
a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
b) pool_boxes is not a BoxList.
c) pool_boxes does not have a scores field.
"""
if not 0.0 <= nms_iou_thresh <= 1.0:
raise ValueError('nms_iou_thresh must be between 0 and 1')
if not 0.0 <= voting_iou_thresh <= 1.0:
raise ValueError('voting_iou_thresh must be between 0 and 1')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
nms_boxes = non_max_suppression(
pool_boxes, nms_iou_thresh, nms_max_detections)
return box_voting(nms_boxes, pool_boxes, voting_iou_thresh)
def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):
"""Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015.
Performs box voting as described in 'Object detection via a multi-region &
semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For
each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes
with iou overlap >= iou_thresh. The location of B is set to the weighted
average location of boxes in S (scores are used for weighting). And the score
of B is set to the average score of boxes in S.
Args:
selected_boxes: BoxList containing a subset of boxes in pool_boxes. These
boxes are usually selected from pool_boxes using non max suppression.
pool_boxes: BoxList containing a set of (possibly redundant) boxes.
iou_thresh: (float scalar) iou threshold for matching boxes in
selected_boxes and pool_boxes.
Returns:
BoxList containing averaged locations and scores for each box in
selected_boxes.
Raises:
ValueError: if
a) selected_boxes or pool_boxes is not a BoxList.
b) if iou_thresh is not in [0, 1].
c) pool_boxes does not have a scores field.
"""
if not 0.0 <= iou_thresh <= 1.0:
raise ValueError('iou_thresh must be between 0 and 1')
if not isinstance(selected_boxes, box_list.BoxList):
raise ValueError('selected_boxes must be a BoxList')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
iou_ = iou(selected_boxes, pool_boxes)
match_indicator = tf.cast(tf.greater(iou_, iou_thresh), dtype=tf.float32)
num_matches = tf.reduce_sum(match_indicator, 1)
# TODO(kbanoop): Handle the case where some boxes in selected_boxes do not
# match to any boxes in pool_boxes. For such boxes without any matches, we
# should return the original boxes without voting.
match_assert = tf.Assert(
tf.reduce_all(tf.greater(num_matches, 0)),
['Each box in selected_boxes must match with at least one box '
'in pool_boxes.'])
scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)
scores_assert = tf.Assert(
tf.reduce_all(tf.greater_equal(scores, 0)),
['Scores must be non negative.'])
with tf.control_dependencies([scores_assert, match_assert]):
sum_scores = tf.matmul(match_indicator, scores)
averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches
box_locations = tf.matmul(match_indicator,
pool_boxes.get() * scores) / sum_scores
averaged_boxes = box_list.BoxList(box_locations)
_copy_extra_fields(averaged_boxes, selected_boxes)
averaged_boxes.add_field('scores', averaged_scores)
return averaged_boxes
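# --- Editor's illustrative example (not part of the original library) ---
# A worked box_voting sketch: one selected box voted against a redundant
# two-box pool. Both pool boxes match (IoU > 0.5), so the output location
# is their score-weighted average, (0.8 * box1 + 0.2 * box2) / 1.0 =
# [0.02, 0.02, 0.98, 0.98], with averaged score 0.5. `_example_*` is a
# hypothetical helper.
def _example_box_voting():
  pool = box_list.BoxList(tf.constant(
      [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.9, 0.9]]))
  pool.add_field('scores', tf.constant([0.8, 0.2]))
  selected = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
  return box_voting(selected, pool, iou_thresh=0.5)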
def pad_or_clip_box_list(boxlist, num_boxes, scope=None):
"""Pads or clips all fields of a BoxList.
Args:
boxlist: A BoxList with an arbitrary number of boxes.
num_boxes: First num_boxes in boxlist are kept.
The fields are zero-padded if num_boxes is bigger than the
actual number of boxes.
scope: name scope.
Returns:
BoxList with all fields padded or clipped.
"""
with tf.name_scope(scope, 'PadOrClipBoxList'):
subboxlist = box_list.BoxList(shape_utils.pad_or_clip_tensor(
boxlist.get(), num_boxes))
for field in boxlist.get_extra_fields():
subfield = shape_utils.pad_or_clip_tensor(
boxlist.get_field(field), num_boxes)
subboxlist.add_field(field, subfield)
return subboxlist
def select_random_box(boxlist,
default_box=None,
seed=None,
scope=None):
"""Selects a random bounding box from a `BoxList`.
Args:
boxlist: A BoxList.
default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
this default box will be returned. If None, will use a default box of
[[-1., -1., -1., -1.]].
seed: Random seed.
scope: Name scope.
Returns:
bbox: A [1, 4] tensor with a random bounding box.
valid: A bool tensor indicating whether a valid bounding box is returned
(True) or whether the default box is returned (False).
"""
with tf.name_scope(scope, 'SelectRandomBox'):
bboxes = boxlist.get()
combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes)
number_of_boxes = combined_shape[0]
default_box = default_box or tf.constant([[-1., -1., -1., -1.]])
def select_box():
random_index = tf.random_uniform([],
maxval=number_of_boxes,
dtype=tf.int32,
seed=seed)
return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True)
return tf.cond(
tf.greater_equal(number_of_boxes, 1),
true_fn=select_box,
false_fn=lambda: (default_box, tf.constant(False)))
def get_minimal_coverage_box(boxlist,
default_box=None,
scope=None):
"""Creates a single bounding box which covers all boxes in the boxlist.
Args:
boxlist: A Boxlist.
default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
this default box will be returned. If None, will use a default box of
[[0., 0., 1., 1.]].
scope: Name scope.
Returns:
A [1, 4] float32 tensor with a bounding box that tightly covers all the
boxes in the box list. If the boxlist does not contain any boxes, the
default box is returned.
"""
with tf.name_scope(scope, 'CreateCoverageBox'):
num_boxes = boxlist.num_boxes()
def coverage_box(bboxes):
y_min, x_min, y_max, x_max = tf.split(
value=bboxes, num_or_size_splits=4, axis=1)
y_min_coverage = tf.reduce_min(y_min, axis=0)
x_min_coverage = tf.reduce_min(x_min, axis=0)
y_max_coverage = tf.reduce_max(y_max, axis=0)
x_max_coverage = tf.reduce_max(x_max, axis=0)
return tf.stack(
[y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
axis=1)
default_box = default_box or tf.constant([[0., 0., 1., 1.]])
return tf.cond(
tf.greater_equal(num_boxes, 1),
true_fn=lambda: coverage_box(boxlist.get()),
false_fn=lambda: default_box)
def sample_boxes_by_jittering(boxlist,
num_boxes_to_sample,
stddev=0.1,
scope=None):
"""Samples num_boxes_to_sample boxes by jittering around boxlist boxes.
This function may generate boxes with size 0. The larger the stddev, the
more probable this is. For a small stddev of 0.1 this probability is very
small.
Args:
boxlist: A boxlist containing N boxes in normalized coordinates.
num_boxes_to_sample: A positive integer containing the number of boxes to
sample.
stddev: Standard deviation. This is used to draw random offsets for the
box corners from a normal distribution. The offset is multiplied by the
box size, so it will be larger in terms of pixels for larger boxes.
scope: Name scope.
Returns:
sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in
normalized coordinates.
"""
with tf.name_scope(scope, 'SampleBoxesByJittering'):
num_boxes = boxlist.num_boxes()
box_indices = tf.random_uniform(
[num_boxes_to_sample],
minval=0,
maxval=num_boxes,
dtype=tf.int32)
sampled_boxes = tf.gather(boxlist.get(), box_indices)
sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0]
sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1]
rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0]
minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1]
maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2]
maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3]
maxy = tf.maximum(miny, maxy)
maxx = tf.maximum(minx, maxx)
sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1)
sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0)
return box_list.BoxList(sampled_boxes)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/box_list_ops.py | box_list_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DensePose operations.
DensePose part ids are represented as tensors of shape
[num_instances, num_points] and coordinates are represented as tensors of shape
[num_instances, num_points, 4] where each point holds (y, x, v, u). The location
of the DensePose sampled point is (y, x) in normalized coordinates. The surface
coordinate (in the part coordinate frame) is (v, u). Note that dim 1 of both
tensors may contain padding, since the number of sampled points per instance
is not fixed. The value `num_points` represents the maximum number of sampled
points for an instance in the example.
"""
import os
import numpy as np
import scipy.io
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
PART_NAMES = [
b'torso_back', b'torso_front', b'right_hand', b'left_hand', b'left_foot',
b'right_foot', b'right_upper_leg_back', b'left_upper_leg_back',
b'right_upper_leg_front', b'left_upper_leg_front', b'right_lower_leg_back',
b'left_lower_leg_back', b'right_lower_leg_front', b'left_lower_leg_front',
b'left_upper_arm_back', b'right_upper_arm_back', b'left_upper_arm_front',
b'right_upper_arm_front', b'left_lower_arm_back', b'right_lower_arm_back',
b'left_lower_arm_front', b'right_lower_arm_front', b'right_face',
b'left_face',
]
def scale(dp_surface_coords, y_scale, x_scale, scope=None):
"""Scales DensePose coordinates in y and x dimensions.
Args:
dp_surface_coords: a tensor of shape [num_instances, num_points, 4], with
coordinates in (y, x, v, u) format.
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4]
"""
with tf.name_scope(scope, 'DensePoseScale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_dp_surface_coords = dp_surface_coords * [[[y_scale, x_scale, 1, 1]]]
return new_dp_surface_coords
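# --- Editor's illustrative example (not part of the original library) ---
# A minimal sketch of DensePose scaling: only the (y, x) image location is
# rescaled; the (v, u) surface coordinates live in the part frame and are
# left untouched. `_example_*` is a hypothetical helper.
def _example_densepose_scale():
  coords = tf.constant([[[0.5, 0.5, 0.3, 0.7]]])  # [1 instance, 1 point, 4]
  scaled = scale(coords, y_scale=2.0, x_scale=0.5)
  return scaled  # Yields [[[1.0, 0.25, 0.3, 0.7]]].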
def clip_to_window(dp_surface_coords, window, scope=None):
"""Clips DensePose points to a window.
This op clips any input DensePose points to a window.
Args:
dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
DensePose surface coordinates in (y, x, v, u) format.
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip the points.
scope: name scope.
Returns:
new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4].
"""
with tf.name_scope(scope, 'DensePoseClipToWindow'):
y, x, v, u = tf.split(value=dp_surface_coords, num_or_size_splits=4, axis=2)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
y = tf.maximum(tf.minimum(y, win_y_max), win_y_min)
x = tf.maximum(tf.minimum(x, win_x_max), win_x_min)
new_dp_surface_coords = tf.concat([y, x, v, u], 2)
return new_dp_surface_coords
def prune_outside_window(dp_num_points, dp_part_ids, dp_surface_coords, window,
scope=None):
"""Prunes DensePose points that fall outside a given window.
This function replaces points that fall outside the given window with zeros.
See also clip_to_window which clips any DensePose points that fall outside the
given window.
Note that this operation uses dynamic shapes, and therefore is not currently
suitable for TPU.
Args:
dp_num_points: a tensor of shape [num_instances] that indicates how many
(non-padded) DensePose points there are per instance.
dp_part_ids: a tensor of shape [num_instances, num_points] with DensePose
part ids. These part_ids are 0-indexed, where the first non-background
part has index 0.
dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
DensePose surface coordinates in (y, x, v, u) format.
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window outside of which the op should prune the points.
scope: name scope.
Returns:
new_dp_num_points: a tensor of shape [num_instances] that indicates how many
(non-padded) DensePose points there are per instance after pruning.
new_dp_part_ids: a tensor of shape [num_instances, num_points] with
DensePose part ids. These part_ids are 0-indexed, where the first
non-background part has index 0.
new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
DensePose surface coordinates after pruning.
"""
with tf.name_scope(scope, 'DensePosePruneOutsideWindow'):
y, x, _, _ = tf.unstack(dp_surface_coords, axis=-1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
num_instances, num_points = shape_utils.combined_static_and_dynamic_shape(
dp_part_ids)
dp_num_points_tiled = tf.tile(dp_num_points[:, tf.newaxis],
multiples=[1, num_points])
range_tiled = tf.tile(tf.range(num_points)[tf.newaxis, :],
multiples=[num_instances, 1])
valid_initial = range_tiled < dp_num_points_tiled
valid_in_window = tf.logical_and(
tf.logical_and(y >= win_y_min, y <= win_y_max),
tf.logical_and(x >= win_x_min, x <= win_x_max))
valid_indices = tf.logical_and(valid_initial, valid_in_window)
new_dp_num_points = tf.math.reduce_sum(
tf.cast(valid_indices, tf.int32), axis=1)
max_num_points = tf.math.reduce_max(new_dp_num_points)
def gather_and_reshuffle(elems):
dp_part_ids, dp_surface_coords, valid_indices = elems
locs = tf.where(valid_indices)[:, 0]
valid_part_ids = tf.gather(dp_part_ids, locs, axis=0)
valid_part_ids_padded = shape_utils.pad_or_clip_nd(
valid_part_ids, output_shape=[max_num_points])
valid_surface_coords = tf.gather(dp_surface_coords, locs, axis=0)
valid_surface_coords_padded = shape_utils.pad_or_clip_nd(
valid_surface_coords, output_shape=[max_num_points, 4])
return [valid_part_ids_padded, valid_surface_coords_padded]
new_dp_part_ids, new_dp_surface_coords = (
shape_utils.static_or_dynamic_map_fn(
gather_and_reshuffle,
elems=[dp_part_ids, dp_surface_coords, valid_indices],
dtype=[tf.int32, tf.float32],
back_prop=False))
return new_dp_num_points, new_dp_part_ids, new_dp_surface_coords
def change_coordinate_frame(dp_surface_coords, window, scope=None):
"""Changes coordinate frame of the points to be relative to window's frame.
Given a window of the form [y_min, x_min, y_max, x_max] in normalized
coordinates, changes DensePose coordinates to be relative to this window.
An example use case is data augmentation: where we are given groundtruth
points and would like to randomly crop the image to some window. In this
case we need to change the coordinate frame of each sampled point to be
relative to this new window.
Args:
dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
DensePose surface coordinates in (y, x, v, u) format.
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window we should change the coordinate frame to.
scope: name scope.
Returns:
new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4].
"""
with tf.name_scope(scope, 'DensePoseChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
new_dp_surface_coords = scale(
dp_surface_coords - [window[0], window[1], 0, 0],
1.0 / win_height, 1.0 / win_width)
return new_dp_surface_coords
def to_normalized_coordinates(dp_surface_coords, height, width,
check_range=True, scope=None):
"""Converts absolute DensePose coordinates to normalized in range [0, 1].
This function raises an assertion failed error at graph execution time when
the maximum coordinate is smaller than 1.01 (which means that coordinates are
already normalized). The value 1.01 is to deal with small rounding errors.
Args:
dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
DensePose absolute surface coordinates in (y, x, v, u) format.
height: Height of image.
width: Width of image.
check_range: If True, checks if the coordinates are already normalized.
scope: name scope.
Returns:
A tensor of shape [num_instances, num_points, 4] with normalized
coordinates.
"""
with tf.name_scope(scope, 'DensePoseToNormalizedCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(dp_surface_coords[:, :, :2])
max_assert = tf.Assert(tf.greater(max_val, 1.01),
['max value is lower than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(dp_surface_coords, 1.0 / height, 1.0 / width)
def to_absolute_coordinates(dp_surface_coords, height, width,
check_range=True, scope=None):
"""Converts normalized DensePose coordinates to absolute pixel coordinates.
This function raises an assertion failed error when the maximum
coordinate value is larger than 1.01 (in which case coordinates are already
absolute).
Args:
dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
DensePose normalized surface coordinates in (y, x, v, u) format.
height: Height of image.
width: Width of image.
check_range: If True, checks if the coordinates are normalized or not.
scope: name scope.
Returns:
A tensor of shape [num_instances, num_points, 4] with absolute coordinates.
"""
with tf.name_scope(scope, 'DensePoseToAbsoluteCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(dp_surface_coords[:, :, :2])
max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
['maximum coordinate value is larger than 1.01: ',
max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(dp_surface_coords, height, width)
class DensePoseHorizontalFlip(object):
"""Class responsible for horizontal flipping of parts and surface coords."""
def __init__(self):
"""Constructor."""
path = os.path.dirname(os.path.abspath(__file__))
uv_symmetry_transforms_path = tf.resource_loader.get_path_to_datafile(
os.path.join(path, '..', 'dataset_tools', 'densepose',
'UV_symmetry_transforms.mat'))
tf.logging.info('Loading DensePose symmetry transforms file from {}'.format(
uv_symmetry_transforms_path))
with tf.io.gfile.GFile(uv_symmetry_transforms_path, 'rb') as f:
data = scipy.io.loadmat(f)
# Create lookup maps which indicate how a VU coordinate changes after a
# horizontal flip.
uv_symmetry_map = {}
for key in ('U_transforms', 'V_transforms'):
uv_symmetry_map_per_part = []
for i in range(data[key].shape[1]):
# The following tensor has shape [256, 256]. The raw data is stored as
# uint8 values, so convert to float and scale to the range [0., 1.]
data_normalized = data[key][0, i].astype(np.float32) / 255.
map_per_part = tf.constant(data_normalized, dtype=tf.float32)
uv_symmetry_map_per_part.append(map_per_part)
uv_symmetry_map[key] = tf.reshape(
tf.stack(uv_symmetry_map_per_part, axis=0), [-1])
# The following dictionary contains flattened lookup maps for the U and V
# coordinates separately. The shape of each is [24 * 256 * 256].
self.uv_symmetries = uv_symmetry_map
# Create a list that maps each part index to its flipped part index (0-indexed).
part_symmetries = []
for i, part_name in enumerate(PART_NAMES):
if b'left' in part_name:
part_symmetries.append(PART_NAMES.index(
part_name.replace(b'left', b'right')))
elif b'right' in part_name:
part_symmetries.append(PART_NAMES.index(
part_name.replace(b'right', b'left')))
else:
part_symmetries.append(i)
self.part_symmetries = part_symmetries
def flip_parts_and_coords(self, part_ids, vu):
"""Flips part ids and coordinates.
Args:
part_ids: a [num_instances, num_points] int32 tensor with pre-flipped part
ids. These part_ids are 0-indexed, where the first non-background part
has index 0.
vu: a [num_instances, num_points, 2] float32 tensor with pre-flipped vu
normalized coordinates.
Returns:
new_part_ids: a [num_instances, num_points] int32 tensor with post-flipped
part ids. These part_ids are 0-indexed, where the first non-background
part has index 0.
new_vu: a [num_instances, num_points, 2] float32 tensor with post-flipped
vu coordinates.
"""
num_instances, num_points = shape_utils.combined_static_and_dynamic_shape(
part_ids)
part_ids_flattened = tf.reshape(part_ids, [-1])
new_part_ids_flattened = tf.gather(self.part_symmetries, part_ids_flattened)
new_part_ids = tf.reshape(new_part_ids_flattened,
[num_instances, num_points])
# Convert VU floating point coordinates to integer indices in a 256x256 grid.
vu = tf.math.minimum(tf.math.maximum(vu, 0.0), 1.0)
vu_locs = tf.cast(vu * 256., dtype=tf.int32)
vu_locs_flattened = tf.reshape(vu_locs, [-1, 2])
v_locs_flattened, u_locs_flattened = tf.unstack(vu_locs_flattened, axis=1)
# Convert vu_locs into lookup indices (in flattened part symmetries map).
symmetry_lookup_inds = (
part_ids_flattened * 65536 + 256 * v_locs_flattened + u_locs_flattened)
# New VU coordinates.
v_new = tf.gather(self.uv_symmetries['V_transforms'], symmetry_lookup_inds)
u_new = tf.gather(self.uv_symmetries['U_transforms'], symmetry_lookup_inds)
new_vu_flattened = tf.stack([v_new, u_new], axis=1)
new_vu = tf.reshape(new_vu_flattened, [num_instances, num_points, 2])
return new_part_ids, new_vu
def flip_horizontal(dp_part_ids, dp_surface_coords, scope=None):
"""Flips the DensePose points horizontally around the flip_point.
This operation flips dense pose annotations horizontally. Note that part ids
and surface coordinates may or may not change as a result of the flip.
Args:
dp_part_ids: a tensor of shape [num_instances, num_points] with DensePose
part ids. These part_ids are 0-indexed, where the first non-background
part has index 0.
dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
DensePose surface coordinates in (y, x, v, u) normalized format.
scope: name scope.
Returns:
new_dp_part_ids: a tensor of shape [num_instances, num_points] with
DensePose part ids after flipping.
new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with
DensePose surface coordinates after flipping.
"""
with tf.name_scope(scope, 'DensePoseFlipHorizontal'):
# First flip x coordinate.
y, x, vu = tf.split(dp_surface_coords, num_or_size_splits=[1, 1, 2], axis=2)
xflipped = 1.0 - x
# Flip part ids and surface coordinates.
horizontal_flip = DensePoseHorizontalFlip()
new_dp_part_ids, new_vu = horizontal_flip.flip_parts_and_coords(
dp_part_ids, vu)
new_dp_surface_coords = tf.concat([y, xflipped, new_vu], axis=2)
return new_dp_part_ids, new_dp_surface_coords
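# --- Editor's illustrative example (not part of the original library) ---
# A shape-level sketch of horizontal flipping. Note it requires the
# UV_symmetry_transforms.mat data file shipped with the repository, so the
# tensors here stand in for real annotations. `_example_*` is a
# hypothetical helper.
def _example_flip_horizontal(dp_part_ids, dp_surface_coords):
  """dp_part_ids: [num_instances, num_points] int32; coords: [..., 4]."""
  new_ids, new_coords = flip_horizontal(dp_part_ids, dp_surface_coords)
  # x becomes 1 - x, left/right part ids are swapped, and (v, u) are
  # remapped through the symmetry lookup tables.
  return new_ids, new_coords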
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/densepose_ops.py | densepose_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data: {}'.format(
boxes.shape))
if boxes.dtype != tf.float32:
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.
"""
return shape_utils.get_dim_as_int(self.data['boxes'].get_shape()[0])
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
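    """Returns True if the BoxList contains the given field."""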
return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
    This function returns the tensor stored under the given field name. The
    box coordinates themselves are stored under the field 'boxes'.
Args:
      field: a string specifying the field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with tf.name_scope(scope, 'transpose_coordinates'):
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
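# Illustrative usage sketch (not part of the library): create a BoxList,
# attach a scores field, and read back center/size coordinates. Values are
# hypothetical.
#
#   boxes = tf.constant([[0.0, 0.0, 0.5, 0.5],
#                        [0.2, 0.3, 0.9, 1.0]], dtype=tf.float32)
#   boxlist = BoxList(boxes)
#   boxlist.add_field('scores', tf.constant([0.9, 0.4]))
#   ycenter, xcenter, height, width = (
#       boxlist.get_center_coordinates_and_sizes())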
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/box_list.py | box_list.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for data parsers.
Data parser parses input data and returns a dictionary of numpy arrays
keyed by the entries in standard_fields.py. Since the parser parses records
to numpy arrays (materialized tensors) directly, it is used to read data for
evaluation/visualization; to parse the data during training, DataDecoder should
be used.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import six
class DataToNumpyParser(six.with_metaclass(ABCMeta, object)):
"""Abstract interface for data parser that produces numpy arrays."""
@abstractmethod
def parse(self, input_data):
"""Parses input and returns a numpy array or a dictionary of numpy arrays.
Args:
input_data: an input data
Returns:
A numpy array or a dictionary of numpy arrays or None, if input
cannot be parsed.
"""
pass
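# Minimal illustrative subclass, assuming `input_data` is a serialized
# tf.train.Example with a float-list feature named 'values' (both the feature
# name and the use of tf.train.Example here are hypothetical):
#
#   import numpy as np
#   import tensorflow.compat.v1 as tf
#
#   class FloatFeatureParser(DataToNumpyParser):
#
#     def parse(self, input_data):
#       example = tf.train.Example.FromString(input_data)
#       feature = example.features.feature.get('values')
#       if feature is None:
#         return None
#       return np.array(feature.float_list.value, dtype=np.float32)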
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/data_parser.py | data_parser.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for region_similarity_calculator."""
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import region_similarity_calculator
from object_detection.core import standard_fields as fields
from object_detection.utils import test_case
class RegionSimilarityCalculatorTest(test_case.TestCase):
def test_get_correct_pairwise_similarity_based_on_iou(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
iou_similarity_calculator = region_similarity_calculator.IouSimilarity()
iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2)
return iou_similarity
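    # Worked example for the first entry: boxes1[0] = [4, 3, 7, 5] has area
    # 3 * 2 = 6, boxes2[0] = [3, 4, 6, 8] has area 3 * 4 = 12, and their
    # intersection [4, 4, 6, 5] has area 2 * 1 = 2, so
    # IoU = 2 / (6 + 12 - 2) = 2 / 16.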
exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]]
iou_output = self.execute(graph_fn, [])
self.assertAllClose(iou_output, exp_output)
def test_get_correct_pairwise_similarity_based_on_squared_distances(self):
def graph_fn():
corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 2.0]])
corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0],
[-4.0, 0.0, 0.0, 3.0],
[0.0, 0.0, 0.0, 0.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
dist_similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
dist_similarity = dist_similarity_calc.compare(boxes1, boxes2)
return dist_similarity
exp_output = [[-26, -25, 0], [-18, -27, -6]]
iou_output = self.execute(graph_fn, [])
self.assertAllClose(iou_output, exp_output)
def test_get_correct_pairwise_similarity_based_on_ioa(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
ioa_similarity_calculator = region_similarity_calculator.IoaSimilarity()
ioa_similarity_1 = ioa_similarity_calculator.compare(boxes1, boxes2)
ioa_similarity_2 = ioa_similarity_calculator.compare(boxes2, boxes1)
return ioa_similarity_1, ioa_similarity_2
exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0],
[1.0 / 12.0, 0.0, 5.0 / 400.0]]
exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0],
[0, 0],
[6.0 / 6.0, 5.0 / 5.0]]
iou_output_1, iou_output_2 = self.execute(graph_fn, [])
self.assertAllClose(iou_output_1, exp_output_1)
self.assertAllClose(iou_output_2, exp_output_2)
def test_get_correct_pairwise_similarity_based_on_thresholded_iou(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
scores = tf.constant([.3, .6])
iou_threshold = .013
boxes1 = box_list.BoxList(corners1)
boxes1.add_field(fields.BoxListFields.scores, scores)
boxes2 = box_list.BoxList(corners2)
iou_similarity_calculator = (
region_similarity_calculator.ThresholdedIouSimilarity(
iou_threshold=iou_threshold))
iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2)
return iou_similarity
exp_output = tf.constant([[0.3, 0., 0.3], [0.6, 0., 0.]])
iou_output = self.execute(graph_fn, [])
self.assertAllClose(iou_output, exp_output)
def test_detr_similarity(self):
def graph_fn():
corners1 = tf.constant([[5.0, 7.0, 7.0, 9.0]])
corners2 = tf.constant([[5.0, 7.0, 7.0, 9.0], [5.0, 11.0, 7.0, 13.0]])
groundtruth_labels = tf.constant([[1.0, 0.0]])
predicted_labels = tf.constant([[0.0, 1000.0], [1000.0, 0.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
boxes1.add_field(fields.BoxListFields.classes, groundtruth_labels)
boxes2.add_field(fields.BoxListFields.classes, predicted_labels)
      detr_similarity_calculator = (
          region_similarity_calculator.DETRSimilarity())
detr_similarity = detr_similarity_calculator.compare(
boxes1, boxes2, None)
return detr_similarity
exp_output = [[0.0, -20 - 8.0/3.0 + 1000.0]]
sim_output = self.execute(graph_fn, [])
self.assertAllClose(sim_output, exp_output)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/region_similarity_calculator_test.py | region_similarity_calculator_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base minibatch sampler module.
The job of the minibatch_sampler is to subsample a minibatch based on some
criterion.
The main function call is:
subsample(indicator, batch_size, **params).
Indicator is a 1d boolean tensor where True denotes which examples can be
sampled. It returns a boolean indicator where True denotes an example has been
sampled.
Subclasses should implement the Subsample function and can make use of the
@staticmethod SubsampleIndicator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import six
import tensorflow.compat.v1 as tf
from object_detection.utils import ops
class MinibatchSampler(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for subsampling minibatches."""
def __init__(self):
"""Constructs a minibatch sampler."""
pass
@abstractmethod
def subsample(self, indicator, batch_size, **params):
"""Returns subsample of entries in indicator.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
**params: additional keyword arguments for specific implementations of
the MinibatchSampler.
Returns:
sample_indicator: boolean tensor of shape [N] whose True entries have been
        sampled. If sum(indicator) >= batch_size, then
        sum(sample_indicator) = batch_size.
"""
pass
@staticmethod
def subsample_indicator(indicator, num_samples):
"""Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Args:
indicator: a 1-dimensional boolean tensor indicating which elements
are allowed to be sampled and which are not.
num_samples: int32 scalar tensor
Returns:
a boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random_shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indices,
tf.shape(indicator)[0])
return tf.equal(selected_indicator, 1)
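# Minimal illustrative subclass (a sketch, not part of the library): sample
# uniformly at random from the allowed entries using the static helper above.
#
#   class UniformMinibatchSampler(MinibatchSampler):
#
#     def subsample(self, indicator, batch_size, **params):
#       return self.subsample_indicator(indicator, batch_size)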
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/minibatch_sampler.py | minibatch_sampler.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.keypoint_ops."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import keypoint_ops
from object_detection.utils import test_case
class KeypointOpsTest(test_case.TestCase):
"""Tests for common keypoint operations."""
def test_scale(self):
def graph_fn():
keypoints = tf.constant([
[[0.0, 0.0], [100.0, 200.0]],
[[50.0, 120.0], [100.0, 140.0]]
])
y_scale = tf.constant(1.0 / 100)
x_scale = tf.constant(1.0 / 200)
expected_keypoints = tf.constant([
[[0., 0.], [1.0, 1.0]],
[[0.5, 0.6], [1.0, 0.7]]
])
output = keypoint_ops.scale(keypoints, y_scale, x_scale)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_clip_to_window(self):
def graph_fn():
keypoints = tf.constant([
[[0.25, 0.5], [0.75, 0.75]],
[[0.5, 0.0], [1.0, 1.0]]
])
window = tf.constant([0.25, 0.25, 0.75, 0.75])
expected_keypoints = tf.constant([
[[0.25, 0.5], [0.75, 0.75]],
[[0.5, 0.25], [0.75, 0.75]]
])
output = keypoint_ops.clip_to_window(keypoints, window)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_prune_outside_window(self):
def graph_fn():
keypoints = tf.constant([
[[0.25, 0.5], [0.75, 0.75]],
[[0.5, 0.0], [1.0, 1.0]]
])
window = tf.constant([0.25, 0.25, 0.75, 0.75])
expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]],
[[np.nan, np.nan], [np.nan, np.nan]]])
output = keypoint_ops.prune_outside_window(keypoints, window)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_change_coordinate_frame(self):
def graph_fn():
keypoints = tf.constant([
[[0.25, 0.5], [0.75, 0.75]],
[[0.5, 0.0], [1.0, 1.0]]
])
window = tf.constant([0.25, 0.25, 0.75, 0.75])
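      # Change of frame maps each coordinate to (coord - window_min) /
      # window_size; here y' = (y - 0.25) / 0.5 and x' = (x - 0.25) / 0.5.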
expected_keypoints = tf.constant([
[[0, 0.5], [1.0, 1.0]],
[[0.5, -0.5], [1.5, 1.5]]
])
output = keypoint_ops.change_coordinate_frame(keypoints, window)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_keypoints_to_enclosing_bounding_boxes(self):
def graph_fn():
keypoints = tf.constant(
[
[ # Instance 0.
[5., 10.],
[3., 20.],
[8., 4.],
],
[ # Instance 1.
[2., 12.],
[0., 3.],
[5., 19.],
],
], dtype=tf.float32)
bboxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(keypoints)
return bboxes
output = self.execute(graph_fn, [])
expected_bboxes = np.array(
[
[3., 4., 8., 20.],
[0., 3., 5., 19.]
])
self.assertAllClose(expected_bboxes, output)
def test_keypoints_to_enclosing_bounding_boxes_axis2(self):
def graph_fn():
keypoints = tf.constant(
[
[ # Instance 0.
[5., 10.],
[3., 20.],
[8., 4.],
],
[ # Instance 1.
[2., 12.],
[0., 3.],
[5., 19.],
],
], dtype=tf.float32)
keypoints = tf.stack([keypoints, keypoints], axis=0)
bboxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
keypoints, keypoints_axis=2)
return bboxes
output = self.execute(graph_fn, [])
expected_bboxes = np.array(
[
[3., 4., 8., 20.],
[0., 3., 5., 19.]
])
self.assertAllClose(expected_bboxes, output[0])
self.assertAllClose(expected_bboxes, output[1])
def test_to_normalized_coordinates(self):
def graph_fn():
keypoints = tf.constant([
[[10., 30.], [30., 45.]],
[[20., 0.], [40., 60.]]
])
output = keypoint_ops.to_normalized_coordinates(
keypoints, 40, 60)
expected_keypoints = tf.constant([
[[0.25, 0.5], [0.75, 0.75]],
[[0.5, 0.0], [1.0, 1.0]]
])
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_to_normalized_coordinates_already_normalized(self):
if self.has_tpu(): return
def graph_fn():
keypoints = tf.constant([
[[0.25, 0.5], [0.75, 0.75]],
[[0.5, 0.0], [1.0, 1.0]]
])
output = keypoint_ops.to_normalized_coordinates(
keypoints, 40, 60)
return output
with self.assertRaisesOpError('assertion failed'):
self.execute_cpu(graph_fn, [])
def test_to_absolute_coordinates(self):
def graph_fn():
keypoints = tf.constant([
[[0.25, 0.5], [0.75, 0.75]],
[[0.5, 0.0], [1.0, 1.0]]
])
output = keypoint_ops.to_absolute_coordinates(
keypoints, 40, 60)
expected_keypoints = tf.constant([
[[10., 30.], [30., 45.]],
[[20., 0.], [40., 60.]]
])
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_to_absolute_coordinates_already_absolute(self):
if self.has_tpu(): return
def graph_fn():
keypoints = tf.constant([
[[10., 30.], [30., 45.]],
[[20., 0.], [40., 60.]]
])
output = keypoint_ops.to_absolute_coordinates(
keypoints, 40, 60)
return output
with self.assertRaisesOpError('assertion failed'):
self.execute_cpu(graph_fn, [])
def test_flip_horizontal(self):
def graph_fn():
keypoints = tf.constant([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]
])
expected_keypoints = tf.constant([
[[0.1, 0.9], [0.2, 0.8], [0.3, 0.7]],
[[0.4, 0.6], [0.5, 0.5], [0.6, 0.4]],
])
output = keypoint_ops.flip_horizontal(keypoints, 0.5)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_flip_horizontal_permutation(self):
def graph_fn():
keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]])
flip_permutation = [0, 2, 1]
expected_keypoints = tf.constant([
[[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]],
[[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]],
])
output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_flip_vertical(self):
def graph_fn():
keypoints = tf.constant([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]
])
expected_keypoints = tf.constant([
[[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]],
[[0.6, 0.4], [0.5, 0.5], [0.4, 0.6]],
])
output = keypoint_ops.flip_vertical(keypoints, 0.5)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_flip_vertical_permutation(self):
def graph_fn():
keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]])
flip_permutation = [0, 2, 1]
expected_keypoints = tf.constant([
[[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]],
[[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]],
])
output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_rot90(self):
def graph_fn():
keypoints = tf.constant([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]
])
expected_keypoints = tf.constant([
[[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]],
[[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]],
])
output = keypoint_ops.rot90(keypoints)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_rot90_permutation(self):
def graph_fn():
keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]])
rot_permutation = [0, 2, 1]
expected_keypoints = tf.constant([
[[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]],
[[0.4, 0.4], [0.3, 0.6], [0.4, 0.5]],
])
output = keypoint_ops.rot90(keypoints,
rotation_permutation=rot_permutation)
return output, expected_keypoints
output, expected_keypoints = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoints)
def test_keypoint_weights_from_visibilities(self):
def graph_fn():
keypoint_visibilities = tf.constant([
[True, True, False],
[False, True, False]
])
per_keypoint_weights = [1.0, 2.0, 3.0]
keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities(
keypoint_visibilities, per_keypoint_weights)
return keypoint_weights
expected_keypoint_weights = [
[1.0, 2.0, 0.0],
[0.0, 2.0, 0.0]
]
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_keypoint_weights)
def test_keypoint_weights_from_visibilities_no_per_kpt_weights(self):
def graph_fn():
keypoint_visibilities = tf.constant([
[True, True, False],
[False, True, False]
])
keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities(
keypoint_visibilities)
return keypoint_weights
expected_keypoint_weights = [
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.0]
]
output = self.execute(graph_fn, [])
self.assertAllClose(expected_keypoint_weights, output)
def test_set_keypoint_visibilities_no_initial_kpt_vis(self):
keypoints_np = np.array(
[
[[np.nan, 0.2],
[np.nan, np.nan],
[-3., 7.]],
[[0.5, 0.2],
[4., 1.0],
[-3., np.nan]],
], dtype=np.float32)
def graph_fn():
keypoints = tf.constant(keypoints_np, dtype=tf.float32)
keypoint_visibilities = keypoint_ops.set_keypoint_visibilities(
keypoints)
return keypoint_visibilities
expected_kpt_vis = [
[False, False, True],
[True, True, False]
]
output = self.execute(graph_fn, [])
self.assertAllEqual(expected_kpt_vis, output)
def test_set_keypoint_visibilities(self):
keypoints_np = np.array(
[
[[np.nan, 0.2],
[np.nan, np.nan],
[-3., 7.]],
[[0.5, 0.2],
[4., 1.0],
[-3., np.nan]],
], dtype=np.float32)
initial_keypoint_visibilities_np = np.array(
[
[False,
         True,  # Will be overridden by NaN coords.
False], # Will be maintained, even though non-NaN coords.
[True,
False, # Will be maintained, even though non-NaN coords.
False]
])
def graph_fn():
keypoints = tf.constant(keypoints_np, dtype=tf.float32)
initial_keypoint_visibilities = tf.constant(
initial_keypoint_visibilities_np, dtype=tf.bool)
keypoint_visibilities = keypoint_ops.set_keypoint_visibilities(
keypoints, initial_keypoint_visibilities)
return keypoint_visibilities
expected_kpt_vis = [
[False, False, False],
[True, False, False]
]
output = self.execute(graph_fn, [])
self.assertAllEqual(expected_kpt_vis, output)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/keypoint_ops_test.py | keypoint_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.research.vale.object_detection.losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import matcher
from object_detection.utils import test_case
class WeightedL2LocalizationLossTest(test_case.TestCase):
def testReturnsCorrectWeightedLoss(self):
batch_size = 3
num_anchors = 10
code_size = 4
def graph_fn():
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32)
loss_op = losses.WeightedL2LocalizationLoss()
loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor,
weights=weights))
return loss
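    # 3 images x 5 anchors with weight 1 x 4 coordinates, each with squared
    # error 1, halved by the 0.5 factor in the L2 loss.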
expected_loss = (3 * 5 * 4) / 2.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectAnchorwiseLoss(self):
batch_size = 3
num_anchors = 16
code_size = 4
def graph_fn():
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.ones([batch_size, num_anchors])
loss_op = losses.WeightedL2LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
expected_loss = np.ones((batch_size, num_anchors)) * 2
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectNanLoss(self):
batch_size = 3
num_anchors = 10
code_size = 4
def graph_fn():
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.concat([
          tf.zeros([batch_size, num_anchors, code_size // 2]),
          tf.ones([batch_size, num_anchors, code_size // 2]) * np.nan
], axis=2)
weights = tf.ones([batch_size, num_anchors])
loss_op = losses.WeightedL2LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
ignore_nan_targets=True)
loss = tf.reduce_sum(loss)
return loss
expected_loss = (3 * 5 * 4) / 2.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectWeightedLossWithLossesMask(self):
batch_size = 4
num_anchors = 10
code_size = 4
def graph_fn():
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32)
losses_mask = tf.constant([True, False, True, True], tf.bool)
loss_op = losses.WeightedL2LocalizationLoss()
loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor,
weights=weights, losses_mask=losses_mask))
return loss
expected_loss = (3 * 5 * 4) / 2.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, expected_loss)
class WeightedSmoothL1LocalizationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
batch_size = 2
num_anchors = 3
code_size = 4
def graph_fn():
prediction_tensor = tf.constant([[[2.5, 0, .4, 0],
[0, 0, 0, 0],
[0, 2.5, 0, .4]],
[[3.5, 0, 0, 0],
[0, .4, 0, .9],
[0, 0, 1.5, 0]]], tf.float32)
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[2, 1, 1],
[0, 3, 0]], tf.float32)
loss_op = losses.WeightedSmoothL1LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
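    # Smooth L1 (Huber, delta=1): 0.5 * x^2 for |x| < 1, else |x| - 0.5.
    # Image 1: anchor 0 gives (2.0 + 0.08) * 2 = 4.16 and anchor 2 gives
    # 2.08; image 2: anchor 1 gives 0.485 * 3 = 1.455.
    # Total: 4.16 + 2.08 + 1.455 = 7.695.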
exp_loss = 7.695
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
batch_size = 3
num_anchors = 3
code_size = 4
def graph_fn():
prediction_tensor = tf.constant([[[2.5, 0, .4, 0],
[0, 0, 0, 0],
[0, 2.5, 0, .4]],
[[3.5, 0, 0, 0],
[0, .4, 0, .9],
[0, 0, 1.5, 0]],
[[3.5, 7., 0, 0],
[0, .4, 0, .9],
[2.2, 2.2, 1.5, 0]]], tf.float32)
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[2, 1, 1],
[0, 3, 0],
[4, 3, 0]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSmoothL1LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 7.695
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
class WeightedIOULocalizationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, .5, .25]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[50, 50, 500.5, 100.25]]])
weights = [[1.0, .5, 2.0]]
loss_op = losses.WeightedIOULocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 2.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithNoLabels(self):
def graph_fn():
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, .5, .25]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[50, 50, 500.5, 100.25]]])
weights = [[1.0, .5, 2.0]]
losses_mask = tf.constant([False], tf.bool)
loss_op = losses.WeightedIOULocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 0.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
class WeightedGIOULocalizationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, 0, 0]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[5, 5, 10, 10]]])
weights = [[1.0, .5, 2.0]]
loss_op = losses.WeightedGIOULocalizationLoss()
loss = loss_op(prediction_tensor,
target_tensor,
weights=weights)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 3.5
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithNoLabels(self):
def graph_fn():
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, .5, .25]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[50, 50, 500.5, 100.25]]])
weights = [[1.0, .5, 2.0]]
losses_mask = tf.constant([False], tf.bool)
loss_op = losses.WeightedGIOULocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 0.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
class WeightedSigmoidClassificationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
exp_loss = -2 * math.log(.5)
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss, axis=2)
return loss
exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithClassIndices(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100, 100],
[100, -100, -100, -100],
[100, 0, -100, 100],
[-100, -100, 100, -100]],
[[-100, 0, 100, 100],
[-100, 100, -100, 100],
[100, 100, 100, 100],
[0, 0, -1, 100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 1, 1]],
[[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 1, 1, 0],
[1, 0, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 0, 0]]], tf.float32)
# Ignores the last class.
class_indices = tf.constant([0, 1, 2], tf.int32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
class_indices=class_indices)
loss = tf.reduce_sum(loss, axis=2)
return loss
exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss_per_anchor = loss_op(prediction_tensor, target_tensor,
weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss_per_anchor)
return loss
exp_loss = -2 * math.log(.5)
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def _logit(probability):
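  """Returns the logit (log-odds) of the given probability."""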
return math.log(probability / (1. - probability))
class SigmoidFocalClassificationLossTest(test_case.TestCase):
def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.97)],
[_logit(0.91)],
[_logit(0.73)],
[_logit(0.27)],
[_logit(0.09)],
[_logit(0.03)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
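    # With gamma=2 and no alpha, focal loss scales sigmoid cross entropy by
    # (1 - p_t)^2, so the more confident (easier) a prediction is, the more
    # orders of magnitude separate the two losses.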
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]])
def testHardExamplesProduceLossComparableToSigmoidXEntropy(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]])
def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights))
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAlmostEqual(order_of_ratio, 1.)
def testIgnoreNegativeExampleLossViaAlphaMultiplier(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=1.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
self.assertAllClose(focal_loss[0][3:], [0., 0.])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss[0][:3] /
focal_loss[0][:3])))
self.assertAllClose(order_of_ratio, [1., 1., 1.])
def testIgnorePositiveExampleLossViaAlphaMultiplier(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
self.assertAllClose(focal_loss[0][:3], [0., 0., 0.])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss[0][3:] /
focal_loss[0][3:])))
self.assertAllClose(order_of_ratio, [1., 1.])
def testSimilarToSigmoidXEntropyWithHalfAlphaAndZeroGammaUpToAScale(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.5,
gamma=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = focal_loss_op(prediction_tensor, target_tensor,
weights=weights)
sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
weights=weights)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
self.assertAllClose(sigmoid_loss, focal_loss * 2)
def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=None,
gamma=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = focal_loss_op(prediction_tensor, target_tensor,
weights=weights)
sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
weights=weights)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
self.assertAllClose(sigmoid_loss, focal_loss)
def testExpectedLossWithAlphaOneAndZeroGamma(self):
def graph_fn():
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=1.0,
gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
return focal_loss
focal_loss = self.execute(graph_fn, [])
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor
1.0 * # alpha
8), # positives from 8 anchors
focal_loss)
def testExpectedLossWithAlpha75AndZeroGamma(self):
def graph_fn():
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75,
gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
return focal_loss
focal_loss = self.execute(graph_fn, [])
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor.
((0.75 * # alpha for positives.
8) + # positives from 8 anchors.
(0.25 * # alpha for negatives.
8 * 2))), # negatives from 8 anchors for two classes.
focal_loss)
def testExpectedLossWithLossesMask(self):
def graph_fn():
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75,
gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights,
losses_mask=losses_mask))
return focal_loss
focal_loss = self.execute(graph_fn, [])
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor.
((0.75 * # alpha for positives.
8) + # positives from 8 anchors.
(0.25 * # alpha for negatives.
8 * 2))), # negatives from 8 anchors for two classes.
focal_loss)
class WeightedSoftmaxClassificationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = - 1.5 * math.log(.5)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLossWithHighLogitScaleSetting(self):
"""At very high logit_scale, all predictions will be ~0.33."""
def graph_fn():
# TODO(yonib): Also test logit_scale with anchorwise=False.
logit_scale = 10e16
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss(
logit_scale=logit_scale)
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
uniform_distribution_loss = - math.log(.33333333333)
exp_loss = np.matrix([[uniform_distribution_loss] * 4,
[uniform_distribution_loss] * 4])
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = - 1.5 * math.log(.5)
self.assertAllClose(loss_output, exp_loss)
class WeightedSoftmaxClassificationAgainstLogitsLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 1]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = - 1.5 * math.log(.5)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 0]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLossWithLogitScaleSetting(self):
def graph_fn():
logit_scale = 100.
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 0]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=logit_scale)
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
    # Find the softmax of the two prediction types above.
softmax_pred1 = [np.exp(-1), np.exp(-1), np.exp(1)]
softmax_pred1 /= sum(softmax_pred1)
softmax_pred2 = [np.exp(0), np.exp(0), np.exp(-1)]
softmax_pred2 /= sum(softmax_pred2)
    # Compute the expected cross entropy for perfect matches.
exp_entropy1 = sum(
[-x*np.log(x) for x in softmax_pred1])
exp_entropy2 = sum(
[-x*np.log(x) for x in softmax_pred2])
    # Weighted expected losses.
exp_loss = np.matrix(
[[exp_entropy1, exp_entropy1, exp_entropy2*.5, exp_entropy1],
[exp_entropy2, exp_entropy1, exp_entropy1, 0.]])
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
class BootstrappedSigmoidClassificationLossTest(test_case.TestCase):
def testReturnsCorrectLossSoftBootstrapping(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, 0],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='soft')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = -math.log(.5)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossHardBootstrapping(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, 0],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='hard')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = -math.log(.5)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='hard')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss, axis=2)
return loss
loss_output = self.execute(graph_fn, [])
    exp_loss = np.array([[0, 0, -math.log(.5), 0],
                         [-math.log(.5), 0, 0, 0]])
self.assertAllClose(loss_output, exp_loss)
class HardExampleMinerTest(test_case.TestCase):
def testHardMiningWithSingleLossType(self):
def graph_fn():
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
# Uses only location loss to select hard examples
loss_op = losses.HardExampleMiner(num_hard_examples=1,
iou_threshold=0.0,
loss_type='loc',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute(graph_fn, [])
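    # With loss_type='loc' and num_hard_examples=1, the miner keeps the
    # highest location-loss anchor per image (losses 100 and 3); the returned
    # classification loss sums the cls losses of those same anchors (0 and 0).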
exp_loc_loss = 100 + 3
exp_cls_loss = 0 + 0
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testHardMiningWithBothLossType(self):
def graph_fn():
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=1,
iou_threshold=0.0,
loss_type='both',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute(graph_fn, [])
exp_loc_loss = 80 + 0
exp_cls_loss = 50 + 9
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testHardMiningNMS(self):
def graph_fn():
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.9, 0.9, 0.99, 0.99],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=2,
iou_threshold=0.5,
loss_type='cls',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute(graph_fn, [])
exp_loc_loss = 0 + 90 + 0 + 1
exp_cls_loss = 110 + 10 + 9 + 6
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testEnforceNegativesPerPositiveRatio(self):
location_losses = np.array([[100, 90, 80, 0, 1, 2,
3, 10, 20, 100, 20, 3]], np.float32)
cls_losses = np.array([[0, 0, 100, 0, 90, 70,
0, 60, 0, 17, 13, 0]], np.float32)
box_corners = np.array([[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.5, 0.1],
[0.0, 0.0, 0.6, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.8, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 1.0, 0.1],
[0.0, 0.0, 1.1, 0.1],
[0.0, 0.0, 0.2, 0.1]], np.float32)
match_results = np.array([2, -1, 0, -1, -1, 1, -1, -1, -1, -1, -1, 3],
np.int32)
max_negatives_per_positive_list = [0.0, 0.5, 1.0, 1.5, 10]
exp_loc_loss_list = [80 + 2,
80 + 1 + 2,
80 + 1 + 2 + 10,
80 + 1 + 2 + 10 + 100,
80 + 1 + 2 + 10 + 100 + 20]
exp_cls_loss_list = [100 + 70,
100 + 90 + 70,
100 + 90 + 70 + 60,
100 + 90 + 70 + 60 + 17,
100 + 90 + 70 + 60 + 17 + 13]
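    # The miner ranks anchors by classification loss, suppresses
    # near-duplicate boxes with NMS at IoU 0.9999, and then keeps all selected
    # positives plus at most max_negatives_per_positive * num_positives
    # negatives.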
# pylint: disable=cell-var-from-loop
for max_negatives_per_positive, exp_loc_loss, exp_cls_loss in zip(
max_negatives_per_positive_list, exp_loc_loss_list, exp_cls_loss_list):
def graph_fn():
loss_op = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=0.9999, loss_type='cls',
cls_loss_weight=1, loc_loss_weight=1,
max_negatives_per_positive=max_negatives_per_positive)
match_list = [matcher.Match(tf.constant(match_results))]
decoded_boxlist_list = [box_list.BoxList(tf.constant(box_corners))]
(loc_loss, cls_loss) = loss_op(tf.constant(location_losses),
tf.constant(cls_losses),
decoded_boxlist_list, match_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
# pylint: enable=cell-var-from-loop
def testEnforceNegativesPerPositiveRatioWithMinNegativesPerImage(self):
location_losses = np.array([[100, 90, 80, 0, 1, 2,
3, 10, 20, 100, 20, 3]], np.float32)
cls_losses = np.array([[0, 0, 100, 0, 90, 70,
0, 60, 0, 17, 13, 0]], np.float32)
box_corners = np.array([[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.5, 0.1],
[0.0, 0.0, 0.6, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.8, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 1.0, 0.1],
[0.0, 0.0, 1.1, 0.1],
[0.0, 0.0, 0.2, 0.1]], np.float32)
match_results = np.array([-1] * 12, np.int32)
min_negatives_per_image_list = [0, 1, 2, 4, 5, 6]
exp_loc_loss_list = [0,
80,
80 + 1,
80 + 1 + 2 + 10,
80 + 1 + 2 + 10 + 100,
80 + 1 + 2 + 10 + 100 + 20]
exp_cls_loss_list = [0,
100,
100 + 90,
100 + 90 + 70 + 60,
100 + 90 + 70 + 60 + 17,
100 + 90 + 70 + 60 + 17 + 13]
# pylint: disable=cell-var-from-loop
for min_negatives_per_image, exp_loc_loss, exp_cls_loss in zip(
min_negatives_per_image_list, exp_loc_loss_list, exp_cls_loss_list):
def graph_fn():
loss_op = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=0.9999, loss_type='cls',
cls_loss_weight=1, loc_loss_weight=1,
max_negatives_per_positive=3,
min_negatives_per_image=min_negatives_per_image)
match_list = [matcher.Match(tf.constant(match_results))]
decoded_boxlist_list = [box_list.BoxList(tf.constant(box_corners))]
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list, match_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
# pylint: enable=cell-var-from-loop
LOG_2 = np.log(2)
LOG_3 = np.log(3)
class PenaltyReducedLogisticFocalLossTest(test_case.TestCase):
"""Testing loss function from Equation (1) in [1].
[1]: https://arxiv.org/abs/1904.07850
"""
def setUp(self):
super(PenaltyReducedLogisticFocalLossTest, self).setUp()
self._prediction = np.array([
# First batch
[[1 / 2, 1 / 4, 3 / 4],
[3 / 4, 1 / 3, 1 / 3]],
# Second Batch
[[0.0, 1.0, 1 / 2],
[3 / 4, 2 / 3, 1 / 3]]], np.float32)
self._prediction = np.log(self._prediction/(1 - self._prediction))
self._target = np.array([
# First batch
[[1.0, 0.91, 1.0],
[0.36, 0.84, 1.0]],
# Second Batch
[[0.01, 1.0, 0.75],
[0.96, 1.0, 1.0]]], np.float32)
def test_returns_correct_loss(self):
def graph_fn(prediction, target):
weights = tf.constant([
[[1.0], [1.0]],
[[1.0], [1.0]],
])
loss = losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=0.5)
computed_value = loss._compute_loss(prediction, target,
weights)
return computed_value
computed_value = self.execute(graph_fn, [self._prediction, self._target])
expected_value = np.array([
# First batch
[[1 / 4 * LOG_2,
0.3 * 0.0625 * (2 * LOG_2 - LOG_3),
1 / 16 * (2 * LOG_2 - LOG_3)],
[0.8 * 9 / 16 * 2 * LOG_2,
0.4 * 1 / 9 * (LOG_3 - LOG_2),
4 / 9 * LOG_3]],
# Second Batch
[[0.0,
0.0,
1 / 2 * 1 / 4 * LOG_2],
[0.2 * 9 / 16 * 2 * LOG_2,
1 / 9 * (LOG_3 - LOG_2),
4 / 9 * LOG_3]]])
self.assertAllClose(computed_value, expected_value, rtol=1e-3, atol=1e-3)
def test_returns_correct_loss_weighted(self):
def graph_fn(prediction, target):
weights = tf.constant([
[[1.0, 0.0, 1.0], [0.0, 0.0, 1.0]],
[[1.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
])
loss = losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=0.5)
computed_value = loss._compute_loss(prediction, target,
weights)
return computed_value
computed_value = self.execute(graph_fn, [self._prediction, self._target])
expected_value = np.array([
# First batch
[[1 / 4 * LOG_2,
0.0,
1 / 16 * (2 * LOG_2 - LOG_3)],
[0.0,
0.0,
4 / 9 * LOG_3]],
# Second Batch
[[0.0,
0.0,
1 / 2 * 1 / 4 * LOG_2],
[0.0,
0.0,
0.0]]])
self.assertAllClose(computed_value, expected_value, rtol=1e-3, atol=1e-3)
class L1LocalizationLossTest(test_case.TestCase):
def test_returns_correct_loss(self):
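    # The L1 localization loss is elementwise |prediction - target|, scaled by
    # the per-element weights.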
def graph_fn():
loss = losses.L1LocalizationLoss()
pred = [[0.1, 0.2], [0.7, 0.5]]
target = [[0.9, 1.0], [0.1, 0.4]]
weights = [[1.0, 0.0], [1.0, 1.0]]
return loss._compute_loss(pred, target, weights)
computed_value = self.execute(graph_fn, [])
self.assertAllClose(computed_value, [[0.8, 0.0], [0.6, 0.1]], rtol=1e-6)
class WeightedDiceClassificationLossTest(test_case.TestCase):
def test_compute_weights_1(self):
def graph_fn():
loss = losses.WeightedDiceClassificationLoss(squared_normalization=False)
pred = np.zeros((2, 3, 4), dtype=np.float32)
target = np.zeros((2, 3, 4), dtype=np.float32)
pred[0, 1, 0] = _logit(0.9)
pred[0, 2, 0] = _logit(0.1)
pred[0, 2, 2] = _logit(0.5)
pred[0, 1, 3] = _logit(0.1)
pred[1, 2, 3] = _logit(0.2)
pred[1, 1, 1] = _logit(0.3)
pred[1, 0, 2] = _logit(0.1)
target[0, 1, 0] = 1.0
target[0, 2, 2] = 1.0
target[0, 1, 3] = 1.0
target[1, 2, 3] = 1.0
target[1, 1, 1] = 0.0
target[1, 0, 2] = 0.0
weights = np.ones_like(target)
return loss._compute_loss(pred, target, weights)
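    # Dice coefficient per (batch, class): 2 * sum(p * t) / (sum(p) + sum(t)),
    # where p = sigmoid(pred); the untouched zero logits contribute p = 0.5.
    # E.g. batch 0, class 0: 2 * 0.9 / (1.5 + 1.0).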
dice_coeff = np.zeros((2, 4))
dice_coeff[0, 0] = 2 * 0.9 / 2.5
dice_coeff[0, 2] = 2 * 0.5 / 2.5
dice_coeff[0, 3] = 2 * 0.1 / 2.1
dice_coeff[1, 3] = 2 * 0.2 / 2.2
computed_value = self.execute(graph_fn, [])
self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6)
def test_compute_weights_set(self):
def graph_fn():
loss = losses.WeightedDiceClassificationLoss(squared_normalization=False)
pred = np.zeros((2, 3, 4), dtype=np.float32)
target = np.zeros((2, 3, 4), dtype=np.float32)
pred[0, 1, 0] = _logit(0.9)
pred[0, 2, 0] = _logit(0.1)
pred[0, 2, 2] = _logit(0.5)
pred[0, 1, 3] = _logit(0.1)
pred[1, 2, 3] = _logit(0.2)
pred[1, 1, 1] = _logit(0.3)
pred[1, 0, 2] = _logit(0.1)
target[0, 1, 0] = 1.0
target[0, 2, 2] = 1.0
target[0, 1, 3] = 1.0
target[1, 2, 3] = 1.0
target[1, 1, 1] = 0.0
target[1, 0, 2] = 0.0
weights = np.ones_like(target)
weights[:, :, 0] = 0.0
return loss._compute_loss(pred, target, weights)
dice_coeff = np.zeros((2, 4))
dice_coeff[0, 2] = 2 * 0.5 / 2.5
dice_coeff[0, 3] = 2 * 0.1 / 2.1
dice_coeff[1, 3] = 2 * 0.2 / 2.2
computed_value = self.execute(graph_fn, [])
self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6)
def test_class_indices(self):
def graph_fn():
loss = losses.WeightedDiceClassificationLoss(squared_normalization=False)
pred = np.zeros((2, 3, 4), dtype=np.float32)
target = np.zeros((2, 3, 4), dtype=np.float32)
pred[0, 1, 0] = _logit(0.9)
pred[0, 2, 0] = _logit(0.1)
pred[0, 2, 2] = _logit(0.5)
pred[0, 1, 3] = _logit(0.1)
pred[1, 2, 3] = _logit(0.2)
pred[1, 1, 1] = _logit(0.3)
pred[1, 0, 2] = _logit(0.1)
target[0, 1, 0] = 1.0
target[0, 2, 2] = 1.0
target[0, 1, 3] = 1.0
target[1, 2, 3] = 1.0
target[1, 1, 1] = 0.0
target[1, 0, 2] = 0.0
weights = np.ones_like(target)
return loss._compute_loss(pred, target, weights, class_indices=[0])
dice_coeff = np.zeros((2, 4))
dice_coeff[0, 0] = 2 * 0.9 / 2.5
computed_value = self.execute(graph_fn, [])
self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/losses_test.py | losses_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base anchor generator.
The job of the anchor generator is to create (or load) a collection
of bounding boxes to be used as anchors.
Generated anchors are assumed to match some convolutional grid or list of grid
shapes. For example, we might want to generate anchors matching an 8x8
feature map and a 4x4 feature map. If we place 3 anchors per grid location
on the first feature map and 6 anchors per grid location on the second feature
map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total.
To support fully convolutional settings, feature map shapes are passed
dynamically at generation time. The number of anchors to place at each location
is static --- implementations of AnchorGenerator must always be able to
return the number of anchors they use per location for each feature map.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import six
from six.moves import zip
import tensorflow.compat.v1 as tf
class AnchorGenerator(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for anchor generators."""
@abstractmethod
def name_scope(self):
"""Name scope.
Must be defined by implementations.
Returns:
a string representing the name scope of the anchor generation operation.
"""
pass
@property
def check_num_anchors(self):
"""Whether to dynamically check the number of anchors generated.
Can be overridden by implementations that would like to disable this
behavior.
Returns:
a boolean controlling whether the Generate function should dynamically
check the number of anchors generated against the mathematically
expected number of anchors.
"""
return True
@abstractmethod
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the `generate` function.
"""
pass
def generate(self, feature_map_shape_list, **params):
"""Generates a collection of bounding boxes to be used as anchors.
TODO(rathodv): remove **params from argument list and make stride and
offsets (for multiple_grid_anchor_generator) constructor arguments.
Args:
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with. Pairs can be provided as 1-dimensional
integer tensors of length 2 or simply as tuples of integers.
**params: parameters for anchor generation op
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if the number of feature map shapes does not match the length
        of `num_anchors_per_location()`.
"""
if self.check_num_anchors and (
len(feature_map_shape_list) != len(self.num_anchors_per_location())):
raise ValueError('Number of feature maps is expected to equal the length '
'of `num_anchors_per_location`.')
with tf.name_scope(self.name_scope()):
anchors_list = self._generate(feature_map_shape_list, **params)
if self.check_num_anchors:
with tf.control_dependencies([
self._assert_correct_number_of_anchors(
anchors_list, feature_map_shape_list)]):
for item in anchors_list:
item.set(tf.identity(item.get()))
return anchors_list
@abstractmethod
def _generate(self, feature_map_shape_list, **params):
"""To be overridden by implementations.
Args:
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
**params: parameters for anchor generation op
Returns:
boxes_list: a list of BoxList, each holding a collection of N anchor
boxes.
"""
pass
def anchor_index_to_feature_map_index(self, boxlist_list):
"""Returns a 1-D array of feature map indices for each anchor.
Args:
      boxlist_list: a list of BoxList, each holding a collection of N anchor
boxes. This list is produced in self.generate().
Returns:
A [num_anchors] integer array, where each element indicates which feature
map index the anchor belongs to.
"""
feature_map_indices_list = []
for i, boxes in enumerate(boxlist_list):
feature_map_indices_list.append(
i * tf.ones([boxes.num_boxes()], dtype=tf.int32))
return tf.concat(feature_map_indices_list, axis=0)
def _assert_correct_number_of_anchors(self, anchors_list,
feature_map_shape_list):
"""Assert that correct number of anchors was generated.
Args:
anchors_list: A list of box_list.BoxList object holding anchors generated.
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
Returns:
Op that raises InvalidArgumentError if the number of anchors does not
match the number of expected anchors.
"""
expected_num_anchors = 0
actual_num_anchors = 0
for num_anchors_per_location, feature_map_shape, anchors in zip(
self.num_anchors_per_location(), feature_map_shape_list, anchors_list):
expected_num_anchors += (num_anchors_per_location
* feature_map_shape[0]
* feature_map_shape[1])
actual_num_anchors += anchors.num_boxes()
return tf.assert_equal(expected_num_anchors, actual_num_anchors)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/anchor_generator.py | anchor_generator.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.matcher."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import matcher
from object_detection.utils import test_case
class MatchTest(test_case.TestCase):
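  # In `matcher.Match`, entry i of match_results is the matched row index for
  # column i when it is >= 0; -1 marks an unmatched column and -2 an ignored
  # column.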
  def test_get_correct_matched_column_indices(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
matched_column_indices = match.matched_column_indices()
return matched_column_indices
expected_column_indices = [0, 1, 3, 5]
matched_column_indices = self.execute(graph_fn, [])
self.assertAllEqual(matched_column_indices, expected_column_indices)
def test_get_correct_counts(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 1, -2])
match = matcher.Match(match_results)
num_matched_columns = match.num_matched_columns()
num_unmatched_columns = match.num_unmatched_columns()
num_ignored_columns = match.num_ignored_columns()
num_matched_rows = match.num_matched_rows()
return [num_matched_columns, num_unmatched_columns, num_ignored_columns,
num_matched_rows]
(num_matched_columns_out, num_unmatched_columns_out,
num_ignored_columns_out,
num_matched_rows_out) = self.execute_cpu(graph_fn, [])
exp_num_matched_columns = 4
exp_num_unmatched_columns = 2
exp_num_ignored_columns = 1
exp_num_matched_rows = 3
self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns)
self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns)
self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns)
self.assertAllEqual(num_matched_rows_out, exp_num_matched_rows)
def testGetCorrectUnmatchedColumnIndices(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
unmatched_column_indices = match.unmatched_column_indices()
return unmatched_column_indices
unmatched_column_indices = self.execute(graph_fn, [])
expected_column_indices = [2, 4]
self.assertAllEqual(unmatched_column_indices, expected_column_indices)
def testGetCorrectMatchedRowIndices(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
matched_row_indices = match.matched_row_indices()
return matched_row_indices
matched_row_indices = self.execute(graph_fn, [])
expected_row_indices = [3, 1, 0, 5]
self.assertAllEqual(matched_row_indices, expected_row_indices)
def test_get_correct_ignored_column_indices(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
ignored_column_indices = match.ignored_column_indices()
return ignored_column_indices
ignored_column_indices = self.execute(graph_fn, [])
expected_column_indices = [6]
self.assertAllEqual(ignored_column_indices, expected_column_indices)
def test_get_correct_matched_column_indicator(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
matched_column_indicator = match.matched_column_indicator()
return matched_column_indicator
expected_column_indicator = [True, True, False, True, False, True, False]
matched_column_indicator = self.execute(graph_fn, [])
self.assertAllEqual(matched_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_column_indicator(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
unmatched_column_indicator = match.unmatched_column_indicator()
return unmatched_column_indicator
expected_column_indicator = [False, False, True, False, True, False, False]
unmatched_column_indicator = self.execute(graph_fn, [])
self.assertAllEqual(unmatched_column_indicator, expected_column_indicator)
def test_get_correct_ignored_column_indicator(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
ignored_column_indicator = match.ignored_column_indicator()
return ignored_column_indicator
expected_column_indicator = [False, False, False, False, False, False, True]
ignored_column_indicator = self.execute(graph_fn, [])
self.assertAllEqual(ignored_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_ignored_column_indices(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
      unmatched_ignored_column_indices = (
          match.unmatched_or_ignored_column_indices())
return unmatched_ignored_column_indices
expected_column_indices = [2, 4, 6]
unmatched_ignored_column_indices = self.execute(graph_fn, [])
self.assertAllEqual(unmatched_ignored_column_indices,
expected_column_indices)
def test_all_columns_accounted_for(self):
    # Note: num_matches is deliberately small so that not every outcome type
    # (matched, unmatched, ignored) is guaranteed to appear in a given draw.
def graph_fn():
match_results = tf.random_uniform(
[num_matches], minval=-2, maxval=5, dtype=tf.int32)
match = matcher.Match(match_results)
matched_column_indices = match.matched_column_indices()
unmatched_column_indices = match.unmatched_column_indices()
ignored_column_indices = match.ignored_column_indices()
return (matched_column_indices, unmatched_column_indices,
ignored_column_indices)
num_matches = 10
matched, unmatched, ignored = self.execute(graph_fn, [])
all_indices = np.hstack((matched, unmatched, ignored))
all_indices_sorted = np.sort(all_indices)
self.assertAllEqual(all_indices_sorted,
np.arange(num_matches, dtype=np.int32))
def test_scalar_gather_based_on_match(self):
def graph_fn():
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
input_tensor = tf.constant([0, 1, 2, 3, 4, 5, 6, 7], dtype=tf.float32)
match = matcher.Match(match_results)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=100.,
ignored_value=200.)
return gathered_tensor
expected_gathered_tensor = [3, 1, 100, 0, 100, 5, 200]
gathered_tensor_out = self.execute(graph_fn, [])
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
def test_multidimensional_gather_based_on_match(self):
def graph_fn():
match_results = tf.constant([1, -1, -2])
input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]],
dtype=tf.float32)
match = matcher.Match(match_results)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
return gathered_tensor
expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]]
gathered_tensor_out = self.execute(graph_fn, [])
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
def test_multidimensional_gather_based_on_match_with_matmul_gather_op(self):
def graph_fn():
match_results = tf.constant([1, -1, -2])
input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]],
dtype=tf.float32)
match = matcher.Match(match_results, use_matmul_gather=True)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
return gathered_tensor
expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]]
gathered_tensor_out = self.execute(graph_fn, [])
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/matcher_test.py | matcher_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_list_ops."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.utils import test_case
class BoxListOpsTest(test_case.TestCase):
"""Tests for common bounding box operations."""
def test_area(self):
def graph_fn():
corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]])
boxes = box_list.BoxList(corners)
areas = box_list_ops.area(boxes)
return areas
areas_out = self.execute(graph_fn, [])
exp_output = [200.0, 4.0]
self.assertAllClose(areas_out, exp_output)
def test_height_width(self):
def graph_fn():
corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]])
boxes = box_list.BoxList(corners)
return box_list_ops.height_width(boxes)
heights_out, widths_out = self.execute(graph_fn, [])
exp_output_heights = [10., 2.]
exp_output_widths = [20., 2.]
self.assertAllClose(heights_out, exp_output_heights)
self.assertAllClose(widths_out, exp_output_widths)
def test_scale(self):
def graph_fn():
corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
dtype=tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2]]))
y_scale = tf.constant(1.0/100)
x_scale = tf.constant(1.0/200)
scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
return scaled_boxes.get(), scaled_boxes.get_field('extra_data')
scaled_corners_out, extra_data_out = self.execute(graph_fn, [])
exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
self.assertAllClose(scaled_corners_out, exp_output)
self.assertAllEqual(extra_data_out, [[1], [2]])
def test_scale_height_width(self):
def graph_fn():
corners = tf.constant([[-10, -20, 10, 20], [0, 100, 100, 200]],
dtype=tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2]]))
y_scale = tf.constant(2.)
x_scale = tf.constant(0.5)
scaled_boxes = box_list_ops.scale_height_width(boxes, y_scale, x_scale)
return scaled_boxes.get(), scaled_boxes.get_field('extra_data')
exp_output = [
[-20., -10, 20., 10],
[-50., 125, 150., 175.]]
scaled_corners_out, extra_data_out = self.execute(graph_fn, [])
self.assertAllClose(scaled_corners_out, exp_output)
self.assertAllEqual(extra_data_out, [[1], [2]])
def test_clip_to_window_filter_boxes_which_fall_outside_the_window(
self):
def graph_fn():
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-100.0, -100.0, 300.0, 600.0],
[-10.0, -10.0, -9.0, -9.0]])
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
pruned = box_list_ops.clip_to_window(
boxes, window, filter_nonoverlapping=True)
return pruned.get(), pruned.get_field('extra_data')
exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0],
[0.0, 0.0, 9.0, 14.0]]
pruned_output, extra_data_out = self.execute_cpu(graph_fn, [])
self.assertAllClose(pruned_output, exp_output)
self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5]])
def test_clip_to_window_without_filtering_boxes_which_fall_outside_the_window(
self):
def graph_fn():
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-100.0, -100.0, 300.0, 600.0],
[-10.0, -10.0, -9.0, -9.0]])
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
pruned = box_list_ops.clip_to_window(
boxes, window, filter_nonoverlapping=False)
return pruned.get(), pruned.get_field('extra_data')
pruned_output, extra_data_out = self.execute(graph_fn, [])
exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0],
[0.0, 0.0, 9.0, 14.0], [0.0, 0.0, 0.0, 0.0]]
self.assertAllClose(pruned_output, exp_output)
self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5], [6]])
def test_prune_outside_window_filters_boxes_which_fall_outside_the_window(
self):
def graph_fn():
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-10.0, -10.0, -9.0, -9.0],
[-100.0, -100.0, 300.0, 600.0]])
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
pruned, keep_indices = box_list_ops.prune_outside_window(boxes, window)
return pruned.get(), pruned.get_field('extra_data'), keep_indices
pruned_output, extra_data_out, keep_indices_out = self.execute_cpu(graph_fn,
[])
exp_output = [[5.0, 5.0, 6.0, 6.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0]]
self.assertAllClose(pruned_output, exp_output)
self.assertAllEqual(keep_indices_out, [0, 2, 3])
self.assertAllEqual(extra_data_out, [[1], [3], [4]])
def test_prune_completely_outside_window(self):
def graph_fn():
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-10.0, -10.0, -9.0, -9.0],
[-100.0, -100.0, 300.0, 600.0]])
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
pruned, keep_indices = box_list_ops.prune_completely_outside_window(
boxes, window)
return pruned.get(), pruned.get_field('extra_data'), keep_indices
pruned_output, extra_data_out, keep_indices_out = self.execute(graph_fn, [])
exp_output = [[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-100.0, -100.0, 300.0, 600.0]]
self.assertAllClose(pruned_output, exp_output)
self.assertAllEqual(keep_indices_out, [0, 1, 2, 3, 5])
self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [6]])
def test_prune_completely_outside_window_with_empty_boxlist(self):
def graph_fn():
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.zeros(shape=[0, 4], dtype=tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.zeros(shape=[0], dtype=tf.int32))
pruned, keep_indices = box_list_ops.prune_completely_outside_window(
boxes, window)
pruned_boxes = pruned.get()
extra = pruned.get_field('extra_data')
return pruned_boxes, extra, keep_indices
pruned_boxes_out, extra_out, keep_indices_out = self.execute(graph_fn, [])
exp_pruned_boxes = np.zeros(shape=[0, 4], dtype=np.float32)
exp_extra = np.zeros(shape=[0], dtype=np.int32)
self.assertAllClose(exp_pruned_boxes, pruned_boxes_out)
self.assertAllEqual([], keep_indices_out)
self.assertAllEqual(exp_extra, extra_out)
def test_intersection(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
intersect = box_list_ops.intersection(boxes1, boxes2)
return intersect
exp_output = [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]]
intersect_out = self.execute(graph_fn, [])
self.assertAllClose(intersect_out, exp_output)
def test_matched_intersection(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
intersect = box_list_ops.matched_intersection(boxes1, boxes2)
return intersect
exp_output = [2.0, 0.0]
intersect_out = self.execute(graph_fn, [])
self.assertAllClose(intersect_out, exp_output)
def test_iou(self):
def graph_fn():
      corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
iou = box_list_ops.iou(boxes1, boxes2)
return iou
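    # iou = intersection / union. E.g. boxes1[0] = [4, 3, 7, 5] (area 6) and
    # corners2[0] = [3, 4, 6, 8] (area 12) overlap with area 2, giving
    # 2 / (6 + 12 - 2) = 2 / 16.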
exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]]
iou_output = self.execute(graph_fn, [])
self.assertAllClose(iou_output, exp_output)
def test_l1(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
l1 = box_list_ops.l1(boxes1, boxes2)
return l1
exp_output = [[5.0, 22.5, 45.5], [8.5, 19.0, 40.0]]
l1_output = self.execute(graph_fn, [])
self.assertAllClose(l1_output, exp_output)
def test_giou(self):
def graph_fn():
corners1 = tf.constant([[5.0, 7.0, 7.0, 9.0]])
corners2 = tf.constant([[5.0, 7.0, 7.0, 9.0], [5.0, 11.0, 7.0, 13.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
giou = box_list_ops.giou(boxes1, boxes2)
return giou
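    # giou = iou - area(C \ (A U B)) / area(C), where C is the smallest box
    # enclosing both. For the disjoint pair: iou = 0, C = [5, 7, 7, 13] has
    # area 12, the two boxes cover 8, so giou = -(12 - 8) / 12 = -1/3.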
exp_output = [[1.0, -1.0 / 3.0]]
giou_output = self.execute(graph_fn, [])
self.assertAllClose(giou_output, exp_output)
def test_matched_iou(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
iou = box_list_ops.matched_iou(boxes1, boxes2)
return iou
exp_output = [2.0 / 16.0, 0]
iou_output = self.execute(graph_fn, [])
self.assertAllClose(iou_output, exp_output)
  def test_iou_works_on_empty_inputs(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
boxes_empty = box_list.BoxList(tf.zeros((0, 4)))
iou_empty_1 = box_list_ops.iou(boxes1, boxes_empty)
iou_empty_2 = box_list_ops.iou(boxes_empty, boxes2)
iou_empty_3 = box_list_ops.iou(boxes_empty, boxes_empty)
return iou_empty_1, iou_empty_2, iou_empty_3
iou_output_1, iou_output_2, iou_output_3 = self.execute(graph_fn, [])
self.assertAllEqual(iou_output_1.shape, (2, 0))
self.assertAllEqual(iou_output_2.shape, (0, 3))
self.assertAllEqual(iou_output_3.shape, (0, 0))
def test_ioa(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
ioa_1 = box_list_ops.ioa(boxes1, boxes2)
ioa_2 = box_list_ops.ioa(boxes2, boxes1)
return ioa_1, ioa_2
exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0],
[1.0 / 12.0, 0.0, 5.0 / 400.0]]
exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0],
[0, 0],
[6.0 / 6.0, 5.0 / 5.0]]
ioa_output_1, ioa_output_2 = self.execute(graph_fn, [])
self.assertAllClose(ioa_output_1, exp_output_1)
self.assertAllClose(ioa_output_2, exp_output_2)
def test_prune_non_overlapping_boxes(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
minoverlap = 0.5
exp_output_1 = boxes1
exp_output_2 = box_list.BoxList(tf.constant(0.0, shape=[0, 4]))
output_1, keep_indices_1 = box_list_ops.prune_non_overlapping_boxes(
boxes1, boxes2, min_overlap=minoverlap)
output_2, keep_indices_2 = box_list_ops.prune_non_overlapping_boxes(
boxes2, boxes1, min_overlap=minoverlap)
return (output_1.get(), keep_indices_1, output_2.get(), keep_indices_2,
exp_output_1.get(), exp_output_2.get())
(output_1_, keep_indices_1_, output_2_, keep_indices_2_, exp_output_1_,
exp_output_2_) = self.execute_cpu(graph_fn, [])
self.assertAllClose(output_1_, exp_output_1_)
self.assertAllClose(output_2_, exp_output_2_)
self.assertAllEqual(keep_indices_1_, [0, 1])
self.assertAllEqual(keep_indices_2_, [])
def test_prune_small_boxes(self):
def graph_fn():
boxes = tf.constant([[4.0, 3.0, 7.0, 5.0],
[5.0, 6.0, 10.0, 7.0],
[3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes = box_list.BoxList(boxes)
pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3)
return pruned_boxes.get()
exp_boxes = [[3.0, 4.0, 6.0, 8.0],
[0.0, 0.0, 20.0, 20.0]]
pruned_boxes = self.execute(graph_fn, [])
self.assertAllEqual(pruned_boxes, exp_boxes)
def test_prune_small_boxes_prunes_boxes_with_negative_side(self):
def graph_fn():
boxes = tf.constant([[4.0, 3.0, 7.0, 5.0],
[5.0, 6.0, 10.0, 7.0],
[3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0],
[2.0, 3.0, 1.5, 7.0], # negative height
[2.0, 3.0, 5.0, 1.7]]) # negative width
boxes = box_list.BoxList(boxes)
pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3)
return pruned_boxes.get()
exp_boxes = [[3.0, 4.0, 6.0, 8.0],
[0.0, 0.0, 20.0, 20.0]]
pruned_boxes = self.execute_cpu(graph_fn, [])
self.assertAllEqual(pruned_boxes, exp_boxes)
def test_change_coordinate_frame(self):
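    # change_coordinate_frame re-expresses each box relative to `window`:
    # every coordinate becomes (coord - window_min) / window_extent.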
def graph_fn():
corners = tf.constant([[0.25, 0.5, 0.75, 0.75], [0.5, 0.0, 1.0, 1.0]])
window = tf.constant([0.25, 0.25, 0.75, 0.75])
boxes = box_list.BoxList(corners)
expected_corners = tf.constant([[0, 0.5, 1.0, 1.0],
[0.5, -0.5, 1.5, 1.5]])
expected_boxes = box_list.BoxList(expected_corners)
output = box_list_ops.change_coordinate_frame(boxes, window)
return output.get(), expected_boxes.get()
output_, expected_boxes_ = self.execute(graph_fn, [])
self.assertAllClose(output_, expected_boxes_)
  def test_ioa_works_on_empty_inputs(self):
def graph_fn():
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
boxes_empty = box_list.BoxList(tf.zeros((0, 4)))
ioa_empty_1 = box_list_ops.ioa(boxes1, boxes_empty)
ioa_empty_2 = box_list_ops.ioa(boxes_empty, boxes2)
ioa_empty_3 = box_list_ops.ioa(boxes_empty, boxes_empty)
return ioa_empty_1, ioa_empty_2, ioa_empty_3
ioa_output_1, ioa_output_2, ioa_output_3 = self.execute(graph_fn, [])
self.assertAllEqual(ioa_output_1.shape, (2, 0))
self.assertAllEqual(ioa_output_2.shape, (0, 3))
self.assertAllEqual(ioa_output_3.shape, (0, 0))
def test_pairwise_distances(self):
def graph_fn():
corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 2.0]])
corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0],
[-4.0, 0.0, 0.0, 3.0],
[0.0, 0.0, 0.0, 0.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
dist_matrix = box_list_ops.sq_dist(boxes1, boxes2)
return dist_matrix
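    # sq_dist is the squared Euclidean distance between the 4-d corner
    # vectors, e.g. ||[0, 0, 0, 0] - [3, 4, 1, 0]||^2 = 9 + 16 + 1 + 0 = 26.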
exp_output = [[26, 25, 0], [18, 27, 6]]
dist_output = self.execute(graph_fn, [])
self.assertAllClose(dist_output, exp_output)
def test_boolean_mask(self):
def graph_fn():
corners = tf.constant(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
indicator = tf.constant([True, False, True, False, True], tf.bool)
boxes = box_list.BoxList(corners)
subset = box_list_ops.boolean_mask(boxes, indicator)
return subset.get()
expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
subset_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(subset_output, expected_subset)
def test_static_boolean_mask_with_field(self):
def graph_fn(corners, weights, indicator):
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
subset = box_list_ops.boolean_mask(
boxes,
indicator, ['weights'],
use_static_shapes=True,
indicator_sum=3)
return (subset.get_field('boxes'), subset.get_field('weights'))
corners = np.array(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]],
dtype=np.float32)
    indicator = np.array([True, False, True, False, True], dtype=bool)
weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32)
result_boxes, result_weights = self.execute_cpu(
graph_fn, [corners, weights, indicator])
expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
expected_weights = [[.1], [.5], [.9]]
self.assertAllClose(result_boxes, expected_boxes)
self.assertAllClose(result_weights, expected_weights)
def test_gather(self):
def graph_fn():
corners = tf.constant(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
indices = tf.constant([0, 2, 4], tf.int32)
boxes = box_list.BoxList(corners)
subset = box_list_ops.gather(boxes, indices)
return subset.get()
expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
subset_output = self.execute(graph_fn, [])
self.assertAllClose(subset_output, expected_subset)
def test_static_gather_with_field(self):
def graph_fn(corners, weights, indices):
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
subset = box_list_ops.gather(
boxes, indices, ['weights'], use_static_shapes=True)
return (subset.get_field('boxes'), subset.get_field('weights'))
corners = np.array([4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0],
4 * [4.0]], dtype=np.float32)
weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32)
indices = np.array([0, 2, 4], dtype=np.int32)
result_boxes, result_weights = self.execute(graph_fn,
[corners, weights, indices])
expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
expected_weights = [[.1], [.5], [.9]]
self.assertAllClose(result_boxes, expected_boxes)
self.assertAllClose(result_weights, expected_weights)
def test_gather_with_invalid_field(self):
corners = tf.constant([4 * [0.0], 4 * [1.0]])
indices = tf.constant([0, 1], tf.int32)
weights = tf.constant([[.1], [.3]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
with self.assertRaises(ValueError):
box_list_ops.gather(boxes, indices, ['foo', 'bar'])
def test_gather_with_invalid_inputs(self):
corners = tf.constant(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
indices_float32 = tf.constant([0, 2, 4], tf.float32)
boxes = box_list.BoxList(corners)
with self.assertRaises(ValueError):
_ = box_list_ops.gather(boxes, indices_float32)
indices_2d = tf.constant([[0, 2, 4]], tf.int32)
boxes = box_list.BoxList(corners)
with self.assertRaises(ValueError):
_ = box_list_ops.gather(boxes, indices_2d)
def test_gather_with_dynamic_indexing(self):
def graph_fn():
corners = tf.constant(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
weights = tf.constant([.5, .3, .7, .1, .9], tf.float32)
indices = tf.reshape(tf.where(tf.greater(weights, 0.4)), [-1])
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
subset = box_list_ops.gather(boxes, indices, ['weights'])
return subset.get(), subset.get_field('weights')
expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
expected_weights = [.5, .7, .9]
subset_output, weights_output = self.execute(graph_fn, [])
self.assertAllClose(subset_output, expected_subset)
self.assertAllClose(weights_output, expected_weights)
def test_sort_by_field_ascending_order(self):
exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
exp_scores = [.95, .9, .75, .6, .5, .3]
exp_weights = [.2, .45, .6, .75, .8, .92]
def graph_fn():
shuffle = [2, 4, 0, 5, 1, 3]
corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant(
[exp_scores[i] for i in shuffle], tf.float32))
boxes.add_field('weights', tf.constant(
[exp_weights[i] for i in shuffle], tf.float32))
sort_by_weight = box_list_ops.sort_by_field(
boxes,
'weights',
order=box_list_ops.SortOrder.ascend)
return [sort_by_weight.get(), sort_by_weight.get_field('scores'),
sort_by_weight.get_field('weights')]
corners_out, scores_out, weights_out = self.execute(graph_fn, [])
self.assertAllClose(corners_out, exp_corners)
self.assertAllClose(scores_out, exp_scores)
self.assertAllClose(weights_out, exp_weights)
def test_sort_by_field_descending_order(self):
exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
exp_scores = [.95, .9, .75, .6, .5, .3]
exp_weights = [.2, .45, .6, .75, .8, .92]
def graph_fn():
shuffle = [2, 4, 0, 5, 1, 3]
corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant(
[exp_scores[i] for i in shuffle], tf.float32))
boxes.add_field('weights', tf.constant(
[exp_weights[i] for i in shuffle], tf.float32))
sort_by_score = box_list_ops.sort_by_field(boxes, 'scores')
return (sort_by_score.get(), sort_by_score.get_field('scores'),
sort_by_score.get_field('weights'))
corners_out, scores_out, weights_out = self.execute(graph_fn, [])
self.assertAllClose(corners_out, exp_corners)
self.assertAllClose(scores_out, exp_scores)
self.assertAllClose(weights_out, exp_weights)
def test_sort_by_field_invalid_inputs(self):
corners = tf.constant([4 * [0.0], 4 * [0.5], 4 * [1.0], 4 * [2.0], 4 *
[3.0], 4 * [4.0]])
misc = tf.constant([[.95, .9], [.5, .3]], tf.float32)
weights = tf.constant([[.1, .2]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('misc', misc)
boxes.add_field('weights', weights)
with self.assertRaises(ValueError):
box_list_ops.sort_by_field(boxes, 'area')
with self.assertRaises(ValueError):
box_list_ops.sort_by_field(boxes, 'misc')
with self.assertRaises(ValueError):
box_list_ops.sort_by_field(boxes, 'weights')
def test_visualize_boxes_in_image(self):
def graph_fn():
image = tf.zeros((6, 4, 3))
corners = tf.constant([[0, 0, 5, 3],
[0, 0, 3, 2]], tf.float32)
boxes = box_list.BoxList(corners)
image_and_boxes = box_list_ops.visualize_boxes_in_image(image, boxes)
image_and_boxes_bw = tf.cast(
tf.greater(tf.reduce_sum(image_and_boxes, 2), 0.0), dtype=tf.float32)
return image_and_boxes_bw
exp_result = [[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, 0],
[0, 0, 0, 0]]
output = self.execute_cpu(graph_fn, [])
self.assertAllEqual(output.astype(int), exp_result)
def test_filter_field_value_equals(self):
def graph_fn():
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('classes', tf.constant([1, 2, 1, 2, 2, 1]))
filtered_boxes1 = box_list_ops.filter_field_value_equals(
boxes, 'classes', 1)
filtered_boxes2 = box_list_ops.filter_field_value_equals(
boxes, 'classes', 2)
return filtered_boxes1.get(), filtered_boxes2.get()
exp_output1 = [[0, 0, 1, 1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]]
exp_output2 = [[0, 0.1, 1, 1.1], [0, 10, 1, 11], [0, 10.1, 1, 11.1]]
filtered_output1, filtered_output2 = self.execute_cpu(graph_fn, [])
self.assertAllClose(filtered_output1, exp_output1)
self.assertAllClose(filtered_output2, exp_output2)
def test_filter_greater_than(self):
def graph_fn():
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.1, .75, .9, .5, .5, .8]))
thresh = .6
filtered_boxes = box_list_ops.filter_greater_than(boxes, thresh)
return filtered_boxes.get()
exp_output = [[0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]]
filtered_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(filtered_output, exp_output)
def test_clip_box_list(self):
def graph_fn():
boxlist = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32))
boxlist.add_field('classes', tf.constant([0, 0, 1, 1]))
boxlist.add_field('scores', tf.constant([0.75, 0.65, 0.3, 0.2]))
num_boxes = 2
clipped_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes)
return (clipped_boxlist.get(), clipped_boxlist.get_field('classes'),
clipped_boxlist.get_field('scores'))
expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]]
expected_classes = [0, 0]
expected_scores = [0.75, 0.65]
boxes_out, classes_out, scores_out = self.execute(graph_fn, [])
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllEqual(expected_classes, classes_out)
self.assertAllClose(expected_scores, scores_out)
def test_pad_box_list(self):
def graph_fn():
boxlist = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
boxlist.add_field('classes', tf.constant([0, 1]))
boxlist.add_field('scores', tf.constant([0.75, 0.2]))
num_boxes = 4
padded_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes)
return (padded_boxlist.get(), padded_boxlist.get_field('classes'),
padded_boxlist.get_field('scores'))
expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0, 0, 0, 0], [0, 0, 0, 0]]
expected_classes = [0, 1, 0, 0]
expected_scores = [0.75, 0.2, 0, 0]
boxes_out, classes_out, scores_out = self.execute(graph_fn, [])
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllEqual(expected_classes, classes_out)
self.assertAllClose(expected_scores, scores_out)
def test_select_random_box(self):
boxes = [[0., 0., 1., 1.],
[0., 1., 2., 3.],
[0., 2., 3., 4.]]
def graph_fn():
corners = tf.constant(boxes, dtype=tf.float32)
boxlist = box_list.BoxList(corners)
random_bbox, valid = box_list_ops.select_random_box(boxlist)
return random_bbox, valid
random_bbox_out, valid_out = self.execute(graph_fn, [])
norm_small = any(
[np.linalg.norm(random_bbox_out - box) < 1e-6 for box in boxes])
self.assertTrue(norm_small)
self.assertTrue(valid_out)
def test_select_random_box_with_empty_boxlist(self):
def graph_fn():
corners = tf.constant([], shape=[0, 4], dtype=tf.float32)
boxlist = box_list.BoxList(corners)
random_bbox, valid = box_list_ops.select_random_box(boxlist)
return random_bbox, valid
random_bbox_out, valid_out = self.execute_cpu(graph_fn, [])
expected_bbox_out = np.array([[-1., -1., -1., -1.]], dtype=np.float32)
self.assertAllEqual(expected_bbox_out, random_bbox_out)
self.assertFalse(valid_out)
def test_get_minimal_coverage_box(self):
def graph_fn():
boxes = [[0., 0., 1., 1.],
[-1., 1., 2., 3.],
[0., 2., 3., 4.]]
corners = tf.constant(boxes, dtype=tf.float32)
boxlist = box_list.BoxList(corners)
coverage_box = box_list_ops.get_minimal_coverage_box(boxlist)
return coverage_box
coverage_box_out = self.execute(graph_fn, [])
expected_coverage_box = [[-1., 0., 3., 4.]]
self.assertAllClose(expected_coverage_box, coverage_box_out)
def test_get_minimal_coverage_box_with_empty_boxlist(self):
def graph_fn():
corners = tf.constant([], shape=[0, 4], dtype=tf.float32)
boxlist = box_list.BoxList(corners)
coverage_box = box_list_ops.get_minimal_coverage_box(boxlist)
return coverage_box
coverage_box_out = self.execute(graph_fn, [])
self.assertAllClose([[0.0, 0.0, 1.0, 1.0]], coverage_box_out)
class ConcatenateTest(test_case.TestCase):
def test_invalid_input_box_list_list(self):
with self.assertRaises(ValueError):
box_list_ops.concatenate(None)
with self.assertRaises(ValueError):
box_list_ops.concatenate([])
with self.assertRaises(ValueError):
corners = tf.constant([[0, 0, 0, 0]], tf.float32)
boxlist = box_list.BoxList(corners)
box_list_ops.concatenate([boxlist, 2])
def test_concatenate_with_missing_fields(self):
corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32)
scores1 = tf.constant([1.0, 2.1])
corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32)
boxlist1 = box_list.BoxList(corners1)
boxlist1.add_field('scores', scores1)
boxlist2 = box_list.BoxList(corners2)
with self.assertRaises(ValueError):
box_list_ops.concatenate([boxlist1, boxlist2])
def test_concatenate_with_incompatible_field_shapes(self):
corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32)
scores1 = tf.constant([1.0, 2.1])
corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32)
scores2 = tf.constant([[1.0, 1.0], [2.1, 3.2]])
boxlist1 = box_list.BoxList(corners1)
boxlist1.add_field('scores', scores1)
boxlist2 = box_list.BoxList(corners2)
boxlist2.add_field('scores', scores2)
with self.assertRaises(ValueError):
box_list_ops.concatenate([boxlist1, boxlist2])
def test_concatenate_is_correct(self):
def graph_fn():
corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32)
scores1 = tf.constant([1.0, 2.1])
corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8], [1, 0, 5, 10]],
tf.float32)
scores2 = tf.constant([1.0, 2.1, 5.6])
boxlist1 = box_list.BoxList(corners1)
boxlist1.add_field('scores', scores1)
boxlist2 = box_list.BoxList(corners2)
boxlist2.add_field('scores', scores2)
result = box_list_ops.concatenate([boxlist1, boxlist2])
return result.get(), result.get_field('scores')
exp_corners = [[0, 0, 0, 0],
[1, 2, 3, 4],
[0, 3, 1, 6],
[2, 4, 3, 8],
[1, 0, 5, 10]]
exp_scores = [1.0, 2.1, 1.0, 2.1, 5.6]
corners_output, scores_output = self.execute(graph_fn, [])
self.assertAllClose(corners_output, exp_corners)
self.assertAllClose(scores_output, exp_scores)
class NonMaxSuppressionTest(test_case.TestCase):
def test_select_from_three_clusters(self):
def graph_fn():
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3]))
iou_thresh = .5
max_output_size = 3
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
return nms.get()
exp_nms = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 100, 1, 101]]
nms_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(nms_output, exp_nms)
def test_select_at_most_two_boxes_from_three_clusters(self):
def graph_fn():
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3]))
iou_thresh = .5
max_output_size = 2
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
return nms.get()
exp_nms = [[0, 10, 1, 11],
[0, 0, 1, 1]]
nms_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(nms_output, exp_nms)
def test_select_at_most_thirty_boxes_from_three_clusters(self):
def graph_fn():
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3]))
iou_thresh = .5
max_output_size = 30
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
return nms.get()
exp_nms = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 100, 1, 101]]
nms_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(nms_output, exp_nms)
def test_select_single_box(self):
def graph_fn():
corners = tf.constant([[0, 0, 1, 1]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.9]))
iou_thresh = .5
max_output_size = 3
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
return nms.get()
exp_nms = [[0, 0, 1, 1]]
nms_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(nms_output, exp_nms)
def test_select_from_ten_identical_boxes(self):
def graph_fn():
corners = tf.constant(10 * [[0, 0, 1, 1]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant(10 * [.9]))
iou_thresh = .5
max_output_size = 3
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
return nms.get()
exp_nms = [[0, 0, 1, 1]]
nms_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(nms_output, exp_nms)
def test_copy_extra_fields(self):
tensor1 = np.array([[1], [4]])
tensor2 = np.array([[1, 1], [2, 2]])
def graph_fn():
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('tensor1', tf.constant(tensor1))
boxes.add_field('tensor2', tf.constant(tensor2))
new_boxes = box_list.BoxList(tf.constant([[0, 0, 10, 10],
[1, 3, 5, 5]], tf.float32))
new_boxes = box_list_ops._copy_extra_fields(new_boxes, boxes)
return new_boxes.get_field('tensor1'), new_boxes.get_field('tensor2')
tensor1_out, tensor2_out = self.execute_cpu(graph_fn, [])
self.assertAllClose(tensor1, tensor1_out)
self.assertAllClose(tensor2, tensor2_out)
class CoordinatesConversionTest(test_case.TestCase):
def test_to_normalized_coordinates(self):
def graph_fn():
coordinates = tf.constant([[0, 0, 100, 100],
[25, 25, 75, 75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
normalized_boxlist = box_list_ops.to_normalized_coordinates(
boxlist, tf.shape(img)[1], tf.shape(img)[2])
return normalized_boxlist.get()
expected_boxes = [[0, 0, 1, 1],
[0.25, 0.25, 0.75, 0.75]]
normalized_boxes = self.execute(graph_fn, [])
self.assertAllClose(normalized_boxes, expected_boxes)
def test_to_normalized_coordinates_already_normalized(self):
def graph_fn():
coordinates = tf.constant([[0, 0, 1, 1],
[0.25, 0.25, 0.75, 0.75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
normalized_boxlist = box_list_ops.to_normalized_coordinates(
boxlist, tf.shape(img)[1], tf.shape(img)[2])
return normalized_boxlist.get()
with self.assertRaisesOpError('assertion failed'):
self.execute_cpu(graph_fn, [])
def test_to_absolute_coordinates(self):
def graph_fn():
coordinates = tf.constant([[0, 0, 1, 1],
[0.25, 0.25, 0.75, 0.75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
return absolute_boxlist.get()
expected_boxes = [[0, 0, 100, 100],
[25, 25, 75, 75]]
absolute_boxes = self.execute(graph_fn, [])
self.assertAllClose(absolute_boxes, expected_boxes)
  def test_to_absolute_coordinates_already_absolute(self):
def graph_fn():
coordinates = tf.constant([[0, 0, 100, 100],
[25, 25, 75, 75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
return absolute_boxlist.get()
with self.assertRaisesOpError('assertion failed'):
self.execute_cpu(graph_fn, [])
def test_convert_to_normalized_and_back(self):
coordinates = np.random.uniform(size=(100, 4))
coordinates = np.round(np.sort(coordinates) * 200)
coordinates[:, 2:4] += 1
coordinates[99, :] = [0, 0, 201, 201]
def graph_fn():
img = tf.ones((128, 202, 202, 3))
boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32))
boxlist = box_list_ops.to_normalized_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
boxlist = box_list_ops.to_absolute_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
return boxlist.get()
out = self.execute(graph_fn, [])
self.assertAllClose(out, coordinates)
def test_convert_to_absolute_and_back(self):
coordinates = np.random.uniform(size=(100, 4))
coordinates = np.sort(coordinates)
coordinates[99, :] = [0, 0, 1, 1]
def graph_fn():
img = tf.ones((128, 202, 202, 3))
boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32))
boxlist = box_list_ops.to_absolute_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
boxlist = box_list_ops.to_normalized_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
return boxlist.get()
out = self.execute(graph_fn, [])
self.assertAllClose(out, coordinates)
def test_to_absolute_coordinates_maximum_coordinate_check(self):
def graph_fn():
coordinates = tf.constant([[0, 0, 1.2, 1.2],
[0.25, 0.25, 0.75, 0.75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
absolute_boxlist = box_list_ops.to_absolute_coordinates(
boxlist,
tf.shape(img)[1],
tf.shape(img)[2],
maximum_normalized_coordinate=1.1)
return absolute_boxlist.get()
with self.assertRaisesOpError('assertion failed'):
self.execute_cpu(graph_fn, [])
class BoxRefinementTest(test_case.TestCase):
def test_box_voting(self):
def graph_fn():
candidates = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.6, 0.6, 0.8, 0.8]], tf.float32))
candidates.add_field('ExtraField', tf.constant([1, 2]))
pool = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8]], tf.float32))
pool.add_field('scores', tf.constant([0.75, 0.25, 0.3]))
averaged_boxes = box_list_ops.box_voting(candidates, pool)
return (averaged_boxes.get(), averaged_boxes.get_field('scores'),
averaged_boxes.get_field('ExtraField'))
expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]]
expected_scores = [0.5, 0.3]
boxes_out, scores_out, extra_field_out = self.execute(graph_fn, [])
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllClose(expected_scores, scores_out)
self.assertAllEqual(extra_field_out, [1, 2])
def test_box_voting_fails_with_negative_scores(self):
def graph_fn():
candidates = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32))
pool = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32))
pool.add_field('scores', tf.constant([-0.2]))
averaged_boxes = box_list_ops.box_voting(candidates, pool)
return averaged_boxes.get()
with self.assertRaisesOpError('Scores must be non negative'):
self.execute_cpu(graph_fn, [])
def test_box_voting_fails_when_unmatched(self):
def graph_fn():
candidates = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32))
pool = box_list.BoxList(tf.constant([[0.6, 0.6, 0.8, 0.8]], tf.float32))
pool.add_field('scores', tf.constant([0.2]))
averaged_boxes = box_list_ops.box_voting(candidates, pool)
return averaged_boxes.get()
with self.assertRaisesOpError('Each box in selected_boxes must match '
'with at least one box in pool_boxes.'):
self.execute_cpu(graph_fn, [])
def test_refine_boxes(self):
def graph_fn():
pool = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8]], tf.float32))
pool.add_field('ExtraField', tf.constant([1, 2, 3]))
pool.add_field('scores', tf.constant([0.75, 0.25, 0.3]))
averaged_boxes = box_list_ops.refine_boxes(pool, 0.5, 10)
return (averaged_boxes.get(), averaged_boxes.get_field('scores'),
averaged_boxes.get_field('ExtraField'))
boxes_out, scores_out, extra_field_out = self.execute_cpu(graph_fn, [])
expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]]
expected_scores = [0.5, 0.3]
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllClose(expected_scores, scores_out)
self.assertAllEqual(extra_field_out, [1, 3])
def test_refine_boxes_multi_class(self):
def graph_fn():
pool = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32))
pool.add_field('classes', tf.constant([0, 0, 1, 1]))
pool.add_field('scores', tf.constant([0.75, 0.25, 0.3, 0.2]))
averaged_boxes = box_list_ops.refine_boxes_multi_class(pool, 3, 0.5, 10)
return (averaged_boxes.get(), averaged_boxes.get_field('scores'),
averaged_boxes.get_field('classes'))
boxes_out, scores_out, extra_field_out = self.execute_cpu(graph_fn, [])
expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8],
[0.2, 0.2, 0.3, 0.3]]
expected_scores = [0.5, 0.3, 0.2]
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllClose(expected_scores, scores_out)
self.assertAllEqual(extra_field_out, [0, 1, 1])
def test_sample_boxes_by_jittering(self):
def graph_fn():
boxes = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4],
[0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8],
[0.2, 0.2, 0.3, 0.3]], tf.float32))
sampled_boxes = box_list_ops.sample_boxes_by_jittering(
boxlist=boxes, num_boxes_to_sample=10)
iou = box_list_ops.iou(boxes, sampled_boxes)
iou_max = tf.reduce_max(iou, axis=0)
return sampled_boxes.get(), iou_max
np_sampled_boxes, np_iou_max = self.execute(graph_fn, [])
self.assertAllEqual(np_sampled_boxes.shape, [10, 4])
self.assertAllGreater(np_iou_max, 0.3)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/box_list_ops_test.py | box_list_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract detection model.
This file defines a generic base class for detection models. Programs that are
designed to work with arbitrary detection models should only depend on this
class. We intend for the functions in this class to follow tensor-in/tensor-out
design, thus all functions have tensors or lists/dictionaries holding tensors as
inputs and outputs.
Abstractly, detection models predict output tensors given input images
which can be passed to a loss function at training time or passed to a
postprocessing function at eval time. The computation graphs at a high level
consequently look as follows:
Training time:
inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor)
Evaluation time:
inputs (images tensor) -> preprocess -> predict -> postprocess
-> outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor)
DetectionModels must thus implement four functions: (1) preprocess, (2) predict,
(3) postprocess and (4) loss. DetectionModels should make no assumptions about
the input size or aspect ratio --- they are responsible for doing any
resize/reshaping necessary (see docstring for the preprocess function).
Output classes are always integers in the range [0, num_classes). Any mapping
of these integers to semantic labels is to be handled outside of this class.
Images are resized in the `preprocess` method. All of `preprocess`, `predict`,
and `postprocess` should be reentrant.
The `preprocess` method runs `image_resizer_fn` that returns resized_images and
`true_image_shapes`. Since `image_resizer_fn` can pad the images with zeros,
true_image_shapes indicate the slices that contain the image without padding.
This is useful for padding images to be a fixed size for batching.
The `postprocess` method uses the true image shapes to clip predictions that lie
outside of images.
By default, DetectionModels produce bounding box detections; however, we support
a handful of auxiliary annotations associated with each bounding box, namely,
instance masks and keypoints.
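A minimal sketch of the two flows above (here `MyDetector` stands for any
concrete DetectionModel subclass and `images` for a [batch, height, width,
channels] float32 tensor; both names are illustrative):
  model = MyDetector(num_classes=90)
  preprocessed_images, true_image_shapes = model.preprocess(images)
  prediction_dict = model.predict(preprocessed_images, true_image_shapes)
  # Training time (requires provide_groundtruth to have been called first):
  losses_dict = model.loss(prediction_dict, true_image_shapes)
  # Evaluation time:
  detections = model.postprocess(prediction_dict, true_image_shapes)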
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
# If using a new enough version of TensorFlow, detection models should be a
# tf module or keras model for tracking.
try:
_BaseClass = tf.keras.layers.Layer
except AttributeError:
_BaseClass = object
class DetectionModel(six.with_metaclass(abc.ABCMeta, _BaseClass)):
"""Abstract base class for detection models.
  Extends tf.keras.layers.Layer (when available) to guarantee variable
  tracking.
"""
def __init__(self, num_classes):
"""Constructor.
Args:
num_classes: number of classes. Note that num_classes *does not* include
background categories that might be implicitly predicted in various
implementations.
"""
self._num_classes = num_classes
self._groundtruth_lists = {}
super(DetectionModel, self).__init__()
@property
def num_classes(self):
return self._num_classes
def groundtruth_lists(self, field):
"""Access list of groundtruth tensors.
Args:
field: a string key, options are
fields.BoxListFields.{boxes,classes,masks,mask_weights,keypoints,
keypoint_visibilities, densepose_*, track_ids,
temporal_offsets, track_match_flags}
fields.InputDataFields.is_annotated.
Returns:
a list of tensors holding groundtruth information (see also
provide_groundtruth function below), with one entry for each image in the
batch.
Raises:
RuntimeError: if the field has not been provided via provide_groundtruth.
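    For example, once groundtruth has been provided (see provide_groundtruth
    below), the per-image box tensors can be retrieved with:
      boxes_list = model.groundtruth_lists(fields.BoxListFields.boxes)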
"""
if field not in self._groundtruth_lists:
raise RuntimeError('Groundtruth tensor {} has not been provided'.format(
field))
return self._groundtruth_lists[field]
def groundtruth_has_field(self, field):
"""Determines whether the groundtruth includes the given field.
Args:
field: a string key, options are
fields.BoxListFields.{boxes,classes,masks,mask_weights,keypoints,
keypoint_visibilities, densepose_*, track_ids} or
fields.InputDataFields.is_annotated.
Returns:
True if the groundtruth includes the given field, False otherwise.
"""
return field in self._groundtruth_lists
@staticmethod
def get_side_inputs(features):
"""Get side inputs from input features.
This placeholder method provides a way for a meta-architecture to specify
how to grab additional side inputs from input features (in addition to the
image itself) and allows models to depend on contextual information. By
    default, detection models do not use side information (and thus this
    method returns an empty dictionary by default). However, it can be
    overridden if side inputs are necessary.
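    A hypothetical override for a model that consumes per-image context
    features might look like (the feature key is illustrative):
      @staticmethod
      def get_side_inputs(features):
        return {'context_features': features['context_features']}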
Args:
features: A dictionary of tensors.
Returns:
An empty dictionary by default.
"""
return {}
@abc.abstractmethod
def preprocess(self, inputs):
"""Input preprocessing.
To be overridden by implementations.
This function is responsible for any scaling/shifting of input values that
is necessary prior to running the detector on an input image.
It is also responsible for any resizing, padding that might be necessary
as images are assumed to arrive in arbitrary sizes. While this function
could conceivably be part of the predict method (below), it is often
convenient to keep these separate --- for example, we may want to preprocess
on one device, place onto a queue, and let another device (e.g., the GPU)
handle prediction.
A few important notes about the preprocess function:
+ We assume that this operation does not have any trainable variables nor
does it affect the groundtruth annotations in any way (thus data
augmentation operations such as random cropping should be performed
externally).
    + There is no assumption that the batch size in this function is the same as
the batch size in the predict function. In fact, we recommend calling the
preprocess function prior to calling any batching operations (which should
happen outside of the model) and thus assuming that batch sizes are equal
to 1 in the preprocess function.
+ There is also no explicit assumption that the output resolutions
must be fixed across inputs --- this is to support "fully convolutional"
settings in which input images can have different shapes/resolutions.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
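    As a sketch, a typical implementation normalizes pixel values and then
    resizes, e.g. (`resize_images_and_return_shapes` is a helper in
    object_detection.utils.shape_utils; its use here is illustrative):
      def preprocess(self, inputs):
        normalized_inputs = (2.0 / 255.0) * inputs - 1.0
        return shape_utils.resize_images_and_return_shapes(
            normalized_inputs, self._image_resizer_fn)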
"""
pass
@abc.abstractmethod
def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
"""Predict prediction tensors from inputs tensor.
Outputs of this function can be passed to loss or postprocess functions.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**side_inputs: additional tensors that are required by the network.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
pass
@abc.abstractmethod
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Convert predicted output tensors to final detections.
This stage typically performs a few things such as
* Non-Max Suppression to remove overlapping detection boxes.
* Score conversion and background class removal.
Outputs adhere to the following conventions:
* Classes are integers in [0, num_classes); background classes are removed
and the first non-background class is mapped to 0. If the model produces
class-agnostic detections, then no output is produced for classes.
* Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max]
format and normalized relative to the image window.
* `num_detections` is provided for settings where detections are padded to a
fixed number of boxes.
* We do not specifically assume any kind of probabilistic interpretation
of the scores --- the only important thing is their relative ordering.
Thus implementations of the postprocess function are free to output
logits, probabilities, calibrated probabilities, or anything else.
Args:
prediction_dict: a dictionary holding prediction tensors.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary containing the following fields
detection_boxes: [batch, max_detections, 4]
detection_scores: [batch, max_detections]
detection_classes: [batch, max_detections]
(If a model is producing class-agnostic detections, this field may be
missing)
detection_masks: [batch, max_detections, mask_height, mask_width]
(optional)
detection_keypoints: [batch, max_detections, num_keypoints, 2]
(optional)
detection_keypoint_scores: [batch, max_detections, num_keypoints]
(optional)
detection_surface_coords: [batch, max_detections, mask_height,
mask_width, 2] (optional)
num_detections: [batch]
In addition to the above fields this stage also outputs the following
raw tensors:
raw_detection_boxes: [batch, total_detections, 4] tensor containing
all detection boxes from `prediction_dict` in the format
        [ymin, xmin, ymax, xmax] and in normalized coordinates.
raw_detection_scores: [batch, total_detections,
num_classes_with_background] tensor of class score logits for
raw detection boxes.
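    For example, the valid (unpadded) boxes for the first image in the batch
    can be recovered from these outputs as (illustrative):
      num = int(detections['num_detections'][0])
      first_image_boxes = detections['detection_boxes'][0][:num]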
"""
pass
@abc.abstractmethod
def loss(self, prediction_dict, true_image_shapes):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
a dictionary mapping strings (loss names) to scalar tensors representing
loss values.
"""
pass
def provide_groundtruth(
self,
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list=None,
groundtruth_mask_weights_list=None,
groundtruth_keypoints_list=None,
groundtruth_keypoint_visibilities_list=None,
groundtruth_dp_num_points_list=None,
groundtruth_dp_part_ids_list=None,
groundtruth_dp_surface_coords_list=None,
groundtruth_track_ids_list=None,
groundtruth_temporal_offsets_list=None,
groundtruth_track_match_flags_list=None,
groundtruth_weights_list=None,
groundtruth_confidences_list=None,
groundtruth_is_crowd_list=None,
groundtruth_group_of_list=None,
groundtruth_area_list=None,
is_annotated_list=None,
groundtruth_labeled_classes=None,
groundtruth_verified_neg_classes=None,
groundtruth_not_exhaustive_classes=None,
groundtruth_keypoint_depths_list=None,
groundtruth_keypoint_depth_weights_list=None):
"""Provide groundtruth tensors.
Args:
groundtruth_boxes_list: a list of 2-D tf.float32 tensors of shape
[num_boxes, 4] containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max]
format and assumed to be normalized and clipped
relative to the image window with y_min <= y_max and x_min <= x_max.
groundtruth_classes_list: a list of 2-D tf.float32 one-hot (or k-hot)
tensors of shape [num_boxes, num_classes] containing the class targets
with the 0th index assumed to map to the first non-background class.
groundtruth_masks_list: a list of 3-D tf.float32 tensors of
shape [num_boxes, height_in, width_in] containing instance
masks with values in {0, 1}. If None, no masks are provided.
Mask resolution `height_in`x`width_in` must agree with the resolution
of the input image tensor provided to the `preprocess` function.
groundtruth_mask_weights_list: a list of 1-D tf.float32 tensors of shape
[num_boxes] with weights for each instance mask.
groundtruth_keypoints_list: a list of 3-D tf.float32 tensors of
shape [num_boxes, num_keypoints, 2] containing keypoints.
Keypoints are assumed to be provided in normalized coordinates and
missing keypoints should be encoded as NaN (but it is recommended to use
`groundtruth_keypoint_visibilities_list`).
groundtruth_keypoint_visibilities_list: a list of 3-D tf.bool tensors
of shape [num_boxes, num_keypoints] containing keypoint visibilities.
groundtruth_dp_num_points_list: a list of 1-D tf.int32 tensors of shape
[num_boxes] containing the number of DensePose sampled points.
groundtruth_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape
[num_boxes, max_sampled_points] containing the DensePose part ids
(0-indexed) for each sampled point. Note that there may be padding.
groundtruth_dp_surface_coords_list: a list of 3-D tf.float32 tensors of
shape [num_boxes, max_sampled_points, 4] containing the DensePose
surface coordinates for each sampled point. Note that there may be
padding.
groundtruth_track_ids_list: a list of 1-D tf.int32 tensors of shape
[num_boxes] containing the track IDs of groundtruth objects.
groundtruth_temporal_offsets_list: a list of 2-D tf.float32 tensors
of shape [num_boxes, 2] containing the spatial offsets of objects'
centers compared with the previous frame.
groundtruth_track_match_flags_list: a list of 1-D tf.float32 tensors
of shape [num_boxes] containing 0-1 flags that indicate if an object
has existed in the previous frame.
groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape
[num_boxes, num_classes] containing class confidences for groundtruth
boxes.
groundtruth_is_crowd_list: A list of 1-D tf.bool tensors of shape
[num_boxes] containing is_crowd annotations.
groundtruth_group_of_list: A list of 1-D tf.bool tensors of shape
[num_boxes] containing group_of annotations.
groundtruth_area_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing the area (in the original absolute coordinates)
of the annotations.
is_annotated_list: A list of scalar tf.bool tensors indicating whether
images have been labeled or not.
groundtruth_labeled_classes: A list of 1-D tf.float32 tensors of shape
[num_classes], containing label indices encoded as k-hot of the classes
that are exhaustively annotated.
groundtruth_verified_neg_classes: A list of 1-D tf.float32 tensors of
shape [num_classes], containing a K-hot representation of classes
which were verified as not present in the image.
groundtruth_not_exhaustive_classes: A list of 1-D tf.float32 tensors of
shape [num_classes], containing a K-hot representation of classes
which don't have all of their instances marked exhaustively.
groundtruth_keypoint_depths_list: a list of 2-D tf.float32 tensors
of shape [num_boxes, num_keypoints] containing keypoint relative depths.
groundtruth_keypoint_depth_weights_list: a list of 2-D tf.float32 tensors
of shape [num_boxes, num_keypoints] containing the weights of the
relative depths.
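    A minimal call provides only boxes and one-hot classes, e.g. (values are
    illustrative; box coordinates are normalized):
      model.provide_groundtruth(
          groundtruth_boxes_list=[tf.constant([[0.1, 0.1, 0.5, 0.5]])],
          groundtruth_classes_list=[tf.constant([[0.0, 1.0, 0.0]])])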
"""
self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list
self._groundtruth_lists[
fields.BoxListFields.classes] = groundtruth_classes_list
if groundtruth_weights_list:
self._groundtruth_lists[fields.BoxListFields.
weights] = groundtruth_weights_list
if groundtruth_confidences_list:
self._groundtruth_lists[fields.BoxListFields.
confidences] = groundtruth_confidences_list
if groundtruth_masks_list:
self._groundtruth_lists[
fields.BoxListFields.masks] = groundtruth_masks_list
if groundtruth_mask_weights_list:
self._groundtruth_lists[
fields.BoxListFields.mask_weights] = groundtruth_mask_weights_list
if groundtruth_keypoints_list:
self._groundtruth_lists[
fields.BoxListFields.keypoints] = groundtruth_keypoints_list
if groundtruth_keypoint_visibilities_list:
self._groundtruth_lists[
fields.BoxListFields.keypoint_visibilities] = (
groundtruth_keypoint_visibilities_list)
if groundtruth_keypoint_depths_list:
self._groundtruth_lists[
fields.BoxListFields.keypoint_depths] = (
groundtruth_keypoint_depths_list)
if groundtruth_keypoint_depth_weights_list:
self._groundtruth_lists[
fields.BoxListFields.keypoint_depth_weights] = (
groundtruth_keypoint_depth_weights_list)
if groundtruth_dp_num_points_list:
self._groundtruth_lists[
fields.BoxListFields.densepose_num_points] = (
groundtruth_dp_num_points_list)
if groundtruth_dp_part_ids_list:
self._groundtruth_lists[
fields.BoxListFields.densepose_part_ids] = (
groundtruth_dp_part_ids_list)
if groundtruth_dp_surface_coords_list:
self._groundtruth_lists[
fields.BoxListFields.densepose_surface_coords] = (
groundtruth_dp_surface_coords_list)
if groundtruth_track_ids_list:
self._groundtruth_lists[
fields.BoxListFields.track_ids] = groundtruth_track_ids_list
if groundtruth_temporal_offsets_list:
self._groundtruth_lists[
fields.BoxListFields.temporal_offsets] = (
groundtruth_temporal_offsets_list)
if groundtruth_track_match_flags_list:
self._groundtruth_lists[
fields.BoxListFields.track_match_flags] = (
groundtruth_track_match_flags_list)
if groundtruth_is_crowd_list:
self._groundtruth_lists[
fields.BoxListFields.is_crowd] = groundtruth_is_crowd_list
if groundtruth_group_of_list:
self._groundtruth_lists[
fields.BoxListFields.group_of] = groundtruth_group_of_list
if groundtruth_area_list:
self._groundtruth_lists[
fields.InputDataFields.groundtruth_area] = groundtruth_area_list
if is_annotated_list:
self._groundtruth_lists[
fields.InputDataFields.is_annotated] = is_annotated_list
if groundtruth_labeled_classes:
self._groundtruth_lists[
fields.InputDataFields
.groundtruth_labeled_classes] = groundtruth_labeled_classes
if groundtruth_verified_neg_classes:
self._groundtruth_lists[
fields.InputDataFields
.groundtruth_verified_neg_classes] = groundtruth_verified_neg_classes
if groundtruth_not_exhaustive_classes:
self._groundtruth_lists[
fields.InputDataFields
.groundtruth_not_exhaustive_classes] = (
groundtruth_not_exhaustive_classes)
@abc.abstractmethod
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
pass
@abc.abstractmethod
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
"""Returns a map of variables to load from a foreign checkpoint.
Returns a map of variable names to load from a checkpoint to variables in
the model graph. This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
load_all_detection_checkpoint_vars: whether to load all variables (when
`fine_tune_checkpoint_type` is `detection`). If False, only variables
within the feature extractor scope are included. Default False.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
pass
@abc.abstractmethod
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of variables to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (above) is intended
to be used to restore Slim-based models when running Tensorflow 1.x.
TODO(jonathanhuang,rathodv): Check tf_version and raise unimplemented
error for both restore_map and restore_from_objects depending on version.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
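    As a sketch, an implementation might expose its feature extractor for
    classification-style restoration (key and attribute names are
    illustrative):
      def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
        if fine_tune_checkpoint_type == 'classification':
          return {'feature_extractor': self._feature_extractor}
        return {'model': self}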
"""
pass
@abc.abstractmethod
def updates(self):
"""Returns a list of update operators for this model.
Returns a list of update operators for this model that must be executed at
each training step. The estimator's train op needs to have a control
dependency on these updates.
Returns:
A list of update operators.
"""
pass
def call(self, images):
"""Returns detections from a batch of images.
    This method calls the preprocess, predict and postprocess functions
sequentially and returns the output.
Args:
images: a [batch_size, height, width, channels] float tensor.
Returns:
      detections: The dict of tensors returned by the postprocess function.
"""
preprocessed_images, shapes = self.preprocess(images)
prediction_dict = self.predict(preprocessed_images, shapes)
return self.postprocess(prediction_dict, shapes)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/model.py | model.py |
123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/__init__.py | __init__.py |
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.third_party.tensorflow_models.object_detection.core.class_agnostic_nms."""
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from object_detection.core import post_processing
from object_detection.core import standard_fields as fields
from object_detection.utils import test_case
class ClassAgnosticNonMaxSuppressionTest(test_case.TestCase,
parameterized.TestCase):
def test_class_agnostic_nms_select_with_shared_boxes(self):
def graph_fn():
boxes = tf.constant(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], tf.float32)
scores = tf.constant([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]])
score_thresh = 0.1
iou_thresh = .5
max_classes_per_detection = 1
max_output_size = 4
nms, _ = post_processing.class_agnostic_non_max_suppression(
boxes, scores, score_thresh, iou_thresh, max_classes_per_detection,
max_output_size)
return (nms.get(), nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes))
exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002],
[0, 100, 1, 101]]
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 0, 1, 0]
(nms_corners_output, nms_scores_output,
nms_classes_output) = self.execute_cpu(graph_fn, [])
self.assertAllClose(nms_corners_output, exp_nms_corners)
self.assertAllClose(nms_scores_output, exp_nms_scores)
self.assertAllClose(nms_classes_output, exp_nms_classes)
def test_class_agnostic_nms_select_with_per_class_boxes(self):
def graph_fn():
boxes = tf.constant(
[[[4, 5, 9, 10], [0, 0, 1, 1]],
[[0, 0.1, 1, 1.1], [4, 5, 9, 10]],
[[0, -0.1, 1, 0.9], [4, 5, 9, 10]],
[[0, 10, 1, 11], [4, 5, 9, 10]],
[[0, 10.1, 1, 11.1], [4, 5, 9, 10]],
[[0, 100, 1, 101], [4, 5, 9, 10]],
[[4, 5, 9, 10], [0, 1000, 1, 1002]],
[[4, 5, 9, 10], [0, 1000, 1, 1002.1]]], tf.float32)
scores = tf.constant([[.01, 0.9],
[.75, 0.05],
[.6, 0.01],
[.95, 0],
[.5, 0.01],
[.3, 0.01],
[.01, .85],
[.01, .5]])
score_thresh = 0.1
iou_thresh = .5
max_classes_per_detection = 1
max_output_size = 4
nms, _ = post_processing.class_agnostic_non_max_suppression(
boxes, scores, score_thresh, iou_thresh, max_classes_per_detection,
max_output_size)
return (nms.get(), nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes))
(nms_corners_output, nms_scores_output,
nms_classes_output) = self.execute_cpu(graph_fn, [])
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002],
[0, 100, 1, 101]]
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 1, 1, 0]
self.assertAllClose(nms_corners_output, exp_nms_corners)
self.assertAllClose(nms_scores_output, exp_nms_scores)
self.assertAllClose(nms_classes_output, exp_nms_classes)
  # Two cases are tested here: using and not using static shapes.
  # The two test cases are named so that each can be selected individually
  # during testing, e.g. with a flag of
# '--test_filter=ClassAgnosticNonMaxSuppressionTest.test_batch_classagnostic_nms_with_batch_size_1'
# or
# '--test_filter=ClassAgnosticNonMaxSuppressionTest.test_batch_classagnostic_nms_with_batch_size_1_use_static_shapes'.
@parameterized.named_parameters(('', False), ('_use_static_shapes', True))
def test_batch_classagnostic_nms_with_batch_size_1(self,
use_static_shapes=False):
def graph_fn():
boxes = tf.constant(
[[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]]], tf.float32)
scores = tf.constant([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]])
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
max_classes_per_detection = 1
use_class_agnostic_nms = True
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
nmsed_additional_fields,
num_detections) = post_processing.batch_multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_size_per_class=max_output_size,
max_total_size=max_output_size,
use_class_agnostic_nms=use_class_agnostic_nms,
use_static_shapes=use_static_shapes,
max_classes_per_detection=max_classes_per_detection)
self.assertIsNone(nmsed_masks)
self.assertIsNone(nmsed_additional_fields)
return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections)
exp_nms_corners = [[[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002],
[0, 100, 1, 101]]]
exp_nms_scores = [[.95, .9, .85, .3]]
exp_nms_classes = [[0, 0, 1, 0]]
(nmsed_boxes, nmsed_scores, nmsed_classes,
num_detections) = self.execute_cpu(graph_fn, [])
self.assertAllClose(nmsed_boxes, exp_nms_corners)
self.assertAllClose(nmsed_scores, exp_nms_scores)
self.assertAllClose(nmsed_classes, exp_nms_classes)
self.assertEqual(num_detections, [4])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/class_agnostic_nms_test.py | class_agnostic_nms_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.research.vale.object_detection.minibatch_sampler."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import minibatch_sampler
from object_detection.utils import test_case
class MinibatchSamplerTest(test_case.TestCase):
def test_subsample_indicator_when_more_true_elements_than_num_samples(self):
np_indicator = np.array([True, False, True, False, True, True, False])
def graph_fn(indicator):
samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator, 3)
return samples
samples_out = self.execute(graph_fn, [np_indicator])
    self.assertEqual(np.sum(samples_out), 3)
self.assertAllEqual(samples_out,
np.logical_and(samples_out, np_indicator))
def test_subsample_indicator_when_less_true_elements_than_num_samples(self):
np_indicator = np.array([True, False, True, False, True, True, False])
def graph_fn(indicator):
samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator, 5)
return samples
samples_out = self.execute(graph_fn, [np_indicator])
    self.assertEqual(np.sum(samples_out), 4)
self.assertAllEqual(samples_out,
np.logical_and(samples_out, np_indicator))
def test_subsample_indicator_when_num_samples_is_zero(self):
np_indicator = np.array([True, False, True, False, True, True, False])
def graph_fn(indicator):
samples_none = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator, 0)
return samples_none
samples_out = self.execute(graph_fn, [np_indicator])
self.assertAllEqual(
np.zeros_like(samples_out, dtype=bool),
samples_out)
def test_subsample_indicator_when_indicator_all_false(self):
    indicator_empty = np.zeros([0], dtype=bool)
def graph_fn(indicator):
samples_empty = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator, 4)
return samples_empty
samples_out = self.execute(graph_fn, [indicator_empty])
self.assertEqual(0, samples_out.size)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/minibatch_sampler_test.py | minibatch_sampler_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.core.post_processing."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import post_processing
from object_detection.core import standard_fields as fields
from object_detection.utils import test_case
class MulticlassNonMaxSuppressionTest(test_case.TestCase):
def test_multiclass_nms_select_with_shared_boxes_cpu_only(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
def graph_fn(boxes, scores):
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
nms, _ = post_processing.multiclass_non_max_suppression(
boxes, scores, score_thresh, iou_thresh, max_output_size)
return (nms.get(), nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes))
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002],
[0, 100, 1, 101]]
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 0, 1, 0]
(nms_corners_output, nms_scores_output,
nms_classes_output) = self.execute_cpu(graph_fn, [boxes, scores])
self.assertAllClose(nms_corners_output, exp_nms_corners)
self.assertAllClose(nms_scores_output, exp_nms_scores)
self.assertAllClose(nms_classes_output, exp_nms_classes)
def test_multiclass_nms_select_with_shared_boxes_pad_to_max_output_size(self):
boxes = np.array([[[0, 0, 1, 1]],
[[0, 0.1, 1, 1.1]],
[[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]],
[[0, 10.1, 1, 11.1]],
[[0, 100, 1, 101]],
[[0, 1000, 1, 1002]],
[[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05],
[.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01],
[.01, .85], [.01, .5]], np.float32)
score_thresh = 0.1
iou_thresh = .5
max_size_per_class = 4
max_output_size = 5
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002],
[0, 100, 1, 101]]
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 0, 1, 0]
def graph_fn(boxes, scores):
nms, num_valid_nms_boxes = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_size_per_class,
max_total_size=max_output_size,
pad_to_max_output_size=True)
return [nms.get(), nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes), num_valid_nms_boxes]
[nms_corners_output, nms_scores_output, nms_classes_output,
num_valid_nms_boxes] = self.execute(graph_fn, [boxes, scores])
self.assertEqual(num_valid_nms_boxes, 4)
self.assertAllClose(nms_corners_output[0:num_valid_nms_boxes],
exp_nms_corners)
self.assertAllClose(nms_scores_output[0:num_valid_nms_boxes],
exp_nms_scores)
self.assertAllClose(nms_classes_output[0:num_valid_nms_boxes],
exp_nms_classes)
def test_multiclass_nms_select_with_shared_boxes_given_keypoints(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
num_keypoints = 6
keypoints = np.tile(np.reshape(range(8), [8, 1, 1]),
[1, num_keypoints, 2]).astype(np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
def graph_fn(boxes, scores, keypoints):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_output_size,
pad_to_max_output_size=True,
additional_fields={fields.BoxListFields.keypoints: keypoints})
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes),
nms.get_field(fields.BoxListFields.keypoints), nms_valid
]
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002],
[0, 100, 1, 101]]
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 0, 1, 0]
exp_nms_keypoints = np.tile(
np.reshape(np.array([3, 0, 6, 5], np.float32), [4, 1, 1]),
[1, num_keypoints, 2])
(nms_corners_output, nms_scores_output, nms_classes_output, nms_keypoints,
nms_valid) = self.execute(graph_fn, [boxes, scores, keypoints])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
self.assertAllEqual(nms_keypoints[:nms_valid], exp_nms_keypoints)
def test_multiclass_nms_with_shared_boxes_given_keypoint_heatmaps(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
num_boxes = boxes.shape[0]
heatmap_height = 5
heatmap_width = 5
num_keypoints = 17
keypoint_heatmaps = np.ones(
[num_boxes, heatmap_height, heatmap_width, num_keypoints],
dtype=np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002],
[0, 100, 1, 101]]
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 0, 1, 0]
exp_nms_keypoint_heatmaps = np.ones(
(4, heatmap_height, heatmap_width, num_keypoints), dtype=np.float32)
def graph_fn(boxes, scores, keypoint_heatmaps):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_output_size,
pad_to_max_output_size=True,
additional_fields={
fields.BoxListFields.keypoint_heatmaps: keypoint_heatmaps
})
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes),
nms.get_field(fields.BoxListFields.keypoint_heatmaps), nms_valid
]
(nms_corners_output, nms_scores_output, nms_classes_output,
nms_keypoint_heatmaps,
nms_valid) = self.execute(graph_fn, [boxes, scores, keypoint_heatmaps])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
self.assertAllEqual(nms_keypoint_heatmaps[:nms_valid],
exp_nms_keypoint_heatmaps)
def test_multiclass_nms_with_additional_fields(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
coarse_boxes_key = 'coarse_boxes'
coarse_boxes = np.array(
[[0.1, 0.1, 1.1, 1.1], [0.1, 0.2, 1.1, 1.2], [0.1, -0.2, 1.1, 1.0],
[0.1, 10.1, 1.1, 11.1], [0.1, 10.2, 1.1, 11.2], [
0.1, 100.1, 1.1, 101.1
], [0.1, 1000.1, 1.1, 1002.1], [0.1, 1000.1, 1.1, 1002.2]], np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = np.array([[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002],
[0, 100, 1, 101]], dtype=np.float32)
exp_nms_coarse_corners = np.array([[0.1, 10.1, 1.1, 11.1],
[0.1, 0.1, 1.1, 1.1],
[0.1, 1000.1, 1.1, 1002.1],
[0.1, 100.1, 1.1, 101.1]],
dtype=np.float32)
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 0, 1, 0]
def graph_fn(boxes, scores, coarse_boxes):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_output_size,
pad_to_max_output_size=True,
additional_fields={coarse_boxes_key: coarse_boxes})
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes),
nms.get_field(coarse_boxes_key),
nms_valid,
]
(nms_corners_output, nms_scores_output, nms_classes_output,
nms_coarse_corners,
nms_valid) = self.execute(graph_fn, [boxes, scores, coarse_boxes])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
self.assertAllEqual(nms_coarse_corners[:nms_valid], exp_nms_coarse_corners)
def test_multiclass_nms_select_with_shared_boxes_given_masks(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
num_classes = 2
mask_height = 3
mask_width = 3
masks = np.tile(
np.reshape(range(8), [8, 1, 1, 1]),
[1, num_classes, mask_height, mask_width])
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002],
[0, 100, 1, 101]]
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 0, 1, 0]
exp_nms_masks_tensor = np.tile(
np.reshape(np.array([3, 0, 6, 5], np.float32), [4, 1, 1]),
[1, mask_height, mask_width])
def graph_fn(boxes, scores, masks):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_output_size,
masks=masks,
pad_to_max_output_size=True)
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes),
nms.get_field(fields.BoxListFields.masks), nms_valid
]
(nms_corners_output, nms_scores_output, nms_classes_output, nms_masks,
nms_valid) = self.execute(graph_fn, [boxes, scores, masks])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
self.assertAllEqual(nms_masks[:nms_valid], exp_nms_masks_tensor)
def test_multiclass_nms_select_with_clip_window(self):
boxes = np.array([[[0, 0, 10, 10]], [[1, 1, 11, 11]]], np.float32)
scores = np.array([[.9], [.75]], np.float32)
clip_window = np.array([5, 4, 8, 7], np.float32)
score_thresh = 0.0
iou_thresh = 0.5
max_output_size = 100
exp_nms_corners = [[5, 4, 8, 7]]
exp_nms_scores = [.9]
exp_nms_classes = [0]
def graph_fn(boxes, scores, clip_window):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_output_size,
pad_to_max_output_size=True,
clip_window=clip_window)
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes), nms_valid
]
(nms_corners_output, nms_scores_output, nms_classes_output,
nms_valid) = self.execute(graph_fn, [boxes, scores, clip_window])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
def test_multiclass_nms_select_with_clip_window_change_coordinate_frame(self):
boxes = np.array([[[0, 0, 10, 10]], [[1, 1, 11, 11]]], np.float32)
scores = np.array([[.9], [.75]], np.float32)
clip_window = np.array([5, 4, 8, 7], np.float32)
score_thresh = 0.0
iou_thresh = 0.5
max_output_size = 100
exp_nms_corners = [[0, 0, 1, 1]]
exp_nms_scores = [.9]
exp_nms_classes = [0]
def graph_fn(boxes, scores, clip_window):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_output_size,
clip_window=clip_window,
pad_to_max_output_size=True,
change_coordinate_frame=True)
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes), nms_valid
]
(nms_corners_output, nms_scores_output, nms_classes_output,
nms_valid) = self.execute(graph_fn, [boxes, scores, clip_window])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
def test_multiclass_nms_select_with_per_class_cap(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
score_thresh = 0.1
iou_thresh = .5
max_size_per_class = 2
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002]]
exp_nms_scores = [.95, .9, .85]
exp_nms_classes = [0, 0, 1]
def graph_fn(boxes, scores):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_size_per_class,
pad_to_max_output_size=True)
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes),
nms_valid
]
(nms_corners_output, nms_scores_output,
nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
def test_multiclass_nms_select_with_total_cap(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
score_thresh = 0.1
iou_thresh = .5
max_size_per_class = 4
max_total_size = 2
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1]]
exp_nms_scores = [.95, .9]
exp_nms_classes = [0, 0]
def graph_fn(boxes, scores):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_size_per_class,
max_total_size,
pad_to_max_output_size=True)
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes),
nms_valid
]
(nms_corners_output, nms_scores_output,
nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
def test_multiclass_nms_threshold_then_select_with_shared_boxes(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9], [.75], [.6], [.95], [.5], [.3], [.01], [.01]],
np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 3
exp_nms = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 100, 1, 101]]
def graph_fn(boxes, scores):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_output_size,
pad_to_max_output_size=True)
return nms.get(), nms_valid
nms_output, nms_valid = self.execute(graph_fn, [boxes, scores])
self.assertAllClose(nms_output[:nms_valid], exp_nms)
def test_multiclass_nms_select_with_separate_boxes(self):
boxes = np.array(
[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]],
[[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [
0, 10, 1, 11
]], [[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]],
[[0, 100, 1, 101], [0, 100, 1, 101]],
[[0, 1000, 1, 1002], [0, 999, 2, 1004]],
[[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
score_thresh = 0.1
iou_thresh = .5
max_output_size = 4
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 999, 2, 1004],
[0, 100, 1, 101]]
exp_nms_scores = [.95, .9, .85, .3]
exp_nms_classes = [0, 0, 1, 0]
def graph_fn(boxes, scores):
nms, nms_valid = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_output_size,
pad_to_max_output_size=True)
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes),
nms_valid
]
(nms_corners_output, nms_scores_output,
nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores])
self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners)
self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores)
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes)
def test_multiclass_soft_nms_select_with_shared_boxes_cpu_only(self):
boxes = np.array(
[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]],
[[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]],
[[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32)
scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0],
[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]],
np.float32)
score_thresh = 0.1
iou_thresh = 1.0
max_output_size = 4
exp_nms_corners = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 1000, 1, 1002],
[0, 0.1, 1, 1.1]]
exp_nms_scores = [.95, .9, .85, .384]
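    # Note on the trailing .384 (illustrative arithmetic, not part of the
    # original test): it is the overlapping box's soft-NMS-decayed score,
    # .75 * exp(-iou**2 / (2 * sigma)) ~= .384 with iou ~= 0.818 (overlap of
    # [0, 0.1, 1, 1.1] with [0, 0, 1, 1]) and sigma = 0.5.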
exp_nms_classes = [0, 0, 1, 0]
def graph_fn(boxes, scores):
nms, _ = post_processing.multiclass_non_max_suppression(
boxes,
scores,
score_thresh,
iou_thresh,
max_size_per_class=max_output_size,
max_total_size=max_output_size,
soft_nms_sigma=0.5)
return [
nms.get(),
nms.get_field(fields.BoxListFields.scores),
nms.get_field(fields.BoxListFields.classes)
]
(nms_corners_output, nms_scores_output,
nms_classes_output) = self.execute_cpu(graph_fn, [boxes, scores])
self.assertAllClose(
nms_corners_output, exp_nms_corners, rtol=1e-2, atol=1e-2)
self.assertAllClose(nms_scores_output, exp_nms_scores, rtol=1e-2, atol=1e-2)
self.assertAllClose(
nms_classes_output, exp_nms_classes, rtol=1e-2, atol=1e-2)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/multiclass_nms_test.py | multiclass_nms_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for data decoders.
Data decoders decode the input data and return a dictionary of tensors keyed by
the entries in core.reader.Fields.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import six
class DataDecoder(six.with_metaclass(ABCMeta, object)):
"""Interface for data decoders."""
@abstractmethod
def decode(self, data):
"""Return a single image and associated labels.
Args:
data: a string tensor holding a serialized protocol buffer corresponding
to data for a single image.
Returns:
tensor_dict: a dictionary containing tensors. Possible keys are defined in
reader.Fields.
"""
pass
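# A minimal concrete decoder, shown as an illustrative sketch only: the
# feature key ('image/encoded') and the JPEG parsing below are assumptions
# made for demonstration and are not part of this interface.
import tensorflow.compat.v1 as tf


class _ExampleJpegDecoder(DataDecoder):
  """Toy decoder: parses a serialized tf.Example holding a JPEG image."""

  def decode(self, data):
    features = tf.parse_single_example(
        data, {'image/encoded': tf.FixedLenFeature((), tf.string)})
    # Return a tensor_dict; real decoders key it by core.standard_fields.
    return {'image': tf.image.decode_jpeg(features['image/encoded'],
                                          channels=3)}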
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/data_decoder.py | data_decoder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Records previous preprocessing operations and allows them to be repeated.
Used with object_detection.core.preprocessor. Passing a PreprocessorCache
into individual data augmentation functions or the general preprocess() function
will store all randomly generated variables in the PreprocessorCache. When
a preprocessor function is called multiple times with the same
PreprocessorCache object, that function will perform the same augmentation
on all calls.
"""
import collections
class PreprocessorCache(object):
"""Dictionary wrapper storing random variables generated during preprocessing.
"""
# Constant keys representing different preprocessing functions
ROTATION90 = 'rotation90'
HORIZONTAL_FLIP = 'horizontal_flip'
VERTICAL_FLIP = 'vertical_flip'
PIXEL_VALUE_SCALE = 'pixel_value_scale'
IMAGE_SCALE = 'image_scale'
RGB_TO_GRAY = 'rgb_to_gray'
ADJUST_BRIGHTNESS = 'adjust_brightness'
ADJUST_CONTRAST = 'adjust_contrast'
ADJUST_HUE = 'adjust_hue'
ADJUST_SATURATION = 'adjust_saturation'
DISTORT_COLOR = 'distort_color'
STRICT_CROP_IMAGE = 'strict_crop_image'
CROP_IMAGE = 'crop_image'
PAD_IMAGE = 'pad_image'
CROP_TO_ASPECT_RATIO = 'crop_to_aspect_ratio'
RESIZE_METHOD = 'resize_method'
PAD_TO_ASPECT_RATIO = 'pad_to_aspect_ratio'
BLACK_PATCHES = 'black_patches'
ADD_BLACK_PATCH = 'add_black_patch'
SELECTOR = 'selector'
SELECTOR_TUPLES = 'selector_tuples'
SELF_CONCAT_IMAGE = 'self_concat_image'
SSD_CROP_SELECTOR_ID = 'ssd_crop_selector_id'
SSD_CROP_PAD_SELECTOR_ID = 'ssd_crop_pad_selector_id'
JPEG_QUALITY = 'jpeg_quality'
DOWNSCALE_TO_TARGET_PIXELS = 'downscale_to_target_pixels'
PATCH_GAUSSIAN = 'patch_gaussian'
SQUARE_CROP_BY_SCALE = 'square_crop_scale'
  # Permitted function ids.
_VALID_FNS = [ROTATION90, HORIZONTAL_FLIP, VERTICAL_FLIP, PIXEL_VALUE_SCALE,
IMAGE_SCALE, RGB_TO_GRAY, ADJUST_BRIGHTNESS, ADJUST_CONTRAST,
ADJUST_HUE, ADJUST_SATURATION, DISTORT_COLOR, STRICT_CROP_IMAGE,
CROP_IMAGE, PAD_IMAGE, CROP_TO_ASPECT_RATIO, RESIZE_METHOD,
PAD_TO_ASPECT_RATIO, BLACK_PATCHES, ADD_BLACK_PATCH, SELECTOR,
SELECTOR_TUPLES, SELF_CONCAT_IMAGE, SSD_CROP_SELECTOR_ID,
SSD_CROP_PAD_SELECTOR_ID, JPEG_QUALITY,
DOWNSCALE_TO_TARGET_PIXELS, PATCH_GAUSSIAN,
SQUARE_CROP_BY_SCALE]
def __init__(self):
self._history = collections.defaultdict(dict)
def clear(self):
"""Resets cache."""
self._history = collections.defaultdict(dict)
def get(self, function_id, key):
"""Gets stored value given a function id and key.
Args:
function_id: identifier for the preprocessing function used.
key: identifier for the variable stored.
Returns:
value: the corresponding value, expected to be a tensor or
nested structure of tensors.
Raises:
      ValueError: if function_id is not one of the valid function ids.
    """
"""
if function_id not in self._VALID_FNS:
raise ValueError('Function id not recognized: %s.' % str(function_id))
return self._history[function_id].get(key)
def update(self, function_id, key, value):
"""Adds a value to the dictionary.
Args:
function_id: identifier for the preprocessing function used.
key: identifier for the variable stored.
value: the value to store, expected to be a tensor or nested structure
of tensors.
Raises:
      ValueError: if function_id is not one of the valid function ids.
    """
"""
if function_id not in self._VALID_FNS:
raise ValueError('Function id not recognized: %s.' % str(function_id))
self._history[function_id][key] = value
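# Illustrative usage sketch, not part of the library API: store a value under
# one function id and read it back. In real use the stored values are tensors
# produced inside object_detection.core.preprocessor.
def _example_cache_round_trip():
  cache = PreprocessorCache()
  cache.update(PreprocessorCache.HORIZONTAL_FLIP, 'flip_flag', True)
  # Replaying with the same (function_id, key) pair yields the stored value,
  # which is how two calls can apply identical random augmentations.
  return cache.get(PreprocessorCache.HORIZONTAL_FLIP, 'flip_flag')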
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/preprocessor_cache.py | preprocessor_cache.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired batch_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired batch_size, it returns fewer examples.
The main function to call is Subsample(self, indicator, labels). For convenience
one can also call SubsampleWeights(self, weights, labels) which is defined in
the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures the length of output of the subsample is always batch_size, even
when number of examples set to True in indicator is less than batch_size.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import minibatch_sampler
class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler):
"""Subsamples minibatches to a desired balance of positives and negatives."""
def __init__(self, positive_fraction=0.5, is_static=False):
"""Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
in the batch.
is_static: If True, uses an implementation with static shape guarantees.
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1
"""
if positive_fraction < 0 or positive_fraction > 1:
raise ValueError('positive_fraction should be in range [0,1]. '
'Received: %s.' % positive_fraction)
self._positive_fraction = positive_fraction
self._is_static = is_static
def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
"""Counts the number of positives and negatives numbers to be sampled.
Args:
sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
the signed indices of the examples where the sign is based on the label
value. The examples that cannot be sampled are set to 0. It samples
atmost sample_size*positive_fraction positive examples and remaining
from negative examples.
sample_size: Size of subsamples.
Returns:
A tuple containing the number of positive and negative labels in the
subsample.
"""
input_length = tf.shape(sorted_indices_tensor)[0]
valid_positive_index = tf.greater(sorted_indices_tensor,
tf.zeros(input_length, tf.int32))
num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32))
max_num_positive_samples = tf.constant(
int(sample_size * self._positive_fraction), tf.int32)
num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
num_negative_samples = tf.constant(sample_size,
tf.int32) - num_positive_samples
return num_positive_samples, num_negative_samples
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning
of the input tensor.
num_end_samples: Number of examples to be sliced from the end of the
input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.
    """
"""
input_length = tf.shape(input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.float32)
indexed_positions = tf.multiply(tf.cumsum(selected_positions),
selected_positions)
one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
total_num_samples,
dtype=tf.float32)
return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
one_hot_selector, axes=[0, 0]), tf.int32)
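  # Worked example for the method above (illustrative values): with
  # input_tensor = [9, 8, 7, -1, -2], num_start_samples = 2,
  # num_end_samples = 1 and total_num_samples = 3:
  #   selected_positions = [1, 1, 0, 0, 1]
  #   indexed_positions  = [1, 2, 0, 0, 3]  (cumsum masked by the selection)
  # one_hot_selector then has a single 1 per selected row, so the tensordot
  # gathers [9, 8, -2], preserving the start-then-end order.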
def _static_subsample(self, indicator, batch_size, labels):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
        N should be a compile-time constant.
batch_size: desired batch size. This scalar cannot be None.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
        (=False) examples. N should be a compile-time constant.
Returns:
      sampled_idx_indicator: boolean tensor of shape [N], True for entries
        which are sampled. The number of True entries is always batch_size,
        even when fewer than batch_size entries of indicator are True.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
# Check if indicator and labels have a static size.
    if not indicator.shape.is_fully_defined():
      raise ValueError('indicator must be static in shape when is_static is '
                       'True.')
    if not labels.shape.is_fully_defined():
      raise ValueError('labels must be static in shape when is_static is '
                       'True.')
    if not isinstance(batch_size, int):
      raise ValueError('batch_size has to be an integer when is_static is '
                       'True.')
input_length = tf.shape(indicator)[0]
# Set the number of examples set True in indicator to be at least
# batch_size.
num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32))
additional_false_sample = tf.less_equal(
tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)),
batch_size - num_true_sampled)
indicator = tf.logical_or(indicator, additional_false_sample)
# Shuffle indicator and label. Need to store the permutation to restore the
# order post sampling.
permutation = tf.random_shuffle(tf.range(input_length))
indicator = tf.gather(indicator, permutation, axis=0)
labels = tf.gather(labels, permutation, axis=0)
# index (starting from 1) when indicator is True, 0 when False
indicator_idx = tf.where(
indicator, tf.range(1, input_length + 1),
tf.zeros(input_length, tf.int32))
# Replace -1 for negative, +1 for positive labels
signed_label = tf.where(
labels, tf.ones(input_length, tf.int32),
tf.scalar_mul(-1, tf.ones(input_length, tf.int32)))
# negative of index for negative label, positive index for positive label,
# 0 when indicator is False.
signed_indicator_idx = tf.multiply(indicator_idx, signed_label)
sorted_signed_indicator_idx = tf.nn.top_k(
signed_indicator_idx, input_length, sorted=True).values
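    # Worked example (illustrative values): with indicator = [T, T, F, T] and
    # labels = [T, F, F, T], indicator_idx = [1, 2, 0, 4] and signed_label =
    # [1, -1, -1, 1], so signed_indicator_idx = [1, -2, 0, 4]. top_k sorts
    # this to [4, 1, 0, -2]: positives first, unsampleable zeros in the
    # middle, negatives last.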
[num_positive_samples,
num_negative_samples] = self._get_num_pos_neg_samples(
sorted_signed_indicator_idx, batch_size)
sampled_idx = self._get_values_from_start_and_end(
sorted_signed_indicator_idx, num_positive_samples,
num_negative_samples, batch_size)
# Shift the indices to start from 0 and remove any samples that are set as
# False.
sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32)
sampled_idx = tf.multiply(
tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32),
sampled_idx)
sampled_idx_indicator = tf.cast(tf.reduce_sum(
tf.one_hot(sampled_idx, depth=input_length),
axis=0), tf.bool)
# project back the order based on stored permutations
idx_indicator = tf.scatter_nd(
tf.expand_dims(permutation, -1), sampled_idx_indicator,
shape=(input_length,))
return idx_indicator
def subsample(self, indicator, batch_size, labels, scope=None):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
        matches self._positive_fraction. It cannot be None if is_static is
        True.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
scope: name scope.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
if len(indicator.get_shape().as_list()) != 1:
raise ValueError('indicator must be 1 dimensional, got a tensor of '
'shape %s' % indicator.get_shape())
if len(labels.get_shape().as_list()) != 1:
raise ValueError('labels must be 1 dimensional, got a tensor of '
'shape %s' % labels.get_shape())
if labels.dtype != tf.bool:
raise ValueError('labels should be of type bool. Received: %s' %
labels.dtype)
if indicator.dtype != tf.bool:
raise ValueError('indicator should be of type bool. Received: %s' %
indicator.dtype)
with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'):
if self._is_static:
return self._static_subsample(indicator, batch_size, labels)
else:
# Only sample from indicated samples
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if batch_size is None:
max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))
else:
max_num_pos = int(self._positive_fraction * batch_size)
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
if batch_size is None:
negative_positive_ratio = (
1 - self._positive_fraction) / self._positive_fraction
max_num_neg = tf.cast(
negative_positive_ratio *
tf.cast(num_sampled_pos, dtype=tf.float32),
dtype=tf.int32)
else:
max_num_neg = batch_size - num_sampled_pos
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
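def _example_balanced_subsample():
  """Illustrative sketch only, not part of the library; inputs are made up."""
  sampler = BalancedPositiveNegativeSampler(positive_fraction=0.5)
  indicator = tf.constant([True, True, True, True, True, False])
  labels = tf.constant([True, False, False, True, False, False])
  # Marks at most 2 positives (0.5 * batch_size) and fills the remainder with
  # negatives; the last candidate is never chosen since its indicator is
  # False.
  return sampler.subsample(indicator, batch_size=4, labels=labels)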
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/balanced_positive_negative_sampler.py | balanced_positive_negative_sampler.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_list."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.utils import test_case
class BoxListTest(test_case.TestCase):
"""Tests for BoxList class."""
def test_num_boxes(self):
def graph_fn():
data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32)
boxes = box_list.BoxList(data)
return boxes.num_boxes()
num_boxes_out = self.execute(graph_fn, [])
self.assertEqual(num_boxes_out, 3)
def test_get_correct_center_coordinates_and_sizes(self):
boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],
np.float32)
def graph_fn(boxes):
boxes = box_list.BoxList(boxes)
centers_sizes = boxes.get_center_coordinates_and_sizes()
return centers_sizes
centers_sizes_out = self.execute(graph_fn, [boxes])
expected_centers_sizes = [[15, 0.35], [12.5, 0.25], [10, 0.3], [5, 0.3]]
self.assertAllClose(centers_sizes_out, expected_centers_sizes)
def test_create_box_list_with_dynamic_shape(self):
def graph_fn():
data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32)
indices = tf.reshape(tf.where(tf.greater([1, 0, 1], 0)), [-1])
data = tf.gather(data, indices)
assert data.get_shape().as_list() == [None, 4]
boxes = box_list.BoxList(data)
return boxes.num_boxes()
num_boxes = self.execute(graph_fn, [])
self.assertEqual(num_boxes, 2)
def test_transpose_coordinates(self):
boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],
np.float32)
def graph_fn(boxes):
boxes = box_list.BoxList(boxes)
boxes.transpose_coordinates()
return boxes.get()
    transposed_boxes = self.execute(graph_fn, [boxes])
    expected_corners = [[10.0, 10.0, 15.0, 20.0], [0.1, 0.2, 0.4, 0.5]]
    self.assertAllClose(transposed_boxes, expected_corners)
def test_box_list_invalid_inputs(self):
data0 = tf.constant([[[0, 0, 1, 1], [3, 4, 5, 5]]], tf.float32)
data1 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.float32)
data2 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.int32)
with self.assertRaises(ValueError):
_ = box_list.BoxList(data0)
with self.assertRaises(ValueError):
_ = box_list.BoxList(data1)
with self.assertRaises(ValueError):
_ = box_list.BoxList(data2)
def test_num_boxes_static(self):
box_corners = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
boxes = box_list.BoxList(tf.constant(box_corners))
self.assertEqual(boxes.num_boxes_static(), 2)
self.assertEqual(type(boxes.num_boxes_static()), int)
def test_as_tensor_dict(self):
boxes = tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]],
tf.float32)
boxlist = box_list.BoxList(boxes)
classes = tf.constant([0, 1])
boxlist.add_field('classes', classes)
scores = tf.constant([0.75, 0.2])
boxlist.add_field('scores', scores)
tensor_dict = boxlist.as_tensor_dict()
self.assertDictEqual(tensor_dict, {'scores': scores, 'classes': classes,
'boxes': boxes})
def test_as_tensor_dict_with_features(self):
boxes = tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]],
tf.float32)
boxlist = box_list.BoxList(boxes)
classes = tf.constant([0, 1])
boxlist.add_field('classes', classes)
scores = tf.constant([0.75, 0.2])
boxlist.add_field('scores', scores)
tensor_dict = boxlist.as_tensor_dict(['scores', 'classes'])
self.assertDictEqual(tensor_dict, {'scores': scores, 'classes': classes})
def test_as_tensor_dict_missing_field(self):
boxlist = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
boxlist.add_field('classes', tf.constant([0, 1]))
boxlist.add_field('scores', tf.constant([0.75, 0.2]))
with self.assertRaises(ValueError):
boxlist.as_tensor_dict(['foo', 'bar'])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/box_list_test.py | box_list_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard field used by BoxList
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
class InputDataFields(object):
"""Names for the input tensors.
  Holds the standard data field names to use for identifying input tensors.
  This should be used by the decoder to identify keys for the returned
  tensor_dict containing input tensors, and by the model to identify the
  tensors it needs.
Attributes:
image: image.
image_additional_channels: additional channels.
original_image: image in the original input size.
    original_image_spatial_shape: spatial shape of the original image.
key: unique key corresponding to image.
source_id: source of the original image.
filename: original filename of the dataset (without common path).
groundtruth_image_classes: image-level class labels.
groundtruth_image_confidences: image-level class confidences.
groundtruth_labeled_classes: image-level annotation that indicates the
classes for which an image has been labeled.
groundtruth_boxes: coordinates of the ground truth boxes in the image.
groundtruth_classes: box-level class labels.
groundtruth_track_ids: box-level track ID labels.
groundtruth_temporal_offset: box-level temporal offsets, i.e.,
movement of the box center in adjacent frames.
groundtruth_track_match_flags: box-level flags indicating if objects
exist in the previous frame.
groundtruth_confidences: box-level class confidences. The shape should be
the same as the shape of groundtruth_classes.
groundtruth_label_types: box-level label types (e.g. explicit negative).
groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
is the groundtruth a single object or a crowd.
groundtruth_area: area of a groundtruth segment.
groundtruth_difficult: is a `difficult` object
groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
same class, forming a connected group, where instances are heavily
occluding each other.
proposal_boxes: coordinates of object proposal boxes.
proposal_objectness: objectness score of each proposal.
groundtruth_instance_masks: ground truth instance masks.
groundtruth_instance_mask_weights: ground truth instance masks weights.
groundtruth_instance_boundaries: ground truth instance boundaries.
groundtruth_instance_classes: instance mask-level class labels.
groundtruth_keypoints: ground truth keypoints.
groundtruth_keypoint_depths: Relative depth of the keypoints.
groundtruth_keypoint_depth_weights: Weights of the relative depth of the
keypoints.
groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
groundtruth_keypoint_weights: groundtruth weight factor for keypoints.
groundtruth_label_weights: groundtruth label weights.
    groundtruth_verified_neg_classes: groundtruth verified negative classes.
groundtruth_not_exhaustive_classes: groundtruth not-exhaustively labeled
classes.
groundtruth_weights: groundtruth weight factor for bounding boxes.
groundtruth_dp_num_points: The number of DensePose sampled points for each
instance.
groundtruth_dp_part_ids: Part indices for DensePose points.
groundtruth_dp_surface_coords: Image locations and UV coordinates for
DensePose points.
num_groundtruth_boxes: number of groundtruth boxes.
is_annotated: whether an image has been labeled or not.
    true_image_shape: true shape of the image within the resized image, as
      resized images can be padded with zeros.
multiclass_scores: the label score per class for each box.
context_features: a flattened list of contextual features.
context_feature_length: the fixed length of each feature in
context_features, used for reshaping.
valid_context_size: the valid context size, used in filtering the padded
context features.
context_features_image_id_list: the list of image source ids corresponding
      to the features in context_features.
    image_timestamps: timestamps of the images.
image_format: format for the images, used to decode
image_height: height of images, used to decode
image_width: width of images, used to decode
"""
image = 'image'
image_additional_channels = 'image_additional_channels'
original_image = 'original_image'
original_image_spatial_shape = 'original_image_spatial_shape'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_image_confidences = 'groundtruth_image_confidences'
groundtruth_labeled_classes = 'groundtruth_labeled_classes'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_track_ids = 'groundtruth_track_ids'
groundtruth_temporal_offset = 'groundtruth_temporal_offset'
groundtruth_track_match_flags = 'groundtruth_track_match_flags'
groundtruth_confidences = 'groundtruth_confidences'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
groundtruth_group_of = 'groundtruth_group_of'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_mask_weights = 'groundtruth_instance_mask_weights'
groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_depths = 'groundtruth_keypoint_depths'
groundtruth_keypoint_depth_weights = 'groundtruth_keypoint_depth_weights'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_keypoint_weights = 'groundtruth_keypoint_weights'
groundtruth_label_weights = 'groundtruth_label_weights'
groundtruth_verified_neg_classes = 'groundtruth_verified_neg_classes'
groundtruth_not_exhaustive_classes = 'groundtruth_not_exhaustive_classes'
groundtruth_weights = 'groundtruth_weights'
groundtruth_dp_num_points = 'groundtruth_dp_num_points'
groundtruth_dp_part_ids = 'groundtruth_dp_part_ids'
groundtruth_dp_surface_coords = 'groundtruth_dp_surface_coords'
num_groundtruth_boxes = 'num_groundtruth_boxes'
is_annotated = 'is_annotated'
true_image_shape = 'true_image_shape'
multiclass_scores = 'multiclass_scores'
context_features = 'context_features'
context_feature_length = 'context_feature_length'
valid_context_size = 'valid_context_size'
context_features_image_id_list = 'context_features_image_id_list'
image_timestamps = 'image_timestamps'
image_format = 'image_format'
image_height = 'image_height'
image_width = 'image_width'
class DetectionResultFields(object):
"""Naming conventions for storing the output of the detector.
Attributes:
source_id: source of the original image.
key: unique key corresponding to image.
detection_boxes: coordinates of the detection boxes in the image.
detection_scores: detection scores for the detection boxes in the image.
    detection_multiclass_scores: class score distribution (including
      background) for each detection box in the image.
detection_classes: detection-level class labels.
detection_masks: contains a segmentation mask for each detection box.
detection_surface_coords: contains DensePose surface coordinates for each
box.
detection_boundaries: contains an object boundary for each detection box.
detection_keypoints: contains detection keypoints for each detection box.
detection_keypoint_scores: contains detection keypoint scores.
detection_keypoint_depths: contains detection keypoint depths.
num_detections: number of detections in the batch.
raw_detection_boxes: contains decoded detection boxes without Non-Max
suppression.
raw_detection_scores: contains class score logits for raw detection boxes.
detection_anchor_indices: The anchor indices of the detections after NMS.
detection_features: contains extracted features for each detected box
after NMS.
"""
source_id = 'source_id'
key = 'key'
detection_boxes = 'detection_boxes'
detection_scores = 'detection_scores'
detection_multiclass_scores = 'detection_multiclass_scores'
detection_features = 'detection_features'
detection_classes = 'detection_classes'
detection_masks = 'detection_masks'
detection_surface_coords = 'detection_surface_coords'
detection_boundaries = 'detection_boundaries'
detection_keypoints = 'detection_keypoints'
detection_keypoint_scores = 'detection_keypoint_scores'
detection_keypoint_depths = 'detection_keypoint_depths'
detection_embeddings = 'detection_embeddings'
detection_offsets = 'detection_temporal_offsets'
num_detections = 'num_detections'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_scores = 'raw_detection_scores'
detection_anchor_indices = 'detection_anchor_indices'
class BoxListFields(object):
"""Naming conventions for BoxLists.
Attributes:
boxes: bounding box coordinates.
classes: classes per bounding box.
scores: scores per bounding box.
weights: sample weights per bounding box.
objectness: objectness score per bounding box.
masks: masks per bounding box.
mask_weights: mask weights for each bounding box.
boundaries: boundaries per bounding box.
keypoints: keypoints per bounding box.
keypoint_visibilities: keypoint visibilities per bounding box.
keypoint_heatmaps: keypoint heatmaps per bounding box.
keypoint_depths: keypoint depths per bounding box.
keypoint_depth_weights: keypoint depth weights per bounding box.
densepose_num_points: number of DensePose points per bounding box.
densepose_part_ids: DensePose part ids per bounding box.
densepose_surface_coords: DensePose surface coordinates per bounding box.
is_crowd: is_crowd annotation per bounding box.
temporal_offsets: temporal center offsets per bounding box.
track_match_flags: match flags per bounding box.
"""
boxes = 'boxes'
classes = 'classes'
scores = 'scores'
weights = 'weights'
confidences = 'confidences'
objectness = 'objectness'
masks = 'masks'
mask_weights = 'mask_weights'
boundaries = 'boundaries'
keypoints = 'keypoints'
keypoint_visibilities = 'keypoint_visibilities'
keypoint_heatmaps = 'keypoint_heatmaps'
keypoint_depths = 'keypoint_depths'
keypoint_depth_weights = 'keypoint_depth_weights'
densepose_num_points = 'densepose_num_points'
densepose_part_ids = 'densepose_part_ids'
densepose_surface_coords = 'densepose_surface_coords'
is_crowd = 'is_crowd'
group_of = 'group_of'
track_ids = 'track_ids'
temporal_offsets = 'temporal_offsets'
track_match_flags = 'track_match_flags'
class PredictionFields(object):
"""Naming conventions for standardized prediction outputs.
Attributes:
feature_maps: List of feature maps for prediction.
anchors: Generated anchors.
raw_detection_boxes: Decoded detection boxes without NMS.
raw_detection_feature_map_indices: Feature map indices from which each raw
detection box was produced.
"""
feature_maps = 'feature_maps'
anchors = 'anchors'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_feature_map_indices = 'raw_detection_feature_map_indices'
class TfExampleFields(object):
"""TF-example proto feature names for object detection.
Holds the standard feature names to load from an Example proto for object
detection.
Attributes:
image_encoded: JPEG encoded string
image_format: image format, e.g. "JPEG"
filename: filename
channels: number of channels of image
colorspace: colorspace, e.g. "RGB"
height: height of image in pixels, e.g. 462
width: width of image in pixels, e.g. 581
source_id: original source of the image
image_class_text: image-level label in text format
image_class_label: image-level label in numerical format
image_class_confidence: image-level confidence of the label
object_class_text: labels in text format, e.g. ["person", "cat"]
object_class_label: labels in numbers, e.g. [16, 8]
object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
object_view: viewpoint of object, e.g. ["frontal", "left"]
object_truncated: is object truncated, e.g. [true, false]
object_occluded: is object occluded, e.g. [true, false]
object_difficult: is object difficult, e.g. [true, false]
object_group_of: is object a single object or a group of objects
object_depiction: is object a depiction
object_is_crowd: [DEPRECATED, use object_group_of instead]
is the object a single object or a crowd
object_segment_area: the area of the segment.
object_weight: a weight factor for the object's bounding box.
instance_masks: instance segmentation masks.
instance_boundaries: instance boundaries.
instance_classes: Classes for each instance segmentation mask.
detection_class_label: class label in numbers.
detection_bbox_ymin: ymin coordinates of a detection box.
detection_bbox_xmin: xmin coordinates of a detection box.
detection_bbox_ymax: ymax coordinates of a detection box.
detection_bbox_xmax: xmax coordinates of a detection box.
detection_score: detection score for the class label and box.
"""
image_encoded = 'image/encoded'
image_format = 'image/format' # format is reserved keyword
filename = 'image/filename'
channels = 'image/channels'
colorspace = 'image/colorspace'
height = 'image/height'
width = 'image/width'
source_id = 'image/source_id'
image_class_text = 'image/class/text'
image_class_label = 'image/class/label'
image_class_confidence = 'image/class/confidence'
object_class_text = 'image/object/class/text'
object_class_label = 'image/object/class/label'
object_bbox_ymin = 'image/object/bbox/ymin'
object_bbox_xmin = 'image/object/bbox/xmin'
object_bbox_ymax = 'image/object/bbox/ymax'
object_bbox_xmax = 'image/object/bbox/xmax'
object_view = 'image/object/view'
object_truncated = 'image/object/truncated'
object_occluded = 'image/object/occluded'
object_difficult = 'image/object/difficult'
object_group_of = 'image/object/group_of'
object_depiction = 'image/object/depiction'
object_is_crowd = 'image/object/is_crowd'
object_segment_area = 'image/object/segment/area'
object_weight = 'image/object/weight'
instance_masks = 'image/segmentation/object'
instance_boundaries = 'image/boundaries/object'
instance_classes = 'image/segmentation/object/class'
detection_class_label = 'image/detection/label'
detection_bbox_ymin = 'image/detection/bbox/ymin'
detection_bbox_xmin = 'image/detection/bbox/xmin'
detection_bbox_ymax = 'image/detection/bbox/ymax'
detection_bbox_xmax = 'image/detection/bbox/xmax'
detection_score = 'image/detection/score'
# Sequence fields for SequenceExample inputs.
# All others are considered context fields.
SEQUENCE_FIELDS = [InputDataFields.image,
                   InputDataFields.source_id,
                   InputDataFields.groundtruth_boxes,
                   InputDataFields.num_groundtruth_boxes,
                   InputDataFields.groundtruth_classes,
                   InputDataFields.groundtruth_weights,
InputDataFields.is_annotated]
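# Illustrative sketch only: the classes above are plain namespaces of string
# constants used to key tensor dictionaries consistently across the codebase.
# The numpy values below are made up for demonstration; numpy is not
# otherwise a dependency of this module.
def _example_groundtruth_dict():
  import numpy as np
  return {
      InputDataFields.groundtruth_boxes:
          np.array([[0.1, 0.1, 0.5, 0.5]], dtype=np.float32),
      InputDataFields.groundtruth_classes:
          np.array([1], dtype=np.int32),
  }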
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/standard_fields.py | standard_fields.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.core import model
from object_detection.utils import test_case
class FakeModel(model.DetectionModel):
def __init__(self):
# sub-networks containing weights of different shapes.
self._network1 = tf.keras.Sequential([
tf.keras.layers.Conv2D(8, 1)
])
self._network2 = tf.keras.Sequential([
tf.keras.layers.Conv2D(16, 1)
])
super(FakeModel, self).__init__(num_classes=0)
def preprocess(self, images):
return images, tf.shape(images)
def predict(self, images, shapes):
return {'prediction': self._network2(self._network1(images))}
def postprocess(self, prediction_dict, shapes):
return prediction_dict
def loss(self):
return tf.constant(0.0)
def updates(self):
return []
def restore_map(self):
return {}
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def regularization_losses(self):
return []
class ModelTest(test_case.TestCase):
def test_model_call(self):
detection_model = FakeModel()
def graph_fn():
return detection_model(tf.zeros((1, 128, 128, 3)))
result = self.execute(graph_fn, [])
self.assertEqual(result['prediction'].shape,
(1, 128, 128, 16))
def test_freeze(self):
detection_model = FakeModel()
detection_model(tf.zeros((1, 128, 128, 3)))
net1_var_shapes = [tuple(var.get_shape().as_list()) for var in
detection_model._network1.trainable_variables]
del detection_model
detection_model = FakeModel()
detection_model._network2.trainable = False
detection_model(tf.zeros((1, 128, 128, 3)))
var_shapes = [tuple(var.get_shape().as_list()) for var in
detection_model._network1.trainable_variables]
self.assertEqual(set(net1_var_shapes), set(var_shapes))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/model_test.py | model_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.batcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import batcher
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class BatcherTest(tf.test.TestCase):
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
boxes = tf.tile(
tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)]))
batch_queue = batcher.BatchQueue(
tensor_dict={'boxes': boxes},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, 4], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions(
self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
image = tf.reshape(
tf.range(counter * counter), tf.stack([counter, counter]))
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, None], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(1, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 1)
image = tf.reshape(tf.range(1, 13), [4, 3]) * counter
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([4, 3], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 1
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i)
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batcher_when_batch_size_is_one(self):
with self.test_session() as sess:
batch_size = 1
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
image = tf.reshape(
tf.range(counter * counter), tf.stack([counter, counter]))
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, None], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/batcher_tf1_test.py | batcher_tf1_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""object_detection_evaluation module.
ObjectDetectionEvaluation is a class which manages ground truth information of
an object detection dataset and computes frequently used detection metrics
such as Precision, Recall, and CorLoc on the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection result of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
visualization.
Note: This module operates on numpy boxes and box lists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import collections
import logging
import unicodedata
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.utils import label_map_util
from object_detection.utils import metrics
from object_detection.utils import per_image_evaluation
class DetectionEvaluator(six.with_metaclass(ABCMeta, object)):
"""Interface for object detection evalution classes.
Example usage of the Evaluator:
------------------------------
evaluator = DetectionEvaluator(categories)
# Detections and groundtruth for image 1.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
# Detections and groundtruth for image 2.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
metrics_dict = evaluator.evaluate()
"""
def __init__(self, categories):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
"""
self._categories = categories
def observe_result_dict_for_single_example(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
raise NotImplementedError('Not implemented for this evaluator!')
@abstractmethod
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary of groundtruth numpy arrays required for
evaluations.
"""
pass
@abstractmethod
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary of detection numpy arrays required for
evaluation.
"""
pass
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
pass
@abstractmethod
def evaluate(self):
"""Evaluates detections and returns a dictionary of metrics."""
pass
@abstractmethod
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
pass
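# Illustrative sketch only: a toy DetectionEvaluator that merely counts
# observed images. Real evaluators accumulate boxes, classes and scores; this
# exists purely to show the interface contract and is not part of the module.
class _ImageCountEvaluator(DetectionEvaluator):
  """Toy evaluator counting unique image ids seen during evaluation."""

  def __init__(self, categories):
    super(_ImageCountEvaluator, self).__init__(categories)
    self._ids = set()

  def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
    self._ids.add(image_id)

  def add_single_detected_image_info(self, image_id, detections_dict):
    self._ids.add(image_id)

  def evaluate(self):
    return {'num_images': len(self._ids)}

  def clear(self):
    self._ids = set()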
class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
evaluate_corlocs=False,
evaluate_precision_recall=False,
metric_prefix=None,
use_weighted_mean_ap=False,
evaluate_masks=False,
group_of_weight=0.0,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
recall_lower_bound: lower bound of recall operating area.
recall_upper_bound: upper bound of recall operating area.
evaluate_corlocs: (optional) boolean which determines if corloc scores are
to be returned or not.
evaluate_precision_recall: (optional) boolean which determines if
precision and recall values are to be returned or not.
metric_prefix: (optional) string prefix for metric name; if None, no
prefix is used.
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
evaluate_masks: If False, evaluation will be performed based on boxes. If
True, mask evaluation will be performed instead.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
      nms_iou_threshold: NMS IoU threshold.
nms_max_output_boxes: maximal number of boxes after NMS.
Raises:
ValueError: If the category ids are not 1-indexed.
"""
super(ObjectDetectionEvaluator, self).__init__(categories)
self._num_classes = max([cat['id'] for cat in categories])
if min(cat['id'] for cat in categories) < 1:
raise ValueError('Classes should be 1-indexed.')
self._matching_iou_threshold = matching_iou_threshold
self._recall_lower_bound = recall_lower_bound
self._recall_upper_bound = recall_upper_bound
self._use_weighted_mean_ap = use_weighted_mean_ap
self._label_id_offset = 1
self._evaluate_masks = evaluate_masks
self._group_of_weight = group_of_weight
self._nms_iou_threshold = nms_iou_threshold
self._nms_max_output_boxes = nms_max_output_boxes
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
recall_lower_bound=self._recall_lower_bound,
recall_upper_bound=self._recall_upper_bound,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
group_of_weight=self._group_of_weight,
nms_iou_threshold=self._nms_iou_threshold,
nms_max_output_boxes=self._nms_max_output_boxes)
self._image_ids = set([])
self._evaluate_corlocs = evaluate_corlocs
self._evaluate_precision_recall = evaluate_precision_recall
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_difficult,
standard_fields.InputDataFields.groundtruth_instance_masks,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
standard_fields.DetectionResultFields.detection_masks
])
self._build_metric_names()
def get_internal_state(self):
"""Returns internal state and image ids that lead to the state.
    Note that only evaluation results will be returned (e.g. not raw
    predictions or groundtruth).
"""
return self._evaluation.get_internal_state(), self._image_ids
def merge_internal_state(self, image_ids, state_tuple):
"""Merges internal state with the existing state of evaluation.
    If an image_id has already been seen by the evaluator, a warning is
    logged.
Args:
image_ids: list of images whose state is stored in the tuple.
state_tuple: state.
"""
for image_id in image_ids:
if image_id in self._image_ids:
logging.warning('Image with id %s already added.', image_id)
self._evaluation.merge_internal_state(state_tuple)
def _build_metric_names(self):
"""Builds a list with metric names."""
if self._recall_lower_bound > 0.0 or self._recall_upper_bound < 1.0:
self._metric_names = [
self._metric_prefix +
'Precision/mAP@{}IOU@[{:.1f},{:.1f}]Recall'.format(
self._matching_iou_threshold, self._recall_lower_bound,
self._recall_upper_bound)
]
else:
self._metric_names = [
self._metric_prefix +
'Precision/mAP@{}IOU'.format(self._matching_iou_threshold)
]
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold))
category_index = label_map_util.create_category_index(self._categories)
for idx in range(self._num_classes):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = six.text_type(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name)
if six.PY2:
category_name = category_name.encode('ascii', 'ignore')
self._metric_names.append(
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_difficult: Optional length M
numpy boolean array denoting whether a ground truth box is a difficult
instance or not. This field is optional to support the case that no
boxes are difficult.
standard_fields.InputDataFields.groundtruth_instance_masks: Optional
numpy array of shape [num_boxes, height, width] with values in {0, 1}.
    Raises:
      ValueError: If evaluate_masks is True and instance masks are not in the
        groundtruth dictionary.
"""
if image_id in self._image_ids:
logging.warning('Image with id %s already added.', image_id)
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the difficult flag from the groundtruth_dict only when the key is
    # present and its array is non-empty (or when there are no groundtruth
    # boxes at all for this image); otherwise fall back to None.
if (standard_fields.InputDataFields.groundtruth_difficult in six.viewkeys(
groundtruth_dict) and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult]
.size or not groundtruth_classes.size)):
groundtruth_difficult = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_difficult]
else:
groundtruth_difficult = None
if not len(self._image_ids) % 1000:
logging.warning(
'image %s does not have groundtruth difficult flag specified',
image_id)
groundtruth_masks = None
if self._evaluate_masks:
if (standard_fields.InputDataFields.groundtruth_instance_masks not in
groundtruth_dict):
raise ValueError('Instance masks not in groundtruth dictionary.')
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_boxes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_class_labels=groundtruth_classes,
groundtruth_is_difficult_list=groundtruth_difficult,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
standard_fields.DetectionResultFields.detection_masks: uint8 numpy array
of shape [num_boxes, height, width] containing `num_boxes` masks of
values ranging between 0 and 1.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
detection_masks = None
if self._evaluate_masks:
if (standard_fields.DetectionResultFields.detection_masks not in
detections_dict):
raise ValueError('Detection masks not in detections dictionary.')
detection_masks = detections_dict[
standard_fields.DetectionResultFields.detection_masks]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes],
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def evaluate(self):
"""Compute evaluation result.
Returns:
A dictionary of metrics with the following fields -
1. summary_metrics:
'<prefix if not empty>_Precision/mAP@<matching_iou_threshold>IOU': mean
average precision at the specified IOU threshold.
2. per_category_ap: category specific results with keys of the form
'<prefix if not empty>_PerformanceByCategory/
mAP@<matching_iou_threshold>IOU/category'.
"""
(per_class_ap, mean_ap, per_class_precision, per_class_recall,
per_class_corloc, mean_corloc) = (
self._evaluation.evaluate())
pascal_metrics = {self._metric_names[0]: mean_ap}
if self._evaluate_corlocs:
pascal_metrics[self._metric_names[1]] = mean_corloc
category_index = label_map_util.create_category_index(self._categories)
for idx in range(per_class_ap.size):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = six.text_type(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name)
if six.PY2:
category_name = category_name.encode('ascii', 'ignore')
display_name = (
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_ap[idx]
# Optionally add precision and recall values
if self._evaluate_precision_recall:
display_name = (
self._metric_prefix +
'PerformanceByCategory/Precision@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_precision[idx]
display_name = (
self._metric_prefix +
'PerformanceByCategory/Recall@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_recall[idx]
        # Optionally add CorLoc metrics.
if self._evaluate_corlocs:
display_name = (
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_corloc[idx]
return pascal_metrics
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
    # Mirror the constructor arguments so that clear() restores an identically
    # configured evaluation.
    self._evaluation = ObjectDetectionEvaluation(
        num_groundtruth_classes=self._num_classes,
        matching_iou_threshold=self._matching_iou_threshold,
        recall_lower_bound=self._recall_lower_bound,
        recall_upper_bound=self._recall_upper_bound,
        use_weighted_mean_ap=self._use_weighted_mean_ap,
        label_id_offset=self._label_id_offset,
        group_of_weight=self._group_of_weight,
        nms_iou_threshold=self._nms_iou_threshold,
        nms_max_output_boxes=self._nms_max_output_boxes)
self._image_ids.clear()
def add_eval_dict(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
# remove unexpected fields
    eval_dict_filtered = {
        key: value
        for key, value in eval_dict.items()
        if key in self._expected_keys
    }
eval_dict_keys = list(eval_dict_filtered.keys())
def update_op(image_id, *eval_dict_batched_as_list):
"""Update operation that adds batch of images to ObjectDetectionEvaluator.
Args:
image_id: image id (single id or an array)
*eval_dict_batched_as_list: the values of the dictionary of tensors.
"""
if np.isscalar(image_id):
single_example_dict = dict(
zip(eval_dict_keys, eval_dict_batched_as_list))
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
else:
for unzipped_tuple in zip(*eval_dict_batched_as_list):
single_example_dict = dict(zip(eval_dict_keys, unzipped_tuple))
image_id = single_example_dict[standard_fields.InputDataFields.key]
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
args = [eval_dict_filtered[standard_fields.InputDataFields.key]]
args.extend(six.itervalues(eval_dict_filtered))
return tf.py_func(update_op, args, [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example(). It must contain
standard_fields.InputDataFields.key.
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
update_op = self.add_eval_dict(eval_dict)
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[self._metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {self._metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in self._metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using PASCAL metrics."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000):
super(PascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalBoxes',
use_weighted_mean_ap=False,
nms_iou_threshold=nms_iou_threshold,
nms_max_output_boxes=nms_max_output_boxes)
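# A minimal end-to-end usage sketch for the evaluator above (illustrative
# only; the image id and all array values are hypothetical):
#
#   evaluator = PascalDetectionEvaluator(
#       categories=[{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}])
#   evaluator.add_single_ground_truth_image_info(
#       'image_0', {
#           standard_fields.InputDataFields.groundtruth_boxes:
#               np.array([[10., 10., 50., 50.]], dtype=np.float32),
#           standard_fields.InputDataFields.groundtruth_classes:
#               np.array([1], dtype=np.int32),
#       })
#   evaluator.add_single_detected_image_info(
#       'image_0', {
#           standard_fields.DetectionResultFields.detection_boxes:
#               np.array([[12., 11., 49., 48.]], dtype=np.float32),
#           standard_fields.DetectionResultFields.detection_scores:
#               np.array([0.9], dtype=np.float32),
#           standard_fields.DetectionResultFields.detection_classes:
#               np.array([1], dtype=np.int32),
#       })
#   metrics = evaluator.evaluate()
#   # e.g. metrics['PascalBoxes_Precision/mAP@0.5IOU'] -> 1.0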
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using weighted PASCAL metrics.
  Weighted PASCAL metrics compute the mean average precision as the average
  precision given the scores and tp_fp_labels of all classes pooled together.
  In comparison, plain PASCAL metrics compute the mean average precision as
  the mean of the per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalBoxes',
use_weighted_mean_ap=True)
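# Illustrative contrast between the two variants: given per-class APs of 1.0
# (a class with 1 instance) and 0.0 (a class with 99 instances), plain PASCAL
# mAP is (1.0 + 0.0) / 2 = 0.5, whereas the weighted variant pools all 100
# (score, tp_fp_label) pairs into a single AP computation, so the result is
# dominated by the frequent class.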
class PrecisionAtRecallDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using precision@recall metrics."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
recall_lower_bound=0.0,
recall_upper_bound=1.0):
super(PrecisionAtRecallDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
recall_lower_bound=recall_lower_bound,
recall_upper_bound=recall_upper_bound,
evaluate_corlocs=False,
metric_prefix='PrecisionAtRecallBoxes',
use_weighted_mean_ap=False)
class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalMasks',
use_weighted_mean_ap=False,
evaluate_masks=True)
class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using weighted PASCAL metrics.
  Weighted PASCAL metrics compute the mean average precision as the average
  precision given the scores and tp_fp_labels of all classes pooled together.
  In comparison, plain PASCAL metrics compute the mean average precision as
  the mean of the per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalMasks',
use_weighted_mean_ap=True,
evaluate_masks=True)
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using Open Images V2 metrics.
  Open Images V2 introduces the group-of type of bounding box, and this metric
  handles those boxes appropriately.
"""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_masks=False,
evaluate_corlocs=False,
metric_prefix='OpenImagesV2',
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_masks: if True, evaluator evaluates masks.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
metric_prefix: Prefix name of the metric.
group_of_weight: Weight of the group-of bounding box. If set to 0 (default
for Open Images V2 detection protocol), detections of the correct class
within a group-of box are ignored. If weight is > 0, then if at least
one detection falls within a group-of box with matching_iou_threshold,
weight group_of_weight is added to true positives. Consequently, if no
detection falls within a group-of box, weight group_of_weight is added
to false negatives.
"""
super(OpenImagesDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_corlocs,
metric_prefix=metric_prefix,
group_of_weight=group_of_weight,
evaluate_masks=evaluate_masks)
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_group_of,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
])
if evaluate_masks:
self._expected_keys.add(
standard_fields.InputDataFields.groundtruth_instance_masks)
self._expected_keys.add(
standard_fields.DetectionResultFields.detection_masks)
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_group_of: Optional length M
numpy boolean array denoting whether a groundtruth box contains a
group of instances.
"""
if image_id in self._image_ids:
logging.warning('Image with id %s already added.', image_id)
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the group_of flag from the groundtruth_dict only when the key is
    # present and its array is non-empty (or when there are no groundtruth
    # boxes at all for this image); otherwise fall back to None.
if (standard_fields.InputDataFields.groundtruth_group_of in six.viewkeys(
groundtruth_dict) and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_group_of]
.size or not groundtruth_classes.size)):
groundtruth_group_of = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_group_of]
else:
groundtruth_group_of = None
if not len(self._image_ids) % 1000:
logging.warning(
'image %s does not have groundtruth group_of flag specified',
image_id)
if self._evaluate_masks:
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
else:
groundtruth_masks = None
self._evaluation.add_single_ground_truth_image_info(
image_id,
groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=groundtruth_group_of,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
class OpenImagesChallengeEvaluator(OpenImagesDetectionEvaluator):
"""A class implements Open Images Challenge metrics.
Both Detection and Instance Segmentation evaluation metrics are implemented.
Open Images Challenge Detection metric has two major changes in comparison
with Open Images V2 detection metric:
- a custom weight might be specified for detecting an object contained in
a group-of box.
- verified image-level labels should be explicitelly provided for
evaluation: in case in image has neither positive nor negative image level
label of class c, all detections of this class on this image will be
ignored.
Open Images Challenge Instance Segmentation metric allows to measure per
formance of models in case of incomplete annotations: some instances are
annotations only on box level and some - on image-level. In addition,
image-level labels are taken into account as in detection metric.
Open Images Challenge Detection metric default parameters:
evaluate_masks = False
group_of_weight = 1.0
Open Images Challenge Instance Segmentation metric default parameters:
evaluate_masks = True
(group_of_weight will not matter)
"""
def __init__(self,
categories,
evaluate_masks=False,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
group_of_weight=1.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
evaluate_masks: set to true for instance segmentation metric and to false
for detection metric.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
"""
if not evaluate_masks:
metrics_prefix = 'OpenImagesDetectionChallenge'
else:
metrics_prefix = 'OpenImagesInstanceSegmentationChallenge'
super(OpenImagesChallengeEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_masks=evaluate_masks,
evaluate_corlocs=evaluate_corlocs,
group_of_weight=group_of_weight,
metric_prefix=metrics_prefix)
self._evaluatable_labels = {}
    # Only one of the two fields has to be provided; both are accepted for
    # compatibility with the previous codebase.
self._expected_keys.update([
standard_fields.InputDataFields.groundtruth_image_classes,
standard_fields.InputDataFields.groundtruth_labeled_classes])
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_image_classes: integer 1D
numpy array containing all classes for which labels are verified.
standard_fields.InputDataFields.groundtruth_group_of: Optional length M
numpy boolean array denoting whether a groundtruth box contains a
group of instances.
"""
super(OpenImagesChallengeEvaluator,
self).add_single_ground_truth_image_info(image_id, groundtruth_dict)
input_fields = standard_fields.InputDataFields
groundtruth_classes = (
groundtruth_dict[input_fields.groundtruth_classes] -
self._label_id_offset)
image_classes = np.array([], dtype=int)
if input_fields.groundtruth_image_classes in groundtruth_dict:
image_classes = groundtruth_dict[input_fields.groundtruth_image_classes]
elif input_fields.groundtruth_labeled_classes in groundtruth_dict:
image_classes = groundtruth_dict[input_fields.groundtruth_labeled_classes]
image_classes -= self._label_id_offset
self._evaluatable_labels[image_id] = np.unique(
np.concatenate((image_classes, groundtruth_classes)))
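  # For example, if the verified image-level labels for an image are {cat} and
  # its box-level labels are {dog}, self._evaluatable_labels for that image is
  # {cat, dog}; detections of any other class on this image are dropped in
  # add_single_detected_image_info below.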
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
if image_id not in self._image_ids:
      # Groundtruth is expected to be added before detections. If this image
      # has no groundtruth, register it with an empty set of evaluatable
      # labels so that all of its detections are ignored.
self._image_ids.update([image_id])
self._evaluatable_labels[image_id] = np.array([])
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
allowed_classes = np.where(
np.isin(detection_classes, self._evaluatable_labels[image_id]))
detection_classes = detection_classes[allowed_classes]
detected_boxes = detections_dict[
standard_fields.DetectionResultFields.detection_boxes][allowed_classes]
detected_scores = detections_dict[
standard_fields.DetectionResultFields.detection_scores][allowed_classes]
if self._evaluate_masks:
detection_masks = detections_dict[standard_fields.DetectionResultFields
.detection_masks][allowed_classes]
else:
detection_masks = None
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def clear(self):
"""Clears stored data."""
super(OpenImagesChallengeEvaluator, self).clear()
self._evaluatable_labels.clear()
ObjectDetectionEvalMetrics = collections.namedtuple(
'ObjectDetectionEvalMetrics', [
'average_precisions', 'mean_ap', 'precisions', 'recalls', 'corlocs',
'mean_corloc'
])
class OpenImagesDetectionChallengeEvaluator(OpenImagesChallengeEvaluator):
"""A class implements Open Images Detection Challenge metric."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
"""
super(OpenImagesDetectionChallengeEvaluator, self).__init__(
categories=categories,
evaluate_masks=False,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
group_of_weight=1.0)
class OpenImagesInstanceSegmentationChallengeEvaluator(
OpenImagesChallengeEvaluator):
"""A class implements Open Images Instance Segmentation Challenge metric."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
"""
super(OpenImagesInstanceSegmentationChallengeEvaluator, self).__init__(
categories=categories,
evaluate_masks=True,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
group_of_weight=0.0)
ObjectDetectionEvaluationState = collections.namedtuple(
'ObjectDetectionEvaluationState', [
'num_gt_instances_per_class',
'scores_per_class',
'tp_fp_labels_per_class',
'num_gt_imgs_per_class',
'num_images_correctly_detected_per_class',
])
class ObjectDetectionEvaluation(object):
"""Internal implementation of Pascal object detection metrics."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
use_weighted_mean_ap=False,
label_id_offset=0,
group_of_weight=0.0,
per_image_eval_class=per_image_evaluation.PerImageEvaluation):
"""Constructor.
Args:
num_groundtruth_classes: Number of ground-truth classes.
matching_iou_threshold: IOU threshold used for matching detected boxes to
ground-truth boxes.
nms_iou_threshold: IOU threshold used for non-maximum suppression.
nms_max_output_boxes: Maximum number of boxes returned by non-maximum
suppression.
recall_lower_bound: lower bound of recall operating area
recall_upper_bound: upper bound of recall operating area
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
label_id_offset: The label id offset.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
per_image_eval_class: The class that contains functions for computing per
image metrics.
Raises:
ValueError: if num_groundtruth_classes is smaller than 1.
"""
if num_groundtruth_classes < 1:
raise ValueError('Need at least 1 groundtruth class for evaluation.')
self.per_image_eval = per_image_eval_class(
num_groundtruth_classes=num_groundtruth_classes,
matching_iou_threshold=matching_iou_threshold,
nms_iou_threshold=nms_iou_threshold,
nms_max_output_boxes=nms_max_output_boxes,
group_of_weight=group_of_weight)
self.recall_lower_bound = recall_lower_bound
self.recall_upper_bound = recall_upper_bound
self.group_of_weight = group_of_weight
self.num_class = num_groundtruth_classes
self.use_weighted_mean_ap = use_weighted_mean_ap
self.label_id_offset = label_id_offset
self.groundtruth_boxes = {}
self.groundtruth_class_labels = {}
self.groundtruth_masks = {}
self.groundtruth_is_difficult_list = {}
self.groundtruth_is_group_of_list = {}
self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
self._initialize_detections()
def _initialize_detections(self):
"""Initializes internal data structures."""
self.detection_keys = set()
self.scores_per_class = [[] for _ in range(self.num_class)]
self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
self.average_precision_per_class = np.empty(self.num_class, dtype=float)
self.average_precision_per_class.fill(np.nan)
self.precisions_per_class = [np.nan] * self.num_class
self.recalls_per_class = [np.nan] * self.num_class
self.sum_tp_class = [np.nan] * self.num_class
self.corloc_per_class = np.ones(self.num_class, dtype=float)
def clear_detections(self):
self._initialize_detections()
def get_internal_state(self):
"""Returns internal state of the evaluation.
    NOTE: only evaluation results will be returned
    (e.g. no raw predictions or groundtruth).
Returns:
internal state of the evaluation.
"""
return ObjectDetectionEvaluationState(
self.num_gt_instances_per_class, self.scores_per_class,
self.tp_fp_labels_per_class, self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
def merge_internal_state(self, state_tuple):
"""Merges internal state of the evaluation with the current state.
Args:
state_tuple: state tuple representing evaluation state: should be of type
ObjectDetectionEvaluationState.
"""
(num_gt_instances_per_class, scores_per_class, tp_fp_labels_per_class,
num_gt_imgs_per_class, num_images_correctly_detected_per_class) = (
state_tuple)
assert self.num_class == len(num_gt_instances_per_class)
assert self.num_class == len(scores_per_class)
assert self.num_class == len(tp_fp_labels_per_class)
for i in range(self.num_class):
self.scores_per_class[i].extend(scores_per_class[i])
self.tp_fp_labels_per_class[i].extend(tp_fp_labels_per_class[i])
self.num_gt_instances_per_class[i] += num_gt_instances_per_class[i]
self.num_gt_imgs_per_class[i] += num_gt_imgs_per_class[i]
self.num_images_correctly_detected_per_class[
i] += num_images_correctly_detected_per_class[i]
def add_single_ground_truth_image_info(self,
image_key,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=None,
groundtruth_masks=None):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_boxes: float32 numpy array of shape [num_boxes, 4] containing
`num_boxes` groundtruth boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
groundtruth_class_labels: integer numpy array of shape [num_boxes]
containing 0-indexed groundtruth classes for the boxes.
groundtruth_is_difficult_list: A length M numpy boolean array denoting
whether a ground truth box is a difficult instance or not. To support
the case that no boxes are difficult, it is by default set as None.
groundtruth_is_group_of_list: A length M numpy boolean array denoting
whether a ground truth box is a group-of box or not. To support the case
that no boxes are groups-of, it is by default set as None.
groundtruth_masks: uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` groundtruth masks. The mask values range from 0
to 1.
"""
if image_key in self.groundtruth_boxes:
logging.warning(
'image %s has already been added to the ground truth database.',
image_key)
return
self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
self.groundtruth_masks[image_key] = groundtruth_masks
if groundtruth_is_difficult_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_difficult_list[
image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
if groundtruth_is_group_of_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
if groundtruth_masks is None:
num_boxes = groundtruth_boxes.shape[0]
mask_presence_indicator = np.zeros(num_boxes, dtype=bool)
else:
mask_presence_indicator = (np.sum(groundtruth_masks,
axis=(1, 2)) == 0).astype(dtype=bool)
self.groundtruth_is_group_of_list[
image_key] = groundtruth_is_group_of_list.astype(dtype=bool)
self._update_ground_truth_statistics(
groundtruth_class_labels,
groundtruth_is_difficult_list.astype(dtype=bool)
| mask_presence_indicator, # ignore boxes without masks
groundtruth_is_group_of_list.astype(dtype=bool))
def add_single_detected_image_info(self,
image_key,
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_boxes: float32 numpy array of shape [num_boxes, 4] containing
`num_boxes` detection boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
detected_scores: float32 numpy array of shape [num_boxes] containing
detection scores for the boxes.
detected_class_labels: integer numpy array of shape [num_boxes] containing
0-indexed detection classes for the boxes.
detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` detection masks with values ranging between 0 and
1.
Raises:
ValueError: if the number of boxes, scores and class labels differ in
length.
"""
if (len(detected_boxes) != len(detected_scores) or
len(detected_boxes) != len(detected_class_labels)):
      raise ValueError(
          'detected_boxes, detected_scores and '
          'detected_class_labels should all have same lengths. Got '
          '[%d, %d, %d]' % (len(detected_boxes), len(detected_scores),
                            len(detected_class_labels)))
if image_key in self.detection_keys:
logging.warning(
'image %s has already been added to the detection result database',
image_key)
return
self.detection_keys.add(image_key)
if image_key in self.groundtruth_boxes:
groundtruth_boxes = self.groundtruth_boxes[image_key]
groundtruth_class_labels = self.groundtruth_class_labels[image_key]
      # Masks are popped rather than looked up because we do not want to keep
      # all masks in memory, which can cause memory overflow.
groundtruth_masks = self.groundtruth_masks.pop(image_key)
groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
image_key]
groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
image_key]
else:
groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
groundtruth_class_labels = np.array([], dtype=int)
if detected_masks is None:
groundtruth_masks = None
else:
groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
groundtruth_is_difficult_list = np.array([], dtype=bool)
groundtruth_is_group_of_list = np.array([], dtype=bool)
scores, tp_fp_labels, is_class_correctly_detected_in_image = (
self.per_image_eval.compute_object_detection_metrics(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks))
for i in range(self.num_class):
if scores[i].shape[0] > 0:
self.scores_per_class[i].append(scores[i])
self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
(self.num_images_correctly_detected_per_class
) += is_class_correctly_detected_in_image
def _update_ground_truth_statistics(self, groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list):
"""Update grouth truth statitistics.
1. Difficult boxes are ignored when counting the number of ground truth
instances as done in Pascal VOC devkit.
2. Difficult boxes are treated as normal boxes when computing CorLoc related
statitistics.
Args:
groundtruth_class_labels: An integer numpy array of length M, representing
M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box is a group-of box or not
"""
for class_index in range(self.num_class):
num_gt_instances = np.sum(groundtruth_class_labels[
~groundtruth_is_difficult_list
& ~groundtruth_is_group_of_list] == class_index)
num_groupof_gt_instances = self.group_of_weight * np.sum(
groundtruth_class_labels[
groundtruth_is_group_of_list
& ~groundtruth_is_difficult_list] == class_index)
self.num_gt_instances_per_class[
class_index] += num_gt_instances + num_groupof_gt_instances
if np.any(groundtruth_class_labels == class_index):
self.num_gt_imgs_per_class[class_index] += 1
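    # Worked example for the update above: with group_of_weight = 0.5 and a
    # class that has 3 normal boxes and 2 group-of boxes (none difficult),
    # num_gt_instances_per_class for that class grows by 3 + 0.5 * 2 = 4.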
def evaluate(self):
"""Compute evaluation result.
Returns:
      A named tuple with the following fields -
        average_precisions: float numpy array of the average precision for
          each class.
        mean_ap: mean average precision over all classes, float scalar.
        precisions: List of precisions, each precision is a float numpy array.
        recalls: List of recalls, each recall is a float numpy array.
        corlocs: float numpy array of per-class CorLoc scores.
        mean_corloc: mean CorLoc score over all classes, float scalar.
"""
if (self.num_gt_instances_per_class == 0).any():
logging.warning(
'The following classes have no ground truth examples: %s',
np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
self.label_id_offset)
if self.use_weighted_mean_ap:
all_scores = np.array([], dtype=float)
all_tp_fp_labels = np.array([], dtype=bool)
for class_index in range(self.num_class):
if self.num_gt_instances_per_class[class_index] == 0:
continue
if not self.scores_per_class[class_index]:
scores = np.array([], dtype=float)
tp_fp_labels = np.array([], dtype=float)
else:
scores = np.concatenate(self.scores_per_class[class_index])
tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
if self.use_weighted_mean_ap:
all_scores = np.append(all_scores, scores)
all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
precision, recall = metrics.compute_precision_recall(
scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      recall_within_bound_indices = [
          index for index, value in enumerate(recall)
          if self.recall_lower_bound <= value <= self.recall_upper_bound
      ]
recall_within_bound = recall[recall_within_bound_indices]
precision_within_bound = precision[recall_within_bound_indices]
self.precisions_per_class[class_index] = precision_within_bound
self.recalls_per_class[class_index] = recall_within_bound
self.sum_tp_class[class_index] = tp_fp_labels.sum()
average_precision = metrics.compute_average_precision(
precision_within_bound, recall_within_bound)
self.average_precision_per_class[class_index] = average_precision
logging.info('average_precision: %f', average_precision)
self.corloc_per_class = metrics.compute_cor_loc(
self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
if self.use_weighted_mean_ap:
num_gt_instances = np.sum(self.num_gt_instances_per_class)
precision, recall = metrics.compute_precision_recall(
all_scores, all_tp_fp_labels, num_gt_instances)
      recall_within_bound_indices = [
          index for index, value in enumerate(recall)
          if self.recall_lower_bound <= value <= self.recall_upper_bound
      ]
recall_within_bound = recall[recall_within_bound_indices]
precision_within_bound = precision[recall_within_bound_indices]
mean_ap = metrics.compute_average_precision(precision_within_bound,
recall_within_bound)
else:
mean_ap = np.nanmean(self.average_precision_per_class)
mean_corloc = np.nanmean(self.corloc_per_class)
return ObjectDetectionEvalMetrics(self.average_precision_per_class, mean_ap,
self.precisions_per_class,
self.recalls_per_class,
self.corloc_per_class, mean_corloc)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/object_detection_evaluation.py | object_detection_evaluation.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_mask_list_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_box_mask_list_ops
class AreaRelatedTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks1 = np.stack([masks1_0, masks1_1])
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0]],
dtype=np.uint8)
masks2 = np.stack([masks2_0, masks2_1, masks2_2])
self.box_mask_list1 = np_box_mask_list.BoxMaskList(
box_data=boxes1, mask_data=masks1)
self.box_mask_list2 = np_box_mask_list.BoxMaskList(
box_data=boxes2, mask_data=masks2)
def test_area(self):
areas = np_box_mask_list_ops.area(self.box_mask_list1)
expected_areas = np.array([8.0, 10.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def test_intersection(self):
intersection = np_box_mask_list_ops.intersection(self.box_mask_list1,
self.box_mask_list2)
expected_intersection = np.array([[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def test_iou(self):
iou = np_box_mask_list_ops.iou(self.box_mask_list1, self.box_mask_list2)
expected_iou = np.array(
[[1.0, 0.0, 8.0 / 25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=float)
self.assertAllClose(iou, expected_iou)
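    # These ops compare the masks, not the boxes: e.g. iou[0][2] above is
    # |masks1_0 & masks2_2| / |masks1_0 | masks2_2| = 8 / (8 + 25 - 8) = 8/25.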
def test_ioa(self):
ioa21 = np_box_mask_list_ops.ioa(self.box_mask_list1, self.box_mask_list2)
expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0],
[0.0, 9.0/15.0, 7.0/25.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array(
[[4.0, 3.0, 7.0, 6.0], [5.0, 6.0, 10.0, 10.0]], dtype=float)
boxes2 = np.array(
[[3.0, 4.0, 6.0, 8.0], [5.0, 6.0, 10.0, 10.0], [1.0, 1.0, 10.0, 10.0]],
dtype=float)
masks1 = np.array(
[[[0, 1, 0], [1, 1, 0], [0, 0, 0]], [[0, 1, 1], [0, 1, 1], [0, 1, 1]]],
dtype=np.uint8)
masks2 = np.array(
[[[0, 1, 0], [1, 1, 1], [0, 0, 0]], [[0, 1, 0], [0, 0, 1], [0, 1, 1]],
[[0, 1, 1], [0, 1, 1], [0, 1, 1]]],
dtype=np.uint8)
self.boxes1 = boxes1
self.boxes2 = boxes2
self.masks1 = masks1
self.masks2 = masks2
def test_with_no_scores_field(self):
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=self.boxes1, mask_data=self.masks1)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_mask_list_ops.non_max_suppression(
box_mask_list, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_one(self):
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=self.boxes2, mask_data=self.masks2)
box_mask_list.add_field('scores',
np.array([.9, .75, .6], dtype=float))
max_output_size = 1
iou_threshold = 1. # No NMS
expected_boxes = np.array([[3.0, 4.0, 6.0, 8.0]], dtype=float)
expected_masks = np.array(
[[[0, 1, 0], [1, 1, 1], [0, 0, 0]]], dtype=np.uint8)
nms_box_mask_list = np_box_mask_list_ops.non_max_suppression(
box_mask_list, max_output_size, iou_threshold)
self.assertAllClose(nms_box_mask_list.get(), expected_boxes)
self.assertAllClose(nms_box_mask_list.get_masks(), expected_masks)
def test_multiclass_nms(self):
boxes = np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
mask0 = np.array([[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
mask1 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
mask2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]],
dtype=np.uint8)
masks = np.stack([mask0, mask1, mask2])
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
scores = np.array([[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31]],
dtype=np.float32)
box_mask_list.add_field('scores', scores)
box_mask_list_clean = np_box_mask_list_ops.multi_class_non_max_suppression(
box_mask_list, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)
scores_clean = box_mask_list_clean.get_field('scores')
classes_clean = box_mask_list_clean.get_field('classes')
boxes = box_mask_list_clean.get()
masks = box_mask_list_clean.get_masks()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
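    # Sanity check of the expected values: NMS runs per class (per column of
    # `scores`) and, for box-mask lists, overlap is measured on the masks.
    # For class 0, boxes 1 (score 0.7) and 2 (score 0.4) pass the 0.25
    # threshold; their mask IoU is 3 / (6 + 10 - 3) = 3/13 ~= 0.23 > 0.1, so
    # the lower-scoring box is suppressed.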
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_mask_list_ops_test.py | np_box_mask_list_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.category_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from object_detection.utils import category_util
class EvalUtilTest(tf.test.TestCase):
def test_load_categories_from_csv_file(self):
csv_data = """
0,"cat"
1,"dog"
2,"bird"
""".strip(' ')
csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
with tf.gfile.Open(csv_path, 'wb') as f:
f.write(csv_data)
categories = category_util.load_categories_from_csv_file(csv_path)
self.assertTrue({'id': 0, 'name': 'cat'} in categories)
self.assertTrue({'id': 1, 'name': 'dog'} in categories)
self.assertTrue({'id': 2, 'name': 'bird'} in categories)
def test_save_categories_to_csv_file(self):
categories = [
{'id': 0, 'name': 'cat'},
{'id': 1, 'name': 'dog'},
{'id': 2, 'name': 'bird'},
]
csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
category_util.save_categories_to_csv_file(categories, csv_path)
saved_categories = category_util.load_categories_from_csv_file(csv_path)
self.assertEqual(saved_categories, categories)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/category_util_test.py | category_util_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.spatial_transform_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import test_case
class BoxGridCoordinateTest(test_case.TestCase):
def test_4x4_grid(self):
boxes = np.array([[[0., 0., 6., 6.]]], dtype=np.float32)
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_y=4, size_x=4)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[0.75, 2.25, 3.75, 5.25]]])
expected_grid_x = np.array([[[0.75, 2.25, 3.75, 5.25]]])
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
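    # Consistent with the values above, the grid points are the centers of
    # `size` equal bins along each box edge:
    #   grid[i] = box_min + (i + 0.5) * (box_max - box_min) / size
    # e.g. 0 + 0.5 * 6 / 4 = 0.75 and 0 + 3.5 * 6 / 4 = 5.25.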
def test_2x2_grid(self):
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_x=2, size_y=2)
boxes = np.array([[[0., 0., 6., 3.],
[0., 0., 3., 6.]]], dtype=np.float32)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[1.5, 4.5],
[0.75, 2.25]]])
expected_grid_x = np.array([[[0.75, 2.25],
[1.5, 4.5]]])
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
def test_2x4_grid(self):
boxes = np.array([[[0., 0., 6., 6.]]], dtype=np.float32)
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_y=2, size_x=4)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[1.5, 4.5]]])
expected_grid_x = np.array([[[0.75, 2.25, 3.75, 5.25]]])
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
def test_2x4_grid_with_aligned_corner(self):
boxes = np.array([[[0., 0., 6., 6.]]], dtype=np.float32)
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_y=2, size_x=4,
align_corners=True)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[0, 6]]])
expected_grid_x = np.array([[[0, 2, 4, 6]]])
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
def test_offgrid_boxes(self):
boxes = np.array([[[1.2, 2.3, 7.2, 8.3]]], dtype=np.float32)
def graph_fn(boxes):
return spatial_ops.box_grid_coordinate_vectors(boxes, size_y=4, size_x=4)
grid_y, grid_x = self.execute(graph_fn, [boxes])
expected_grid_y = np.array([[[0.75, 2.25, 3.75, 5.25]]]) + 1.2
expected_grid_x = np.array([[[0.75, 2.25, 3.75, 5.25]]]) + 2.3
self.assertAllClose(expected_grid_y, grid_y)
self.assertAllClose(expected_grid_x, grid_x)
class FeatureGridCoordinateTest(test_case.TestCase):
def test_snap_box_points_to_nearest_4_pixels(self):
box_grid_y = np.array([[[1.5, 4.6]]], dtype=np.float32)
box_grid_x = np.array([[[2.4, 5.3]]], dtype=np.float32)
def graph_fn(box_grid_y, box_grid_x):
return spatial_ops.feature_grid_coordinate_vectors(box_grid_y, box_grid_x)
(feature_grid_y0,
feature_grid_x0, feature_grid_y1, feature_grid_x1) = self.execute(
graph_fn, [box_grid_y, box_grid_x])
expected_grid_y0 = np.array([[[1, 4]]])
expected_grid_y1 = np.array([[[2, 5]]])
expected_grid_x0 = np.array([[[2, 5]]])
expected_grid_x1 = np.array([[[3, 6]]])
self.assertAllEqual(expected_grid_y0, feature_grid_y0)
self.assertAllEqual(expected_grid_y1, feature_grid_y1)
self.assertAllEqual(expected_grid_x0, feature_grid_x0)
self.assertAllEqual(expected_grid_x1, feature_grid_x1)
def test_snap_box_points_outside_pixel_grid_to_nearest_neighbor(self):
box_grid_y = np.array([[[0.33, 1., 1.66]]], dtype=np.float32)
box_grid_x = np.array([[[-0.5, 1., 1.66]]], dtype=np.float32)
def graph_fn(box_grid_y, box_grid_x):
return spatial_ops.feature_grid_coordinate_vectors(box_grid_y, box_grid_x)
(feature_grid_y0,
feature_grid_x0, feature_grid_y1, feature_grid_x1) = self.execute(
graph_fn, [box_grid_y, box_grid_x])
expected_grid_y0 = np.array([[[0, 1, 1]]])
expected_grid_y1 = np.array([[[1, 2, 2]]])
expected_grid_x0 = np.array([[[-1, 1, 1]]])
expected_grid_x1 = np.array([[[0, 2, 2]]])
self.assertAllEqual(expected_grid_y0, feature_grid_y0)
self.assertAllEqual(expected_grid_y1, feature_grid_y1)
self.assertAllEqual(expected_grid_x0, feature_grid_x0)
self.assertAllEqual(expected_grid_x1, feature_grid_x1)
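    # I.e. the two feature grid points bracketing each box grid coordinate are
    # floor(v) and floor(v) + 1, e.g. 4.6 -> (4, 5) and -0.5 -> (-1, 0).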
class RavelIndicesTest(test_case.TestCase):
def test_feature_point_indices(self):
feature_grid_y = np.array([[[1, 2, 4, 5],
[2, 3, 4, 5]]], dtype=np.int32)
feature_grid_x = np.array([[[1, 3, 4],
[2, 3, 4]]], dtype=np.int32)
num_feature_levels = 2
feature_height = 6
feature_width = 5
box_levels = np.array([[0, 1]], dtype=np.int32)
def graph_fn(feature_grid_y, feature_grid_x, box_levels):
return spatial_ops.ravel_indices(feature_grid_y, feature_grid_x,
num_feature_levels, feature_height,
feature_width, box_levels)
indices = self.execute(graph_fn,
[feature_grid_y, feature_grid_x, box_levels])
expected_indices = np.array([[[[6, 8, 9],
[11, 13, 14],
[21, 23, 24],
[26, 28, 29]],
[[42, 43, 44],
[47, 48, 49],
[52, 53, 54],
[57, 58, 59]]]])
self.assertAllEqual(expected_indices.flatten(), indices)
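    # The flattened index appears to follow
    #   ((batch * num_levels + level) * height + y) * width + x,
    # e.g. the level-1 entry at (y=2, x=2) gives ((0*2 + 1)*6 + 2)*5 + 2 = 42.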
class MultiLevelRoIAlignTest(test_case.TestCase):
def test_perfectly_aligned_cell_center_and_feature_pixels(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[2, 2])
image = np.arange(25).reshape(1, 5, 5, 1).astype(np.float32)
boxes = np.array([[[0, 0, 1.0, 1.0]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = [[[[[6], [8]],
[[16], [18]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
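    # How the expected values arise (assuming the op scales normalized boxes
    # by (feature_size - 1)): the box [0, 0, 1, 1] maps to [0, 0, 4, 4] on the
    # 5x5 map, the 2x2 cell centers land exactly on pixels (1, 1), (1, 3),
    # (3, 1) and (3, 3), whose values in np.arange(25) are 6, 8, 16 and 18.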
def test_interpolation_with_4_points_per_bin(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[1, 1],
num_samples_per_cell_y=2,
num_samples_per_cell_x=2)
image = np.array([[[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]],
dtype=np.float32)
boxes = np.array([[[1./3, 1./3, 2./3, 2./3]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = [[[[[(7.25 + 7.75 + 9.25 + 9.75) / 4]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(expected_output, crop_output)
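    # E.g. the first sample: with the box mapped to [1, 1, 2, 2] on the 4x4
    # map, the 2x2 sample points sit at offsets 0.25 and 0.75 of the single
    # bin, and bilinear interpolation at (y, x) = (1.25, 1.25) gives
    # 0.75*0.75*6 + 0.75*0.25*7 + 0.25*0.75*10 + 0.25*0.25*11 = 7.25.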
def test_1x1_crop_on_2x2_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[1, 1])
image = np.array([[[[1], [2]],
[[3], [4]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = [[[[[2.5]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
def test_3x3_crops_on_2x2_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[3, 3])
image = np.array([[[[1], [2]],
[[3], [4]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = [[[[[9./6], [11./6], [13./6]],
[[13./6], [15./6], [17./6]],
[[17./6], [19./6], [21./6]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
def test_2x2_crops_on_3x3_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[2, 2])
image = np.array([[[[1], [2], [3]],
[[4], [5], [6]],
[[7], [8], [9]]]],
dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]]],
dtype=np.float32)
box_levels = np.array([[0, 0]], dtype=np.int32)
expected_output = [[[[[3], [4]],
[[6], [7]]],
[[[2.], [2.5]],
[[3.5], [4.]]]]]
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
def test_2x2_crop_on_4x4_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[2, 2])
image = np.array([[[[0], [1], [2], [3]],
[[4], [5], [6], [7]],
[[8], [9], [10], [11]],
[[12], [13], [14], [15]]]],
dtype=np.float32)
boxes = np.array([[[0, 0, 2./3, 2./3],
[0, 0, 2./3, 1.0]]],
dtype=np.float32)
box_levels = np.array([[0, 0]], dtype=np.int32)
expected_output = np.array([[[[[2.5], [3.5]],
[[6.5], [7.5]]],
[[[2.75], [4.25]],
[[6.75], [8.25]]]]])
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(expected_output, crop_output)
def test_extrapolate_3x3_crop_on_2x2_features(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[3, 3])
image = np.array([[[[1], [2]],
[[3], [4]]]], dtype=np.float32)
boxes = np.array([[[-1, -1, 2, 2]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = np.array([[[[[0.25], [0.75], [0.5]],
[[1.0], [2.5], [1.5]],
[[0.75], [1.75], [1]]]]])
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(expected_output, crop_output)
def test_extrapolate_with_non_zero_value(self):
def graph_fn(image, boxes, levels):
return spatial_ops.multilevel_roi_align([image],
boxes,
levels,
output_size=[3, 3],
extrapolation_value=2.0)
image = np.array([[[[4], [4]],
[[4], [4]]]], dtype=np.float32)
boxes = np.array([[[-1, -1, 2, 2]]], dtype=np.float32)
box_levels = np.array([[0]], dtype=np.int32)
expected_output = np.array([[[[[2.5], [3.0], [2.5]],
[[3.0], [4.0], [3.0]],
[[2.5], [3.0], [2.5]]]]])
crop_output = self.execute(graph_fn, [image, boxes, box_levels])
self.assertAllClose(expected_output, crop_output)
def test_multilevel_roi_align(self):
image_size = 640
fpn_min_level = 2
fpn_max_level = 5
batch_size = 1
output_size = [2, 2]
num_filters = 1
features = []
for level in range(fpn_min_level, fpn_max_level + 1):
feat_size = int(image_size / 2**level)
features.append(
float(level) *
np.ones([batch_size, feat_size, feat_size, num_filters],
dtype=np.float32))
boxes = np.array(
[
[
[0, 0, 111, 111], # Level 2.
[0, 0, 113, 113], # Level 3.
[0, 0, 223, 223], # Level 3.
[0, 0, 225, 225], # Level 4.
[0, 0, 449, 449] # Level 5.
],
],
dtype=np.float32) / image_size
levels = np.array([[0, 1, 1, 2, 3]], dtype=np.int32)
def graph_fn(feature1, feature2, feature3, feature4, boxes, levels):
roi_features = spatial_ops.multilevel_roi_align(
[feature1, feature2, feature3, feature4],
boxes,
levels,
output_size)
return roi_features
roi_features = self.execute(graph_fn, features + [boxes, levels])
self.assertAllClose(roi_features[0][0], 2 * np.ones((2, 2, 1)))
self.assertAllClose(roi_features[0][1], 3 * np.ones((2, 2, 1)))
self.assertAllClose(roi_features[0][2], 3 * np.ones((2, 2, 1)))
self.assertAllClose(roi_features[0][3], 4 * np.ones((2, 2, 1)))
self.assertAllClose(roi_features[0][4], 5 * np.ones((2, 2, 1)))
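  # A hedged note on the per-box level annotations above: they are
  # consistent with the standard FPN assignment heuristic
  # k = floor(k0 + log2(sqrt(w * h) / 224)) with k0 = 4 (Lin et al., 2017),
  # e.g. floor(4 + log2(113 / 224)) = 3 for the 113x113 box. The op itself
  # does not compute this: `levels` is passed in explicitly, offset so that
  # 0 denotes the first feature map.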
def test_large_input(self):
if self.has_tpu():
input_size = 1408
min_level = 2
max_level = 6
batch_size = 2
num_boxes = 512
num_filters = 256
output_size = [7, 7]
features = []
for level in range(min_level, max_level + 1):
feat_size = int(input_size / 2**level)
features.append(
np.reshape(
np.arange(
batch_size * feat_size * feat_size * num_filters,
dtype=np.float32),
[batch_size, feat_size, feat_size, num_filters]))
boxes = np.array([
[[0, 0, 256, 256]]*num_boxes,
], dtype=np.float32) / input_size
boxes = np.tile(boxes, [batch_size, 1, 1])
levels = np.random.randint(5, size=[batch_size, num_boxes],
dtype=np.int32)
def crop_and_resize_fn():
tf_features = [
tf.constant(feature, dtype=tf.bfloat16) for feature in features
]
return spatial_ops.multilevel_roi_align(
tf_features, tf.constant(boxes), tf.constant(levels), output_size)
roi_features = self.execute_tpu(crop_and_resize_fn, [])
self.assertEqual(roi_features.shape,
(batch_size, num_boxes, output_size[0],
output_size[1], num_filters))
class MatMulCropAndResizeTest(test_case.TestCase):
def testMatMulCropAndResize2x2To1x1(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[1, 1])
image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32)
expected_output = [[[[[2.5]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
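  # Note: matmul_crop_and_resize is a matrix-multiplication analogue of
  # tf.image.crop_and_resize, where normalized box corners map onto corner
  # pixel centers; a 1x1 crop of [0, 0, 1, 1] therefore samples the center,
  # (1 + 2 + 3 + 4) / 4 = 2.5, regardless of box orientation (see the
  # flipped variant below).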
def testMatMulCropAndResize2x2To1x1Flipped(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[1, 1])
image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32)
boxes = np.array([[[1, 1, 0, 0]]], dtype=np.float32)
expected_output = [[[[[2.5]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize2x2To3x3(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[3, 3])
image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32)
expected_output = [[[[[1.0], [1.5], [2.0]],
[[2.0], [2.5], [3.0]],
[[3.0], [3.5], [4.0]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize2x2To3x3Flipped(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[3, 3])
image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32)
boxes = np.array([[[1, 1, 0, 0]]], dtype=np.float32)
expected_output = [[[[[4.0], [3.5], [3.0]],
[[3.0], [2.5], [2.0]],
[[2.0], [1.5], [1.0]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize3x3To2x2(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1], [2], [3]],
[[4], [5], [6]],
[[7], [8], [9]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]]], dtype=np.float32)
expected_output = [[[[[1], [3]], [[7], [9]]],
[[[1], [2]], [[4], [5]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize3x3To2x2_2Channels(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]]], dtype=np.float32)
expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [2, 1]], [[4, 3], [5, 4]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testBatchMatMulCropAndResize3x3To2x2_2Channels(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]],
[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]],
[[1, 1, 0, 0],
[.5, .5, 0, 0]]], dtype=np.float32)
expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [2, 1]], [[4, 3], [5, 4]]]],
[[[[9, 8], [7, 6]], [[3, 2], [1, 0]]],
[[[5, 4], [4, 3]], [[2, 1], [1, 0]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMatMulCropAndResize3x3To2x2Flipped(self):
def graph_fn(image, boxes):
return spatial_ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1], [2], [3]],
[[4], [5], [6]],
[[7], [8], [9]]]], dtype=np.float32)
boxes = np.array([[[1, 1, 0, 0],
[.5, .5, 0, 0]]], dtype=np.float32)
expected_output = [[[[[9], [7]], [[3], [1]]],
[[[5], [4]], [[2], [1]]]]]
crop_output = self.execute(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMultilevelMatMulCropAndResize(self):
def graph_fn(image1, image2, boxes, box_levels):
return spatial_ops.multilevel_matmul_crop_and_resize([image1, image2],
boxes,
box_levels,
crop_size=[2, 2])
image = [np.array([[[[1, 0], [2, 0], [3, 0]],
[[4, 0], [5, 0], [6, 0]],
[[7, 0], [8, 0], [9, 0]]],
[[[1, 0], [2, 0], [3, 0]],
[[4, 0], [5, 0], [6, 0]],
[[7, 0], [8, 0], [9, 0]]]], dtype=np.float32),
np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]],
[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32)]
boxes = np.array([[[1, 1, 0, 0],
[.5, .5, 0, 0]],
[[0, 0, 1, 1],
[0, 0, .5, .5]]], dtype=np.float32)
box_levels = np.array([[0, 1], [1, 1]], dtype=np.int32)
expected_output = [[[[[9, 0], [7, 0]], [[3, 0], [1, 0]]],
[[[5, 4], [4, 3]], [[2, 1], [1, 0]]]],
[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [2, 1]], [[4, 3], [5, 4]]]]]
crop_output = self.execute(graph_fn, image + [boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
class NativeCropAndResizeTest(test_case.TestCase):
def testBatchCropAndResize3x3To2x2_2Channels(self):
def graph_fn(image, boxes):
return spatial_ops.native_crop_and_resize(image, boxes, crop_size=[2, 2])
image = np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]],
[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32)
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]],
[[1, 1, 0, 0],
[.5, .5, 0, 0]]], dtype=np.float32)
expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [2, 1]], [[4, 3], [5, 4]]]],
[[[[9, 8], [7, 6]], [[3, 2], [1, 0]]],
[[[5, 4], [4, 3]], [[2, 1], [1, 0]]]]]
crop_output = self.execute_cpu(graph_fn, [image, boxes])
self.assertAllClose(crop_output, expected_output)
def testMultilevelBatchCropAndResize3x3To2x2_2Channels(self):
def graph_fn(image1, image2, boxes, box_levels):
return spatial_ops.multilevel_native_crop_and_resize([image1, image2],
boxes,
box_levels,
crop_size=[2, 2])
image = [np.array([[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]],
[[[1, 0], [2, 1], [3, 2]],
[[4, 3], [5, 4], [6, 5]],
[[7, 6], [8, 7], [9, 8]]]], dtype=np.float32),
np.array([[[[1, 0], [2, 1]],
[[4, 3], [5, 4]]],
[[[1, 0], [2, 1]],
[[4, 3], [5, 4]]]], dtype=np.float32)]
boxes = np.array([[[0, 0, 1, 1],
[0, 0, .5, .5]],
[[1, 1, 0, 0],
[.5, .5, 0, 0]]], dtype=np.float32)
    box_levels = np.array([[0, 1], [0, 0]], dtype=np.int32)
expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]],
[[[1, 0], [1.5, 0.5]], [[2.5, 1.5], [3, 2]]]],
[[[[9, 8], [7, 6]], [[3, 2], [1, 0]]],
[[[5, 4], [4, 3]], [[2, 1], [1, 0]]]]]
crop_output = self.execute_cpu(graph_fn, image + [boxes, box_levels])
self.assertAllClose(crop_output, expected_output)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/spatial_transform_ops_test.py | spatial_transform_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_mask_list_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_mask_list
class BoxMaskListTest(tf.test.TestCase):
def test_invalid_box_mask_data(self):
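    # Each case below violates one BoxMaskList constraint, in order: box data
    # that is not a numpy array; box data with an integer dtype; box data not
    # of shape [N, 4]; a box whose y_min exceeds its y_max; mismatched
    # box/mask counts; masks of rank 2 instead of [N, height, width]; masks
    # of rank 4; and masks whose dtype is not uint8.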
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=[0, 0, 1, 1],
mask_data=np.zeros([1, 3, 3], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 0, 1, 1]], dtype=int),
mask_data=np.zeros([1, 3, 3], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([0, 1, 1, 3, 4], dtype=float),
mask_data=np.zeros([1, 3, 3], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [3, 1, 1, 5]], dtype=float),
mask_data=np.zeros([2, 3, 3], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [1, 1, 1, 5]], dtype=float),
mask_data=np.zeros([3, 5, 5], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [1, 1, 1, 5]], dtype=float),
mask_data=np.zeros([2, 5], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [1, 1, 1, 5]], dtype=float),
mask_data=np.zeros([2, 5, 5, 5], dtype=np.uint8))
with self.assertRaises(ValueError):
np_box_mask_list.BoxMaskList(
box_data=np.array([[0, 1, 1, 3], [1, 1, 1, 5]], dtype=float),
mask_data=np.zeros([2, 5, 5], dtype=np.int32))
def test_has_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=np.zeros([3, 5, 5], dtype=np.uint8))
self.assertTrue(box_mask_list.has_field('boxes'))
self.assertTrue(box_mask_list.has_field('masks'))
def test_has_field_with_nonexisted_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=np.zeros([3, 3, 3], dtype=np.uint8))
self.assertFalse(box_mask_list.has_field('scores'))
def test_get_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks = np.zeros([3, 3, 3], dtype=np.uint8)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
self.assertTrue(np.allclose(box_mask_list.get_field('boxes'), boxes))
self.assertTrue(np.allclose(box_mask_list.get_field('masks'), masks))
def test_get_field_with_nonexited_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks = np.zeros([3, 3, 3], dtype=np.uint8)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
with self.assertRaises(ValueError):
box_mask_list.get_field('scores')
class AddExtraFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks = np.zeros([3, 3, 3], dtype=np.uint8)
self.box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
def test_add_already_existed_field_bbox(self):
with self.assertRaises(ValueError):
self.box_mask_list.add_field('boxes',
np.array([[0, 0, 0, 1, 0]], dtype=float))
def test_add_already_existed_field_mask(self):
with self.assertRaises(ValueError):
self.box_mask_list.add_field('masks',
np.zeros([3, 3, 3], dtype=np.uint8))
def test_add_invalid_field_data(self):
with self.assertRaises(ValueError):
self.box_mask_list.add_field('scores', np.array([0.5, 0.7], dtype=float))
with self.assertRaises(ValueError):
self.box_mask_list.add_field('scores',
np.array([0.5, 0.7, 0.9, 0.1], dtype=float))
def test_add_single_dimensional_field_data(self):
box_mask_list = self.box_mask_list
scores = np.array([0.5, 0.7, 0.9], dtype=float)
box_mask_list.add_field('scores', scores)
self.assertTrue(np.allclose(scores, self.box_mask_list.get_field('scores')))
def test_add_multi_dimensional_field_data(self):
box_mask_list = self.box_mask_list
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
box_mask_list.add_field('labels', labels)
self.assertTrue(np.allclose(labels, self.box_mask_list.get_field('labels')))
def test_get_extra_fields(self):
box_mask_list = self.box_mask_list
self.assertItemsEqual(box_mask_list.get_extra_fields(), ['masks'])
scores = np.array([0.5, 0.7, 0.9], dtype=float)
box_mask_list.add_field('scores', scores)
self.assertItemsEqual(box_mask_list.get_extra_fields(), ['masks', 'scores'])
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
box_mask_list.add_field('labels', labels)
self.assertItemsEqual(box_mask_list.get_extra_fields(),
['masks', 'scores', 'labels'])
def test_get_coordinates(self):
y_min, x_min, y_max, x_max = self.box_mask_list.get_coordinates()
expected_y_min = np.array([3.0, 14.0, 0.0], dtype=float)
expected_x_min = np.array([4.0, 14.0, 0.0], dtype=float)
expected_y_max = np.array([6.0, 15.0, 20.0], dtype=float)
expected_x_max = np.array([8.0, 15.0, 20.0], dtype=float)
self.assertTrue(np.allclose(y_min, expected_y_min))
self.assertTrue(np.allclose(x_min, expected_x_min))
self.assertTrue(np.allclose(y_max, expected_y_max))
self.assertTrue(np.allclose(x_max, expected_x_max))
def test_num_boxes(self):
boxes = np.array([[0., 0., 100., 100.], [10., 30., 50., 70.]], dtype=float)
masks = np.zeros([2, 5, 5], dtype=np.uint8)
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
expected_num_boxes = 2
    self.assertEqual(box_mask_list.num_boxes(), expected_num_boxes)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_mask_list_test.py | np_box_mask_list_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates Visual Relations Detection(VRD) result evaluation on an image.
Annotate each VRD result as true positives or false positive according to
a predefined IOU ratio. Multi-class detection is supported by default.
Based on the settings, per image evaluation is performed either on phrase
detection subtask or on relation detection subtask.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class PerImageVRDEvaluation(object):
"""Evaluate vrd result of a single image."""
def __init__(self, matching_iou_threshold=0.5):
"""Initialized PerImageVRDEvaluation by evaluation parameters.
Args:
matching_iou_threshold: A ratio of area intersection to union, which is
        the threshold to consider whether a detection is a true positive or
        not, in the phrase detection subtask.
"""
self.matching_iou_threshold = matching_iou_threshold
def compute_detection_tp_fp(self, detected_box_tuples, detected_scores,
detected_class_tuples, groundtruth_box_tuples,
groundtruth_class_tuples):
"""Evaluates VRD as being tp, fp from a single image.
Args:
detected_box_tuples: A numpy array of structures with shape [N,],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max].
detected_scores: A float numpy array of shape [N,], representing
the confidence scores of the detected N object instances.
detected_class_tuples: A numpy array of structures shape [N,],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
groundtruth_box_tuples: A float numpy array of structures with the shape
[M,], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max].
groundtruth_class_tuples: A numpy array of structures shape [M,],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
Returns:
scores: A single numpy array with shape [N,], representing N scores
        detected with object class, sorted in descending order.
tp_fp_labels: A single boolean numpy array of shape [N,], representing N
True/False positive label, one label per tuple. The labels are sorted
so that the order of the labels matches the order of the scores.
result_mapping: A numpy array with shape [N,] with original index of each
entry.
"""
scores, tp_fp_labels, result_mapping = self._compute_tp_fp(
detected_box_tuples=detected_box_tuples,
detected_scores=detected_scores,
detected_class_tuples=detected_class_tuples,
groundtruth_box_tuples=groundtruth_box_tuples,
groundtruth_class_tuples=groundtruth_class_tuples)
return scores, tp_fp_labels, result_mapping
def _compute_tp_fp(self, detected_box_tuples, detected_scores,
detected_class_tuples, groundtruth_box_tuples,
groundtruth_class_tuples):
"""Labels as true/false positives detection tuples across all classes.
Args:
detected_box_tuples: A numpy array of structures with shape [N,],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N,], representing
the confidence scores of the detected N object instances.
detected_class_tuples: A numpy array of structures shape [N,],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
groundtruth_box_tuples: A float numpy array of structures with the shape
[M,], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
groundtruth_class_tuples: A numpy array of structures shape [M,],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
Returns:
scores: A single numpy array with shape [N,], representing N scores
        detected with object class, sorted in descending order.
tp_fp_labels: A single boolean numpy array of shape [N,], representing N
True/False positive label, one label per tuple. The labels are sorted
so that the order of the labels matches the order of the scores.
result_mapping: A numpy array with shape [N,] with original index of each
entry.
"""
unique_gt_tuples = np.unique(
np.concatenate((groundtruth_class_tuples, detected_class_tuples)))
result_scores = []
result_tp_fp_labels = []
result_mapping = []
for unique_tuple in unique_gt_tuples:
detections_selector = (detected_class_tuples == unique_tuple)
gt_selector = (groundtruth_class_tuples == unique_tuple)
selector_mapping = np.where(detections_selector)[0]
detection_scores_per_tuple = detected_scores[detections_selector]
detection_box_per_tuple = detected_box_tuples[detections_selector]
sorted_indices = np.argsort(detection_scores_per_tuple)
sorted_indices = sorted_indices[::-1]
tp_fp_labels = self._compute_tp_fp_for_single_class(
detected_box_tuples=detection_box_per_tuple[sorted_indices],
groundtruth_box_tuples=groundtruth_box_tuples[gt_selector])
result_scores.append(detection_scores_per_tuple[sorted_indices])
result_tp_fp_labels.append(tp_fp_labels)
result_mapping.append(selector_mapping[sorted_indices])
if result_scores:
result_scores = np.concatenate(result_scores)
result_tp_fp_labels = np.concatenate(result_tp_fp_labels)
result_mapping = np.concatenate(result_mapping)
else:
result_scores = np.array([], dtype=float)
result_tp_fp_labels = np.array([], dtype=bool)
result_mapping = np.array([], dtype=int)
sorted_indices = np.argsort(result_scores)
sorted_indices = sorted_indices[::-1]
return result_scores[sorted_indices], result_tp_fp_labels[
sorted_indices], result_mapping[sorted_indices]
def _get_overlaps_and_scores_relation_tuples(self, detected_box_tuples,
groundtruth_box_tuples):
"""Computes overlaps and scores between detected and groundtruth tuples.
Both detections and groundtruth boxes have the same class tuples.
Args:
detected_box_tuples: A numpy array of structures with shape [N,],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
groundtruth_box_tuples: A float numpy array of structures with the shape
[M,], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
Returns:
result_iou: A float numpy array of size
[num_detected_tuples, num_gt_box_tuples].
"""
result_iou = np.ones(
(detected_box_tuples.shape[0], groundtruth_box_tuples.shape[0]),
dtype=float)
for field in detected_box_tuples.dtype.fields:
detected_boxlist_field = np_box_list.BoxList(detected_box_tuples[field])
gt_boxlist_field = np_box_list.BoxList(groundtruth_box_tuples[field])
iou_field = np_box_list_ops.iou(detected_boxlist_field, gt_boxlist_field)
result_iou = np.minimum(iou_field, result_iou)
return result_iou
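  # Worked example (field names hypothetical): if a detection's 'subject'
  # box overlaps its groundtruth counterpart with IOU 0.8 while the 'object'
  # boxes overlap with IOU 0.4, the tuple entry above is min(0.8, 0.4) =
  # 0.4, i.e. every named box in the tuple must match well for the tuple to
  # match.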
def _compute_tp_fp_for_single_class(self, detected_box_tuples,
groundtruth_box_tuples):
"""Labels boxes detected with the same class from the same image as tp/fp.
Detection boxes are expected to be already sorted by score.
Args:
detected_box_tuples: A numpy array of structures with shape [N,],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
groundtruth_box_tuples: A float numpy array of structures with the shape
[M,], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max]
Returns:
tp_fp_labels: a boolean numpy array indicating whether a detection is a
true positive.
"""
if detected_box_tuples.size == 0:
return np.array([], dtype=bool)
min_iou = self._get_overlaps_and_scores_relation_tuples(
detected_box_tuples, groundtruth_box_tuples)
num_detected_tuples = detected_box_tuples.shape[0]
tp_fp_labels = np.zeros(num_detected_tuples, dtype=bool)
if min_iou.shape[1] > 0:
max_overlap_gt_ids = np.argmax(min_iou, axis=1)
is_gt_tuple_detected = np.zeros(min_iou.shape[1], dtype=bool)
for i in range(num_detected_tuples):
gt_id = max_overlap_gt_ids[i]
if min_iou[i, gt_id] >= self.matching_iou_threshold:
if not is_gt_tuple_detected[gt_id]:
tp_fp_labels[i] = True
is_gt_tuple_detected[gt_id] = True
return tp_fp_labels
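# A minimal usage sketch (illustrative only, not part of the library API).
# The 'subject'/'object' field names and class ids below are assumptions
# chosen for illustration; the evaluator only requires that detections and
# groundtruth share the same structured dtypes of named boxes and classes.
def _example_vrd_evaluation():
  box_dtype = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
  class_dtype = np.dtype([('subject', 'i4'), ('object', 'i4'),
                          ('relation', 'i4')])
  detected_boxes = np.array([([0., 0., 1., 1.], [0., 0., .5, .5])],
                            dtype=box_dtype)
  detected_scores = np.array([0.9], dtype=float)
  detected_classes = np.array([(1, 2, 3)], dtype=class_dtype)
  groundtruth_boxes = np.array([([0., 0., 1., 1.], [0., 0., .5, .5])],
                               dtype=box_dtype)
  groundtruth_classes = np.array([(1, 2, 3)], dtype=class_dtype)
  evaluator = PerImageVRDEvaluation(matching_iou_threshold=0.5)
  # The single, perfectly matching detection comes back as a true positive
  # with its original index 0.
  return evaluator.compute_detection_tp_fp(
      detected_boxes, detected_scores, detected_classes,
      groundtruth_boxes, groundtruth_classes)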
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/per_image_vrd_evaluation.py | per_image_vrd_evaluation.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.visualization_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import PIL.Image as Image
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.utils import test_case
from object_detection.utils import visualization_utils
_TESTDATA_PATH = 'object_detection/test_images'
def get_iterator_next_for_testing(dataset, is_tf2):
# In TF2, lookup tables are not supported in one shot iterators, but
# initialization is implicit.
if is_tf2:
return dataset.make_initializable_iterator().get_next()
# In TF1, we use one shot iterator because it does not require running
# a separate init op.
else:
return dataset.make_one_shot_iterator().get_next()
class VisualizationUtilsTest(test_case.TestCase):
def test_get_prime_multiplier_for_color_randomness(self):
    # Show that the default multiplier is not 1 and does not divide the total
    # number of standard colors.
multiplier = visualization_utils._get_multiplier_for_color_randomness()
self.assertNotEqual(
0, multiplier % len(visualization_utils.STANDARD_COLORS))
self.assertNotEqual(1, multiplier)
# Show that with 34 colors, the closest prime number to 34/10 that
# satisfies the constraints is 5.
default_standard_colors = visualization_utils.STANDARD_COLORS
visualization_utils.STANDARD_COLORS = [
'color_{}'.format(str(i)) for i in range(34)
]
multiplier = visualization_utils._get_multiplier_for_color_randomness()
self.assertEqual(5, multiplier)
# Show that with 110 colors, the closest prime number to 110/10 that
# satisfies the constraints is 13 (since 11 equally divides 110).
visualization_utils.STANDARD_COLORS = [
'color_{}'.format(str(i)) for i in range(110)
]
multiplier = visualization_utils._get_multiplier_for_color_randomness()
self.assertEqual(13, multiplier)
visualization_utils.STANDARD_COLORS = default_standard_colors
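  # A hedged sketch of the rule the assertions above imply (assuming the
  # implementation picks the prime nearest to num_colors / 10 that is
  # neither 1 nor a divisor of num_colors; `small_primes` is a hypothetical
  # precomputed list):
  #   candidates = [p for p in small_primes
  #                 if p != 1 and num_colors % p != 0]
  #   multiplier = min(candidates, key=lambda p: abs(p - num_colors / 10.))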
def create_colorful_test_image(self):
"""This function creates an image that can be used to test vis functions.
It makes an image composed of four colored rectangles.
Returns:
colorful test numpy array image.
"""
ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)
ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)
ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)
imr = np.concatenate((ch255, ch128, ch128), axis=2)
img = np.concatenate((ch255, ch255, ch0), axis=2)
imb = np.concatenate((ch255, ch0, ch255), axis=2)
imw = np.concatenate((ch128, ch128, ch128), axis=2)
imu = np.concatenate((imr, img), axis=1)
imd = np.concatenate((imb, imw), axis=1)
image = np.concatenate((imu, imd), axis=0)
return image
def create_test_image_with_five_channels(self):
return np.full([100, 200, 5], 255, dtype=np.uint8)
def create_test_grayscale_image(self):
return np.full([100, 200, 1], 255, dtype=np.uint8)
def test_draw_bounding_box_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
visualization_utils.draw_bounding_box_on_image(test_image, ymin, xmin, ymax,
xmax)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_box_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
visualization_utils.draw_bounding_box_on_image_array(
test_image, ymin, xmin, ymax, xmax)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
boxes = np.array([[0.25, 0.75, 0.4, 0.6],
[0.1, 0.1, 0.9, 0.9]])
visualization_utils.draw_bounding_boxes_on_image(test_image, boxes)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
boxes = np.array([[0.25, 0.75, 0.4, 0.6],
[0.1, 0.1, 0.9, 0.9]])
visualization_utils.draw_bounding_boxes_on_image_array(test_image, boxes)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_tensors(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]],
[[0.25, 0.25, 0.75, 0.75], [0.1, 0.3, 0.6, 1.0]]])
classes = tf.constant([[1, 1], [1, 2]], dtype=tf.int64)
scores = tf.constant([[0.8, 0.1], [0.6, 0.5]])
keypoints = tf.random.uniform((2, 2, 4, 2), maxval=1.0, dtype=tf.float32)
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
keypoints=keypoints,
min_score_thresh=0.2,
keypoint_edges=keypoint_edges))
return images_with_boxes
# Write output images for visualization.
images_with_boxes_np = self.execute(graph_fn, [])
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_track_ids(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75],
[0.5, 0.3, 0.7, 0.9],
[0.7, 0.5, 0.8, 0.9]],
[[0.41, 0.25, 0.75, 0.75],
[0.51, 0.3, 0.7, 0.9],
[0.75, 0.5, 0.8, 0.9]]])
classes = tf.constant([[1, 1, 2], [1, 1, 2]], dtype=tf.int64)
scores = tf.constant([[0.8, 0.5, 0.7], [0.6, 0.5, 0.8]])
track_ids = tf.constant([[3, 9, 7], [3, 9, 144]], dtype=tf.int32)
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
track_ids=track_ids,
min_score_thresh=0.2))
return images_with_boxes
# Write output images for visualization.
images_with_boxes_np = self.execute(graph_fn, [])
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_with_track_ids_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_additional_channels(self):
"""Tests the case where input image tensor has more than 3 channels."""
category_index = {1: {'id': 1, 'name': 'dog'}}
image_np = self.create_test_image_with_five_channels()
images_np = np.stack((image_np, image_np), axis=0)
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
min_score_thresh=0.2))
return images_with_boxes
final_images_np = self.execute(graph_fn, [])
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_bounding_boxes_on_image_tensors_grayscale(self):
"""Tests the case where input image tensor has one channel."""
category_index = {1: {'id': 1, 'name': 'dog'}}
image_np = self.create_test_grayscale_image()
images_np = np.stack((image_np, image_np), axis=0)
def graph_fn():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant([[100, 200], [100, 200]], dtype=tf.int32)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
min_score_thresh=0.2))
return images_with_boxes
final_images_np = self.execute(graph_fn, [])
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_keypoints_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
keypoint_scores = [0.8, 0.2, 0.2, 0.7]
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
visualization_utils.draw_keypoints_on_image(
test_image,
keypoints,
keypoint_scores,
keypoint_edges=keypoint_edges,
keypoint_edge_width=1,
keypoint_edge_color='green')
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_keypoints_on_image_with_default_keypoint_scores(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
keypoints = [[0.25, np.nan], [0.4, 0.6], [np.nan, np.nan], [0.9, 0.9]]
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
visualization_utils.draw_keypoints_on_image(
test_image,
keypoints,
keypoint_edges=keypoint_edges,
keypoint_edge_width=1,
keypoint_edge_color='green')
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_keypoints_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
visualization_utils.draw_keypoints_on_image_array(
test_image,
keypoints,
keypoint_edges=keypoint_edges,
keypoint_edge_width=1,
keypoint_edge_color='green')
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_mask_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
mask = np.asarray([[0, 1],
[1, 1]], dtype=np.uint8)
expected_result = np.asarray([[[0, 0, 0], [0, 0, 127]],
[[0, 0, 127], [0, 0, 127]]], dtype=np.uint8)
visualization_utils.draw_mask_on_image_array(test_image, mask,
color='Blue', alpha=.5)
self.assertAllEqual(test_image, expected_result)
def test_draw_part_mask_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
mask = np.asarray([[0, 1],
[1, 6]], dtype=np.uint8)
visualization_utils.draw_part_mask_on_image_array(test_image, mask,
alpha=.5)
self.assertAllEqual([0, 0, 0], test_image[0, 0])
self.assertAllGreater(test_image[0, 1], 0)
self.assertAllGreater(test_image[1, 0], 0)
self.assertAllGreater(test_image[1, 1], 0)
self.assertAllEqual(test_image[0, 1], test_image[1, 0])
def test_draw_float_channel_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
channel = np.asarray([[0., 0.5],
[0., 1.]], dtype=np.float32)
mask = np.asarray([[0, 1],
[1, 1]], dtype=np.uint8)
# The colormap ('bwr') maps the values as follows:
# 0.0 -> Blue
# 0.5 -> White
# 1.0 -> Red
visualization_utils.draw_float_channel_on_image_array(
test_image, channel, mask, alpha=1.0, cmap='bwr')
expected_result = np.asarray([[[0, 0, 0], [255, 254, 254]],
[[0, 0, 255], [255, 0, 0]]], dtype=np.uint8)
self.assertAllEqual(test_image, expected_result)
def test_draw_heatmaps_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
heatmaps = np.ones(shape=[10, 20, 1], dtype=float)
visualization_utils.draw_heatmaps_on_image(test_image, heatmaps)
width_final, height_final = test_image.size
pixels = list(test_image.getdata())
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
    # The pixel should be painted as AliceBlue with RGB (240, 248, 255).
self.assertAllEqual((240, 248, 255), pixels[10])
def test_draw_heatmaps_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
    heatmap1 = np.asarray([[1, 0],
                           [0, 1]], dtype=float)
    heatmap2 = np.asarray([[0, 1],
                           [1, 0]], dtype=float)
heatmaps = np.stack([heatmap1, heatmap2], axis=0)
output_image = visualization_utils.draw_heatmaps_on_image_array(
test_image, heatmaps)
# Output image should be painted as "AliceBlue" at (0, 0), (1, 1)
# and "Chartreuse" at (0, 1), (1, 0).
self.assertAllEqual(
output_image,
np.array([[[240, 248, 255], [127, 255, 0]],
[[127, 255, 0], [240, 248, 255]]]))
def test_draw_heatmaps_on_image_tensors(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
    heatmap1 = np.asarray([[1, 0],
                           [0, 1]], dtype=float)
    heatmap2 = np.asarray([[0, 1],
                           [1, 0]], dtype=float)
heatmaps = np.stack([heatmap1, heatmap2], axis=0)
def graph_fn():
image_tensor = tf.constant(test_image, dtype=tf.uint8)
image_tensor = tf.expand_dims(image_tensor, axis=0)
heatmaps_tensor = tf.expand_dims(
tf.constant(heatmaps, dtype=tf.float32), axis=0)
output_image = visualization_utils.draw_heatmaps_on_image_tensors(
images=image_tensor,
heatmaps=heatmaps_tensor,
apply_sigmoid=False)
return output_image
output_image_np = self.execute(graph_fn, [])
self.assertAllEqual(
output_image_np,
np.expand_dims(
np.array([[[240, 248, 255], [127, 255, 0]],
[[127, 255, 0], [240, 248, 255]]]),
axis=0))
def test_add_cdf_image_summary(self):
def graph_fn():
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
visualization_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
return cdf_image_summary
self.execute(graph_fn, [])
def test_add_hist_image_summary(self):
def graph_fn():
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
bins = [0.01 * i for i in range(101)]
visualization_utils.add_hist_image_summary(values, bins,
'ScoresDistribution')
hist_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
return hist_image_summary
self.execute(graph_fn, [])
def test_eval_metric_ops(self):
if self.is_tf2():
self.skipTest('This test is only compatible with Tensorflow 1.X, '
'estimator eval ops are not supported in Tensorflow 2.')
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
max_examples_to_draw = 4
metric_op_base = 'Detections_Left_Groundtruth_Right'
eval_metric_ops = visualization_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=max_examples_to_draw,
summary_name_prefix=metric_op_base)
original_image = tf.placeholder(tf.uint8, [4, None, None, 3])
original_image_spatial_shape = tf.placeholder(tf.int32, [4, 2])
true_image_shape = tf.placeholder(tf.int32, [4, 3])
detection_boxes = tf.random_uniform([4, 20, 4],
minval=0.0,
maxval=1.0,
dtype=tf.float32)
detection_classes = tf.random_uniform([4, 20],
minval=1,
maxval=3,
dtype=tf.int64)
detection_scores = tf.random_uniform([4, 20],
minval=0.,
maxval=1.,
dtype=tf.float32)
groundtruth_boxes = tf.random_uniform([4, 8, 4],
minval=0.0,
maxval=1.0,
dtype=tf.float32)
num_groundtruth_boxes = tf.constant([3, 8, 0, 2], tf.int32)
groundtruth_classes = tf.random_uniform([4, 8],
minval=1,
maxval=3,
dtype=tf.int64)
eval_dict = {
fields.DetectionResultFields.detection_boxes:
detection_boxes,
fields.DetectionResultFields.detection_classes:
detection_classes,
fields.DetectionResultFields.detection_scores:
detection_scores,
fields.InputDataFields.original_image:
original_image,
fields.InputDataFields.original_image_spatial_shape: (
original_image_spatial_shape),
fields.InputDataFields.true_image_shape: (true_image_shape),
fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
fields.InputDataFields.groundtruth_classes:
groundtruth_classes,
fields.InputDataFields.num_groundtruth_boxes:
num_groundtruth_boxes
}
metric_ops = eval_metric_ops.get_estimator_eval_metric_ops(eval_dict)
_, update_op = metric_ops[next(six.iterkeys(metric_ops))]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
value_ops = {}
for key, (value_op, _) in six.iteritems(metric_ops):
value_ops[key] = value_op
# First run enough update steps to surpass `max_examples_to_draw`.
for i in range(max_examples_to_draw):
# Use a unique image shape on each eval image.
sess.run(
update_op,
feed_dict={
original_image:
np.random.randint(
low=0,
high=256,
size=(4, 6 + i, 7 + i, 3),
dtype=np.uint8),
original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
[6 + i, 7 + i], [6 + i, 7 + i]],
true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
[6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
})
value_ops_out = sess.run(value_ops)
for key, value_op in six.iteritems(value_ops_out):
self.assertNotEqual('', value_op)
# Now run fewer update steps than `max_examples_to_draw`. A single value
# op will be the empty string, since not enough image summaries can be
# produced.
for i in range(max_examples_to_draw - 1):
# Use a unique image shape on each eval image.
sess.run(
update_op,
feed_dict={
original_image:
np.random.randint(
low=0,
high=256,
size=(4, 6 + i, 7 + i, 3),
dtype=np.uint8),
original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
[6 + i, 7 + i], [6 + i, 7 + i]],
true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
[6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
})
value_ops_out = sess.run(value_ops)
self.assertEqual(
six.b(''),
value_ops_out[metric_op_base + '/' + str(max_examples_to_draw - 1)])
def test_visualize_boxes_and_labels_on_image_array(self):
ori_image = np.ones([360, 480, 3], dtype=np.int32) * 255
test_image = np.ones([360, 480, 3], dtype=np.int32) * 255
detections = np.array([[0.8, 0.1, 0.9, 0.1, 1., 0.1],
[0.1, 0.3, 0.8, 0.7, 1., 0.6]])
keypoints = np.array(np.random.rand(2, 5, 2), dtype=np.float32)
labelmap = {1: {'id': 1, 'name': 'cat'}, 2: {'id': 2, 'name': 'dog'}}
visualization_utils.visualize_boxes_and_labels_on_image_array(
test_image,
detections[:, :4],
detections[:, 4].astype(np.int32),
detections[:, 5],
labelmap,
keypoints=keypoints,
track_ids=None,
use_normalized_coordinates=True,
max_boxes_to_draw=1,
min_score_thresh=0.2,
agnostic_mode=False,
line_thickness=8)
self.assertGreater(np.abs(np.sum(test_image - ori_image)), 0)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/visualization_utils_test.py | visualization_utils_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluate Object Detection result on a single image.
Annotate each detected result as true positives or false positive according to
a predefined IOU ratio. Non Maximum Supression is used by default. Multi class
detection is supported by default.
Based on the settings, per image evaluation is either performed on boxes or
on object masks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_box_mask_list_ops
class PerImageEvaluation(object):
"""Evaluate detection result of a single image."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=0.3,
nms_max_output_boxes=50,
group_of_weight=0.0):
"""Initialized PerImageEvaluation by evaluation parameters.
Args:
num_groundtruth_classes: Number of ground truth object classes
matching_iou_threshold: A ratio of area intersection to union, which is
the threshold to consider whether a detection is true positive or not
nms_iou_threshold: IOU threshold used in Non Maximum Suppression.
nms_max_output_boxes: Number of maximum output boxes in NMS.
group_of_weight: Weight of the group-of boxes.
"""
self.matching_iou_threshold = matching_iou_threshold
self.nms_iou_threshold = nms_iou_threshold
self.nms_max_output_boxes = nms_max_output_boxes
self.num_groundtruth_classes = num_groundtruth_classes
self.group_of_weight = group_of_weight
def compute_object_detection_metrics(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list,
detected_masks=None,
groundtruth_masks=None):
"""Evaluates detections as being tp, fp or weighted from a single image.
The evaluation is done in two stages:
1. All detections are matched to non group-of boxes; true positives are
determined and detections matched to difficult boxes are ignored.
2. Detections that are determined as false positives are matched against
group-of boxes and weighted if matched.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions. Each row is of the format [y_min,
x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1],
        representing
the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag
detected_masks: (optional) A uint8 numpy array of shape [N, height,
width]. If not None, the metrics will be computed based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
width]. Can have empty masks, i.e. where all values are 0.
Returns:
scores: A list of C float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
tp_fp_labels: A list of C boolean numpy arrays. Each numpy array
is of shape [K, 1], representing K True/False positive label of
object instances detected with class label c
is_class_correctly_detected_in_image: a numpy integer array of
        shape [C, 1], indicating whether the corresponding class has at least
        one instance being correctly detected in the image
"""
detected_boxes, detected_scores, detected_class_labels, detected_masks = (
self._remove_invalid_boxes(detected_boxes, detected_scores,
detected_class_labels, detected_masks))
scores, tp_fp_labels = self._compute_tp_fp(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
is_class_correctly_detected_in_image = self._compute_cor_loc(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
return scores, tp_fp_labels, is_class_correctly_detected_in_image
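  # A minimal, hedged calling sketch (shapes only; the variable names are
  # illustrative). For N detections and M groundtruth boxes over C classes:
  #   evaluator = PerImageEvaluation(num_groundtruth_classes=C)
  #   scores, tp_fp, corloc = evaluator.compute_object_detection_metrics(
  #       detected_boxes,            # float numpy array, [N, 4]
  #       detected_scores,           # float numpy array, [N]
  #       detected_class_labels,     # int numpy array, [N]
  #       groundtruth_boxes,         # float numpy array, [M, 4]
  #       groundtruth_class_labels,  # int numpy array, [M]
  #       groundtruth_is_difficult_list=np.zeros(M, dtype=bool),
  #       groundtruth_is_group_of_list=np.zeros(M, dtype=bool))
  # yielding C per-class score/label arrays and a [C] CorLoc indicator.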
def _compute_cor_loc(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
detected_masks=None,
groundtruth_masks=None):
"""Compute CorLoc score for object detection result.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions. Each row is of the format [y_min,
x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1],
        representing
the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
detected_masks: (optional) A uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
width].
Returns:
is_class_correctly_detected_in_image: a numpy integer array of
        shape [C, 1], indicating whether the corresponding class has at least
        one instance being correctly detected in the image
Raises:
ValueError: If detected masks is not None but groundtruth masks are None,
or the other way around.
"""
if (detected_masks is not None and
groundtruth_masks is None) or (detected_masks is None and
groundtruth_masks is not None):
raise ValueError(
'If `detected_masks` is provided, then `groundtruth_masks` should '
'also be provided.')
is_class_correctly_detected_in_image = np.zeros(
self.num_groundtruth_classes, dtype=int)
for i in range(self.num_groundtruth_classes):
(gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class) = self._get_ith_class_arrays(
detected_boxes, detected_scores, detected_masks,
detected_class_labels, groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, i)
is_class_correctly_detected_in_image[i] = (
self._compute_is_class_correctly_detected_in_image(
detected_boxes=detected_boxes_at_ith_class,
detected_scores=detected_scores_at_ith_class,
groundtruth_boxes=gt_boxes_at_ith_class,
detected_masks=detected_masks_at_ith_class,
groundtruth_masks=gt_masks_at_ith_class))
return is_class_correctly_detected_in_image
def _compute_is_class_correctly_detected_in_image(self,
detected_boxes,
detected_scores,
groundtruth_boxes,
detected_masks=None,
groundtruth_masks=None):
"""Compute CorLoc score for a single class.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
detected_masks: (optional) A np.uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A np.uint8 numpy array of shape [M, height,
width].
Returns:
is_class_correctly_detected_in_image: An integer 1 or 0 denoting whether a
class is correctly detected in the image or not
"""
if detected_boxes.size > 0:
if groundtruth_boxes.size > 0:
max_score_id = np.argmax(detected_scores)
mask_mode = False
if detected_masks is not None and groundtruth_masks is not None:
mask_mode = True
if mask_mode:
detected_boxlist = np_box_mask_list.BoxMaskList(
box_data=np.expand_dims(detected_boxes[max_score_id], axis=0),
mask_data=np.expand_dims(detected_masks[max_score_id], axis=0))
gt_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes, mask_data=groundtruth_masks)
iou = np_box_mask_list_ops.iou(detected_boxlist, gt_boxlist)
else:
detected_boxlist = np_box_list.BoxList(
np.expand_dims(detected_boxes[max_score_id, :], axis=0))
gt_boxlist = np_box_list.BoxList(groundtruth_boxes)
iou = np_box_list_ops.iou(detected_boxlist, gt_boxlist)
if np.max(iou) >= self.matching_iou_threshold:
return 1
return 0
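  # Illustrative sketch (not part of the original module): the CorLoc check
  # above asks whether the single highest-scoring detection overlaps any
  # groundtruth box of the class by at least matching_iou_threshold.
  # Assuming `evaluator` is an already-constructed instance of this class:
  #
  #   import numpy as np
  #   detected = np.array([[0., 0., 0.5, 0.5], [0.4, 0.4, 1., 1.]])
  #   scores = np.array([0.9, 0.3])  # box 0 is the highest-scoring detection
  #   groundtruth = np.array([[0., 0., 0.5, 0.5]])
  #   flag = evaluator._compute_is_class_correctly_detected_in_image(
  #       detected, scores, groundtruth)
  #   # flag == 1, since box 0 matches the groundtruth with IoU 1.0.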
def _compute_tp_fp(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list,
detected_masks=None,
groundtruth_masks=None):
"""Labels true/false positives of detections of an image across all classes.
Args:
      detected_boxes: A float numpy array of shape [N, 4], representing N
        detected object regions. Each row is of the format [y_min, x_min,
        y_max, x_max].
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1],
        representing the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag
detected_masks: (optional) A np.uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A np.uint8 numpy array of shape [M, height,
width].
Returns:
      result_scores: A list of float numpy arrays. Each numpy array is of
        shape [K, 1], representing the scores of the K detections with
        object class label c.
      result_tp_fp_labels: A list of boolean numpy arrays. Each numpy array
        is of shape [K, 1], representing K True/False positive labels of
        object instances detected with class label c.
Raises:
ValueError: If detected masks is not None but groundtruth masks are None,
or the other way around.
"""
if detected_masks is not None and groundtruth_masks is None:
raise ValueError(
'Detected masks is available but groundtruth masks is not.')
if detected_masks is None and groundtruth_masks is not None:
raise ValueError(
'Groundtruth masks is available but detected masks is not.')
result_scores = []
result_tp_fp_labels = []
for i in range(self.num_groundtruth_classes):
groundtruth_is_difficult_list_at_ith_class = (
groundtruth_is_difficult_list[groundtruth_class_labels == i])
groundtruth_is_group_of_list_at_ith_class = (
groundtruth_is_group_of_list[groundtruth_class_labels == i])
(gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class) = self._get_ith_class_arrays(
detected_boxes, detected_scores, detected_masks,
detected_class_labels, groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, i)
scores, tp_fp_labels = self._compute_tp_fp_for_single_class(
detected_boxes=detected_boxes_at_ith_class,
detected_scores=detected_scores_at_ith_class,
groundtruth_boxes=gt_boxes_at_ith_class,
groundtruth_is_difficult_list=groundtruth_is_difficult_list_at_ith_class,
groundtruth_is_group_of_list=groundtruth_is_group_of_list_at_ith_class,
detected_masks=detected_masks_at_ith_class,
groundtruth_masks=gt_masks_at_ith_class)
result_scores.append(scores)
result_tp_fp_labels.append(tp_fp_labels)
return result_scores, result_tp_fp_labels
def _get_overlaps_and_scores_mask_mode(self, detected_boxes, detected_scores,
detected_masks, groundtruth_boxes,
groundtruth_masks,
groundtruth_is_group_of_list):
"""Computes overlaps and scores between detected and groudntruth masks.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
detected_masks: A uint8 numpy array of shape [N, height, width]. If not
None, the scores will be computed based on masks.
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_masks: A uint8 numpy array of shape [M, height, width].
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag. If a groundtruth box is
group-of box, every detection matching this box is ignored.
Returns:
      iou: A float numpy array of size [num_detected_boxes, num_gt_boxes],
        computed against non-group-of groundtruth masks; it has zero columns
        if gt_non_group_of_boxlist.num_boxes() == 0.
      ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes],
        computed against group-of groundtruth masks; it has zero columns if
        gt_group_of_boxlist.num_boxes() == 0.
scores: The score of the detected boxlist.
num_boxes: Number of non-maximum suppressed detected boxes.
"""
detected_boxlist = np_box_mask_list.BoxMaskList(
box_data=detected_boxes, mask_data=detected_masks)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_mask_list_ops.non_max_suppression(
detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold)
gt_non_group_of_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes[~groundtruth_is_group_of_list],
mask_data=groundtruth_masks[~groundtruth_is_group_of_list])
gt_group_of_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes[groundtruth_is_group_of_list],
mask_data=groundtruth_masks[groundtruth_is_group_of_list])
iou = np_box_mask_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist)
ioa = np.transpose(
np_box_mask_list_ops.ioa(gt_group_of_boxlist, detected_boxlist))
scores = detected_boxlist.get_field('scores')
num_boxes = detected_boxlist.num_boxes()
return iou, ioa, scores, num_boxes
def _get_overlaps_and_scores_box_mode(self, detected_boxes, detected_scores,
groundtruth_boxes,
groundtruth_is_group_of_list):
"""Computes overlaps and scores between detected and groudntruth boxes.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag. If a groundtruth box is
group-of box, every detection matching this box is ignored.
Returns:
      iou: A float numpy array of size [num_detected_boxes, num_gt_boxes],
        computed against non-group-of groundtruth boxes; it has zero columns
        if gt_non_group_of_boxlist.num_boxes() == 0.
      ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes],
        computed against group-of groundtruth boxes; it has zero columns if
        gt_group_of_boxlist.num_boxes() == 0.
scores: The score of the detected boxlist.
num_boxes: Number of non-maximum suppressed detected boxes.
"""
detected_boxlist = np_box_list.BoxList(detected_boxes)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_list_ops.non_max_suppression(
detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold)
gt_non_group_of_boxlist = np_box_list.BoxList(
groundtruth_boxes[~groundtruth_is_group_of_list])
gt_group_of_boxlist = np_box_list.BoxList(
groundtruth_boxes[groundtruth_is_group_of_list])
iou = np_box_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist)
ioa = np.transpose(
np_box_list_ops.ioa(gt_group_of_boxlist, detected_boxlist))
scores = detected_boxlist.get_field('scores')
num_boxes = detected_boxlist.num_boxes()
return iou, ioa, scores, num_boxes
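  # Illustrative sketch (not part of the original module): after NMS, IoU is
  # measured against non-group-of groundtruth and IoA against group-of
  # groundtruth, so each detection can be matched under either regime.
  # Assuming `evaluator` is an instance of this class whose NMS settings
  # retain the single box:
  #
  #   import numpy as np
  #   detected = np.array([[0., 0., 1., 1.]])
  #   scores = np.array([0.8])
  #   gt = np.array([[0., 0., 1., 1.], [0., 0., 2., 2.]])
  #   is_group_of = np.array([False, True])
  #   iou, ioa, out_scores, num = evaluator._get_overlaps_and_scores_box_mode(
  #       detected, scores, gt, is_group_of)
  #   # iou has shape [1, 1] (vs. the non-group-of box) and ioa has shape
  #   # [1, 1] (vs. the group-of box).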
def _compute_tp_fp_for_single_class(self,
detected_boxes,
detected_scores,
groundtruth_boxes,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list,
detected_masks=None,
groundtruth_masks=None):
"""Labels boxes detected with the same class from the same image as tp/fp.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not. If a
groundtruth box is difficult, every detection matching this box is
ignored.
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag. If a groundtruth box is
group-of box, every detection matching this box is ignored.
detected_masks: (optional) A uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
width].
Returns:
Two arrays of the same size, containing all boxes that were evaluated as
being true positives or false positives; if a box matched to a difficult
box or to a group-of box, it is ignored.
scores: A numpy array representing the detection scores.
tp_fp_labels: a boolean numpy array indicating whether a detection is a
true positive.
"""
if detected_boxes.size == 0:
return np.array([], dtype=float), np.array([], dtype=bool)
mask_mode = False
if detected_masks is not None and groundtruth_masks is not None:
mask_mode = True
iou = np.ndarray([0, 0])
ioa = np.ndarray([0, 0])
iou_mask = np.ndarray([0, 0])
ioa_mask = np.ndarray([0, 0])
if mask_mode:
      # For instance segmentation evaluation on Open Images V5, not all boxed
      # instances have corresponding segmentation annotations. Boxes that
      # don't have segmentation annotations are represented as empty masks in
      # the groundtruth_masks ndarray.
mask_presence_indicator = (np.sum(groundtruth_masks, axis=(1, 2)) > 0)
(iou_mask, ioa_mask, scores,
num_detected_boxes) = self._get_overlaps_and_scores_mask_mode(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_masks=detected_masks,
groundtruth_boxes=groundtruth_boxes[mask_presence_indicator, :],
groundtruth_masks=groundtruth_masks[mask_presence_indicator, :],
groundtruth_is_group_of_list=groundtruth_is_group_of_list[
mask_presence_indicator])
if sum(mask_presence_indicator) < len(mask_presence_indicator):
# Not all masks are present - some masks are empty
(iou, ioa, _,
num_detected_boxes) = self._get_overlaps_and_scores_box_mode(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
groundtruth_boxes=groundtruth_boxes[~mask_presence_indicator, :],
groundtruth_is_group_of_list=groundtruth_is_group_of_list[
~mask_presence_indicator])
num_detected_boxes = detected_boxes.shape[0]
else:
mask_presence_indicator = np.zeros(
groundtruth_is_group_of_list.shape, dtype=bool)
(iou, ioa, scores,
num_detected_boxes) = self._get_overlaps_and_scores_box_mode(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
groundtruth_boxes=groundtruth_boxes,
groundtruth_is_group_of_list=groundtruth_is_group_of_list)
if groundtruth_boxes.size == 0:
return scores, np.zeros(num_detected_boxes, dtype=bool)
tp_fp_labels = np.zeros(num_detected_boxes, dtype=bool)
is_matched_to_box = np.zeros(num_detected_boxes, dtype=bool)
is_matched_to_difficult = np.zeros(num_detected_boxes, dtype=bool)
is_matched_to_group_of = np.zeros(num_detected_boxes, dtype=bool)
def compute_match_iou(iou, groundtruth_nongroup_of_is_difficult_list,
is_box):
"""Computes TP/FP for non group-of box matching.
The function updates the following local variables:
        tp_fp_labels - set to True for detections matched to a non-difficult,
          not-yet-detected groundtruth box.
        is_matched_to_difficult - set to True for detections matched to a
          difficult groundtruth box at this stage.
        is_matched_to_box - the detections that were processed at this stage
          are marked as is_box.
      Args:
        iou: intersection-over-union matrix [num_det_boxes]x[num_gt_boxes].
groundtruth_nongroup_of_is_difficult_list: boolean that specifies if gt
box is difficult.
is_box: boolean that specifies if currently boxes or masks are
processed.
"""
max_overlap_gt_ids = np.argmax(iou, axis=1)
is_gt_detected = np.zeros(iou.shape[1], dtype=bool)
for i in range(num_detected_boxes):
gt_id = max_overlap_gt_ids[i]
is_evaluatable = (not tp_fp_labels[i] and
not is_matched_to_difficult[i] and
iou[i, gt_id] >= self.matching_iou_threshold and
not is_matched_to_group_of[i])
if is_evaluatable:
if not groundtruth_nongroup_of_is_difficult_list[gt_id]:
if not is_gt_detected[gt_id]:
tp_fp_labels[i] = True
is_gt_detected[gt_id] = True
is_matched_to_box[i] = is_box
else:
is_matched_to_difficult[i] = True
def compute_match_ioa(ioa, is_box):
"""Computes TP/FP for group-of box matching.
The function updates the following local variables:
is_matched_to_group_of - if a box is matched to group-of
is_matched_to_box - the detections that were processed at this stage are
marked as is_box.
Args:
        ioa: intersection-over-area matrix [num_det_boxes]x[num_gt_boxes].
is_box: boolean that specifies if currently boxes or masks are
processed.
Returns:
        scores_group_of: scores of detections matched to group-of boxes
          [num_groupof_matched].
tp_fp_labels_group_of: boolean array of size [num_groupof_matched], all
values are True.
"""
scores_group_of = np.zeros(ioa.shape[1], dtype=float)
tp_fp_labels_group_of = self.group_of_weight * np.ones(
ioa.shape[1], dtype=float)
max_overlap_group_of_gt_ids = np.argmax(ioa, axis=1)
for i in range(num_detected_boxes):
gt_id = max_overlap_group_of_gt_ids[i]
is_evaluatable = (not tp_fp_labels[i] and
not is_matched_to_difficult[i] and
ioa[i, gt_id] >= self.matching_iou_threshold and
not is_matched_to_group_of[i])
if is_evaluatable:
is_matched_to_group_of[i] = True
is_matched_to_box[i] = is_box
scores_group_of[gt_id] = max(scores_group_of[gt_id], scores[i])
selector = np.where((scores_group_of > 0) & (tp_fp_labels_group_of > 0))
scores_group_of = scores_group_of[selector]
tp_fp_labels_group_of = tp_fp_labels_group_of[selector]
return scores_group_of, tp_fp_labels_group_of
    # The evaluation is done in two stages:
    # 1. Evaluate all objects that actually have instance-level masks.
    # 2. Evaluate the remaining objects (those whose masks are empty) as
    #    boxes.
if iou_mask.shape[1] > 0:
groundtruth_is_difficult_mask_list = groundtruth_is_difficult_list[
mask_presence_indicator]
groundtruth_is_group_of_mask_list = groundtruth_is_group_of_list[
mask_presence_indicator]
compute_match_iou(
iou_mask,
groundtruth_is_difficult_mask_list[
~groundtruth_is_group_of_mask_list],
is_box=False)
scores_mask_group_of = np.ndarray([0], dtype=float)
tp_fp_labels_mask_group_of = np.ndarray([0], dtype=float)
if ioa_mask.shape[1] > 0:
scores_mask_group_of, tp_fp_labels_mask_group_of = compute_match_ioa(
ioa_mask, is_box=False)
# Tp-fp evaluation for non-group of boxes (if any).
if iou.shape[1] > 0:
groundtruth_is_difficult_box_list = groundtruth_is_difficult_list[
~mask_presence_indicator]
groundtruth_is_group_of_box_list = groundtruth_is_group_of_list[
~mask_presence_indicator]
compute_match_iou(
iou,
groundtruth_is_difficult_box_list[~groundtruth_is_group_of_box_list],
is_box=True)
scores_box_group_of = np.ndarray([0], dtype=float)
tp_fp_labels_box_group_of = np.ndarray([0], dtype=float)
if ioa.shape[1] > 0:
scores_box_group_of, tp_fp_labels_box_group_of = compute_match_ioa(
ioa, is_box=True)
if mask_mode:
# Note: here crowds are treated as ignore regions.
valid_entries = (~is_matched_to_difficult & ~is_matched_to_group_of
& ~is_matched_to_box)
return np.concatenate(
(scores[valid_entries], scores_mask_group_of)), np.concatenate(
(tp_fp_labels[valid_entries].astype(float),
tp_fp_labels_mask_group_of))
else:
valid_entries = (~is_matched_to_difficult & ~is_matched_to_group_of)
return np.concatenate(
(scores[valid_entries], scores_box_group_of)), np.concatenate(
(tp_fp_labels[valid_entries].astype(float),
tp_fp_labels_box_group_of))
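  # Illustrative sketch (not part of the original module): detections matched
  # to difficult or group-of groundtruth are excluded from the returned
  # arrays rather than counted as false positives. Assuming `evaluator` is an
  # instance of this class whose NMS settings retain both boxes:
  #
  #   import numpy as np
  #   detected = np.array([[0., 0., 1., 1.], [1., 1., 2., 2.]])
  #   scores = np.array([0.9, 0.8])
  #   gt = np.array([[0., 0., 1., 1.], [1., 1., 2., 2.]])
  #   is_difficult = np.array([False, True])
  #   is_group_of = np.array([False, False])
  #   out_scores, tp_fp = evaluator._compute_tp_fp_for_single_class(
  #       detected, scores, gt, is_difficult, is_group_of)
  #   # Detection 0 is a true positive; detection 1 matched a difficult box
  #   # and is dropped, so out_scores == [0.9] and tp_fp == [1.0].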
def _get_ith_class_arrays(self, detected_boxes, detected_scores,
detected_masks, detected_class_labels,
groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, class_index):
"""Returns numpy arrays belonging to class with index `class_index`.
Args:
detected_boxes: A numpy array containing detected boxes.
detected_scores: A numpy array containing detected scores.
detected_masks: A numpy array containing detected masks.
detected_class_labels: A numpy array containing detected class labels.
groundtruth_boxes: A numpy array containing groundtruth boxes.
groundtruth_masks: A numpy array containing groundtruth masks.
groundtruth_class_labels: A numpy array containing groundtruth class
labels.
class_index: An integer index.
Returns:
gt_boxes_at_ith_class: A numpy array containing groundtruth boxes labeled
as ith class.
gt_masks_at_ith_class: A numpy array containing groundtruth masks labeled
as ith class.
detected_boxes_at_ith_class: A numpy array containing detected boxes
corresponding to the ith class.
detected_scores_at_ith_class: A numpy array containing detected scores
corresponding to the ith class.
detected_masks_at_ith_class: A numpy array containing detected masks
corresponding to the ith class.
"""
selected_groundtruth = (groundtruth_class_labels == class_index)
gt_boxes_at_ith_class = groundtruth_boxes[selected_groundtruth]
if groundtruth_masks is not None:
gt_masks_at_ith_class = groundtruth_masks[selected_groundtruth]
else:
gt_masks_at_ith_class = None
selected_detections = (detected_class_labels == class_index)
detected_boxes_at_ith_class = detected_boxes[selected_detections]
detected_scores_at_ith_class = detected_scores[selected_detections]
if detected_masks is not None:
detected_masks_at_ith_class = detected_masks[selected_detections]
else:
detected_masks_at_ith_class = None
return (gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class)
def _remove_invalid_boxes(self,
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Removes entries with invalid boxes.
    A box is invalid if its xmax is not greater than its xmin, or its ymax
    is not greater than its ymin.
Args:
detected_boxes: A float numpy array of size [num_boxes, 4] containing box
coordinates in [ymin, xmin, ymax, xmax] format.
detected_scores: A float numpy array of size [num_boxes].
detected_class_labels: A int32 numpy array of size [num_boxes].
detected_masks: A uint8 numpy array of size [num_boxes, height, width].
Returns:
valid_detected_boxes: A float numpy array of size [num_valid_boxes, 4]
containing box coordinates in [ymin, xmin, ymax, xmax] format.
valid_detected_scores: A float numpy array of size [num_valid_boxes].
valid_detected_class_labels: A int32 numpy array of size
[num_valid_boxes].
valid_detected_masks: A uint8 numpy array of size
[num_valid_boxes, height, width].
"""
valid_indices = np.logical_and(detected_boxes[:, 0] < detected_boxes[:, 2],
detected_boxes[:, 1] < detected_boxes[:, 3])
detected_boxes = detected_boxes[valid_indices]
detected_scores = detected_scores[valid_indices]
detected_class_labels = detected_class_labels[valid_indices]
if detected_masks is not None:
detected_masks = detected_masks[valid_indices]
return [
detected_boxes, detected_scores, detected_class_labels, detected_masks
]
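  # Illustrative sketch (not part of the original module): degenerate boxes
  # are dropped with a single boolean mask, and the same mask is applied to
  # every per-detection array so they stay aligned. Assuming `evaluator` is
  # an instance of this class:
  #
  #   import numpy as np
  #   boxes = np.array([[0., 0., 1., 1.], [0.5, 0.5, 0.2, 0.2]])
  #   scores = np.array([0.9, 0.8])
  #   labels = np.array([1, 2], dtype=np.int32)
  #   boxes, scores, labels, _ = evaluator._remove_invalid_boxes(
  #       boxes, scores, labels)
  #   # Only the first box survives: the second has ymax < ymin.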
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/per_image_evaluation.py | per_image_evaluation.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for np_box_mask_list.BoxMaskList.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list_ops
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_mask_ops
def box_list_to_box_mask_list(boxlist):
"""Converts a BoxList containing 'masks' into a BoxMaskList.
Args:
boxlist: An np_box_list.BoxList object.
Returns:
An np_box_mask_list.BoxMaskList object.
Raises:
ValueError: If boxlist does not contain `masks` as a field.
"""
if not boxlist.has_field('masks'):
raise ValueError('boxlist does not contain mask field.')
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxlist.get(),
mask_data=boxlist.get_field('masks'))
extra_fields = boxlist.get_extra_fields()
for key in extra_fields:
if key != 'masks':
box_mask_list.data[key] = boxlist.get_field(key)
return box_mask_list
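# Illustrative sketch (not part of the original module): converting a BoxList
# that carries a 'masks' field, while preserving its other fields:
#
#   import numpy as np
#   from object_detection.utils import np_box_list
#   boxlist = np_box_list.BoxList(np.array([[0., 0., 1., 1.]], dtype=float))
#   boxlist.add_field('masks', np.ones((1, 4, 4), dtype=np.uint8))
#   boxlist.add_field('scores', np.array([0.7], dtype=float))
#   box_mask_list = box_list_to_box_mask_list(boxlist)
#   # box_mask_list.get_masks() has shape (1, 4, 4), and the 'scores' field
#   # is carried over unchanged.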
def area(box_mask_list):
"""Computes area of masks.
Args:
box_mask_list: np_box_mask_list.BoxMaskList holding N boxes and masks
Returns:
    a numpy array with shape [N] representing mask areas
"""
return np_mask_ops.area(box_mask_list.get_masks())
def intersection(box_mask_list1, box_mask_list2):
"""Compute pairwise intersection areas between masks.
Args:
box_mask_list1: BoxMaskList holding N boxes and masks
box_mask_list2: BoxMaskList holding M boxes and masks
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
"""
return np_mask_ops.intersection(box_mask_list1.get_masks(),
box_mask_list2.get_masks())
def iou(box_mask_list1, box_mask_list2):
"""Computes pairwise intersection-over-union between box and mask collections.
Args:
box_mask_list1: BoxMaskList holding N boxes and masks
box_mask_list2: BoxMaskList holding M boxes and masks
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
return np_mask_ops.iou(box_mask_list1.get_masks(),
box_mask_list2.get_masks())
def ioa(box_mask_list1, box_mask_list2):
"""Computes pairwise intersection-over-area between box and mask collections.
Intersection-over-area (ioa) between two masks mask1 and mask2 is defined as
their intersection area over mask2's area. Note that ioa is not symmetric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks
box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
return np_mask_ops.ioa(box_mask_list1.get_masks(), box_mask_list2.get_masks())
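# Illustrative sketch (not part of the original module) of the asymmetry
# documented above, using two tiny masks where one contains the other:
#
#   import numpy as np
#   big = np.zeros((1, 4, 4), dtype=np.uint8)
#   big[0, :, :] = 1
#   small = np.zeros((1, 4, 4), dtype=np.uint8)
#   small[0, :2, :2] = 1
#   list_big = np_box_mask_list.BoxMaskList(
#       box_data=np.array([[0., 0., 4., 4.]], dtype=float), mask_data=big)
#   list_small = np_box_mask_list.BoxMaskList(
#       box_data=np.array([[0., 0., 2., 2.]], dtype=float), mask_data=small)
#   # ioa(list_big, list_small) == [[1.0]]  (intersection over small's area)
#   # ioa(list_small, list_big) == [[0.25]] (intersection over big's area)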
def gather(box_mask_list, indices, fields=None):
"""Gather boxes from np_box_mask_list.BoxMaskList according to indices.
By default, gather returns boxes corresponding to the input index list, as
well as all additional fields stored in the box_mask_list (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
box_mask_list: np_box_mask_list.BoxMaskList holding N boxes
indices: a 1-d numpy array of type int_
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
Returns:
subbox_mask_list: a np_box_mask_list.BoxMaskList corresponding to the subset
of the input box_mask_list specified by indices
Raises:
ValueError: if specified field is not contained in box_mask_list or if the
indices are not of type int_
"""
if fields is not None:
if 'masks' not in fields:
fields.append('masks')
return box_list_to_box_mask_list(
np_box_list_ops.gather(
boxlist=box_mask_list, indices=indices, fields=fields))
def sort_by_field(box_mask_list, field,
order=np_box_list_ops.SortOrder.DESCEND):
"""Sort boxes and associated fields according to a scalar field.
A common use case is reordering the boxes according to descending scores.
Args:
box_mask_list: BoxMaskList holding N boxes.
field: A BoxMaskList field for sorting and reordering the BoxMaskList.
order: (Optional) 'descend' or 'ascend'. Default is descend.
Returns:
sorted_box_mask_list: A sorted BoxMaskList with the field in the specified
order.
"""
return box_list_to_box_mask_list(
np_box_list_ops.sort_by_field(
boxlist=box_mask_list, field=field, order=order))
def non_max_suppression(box_mask_list,
max_output_size=10000,
iou_threshold=1.0,
score_threshold=-10.0):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. In each iteration, the detected bounding box with
highest score in the available pool is selected.
Args:
box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain
a 'scores' field representing detection scores. All scores belong to the
same class.
max_output_size: maximum number of retained boxes
iou_threshold: intersection over union threshold.
score_threshold: minimum score threshold. Remove the boxes with scores
less than this value. Default value is set to -10. A very
low threshold to pass pretty much all the boxes, unless
the user sets a different score threshold.
Returns:
an np_box_mask_list.BoxMaskList holding M boxes where M <= max_output_size
Raises:
ValueError: if 'scores' field does not exist
ValueError: if threshold is not in [0, 1]
ValueError: if max_output_size < 0
"""
if not box_mask_list.has_field('scores'):
raise ValueError('Field scores does not exist')
if iou_threshold < 0. or iou_threshold > 1.0:
raise ValueError('IOU threshold must be in [0, 1]')
if max_output_size < 0:
    raise ValueError('max_output_size must be non-negative.')
box_mask_list = filter_scores_greater_than(box_mask_list, score_threshold)
if box_mask_list.num_boxes() == 0:
return box_mask_list
box_mask_list = sort_by_field(box_mask_list, 'scores')
# Prevent further computation if NMS is disabled.
if iou_threshold == 1.0:
if box_mask_list.num_boxes() > max_output_size:
selected_indices = np.arange(max_output_size)
return gather(box_mask_list, selected_indices)
else:
return box_mask_list
masks = box_mask_list.get_masks()
num_masks = box_mask_list.num_boxes()
  # is_index_valid is True only for boxes that have not yet been suppressed.
is_index_valid = np.full(num_masks, 1, dtype=bool)
selected_indices = []
num_output = 0
for i in range(num_masks):
if num_output < max_output_size:
if is_index_valid[i]:
num_output += 1
selected_indices.append(i)
is_index_valid[i] = False
valid_indices = np.where(is_index_valid)[0]
if valid_indices.size == 0:
break
intersect_over_union = np_mask_ops.iou(
np.expand_dims(masks[i], axis=0), masks[valid_indices])
intersect_over_union = np.squeeze(intersect_over_union, axis=0)
is_index_valid[valid_indices] = np.logical_and(
is_index_valid[valid_indices],
intersect_over_union <= iou_threshold)
return gather(box_mask_list, np.array(selected_indices))
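# Illustrative sketch (not part of the original module): suppressing the
# lower-scoring of two fully overlapping masks:
#
#   import numpy as np
#   masks = np.ones((2, 4, 4), dtype=np.uint8)  # identical masks, IoU 1.0
#   boxes = np.array([[0., 0., 4., 4.], [0., 0., 4., 4.]], dtype=float)
#   bml = np_box_mask_list.BoxMaskList(box_data=boxes, mask_data=masks)
#   bml.add_field('scores', np.array([0.9, 0.5], dtype=float))
#   kept = non_max_suppression(bml, max_output_size=10, iou_threshold=0.5)
#   # kept.num_boxes() == 1; only the score-0.9 entry survives.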
def multi_class_non_max_suppression(box_mask_list, score_thresh, iou_thresh,
max_output_size):
"""Multi-class version of non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. It operates independently for each class for
which scores are provided (via the scores field of the input box_list),
pruning boxes with score less than a provided threshold prior to
applying NMS.
Args:
box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain a
'scores' field representing detection scores. This scores field is a
tensor that can be 1 dimensional (in the case of a single class) or
2-dimensional, in which case we assume that it takes the
shape [num_boxes, num_classes]. We further assume that this rank is known
statically and that scores.shape[1] is also known (i.e., the number of
classes is fixed and known at graph construction time).
score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (boxes that have high IOU overlap
      with previously selected boxes are removed).
max_output_size: maximum number of retained boxes per class.
Returns:
a box_mask_list holding M boxes with a rank-1 scores field representing
corresponding scores for each box with scores sorted in decreasing order
and a rank-1 classes field representing a class label for each box.
Raises:
ValueError: if iou_thresh is not in [0, 1] or if input box_mask_list does
not have a valid scores field.
"""
if not 0 <= iou_thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList):
raise ValueError('box_mask_list must be a box_mask_list')
if not box_mask_list.has_field('scores'):
raise ValueError('input box_mask_list must have \'scores\' field')
scores = box_mask_list.get_field('scores')
if len(scores.shape) == 1:
scores = np.reshape(scores, [-1, 1])
elif len(scores.shape) == 2:
if scores.shape[1] is None:
raise ValueError('scores field must have statically defined second '
'dimension')
else:
raise ValueError('scores field must be of rank 1 or 2')
num_boxes = box_mask_list.num_boxes()
num_scores = scores.shape[0]
num_classes = scores.shape[1]
if num_boxes != num_scores:
raise ValueError('Incorrect scores field length: actual vs expected.')
selected_boxes_list = []
for class_idx in range(num_classes):
box_mask_list_and_class_scores = np_box_mask_list.BoxMaskList(
box_data=box_mask_list.get(),
mask_data=box_mask_list.get_masks())
class_scores = np.reshape(scores[0:num_scores, class_idx], [-1])
box_mask_list_and_class_scores.add_field('scores', class_scores)
box_mask_list_filt = filter_scores_greater_than(
box_mask_list_and_class_scores, score_thresh)
nms_result = non_max_suppression(
box_mask_list_filt,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh)
nms_result.add_field(
'classes',
np.zeros_like(nms_result.get_field('scores')) + class_idx)
selected_boxes_list.append(nms_result)
selected_boxes = np_box_list_ops.concatenate(selected_boxes_list)
sorted_boxes = np_box_list_ops.sort_by_field(selected_boxes, 'scores')
return box_list_to_box_mask_list(boxlist=sorted_boxes)
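# Illustrative sketch (not part of the original module): with a
# [num_boxes, num_classes] scores field, NMS runs once per class column and
# the per-class results are concatenated and re-sorted by score:
#
#   import numpy as np
#   boxes = np.array([[0., 0., 1., 1.], [0., 0., 1., 1.]], dtype=float)
#   masks = np.ones((2, 4, 4), dtype=np.uint8)
#   bml = np_box_mask_list.BoxMaskList(box_data=boxes, mask_data=masks)
#   bml.add_field('scores', np.array([[0.9, 0.1], [0.2, 0.8]], dtype=float))
#   result = multi_class_non_max_suppression(
#       bml, score_thresh=0.5, iou_thresh=0.5, max_output_size=10)
#   # result carries rank-1 'scores' and 'classes' fields; here one box
#   # survives for class 0 and one for class 1.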
def prune_non_overlapping_masks(box_mask_list1, box_mask_list2, minoverlap=0.0):
"""Prunes the boxes in list1 that overlap less than thresh with list2.
For each mask in box_mask_list1, we want its IOA to be more than minoverlap
with at least one of the masks in box_mask_list2. If it does not, we remove
it. If the masks are not full size image, we do the pruning based on boxes.
Args:
box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks.
box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks.
minoverlap: Minimum required overlap between boxes, to count them as
overlapping.
Returns:
A pruned box_mask_list with size [N', 4].
"""
intersection_over_area = ioa(box_mask_list2, box_mask_list1) # [M, N] tensor
intersection_over_area = np.amax(intersection_over_area, axis=0) # [N] tensor
keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))
keep_inds = np.nonzero(keep_bool)[0]
new_box_mask_list1 = gather(box_mask_list1, keep_inds)
return new_box_mask_list1
def concatenate(box_mask_lists, fields=None):
"""Concatenate list of box_mask_lists.
This op concatenates a list of input box_mask_lists into a larger
box_mask_list. It also
handles concatenation of box_mask_list fields as long as the field tensor
shapes are equal except for the first dimension.
Args:
box_mask_lists: list of np_box_mask_list.BoxMaskList objects
fields: optional list of fields to also concatenate. By default, all
fields from the first BoxMaskList in the list are included in the
concatenation.
Returns:
a box_mask_list with number of boxes equal to
    sum([box_mask_list.num_boxes() for box_mask_list in box_mask_lists])
Raises:
ValueError: if box_mask_lists is invalid (i.e., is not a list, is empty, or
contains non box_mask_list objects), or if requested fields are not
contained in all box_mask_lists
"""
if fields is not None:
if 'masks' not in fields:
fields.append('masks')
return box_list_to_box_mask_list(
np_box_list_ops.concatenate(boxlists=box_mask_lists, fields=fields))
def filter_scores_greater_than(box_mask_list, thresh):
"""Filter to keep only boxes and masks with score exceeding a given threshold.
This op keeps the collection of boxes and masks whose corresponding scores are
greater than the input threshold.
Args:
box_mask_list: BoxMaskList holding N boxes and masks. Must contain a
'scores' field representing detection scores.
thresh: scalar threshold
Returns:
a BoxMaskList holding M boxes and masks where M <= N
Raises:
ValueError: if box_mask_list not a np_box_mask_list.BoxMaskList object or
if it does not have a scores field
"""
if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList):
raise ValueError('box_mask_list must be a BoxMaskList')
if not box_mask_list.has_field('scores'):
raise ValueError('input box_mask_list must have \'scores\' field')
scores = box_mask_list.get_field('scores')
if len(scores.shape) > 2:
raise ValueError('Scores should have rank 1 or 2')
if len(scores.shape) == 2 and scores.shape[1] != 1:
raise ValueError('Scores should have rank 1 or have shape '
'consistent with [None, 1]')
high_score_indices = np.reshape(np.where(np.greater(scores, thresh)),
[-1]).astype(np.int32)
return gather(box_mask_list, high_score_indices)
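# Illustrative sketch (not part of the original module): the threshold is
# strict, so a score exactly equal to `thresh` is filtered out:
#
#   import numpy as np
#   bml = np_box_mask_list.BoxMaskList(
#       box_data=np.array([[0., 0., 1., 1.], [0., 0., 1., 1.]], dtype=float),
#       mask_data=np.ones((2, 4, 4), dtype=np.uint8))
#   bml.add_field('scores', np.array([0.5, 0.7], dtype=float))
#   kept = filter_scores_greater_than(bml, 0.5)
#   # kept.num_boxes() == 1, because np.greater is strict and 0.5 > 0.5 is
#   # False.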
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_mask_list_ops.py | np_box_mask_list_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions typically receive an image and draw some visualization on it.
They do not return a value; instead, they modify the image in place.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, in the range [2, 17] that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
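# Illustrative sketch (not part of the original module): using the multiplier
# to pick visually distinct colors for successive indices (e.g. track ids):
#
#   p = _get_multiplier_for_color_randomness()
#   colors = [STANDARD_COLORS[(p * i) % len(STANDARD_COLORS)]
#             for i in range(5)]
#   # Consecutive indices now land on well-separated entries of
#   # STANDARD_COLORS instead of adjacent (often similar-looking) colors.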
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
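# Illustrative sketch (not part of the original module): encoding a random
# image and checking the result is a PNG byte string:
#
#   import numpy as np
#   image = np.random.randint(0, 255, size=(8, 8, 3), dtype=np.uint8)
#   png_bytes = encode_image_array_as_png_str(image)
#   # png_bytes starts with the PNG signature b'\x89PNG'.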
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
    # Move up by the full label height (text plus both margins) so that
    # stacked display strings do not overlap.
    text_bottom -= text_height + 2 * margin
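# Illustrative sketch (not part of the original module): drawing one labeled
# box on a blank PIL image using normalized coordinates:
#
#   import numpy as np
#   image = Image.fromarray(np.zeros((100, 200, 3), dtype=np.uint8))
#   draw_bounding_box_on_image(
#       image, ymin=0.1, xmin=0.2, ymax=0.9, xmax=0.8,
#       color='LimeGreen', thickness=2, display_str_list=['person: 87%'])
#   # The box spans pixels x in [40, 160] and y in [10, 90], with the label
#   # rendered on a filled rectangle above the box.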
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_keypoint_scores=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4]: masks (optional)
[4-5]: keypoints (optional)
[4-6]: keypoint_scores (optional)
[4-7]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_keypoint_scores: Whether keypoint scores should be expected as a
positional argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be:
image - uint8 numpy array with shape (img_height, img_width, 3).
boxes - a numpy array of shape [N, 4].
classes - a numpy array of shape [N].
scores - a numpy array of shape [N] or None.
-- Optional positional arguments --
instance_masks - a numpy array of shape [N, image_height, image_width].
keypoints - a numpy array of shape [N, num_keypoints, 2].
keypoint_scores - a numpy array of shape [N, num_keypoints].
track_ids - a numpy array of shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = keypoint_scores = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoint_scores:
keypoint_scores = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def draw_heatmaps_on_image(image, heatmaps):
"""Draws heatmaps on an image.
The heatmaps are handled channel by channel and different colors are used to
paint different heatmap channels.
Args:
image: a PIL.Image object.
heatmaps: a numpy array with shape [image_height, image_width, channel].
Note that the image_height and image_width should match the size of input
image.
"""
draw = ImageDraw.Draw(image)
channel = heatmaps.shape[2]
for c in range(channel):
heatmap = heatmaps[:, :, c] * 255
heatmap = heatmap.astype('uint8')
bitmap = Image.fromarray(heatmap, 'L')
    # The 'L'-mode heatmap serves directly as the drawing mask for
    # draw.bitmap below.
draw.bitmap(
xy=[(0, 0)],
bitmap=bitmap,
fill=STANDARD_COLORS[c])
def draw_heatmaps_on_image_array(image, heatmaps):
"""Overlays heatmaps to an image (numpy array).
The function overlays the heatmaps on top of image. The heatmap values will be
  painted with different colors depending on the channels. Similar to the
  "draw_heatmaps_on_image" function except the inputs are numpy arrays.
Args:
image: a numpy array with shape [height, width, 3].
heatmaps: a numpy array with shape [height, width, channel].
Returns:
An uint8 numpy array representing the input image painted with heatmap
colors.
"""
if not isinstance(image, np.ndarray):
image = image.numpy()
if not isinstance(heatmaps, np.ndarray):
heatmaps = heatmaps.numpy()
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_heatmaps_on_image(image_pil, heatmaps)
return np.array(image_pil)
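# Illustrative sketch (not part of the original module): overlaying a
# two-channel heatmap on an image; each channel is painted with a different
# entry from STANDARD_COLORS:
#
#   import numpy as np
#   image = np.zeros((32, 32, 3), dtype=np.uint8)
#   heatmaps = np.zeros((32, 32, 2), dtype=np.float32)
#   heatmaps[4:12, 4:12, 0] = 1.0    # first channel -> STANDARD_COLORS[0]
#   heatmaps[20:28, 20:28, 1] = 1.0  # second channel -> STANDARD_COLORS[1]
#   overlaid = draw_heatmaps_on_image_array(image, heatmaps)
#   # overlaid is a uint8 array of shape (32, 32, 3).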
def draw_heatmaps_on_image_tensors(images,
heatmaps,
apply_sigmoid=False):
"""Draws heatmaps on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
heatmaps: [N, h, w, channel] float32 tensor of heatmaps. Note that the
heatmaps will be resized to match the input image size before overlaying
the heatmaps with input images. Theoretically the heatmap height width
should have the same aspect ratio as the input image to avoid potential
misalignment introduced by the image resize.
apply_sigmoid: Whether to apply a sigmoid layer on top of the heatmaps. If
the heatmaps come directly from the prediction logits, then we should
      apply the sigmoid layer to make sure the values are in [0.0, 1.0].
Returns:
4D image tensor of type uint8, with heatmaps overlaid on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
_, height, width, _ = shape_utils.combined_static_and_dynamic_shape(images)
if apply_sigmoid:
heatmaps = tf.math.sigmoid(heatmaps)
resized_heatmaps = tf.image.resize(heatmaps, size=[height, width])
elems = [images, resized_heatmaps]
def draw_heatmaps(image_and_heatmaps):
"""Draws heatmaps on image."""
image_with_heatmaps = tf.py_function(
draw_heatmaps_on_image_array,
image_and_heatmaps,
tf.uint8)
return image_with_heatmaps
images = tf.map_fn(draw_heatmaps, elems, dtype=tf.uint8, back_prop=False)
return images
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_scores: A 3D float32 tensor of shape [N, max_detection,
num_keypoints] with keypoint scores.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: [N, max_detections] int32 tensor of unique tracks ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
      Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_keypoint_scores=keypoint_scores is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if keypoint_scores is not None:
elems.append(keypoint_scores)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
    A list of [1, H, 2 * W, C] uint8 tensors. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if (key != input_data_fields.original_image and
key != input_data_fields.image_additional_channels):
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
num_gt_boxes = [-1] * eval_dict[input_data_fields.original_image].shape[0]
if input_data_fields.num_groundtruth_boxes in eval_dict:
num_gt_boxes = tf.cast(eval_dict[input_data_fields.num_groundtruth_boxes],
tf.int32)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
keypoint_scores = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
if detection_fields.detection_keypoint_scores in eval_dict:
keypoint_scores = tf.expand_dims(
eval_dict[detection_fields.detection_keypoint_scores][indx], axis=0)
else:
keypoint_scores = tf.expand_dims(tf.cast(
keypoint_ops.set_keypoint_visibilities(
eval_dict[detection_fields.detection_keypoints][indx]),
dtype=tf.float32), axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
groundtruth_keypoints = None
groundtruth_keypoint_scores = None
gt_kpt_vis_fld = input_data_fields.groundtruth_keypoint_visibilities
if input_data_fields.groundtruth_keypoints in eval_dict:
groundtruth_keypoints = tf.expand_dims(
eval_dict[input_data_fields.groundtruth_keypoints][indx], axis=0)
if gt_kpt_vis_fld in eval_dict:
groundtruth_keypoint_scores = tf.expand_dims(
tf.cast(eval_dict[gt_kpt_vis_fld][indx], dtype=tf.float32), axis=0)
else:
groundtruth_keypoint_scores = tf.expand_dims(tf.cast(
keypoint_ops.set_keypoint_visibilities(
eval_dict[input_data_fields.groundtruth_keypoints][indx]),
dtype=tf.float32), axis=0)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
num_gt_boxes_i = num_gt_boxes[indx]
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=groundtruth_keypoints,
keypoint_scores=groundtruth_keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat([images_with_detections,
images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx]
                      [:num_gt_boxes_i],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat(
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
def draw_densepose_visualizations(eval_dict,
max_boxes_to_draw=20,
min_score_thresh=0.2,
num_parts=24,
dp_coord_to_visualize=0):
"""Draws DensePose visualizations.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example().
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
num_parts: The number of different densepose parts.
    dp_coord_to_visualize: Whether to visualize v-coordinates (0) or
      u-coordinates (1) overlaid on the person masks.
Returns:
    A list of [1, H, W, C] uint8 tensors, each element corresponding to an image
in the batch.
Raises:
ValueError: If `dp_coord_to_visualize` is not 0 or 1.
"""
if dp_coord_to_visualize not in (0, 1):
    raise ValueError('`dp_coord_to_visualize` must be either 0 (for v '
                     'coordinates) or 1 (for u coordinates), but instead got '
'{}'.format(dp_coord_to_visualize))
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
if detection_fields.detection_masks not in eval_dict:
raise ValueError('Expected `detection_masks` in `eval_dict`.')
if detection_fields.detection_surface_coords not in eval_dict:
raise ValueError('Expected `detection_surface_coords` in `eval_dict`.')
images_with_detections_list = []
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
# Note that detection masks have already been resized to the original image
# shapes, but `original_image` has not.
# TODO(ronnyvotel): Consider resizing `original_image` in
# eval_util.result_dict_for_batched_example().
true_shape = eval_dict[input_data_fields.true_image_shape][indx]
original_shape = eval_dict[
input_data_fields.original_image_spatial_shape][indx]
image = eval_dict[input_data_fields.original_image][indx]
image = shape_utils.pad_or_clip_nd(image, [true_shape[0], true_shape[1], 3])
image = _resize_original_image(image, original_shape)
scores = eval_dict[detection_fields.detection_scores][indx]
detection_masks = eval_dict[detection_fields.detection_masks][indx]
surface_coords = eval_dict[detection_fields.detection_surface_coords][indx]
def draw_densepose_py_func(image, detection_masks, surface_coords, scores):
"""Overlays part masks and surface coords on original images."""
surface_coord_image = np.copy(image)
for i, (score, surface_coord, mask) in enumerate(
zip(scores, surface_coords, detection_masks)):
if i == max_boxes_to_draw:
break
if score > min_score_thresh:
draw_part_mask_on_image_array(image, mask, num_parts=num_parts)
draw_float_channel_on_image_array(
surface_coord_image, surface_coord[:, :, dp_coord_to_visualize],
mask)
return np.concatenate([image, surface_coord_image], axis=1)
image_with_densepose = tf.py_func(
draw_densepose_py_func,
[image, detection_masks, surface_coords, scores],
tf.uint8)
images_with_detections_list.append(
image_with_densepose[tf.newaxis, :, :, :])
return images_with_detections_list
def draw_keypoints_on_image_array(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
    keypoint_scores: a numpy array with shape [num_keypoints]. If provided,
      only those keypoints with a score above min_score_thresh will be
      visualized.
min_score_thresh: A scalar indicating the minimum keypoint score required
for a keypoint to be visualized. Note that keypoint_scores must be
provided for this threshold to take effect.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is
      green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil,
keypoints,
keypoint_scores=keypoint_scores,
min_score_thresh=min_score_thresh,
color=color,
radius=radius,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=keypoint_edge_color,
keypoint_edge_width=keypoint_edge_width)
np.copyto(image, np.array(image_pil))
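# Example (illustrative sketch): drawing two connected keypoints on a blank
# numpy image. The coordinates and the single edge are assumptions for
# demonstration; the image is modified in place.
def _example_draw_keypoints():
  """Hypothetical usage sketch for draw_keypoints_on_image_array."""
  image = np.zeros((200, 200, 3), dtype=np.uint8)
  # Keypoints are (y, x) pairs in normalized coordinates.
  keypoints = np.array([[0.25, 0.25], [0.75, 0.75]])
  draw_keypoints_on_image_array(image, keypoints, keypoint_edges=[(0, 1)])
  return image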
def draw_keypoints_on_image(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
keypoint_scores: a numpy array with shape [num_keypoints].
min_score_thresh: a score threshold for visualizing keypoints. Only used if
keypoint_scores is provided.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is
      green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints = np.array(keypoints)
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
if keypoint_scores is not None:
keypoint_scores = np.array(keypoint_scores)
valid_kpt = np.greater(keypoint_scores, min_score_thresh)
else:
valid_kpt = np.where(np.any(np.isnan(keypoints), axis=1),
np.zeros_like(keypoints[:, 0]),
np.ones_like(keypoints[:, 0]))
valid_kpt = [v for v in valid_kpt]
for keypoint_x, keypoint_y, valid in zip(keypoints_x, keypoints_y, valid_kpt):
if valid:
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
if not (valid_kpt[keypoint_start] and valid_kpt[keypoint_end]):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      values of either 0 or 1.
    color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
  pil_mask = Image.fromarray(np.uint8(255.0 * alpha * (mask > 0))).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
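# Example (illustrative sketch): overlaying a square binary mask on a blank
# numpy image. The mask extent and color are assumptions for demonstration;
# the image is modified in place.
def _example_draw_mask():
  """Hypothetical usage sketch for draw_mask_on_image_array."""
  image = np.zeros((100, 100, 3), dtype=np.uint8)
  mask = np.zeros((100, 100), dtype=np.uint8)
  mask[20:60, 20:60] = 1
  draw_mask_on_image_array(image, mask, color='blue', alpha=0.5)
  return image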
def draw_part_mask_on_image_array(image, mask, alpha=0.4, num_parts=24):
"""Draws part mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
1-indexed parts (0 for background).
alpha: transparency value between 0 and 1 (default: 0.4)
num_parts: the maximum number of parts that may exist in the image (default
24 for DensePose).
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
pil_image = Image.fromarray(image)
part_colors = np.zeros_like(image)
mask_1_channel = mask[:, :, np.newaxis]
for i, color in enumerate(STANDARD_COLORS[:num_parts]):
rgb = np.array(ImageColor.getrgb(color), dtype=np.uint8)
part_colors += (mask_1_channel == i + 1) * rgb[np.newaxis, np.newaxis, :]
pil_part_colors = Image.fromarray(np.uint8(part_colors)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * (mask > 0))).convert('L')
pil_image = Image.composite(pil_part_colors, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def draw_float_channel_on_image_array(image, channel, mask, alpha=0.9,
cmap='YlGn'):
"""Draws a floating point channel on an image array.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    channel: float32 numpy array with shape (img_height, img_width). The
      values should be in the range [0, 1], and will be mapped to colors using
      the provided colormap `cmap` argument.
    mask: a uint8 numpy array of shape (img_height, img_width) with
      1-indexed parts (0 for background).
alpha: transparency value between 0 and 1 (default: 0.9)
cmap: string with the colormap to use.
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if channel.dtype != np.float32:
raise ValueError('`channel` not of type np.float32')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != channel.shape:
raise ValueError('The image has spatial dimensions %s but the channel has '
'dimensions %s' % (image.shape[:2], channel.shape))
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
cm = plt.get_cmap(cmap)
pil_image = Image.fromarray(image)
colored_channel = cm(channel)[:, :, :3]
pil_colored_channel = Image.fromarray(
np.uint8(colored_channel * 255)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * (mask > 0))).convert('L')
pil_image = Image.composite(pil_colored_channel, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
mask_alpha=.4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a uint8 numpy array of shape [N, image_height, image_width],
can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None.
keypoint_scores: a numpy array of shape [N, num_keypoints], can be None.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
    use_normalized_coordinates: whether boxes are to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box or keypoint to be
visualized.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
mask_alpha: transparency value between 0 and 1 (default: 0.4).
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_keypoint_scores_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if keypoint_scores is not None:
box_to_keypoint_scores_map[box].extend(keypoint_scores[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(round(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, round(100*scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
        prime_multiplier = _get_multiplier_for_color_randomness()
        box_to_color_map[box] = STANDARD_COLORS[
            (prime_multiplier * track_ids[i]) % len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color,
alpha=mask_alpha
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
keypoint_scores_for_box = None
if box_to_keypoint_scores_map:
keypoint_scores_for_box = box_to_keypoint_scores_map[box]
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
keypoint_scores_for_box,
min_score_thresh=min_score_thresh,
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
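# Example (illustrative sketch): visualizing two detections on a dummy numpy
# image. With the default min_score_thresh of 0.5, only the first box is
# drawn. The category index entries are assumptions for demonstration.
def _example_visualize_detections():
  """Hypothetical sketch for visualize_boxes_and_labels_on_image_array."""
  image = np.zeros((300, 300, 3), dtype=np.uint8)
  boxes = np.array([[0.1, 0.1, 0.6, 0.6], [0.4, 0.4, 0.9, 0.9]])
  classes = np.array([1, 2])
  scores = np.array([0.95, 0.45])
  category_index = {1: {'id': 1, 'name': 'person'},
                    2: {'id': 2, 'name': 'bicycle'}}
  return visualize_boxes_and_labels_on_image_array(
      image, boxes, classes, scores, category_index,
      use_normalized_coordinates=True)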
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
    ax = fig.add_subplot(1, 1, 1)
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
        1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
    ax = fig.add_subplot(1, 1, 1)
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
    image = np.frombuffer(
        fig.canvas.tostring_rgb(), dtype='uint8').reshape(
            1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
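# Example (illustrative sketch): adding a histogram image summary for a small
# tensor of values. The bin edges and summary name are assumptions; like the
# rest of this module, it relies on TF1-style graph summaries.
def _example_add_hist_summary():
  """Hypothetical usage sketch for add_hist_image_summary."""
  values = tf.constant([0.1, 0.2, 0.2, 0.7, 0.9], dtype=tf.float32)
  add_hist_image_summary(values, bins=np.linspace(0.0, 1.0, 11),
                         name='ExampleHistogram')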
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image',
keypoint_edges=None):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
summary_name_prefix: A string prefix for each image summary.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._keypoint_edges = keypoint_edges
self._images = []
def clear(self):
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
for a batched example. Note that, we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
          tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.InputDataFields.groundtruth_keypoints - (optional)
[batch_size, num_boxes, num_keypoints, 2] float32 tensor with
keypoint coordinates in format [y, x].
fields.InputDataFields.groundtruth_keypoint_visibilities - (optional)
[batch_size, num_boxes, num_keypoints] bool tensor with
keypoint visibilities.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
fields.DetectionResultFields.detection_keypoint_scores - (optional)
[batch_size, max_num_boxes, num_keypoints] float32 tensor with
            keypoint scores.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4),
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
update_op = self.add_images([[images[0]]])
image_tensors = get_images()
else:
update_op = tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(
get_images, [], [tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, update_op)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right',
keypoint_edges=None):
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix,
keypoint_edges=keypoint_edges)
def images_from_evaluation_dict(self, eval_dict):
return draw_side_by_side_evaluation_image(eval_dict, self._category_index,
self._max_boxes_to_draw,
self._min_score_thresh,
self._use_normalized_coordinates,
self._keypoint_edges)
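# Example (illustrative sketch): wiring side-by-side evaluation images into
# estimator eval metric ops. The category index and thresholds are assumptions;
# `eval_dict` is expected to come from
# eval_util.result_dict_for_batched_example().
def _example_eval_visualization_ops(eval_dict):
  """Hypothetical usage sketch for VisualizeSingleFrameDetections."""
  category_index = {1: {'id': 1, 'name': 'person'}}
  visualizer = VisualizeSingleFrameDetections(
      category_index, max_examples_to_draw=2, min_score_thresh=0.3)
  return visualizer.get_estimator_eval_metric_ops(eval_dict)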
# End of file: object_detection/utils/visualization_utils.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.test_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
from object_detection.utils import test_utils
class TestUtilsTest(test_case.TestCase):
def test_diagonal_gradient_image(self):
"""Tests if a good pyramid image is created."""
pyramid_image = test_utils.create_diagonal_gradient_image(3, 4, 2)
# Test which is easy to understand.
expected_first_channel = np.array([[3, 2, 1, 0],
[4, 3, 2, 1],
[5, 4, 3, 2]], dtype=np.float32)
self.assertAllEqual(np.squeeze(pyramid_image[:, :, 0]),
expected_first_channel)
# Actual test.
expected_image = np.array([[[3, 30],
[2, 20],
[1, 10],
[0, 0]],
[[4, 40],
[3, 30],
[2, 20],
[1, 10]],
[[5, 50],
[4, 40],
[3, 30],
[2, 20]]], dtype=np.float32)
self.assertAllEqual(pyramid_image, expected_image)
def test_random_boxes(self):
"""Tests if valid random boxes are created."""
num_boxes = 1000
max_height = 3
max_width = 5
boxes = test_utils.create_random_boxes(num_boxes,
max_height,
max_width)
true_column = np.ones(shape=(num_boxes)) == 1
self.assertAllEqual(boxes[:, 0] < boxes[:, 2], true_column)
self.assertAllEqual(boxes[:, 1] < boxes[:, 3], true_column)
self.assertGreaterEqual(boxes[:, 0].min(), 0)
self.assertGreaterEqual(boxes[:, 1].min(), 0)
self.assertLessEqual(boxes[:, 2].max(), max_height)
self.assertLessEqual(boxes[:, 3].max(), max_width)
def test_first_rows_close_as_set(self):
a = [1, 2, 3, 0, 0]
b = [3, 2, 1, 0, 0]
k = 3
self.assertTrue(test_utils.first_rows_close_as_set(a, b, k))
a = [[1, 2], [1, 4], [0, 0]]
b = [[1, 4 + 1e-9], [1, 2], [0, 0]]
k = 2
self.assertTrue(test_utils.first_rows_close_as_set(a, b, k))
a = [[1, 2], [1, 4], [0, 0]]
b = [[1, 4 + 1e-9], [2, 2], [0, 0]]
k = 2
self.assertFalse(test_utils.first_rows_close_as_set(a, b, k))
if __name__ == '__main__':
tf.test.main()
# End of file: object_detection/utils/test_utils_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for reading and updating configuration files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import text_format
import tensorflow.compat.v1 as tf
from tensorflow.python.lib.io import file_io
from object_detection.protos import eval_pb2
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
def get_image_resizer_config(model_config):
"""Returns the image resizer config from a model config.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
An image_resizer_pb2.ImageResizer.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "image_resizer"):
return getattr(meta_architecture_config, "image_resizer")
else:
raise ValueError("{} has no image_reszier_config".format(
meta_architecture))
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
A list of two integers of the form [height, width]. `height` and `width` are
    set to -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
if image_resizer_config.HasField(
"identity_resizer") or image_resizer_config.HasField(
"conditional_shape_resizer"):
return [-1, -1]
raise ValueError("Unknown image resizer type.")
def get_max_num_context_features(model_config):
"""Returns maximum number of context features from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the max number of context features if the model
config contains context_config, None otherwise
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.max_num_context_features
def get_context_feature_length(model_config):
"""Returns context feature length from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the fixed length of each feature in context_features.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.context_feature_length
def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
"""Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
proto.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override pipeline_config_path.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Value are the
corresponding config objects.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(pipeline_config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
if config_override:
text_format.Merge(config_override, pipeline_config)
return create_configs_from_pipeline_proto(pipeline_config)
def clear_fine_tune_checkpoint(pipeline_config_path,
new_pipeline_config_path):
"""Clears fine_tune_checkpoint and writes a new pipeline config file."""
configs = get_configs_from_pipeline_file(pipeline_config_path)
configs["train_config"].fine_tune_checkpoint = ""
configs["train_config"].load_all_detection_checkpoint_vars = False
pipeline_proto = create_pipeline_proto_from_configs(configs)
with tf.gfile.Open(new_pipeline_config_path, "wb") as f:
f.write(text_format.MessageToString(pipeline_proto))
def update_fine_tune_checkpoint_type(train_config):
"""Set `fine_tune_checkpoint_type` using `from_detection_checkpoint`.
`train_config.from_detection_checkpoint` field is deprecated. For backward
compatibility, this function sets `train_config.fine_tune_checkpoint_type`
based on `train_config.from_detection_checkpoint`.
Args:
train_config: train_pb2.TrainConfig proto object.
"""
if not train_config.fine_tune_checkpoint_type:
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = "detection"
else:
train_config.fine_tune_checkpoint_type = "classification"
def create_configs_from_pipeline_proto(pipeline_config):
"""Creates a configs dictionary from pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto object.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_configs`. Value are
the corresponding config objects or list of config objects (only for
eval_input_configs).
"""
configs = {}
configs["model"] = pipeline_config.model
configs["train_config"] = pipeline_config.train_config
configs["train_input_config"] = pipeline_config.train_input_reader
configs["eval_config"] = pipeline_config.eval_config
configs["eval_input_configs"] = pipeline_config.eval_input_reader
# Keeps eval_input_config only for backwards compatibility. All clients should
# read eval_input_configs instead.
if configs["eval_input_configs"]:
configs["eval_input_config"] = configs["eval_input_configs"][0]
if pipeline_config.HasField("graph_rewriter"):
configs["graph_rewriter_config"] = pipeline_config.graph_rewriter
return configs
def get_graph_rewriter_config_from_file(graph_rewriter_config_file):
"""Parses config for graph rewriter.
Args:
graph_rewriter_config_file: file path to the graph rewriter config.
Returns:
graph_rewriter_pb2.GraphRewriter proto
"""
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
with tf.gfile.GFile(graph_rewriter_config_file, "r") as f:
text_format.Merge(f.read(), graph_rewriter_config)
return graph_rewriter_config
def create_pipeline_proto_from_configs(configs):
"""Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.
This function performs the inverse operation of
create_configs_from_pipeline_proto().
Args:
configs: Dictionary of configs. See get_configs_from_pipeline_file().
Returns:
A fully populated pipeline_pb2.TrainEvalPipelineConfig.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.CopyFrom(configs["model"])
pipeline_config.train_config.CopyFrom(configs["train_config"])
pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])
pipeline_config.eval_config.CopyFrom(configs["eval_config"])
pipeline_config.eval_input_reader.extend(configs["eval_input_configs"])
if "graph_rewriter_config" in configs:
pipeline_config.graph_rewriter.CopyFrom(configs["graph_rewriter_config"])
return pipeline_config
def save_pipeline_config(pipeline_config, directory):
"""Saves a pipeline config text file to disk.
Args:
pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
directory: The model directory into which the pipeline config file will be
saved.
"""
if not file_io.file_exists(directory):
file_io.recursive_create_dir(directory)
pipeline_config_path = os.path.join(directory, "pipeline.config")
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(pipeline_config_path, "wb") as f:
tf.logging.info("Writing pipeline config file to %s",
pipeline_config_path)
f.write(config_text)
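# Example (illustrative sketch): a typical read-modify-write round trip using
# the helpers above. The batch size value and paths are assumptions for
# demonstration only.
def _example_edit_pipeline_config(pipeline_config_path, model_dir):
  """Hypothetical usage sketch: load a pipeline config, tweak it, re-save."""
  configs = get_configs_from_pipeline_file(pipeline_config_path)
  configs["train_config"].batch_size = 8
  pipeline_proto = create_pipeline_proto_from_configs(configs)
  save_pipeline_config(pipeline_proto, model_dir)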
def get_configs_from_multiple_files(model_config_path="",
train_config_path="",
train_input_config_path="",
eval_config_path="",
eval_input_config_path="",
graph_rewriter_config_path=""):
"""Reads training configuration from multiple config files.
Args:
model_config_path: Path to model_pb2.DetectionModel.
train_config_path: Path to train_pb2.TrainConfig.
train_input_config_path: Path to input_reader_pb2.InputReader.
eval_config_path: Path to eval_pb2.EvalConfig.
eval_input_config_path: Path to input_reader_pb2.InputReader.
graph_rewriter_config_path: Path to graph_rewriter_pb2.GraphRewriter.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Key/Values are
returned only for valid (non-empty) strings.
"""
configs = {}
if model_config_path:
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(model_config_path, "r") as f:
text_format.Merge(f.read(), model_config)
configs["model"] = model_config
if train_config_path:
train_config = train_pb2.TrainConfig()
with tf.gfile.GFile(train_config_path, "r") as f:
text_format.Merge(f.read(), train_config)
configs["train_config"] = train_config
if train_input_config_path:
train_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(train_input_config_path, "r") as f:
text_format.Merge(f.read(), train_input_config)
configs["train_input_config"] = train_input_config
if eval_config_path:
eval_config = eval_pb2.EvalConfig()
with tf.gfile.GFile(eval_config_path, "r") as f:
text_format.Merge(f.read(), eval_config)
configs["eval_config"] = eval_config
if eval_input_config_path:
eval_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(eval_input_config_path, "r") as f:
text_format.Merge(f.read(), eval_input_config)
configs["eval_input_configs"] = [eval_input_config]
if graph_rewriter_config_path:
configs["graph_rewriter_config"] = get_graph_rewriter_config_from_file(
graph_rewriter_config_path)
return configs
def get_number_of_classes(model_config):
"""Returns the number of classes for a detection model.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
Number of classes.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "num_classes"):
return meta_architecture_config.num_classes
else:
raise ValueError("{} does not have num_classes.".format(meta_architecture))
def get_optimizer_type(train_config):
"""Returns the optimizer type for training.
Args:
train_config: A train_pb2.TrainConfig.
Returns:
The type of the optimizer
"""
return train_config.optimizer.WhichOneof("optimizer")
def get_learning_rate_type(optimizer_config):
"""Returns the learning rate type for training.
Args:
optimizer_config: An optimizer_pb2.Optimizer.
Returns:
The type of the learning rate.
"""
return optimizer_config.learning_rate.WhichOneof("learning_rate")
def _is_generic_key(key):
"""Determines whether the key starts with a generic config dictionary key."""
for prefix in [
"graph_rewriter_config",
"model",
"train_input_config",
"train_config",
"eval_config"]:
if key.startswith(prefix + "."):
return True
return False
def _check_and_convert_legacy_input_config_key(key):
"""Checks key and converts legacy input config update to specific update.
Args:
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: always returns None since legacy input config key never
specifies the target input config. Keeping this output only to match the
output form defined for input config update.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
"""
key_name = None
input_name = None
field_name = key
is_valid_input_config_key = True
if field_name == "train_shuffle":
key_name = "train_input_config"
field_name = "shuffle"
elif field_name == "eval_shuffle":
key_name = "eval_input_configs"
field_name = "shuffle"
elif field_name == "train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
elif field_name == "append_train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "append_eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
else:
is_valid_input_config_key = False
return is_valid_input_config_key, key_name, input_name, field_name
def check_and_parse_input_config_key(configs, key):
"""Checks key and returns specific fields if key is valid input config update.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicate whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: the name of the input config to be updated. None if
is_valid_input_config_key is false.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
Raises:
ValueError: when the input key format doesn't match any known formats.
ValueError: if key_name doesn't match 'eval_input_configs' or
'train_input_config'.
ValueError: if input_name doesn't match any name in train or eval input
configs.
ValueError: if field_name doesn't match any supported fields.
"""
key_name = None
input_name = None
field_name = None
fields = key.split(":")
if len(fields) == 1:
field_name = key
return _check_and_convert_legacy_input_config_key(key)
elif len(fields) == 3:
key_name = fields[0]
input_name = fields[1]
field_name = fields[2]
else:
raise ValueError("Invalid key format when overriding configs.")
# Checks if key_name is valid for specific update.
if key_name not in ["eval_input_configs", "train_input_config"]:
raise ValueError("Invalid key_name when overriding input config.")
# Checks if input_name is valid for specific update. For train input config it
# should match configs[key_name].name, for eval input configs it should match
# the name field of one of the eval_input_configs.
if isinstance(configs[key_name], input_reader_pb2.InputReader):
is_valid_input_name = configs[key_name].name == input_name
else:
is_valid_input_name = input_name in [
eval_input_config.name for eval_input_config in configs[key_name]
]
if not is_valid_input_name:
raise ValueError("Invalid input_name when overriding input config.")
# Checks if field_name is valid for specific update.
if field_name not in [
"input_path", "label_map_path", "shuffle", "mask_type",
"sample_1_of_n_examples"
]:
raise ValueError("Invalid field_name when overriding input config.")
return True, key_name, input_name, field_name
def merge_external_params_with_configs(configs, hparams=None, kwargs_dict=None):
"""Updates `configs` dictionary based on supplied parameters.
This utility is for modifying specific fields in the object detection configs.
Say that one would like to experiment with different learning rates, momentum
values, or batch sizes. Rather than creating a new config text file for each
experiment, one can use a single base config file, and update particular
values.
There are two types of field overrides:
1. Strategy-based overrides, which update multiple relevant configuration
options. For example, updating `learning_rate` will update both the warmup and
final learning rates.
In this case key can be one of the following formats:
1. legacy update: single string that indicates the attribute to be
updated. E.g. 'label_map_path', 'eval_input_path', 'shuffle'.
Note that when updating fields (e.g. eval_input_path, eval_shuffle) in
eval_input_configs, the override will only be applied when
eval_input_configs has exactly 1 element.
2. specific update: colon separated string that indicates which field in
which input_config to update. It should have 3 fields:
- key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
- input_name: a 'name' that can be used to identify elements, especially
when configs[key_name] is a repeated field.
- field_name: name of the field that you want to override.
For example, given configs dict as below:
configs = {
'model': {...}
'train_config': {...}
'train_input_config': {...}
'eval_config': {...}
'eval_input_configs': [{ name:"eval_coco", ...},
{ name:"eval_voc", ... }]
}
Assume we want to update the input_path of the eval_input_config
whose name is 'eval_coco'. The `key` would then be:
'eval_input_configs:eval_coco:input_path'
2. Generic key/value, which update a specific parameter based on namespaced
configuration keys. For example,
`model.ssd.loss.hard_example_miner.max_negatives_per_positive` will update the
hard example miner configuration for an SSD model config. Generic overrides
are automatically detected based on the namespaced keys.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
hparams: A `HParams`.
kwargs_dict: Extra keyword arguments that are treated the same way as
attribute/value pairs in `hparams`. Note that hyperparameters with the
same names will override keyword arguments.
Returns:
`configs` dictionary.
Raises:
ValueError: when the key string doesn't match any of its allowed formats.
"""
if kwargs_dict is None:
kwargs_dict = {}
if hparams:
kwargs_dict.update(hparams.values())
for key, value in kwargs_dict.items():
tf.logging.info("Maybe overwriting %s: %s", key, value)
# pylint: disable=g-explicit-bool-comparison
if value == "" or value is None:
continue
# pylint: enable=g-explicit-bool-comparison
elif _maybe_update_config_with_key_value(configs, key, value):
continue
elif _is_generic_key(key):
_update_generic(configs, key, value)
else:
tf.logging.info("Ignoring config override key: %s", key)
return configs
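# Example (illustrative sketch): overriding the initial learning rate via a
# strategy-based key and one eval input path via a specific key. The input
# name "eval_coco" mirrors the docstring example above and is an assumption
# about the loaded configs.
def _example_override_configs(configs):
  """Hypothetical usage sketch for merge_external_params_with_configs."""
  return merge_external_params_with_configs(
      configs,
      kwargs_dict={
          "learning_rate": 0.01,
          "eval_input_configs:eval_coco:input_path": "path/to/eval.record",
      })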
def _maybe_update_config_with_key_value(configs, key, value):
"""Checks key type and updates `configs` with the key value pair accordingly.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: String indicates the field(s) to be updated.
value: Value used to override existing field value.
Returns:
A boolean value that indicates whether the override succeeds.
Raises:
ValueError: when the key string doesn't match any of the formats above.
"""
is_valid_input_config_key, key_name, input_name, field_name = (
check_and_parse_input_config_key(configs, key))
if is_valid_input_config_key:
update_input_reader_config(
configs,
key_name=key_name,
input_name=input_name,
field_name=field_name,
value=value)
elif field_name == "learning_rate":
_update_initial_learning_rate(configs, value)
elif field_name == "batch_size":
_update_batch_size(configs, value)
elif field_name == "momentum_optimizer_value":
_update_momentum_optimizer_value(configs, value)
elif field_name == "classification_localization_weight_ratio":
# Localization weight is fixed to 1.0.
_update_classification_localization_weight_ratio(configs, value)
elif field_name == "focal_loss_gamma":
_update_focal_loss_gamma(configs, value)
elif field_name == "focal_loss_alpha":
_update_focal_loss_alpha(configs, value)
elif field_name == "train_steps":
_update_train_steps(configs, value)
elif field_name == "label_map_path":
_update_label_map_path(configs, value)
elif field_name == "mask_type":
_update_mask_type(configs, value)
elif field_name == "sample_1_of_n_eval_examples":
_update_all_eval_input_configs(configs, "sample_1_of_n_examples", value)
elif field_name == "eval_num_epochs":
_update_all_eval_input_configs(configs, "num_epochs", value)
elif field_name == "eval_with_moving_averages":
_update_use_moving_averages(configs, value)
elif field_name == "retain_original_images_in_eval":
_update_retain_original_images(configs["eval_config"], value)
elif field_name == "use_bfloat16":
_update_use_bfloat16(configs, value)
elif field_name == "retain_original_image_additional_channels_in_eval":
_update_retain_original_image_additional_channels(configs["eval_config"],
value)
elif field_name == "num_classes":
_update_num_classes(configs["model"], value)
elif field_name == "sample_from_datasets_weights":
_update_sample_from_datasets_weights(configs["train_input_config"], value)
elif field_name == "peak_max_pool_kernel_size":
_update_peak_max_pool_kernel_size(configs["model"], value)
elif field_name == "candidate_search_scale":
_update_candidate_search_scale(configs["model"], value)
elif field_name == "candidate_ranking_mode":
_update_candidate_ranking_mode(configs["model"], value)
elif field_name == "score_distance_offset":
_update_score_distance_offset(configs["model"], value)
elif field_name == "box_scale":
_update_box_scale(configs["model"], value)
elif field_name == "keypoint_candidate_score_threshold":
_update_keypoint_candidate_score_threshold(configs["model"], value)
elif field_name == "rescore_instances":
_update_rescore_instances(configs["model"], value)
elif field_name == "unmatched_keypoint_score":
_update_unmatched_keypoint_score(configs["model"], value)
else:
return False
return True
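# Illustrative usage sketch (editor's note, not part of the library). The
# dispatcher above maps flat override keys onto the right config objects; the
# pipeline path below is hypothetical.
#
#   configs = get_configs_from_pipeline_file("path/to/pipeline.config")
#   _maybe_update_config_with_key_value(configs, "batch_size", 64)
#   _maybe_update_config_with_key_value(configs, "learning_rate", 4e-3)
#
# The function returns False for unrecognized keys, which lets the caller fall
# back to _update_generic or skip the key, as in the loop above.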
def _update_tf_record_input_path(input_config, input_path):
"""Updates input configuration to reflect a new input path.
The input_config object is updated in place, and hence not returned.
Args:
    input_config: An input_reader_pb2.InputReader.
input_path: A path to data or list of paths.
Raises:
TypeError: if input reader type is not `tf_record_input_reader`.
"""
input_reader_type = input_config.WhichOneof("input_reader")
if input_reader_type == "tf_record_input_reader":
input_config.tf_record_input_reader.ClearField("input_path")
if isinstance(input_path, list):
input_config.tf_record_input_reader.input_path.extend(input_path)
else:
input_config.tf_record_input_reader.input_path.append(input_path)
else:
raise TypeError("Input reader type must be `tf_record_input_reader`.")
def update_input_reader_config(configs,
key_name=None,
input_name=None,
field_name=None,
value=None,
path_updater=_update_tf_record_input_path):
"""Updates specified input reader config field.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    key_name: Name of the input config we should update, either
      'train_input_config' or 'eval_input_configs'.
    input_name: String name used to identify the input config to update.
      Should be either None or the value of the 'name' field in one of the
      input reader configs.
field_name: Field name in input_reader_pb2.InputReader.
value: Value used to override existing field value.
path_updater: helper function used to update the input path. Only used when
field_name is "input_path".
Raises:
ValueError: when input field_name is None.
ValueError: when input_name is None and number of eval_input_readers does
not equal to 1.
"""
if isinstance(configs[key_name], input_reader_pb2.InputReader):
# Updates singular input_config object.
target_input_config = configs[key_name]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is None and len(configs[key_name]) == 1:
# Updates first (and the only) object of input_config list.
target_input_config = configs[key_name][0]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is not None and len(configs[key_name]):
# Updates input_config whose name matches input_name.
update_count = 0
for input_config in configs[key_name]:
if input_config.name == input_name:
setattr(input_config, field_name, value)
update_count = update_count + 1
if not update_count:
raise ValueError(
"Input name {} not found when overriding.".format(input_name))
elif update_count > 1:
raise ValueError("Duplicate input name found when overriding.")
else:
key_name = "None" if key_name is None else key_name
input_name = "None" if input_name is None else input_name
field_name = "None" if field_name is None else field_name
raise ValueError("Unknown input config overriding: "
"key_name:{}, input_name:{}, field_name:{}.".format(
key_name, input_name, field_name))
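# Illustrative usage sketch (editor's note, not part of the library):
# overriding the training input path on a singular input config. The TFRecord
# pattern below is hypothetical.
#
#   update_input_reader_config(
#       configs,
#       key_name="train_input_config",
#       input_name=None,
#       field_name="input_path",
#       value="gs://my-bucket/train-?????-of-00100.tfrecord")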
def _update_initial_learning_rate(configs, learning_rate):
"""Updates `configs` to reflect the new initial learning rate.
This function updates the initial learning rate. For learning rate schedules,
all other defined learning rates in the pipeline config are scaled to maintain
their same ratio with the initial learning rate.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
learning_rate: Initial learning rate for optimizer.
Raises:
TypeError: if optimizer type is not supported, or if learning rate type is
not supported.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
elif optimizer_type == "adam_optimizer":
optimizer_config = configs["train_config"].optimizer.adam_optimizer
else:
raise TypeError("Optimizer %s is not supported." % optimizer_type)
learning_rate_type = get_learning_rate_type(optimizer_config)
if learning_rate_type == "constant_learning_rate":
constant_lr = optimizer_config.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
elif learning_rate_type == "exponential_decay_learning_rate":
exponential_lr = (
optimizer_config.learning_rate.exponential_decay_learning_rate)
exponential_lr.initial_learning_rate = learning_rate
elif learning_rate_type == "manual_step_learning_rate":
manual_lr = optimizer_config.learning_rate.manual_step_learning_rate
original_learning_rate = manual_lr.initial_learning_rate
learning_rate_scaling = float(learning_rate) / original_learning_rate
manual_lr.initial_learning_rate = learning_rate
for schedule in manual_lr.schedule:
schedule.learning_rate *= learning_rate_scaling
elif learning_rate_type == "cosine_decay_learning_rate":
cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate
learning_rate_base = cosine_lr.learning_rate_base
warmup_learning_rate = cosine_lr.warmup_learning_rate
warmup_scale_factor = warmup_learning_rate / learning_rate_base
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate
else:
raise TypeError("Learning rate %s is not supported." % learning_rate_type)
def _update_batch_size(configs, batch_size):
"""Updates `configs` to reflect the new training batch size.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    batch_size: Batch size to use for training (ideally a power of 2). Inputs
      are rounded and capped to be 1 or greater.
"""
configs["train_config"].batch_size = max(1, int(round(batch_size)))
def _validate_message_has_field(message, field):
if not message.HasField(field):
raise ValueError("Expecting message to have field %s" % field)
def _update_generic(configs, key, value):
"""Update a pipeline configuration parameter based on a generic key/value.
Args:
configs: Dictionary of pipeline configuration protos.
key: A string key, dot-delimited to represent the argument key.
e.g. "model.ssd.train_config.batch_size"
value: A value to set the argument to. The type of the value must match the
type for the protocol buffer. Note that setting the wrong type will
result in a TypeError.
e.g. 42
Raises:
    ValueError: if the message key does not match the existing proto fields.
    TypeError: if the value type doesn't match the protobuf field type.
"""
fields = key.split(".")
first_field = fields.pop(0)
last_field = fields.pop()
message = configs[first_field]
for field in fields:
_validate_message_has_field(message, field)
message = getattr(message, field)
_validate_message_has_field(message, last_field)
setattr(message, last_field, value)
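# Illustrative usage sketch (editor's note, not part of the library). The
# first key component selects an entry in `configs`; the remaining components
# walk proto fields. Every field on the path, including the last, must already
# be set in the proto (HasField is checked), so this assumes batch_size was
# explicitly set in the pipeline config.
#
#   _update_generic(configs, "train_config.batch_size", 32)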
def _update_momentum_optimizer_value(configs, momentum):
"""Updates `configs` to reflect the new momentum value.
Momentum is only supported for RMSPropOptimizer and MomentumOptimizer. For any
other optimizer, no changes take place. The configs dictionary is updated in
place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
momentum: New momentum value. Values are clipped at 0.0 and 1.0.
Raises:
TypeError: If the optimizer type is not `rms_prop_optimizer` or
`momentum_optimizer`.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
else:
raise TypeError("Optimizer type must be one of `rms_prop_optimizer` or "
"`momentum_optimizer`.")
optimizer_config.momentum_optimizer_value = min(max(0.0, momentum), 1.0)
def _update_classification_localization_weight_ratio(configs, ratio):
"""Updates the classification/localization weight loss ratio.
  Detection models usually define loss weights for both classification and
  localization. This function updates the weights such that the ratio of
  classification (and/or objectness) loss weight to localization loss weight
  equals the ratio provided. Arbitrarily, the localization weight is set to
  1.0.
Note that in the case of Faster R-CNN, this same ratio is applied to the first
stage objectness loss weight relative to localization loss weight.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
ratio: Desired ratio of classification (and/or objectness) loss weight to
localization loss weight.
"""
meta_architecture = configs["model"].WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = configs["model"].faster_rcnn
model.first_stage_localization_loss_weight = 1.0
model.first_stage_objectness_loss_weight = ratio
model.second_stage_localization_loss_weight = 1.0
model.second_stage_classification_loss_weight = ratio
if meta_architecture == "ssd":
model = configs["model"].ssd
model.loss.localization_weight = 1.0
model.loss.classification_weight = ratio
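# Worked example (editor's note): with ratio=2.0 on an SSD model, the loss
# weights become localization_weight=1.0 and classification_weight=2.0. For
# Faster R-CNN, the same 2.0 is applied to the first-stage objectness and
# second-stage classification weights, with both localization weights pinned
# at 1.0.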
def _get_classification_loss(model_config):
"""Returns the classification loss for a model."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = model_config.faster_rcnn
classification_loss = model.second_stage_classification_loss
elif meta_architecture == "ssd":
model = model_config.ssd
classification_loss = model.loss.classification_loss
else:
raise TypeError("Did not recognize the model architecture.")
return classification_loss
def _update_focal_loss_gamma(configs, gamma):
"""Updates the gamma value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
gamma: Exponent term in focal loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.gamma = gamma
def _update_focal_loss_alpha(configs, alpha):
"""Updates the alpha value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
alpha: Class weight multiplier for sigmoid loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.alpha = alpha
def _update_train_steps(configs, train_steps):
"""Updates `configs` to reflect new number of training steps."""
configs["train_config"].num_steps = int(train_steps)
def _update_all_eval_input_configs(configs, field, value):
"""Updates the content of `field` with `value` for all eval input configs."""
for eval_input_config in configs["eval_input_configs"]:
setattr(eval_input_config, field, value)
def _update_label_map_path(configs, label_map_path):
"""Updates the label map path for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
label_map_path: New path to `StringIntLabelMap` pbtxt file.
"""
configs["train_input_config"].label_map_path = label_map_path
_update_all_eval_input_configs(configs, "label_map_path", label_map_path)
def _update_mask_type(configs, mask_type):
"""Updates the mask type for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
mask_type: A string name representing a value of
input_reader_pb2.InstanceMaskType
"""
configs["train_input_config"].mask_type = mask_type
_update_all_eval_input_configs(configs, "mask_type", mask_type)
def _update_use_moving_averages(configs, use_moving_averages):
"""Updates the eval config option to use or not use moving averages.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_moving_averages: Boolean indicating whether moving average variables
should be loaded during evaluation.
"""
configs["eval_config"].use_moving_averages = use_moving_averages
def _update_retain_original_images(eval_config, retain_original_images):
"""Updates eval config with option to retain original images.
The eval_config object is updated in place, and hence not returned.
Args:
    eval_config: An eval_pb2.EvalConfig.
retain_original_images: Boolean indicating whether to retain original images
in eval mode.
"""
eval_config.retain_original_images = retain_original_images
def _update_use_bfloat16(configs, use_bfloat16):
"""Updates `configs` to reflect the new setup on whether to use bfloat16.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_bfloat16: A bool, indicating whether to use bfloat16 for training.
"""
configs["train_config"].use_bfloat16 = use_bfloat16
def _update_retain_original_image_additional_channels(
eval_config,
retain_original_image_additional_channels):
"""Updates eval config to retain original image additional channels or not.
The eval_config object is updated in place, and hence not returned.
Args:
    eval_config: An eval_pb2.EvalConfig.
retain_original_image_additional_channels: Boolean indicating whether to
retain original image additional channels in eval mode.
"""
eval_config.retain_original_image_additional_channels = (
retain_original_image_additional_channels)
def remove_unnecessary_ema(variables_to_restore, no_ema_collection=None):
"""Remap and Remove EMA variable that are not created during training.
ExponentialMovingAverage.variables_to_restore() returns a map of EMA names
to tf variables to restore. E.g.:
{
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
}
  This function takes care of the extra ExponentialMovingAverage variables
  that get created during eval but aren't available in the checkpoint, by
  remapping each such key to the variable itself and removing its EMA entry
  from the variables to restore. An example resulting dictionary would look
  like:
{
conv/batchnorm/gamma: conv/batchnorm/gamma,
conv_4/conv2d_params: conv_4/conv2d_params,
global_step: global_step
}
Args:
variables_to_restore: A dictionary created by ExponentialMovingAverage.
variables_to_restore().
    no_ema_collection: A list of name-scope substrings; EMA mappings for
      variables whose keys contain any of these substrings are removed.
Returns:
A variables_to_restore dictionary excluding the collection of unwanted
EMA mapping.
"""
if no_ema_collection is None:
return variables_to_restore
restore_map = {}
for key in variables_to_restore:
if ("ExponentialMovingAverage" in key
and any([name in key for name in no_ema_collection])):
new_key = key.replace("/ExponentialMovingAverage", "")
else:
new_key = key
restore_map[new_key] = variables_to_restore[key]
return restore_map
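# Worked example (editor's note; variable names are hypothetical): with
# no_ema_collection=["feature_extractor"], the mapping
#   {"feature_extractor/conv/ExponentialMovingAverage": v1,
#    "box_predictor/conv/ExponentialMovingAverage": v2}
# becomes
#   {"feature_extractor/conv": v1,
#    "box_predictor/conv/ExponentialMovingAverage": v2}
# i.e. only keys matching the collection have their EMA suffix stripped.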
def _update_num_classes(model_config, num_classes):
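  """Updates the number of classes for supported meta-architectures."""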
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model_config.faster_rcnn.num_classes = num_classes
if meta_architecture == "ssd":
model_config.ssd.num_classes = num_classes
def _update_sample_from_datasets_weights(input_reader_config, weights):
"""Updated sample_from_datasets_weights with overrides."""
if len(weights) != len(input_reader_config.sample_from_datasets_weights):
raise ValueError(
"sample_from_datasets_weights override has a different number of values"
" ({}) than the configured dataset weights ({})."
.format(
len(input_reader_config.sample_from_datasets_weights),
len(weights)))
del input_reader_config.sample_from_datasets_weights[:]
input_reader_config.sample_from_datasets_weights.extend(weights)
def _update_peak_max_pool_kernel_size(model_config, kernel_size):
"""Updates the max pool kernel size (NMS) for keypoints in CenterNet."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.peak_max_pool_kernel_size = kernel_size
else:
tf.logging.warning("Ignoring config override key for "
"peak_max_pool_kernel_size since there are multiple "
"keypoint estimation tasks")
def _update_candidate_search_scale(model_config, search_scale):
"""Updates the keypoint candidate search scale in CenterNet."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.candidate_search_scale = search_scale
else:
tf.logging.warning("Ignoring config override key for "
"candidate_search_scale since there are multiple "
"keypoint estimation tasks")
def _update_candidate_ranking_mode(model_config, mode):
"""Updates how keypoints are snapped to candidates in CenterNet."""
if mode not in ("min_distance", "score_distance_ratio"):
raise ValueError("Attempting to set the keypoint candidate ranking mode "
"to {}, but the only options are 'min_distance' and "
"'score_distance_ratio'.".format(mode))
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.candidate_ranking_mode = mode
else:
tf.logging.warning("Ignoring config override key for "
"candidate_ranking_mode since there are multiple "
"keypoint estimation tasks")
def _update_score_distance_offset(model_config, offset):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.score_distance_offset = offset
else:
tf.logging.warning("Ignoring config override key for "
"score_distance_offset since there are multiple "
"keypoint estimation tasks")
def _update_box_scale(model_config, box_scale):
"""Updates the keypoint candidate search region. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.box_scale = box_scale
else:
tf.logging.warning("Ignoring config override key for box_scale since "
"there are multiple keypoint estimation tasks")
def _update_keypoint_candidate_score_threshold(model_config, threshold):
"""Updates the keypoint candidate score threshold. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.keypoint_candidate_score_threshold = threshold
else:
tf.logging.warning("Ignoring config override key for "
"keypoint_candidate_score_threshold since there are "
"multiple keypoint estimation tasks")
def _update_rescore_instances(model_config, should_rescore):
"""Updates whether boxes should be rescored based on keypoint confidences."""
  if isinstance(should_rescore, str):
    should_rescore = should_rescore == "True"
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.rescore_instances = should_rescore
else:
tf.logging.warning("Ignoring config override key for "
"rescore_instances since there are multiple keypoint "
"estimation tasks")
def _update_unmatched_keypoint_score(model_config, score):
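  """Updates the unmatched keypoint score. See CenterNet proto."""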
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.unmatched_keypoint_score = score
else:
tf.logging.warning("Ignoring config override key for "
"unmatched_keypoint_score since there are multiple "
"keypoint estimation tasks")
# ==== End of file: object_detection/utils/config_util.py ====
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python context management helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class IdentityContextManager(object):
"""Returns an identity context manager that does nothing.
  This is helpful in setting up a conditional `with` statement as below:
with slim.arg_scope(x) if use_slim_scope else IdentityContextManager():
do_stuff()
"""
def __enter__(self):
return None
def __exit__(self, exec_type, exec_value, traceback):
del exec_type
del exec_value
del traceback
return False
# ==== End of file: object_detection/utils/context_manager.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.dataset_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
from object_detection.utils import dataset_util
class DatasetUtilTest(tf.test.TestCase):
def test_read_examples_list(self):
example_list_data = """example1 1\nexample2 2"""
example_list_path = os.path.join(self.get_temp_dir(), 'examples.txt')
with tf.gfile.Open(example_list_path, 'wb') as f:
f.write(example_list_data)
examples = dataset_util.read_examples_list(example_list_path)
self.assertListEqual(['example1', 'example2'], examples)
if __name__ == '__main__':
tf.test.main()
# ==== End of file: object_detection/utils/dataset_util_test.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.per_image_vrd_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import per_image_vrd_evaluation
class SingleClassPerImageVrdEvaluationTest(tf.test.TestCase):
def setUp(self):
matching_iou_threshold = 0.5
self.eval = per_image_vrd_evaluation.PerImageVRDEvaluation(
matching_iou_threshold)
box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
self.detected_box_tuples = np.array(
[([0, 0, 1.1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2]),
([1, 1, 2, 2], [0, 0, 1.1, 1])],
dtype=box_data_type)
self.detected_scores = np.array([0.8, 0.2, 0.1], dtype=float)
self.groundtruth_box_tuples = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=box_data_type)
def test_tp_fp_eval(self):
tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_box_tuples, self.groundtruth_box_tuples)
expected_tp_fp_labels = np.array([True, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_tp_fp_eval_empty_gt(self):
box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_box_tuples, np.array([], dtype=box_data_type))
expected_tp_fp_labels = np.array([False, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class MultiClassPerImageVrdEvaluationTest(tf.test.TestCase):
def setUp(self):
matching_iou_threshold = 0.5
self.eval = per_image_vrd_evaluation.PerImageVRDEvaluation(
matching_iou_threshold)
box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
label_data_type = np.dtype([('subject', 'i4'), ('object', 'i4'),
('relation', 'i4')])
self.detected_box_tuples = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1.1, 1], [1, 1, 2, 2]),
([1, 1, 2, 2], [0, 0, 1.1, 1]), ([0, 0, 1, 1], [3, 4, 5, 6])],
dtype=box_data_type)
self.detected_class_tuples = np.array(
[(1, 2, 3), (1, 2, 3), (1, 2, 3), (1, 4, 5)], dtype=label_data_type)
self.detected_scores = np.array([0.2, 0.8, 0.1, 0.5], dtype=float)
self.groundtruth_box_tuples = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([1, 1, 2, 2], [0, 0, 1.1, 1]),
([0, 0, 1, 1], [3, 4, 5, 5.5])],
dtype=box_data_type)
self.groundtruth_class_tuples = np.array(
[(1, 2, 3), (1, 7, 3), (1, 4, 5)], dtype=label_data_type)
def test_tp_fp_eval(self):
scores, tp_fp_labels, mapping = self.eval.compute_detection_tp_fp(
self.detected_box_tuples, self.detected_scores,
self.detected_class_tuples, self.groundtruth_box_tuples,
self.groundtruth_class_tuples)
expected_scores = np.array([0.8, 0.5, 0.2, 0.1], dtype=float)
expected_tp_fp_labels = np.array([True, True, False, False], dtype=bool)
expected_mapping = np.array([1, 3, 0, 2])
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
self.assertTrue(np.allclose(expected_mapping, mapping))
if __name__ == '__main__':
tf.test.main()
# ==== End of file: object_detection/utils/per_image_vrd_evaluation_test.py ====
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.patch_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import patch_ops
from object_detection.utils import test_case
class GetPatchMaskTest(test_case.TestCase, parameterized.TestCase):
def testMaskShape(self):
image_shape = [15, 10]
mask = patch_ops.get_patch_mask(
10, 5, patch_size=3, image_shape=image_shape)
self.assertListEqual(mask.shape.as_list(), image_shape)
def testHandleImageShapeWithChannels(self):
image_shape = [15, 10, 3]
mask = patch_ops.get_patch_mask(
10, 5, patch_size=3, image_shape=image_shape)
self.assertListEqual(mask.shape.as_list(), image_shape[:2])
def testMaskDType(self):
mask = patch_ops.get_patch_mask(2, 3, patch_size=2, image_shape=[6, 7])
self.assertDTypeEqual(mask, bool)
def testMaskAreaWithEvenPatchSize(self):
image_shape = [6, 7]
mask = patch_ops.get_patch_mask(2, 3, patch_size=2, image_shape=image_shape)
expected_mask = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]).reshape(image_shape).astype(bool)
self.assertAllEqual(mask, expected_mask)
def testMaskAreaWithEvenPatchSize4(self):
image_shape = [6, 7]
mask = patch_ops.get_patch_mask(2, 3, patch_size=4, image_shape=image_shape)
expected_mask = np.array([
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]).reshape(image_shape).astype(bool)
self.assertAllEqual(mask, expected_mask)
def testMaskAreaWithOddPatchSize(self):
image_shape = [6, 7]
mask = patch_ops.get_patch_mask(2, 3, patch_size=3, image_shape=image_shape)
expected_mask = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]).reshape(image_shape).astype(bool)
self.assertAllEqual(mask, expected_mask)
def testMaskAreaPartiallyOutsideImage(self):
image_shape = [6, 7]
mask = patch_ops.get_patch_mask(5, 6, patch_size=5, image_shape=image_shape)
expected_mask = np.array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1],
]).reshape(image_shape).astype(bool)
self.assertAllEqual(mask, expected_mask)
@parameterized.parameters(
{'y': 0, 'x': -1},
{'y': -1, 'x': 0},
{'y': 0, 'x': 11},
{'y': 16, 'x': 0},
)
def testStaticCoordinatesOutsideImageRaisesError(self, y, x):
image_shape = [15, 10]
with self.assertRaises(tf.errors.InvalidArgumentError):
patch_ops.get_patch_mask(y, x, patch_size=3, image_shape=image_shape)
def testDynamicCoordinatesOutsideImageRaisesError(self):
def graph_fn():
image_shape = [15, 10]
x = tf.random_uniform([], minval=-2, maxval=-1, dtype=tf.int32)
y = tf.random_uniform([], minval=0, maxval=1, dtype=tf.int32)
mask = patch_ops.get_patch_mask(
y, x, patch_size=3, image_shape=image_shape)
return mask
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute(graph_fn, [])
@parameterized.parameters(
{'patch_size': 0},
{'patch_size': -1},
)
def testStaticNonPositivePatchSizeRaisesError(self, patch_size):
image_shape = [6, 7]
with self.assertRaises(tf.errors.InvalidArgumentError):
patch_ops.get_patch_mask(
0, 0, patch_size=patch_size, image_shape=image_shape)
def testDynamicNonPositivePatchSizeRaisesError(self):
def graph_fn():
image_shape = [6, 7]
patch_size = -1 * tf.random_uniform([], minval=0, maxval=3,
dtype=tf.int32)
mask = patch_ops.get_patch_mask(
0, 0, patch_size=patch_size, image_shape=image_shape)
return mask
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute(graph_fn, [])
if __name__ == '__main__':
tf.test.main()
# ==== End of file: object_detection/utils/patch_ops_test.py ====
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for colab tutorials located in object_detection/colab_tutorials/..."""
import base64
import io
import json
from typing import Dict
from typing import List
from typing import Union
import uuid
from IPython.display import display
from IPython.display import Javascript
import numpy as np
from PIL import Image
from google.colab import output
from google.colab.output import eval_js
def image_from_numpy(image):
"""Open an image at the specified path and encode it in Base64.
Args:
image: np.ndarray
Image represented as a numpy array
Returns:
An encoded Base64 representation of the image
"""
with io.BytesIO() as img_output:
Image.fromarray(image).save(img_output, format='JPEG')
data = img_output.getvalue()
data = str(base64.b64encode(data))[2:-1]
return data
def draw_bbox(image_urls, callbackId): # pylint: disable=invalid-name
"""Open the bounding box UI and send the results to a callback function.
Args:
    image_urls: list[str | np.ndarray]
      List of images to load. If a np.ndarray is given, the array is
      interpreted as an image and sent to the frontend. Note that the current
      implementation only accepts np.ndarray inputs and raises a TypeError for
      anything else, including str paths.
callbackId: str
The ID for the callback function to send the bounding box results to
when the user hits submit.
"""
js = Javascript('''
async function load_image(imgs, callbackId) {
//init organizational elements
const div = document.createElement('div');
var image_cont = document.createElement('div');
var errorlog = document.createElement('div');
var crosshair_h = document.createElement('div');
crosshair_h.style.position = "absolute";
crosshair_h.style.backgroundColor = "transparent";
crosshair_h.style.width = "100%";
crosshair_h.style.height = "0px";
crosshair_h.style.zIndex = 9998;
crosshair_h.style.borderStyle = "dotted";
crosshair_h.style.borderWidth = "2px";
crosshair_h.style.borderColor = "rgba(255, 0, 0, 0.75)";
crosshair_h.style.cursor = "crosshair";
var crosshair_v = document.createElement('div');
crosshair_v.style.position = "absolute";
crosshair_v.style.backgroundColor = "transparent";
crosshair_v.style.width = "0px";
crosshair_v.style.height = "100%";
crosshair_v.style.zIndex = 9999;
crosshair_v.style.top = "0px";
crosshair_v.style.borderStyle = "dotted";
crosshair_v.style.borderWidth = "2px";
crosshair_v.style.borderColor = "rgba(255, 0, 0, 0.75)";
crosshair_v.style.cursor = "crosshair";
crosshair_v.style.marginTop = "23px";
var brdiv = document.createElement('br');
//init control elements
var next = document.createElement('button');
var prev = document.createElement('button');
var submit = document.createElement('button');
var deleteButton = document.createElement('button');
var deleteAllbutton = document.createElement('button');
//init image containers
var image = new Image();
var canvas_img = document.createElement('canvas');
var ctx = canvas_img.getContext("2d");
canvas_img.style.cursor = "crosshair";
canvas_img.setAttribute('draggable', false);
crosshair_v.setAttribute('draggable', false);
crosshair_h.setAttribute('draggable', false);
// bounding box containers
const height = 600
var allBoundingBoxes = [];
var curr_image = 0
var im_height = 0;
var im_width = 0;
//initialize bounding boxes
for (var i = 0; i < imgs.length; i++) {
allBoundingBoxes[i] = [];
}
//initialize image view
errorlog.id = 'errorlog';
image.style.display = 'block';
image.setAttribute('draggable', false);
//load the first image
img = imgs[curr_image];
image.src = "data:image/png;base64," + img;
image.onload = function() {
// normalize display height and canvas
image.height = height;
image_cont.height = canvas_img.height = image.height;
image_cont.width = canvas_img.width = image.naturalWidth;
crosshair_v.style.height = image_cont.height + "px";
crosshair_h.style.width = image_cont.width + "px";
// draw the new image
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
};
// move to next image in array
next.textContent = "next image";
next.onclick = function(){
if (curr_image < imgs.length - 1){
// clear canvas and load new image
curr_image += 1;
errorlog.innerHTML = "";
}
else{
errorlog.innerHTML = "All images completed!!";
}
resetcanvas();
}
    //move backward through the list of images
prev.textContent = "prev image"
prev.onclick = function(){
if (curr_image > 0){
// clear canvas and load new image
curr_image -= 1;
errorlog.innerHTML = "";
}
else{
errorlog.innerHTML = "at the beginning";
}
resetcanvas();
}
// on delete, deletes the last bounding box
deleteButton.textContent = "undo bbox";
deleteButton.onclick = function(){
boundingBoxes.pop();
ctx.clearRect(0, 0, canvas_img.width, canvas_img.height);
image.src = "data:image/png;base64," + img;
image.onload = function() {
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
boundingBoxes.map(r => {drawRect(r)});
};
}
// on all delete, deletes all of the bounding box
deleteAllbutton.textContent = "delete all"
deleteAllbutton.onclick = function(){
boundingBoxes = [];
ctx.clearRect(0, 0, canvas_img.width, canvas_img.height);
image.src = "data:image/png;base64," + img;
image.onload = function() {
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
//boundingBoxes.map(r => {drawRect(r)});
};
}
// on submit, send the boxes to display
submit.textContent = "submit";
submit.onclick = function(){
errorlog.innerHTML = "";
      // send box data to the callback function
google.colab.kernel.invokeFunction(callbackId, [allBoundingBoxes], {});
}
// init template for annotations
const annotation = {
x: 0,
y: 0,
w: 0,
h: 0,
};
// the array of all rectangles
let boundingBoxes = allBoundingBoxes[curr_image];
// the actual rectangle, the one that is being drawn
let o = {};
// a variable to store the mouse position
let m = {},
// a variable to store the point where you begin to draw the
// rectangle
start = {};
// a boolean variable to store the drawing state
let isDrawing = false;
var elem = null;
function handleMouseDown(e) {
// on mouse click set change the cursor and start tracking the mouse position
start = oMousePos(canvas_img, e);
// configure is drawing to true
isDrawing = true;
}
function handleMouseMove(e) {
// move crosshairs, but only within the bounds of the canvas
if (document.elementsFromPoint(e.pageX, e.pageY).includes(canvas_img)) {
crosshair_h.style.top = e.pageY + "px";
crosshair_v.style.left = e.pageX + "px";
}
// move the bounding box
if(isDrawing){
m = oMousePos(canvas_img, e);
draw();
}
}
function handleMouseUp(e) {
if (isDrawing) {
// on mouse release, push a bounding box to array and draw all boxes
isDrawing = false;
const box = Object.create(annotation);
// calculate the position of the rectangle
if (o.w > 0){
box.x = o.x;
}
else{
box.x = o.x + o.w;
}
if (o.h > 0){
box.y = o.y;
}
else{
box.y = o.y + o.h;
}
box.w = Math.abs(o.w);
box.h = Math.abs(o.h);
// add the bounding box to the image
boundingBoxes.push(box);
draw();
}
}
function draw() {
o.x = (start.x)/image.width; // start position of x
o.y = (start.y)/image.height; // start position of y
o.w = (m.x - start.x)/image.width; // width
o.h = (m.y - start.y)/image.height; // height
ctx.clearRect(0, 0, canvas_img.width, canvas_img.height);
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
// draw all the rectangles saved in the rectsRy
boundingBoxes.map(r => {drawRect(r)});
// draw the actual rectangle
drawRect(o);
}
// add the handlers needed for dragging
crosshair_h.addEventListener("mousedown", handleMouseDown);
crosshair_v.addEventListener("mousedown", handleMouseDown);
document.addEventListener("mousemove", handleMouseMove);
document.addEventListener("mouseup", handleMouseUp);
function resetcanvas(){
// clear canvas
ctx.clearRect(0, 0, canvas_img.width, canvas_img.height);
img = imgs[curr_image]
image.src = "data:image/png;base64," + img;
// onload init new canvas and display image
image.onload = function() {
// normalize display height and canvas
image.height = height;
image_cont.height = canvas_img.height = image.height;
image_cont.width = canvas_img.width = image.naturalWidth;
crosshair_v.style.height = image_cont.height + "px";
crosshair_h.style.width = image_cont.width + "px";
// draw the new image
ctx.drawImage(image, 0, 0, image.naturalWidth, image.naturalHeight, 0, 0, canvas_img.width, canvas_img.height);
// draw bounding boxes
boundingBoxes = allBoundingBoxes[curr_image];
boundingBoxes.map(r => {drawRect(r)});
};
}
function drawRect(o){
// draw a predefined rectangle
ctx.strokeStyle = "red";
ctx.lineWidth = 2;
ctx.beginPath(o);
ctx.rect(o.x * image.width, o.y * image.height, o.w * image.width, o.h * image.height);
ctx.stroke();
}
// Function to detect the mouse position
function oMousePos(canvas_img, evt) {
let ClientRect = canvas_img.getBoundingClientRect();
return {
x: evt.clientX - ClientRect.left,
y: evt.clientY - ClientRect.top
};
}
//configure colab output display
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
//build the html document that will be seen in output
div.appendChild(document.createElement('br'))
div.appendChild(image_cont)
image_cont.appendChild(canvas_img)
image_cont.appendChild(crosshair_h)
image_cont.appendChild(crosshair_v)
div.appendChild(document.createElement('br'))
div.appendChild(errorlog)
div.appendChild(prev)
div.appendChild(next)
div.appendChild(deleteButton)
div.appendChild(deleteAllbutton)
div.appendChild(document.createElement('br'))
div.appendChild(brdiv)
div.appendChild(submit)
document.querySelector("#output-area").appendChild(div);
return
}''')
# load the images as a byte array
bytearrays = []
for image in image_urls:
if isinstance(image, np.ndarray):
bytearrays.append(image_from_numpy(image))
else:
raise TypeError('Image has unsupported type {}.'.format(type(image)))
# format arrays for input
image_data = json.dumps(bytearrays)
del bytearrays
  # call the JavaScript function, passing the string byte array (image_data)
  # as input
display(js)
eval_js('load_image({}, \'{}\')'.format(image_data, callbackId))
return
def annotate(imgs: List[Union[str, np.ndarray]], # pylint: disable=invalid-name
box_storage_pointer: List[np.ndarray],
callbackId: str = None):
"""Open the bounding box UI and prompt the user for input.
Args:
    imgs: list[str | np.ndarray]
      List of images to annotate. If a np.ndarray is given, the array is
      interpreted as an image and sent to the frontend. Note that the
      underlying draw_bbox implementation currently only accepts np.ndarray
      inputs and raises a TypeError for str paths.
box_storage_pointer: list[np.ndarray]
Destination list for bounding box arrays. Each array in this list
corresponds to one of the images given in imgs. The array is a
N x 4 array where N is the number of bounding boxes given by the user
for that particular image. If there are no bounding boxes for an image,
None is used instead of an empty array.
callbackId: str, optional
      The ID for the callback function that communicates between the frontend
and the backend. If no ID is given, a random UUID string is used instead.
"""
# Set a random ID for the callback function
if callbackId is None:
callbackId = str(uuid.uuid1()).replace('-', '')
def dictToList(input_bbox): # pylint: disable=invalid-name
"""Convert bbox.
This function converts the dictionary from the frontend (if the format
{x, y, w, h} as shown in callbackFunction) into a list
([y_min, x_min, y_max, x_max])
Args:
input_bbox:
Returns:
A list with bbox coordinates in the form [ymin, xmin, ymax, xmax].
"""
return (input_bbox['y'], input_bbox['x'], input_bbox['y'] + input_bbox['h'],
input_bbox['x'] + input_bbox['w'])
def callbackFunction(annotations: List[List[Dict[str, float]]]): # pylint: disable=invalid-name
"""Callback function.
    This is the callback function that captures the data from the frontend and
    converts it into a numpy array.
Args:
annotations: list[list[dict[str, float]]]
The input of the call back function is a list of list of objects
corresponding to the annotations. The format of annotations is shown
below
[
// stuff for image 1
[
// stuff for rect 1
{x, y, w, h},
// stuff for rect 2
{x, y, w, h},
...
],
// stuff for image 2
[
// stuff for rect 1
{x, y, w, h},
// stuff for rect 2
{x, y, w, h},
...
],
...
]
"""
# reset the boxes list
nonlocal box_storage_pointer
boxes: List[np.ndarray] = box_storage_pointer
boxes.clear()
# load the new annotations into the boxes list
for annotations_per_img in annotations:
rectangles_as_arrays = [np.clip(dictToList(annotation), 0, 1)
for annotation in annotations_per_img]
if rectangles_as_arrays:
boxes.append(np.stack(rectangles_as_arrays))
else:
boxes.append(None)
# output the annotations to the errorlog
with output.redirect_to_element('#errorlog'):
display('--boxes array populated--')
output.register_callback(callbackId, callbackFunction)
draw_bbox(imgs, callbackId)
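# Illustrative usage sketch (editor's note, not part of the library), to be
# run in a Colab cell; the image content below is hypothetical.
#
#   import numpy as np
#   from object_detection.utils import colab_utils
#
#   images = [np.zeros((200, 300, 3), dtype=np.uint8)]
#   boxes = []
#   colab_utils.annotate(images, box_storage_pointer=boxes)
#   # After drawing boxes and pressing "submit" in the UI, boxes[0] holds an
#   # N x 4 array of [ymin, xmin, ymax, xmax] in normalized coordinates, or
#   # None if no boxes were drawn for that image.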
# ==== End of file: object_detection/utils/colab_utils.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to access TensorShape values.
The rank 4 tensor_shape must be of the form [batch_size, height, width, depth].
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_dim_as_int(dim):
"""Utility to get v1 or v2 TensorShape dim as an int.
Args:
dim: The TensorShape dimension to get as an int
Returns:
None or an int.
"""
try:
return dim.value
except AttributeError:
return dim
def get_batch_size(tensor_shape):
"""Returns batch size from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the batch size of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[0])
def get_height(tensor_shape):
"""Returns height from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the height of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[1])
def get_width(tensor_shape):
"""Returns width from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the width of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[2])
def get_depth(tensor_shape):
"""Returns depth from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the depth of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[3])
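# Illustrative usage sketch (editor's note, not part of the library):
#
#   import tensorflow.compat.v1 as tf
#   from object_detection.utils import static_shape
#
#   images = tf.placeholder(tf.float32, shape=[8, 480, 640, 3])
#   static_shape.get_batch_size(images.shape)  # -> 8
#   static_shape.get_height(images.shape)      # -> 480
#   static_shape.get_width(images.shape)       # -> 640
#   static_shape.get_depth(images.shape)       # -> 3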
# ==== End of file: object_detection/utils/static_shape.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import logging
import numpy as np
from six import string_types
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2
_LABEL_OFFSET = 1
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Given label map proto returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
'keypoints': (optional) a dictionary of keypoint string 'label' to integer
'id'.
  We only allow a class into the list if its id - label_id_offset is
  between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field as
category name. If False or if the display_name field does not exist, uses
'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info(
          'Ignoring item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
category = {'id': item.id, 'name': name}
if item.HasField('frequency'):
if item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'FREQUENT'):
category['frequency'] = 'f'
elif item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'COMMON'):
category['frequency'] = 'c'
elif item.frequency == string_int_label_map_pb2.LVISFrequency.Value(
'RARE'):
category['frequency'] = 'r'
if item.HasField('instance_count'):
category['instance_count'] = item.instance_count
if item.keypoints:
keypoints = {}
list_of_keypoint_ids = []
for kv in item.keypoints:
if kv.id in list_of_keypoint_ids:
raise ValueError('Duplicate keypoint ids are not allowed. '
'Found {} more than once'.format(kv.id))
keypoints[kv.label] = kv.id
list_of_keypoint_ids.append(kv.id)
category['keypoints'] = keypoints
categories.append(category)
return categories
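# Worked example (editor's note): for a label map containing
#   item { id: 1 name: 'cat' }
#   item { id: 2 name: 'dog' }
# convert_label_map_to_categories(label_map, max_num_classes=2) returns
#   [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
# while max_num_classes=1 keeps only the 'cat' entry.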
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.io.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map
def get_label_map_dict(label_map_path_or_proto,
use_display_name=False,
fill_in_gaps_and_background=False):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
use_display_name: whether to use the label map items' display names as keys.
    fill_in_gaps_and_background: whether to fill in gaps and background with
      respect to the id field in the proto. The id 0 is reserved for the
      'background' class and will be added if it is missing. All other missing
      ids in range(1, max(id)) will be added with a dummy class name (currently
      the string form of the id; see the TODO in the code below).
Returns:
A dictionary mapping label names to id.
Raises:
ValueError: if fill_in_gaps_and_background and label_map has non-integer or
negative values.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
_validate_label_map(label_map_path_or_proto)
label_map = label_map_path_or_proto
label_map_dict = {}
for item in label_map.item:
if use_display_name:
label_map_dict[item.display_name] = item.id
else:
label_map_dict[item.name] = item.id
if fill_in_gaps_and_background:
values = set(label_map_dict.values())
if 0 not in values:
label_map_dict['background'] = 0
if not all(isinstance(value, int) for value in values):
      raise ValueError('The values in label map must be integers in order to '
'fill_in_gaps_and_background.')
if not all(value >= 0 for value in values):
      raise ValueError('The values in the label map must be non-negative.')
if len(values) != max(values) + 1:
# there are gaps in the labels, fill in gaps.
for value in range(1, max(values)):
if value not in values:
# TODO(rathodv): Add a prefix 'class_' here once the tool to generate
# teacher annotation adds this prefix in the data.
label_map_dict[str(value)] = value
return label_map_dict
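# Example usage (illustrative sketch, not part of the original module; assumes
# a hypothetical label map file with items {name: 'cat', id: 1} and
# {name: 'dog', id: 3}):
#
#   label_map_dict = get_label_map_dict(
#       '/path/to/label_map.pbtxt', fill_in_gaps_and_background=True)
#   # -> {'cat': 1, 'dog': 3, 'background': 0, '2': 2}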
def get_label_map_hierarchy_lut(label_map_path_or_proto,
include_identity=False):
"""Reads a label map and returns ancestors and descendants in the hierarchy.
The function returns the ancestors and descendants as separate look up tables
(LUT) numpy arrays of shape [max_id, max_id] where lut[i,j] = 1 when there is
a hierarchical relationship between class i and j.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
include_identity: Boolean to indicate whether to include a class element
among its ancestors and descendants. Setting this will result in the lut
diagonal being set to 1.
Returns:
ancestors_lut: Look up table with the ancestors.
descendants_lut: Look up table with the descendants.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
_validate_label_map(label_map_path_or_proto)
label_map = label_map_path_or_proto
hierarchy_dict = {
'ancestors': collections.defaultdict(list),
'descendants': collections.defaultdict(list)
}
max_id = -1
for item in label_map.item:
max_id = max(max_id, item.id)
for ancestor in item.ancestor_ids:
hierarchy_dict['ancestors'][item.id].append(ancestor)
for descendant in item.descendant_ids:
hierarchy_dict['descendants'][item.id].append(descendant)
def get_graph_relations_tensor(graph_relations):
graph_relations_tensor = np.zeros([max_id, max_id])
for id_val, ids_related in graph_relations.items():
id_val = int(id_val) - _LABEL_OFFSET
for id_related in ids_related:
id_related -= _LABEL_OFFSET
graph_relations_tensor[id_val, id_related] = 1
if include_identity:
graph_relations_tensor += np.eye(max_id)
return graph_relations_tensor
ancestors_lut = get_graph_relations_tensor(hierarchy_dict['ancestors'])
descendants_lut = get_graph_relations_tensor(hierarchy_dict['descendants'])
return ancestors_lut, descendants_lut
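# Example usage (illustrative sketch; assumes a hypothetical label map where
# class 2 lists class 1 among its ancestor_ids, so max_id is 2, and assumes
# _LABEL_OFFSET is 1):
#
#   ancestors_lut, _ = get_label_map_hierarchy_lut(
#       '/path/to/label_map.pbtxt', include_identity=True)
#   # ancestors_lut -> [[1., 0.],
#   #                   [1., 1.]]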
def create_categories_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': an integer id uniquely identifying this category.
'name': string representing category name e.g., 'cat', 'dog'.
'keypoints': a dictionary of keypoint string label to integer id. It is only
returned when available in label map proto.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
label_map = load_labelmap(label_map_path)
max_num_classes = max(item.id for item in label_map.item)
return convert_label_map_to_categories(label_map, max_num_classes,
use_display_name)
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns a category index.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
A category index, which is a dictionary that maps integer ids to dicts
containing categories, e.g.
{1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
"""
categories = create_categories_from_labelmap(label_map_path, use_display_name)
return create_category_index(categories)
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
# ===== End of file: object_detection/utils/label_map_util.py =====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for dealing with writing json strings.
json_utils wraps json.dump and json.dumps so that they can be used to safely
control the precision of floats when writing to json strings or files.
"""
import json
import re
def FormatFloat(json_str, float_digits):
  """Rounds every float in a json string to `float_digits` decimal places."""
  pattern = re.compile(r'\d+\.\d+')
  float_repr = '{:.' + '{}'.format(float_digits) + 'f}'
  def MRound(match):
    return float_repr.format(float(match.group()))
  return re.sub(pattern, MRound, json_str)
def Dump(obj, fid, float_digits=-1, **params):
"""Wrapper of json.dump that allows specifying the float precision used.
Args:
obj: The object to dump.
fid: The file id to write to.
float_digits: The number of digits of precision when writing floats out.
**params: Additional parameters to pass to json.dumps.
"""
json_str = Dumps(obj, float_digits, **params)
fid.write(json_str)
def Dumps(obj, float_digits=-1, **params):
"""Wrapper of json.dumps that allows specifying the float precision used.
Args:
obj: The object to dump.
float_digits: The number of digits of precision when writing floats out.
**params: Additional parameters to pass to json.dumps.
Returns:
output: JSON string representation of obj.
"""
json_str = json.dumps(obj, **params)
if float_digits > -1:
json_str = FormatFloat(json_str, float_digits)
return json_str
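# Example usage (illustrative sketch): every float in the serialized output
# is rounded to the requested number of digits.
#
#   Dumps({'score': 0.123456}, float_digits=2)
#   # -> '{"score": 0.12}'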
def PrettyParams(**params):
"""Returns parameters for use with Dump and Dumps to output pretty json.
Example usage:
```json_str = json_utils.Dumps(obj, **json_utils.PrettyParams())```
```json_str = json_utils.Dumps(
obj, **json_utils.PrettyParams(allow_nans=False))```
Args:
**params: Additional params to pass to json.dump or json.dumps.
Returns:
params: Parameters that are compatible with json_utils.Dump and
json_utils.Dumps.
"""
params['float_digits'] = 4
params['sort_keys'] = True
params['indent'] = 2
params['separators'] = (',', ': ')
return params
# ===== End of file: object_detection/utils/json_utils.py =====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.shape_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
from object_detection.utils import test_case
class UtilTest(test_case.TestCase):
def test_pad_tensor_using_integer_input(self):
def graph_fn():
t1 = tf.constant([1], dtype=tf.int32)
pad_t1 = shape_utils.pad_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
pad_t2 = shape_utils.pad_tensor(t2, 2)
return pad_t1, pad_t2
pad_t1_result, pad_t2_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 0], pad_t1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result)
def test_pad_tensor_using_tensor_input(self):
def graph_fn():
t1 = tf.constant([1], dtype=tf.int32)
pad_t1 = shape_utils.pad_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
pad_t2 = shape_utils.pad_tensor(t2, tf.constant(2))
return pad_t1, pad_t2
pad_t1_result, pad_t2_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 0], pad_t1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result)
def test_clip_tensor_using_integer_input(self):
def graph_fn():
t1 = tf.constant([1, 2, 3], dtype=tf.int32)
clip_t1 = shape_utils.clip_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
clip_t2 = shape_utils.clip_tensor(t2, 2)
self.assertEqual(2, clip_t1.get_shape()[0])
self.assertEqual(2, clip_t2.get_shape()[0])
return clip_t1, clip_t2
clip_t1_result, clip_t2_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 2], clip_t1_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result)
def test_clip_tensor_using_tensor_input(self):
def graph_fn():
t1 = tf.constant([1, 2, 3], dtype=tf.int32)
clip_t1 = shape_utils.clip_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
clip_t2 = shape_utils.clip_tensor(t2, tf.constant(2))
return clip_t1, clip_t2
clip_t1_result, clip_t2_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 2], clip_t1_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result)
def test_pad_or_clip_tensor_using_integer_input(self):
def graph_fn():
t1 = tf.constant([1], dtype=tf.int32)
tt1 = shape_utils.pad_or_clip_tensor(t1, 2)
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
tt2 = shape_utils.pad_or_clip_tensor(t2, 2)
t3 = tf.constant([1, 2, 3], dtype=tf.int32)
tt3 = shape_utils.clip_tensor(t3, 2)
t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
tt4 = shape_utils.clip_tensor(t4, 2)
self.assertEqual(2, tt1.get_shape()[0])
self.assertEqual(2, tt2.get_shape()[0])
self.assertEqual(2, tt3.get_shape()[0])
self.assertEqual(2, tt4.get_shape()[0])
return tt1, tt2, tt3, tt4
tt1_result, tt2_result, tt3_result, tt4_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 0], tt1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)
self.assertAllEqual([1, 2], tt3_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)
def test_pad_or_clip_tensor_using_tensor_input(self):
def graph_fn():
t1 = tf.constant([1], dtype=tf.int32)
tt1 = shape_utils.pad_or_clip_tensor(t1, tf.constant(2))
t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
tt2 = shape_utils.pad_or_clip_tensor(t2, tf.constant(2))
t3 = tf.constant([1, 2, 3], dtype=tf.int32)
tt3 = shape_utils.clip_tensor(t3, tf.constant(2))
t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
tt4 = shape_utils.clip_tensor(t4, tf.constant(2))
return tt1, tt2, tt3, tt4
tt1_result, tt2_result, tt3_result, tt4_result = self.execute(graph_fn, [])
self.assertAllEqual([1, 0], tt1_result)
self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)
self.assertAllEqual([1, 2], tt3_result)
self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)
def test_combined_static_dynamic_shape(self):
for n in [2, 3, 4]:
tensor = tf.zeros((n, 2, 3))
combined_shape = shape_utils.combined_static_and_dynamic_shape(
tensor)
self.assertListEqual(combined_shape[1:], [2, 3])
def test_pad_or_clip_nd_tensor(self):
def graph_fn(input_tensor):
output_tensor = shape_utils.pad_or_clip_nd(
input_tensor, [None, 3, 5, tf.constant(6)])
return output_tensor
for n in [2, 3, 4, 5]:
input_np = np.zeros((n, 5, 4, 7))
output_tensor_np = self.execute(graph_fn, [input_np])
self.assertAllEqual(output_tensor_np.shape[1:], [3, 5, 6])
class StaticOrDynamicMapFnTest(test_case.TestCase):
def test_with_dynamic_shape(self):
def fn(input_tensor):
return tf.reduce_sum(input_tensor)
def graph_fn(input_tensor):
return shape_utils.static_or_dynamic_map_fn(fn, input_tensor)
# The input has different shapes, but due to how self.execute()
# works, the shape is known at graph compile time.
result1 = self.execute(
graph_fn, [np.array([[1, 2], [3, 1], [0, 4]]),])
result2 = self.execute(
graph_fn, [np.array([[-1, 1], [0, 9]]),])
self.assertAllEqual(result1, [3, 4, 4])
self.assertAllEqual(result2, [0, 9])
def test_with_static_shape(self):
def fn(input_tensor):
return tf.reduce_sum(input_tensor)
def graph_fn():
input_tensor = tf.constant([[1, 2], [3, 1], [0, 4]], dtype=tf.float32)
return shape_utils.static_or_dynamic_map_fn(fn, input_tensor)
result = self.execute(graph_fn, [])
self.assertAllEqual(result, [3, 4, 4])
def test_with_multiple_dynamic_shapes(self):
def fn(elems):
input_tensor, scalar_index_tensor = elems
return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])
def graph_fn(input_tensor, scalar_index_tensor):
map_fn_output = shape_utils.static_or_dynamic_map_fn(
fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)
return map_fn_output
# The input has different shapes, but due to how self.execute()
# works, the shape is known at graph compile time.
result1 = self.execute(
graph_fn, [
np.array([[1, 2, 3], [4, 5, -1], [0, 6, 9]]),
np.array([[0], [2], [1]]),
])
result2 = self.execute(
graph_fn, [
np.array([[-1, 1, 0], [3, 9, 30]]),
np.array([[1], [0]])
])
self.assertAllEqual(result1, [1, -1, 6])
self.assertAllEqual(result2, [1, 3])
def test_with_multiple_static_shapes(self):
def fn(elems):
input_tensor, scalar_index_tensor = elems
return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])
def graph_fn():
input_tensor = tf.constant([[1, 2, 3], [4, 5, -1], [0, 6, 9]],
dtype=tf.float32)
scalar_index_tensor = tf.constant([[0], [2], [1]], dtype=tf.int32)
map_fn_output = shape_utils.static_or_dynamic_map_fn(
fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)
return map_fn_output
result = self.execute(graph_fn, [])
self.assertAllEqual(result, [1, -1, 6])
def test_fails_with_nested_input(self):
def fn(input_tensor):
return input_tensor
input_tensor1 = tf.constant([1])
input_tensor2 = tf.constant([2])
with self.assertRaisesRegexp(
ValueError, '`elems` must be a Tensor or list of Tensors.'):
shape_utils.static_or_dynamic_map_fn(
fn, [input_tensor1, [input_tensor2]], dtype=tf.float32)
class CheckMinImageShapeTest(test_case.TestCase):
def test_check_min_image_dim_static_shape(self):
input_tensor = tf.constant(np.zeros([1, 42, 42, 3]))
_ = shape_utils.check_min_image_dim(33, input_tensor)
with self.assertRaisesRegexp(
ValueError, 'image size must be >= 64 in both height and width.'):
_ = shape_utils.check_min_image_dim(64, input_tensor)
def test_check_min_image_dim_dynamic_shape(self):
def graph_fn(input_tensor):
return shape_utils.check_min_image_dim(33, input_tensor)
self.execute(graph_fn,
[np.zeros([1, 42, 42, 3])])
self.assertRaises(
ValueError, self.execute,
graph_fn, np.zeros([1, 32, 32, 3])
)
class AssertShapeEqualTest(test_case.TestCase):
def test_unequal_static_shape_raises_exception(self):
shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
shape_b = tf.constant(np.zeros([4, 2, 3, 1]))
self.assertRaisesRegex(
ValueError, 'Unequal shapes',
shape_utils.assert_shape_equal,
shape_utils.combined_static_and_dynamic_shape(shape_a),
shape_utils.combined_static_and_dynamic_shape(shape_b)
)
def test_equal_static_shape_succeeds(self):
def graph_fn():
shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
shape_b = tf.constant(np.zeros([4, 2, 2, 1]))
shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(shape_a),
shape_utils.combined_static_and_dynamic_shape(shape_b))
return tf.constant(0)
self.execute(graph_fn, [])
def test_unequal_dynamic_shape_raises_tf_assert(self):
def graph_fn(tensor_a, tensor_b):
shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(tensor_a),
shape_utils.combined_static_and_dynamic_shape(tensor_b))
return tf.constant(0)
self.assertRaises(ValueError,
self.execute, graph_fn,
[np.zeros([1, 2, 2, 3]), np.zeros([1, 4, 4, 3])])
def test_equal_dynamic_shape_succeeds(self):
def graph_fn(tensor_a, tensor_b):
shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(tensor_a),
shape_utils.combined_static_and_dynamic_shape(tensor_b)
)
return tf.constant(0)
self.execute(graph_fn, [np.zeros([1, 2, 2, 3]),
np.zeros([1, 2, 2, 3])])
def test_unequal_static_shape_along_first_dim_raises_exception(self):
shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
shape_b = tf.constant(np.zeros([6, 2, 3, 1]))
self.assertRaisesRegexp(
ValueError, 'Unequal first dimension',
shape_utils.assert_shape_equal_along_first_dimension,
shape_utils.combined_static_and_dynamic_shape(shape_a),
shape_utils.combined_static_and_dynamic_shape(shape_b)
)
def test_equal_static_shape_along_first_dim_succeeds(self):
def graph_fn():
shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
shape_b = tf.constant(np.zeros([4, 7, 2]))
shape_utils.assert_shape_equal_along_first_dimension(
shape_utils.combined_static_and_dynamic_shape(shape_a),
shape_utils.combined_static_and_dynamic_shape(shape_b))
return tf.constant(0)
self.execute(graph_fn, [])
def test_unequal_dynamic_shape_along_first_dim_raises_tf_assert(self):
def graph_fn(tensor_a, tensor_b):
shape_utils.assert_shape_equal_along_first_dimension(
shape_utils.combined_static_and_dynamic_shape(tensor_a),
shape_utils.combined_static_and_dynamic_shape(tensor_b))
return tf.constant(0)
self.assertRaises(ValueError,
self.execute, graph_fn,
[np.zeros([1, 2, 2, 3]), np.zeros([2, 4, 3])])
def test_equal_dynamic_shape_along_first_dim_succeeds(self):
def graph_fn(tensor_a, tensor_b):
shape_utils.assert_shape_equal_along_first_dimension(
shape_utils.combined_static_and_dynamic_shape(tensor_a),
shape_utils.combined_static_and_dynamic_shape(tensor_b))
return tf.constant(0)
self.execute(graph_fn, [np.zeros([5, 2, 2, 3]), np.zeros([5])])
class FlattenExpandDimensionTest(test_case.TestCase):
def test_flatten_given_dims(self):
def graph_fn():
inputs = tf.random_uniform([5, 2, 10, 10, 3])
actual_flattened = shape_utils.flatten_dimensions(inputs, first=1, last=3)
expected_flattened = tf.reshape(inputs, [5, 20, 10, 3])
return actual_flattened, expected_flattened
(actual_flattened_np,
expected_flattened_np) = self.execute(graph_fn, [])
self.assertAllClose(expected_flattened_np, actual_flattened_np)
def test_raises_value_error_incorrect_dimensions(self):
inputs = tf.random_uniform([5, 2, 10, 10, 3])
self.assertRaises(ValueError,
shape_utils.flatten_dimensions, inputs,
first=0, last=6)
def test_flatten_first_two_dimensions(self):
def graph_fn():
inputs = tf.constant(
[
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]]
], dtype=tf.int32)
flattened_tensor = shape_utils.flatten_first_n_dimensions(
inputs, 2)
return flattened_tensor
flattened_tensor_out = self.execute(graph_fn, [])
expected_output = [[1, 2],
[3, 4],
[5, 6],
[7, 8],
[9, 10],
[11, 12]]
self.assertAllEqual(expected_output, flattened_tensor_out)
def test_expand_first_dimension(self):
def graph_fn():
inputs = tf.constant(
[
[1, 2],
[3, 4],
[5, 6],
[7, 8],
[9, 10],
[11, 12]
], dtype=tf.int32)
dims = [3, 2]
expanded_tensor = shape_utils.expand_first_dimension(
inputs, dims)
return expanded_tensor
expanded_tensor_out = self.execute(graph_fn, [])
expected_output = [
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]]]
self.assertAllEqual(expected_output, expanded_tensor_out)
def test_expand_first_dimension_with_incompatible_dims(self):
def graph_fn():
inputs = tf.constant(
[
[[1, 2]],
[[3, 4]],
[[5, 6]],
], dtype=tf.int32)
dims = [3, 2]
expanded_tensor = shape_utils.expand_first_dimension(
inputs, dims)
return expanded_tensor
self.assertRaises(ValueError, self.execute, graph_fn, [])
if __name__ == '__main__':
tf.test.main()
# ===== End of file: object_detection/utils/shape_utils_test.py =====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing metrics like precision, recall, CorLoc and etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
def compute_precision_recall(scores, labels, num_gt):
"""Compute precision and recall.
Args:
scores: A float numpy array representing detection score
labels: A float numpy array representing weighted true/false positive labels
num_gt: Number of ground truth instances
Raises:
ValueError: if the input is not of the correct format
Returns:
precision: Fraction of positive instances over detected ones. This value is
None if no ground truth labels are present.
recall: Fraction of detected positive instance over all positive instances.
This value is None if no ground truth labels are present.
"""
if not isinstance(labels, np.ndarray) or len(labels.shape) != 1:
raise ValueError("labels must be single dimension numpy array")
  if labels.dtype != np.float64 and labels.dtype != np.bool_:
raise ValueError("labels type must be either bool or float")
if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
raise ValueError("scores must be single dimension numpy array")
if num_gt < np.sum(labels):
raise ValueError("Number of true positives must be smaller than num_gt.")
if len(scores) != len(labels):
raise ValueError("scores and labels must be of the same size.")
if num_gt == 0:
return None, None
sorted_indices = np.argsort(scores)
sorted_indices = sorted_indices[::-1]
true_positive_labels = labels[sorted_indices]
false_positive_labels = (true_positive_labels <= 0).astype(float)
cum_true_positives = np.cumsum(true_positive_labels)
cum_false_positives = np.cumsum(false_positive_labels)
precision = cum_true_positives.astype(float) / (
cum_true_positives + cum_false_positives)
recall = cum_true_positives.astype(float) / num_gt
return precision, recall
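# Example usage (illustrative sketch): three detections, two of which are
# true positives, with three groundtruth instances in total.
#
#   scores = np.array([0.9, 0.7, 0.3], dtype=float)
#   labels = np.array([1, 0, 1], dtype=float)
#   precision, recall = compute_precision_recall(scores, labels, num_gt=3)
#   # precision -> [1., 0.5, 0.667], recall -> [0.333, 0.333, 0.667]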
def compute_average_precision(precision, recall):
"""Compute Average Precision according to the definition in VOCdevkit.
Precision is modified to ensure that it does not decrease as recall
decrease.
Args:
precision: A float [N, 1] numpy array of precisions
recall: A float [N, 1] numpy array of recalls
Raises:
ValueError: if the input is not of the correct format
Returns:
    average_precision: The area under the precision recall curve. NaN if
precision and recall are None.
"""
if precision is None:
if recall is not None:
raise ValueError("If precision is None, recall must also be None")
    return np.nan
if not isinstance(precision, np.ndarray) or not isinstance(
recall, np.ndarray):
raise ValueError("precision and recall must be numpy array")
  if precision.dtype != np.float64 or recall.dtype != np.float64:
raise ValueError("input must be float numpy array.")
if len(precision) != len(recall):
raise ValueError("precision and recall must be of the same size.")
if not precision.size:
return 0.0
if np.amin(precision) < 0 or np.amax(precision) > 1:
raise ValueError("Precision must be in the range of [0, 1].")
if np.amin(recall) < 0 or np.amax(recall) > 1:
raise ValueError("recall must be in the range of [0, 1].")
if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
raise ValueError("recall must be a non-decreasing array")
recall = np.concatenate([[0], recall, [1]])
precision = np.concatenate([[0], precision, [0]])
# Preprocess precision to be a non-decreasing array
for i in range(len(precision) - 2, -1, -1):
precision[i] = np.maximum(precision[i], precision[i + 1])
indices = np.where(recall[1:] != recall[:-1])[0] + 1
average_precision = np.sum(
(recall[indices] - recall[indices - 1]) * precision[indices])
return average_precision
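# Example usage (illustrative sketch), continuing the precision/recall values
# from the example above:
#
#   precision = np.array([1., 0.5, 2. / 3.], dtype=float)
#   recall = np.array([1. / 3., 1. / 3., 2. / 3.], dtype=float)
#   compute_average_precision(precision, recall)
#   # -> (1/3) * 1.0 + (1/3) * (2/3) + (1/3) * 0.0 = 5/9 ~= 0.556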
def compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class):
"""Compute CorLoc according to the definition in the following paper.
https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf
Returns nans if there are no ground truth images for a class.
Args:
num_gt_imgs_per_class: 1D array, representing number of images containing
at least one object instance of a particular class
    num_images_correctly_detected_per_class: 1D array, representing the number
      of images in which at least one object instance of a particular class
      is correctly detected
Returns:
corloc_per_class: A float numpy array represents the corloc score of each
class
"""
return np.where(
num_gt_imgs_per_class == 0, np.nan,
num_images_correctly_detected_per_class / num_gt_imgs_per_class)
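# Example usage (illustrative sketch): the second class has no groundtruth
# images, so its CorLoc is NaN.
#
#   compute_cor_loc(np.array([10, 0, 4]), np.array([8, 0, 1]))
#   # -> [0.8, nan, 0.25]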
def compute_median_rank_at_k(tp_fp_list, k):
"""Computes MedianRank@k, where k is the top-scoring labels.
Args:
    tp_fp_list: a list of numpy arrays; each numpy array corresponds to all
      detections on a single image, where the detections are sorted by score in
descending order. Further, each numpy array element can have boolean or
float values. True positive elements have either value >0.0 or True;
any other value is considered false positive.
k: number of top-scoring proposals to take.
Returns:
median_rank: median rank of all true positive proposals among top k by
score.
"""
ranks = []
for i in range(len(tp_fp_list)):
ranks.append(
np.where(tp_fp_list[i][0:min(k, tp_fp_list[i].shape[0])] > 0)[0])
concatenated_ranks = np.concatenate(ranks)
return np.median(concatenated_ranks)
def compute_recall_at_k(tp_fp_list, num_gt, k):
"""Computes Recall@k, MedianRank@k, where k is the top-scoring labels.
Args:
    tp_fp_list: a list of numpy arrays; each numpy array corresponds to all
      detections on a single image, where the detections are sorted by score in
      descending order. Further, each numpy array element can have boolean or
      float values. True positive elements have either value >0.0 or True;
      any other value is considered false positive.
    num_gt: number of groundtruth annotations.
k: number of top-scoring proposals to take.
Returns:
recall: recall evaluated on the top k by score detections.
"""
tp_fp_eval = []
for i in range(len(tp_fp_list)):
tp_fp_eval.append(tp_fp_list[i][0:min(k, tp_fp_list[i].shape[0])])
tp_fp_eval = np.concatenate(tp_fp_eval)
return np.sum(tp_fp_eval) / num_gt
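# Example usage (illustrative sketch): two images with detections already
# sorted by score; only the top 2 detections per image are kept.
#
#   tp_fp_list = [np.array([1., 0., 1.]), np.array([0., 1.])]
#   compute_recall_at_k(tp_fp_list, num_gt=4, k=2)
#   # -> (1 + 0 + 0 + 1) / 4 = 0.5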
# ===== End of file: object_detection/utils/metrics.py =====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for helper tensorflow ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
from object_detection.utils import spatial_transform_ops as spatial_ops
from object_detection.utils import static_shape
matmul_crop_and_resize = spatial_ops.matmul_crop_and_resize
multilevel_roi_align = spatial_ops.multilevel_roi_align
native_crop_and_resize = spatial_ops.native_crop_and_resize
def expanded_shape(orig_shape, start_dim, num_dims):
"""Inserts multiple ones into a shape vector.
Inserts an all-1 vector of length num_dims at position start_dim into a shape.
Can be combined with tf.reshape to generalize tf.expand_dims.
Args:
orig_shape: the shape into which the all-1 vector is added (int32 vector)
start_dim: insertion position (int scalar)
num_dims: length of the inserted all-1 vector (int scalar)
Returns:
An int32 vector of length tf.size(orig_shape) + num_dims.
"""
with tf.name_scope('ExpandedShape'):
start_dim = tf.expand_dims(start_dim, 0) # scalar to rank-1
before = tf.slice(orig_shape, [0], start_dim)
add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
after = tf.slice(orig_shape, start_dim, [-1])
new_shape = tf.concat([before, add_shape, after], 0)
return new_shape
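# Example usage (illustrative sketch): inserting two 1s at position 1.
#
#   expanded_shape(tf.constant([2, 3, 4]), start_dim=1, num_dims=2)
#   # -> [2, 1, 1, 3, 4]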
def normalized_to_image_coordinates(normalized_boxes, image_shape,
parallel_iterations=32):
"""Converts a batch of boxes from normal to image coordinates.
Args:
normalized_boxes: a tensor of shape [None, num_boxes, 4] in
normalized coordinates. The dtype of this tensor must support tf.mul.
image_shape: a tensor of shape [4] containing the image shape, with same
dtype as `normalized_boxes`.
parallel_iterations: parallelism for the map_fn op.
Returns:
absolute_boxes: a tensor of shape [None, num_boxes, 4] containing
the boxes in image coordinates, with same
dtype as `normalized_boxes`.
"""
x_scale = tf.cast(image_shape[2], normalized_boxes.dtype)
y_scale = tf.cast(image_shape[1], normalized_boxes.dtype)
def _to_absolute_coordinates(normalized_boxes):
y_min, x_min, y_max, x_max = tf.split(
value=normalized_boxes, num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxes = tf.concat([y_min, x_min, y_max, x_max], 1)
return scaled_boxes
absolute_boxes = shape_utils.static_or_dynamic_map_fn(
_to_absolute_coordinates,
elems=(normalized_boxes),
dtype=normalized_boxes.dtype,
parallel_iterations=parallel_iterations,
back_prop=True)
return absolute_boxes
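# Example usage (illustrative sketch): one box on a 200 x 100
# (height x width) image.
#
#   boxes = tf.constant([[[0., 0., 0.5, 1.]]])
#   image_shape = tf.constant([1., 200., 100., 3.])
#   normalized_to_image_coordinates(boxes, image_shape)
#   # -> [[[0., 0., 100., 100.]]]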
def meshgrid(x, y):
"""Tiles the contents of x and y into a pair of grids.
Multidimensional analog of numpy.meshgrid, giving the same behavior if x and y
are vectors. Generally, this will give:
xgrid(i1, ..., i_m, j_1, ..., j_n) = x(j_1, ..., j_n)
ygrid(i1, ..., i_m, j_1, ..., j_n) = y(i_1, ..., i_m)
Keep in mind that the order of the arguments and outputs is reverse relative
to the order of the indices they go into, done for compatibility with numpy.
The output tensors have the same shapes. Specifically:
xgrid.get_shape() = y.get_shape().concatenate(x.get_shape())
ygrid.get_shape() = y.get_shape().concatenate(x.get_shape())
Args:
x: A tensor of arbitrary shape and rank. xgrid will contain these values
varying in its last dimensions.
y: A tensor of arbitrary shape and rank. ygrid will contain these values
varying in its first dimensions.
Returns:
A tuple of tensors (xgrid, ygrid).
"""
with tf.name_scope('Meshgrid'):
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
x_exp_shape = expanded_shape(tf.shape(x), 0, tf.rank(y))
y_exp_shape = expanded_shape(tf.shape(y), tf.rank(y), tf.rank(x))
xgrid = tf.tile(tf.reshape(x, x_exp_shape), y_exp_shape)
ygrid = tf.tile(tf.reshape(y, y_exp_shape), x_exp_shape)
new_shape = y.get_shape().concatenate(x.get_shape())
xgrid.set_shape(new_shape)
ygrid.set_shape(new_shape)
return xgrid, ygrid
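# Example usage (illustrative sketch):
#
#   xgrid, ygrid = meshgrid(tf.constant([1, 2, 3]), tf.constant([4, 5]))
#   # xgrid -> [[1, 2, 3], [1, 2, 3]]
#   # ygrid -> [[4, 4, 4], [5, 5, 5]]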
def fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
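# Example usage (illustrative sketch): a 3x3 kernel needs one pixel of
# padding on every side.
#
#   fixed_padding(tf.zeros([1, 5, 5, 3]), kernel_size=3)
#   # -> tensor of shape [1, 7, 7, 3]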
def pad_to_multiple(tensor, multiple):
"""Returns the tensor zero padded to the specified multiple.
Appends 0s to the end of the first and second dimension (height and width) of
the tensor until both dimensions are a multiple of the input argument
'multiple'. E.g. given an input tensor of shape [1, 3, 5, 1] and an input
multiple of 4, PadToMultiple will append 0s so that the resulting tensor will
be of shape [1, 4, 8, 1].
Args:
tensor: rank 4 float32 tensor, where
tensor -> [batch_size, height, width, channels].
multiple: the multiple to pad to.
Returns:
padded_tensor: the tensor zero padded to the specified multiple.
"""
if multiple == 1:
return tensor
tensor_shape = tensor.get_shape()
batch_size = static_shape.get_batch_size(tensor_shape)
tensor_height = static_shape.get_height(tensor_shape)
tensor_width = static_shape.get_width(tensor_shape)
tensor_depth = static_shape.get_depth(tensor_shape)
if batch_size is None:
batch_size = tf.shape(tensor)[0]
if tensor_height is None:
tensor_height = tf.shape(tensor)[1]
padded_tensor_height = tf.cast(
tf.ceil(
tf.cast(tensor_height, dtype=tf.float32) /
tf.cast(multiple, dtype=tf.float32)),
dtype=tf.int32) * multiple
else:
padded_tensor_height = int(
math.ceil(float(tensor_height) / multiple) * multiple)
if tensor_width is None:
tensor_width = tf.shape(tensor)[2]
padded_tensor_width = tf.cast(
tf.ceil(
tf.cast(tensor_width, dtype=tf.float32) /
tf.cast(multiple, dtype=tf.float32)),
dtype=tf.int32) * multiple
else:
padded_tensor_width = int(
math.ceil(float(tensor_width) / multiple) * multiple)
if tensor_depth is None:
tensor_depth = tf.shape(tensor)[3]
# Use tf.concat instead of tf.pad to preserve static shape
if padded_tensor_height != tensor_height:
height_pad = tf.zeros([
batch_size, padded_tensor_height - tensor_height, tensor_width,
tensor_depth
], dtype=tensor.dtype)
tensor = tf.concat([tensor, height_pad], 1)
if padded_tensor_width != tensor_width:
width_pad = tf.zeros([
batch_size, padded_tensor_height, padded_tensor_width - tensor_width,
tensor_depth
], dtype=tensor.dtype)
tensor = tf.concat([tensor, width_pad], 2)
return tensor
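# Example usage (illustrative sketch), matching the docstring above:
#
#   pad_to_multiple(tf.zeros([1, 3, 5, 1]), multiple=4)
#   # -> tensor of shape [1, 4, 8, 1]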
def padded_one_hot_encoding(indices, depth, left_pad):
"""Returns a zero padded one-hot tensor.
This function converts a sparse representation of indices (e.g., [4]) to a
zero padded one-hot representation (e.g., [0, 0, 0, 0, 1] with depth = 4 and
left_pad = 1). If `indices` is empty, the result will simply be a tensor of
shape (0, depth + left_pad). If depth = 0, then this function just returns
`None`.
Args:
indices: an integer tensor of shape [num_indices].
depth: depth for the one-hot tensor (integer).
left_pad: number of zeros to left pad the one-hot tensor with (integer).
Returns:
padded_onehot: a tensor with shape (num_indices, depth + left_pad). Returns
`None` if the depth is zero.
Raises:
    ValueError: if `indices` does not have rank 1 or if `left_pad` or `depth`
      are either negative or non-integers.
TODO(rathodv): add runtime checks for depth and indices.
"""
if depth < 0 or not isinstance(depth, six.integer_types):
raise ValueError('`depth` must be a non-negative integer.')
if left_pad < 0 or not isinstance(left_pad, six.integer_types):
raise ValueError('`left_pad` must be a non-negative integer.')
if depth == 0:
return None
rank = len(indices.get_shape().as_list())
if rank != 1:
raise ValueError('`indices` must have rank 1, but has rank=%s' % rank)
def one_hot_and_pad():
one_hot = tf.cast(tf.one_hot(tf.cast(indices, tf.int64), depth,
on_value=1, off_value=0), tf.float32)
return tf.pad(one_hot, [[0, 0], [left_pad, 0]], mode='CONSTANT')
result = tf.cond(tf.greater(tf.size(indices), 0), one_hot_and_pad,
lambda: tf.zeros((tf.size(indices), depth + left_pad)))
return tf.reshape(result, [-1, depth + left_pad])
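# Example usage (illustrative sketch): depth 3 with one column of left
# padding.
#
#   padded_one_hot_encoding(tf.constant([0, 2]), depth=3, left_pad=1)
#   # -> [[0., 1., 0., 0.],
#   #     [0., 0., 0., 1.]]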
def dense_to_sparse_boxes(dense_locations, dense_num_boxes, num_classes):
"""Converts bounding boxes from dense to sparse form.
Args:
dense_locations: a [max_num_boxes, 4] tensor in which only the first k rows
are valid bounding box location coordinates, where k is the sum of
elements in dense_num_boxes.
dense_num_boxes: a [max_num_classes] tensor indicating the counts of
various bounding box classes e.g. [1, 0, 0, 2] means that the first
bounding box is of class 0 and the second and third bounding boxes are
of class 3. The sum of elements in this tensor is the number of valid
bounding boxes.
num_classes: number of classes
Returns:
box_locations: a [num_boxes, 4] tensor containing only valid bounding
boxes (i.e. the first num_boxes rows of dense_locations)
box_classes: a [num_boxes] tensor containing the classes of each bounding
      box (e.g. dense_num_boxes = [1, 0, 0, 2] => box_classes = [0, 3, 3])
"""
num_valid_boxes = tf.reduce_sum(dense_num_boxes)
box_locations = tf.slice(dense_locations,
tf.constant([0, 0]), tf.stack([num_valid_boxes, 4]))
tiled_classes = [tf.tile([i], tf.expand_dims(dense_num_boxes[i], 0))
for i in range(num_classes)]
box_classes = tf.concat(tiled_classes, 0)
box_locations.set_shape([None, 4])
return box_locations, box_classes
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=tf.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
size = tf.cast(size, dtype=tf.int32)
zeros = tf.ones([size], dtype=dtype) * default_value
values = tf.ones_like(indices, dtype=dtype) * indices_value
return tf.dynamic_stitch([tf.range(size), tf.cast(indices, dtype=tf.int32)],
[zeros, values])
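# Example usage (illustrative sketch):
#
#   indices_to_dense_vector(tf.constant([1, 3]), size=5)
#   # -> [0., 1., 0., 1., 0.]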
def reduce_sum_trailing_dimensions(tensor, ndims):
"""Computes sum across all dimensions following first `ndims` dimensions."""
return tf.reduce_sum(tensor, axis=tuple(range(ndims, tensor.shape.ndims)))
def retain_groundtruth(tensor_dict, valid_indices):
"""Retains groundtruth by valid indices.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
valid_indices: a tensor with valid indices for the box-level groundtruth.
Returns:
a dictionary of tensors containing only the groundtruth for valid_indices.
Raises:
ValueError: If the shape of valid_indices is invalid.
ValueError: field fields.InputDataFields.groundtruth_boxes is
not present in tensor_dict.
"""
input_shape = valid_indices.get_shape().as_list()
if not (len(input_shape) == 1 or
(len(input_shape) == 2 and input_shape[1] == 1)):
raise ValueError('The shape of valid_indices is invalid.')
valid_indices = tf.reshape(valid_indices, [-1])
valid_dict = {}
if fields.InputDataFields.groundtruth_boxes in tensor_dict:
# Prevents reshape failure when num_boxes is 0.
num_boxes = tf.maximum(tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], 1)
for key in tensor_dict:
if key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_confidences,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_instance_masks]:
valid_dict[key] = tf.gather(tensor_dict[key], valid_indices)
# Input decoder returns empty tensor when these fields are not provided.
# Needs to reshape into [num_boxes, -1] for tf.gather() to work.
elif key in [fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_label_types]:
valid_dict[key] = tf.reshape(
tf.gather(tf.reshape(tensor_dict[key], [num_boxes, -1]),
valid_indices), [-1])
# Fields that are not associated with boxes.
else:
valid_dict[key] = tensor_dict[key]
else:
raise ValueError('%s not present in input tensor dict.' % (
fields.InputDataFields.groundtruth_boxes))
return valid_dict
def retain_groundtruth_with_positive_classes(tensor_dict):
"""Retains only groundtruth with positive class ids.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
Returns:
a dictionary of tensors containing only the groundtruth with positive
classes.
Raises:
ValueError: If groundtruth_classes tensor is not in tensor_dict.
"""
if fields.InputDataFields.groundtruth_classes not in tensor_dict:
raise ValueError('`groundtruth classes` not in tensor_dict.')
keep_indices = tf.where(tf.greater(
tensor_dict[fields.InputDataFields.groundtruth_classes], 0))
return retain_groundtruth(tensor_dict, keep_indices)
def replace_nan_groundtruth_label_scores_with_ones(label_scores):
"""Replaces nan label scores with 1.0.
Args:
    label_scores: a tensor containing object annotation label scores.
Returns:
a tensor where NaN label scores have been replaced by ones.
"""
return tf.where(
tf.is_nan(label_scores), tf.ones(tf.shape(label_scores)), label_scores)
def filter_groundtruth_with_crowd_boxes(tensor_dict):
"""Filters out groundtruth with boxes corresponding to crowd.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
Returns:
a dictionary of tensors containing only the groundtruth that have bounding
boxes.
"""
if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
is_not_crowd = tf.logical_not(is_crowd)
is_not_crowd_indices = tf.where(is_not_crowd)
tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
return tensor_dict
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
"""Filters out groundtruth with no bounding boxes.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
Returns:
a dictionary of tensors containing only the groundtruth that have bounding
boxes.
"""
groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
nan_indicator_vector = tf.greater(tf.reduce_sum(tf.cast(
tf.is_nan(groundtruth_boxes), dtype=tf.int32), reduction_indices=[1]), 0)
valid_indicator_vector = tf.logical_not(nan_indicator_vector)
valid_indices = tf.where(valid_indicator_vector)
return retain_groundtruth(tensor_dict, valid_indices)
def filter_unrecognized_classes(tensor_dict):
"""Filters out class labels that are not unrecognized by the labelmap.
Decoder would parse unrecognized classes (not included in the labelmap) to
a label of value -1. Such targets are unecessary for training, and causes
issue for evaluation, due to labeling mapping logic. This function filters
those labels out for both training and evaluation.
Args:
tensor_dict: dictionary containing input tensors keyed by
fields.InputDataFields.
Returns:
A dictionary keyed by fields.InputDataFields containing the tensors
obtained after applying the filtering.
Raises:
ValueError: If groundtruth_classes tensor is not in tensor_dict.
"""
if fields.InputDataFields.groundtruth_classes not in tensor_dict:
raise ValueError('`groundtruth classes` not in tensor_dict.')
# Refer to tf_example_decoder for how unrecognized labels are handled.
unrecognized_label = -1
recognized_indices = tf.where(
tf.greater(tensor_dict[fields.InputDataFields.groundtruth_classes],
unrecognized_label))
return retain_groundtruth(tensor_dict, recognized_indices)
def normalize_to_target(inputs,
target_norm_value,
dim,
epsilon=1e-7,
trainable=True,
scope='NormalizeToTarget',
summarize=True):
"""L2 normalizes the inputs across the specified dimension to a target norm.
This op implements the L2 Normalization layer introduced in
Liu, Wei, et al. "SSD: Single Shot MultiBox Detector."
and Liu, Wei, Andrew Rabinovich, and Alexander C. Berg.
"Parsenet: Looking wider to see better." and is useful for bringing
activations from multiple layers in a convnet to a standard scale.
Note that the rank of `inputs` must be known and the dimension to which
normalization is to be applied should be statically defined.
TODO(jonathanhuang): Add option to scale by L2 norm of the entire input.
Args:
inputs: A `Tensor` of arbitrary size.
target_norm_value: A float value that specifies an initial target norm or
a list of floats (whose length must be equal to the depth along the
dimension to be normalized) specifying a per-dimension multiplier
after normalization.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
trainable: Whether the norm is trainable or not
scope: Optional scope for variable_scope.
summarize: Whether or not to add a tensorflow summary for the op.
Returns:
The input tensor normalized to the specified target norm.
Raises:
ValueError: If dim is smaller than the number of dimensions in 'inputs'.
ValueError: If target_norm_value is not a float or a list of floats with
length equal to the depth along the dimension to be normalized.
"""
with tf.variable_scope(scope, 'NormalizeToTarget', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_shape = inputs.get_shape().as_list()
input_rank = len(input_shape)
if dim < 0 or dim >= input_rank:
raise ValueError(
'dim must be non-negative but smaller than the input rank.')
if not input_shape[dim]:
raise ValueError('input shape should be statically defined along '
'the specified dimension.')
depth = input_shape[dim]
if not (isinstance(target_norm_value, float) or
(isinstance(target_norm_value, list) and
len(target_norm_value) == depth) and
all([isinstance(val, float) for val in target_norm_value])):
raise ValueError('target_norm_value must be a float or a list of floats '
'with length equal to the depth along the dimension to '
'be normalized.')
if isinstance(target_norm_value, float):
initial_norm = depth * [target_norm_value]
else:
initial_norm = target_norm_value
target_norm = slim.model_variable(
name='weights',
dtype=tf.float32,
initializer=tf.constant(initial_norm, dtype=tf.float32),
trainable=trainable)
if summarize:
mean = tf.reduce_mean(target_norm)
tf.summary.scalar(tf.get_variable_scope().name, mean)
lengths = epsilon + tf.sqrt(tf.reduce_sum(tf.square(inputs), dim, True))
mult_shape = input_rank*[1]
mult_shape[dim] = depth
return tf.reshape(target_norm, mult_shape) * tf.truediv(inputs, lengths)
def batch_position_sensitive_crop_regions(images,
boxes,
crop_size,
num_spatial_bins,
global_pool,
parallel_iterations=64):
"""Position sensitive crop with batches of images and boxes.
This op is exactly like `position_sensitive_crop_regions` below but operates
on batches of images and boxes. See `position_sensitive_crop_regions` function
below for the operation applied per batch element.
Args:
images: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`, `half`, `float32`, `float64`.
A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A `Tensor` of type `float32`.
A 3-D tensor of shape `[batch, num_boxes, 4]`. Each box is specified in
normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value
of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so
as the `[0, 1]` interval of normalized image height is mapped to
      `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2,
in which case the sampled crop is an up-down flipped version of the
original image. The width dimension is treated similarly.
crop_size: See `position_sensitive_crop_regions` below.
num_spatial_bins: See `position_sensitive_crop_regions` below.
global_pool: See `position_sensitive_crop_regions` below.
parallel_iterations: Number of batch items to process in parallel.
  Returns:
    position_sensitive_features: A 5-D tensor of shape
      `[batch, num_boxes, K, K, crop_channels]`, where
      `crop_channels = depth / (spatial_bins_y * spatial_bins_x)`; K = 1 when
      global_pool is True (average-pooled cropped regions) and K = crop_size
      when global_pool is False.
  """
def _position_sensitive_crop_fn(inputs):
images, boxes = inputs
return position_sensitive_crop_regions(
images,
boxes,
crop_size=crop_size,
num_spatial_bins=num_spatial_bins,
global_pool=global_pool)
return shape_utils.static_or_dynamic_map_fn(
_position_sensitive_crop_fn,
elems=[images, boxes],
dtype=tf.float32,
parallel_iterations=parallel_iterations)
def position_sensitive_crop_regions(image,
boxes,
crop_size,
num_spatial_bins,
global_pool):
"""Position-sensitive crop and pool rectangular regions from a feature grid.
The output crops are split into `spatial_bins_y` vertical bins
and `spatial_bins_x` horizontal bins. For each intersection of a vertical
and a horizontal bin the output values are gathered by performing
  `tf.image.crop_and_resize` (bilinear resampling) on a separate subset of
channels of the image. This reduces `depth` by a factor of
`(spatial_bins_y * spatial_bins_x)`.
When global_pool is True, this function implements a differentiable version
of position-sensitive RoI pooling used in
[R-FCN detection system](https://arxiv.org/abs/1605.06409).
When global_pool is False, this function implements a differentiable version
of position-sensitive assembling operation used in
[instance FCN](https://arxiv.org/abs/1603.08678).
Args:
image: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`, `half`, `float32`, `float64`.
A 3-D tensor of shape `[image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A `Tensor` of type `float32`.
A 2-D tensor of shape `[num_boxes, 4]`. Each box is specified in
normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value
of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so
as the `[0, 1]` interval of normalized image height is mapped to
      `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2,
in which case the sampled crop is an up-down flipped version of the
original image. The width dimension is treated similarly.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`.
Represents the number of position-sensitive bins in y and x directions.
Both values should be >= 1. `crop_height` should be divisible by
`spatial_bins_y`, and similarly for width.
The number of image channels should be divisible by
(spatial_bins_y * spatial_bins_x).
Suggested value from R-FCN paper: [3, 3].
global_pool: A boolean variable.
If True, we perform average global pooling on the features assembled from
the position-sensitive score maps.
If False, we keep the position-pooled features without global pooling
over the spatial coordinates.
Note that using global_pool=True is equivalent to but more efficient than
running the function with global_pool=False and then performing global
average pooling.
Returns:
position_sensitive_features: A 4-D tensor of shape
`[num_boxes, K, K, crop_channels]`,
where `crop_channels = depth / (spatial_bins_y * spatial_bins_x)`,
where K = 1 when global_pool is True (Average-pooled cropped regions),
and K = crop_size when global_pool is False.
Raises:
ValueError: Raised in four situations:
`num_spatial_bins` is not >= 1;
`num_spatial_bins` does not divide `crop_size`;
`(spatial_bins_y*spatial_bins_x)` does not divide `depth`;
`bin_crop_size` is not square when global_pool=False due to the
constraint in function space_to_depth.
"""
total_bins = 1
bin_crop_size = []
for (num_bins, crop_dim) in zip(num_spatial_bins, crop_size):
if num_bins < 1:
raise ValueError('num_spatial_bins should be >= 1')
if crop_dim % num_bins != 0:
raise ValueError('crop_size should be divisible by num_spatial_bins')
total_bins *= num_bins
bin_crop_size.append(crop_dim // num_bins)
if not global_pool and bin_crop_size[0] != bin_crop_size[1]:
raise ValueError('Only support square bin crop size for now.')
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
spatial_bins_y, spatial_bins_x = num_spatial_bins
# Split each box into spatial_bins_y * spatial_bins_x bins.
position_sensitive_boxes = []
for bin_y in range(spatial_bins_y):
step_y = (ymax - ymin) / spatial_bins_y
for bin_x in range(spatial_bins_x):
step_x = (xmax - xmin) / spatial_bins_x
box_coordinates = [ymin + bin_y * step_y,
xmin + bin_x * step_x,
ymin + (bin_y + 1) * step_y,
xmin + (bin_x + 1) * step_x,
]
position_sensitive_boxes.append(tf.stack(box_coordinates, axis=1))
image_splits = tf.split(value=image, num_or_size_splits=total_bins, axis=2)
image_crops = []
for (split, box) in zip(image_splits, position_sensitive_boxes):
if split.shape.is_fully_defined() and box.shape.is_fully_defined():
crop = tf.squeeze(
matmul_crop_and_resize(
tf.expand_dims(split, axis=0), tf.expand_dims(box, axis=0),
bin_crop_size),
axis=0)
else:
crop = tf.image.crop_and_resize(
tf.expand_dims(split, 0), box,
tf.zeros(tf.shape(boxes)[0], dtype=tf.int32), bin_crop_size)
image_crops.append(crop)
if global_pool:
# Average over all bins.
position_sensitive_features = tf.add_n(image_crops) / len(image_crops)
# Then average over spatial positions within the bins.
position_sensitive_features = tf.reduce_mean(
position_sensitive_features, [1, 2], keepdims=True)
else:
# Reorder height/width to depth channel.
block_size = bin_crop_size[0]
if block_size >= 2:
image_crops = [tf.space_to_depth(
crop, block_size=block_size) for crop in image_crops]
    # Pack image_crops so that first dimension is for position-sensitive boxes.
position_sensitive_features = tf.stack(image_crops, axis=0)
# Unroll the position-sensitive boxes to spatial positions.
position_sensitive_features = tf.squeeze(
tf.batch_to_space_nd(position_sensitive_features,
block_shape=[1] + num_spatial_bins,
crops=tf.zeros((3, 2), dtype=tf.int32)),
axis=[0])
# Reorder back the depth channel.
if block_size >= 2:
position_sensitive_features = tf.depth_to_space(
position_sensitive_features, block_size=block_size)
return position_sensitive_features
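# Example usage (illustrative sketch): 18 channels split across 3x3 spatial
# bins leaves 18 / 9 = 2 channels per crop.
#
#   image = tf.random.uniform([20, 20, 18])
#   boxes = tf.constant([[0., 0., 1., 1.]])
#   position_sensitive_crop_regions(
#       image, boxes, crop_size=[6, 6], num_spatial_bins=[3, 3],
#       global_pool=True)
#   # -> tensor of shape [1, 1, 1, 2]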
def reframe_box_masks_to_image_masks(box_masks, boxes, image_height,
image_width, resize_method='bilinear'):
"""Transforms the box masks back to full image masks.
Embeds masks in bounding boxes of larger masks whose shapes correspond to
image shape.
Args:
box_masks: A tensor of size [num_masks, mask_height, mask_width].
boxes: A tf.float32 tensor of size [num_masks, 4] containing the box
corners. Row i contains [ymin, xmin, ymax, xmax] of the box
corresponding to mask i. Note that the box corners are in
normalized coordinates.
image_height: Image height. The output mask will have the same height as
the image height.
image_width: Image width. The output mask will have the same width as the
image width.
resize_method: The resize method, either 'bilinear' or 'nearest'. Note that
'bilinear' is only respected if box_masks is a float.
Returns:
A tensor of size [num_masks, image_height, image_width] with the same dtype
as `box_masks`.
"""
resize_method = 'nearest' if box_masks.dtype == tf.uint8 else resize_method
# TODO(rathodv): Make this a public function.
def reframe_box_masks_to_image_masks_default():
"""The default function when there are more than 0 box masks."""
def transform_boxes_relative_to_boxes(boxes, reference_boxes):
boxes = tf.reshape(boxes, [-1, 2, 2])
min_corner = tf.expand_dims(reference_boxes[:, 0:2], 1)
max_corner = tf.expand_dims(reference_boxes[:, 2:4], 1)
denom = max_corner - min_corner
# Prevent a divide by zero.
denom = tf.math.maximum(denom, 1e-4)
transformed_boxes = (boxes - min_corner) / denom
return tf.reshape(transformed_boxes, [-1, 4])
box_masks_expanded = tf.expand_dims(box_masks, axis=3)
num_boxes = tf.shape(box_masks_expanded)[0]
unit_boxes = tf.concat(
[tf.zeros([num_boxes, 2]), tf.ones([num_boxes, 2])], axis=1)
reverse_boxes = transform_boxes_relative_to_boxes(unit_boxes, boxes)
# TODO(vighneshb) Use matmul_crop_and_resize so that the output shape
# is static. This will help us run and test on TPUs.
resized_crops = tf.image.crop_and_resize(
image=box_masks_expanded,
boxes=reverse_boxes,
box_ind=tf.range(num_boxes),
crop_size=[image_height, image_width],
method=resize_method,
extrapolation_value=0)
return tf.cast(resized_crops, box_masks.dtype)
image_masks = tf.cond(
tf.shape(box_masks)[0] > 0,
reframe_box_masks_to_image_masks_default,
lambda: tf.zeros([0, image_height, image_width, 1], box_masks.dtype))
return tf.squeeze(image_masks, axis=3)
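# A minimal usage sketch (shapes illustrative): paste two 33x33 box masks back
# into a 64x64 image canvas.
def _example_reframe_box_masks():  # hypothetical helper, not part of the API
  box_masks = tf.ones([2, 33, 33], dtype=tf.float32)
  boxes = tf.constant([[0.0, 0.0, 0.5, 0.5],
                       [0.25, 0.25, 0.75, 0.75]], dtype=tf.float32)
  # Returns a [2, 64, 64] tensor with each mask embedded in its box region.
  return reframe_box_masks_to_image_masks(box_masks, boxes, 64, 64)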
def merge_boxes_with_multiple_labels(boxes,
classes,
confidences,
num_classes,
quantization_bins=10000):
"""Merges boxes with same coordinates and returns K-hot encoded classes.
Args:
boxes: A tf.float32 tensor with shape [N, 4] holding N boxes. Only
normalized coordinates are allowed.
classes: A tf.int32 tensor with shape [N] holding class indices.
The class index starts at 0.
confidences: A tf.float32 tensor with shape [N] holding class confidences.
num_classes: total number of classes to use for K-hot encoding.
quantization_bins: the number of bins used to quantize the box coordinate.
Returns:
merged_boxes: A tf.float32 tensor with shape [N', 4] holding boxes,
where N' <= N.
class_encodings: A tf.int32 tensor with shape [N', num_classes] holding
K-hot encodings for the merged boxes.
confidence_encodings: A tf.float32 tensor with shape [N', num_classes]
holding encodings of confidences for the merged boxes.
merged_box_indices: A tf.int32 tensor with shape [N'] holding original
indices of the boxes.
"""
boxes_shape = tf.shape(boxes)
classes_shape = tf.shape(classes)
confidences_shape = tf.shape(confidences)
box_class_shape_assert = shape_utils.assert_shape_equal_along_first_dimension(
boxes_shape, classes_shape)
box_confidence_shape_assert = (
shape_utils.assert_shape_equal_along_first_dimension(
boxes_shape, confidences_shape))
box_dimension_assert = tf.assert_equal(boxes_shape[1], 4)
box_normalized_assert = shape_utils.assert_box_normalized(boxes)
with tf.control_dependencies(
[box_class_shape_assert, box_confidence_shape_assert,
box_dimension_assert, box_normalized_assert]):
quantized_boxes = tf.to_int64(boxes * (quantization_bins - 1))
ymin, xmin, ymax, xmax = tf.unstack(quantized_boxes, axis=1)
hashcodes = (
ymin +
xmin * quantization_bins +
ymax * quantization_bins * quantization_bins +
xmax * quantization_bins * quantization_bins * quantization_bins)
unique_hashcodes, unique_indices = tf.unique(hashcodes)
num_boxes = tf.shape(boxes)[0]
num_unique_boxes = tf.shape(unique_hashcodes)[0]
merged_box_indices = tf.unsorted_segment_min(
tf.range(num_boxes), unique_indices, num_unique_boxes)
merged_boxes = tf.gather(boxes, merged_box_indices)
unique_indices = tf.to_int64(unique_indices)
classes = tf.to_int64(classes)
def map_box_encodings(i):
"""Produces box K-hot and score encodings for each class index."""
box_mask = tf.equal(
unique_indices, i * tf.ones(num_boxes, dtype=tf.int64))
box_mask = tf.reshape(box_mask, [-1])
box_indices = tf.boolean_mask(classes, box_mask)
box_confidences = tf.boolean_mask(confidences, box_mask)
box_class_encodings = tf.sparse_to_dense(
box_indices, [num_classes], tf.constant(1, dtype=tf.int64),
validate_indices=False)
box_confidence_encodings = tf.sparse_to_dense(
box_indices, [num_classes], box_confidences, validate_indices=False)
return box_class_encodings, box_confidence_encodings
# Important to avoid int32 here since there is no GPU kernel for int32.
# int64 and float32 are fine.
class_encodings, confidence_encodings = tf.map_fn(
map_box_encodings,
tf.range(tf.to_int64(num_unique_boxes)),
back_prop=False,
dtype=(tf.int64, tf.float32))
merged_boxes = tf.reshape(merged_boxes, [-1, 4])
class_encodings = tf.cast(class_encodings, dtype=tf.int32)
class_encodings = tf.reshape(class_encodings, [-1, num_classes])
confidence_encodings = tf.reshape(confidence_encodings, [-1, num_classes])
merged_box_indices = tf.reshape(merged_box_indices, [-1])
return (merged_boxes, class_encodings, confidence_encodings,
merged_box_indices)
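# A minimal usage sketch (values illustrative): two boxes with identical
# coordinates but different labels merge into one box with a 2-hot encoding.
def _example_merge_boxes():  # hypothetical helper, not part of the API
  boxes = tf.constant([[0.1, 0.1, 0.5, 0.5],
                       [0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)
  classes = tf.constant([0, 2], dtype=tf.int32)
  confidences = tf.constant([0.9, 0.8], dtype=tf.float32)
  # merged_boxes: [[0.1, 0.1, 0.5, 0.5]]; class_encodings: [[1, 0, 1]];
  # confidence_encodings: [[0.9, 0.0, 0.8]].
  return merge_boxes_with_multiple_labels(
      boxes, classes, confidences, num_classes=3)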
def nearest_neighbor_upsampling(input_tensor, scale=None, height_scale=None,
width_scale=None):
"""Nearest neighbor upsampling implementation.
Nearest neighbor upsampling function that maps input tensor with shape
  [batch_size, height, width, channels] to [batch_size, height * scale,
  width * scale, channels]. This implementation only uses reshape and
broadcasting to make it TPU compatible.
Args:
input_tensor: A float32 tensor of size [batch, height_in, width_in,
channels].
scale: An integer multiple to scale resolution of input data in both height
and width dimensions.
height_scale: An integer multiple to scale the height of input image. This
option when provided overrides `scale` option.
width_scale: An integer multiple to scale the width of input image. This
option when provided overrides `scale` option.
Returns:
data_up: A float32 tensor of size
[batch, height_in*scale, width_in*scale, channels].
Raises:
    ValueError: If `scale` is None and either `height_scale` or `width_scale`
      is also None.
"""
if not scale and (height_scale is None or width_scale is None):
raise ValueError('Provide either `scale` or `height_scale` and'
' `width_scale`.')
with tf.name_scope('nearest_neighbor_upsampling'):
h_scale = scale if height_scale is None else height_scale
w_scale = scale if width_scale is None else width_scale
(batch_size, height, width,
channels) = shape_utils.combined_static_and_dynamic_shape(input_tensor)
output_tensor = tf.stack([input_tensor] * w_scale, axis=3)
output_tensor = tf.stack([output_tensor] * h_scale, axis=2)
return tf.reshape(output_tensor,
[batch_size, height * h_scale, width * w_scale, channels])
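# A minimal usage sketch: upsample a [1, 2, 2, 1] feature map by 2x; every
# input value is repeated over a 2x2 output block.
def _example_nn_upsampling():  # hypothetical helper, not part of the API
  features = tf.reshape(tf.range(4, dtype=tf.float32), [1, 2, 2, 1])
  return nearest_neighbor_upsampling(features, scale=2)  # shape [1, 4, 4, 1]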
def matmul_gather_on_zeroth_axis(params, indices, scope=None):
"""Matrix multiplication based implementation of tf.gather on zeroth axis.
TODO(rathodv, jonathanhuang): enable sparse matmul option.
Args:
params: A float32 Tensor. The tensor from which to gather values.
Must be at least rank 1.
indices: A Tensor. Must be one of the following types: int32, int64.
Must be in range [0, params.shape[0])
scope: A name for the operation (optional).
Returns:
A Tensor. Has the same type as params. Values from params gathered
from indices given by indices, with shape indices.shape + params.shape[1:].
"""
with tf.name_scope(scope, 'MatMulGather'):
params_shape = shape_utils.combined_static_and_dynamic_shape(params)
indices_shape = shape_utils.combined_static_and_dynamic_shape(indices)
params2d = tf.reshape(params, [params_shape[0], -1])
indicator_matrix = tf.one_hot(indices, params_shape[0])
gathered_result_flattened = tf.matmul(indicator_matrix, params2d)
return tf.reshape(gathered_result_flattened,
tf.stack(indices_shape + params_shape[1:]))
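# A minimal usage sketch: behaves like tf.gather(params, indices, axis=0) but
# lowers to a one-hot matmul, which is friendlier to TPU compilation.
def _example_matmul_gather():  # hypothetical helper, not part of the API
  params = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  indices = tf.constant([2, 0], dtype=tf.int32)
  return matmul_gather_on_zeroth_axis(params, indices)  # [[5., 6.], [1., 2.]]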
def fpn_feature_levels(num_levels, unit_scale_index, image_ratio, boxes):
"""Returns fpn feature level for each box based on its area.
See section 4.2 of https://arxiv.org/pdf/1612.03144.pdf for details.
Args:
num_levels: An integer indicating the number of feature levels to crop boxes
from.
    unit_scale_index: A 0-based integer indicating the index of the feature
      map which most closely matches the resolution of the pretrained model.
image_ratio: A float indicating the ratio of input image area to pretraining
image area.
boxes: A float tensor of shape [batch, num_boxes, 4] containing boxes of the
form [ymin, xmin, ymax, xmax] in normalized coordinates.
Returns:
An int32 tensor of shape [batch_size, num_boxes] containing feature indices.
"""
assert num_levels > 0, (
'`num_levels` must be > 0. Found {}'.format(num_levels))
assert unit_scale_index < num_levels and unit_scale_index >= 0, (
'`unit_scale_index` must be in [0, {}). Found {}.'.format(
num_levels, unit_scale_index))
box_height_width = boxes[:, :, 2:4] - boxes[:, :, 0:2]
areas_sqrt = tf.sqrt(tf.reduce_prod(box_height_width, axis=2))
log_2 = tf.cast(tf.log(2.0), dtype=boxes.dtype)
levels = tf.cast(
tf.floordiv(tf.log(areas_sqrt * image_ratio), log_2)
+
unit_scale_index,
dtype=tf.int32)
levels = tf.maximum(0, tf.minimum(num_levels - 1, levels))
return levels
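# A minimal usage sketch (values illustrative): with 4 levels and the unit
# scale at index 2, a 0.1-sided box maps to level 0 and a 0.9-sided box to
# level 1 (floor(log2(0.9)) = -1, plus the unit scale index).
def _example_fpn_feature_levels():  # hypothetical helper, not part of the API
  boxes = tf.constant([[[0.0, 0.0, 0.1, 0.1],
                        [0.0, 0.0, 0.9, 0.9]]], dtype=tf.float32)
  return fpn_feature_levels(
      num_levels=4, unit_scale_index=2, image_ratio=1.0, boxes=boxes)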
def bfloat16_to_float32_nested(input_nested):
"""Convert float32 tensors in a nested structure to bfloat16.
Args:
input_nested: A Python dict, values being Tensor or Python list/tuple of
Tensor or Non-Tensor.
Returns:
    A structure with the same layout as `input_nested`, with all bfloat16
    tensors converted to float32.
"""
if isinstance(input_nested, tf.Tensor):
if input_nested.dtype == tf.bfloat16:
return tf.cast(input_nested, dtype=tf.float32)
else:
return input_nested
elif isinstance(input_nested, (list, tuple)):
out_tensor_dict = [bfloat16_to_float32_nested(t) for t in input_nested]
elif isinstance(input_nested, dict):
out_tensor_dict = {
k: bfloat16_to_float32_nested(v) for k, v in input_nested.items()
}
else:
return input_nested
return out_tensor_dict
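# A minimal usage sketch: nested dicts/lists pass through unchanged except
# that bfloat16 tensors are cast to float32.
def _example_bfloat16_to_float32():  # hypothetical helper, not part of the API
  nested = {'a': tf.constant([1.0], dtype=tf.bfloat16),
            'b': [tf.constant([2.0]), 'not-a-tensor']}
  converted = bfloat16_to_float32_nested(nested)
  return converted  # converted['a'].dtype == tf.float32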
def gather_with_padding_values(input_tensor, indices, padding_value):
"""Gathers elements from tensor and pads `padding_value` for ignore indices.
Gathers elements from `input_tensor` based on `indices`. If there are ignore
indices (which are "-1"s) in `indices`, `padding_value` will be gathered for
those positions.
Args:
    input_tensor: An N-D tensor of shape [M, d_1, d_2 .. d_(N-1)] to gather
values from.
indices: A 1-D tensor in which each element is either an index in the
first dimension of input_tensor or -1.
padding_value: A (N-1)-D tensor of shape [d_1, d_2 .. d_(N-1)] which will be
used as gathered value for each ignore index in `indices`.
Returns:
gathered_tensor: A tensor of shape [L, d_1, d_2 .. d_(N-1)] containing
values gathered from input_tensor. The first dimension L is equal to the
length of `indices`.
"""
padding_value = tf.expand_dims(padding_value, axis=0)
input_tensor = tf.concat([padding_value, input_tensor], axis=0)
gather_indices = indices + 1
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
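# A minimal usage sketch: index -1 selects the padding value rather than a row
# of the input tensor.
def _example_gather_with_padding():  # hypothetical helper, not part of the API
  data = tf.constant([[1.0, 1.0], [2.0, 2.0]])
  indices = tf.constant([1, -1, 0], dtype=tf.int32)
  padding = tf.zeros([2], dtype=tf.float32)
  # Returns [[2., 2.], [0., 0.], [1., 1.]].
  return gather_with_padding_values(data, indices, padding)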
EqualizationLossConfig = collections.namedtuple('EqualizationLossConfig',
['weight', 'exclude_prefixes'])
def tile_context_tensors(tensor_dict):
"""Tiles context fields to have num_frames along 0-th dimension."""
num_frames = tf.shape(tensor_dict[fields.InputDataFields.image])[0]
for key in tensor_dict:
if key not in fields.SEQUENCE_FIELDS:
original_tensor = tensor_dict[key]
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
original_tensor)
tensor_dict[key] = tf.tile(
tf.expand_dims(original_tensor, 0),
tf.stack([num_frames] + [1] * len(tensor_shape), axis=0))
return tensor_dict
def decode_image(tensor_dict):
"""Decodes images in a tensor dict."""
tensor_dict[fields.InputDataFields.image] = tf.io.decode_image(
tensor_dict[fields.InputDataFields.image], channels=3)
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
return tensor_dict
def giou(boxes1, boxes2):
"""Computes generalized IOU between two tensors.
Each box should be represented as [ymin, xmin, ymax, xmax].
Args:
boxes1: a tensor with shape [num_boxes, 4]
boxes2: a tensor with shape [num_boxes, 4]
Returns:
a tensor of shape [num_boxes] containing GIoUs
"""
pred_ymin, pred_xmin, pred_ymax, pred_xmax = tf.unstack(boxes1, axis=1)
gt_ymin, gt_xmin, gt_ymax, gt_xmax = tf.unstack(boxes2, axis=1)
gt_area = (gt_ymax - gt_ymin) * (gt_xmax - gt_xmin)
pred_area = (pred_ymax - pred_ymin) * (pred_xmax - pred_xmin)
x1_i = tf.maximum(pred_xmin, gt_xmin)
x2_i = tf.minimum(pred_xmax, gt_xmax)
y1_i = tf.maximum(pred_ymin, gt_ymin)
y2_i = tf.minimum(pred_ymax, gt_ymax)
intersection_area = tf.maximum(0.0, y2_i - y1_i) * tf.maximum(0.0,
x2_i - x1_i)
x1_c = tf.minimum(pred_xmin, gt_xmin)
x2_c = tf.maximum(pred_xmax, gt_xmax)
y1_c = tf.minimum(pred_ymin, gt_ymin)
y2_c = tf.maximum(pred_ymax, gt_ymax)
hull_area = (y2_c - y1_c) * (x2_c - x1_c)
union_area = gt_area + pred_area - intersection_area
iou = tf.where(tf.equal(union_area, 0.0),
tf.zeros_like(union_area), intersection_area / union_area)
giou_ = iou - tf.where(hull_area > 0.0,
(hull_area - union_area) / hull_area, iou)
return giou_
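# A minimal usage sketch (values illustrative): identical boxes give GIoU 1.0;
# disjoint boxes are penalized by the empty part of their convex hull.
def _example_giou():  # hypothetical helper, not part of the API
  boxes1 = tf.constant([[0.0, 0.0, 1.0, 1.0],
                        [0.0, 0.0, 1.0, 1.0]], dtype=tf.float32)
  boxes2 = tf.constant([[0.0, 0.0, 1.0, 1.0],
                        [2.0, 2.0, 3.0, 3.0]], dtype=tf.float32)
  return giou(boxes1, boxes2)  # approximately [1.0, -0.778]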
def center_to_corner_coordinate(input_tensor):
"""Converts input boxes from center to corner representation."""
reshaped_encodings = tf.reshape(input_tensor, [-1, 4])
ycenter = tf.gather(reshaped_encodings, [0], axis=1)
xcenter = tf.gather(reshaped_encodings, [1], axis=1)
h = tf.gather(reshaped_encodings, [2], axis=1)
w = tf.gather(reshaped_encodings, [3], axis=1)
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.squeeze(tf.stack([ymin, xmin, ymax, xmax], axis=1))
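# A minimal usage sketch: [ycenter, xcenter, h, w] -> [ymin, xmin, ymax, xmax].
def _example_center_to_corner():  # hypothetical helper, not part of the API
  boxes_center = tf.constant([[0.5, 0.5, 0.2, 0.4]], dtype=tf.float32)
  return center_to_corner_coordinate(boxes_center)  # [0.4, 0.3, 0.6, 0.7]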
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/ops.py | ops.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions used by target assigner."""
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
def image_shape_to_grids(height, width):
"""Computes xy-grids given the shape of the image.
Args:
height: The height of the image.
width: The width of the image.
Returns:
A tuple of two tensors:
y_grid: A float tensor with shape [height, width] representing the
y-coordinate of each pixel grid.
x_grid: A float tensor with shape [height, width] representing the
x-coordinate of each pixel grid.
"""
out_height = tf.cast(height, tf.float32)
out_width = tf.cast(width, tf.float32)
x_range = tf.range(out_width, dtype=tf.float32)
y_range = tf.range(out_height, dtype=tf.float32)
x_grid, y_grid = tf.meshgrid(x_range, y_range, indexing='xy')
return (y_grid, x_grid)
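# A minimal usage sketch: for a 2x3 image, y_grid rows increase top-to-bottom
# and x_grid columns increase left-to-right.
def _example_image_shape_to_grids():  # hypothetical helper, not in the API
  y_grid, x_grid = image_shape_to_grids(height=2, width=3)
  # y_grid: [[0., 0., 0.], [1., 1., 1.]]
  # x_grid: [[0., 1., 2.], [0., 1., 2.]]
  return y_grid, x_grid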
def _coordinates_to_heatmap_dense(y_grid, x_grid, y_coordinates, x_coordinates,
sigma, channel_onehot, channel_weights=None):
"""Dense version of coordinates to heatmap that uses an outer product."""
num_instances, num_channels = (
shape_utils.combined_static_and_dynamic_shape(channel_onehot))
x_grid = tf.expand_dims(x_grid, 2)
y_grid = tf.expand_dims(y_grid, 2)
# The raw center coordinates in the output space.
x_diff = x_grid - tf.math.floor(x_coordinates)
y_diff = y_grid - tf.math.floor(y_coordinates)
squared_distance = x_diff**2 + y_diff**2
gaussian_map = tf.exp(-squared_distance / (2 * sigma * sigma))
reshaped_gaussian_map = tf.expand_dims(gaussian_map, axis=-1)
reshaped_channel_onehot = tf.reshape(channel_onehot,
(1, 1, num_instances, num_channels))
gaussian_per_box_per_class_map = (
reshaped_gaussian_map * reshaped_channel_onehot)
if channel_weights is not None:
reshaped_weights = tf.reshape(channel_weights, (1, 1, num_instances, 1))
gaussian_per_box_per_class_map *= reshaped_weights
# Take maximum along the "instance" dimension so that all per-instance
# heatmaps of the same class are merged together.
heatmap = tf.reduce_max(gaussian_per_box_per_class_map, axis=2)
  # The maximum of an empty tensor is -inf; the following avoids that.
heatmap = tf.maximum(heatmap, 0)
return tf.stop_gradient(heatmap)
def _coordinates_to_heatmap_sparse(y_grid, x_grid, y_coordinates, x_coordinates,
sigma, channel_onehot, channel_weights=None):
"""Sparse version of coordinates to heatmap using tf.scatter."""
if not hasattr(tf, 'tensor_scatter_nd_max'):
raise RuntimeError(
        ('Please upgrade tensorflow to use `tensor_scatter_nd_max` or set '
'compute_heatmap_sparse=False'))
_, num_channels = (
shape_utils.combined_static_and_dynamic_shape(channel_onehot))
height, width = shape_utils.combined_static_and_dynamic_shape(y_grid)
x_grid = tf.expand_dims(x_grid, 2)
y_grid = tf.expand_dims(y_grid, 2)
# The raw center coordinates in the output space.
x_diff = x_grid - tf.math.floor(x_coordinates)
y_diff = y_grid - tf.math.floor(y_coordinates)
squared_distance = x_diff**2 + y_diff**2
gaussian_map = tf.exp(-squared_distance / (2 * sigma * sigma))
if channel_weights is not None:
gaussian_map = gaussian_map * channel_weights[tf.newaxis, tf.newaxis, :]
channel_indices = tf.argmax(channel_onehot, axis=1)
channel_indices = channel_indices[:, tf.newaxis]
heatmap_init = tf.zeros((num_channels, height, width))
gaussian_map = tf.transpose(gaussian_map, (2, 0, 1))
heatmap = tf.tensor_scatter_nd_max(
heatmap_init, channel_indices, gaussian_map)
  # The maximum of an empty tensor is -inf; the following avoids that.
heatmap = tf.maximum(heatmap, 0)
return tf.stop_gradient(tf.transpose(heatmap, (1, 2, 0)))
def coordinates_to_heatmap(y_grid,
x_grid,
y_coordinates,
x_coordinates,
sigma,
channel_onehot,
channel_weights=None,
sparse=False):
"""Returns the heatmap targets from a set of point coordinates.
This function maps a set of point coordinates to the output heatmap image
  applied using a Gaussian kernel. Note that this function can be used by both
object detection and keypoint estimation tasks. For object detection, the
"channel" refers to the object class. For keypoint estimation, the "channel"
refers to the number of keypoint types.
Args:
y_grid: A 2D tensor with shape [height, width] which contains the grid
y-coordinates given in the (output) image dimensions.
x_grid: A 2D tensor with shape [height, width] which contains the grid
x-coordinates given in the (output) image dimensions.
y_coordinates: A 1D tensor with shape [num_instances] representing the
y-coordinates of the instances in the output space coordinates.
x_coordinates: A 1D tensor with shape [num_instances] representing the
x-coordinates of the instances in the output space coordinates.
sigma: A 1D tensor with shape [num_instances] representing the standard
deviation of the Gaussian kernel to be applied to the point.
channel_onehot: A 2D tensor with shape [num_instances, num_channels]
representing the one-hot encoded channel labels for each point.
channel_weights: A 1D tensor with shape [num_instances] corresponding to the
weight of each instance.
sparse: bool, indicating whether or not to use the sparse implementation
of the function. The sparse version scales better with number of channels,
but in some cases is known to cause OOM error. See (b/170989061).
Returns:
heatmap: A tensor of size [height, width, num_channels] representing the
heatmap. Output (height, width) match the dimensions of the input grids.
"""
if sparse:
return _coordinates_to_heatmap_sparse(
y_grid, x_grid, y_coordinates, x_coordinates, sigma, channel_onehot,
channel_weights)
else:
return _coordinates_to_heatmap_dense(
y_grid, x_grid, y_coordinates, x_coordinates, sigma, channel_onehot,
channel_weights)
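# A minimal usage sketch (values illustrative): one instance of channel 1
# centered at (y=2, x=3) yields a Gaussian bump in channel 1 with peak 1.0.
def _example_coordinates_to_heatmap():  # hypothetical helper, not in the API
  y_grid, x_grid = image_shape_to_grids(height=8, width=8)
  heatmap = coordinates_to_heatmap(
      y_grid, x_grid,
      y_coordinates=tf.constant([2.0]),
      x_coordinates=tf.constant([3.0]),
      sigma=tf.constant([1.0]),
      channel_onehot=tf.constant([[0.0, 1.0]]))
  return heatmap  # shape [8, 8, 2]; heatmap[2, 3, 1] == 1.0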
def compute_floor_offsets_with_indices(y_source,
x_source,
y_target=None,
x_target=None):
"""Computes offsets from floored source(floored) to target coordinates.
This function computes the offsets from source coordinates ("floored" as if
they were put on the grids) to target coordinates. Note that the input
coordinates should be the "absolute" coordinates in terms of the output image
dimensions as opposed to the normalized coordinates (i.e. values in [0, 1]).
If the input y and x source have the second dimension (representing the
neighboring pixels), then the offsets are computed from each of the
neighboring pixels to their corresponding target (first dimension).
Args:
y_source: A tensor with shape [num_points] (or [num_points, num_neighbors])
representing the absolute y-coordinates (in the output image space) of the
source points.
x_source: A tensor with shape [num_points] (or [num_points, num_neighbors])
representing the absolute x-coordinates (in the output image space) of the
source points.
y_target: A tensor with shape [num_points] representing the absolute
y-coordinates (in the output image space) of the target points. If not
provided, then y_source is used as the targets.
x_target: A tensor with shape [num_points] representing the absolute
x-coordinates (in the output image space) of the target points. If not
provided, then x_source is used as the targets.
Returns:
A tuple of two tensors:
offsets: A tensor with shape [num_points, 2] (or
[num_points, num_neighbors, 2]) representing the offsets of each input
point.
indices: A tensor with shape [num_points, 2] (or
[num_points, num_neighbors, 2]) representing the indices of where the
offsets should be retrieved in the output image dimension space.
  Raises:
ValueError: source and target shapes have unexpected values.
"""
y_source_floored = tf.floor(y_source)
x_source_floored = tf.floor(x_source)
source_shape = shape_utils.combined_static_and_dynamic_shape(y_source)
if y_target is None and x_target is None:
y_target = y_source
x_target = x_source
else:
target_shape = shape_utils.combined_static_and_dynamic_shape(y_target)
if len(source_shape) == 2 and len(target_shape) == 1:
_, num_neighbors = source_shape
y_target = tf.tile(
tf.expand_dims(y_target, -1), multiples=[1, num_neighbors])
x_target = tf.tile(
tf.expand_dims(x_target, -1), multiples=[1, num_neighbors])
elif source_shape != target_shape:
raise ValueError('Inconsistent source and target shape.')
y_offset = y_target - y_source_floored
x_offset = x_target - x_source_floored
y_source_indices = tf.cast(y_source_floored, tf.int32)
x_source_indices = tf.cast(x_source_floored, tf.int32)
indices = tf.stack([y_source_indices, x_source_indices], axis=-1)
offsets = tf.stack([y_offset, x_offset], axis=-1)
return offsets, indices
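# A minimal usage sketch: with no explicit targets, the offsets are simply the
# sub-pixel fractions lost by flooring the source coordinates.
def _example_floor_offsets():  # hypothetical helper, not part of the API
  offsets, indices = compute_floor_offsets_with_indices(
      y_source=tf.constant([2.7]), x_source=tf.constant([3.2]))
  # offsets: approximately [[0.7, 0.2]]; indices: [[2, 3]]
  return offsets, indices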
def get_valid_keypoint_mask_for_class(keypoint_coordinates,
class_id,
class_onehot,
class_weights=None,
keypoint_indices=None):
"""Mask keypoints by their class ids and indices.
For a given task, we may want to only consider a subset of instances or
keypoints. This function is used to provide the mask (in terms of weights) to
mark those elements which should be considered based on the classes of the
instances and optionally, their keypoint indices. Note that the NaN values
in the keypoints will also be masked out.
Args:
keypoint_coordinates: A float tensor with shape [num_instances,
num_keypoints, 2] which contains the coordinates of each keypoint.
class_id: An integer representing the target class id to be selected.
    class_onehot: A 2D tensor of shape [num_instances, num_classes]
      representing the one-hot (or k-hot) encoding of the class for each
      instance.
    class_weights: A 1D tensor of shape [num_instances] representing the
      weight of each instance. If not provided, all instances are weighted
      equally.
keypoint_indices: A list of integers representing the keypoint indices used
to select the values on the keypoint dimension. If provided, the output
dimension will be [num_instances, len(keypoint_indices)]
Returns:
A tuple of tensors:
mask: A float tensor of shape [num_instances, K], where K is num_keypoints
or len(keypoint_indices) if provided. The tensor has values either 0 or
1 indicating whether an element in the input keypoints should be used.
keypoints_nan_to_zeros: Same as input keypoints with the NaN values
replaced by zeros and selected columns corresponding to the
keypoint_indices (if provided). The shape of this tensor will always be
the same as the output mask.
"""
num_keypoints = tf.shape(keypoint_coordinates)[1]
class_mask = class_onehot[:, class_id]
reshaped_class_mask = tf.tile(
tf.expand_dims(class_mask, axis=-1), multiples=[1, num_keypoints])
not_nan = tf.math.logical_not(tf.math.is_nan(keypoint_coordinates))
mask = reshaped_class_mask * tf.cast(not_nan[:, :, 0], dtype=tf.float32)
keypoints_nan_to_zeros = tf.where(not_nan, keypoint_coordinates,
tf.zeros_like(keypoint_coordinates))
if class_weights is not None:
reshaped_class_weight = tf.tile(
tf.expand_dims(class_weights, axis=-1), multiples=[1, num_keypoints])
mask = mask * reshaped_class_weight
if keypoint_indices is not None:
mask = tf.gather(mask, indices=keypoint_indices, axis=1)
keypoints_nan_to_zeros = tf.gather(
keypoints_nan_to_zeros, indices=keypoint_indices, axis=1)
return mask, keypoints_nan_to_zeros
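# A minimal usage sketch (values illustrative): keep keypoints only for
# instances of class 1 and mask out NaN coordinates.
def _example_keypoint_mask():  # hypothetical helper, not part of the API
  keypoints = tf.constant([[[0.1, 0.2], [float('nan'), 0.4]],
                           [[0.5, 0.6], [0.7, 0.8]]], dtype=tf.float32)
  class_onehot = tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)
  mask, keypoints_clean = get_valid_keypoint_mask_for_class(
      keypoints, class_id=1, class_onehot=class_onehot)
  # mask: [[1., 0.], [0., 0.]] -- only instance 0 is class 1, and its second
  # keypoint is masked out because of the NaN coordinate.
  return mask, keypoints_clean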
def blackout_pixel_weights_by_box_regions(height, width, boxes, blackout,
weights=None):
"""Apply weights at pixel locations.
This function is used to generate the pixel weight mask (usually in the output
  image dimension). The mask is used to ignore some regions when computing
  loss.
Weights are applied as follows:
- Any region outside of a box gets the default weight 1.0
  - Any box for which an explicit weight is specified gets that weight. If
multiple boxes overlap, the maximum of the weights is applied.
- Any box for which blackout=True is specified will get a weight of 0.0,
regardless of whether an equivalent non-zero weight is specified. Also, the
blackout region takes precedence over other boxes which may overlap with
non-zero weight.
Example:
height = 4
width = 4
boxes = [[0., 0., 2., 2.],
[0., 0., 4., 2.],
[3., 0., 4., 4.]]
blackout = [False, False, True]
weights = [4.0, 3.0, 2.0]
blackout_pixel_weights_by_box_regions(height, width, boxes, blackout,
weights)
>> [[4.0, 4.0, 1.0, 1.0],
[4.0, 4.0, 1.0, 1.0],
[3.0, 3.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]
Args:
height: int, height of the (output) image.
width: int, width of the (output) image.
boxes: A float tensor with shape [num_instances, 4] indicating the
coordinates of the four corners of the boxes.
blackout: A boolean tensor with shape [num_instances] indicating whether to
blackout (zero-out) the weights within the box regions.
weights: An optional float32 tensor with shape [num_instances] indicating
a value to apply in each box region. Note that if blackout=True for a
given box, the weight will be zero. If None, all weights are assumed to be
1.
Returns:
A float tensor with shape [height, width] where all values within the
regions of the blackout boxes are 0.0 and 1.0 (or weights if supplied)
elsewhere.
"""
num_instances, _ = shape_utils.combined_static_and_dynamic_shape(boxes)
  # If no annotation instances are provided, return all ones (instead of
  # unexpected values) to avoid a NaN loss value.
if num_instances == 0:
return tf.ones([height, width], dtype=tf.float32)
(y_grid, x_grid) = image_shape_to_grids(height, width)
y_grid = tf.expand_dims(y_grid, axis=0)
x_grid = tf.expand_dims(x_grid, axis=0)
y_min = tf.expand_dims(boxes[:, 0:1], axis=-1)
x_min = tf.expand_dims(boxes[:, 1:2], axis=-1)
y_max = tf.expand_dims(boxes[:, 2:3], axis=-1)
x_max = tf.expand_dims(boxes[:, 3:], axis=-1)
# Make the mask with all 1.0 in the box regions.
# Shape: [num_instances, height, width]
in_boxes = tf.math.logical_and(
tf.math.logical_and(y_grid >= y_min, y_grid < y_max),
tf.math.logical_and(x_grid >= x_min, x_grid < x_max))
if weights is None:
weights = tf.ones_like(blackout, dtype=tf.float32)
# Compute a [height, width] tensor with the maximum weight in each box, and
# 0.0 elsewhere.
weights_tiled = tf.tile(
weights[:, tf.newaxis, tf.newaxis], [1, height, width])
weights_3d = tf.where(in_boxes, weights_tiled,
tf.zeros_like(weights_tiled))
weights_2d = tf.math.maximum(
tf.math.reduce_max(weights_3d, axis=0), 0.0)
# Add 1.0 to all regions outside a box.
weights_2d = tf.where(
tf.math.reduce_any(in_boxes, axis=0),
weights_2d,
tf.ones_like(weights_2d))
# Now enforce that blackout regions all have zero weights.
keep_region = tf.cast(tf.math.logical_not(blackout), tf.float32)
keep_region_tiled = tf.tile(
keep_region[:, tf.newaxis, tf.newaxis], [1, height, width])
keep_region_3d = tf.where(in_boxes, keep_region_tiled,
tf.ones_like(keep_region_tiled))
keep_region_2d = tf.math.reduce_min(keep_region_3d, axis=0)
return weights_2d * keep_region_2d
def _get_yx_indices_offset_by_radius(radius):
"""Gets the y and x index offsets that are within the radius."""
y_offsets = []
x_offsets = []
for y_offset in range(-radius, radius + 1, 1):
for x_offset in range(-radius, radius + 1, 1):
if x_offset ** 2 + y_offset ** 2 <= radius ** 2:
y_offsets.append(y_offset)
x_offsets.append(x_offset)
return (tf.constant(y_offsets, dtype=tf.float32),
tf.constant(x_offsets, dtype=tf.float32))
def get_surrounding_grids(height, width, y_coordinates, x_coordinates, radius):
"""Gets the indices of the surrounding pixels of the input y, x coordinates.
This function returns the pixel indices corresponding to the (floor of the)
input coordinates and their surrounding pixels within the radius. If the
radius is set to 0, then only the pixels that correspond to the floor of the
coordinates will be returned. If the radius is larger than 0, then all of the
pixels within the radius of the "floor pixels" will also be returned. For
  example, if the input coordinate is [2.1, 3.5] and radius is 1, then the five
pixel indices will be returned: [2, 3], [1, 3], [2, 2], [2, 4], [3, 3]. Also,
if the surrounding pixels are outside of valid image region, then the returned
pixel indices will be [0, 0] and its corresponding "valid" value will be
False.
Args:
height: int, the height of the output image.
width: int, the width of the output image.
y_coordinates: A tensor with shape [num_points] representing the absolute
y-coordinates (in the output image space) of the points.
x_coordinates: A tensor with shape [num_points] representing the absolute
x-coordinates (in the output image space) of the points.
radius: int, the radius of the neighboring pixels to be considered and
returned. If set to 0, then only the pixel indices corresponding to the
floor of the input coordinates will be returned.
Returns:
A tuple of three tensors:
y_indices: A [num_points, num_neighbors] float tensor representing the
pixel y indices corresponding to the input points within radius. The
"num_neighbors" is determined by the size of the radius.
x_indices: A [num_points, num_neighbors] float tensor representing the
pixel x indices corresponding to the input points within radius. The
"num_neighbors" is determined by the size of the radius.
valid: A [num_points, num_neighbors] boolean tensor representing whether
each returned index is in valid image region or not.
"""
# Floored y, x: [num_points, 1].
y_center = tf.expand_dims(tf.math.floor(y_coordinates), axis=-1)
x_center = tf.expand_dims(tf.math.floor(x_coordinates), axis=-1)
y_offsets, x_offsets = _get_yx_indices_offset_by_radius(radius)
# Indices offsets: [1, num_neighbors].
y_offsets = tf.expand_dims(y_offsets, axis=0)
x_offsets = tf.expand_dims(x_offsets, axis=0)
# Floor + offsets: [num_points, num_neighbors].
y_output = y_center + y_offsets
x_output = x_center + x_offsets
default_output = tf.zeros_like(y_output)
valid = tf.logical_and(
tf.logical_and(x_output >= 0, x_output < width),
tf.logical_and(y_output >= 0, y_output < height))
y_output = tf.where(valid, y_output, default_output)
x_output = tf.where(valid, x_output, default_output)
return (y_output, x_output, valid)
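# A minimal usage sketch: radius 1 around (2.1, 3.5) yields the floored pixel
# (2, 3) plus its 4-neighborhood; the neighbor (2, 4) lies outside a width-4
# image, so its index is reset to (0, 0) and flagged invalid.
def _example_surrounding_grids():  # hypothetical helper, not part of the API
  return get_surrounding_grids(
      height=4, width=4, y_coordinates=tf.constant([2.1]),
      x_coordinates=tf.constant([3.5]), radius=1)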
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/target_assigner_utils.py | target_assigner_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy BoxMaskList classes and functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from object_detection.utils import np_box_list
class BoxMaskList(np_box_list.BoxList):
"""Convenience wrapper for BoxList with masks.
BoxMaskList extends the np_box_list.BoxList to contain masks as well.
In particular, its constructor receives both boxes and masks. Note that the
masks correspond to the full image.
"""
def __init__(self, box_data, mask_data):
"""Constructs box collection.
Args:
box_data: a numpy array of shape [N, 4] representing box coordinates
      mask_data: a numpy array of shape [N, height, width] representing masks
        with values in {0, 1}. The masks correspond to the full image. The
        height and the width will be equal to the image height and width.
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
ValueError: if mask data is not a numpy array
ValueError: if invalid dimension for mask data
"""
super(BoxMaskList, self).__init__(box_data)
if not isinstance(mask_data, np.ndarray):
raise ValueError('Mask data must be a numpy array.')
if len(mask_data.shape) != 3:
raise ValueError('Invalid dimensions for mask data.')
if mask_data.dtype != np.uint8:
raise ValueError('Invalid data type for mask data: uint8 is required.')
if mask_data.shape[0] != box_data.shape[0]:
raise ValueError('There should be the same number of boxes and masks.')
self.data['masks'] = mask_data
def get_masks(self):
"""Convenience function for accessing masks.
Returns:
a numpy array of shape [N, height, width] representing masks
"""
return self.get_field('masks')
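# A minimal usage sketch: construct a BoxMaskList with one box and one
# full-image uint8 mask, then read the masks back.
def _example_box_mask_list():  # hypothetical helper, not part of the API
  boxes = np.array([[0.0, 0.0, 1.0, 1.0]], dtype=float)
  masks = np.ones([1, 4, 4], dtype=np.uint8)
  box_mask_list = BoxMaskList(box_data=boxes, mask_data=masks)
  return box_mask_list.get_masks()  # shape (1, 4, 4)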
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_mask_list.py | np_box_mask_list.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.np_box_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_ops
class BoxOpsTests(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxes1 = boxes1
self.boxes2 = boxes2
def testArea(self):
areas = np_box_ops.area(self.boxes1)
expected_areas = np.array([6.0, 5.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def testIntersection(self):
intersection = np_box_ops.intersection(self.boxes1, self.boxes2)
expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def testIOU(self):
iou = np_box_ops.iou(self.boxes1, self.boxes2)
expected_iou = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
[1.0 / 16.0, 0.0, 5.0 / 400.0]],
dtype=float)
self.assertAllClose(iou, expected_iou)
def testIOA(self):
boxes1 = np.array([[0.25, 0.25, 0.75, 0.75],
[0.0, 0.0, 0.5, 0.75]],
dtype=np.float32)
boxes2 = np.array([[0.5, 0.25, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]],
dtype=np.float32)
ioa21 = np_box_ops.ioa(boxes2, boxes1)
expected_ioa21 = np.array([[0.5, 0.0],
[1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_ops_test.py | np_box_ops_test.py |