# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.third_party.tensorflow_models.object_detection.utils.test_case."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
class TestCaseTest(test_case.TestCase):
def test_simple(self):
def graph_fn(tensora, tensorb):
return tf.tensordot(tensora, tensorb, axes=1)
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output, 40.0)
def test_two_outputs(self):
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
def test_function_with_tf_assert(self):
def compute_fn(image):
return tf.image.pad_to_bounding_box(image, 0, 0, 40, 40)
image_np = np.random.rand(2, 20, 30, 3)
output = self.execute(compute_fn, [image_np])
self.assertAllEqual(output.shape, [2, 40, 40, 3])
def test_tf2_only_test(self):
"""Set up tests only to run with TF2."""
if self.is_tf2():
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute_tf2(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
def test_tpu_only_test(self):
"""Set up tests only to run with TPU."""
if self.has_tpu():
def graph_fn(tensora, tensorb):
return tensora + tensorb, tensora - tensorb
tensora_np = np.ones(20)
tensorb_np = tensora_np * 2
output = self.execute_tpu(graph_fn, [tensora_np, tensorb_np])
self.assertAllClose(output[0], tensora_np + tensorb_np)
self.assertAllClose(output[1], tensora_np - tensorb_np)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/test_case_test.py | test_case_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to manipulate feature map pyramids, such as for FPNs and BiFPNs.
Includes utility functions to facilitate feature pyramid map manipulations,
such as combining multiple feature maps, upsampling or downsampling feature
maps, and applying blocks of convolution, batchnorm, and activation layers.
"""
from six.moves import range
import tensorflow as tf
from object_detection.utils import ops
from object_detection.utils import shape_utils
def create_conv_block(name, num_filters, kernel_size, strides, padding,
use_separable, apply_batchnorm, apply_activation,
conv_hyperparams, is_training, freeze_batchnorm,
conv_bn_act_pattern=True):
"""Create Keras layers for regular or separable convolutions.
Args:
name: String. The name of the layer.
num_filters: Number of filters (channels) for the output feature maps.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters, or a single int if both values are the same.
strides: A list of length 2: [stride_height, stride_width], specifying the
convolution stride, or a single int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
use_separable: Bool. Whether to use depthwise separable convolution instead
of regular convolution.
apply_batchnorm: Bool. Whether to apply a batch normalization layer after
convolution, constructed according to the conv_hyperparams.
apply_activation: Bool. Whether to apply an activation layer after
convolution, constructed according to the conv_hyperparams.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
conv_bn_act_pattern: Bool. By default, when True, the layers returned by
this function are in the order [conv, batchnorm, activation]. Otherwise,
when False, the order of the layers is [activation, conv, batchnorm].
Returns:
    A list of Keras layers, including (regular or separable) convolution, and
optionally batch normalization and activation layers.
"""
layers = []
if use_separable:
kwargs = conv_hyperparams.params()
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
# TODO(aom): Verify that the pointwise regularizer/initializer should be set
# here, since this is not the case in feature_map_generators.py
kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['pointwise_initializer'] = kwargs['kernel_initializer']
layers.append(
tf.keras.layers.SeparableConv2D(
filters=num_filters,
kernel_size=kernel_size,
depth_multiplier=1,
padding=padding,
strides=strides,
name=name + 'separable_conv',
**kwargs))
else:
layers.append(
tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=kernel_size,
padding=padding,
strides=strides,
name=name + 'conv',
**conv_hyperparams.params()))
if apply_batchnorm:
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=name + 'batchnorm'))
if apply_activation:
activation_layer = conv_hyperparams.build_activation_layer(
name=name + 'activation')
if conv_bn_act_pattern:
layers.append(activation_layer)
else:
layers = [activation_layer] + layers
return layers
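# A minimal usage sketch (illustrative, not part of the original module): the
# returned layers are meant to be applied in sequence. `conv_hyperparams` is
# assumed to be a hyperparams_builder.KerasLayerHyperparams built elsewhere.
def _example_apply_conv_block(feature_map, conv_hyperparams):
  layers = create_conv_block(
      name='example_', num_filters=64, kernel_size=3, strides=1,
      padding='SAME', use_separable=False, apply_batchnorm=True,
      apply_activation=True, conv_hyperparams=conv_hyperparams,
      is_training=False, freeze_batchnorm=True)
  for layer in layers:
    feature_map = layer(feature_map)
  return feature_map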
def create_downsample_feature_map_ops(scale, downsample_method,
conv_hyperparams, is_training,
freeze_batchnorm, name):
"""Creates Keras layers for downsampling feature maps.
Args:
scale: Int. The scale factor by which to downsample input feature maps. For
example, in the case of a typical feature map pyramid, the scale factor
between level_i and level_i+1 is 2.
downsample_method: String. The method used for downsampling. Currently
supported methods include 'max_pooling', 'avg_pooling', and
'depthwise_conv'.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will downsample input feature maps by the
desired scale factor.
"""
layers = []
padding = 'SAME'
stride = int(scale)
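  # A window one pixel larger than the stride, combined with 'SAME' padding,
  # downsamples by exactly `scale` while letting adjacent windows overlap.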
kernel_size = stride + 1
if downsample_method == 'max_pooling':
layers.append(
tf.keras.layers.MaxPooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_max_x{}'.format(stride)))
elif downsample_method == 'avg_pooling':
layers.append(
tf.keras.layers.AveragePooling2D(
pool_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_avg_x{}'.format(stride)))
elif downsample_method == 'depthwise_conv':
layers.append(
tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=stride,
padding=padding,
name=name + 'downsample_depthwise_x{}'.format(stride)))
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=name + 'downsample_batchnorm'))
layers.append(
conv_hyperparams.build_activation_layer(name=name +
'downsample_activation'))
else:
raise ValueError('Unknown downsample method: {}'.format(downsample_method))
return layers
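# Usage sketch (illustrative): downsampling a feature map by 2x with max
# pooling. `conv_hyperparams` is assumed to be a
# hyperparams_builder.KerasLayerHyperparams; the pooling methods do not use it.
def _example_downsample_2x(feature_map, conv_hyperparams):
  layers = create_downsample_feature_map_ops(
      scale=2, downsample_method='max_pooling',
      conv_hyperparams=conv_hyperparams, is_training=False,
      freeze_batchnorm=True, name='example_')
  for layer in layers:
    feature_map = layer(feature_map)
  return feature_map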
def create_upsample_feature_map_ops(scale, use_native_resize_op, name):
"""Creates Keras layers for upsampling feature maps.
Args:
scale: Int. The scale factor by which to upsample input feature maps. For
example, in the case of a typical feature map pyramid, the scale factor
between level_i and level_i-1 is 2.
    use_native_resize_op: Bool. If True, uses tf.image.resize_nearest_neighbor
      for upsampling instead of the reshape-and-broadcast implementation.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will upsample input feature maps by the
desired scale factor.
"""
layers = []
if use_native_resize_op:
def resize_nearest_neighbor(image):
image_shape = shape_utils.combined_static_and_dynamic_shape(image)
return tf.image.resize_nearest_neighbor(
image, [image_shape[1] * scale, image_shape[2] * scale])
layers.append(
tf.keras.layers.Lambda(
resize_nearest_neighbor,
name=name + 'nearest_neighbor_upsampling_x{}'.format(scale)))
else:
def nearest_neighbor_upsampling(image):
return ops.nearest_neighbor_upsampling(image, scale=scale)
layers.append(
tf.keras.layers.Lambda(
nearest_neighbor_upsampling,
name=name + 'nearest_neighbor_upsampling_x{}'.format(scale)))
return layers
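# Usage sketch (illustrative): 2x nearest neighbor upsampling with the default
# reshape-and-broadcast implementation; `feature_map` is any
# [batch, height, width, channels] tensor.
def _example_upsample_2x(feature_map):
  layers = create_upsample_feature_map_ops(
      scale=2, use_native_resize_op=False, name='example_')
  for layer in layers:
    feature_map = layer(feature_map)
  return feature_map  # Spatial dimensions are now doubled.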
def create_resample_feature_map_ops(input_scale_factor, output_scale_factor,
downsample_method, use_native_resize_op,
conv_hyperparams, is_training,
freeze_batchnorm, name):
"""Creates Keras layers for downsampling or upsampling feature maps.
Args:
input_scale_factor: Int. Scale factor of the input feature map. For example,
for a feature pyramid where each successive level halves its spatial
resolution, the scale factor of a level is 2^level. The input and output
      scale factors are used to compute the scale for upsampling or
      downsampling, so one must evenly divide the other.
output_scale_factor: Int. Scale factor of the output feature map. See
input_scale_factor for additional details.
downsample_method: String. The method used for downsampling. See
create_downsample_feature_map_ops for details on supported methods.
    use_native_resize_op: Bool. If True, uses tf.image.resize_nearest_neighbor
      for upsampling instead of the reshape-and-broadcast implementation.
See create_upsample_feature_map_ops for details.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Bool. Whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: String. The name used to prefix the constructed layers.
Returns:
A list of Keras layers which will downsample or upsample input feature maps
to match the desired output feature map scale.
"""
if input_scale_factor < output_scale_factor:
if output_scale_factor % input_scale_factor != 0:
      raise ValueError('Invalid scale factor: input scale 1/{} not divisible '
                       'by output scale 1/{}'.format(input_scale_factor,
                                                     output_scale_factor))
scale = output_scale_factor // input_scale_factor
return create_downsample_feature_map_ops(scale, downsample_method,
conv_hyperparams, is_training,
freeze_batchnorm, name)
elif input_scale_factor > output_scale_factor:
if input_scale_factor % output_scale_factor != 0:
      raise ValueError('Invalid scale factor: input scale 1/{} not a divisor '
                       'of output scale 1/{}'.format(input_scale_factor,
                                                     output_scale_factor))
scale = input_scale_factor // output_scale_factor
return create_upsample_feature_map_ops(scale, use_native_resize_op, name)
else:
return []
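# Worked example of the scale arithmetic above (illustrative helper, assuming
# pyramid level i has scale factor 2**i): resampling level 3 to level 5 gives
# input_scale_factor=8 < output_scale_factor=32, so downsampling ops with
# scale = 32 // 8 = 4 are returned; the reverse direction yields 4x upsampling.
def _example_resample_between_levels(feature_map, input_level, output_level,
                                     conv_hyperparams):
  layers = create_resample_feature_map_ops(
      input_scale_factor=2**input_level,
      output_scale_factor=2**output_level,
      downsample_method='max_pooling', use_native_resize_op=False,
      conv_hyperparams=conv_hyperparams, is_training=False,
      freeze_batchnorm=True, name='example_')
  for layer in layers:
    feature_map = layer(feature_map)
  return feature_map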
# TODO(aom): Add tests for this module in a followup CL.
class BiFPNCombineLayer(tf.keras.layers.Layer):
"""Combines multiple input feature maps into a single output feature map.
A Keras layer which combines multiple input feature maps into a single output
feature map, according to the desired combination method. Options for
combining feature maps include simple summation, or several types of weighted
sums using learned weights for each input feature map. These include
'weighted_sum', 'attention', and 'fast_attention'. For more details, see the
  EfficientDet paper by Tan et al.: arxiv.org/abs/1911.09070.
Specifically, this layer takes a list of tensors as input, all of the same
shape, and returns a single tensor, also of the same shape.
"""
def __init__(self, combine_method, **kwargs):
"""Constructor.
Args:
combine_method: String. The method used to combine the input feature maps
into a single output feature map. One of 'sum', 'weighted_sum',
'attention', or 'fast_attention'.
**kwargs: Additional Keras layer arguments.
"""
super(BiFPNCombineLayer, self).__init__(**kwargs)
self.combine_method = combine_method
def _combine_weighted_sum(self, inputs):
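    # Stacks the n inputs along a new trailing axis ([..., depth, n]) and
    # matmuls with the learned (n, 1) weights, producing a weighted sum that
    # is then squeezed back to the common input shape.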
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), self.per_input_weights),
axis=[-1])
def _combine_attention(self, inputs):
normalized_weights = tf.nn.softmax(self.per_input_weights)
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), normalized_weights),
axis=[-1])
def _combine_fast_attention(self, inputs):
weights_non_neg = tf.nn.relu(self.per_input_weights)
normalizer = tf.reduce_sum(weights_non_neg) + 0.0001
normalized_weights = weights_non_neg / normalizer
return tf.squeeze(
tf.linalg.matmul(tf.stack(inputs, axis=-1), normalized_weights),
axis=[-1])
def build(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('A BiFPN combine layer should be called '
'on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A BiFPN combine layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
if self.combine_method == 'sum':
self._combine_op = tf.keras.layers.Add()
elif self.combine_method == 'weighted_sum':
self._combine_op = self._combine_weighted_sum
elif self.combine_method == 'attention':
self._combine_op = self._combine_attention
elif self.combine_method == 'fast_attention':
self._combine_op = self._combine_fast_attention
else:
raise ValueError('Unknown combine type: {}'.format(self.combine_method))
if self.combine_method in {'weighted_sum', 'attention', 'fast_attention'}:
self.per_input_weights = self.add_weight(
name='bifpn_combine_weights',
shape=(len(input_shape), 1),
initializer='ones',
trainable=True)
super(BiFPNCombineLayer, self).build(input_shape)
def call(self, inputs):
"""Combines multiple input feature maps into a single output feature map.
Executed when calling the `.__call__` method on input.
Args:
inputs: A list of tensors where all tensors have the same shape, [batch,
height_i, width_i, depth_i].
Returns:
A single tensor, with the same shape as the input tensors,
[batch, height_i, width_i, depth_i].
"""
return self._combine_op(inputs)
  def compute_output_shape(self, input_shape):
    output_shape = input_shape[0]
    for i in range(1, len(input_shape)):
      if input_shape[i] != output_shape:
        raise ValueError(
            'Inputs could not be combined. Shapes should match, '
            'but input_shape[0] is {} while input_shape[{}] is {}'.format(
                output_shape, i, input_shape[i]))
    return output_shape
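# Usage sketch (illustrative): combining two same-shape feature maps with fast
# attention. With the default 'ones' initializer the learned weights start
# equal, so the initial combination is close to a simple average:
# relu([1, 1]) / (1 + 1 + 0.0001) ~= [0.5, 0.5].
def _example_bifpn_combine(feature_map_a, feature_map_b):
  combine_layer = BiFPNCombineLayer(combine_method='fast_attention',
                                    name='example_combine')
  return combine_layer([feature_map_a, feature_map_b])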
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/bifpn_utils.py | bifpn_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for importing/exporting Object Detection categories."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import tensorflow.compat.v1 as tf
def load_categories_from_csv_file(csv_path):
"""Loads categories from a csv file.
The CSV file should have one comma delimited numeric category id and string
category name pair per line. For example:
0,"cat"
1,"dog"
2,"bird"
...
Args:
csv_path: Path to the csv file to be parsed into categories.
Returns:
categories: A list of dictionaries representing all possible categories.
The categories will contain an integer 'id' field and a string
'name' field.
Raises:
ValueError: If the csv file is incorrectly formatted.
"""
categories = []
with tf.gfile.Open(csv_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
if not row:
continue
if len(row) != 2:
raise ValueError('Expected 2 fields per row in csv: %s' % ','.join(row))
category_id = int(row[0])
category_name = row[1]
categories.append({'id': category_id, 'name': category_name})
return categories
def save_categories_to_csv_file(categories, csv_path):
"""Saves categories to a csv file.
Args:
categories: A list of dictionaries representing categories to save to file.
Each category must contain an 'id' and 'name' field.
csv_path: Path to the csv file to be parsed into categories.
"""
categories.sort(key=lambda x: x['id'])
with tf.gfile.Open(csv_path, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for category in categories:
writer.writerow([category['id'], category['name']])
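# Round-trip sketch (illustrative; the path below is hypothetical):
def _example_category_roundtrip(csv_path='/tmp/categories.csv'):
  categories = [{'id': 0, 'name': 'cat'}, {'id': 1, 'name': 'dog'}]
  save_categories_to_csv_file(categories, csv_path)
  return load_categories_from_csv_file(csv_path)  # Returns the same list.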
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/category_util.py | category_util.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to check TensorFlow Version."""
from tensorflow.python import tf2 # pylint: disable=import-outside-toplevel
def is_tf1():
"""Whether current TensorFlow Version is 1.X."""
return not tf2.enabled()
def is_tf2():
"""Whether current TensorFlow Version is 2.X."""
return tf2.enabled()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/tf_version.py | tf_version.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A convenience wrapper around tf.test.TestCase to test with TPU, TF1, TF2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.python import tf2 # pylint: disable=import-outside-toplevel
from object_detection.utils import tf_version
if not tf2.enabled():
from tensorflow.contrib import tpu as contrib_tpu # pylint: disable=g-import-not-at-top, line-too-long
flags = tf.app.flags
flags.DEFINE_bool('tpu_test', False, 'Deprecated Flag.')
FLAGS = flags.FLAGS
class TestCase(tf.test.TestCase):
"""Base Test class to handle execution under {TF1.X, TF2.X} x {TPU, CPU}.
This class determines the TF version and availability of TPUs to set up
tests appropriately.
"""
def maybe_extract_single_output(self, outputs):
    if isinstance(outputs, (list, tuple)):
if isinstance(outputs[0], tf.Tensor):
outputs_np = [output.numpy() for output in outputs]
else:
outputs_np = outputs
if len(outputs_np) == 1:
return outputs_np[0]
else:
return outputs_np
else:
if isinstance(outputs, tf.Tensor):
return outputs.numpy()
else:
return outputs
def has_tpu(self):
"""Returns whether there are any logical TPU devices."""
return bool(tf.config.experimental.list_logical_devices(device_type='TPU'))
def is_tf2(self):
"""Returns whether TF2 is enabled."""
return tf_version.is_tf2()
def execute_tpu_tf1(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on TPU with Tensorflow 1.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single numpy array.
"""
with self.session(graph=(graph or tf.Graph())) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
def wrap_graph_fn(*args, **kwargs):
results = compute_fn(*args, **kwargs)
        if (not isinstance(results, (dict, tf.Tensor)) and
            hasattr(results, '__iter__')):
results = list(results)
return results
tpu_computation = contrib_tpu.rewrite(wrap_graph_fn, placeholders)
sess.run(contrib_tpu.initialize_system())
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(tpu_computation,
feed_dict=dict(zip(placeholders, inputs)))
sess.run(contrib_tpu.shutdown_system())
return self.maybe_extract_single_output(materialized_results)
def execute_tpu_tf2(self, compute_fn, inputs):
"""Executes compute_fn on TPU with Tensorflow 2.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single numpy array.
"""
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology, num_replicas=1)
strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=device_assignment)
@tf.function
def run():
tf_inputs = [tf.constant(input_t) for input_t in inputs]
return strategy.run(compute_fn, args=tf_inputs)
outputs = run()
tf.tpu.experimental.shutdown_tpu_system()
return self.maybe_extract_single_output(outputs)
def execute_cpu_tf1(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on CPU with Tensorflow 1.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single numpy array.
"""
if self.is_tf2():
      raise ValueError('Required version TensorFlow 1.X is not available.')
with self.session(graph=(graph or tf.Graph())) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
results = compute_fn(*placeholders)
      if (not isinstance(results, (dict, tf.Tensor)) and
          hasattr(results, '__iter__')):
results = list(results)
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(results, feed_dict=dict(zip(placeholders,
inputs)))
return self.maybe_extract_single_output(materialized_results)
def execute_cpu_tf2(self, compute_fn, inputs):
"""Executes compute_fn on CPU with Tensorflow 2.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single numpy array.
"""
if not self.is_tf2():
raise ValueError('Required version TensorFlow 2.0 is not available.')
@tf.function
def run():
tf_inputs = [tf.constant(input_t) for input_t in inputs]
return compute_fn(*tf_inputs)
return self.maybe_extract_single_output(run())
def execute_cpu(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on CPU.
Depending on the underlying TensorFlow installation (build deps) runs in
either TF 1.X or TF 2.X style.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
      A list of numpy arrays or a single numpy array.
"""
if self.is_tf2():
return self.execute_cpu_tf2(compute_fn, inputs)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
def execute_tpu(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on TPU.
Depending on the underlying TensorFlow installation (build deps) runs in
either TF 1.X or TF 2.X style.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
      A list of numpy arrays or a single numpy array.
"""
if not self.has_tpu():
raise ValueError('No TPU Device found.')
if self.is_tf2():
return self.execute_tpu_tf2(compute_fn, inputs)
else:
return self.execute_tpu_tf1(compute_fn, inputs, graph)
def execute_tf2(self, compute_fn, inputs):
"""Runs compute_fn with TensorFlow 2.0.
Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
      A list of numpy arrays or a single numpy array.
"""
if not self.is_tf2():
raise ValueError('Required version TensorFlow 2.0 is not available.')
if self.has_tpu():
return self.execute_tpu_tf2(compute_fn, inputs)
else:
return self.execute_cpu_tf2(compute_fn, inputs)
def execute_tf1(self, compute_fn, inputs, graph=None):
"""Runs compute_fn with TensorFlow 1.X.
Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
      A list of numpy arrays or a single numpy array.
"""
if self.is_tf2():
      raise ValueError('Required version TensorFlow 1.X is not available.')
if self.has_tpu():
return self.execute_tpu_tf1(compute_fn, inputs, graph)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
def execute(self, compute_fn, inputs, graph=None):
"""Runs compute_fn with inputs and returns results.
* Executes in either TF1.X or TF2.X style based on the TensorFlow version.
* Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
      A list of numpy arrays or a single numpy array.
"""
if self.has_tpu() and tf2.enabled():
return self.execute_tpu_tf2(compute_fn, inputs)
elif not self.has_tpu() and tf2.enabled():
return self.execute_cpu_tf2(compute_fn, inputs)
elif self.has_tpu() and not tf2.enabled():
return self.execute_tpu_tf1(compute_fn, inputs, graph)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/test_case.py | test_case.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import metrics
class MetricsTest(tf.test.TestCase):
def test_compute_cor_loc(self):
num_gt_imgs_per_class = np.array([100, 1, 5, 1, 1], dtype=int)
num_images_correctly_detected_per_class = np.array(
[10, 0, 1, 0, 0], dtype=int)
corloc = metrics.compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class)
expected_corloc = np.array([0.1, 0, 0.2, 0, 0], dtype=float)
self.assertTrue(np.allclose(corloc, expected_corloc))
def test_compute_cor_loc_nans(self):
num_gt_imgs_per_class = np.array([100, 0, 0, 1, 1], dtype=int)
num_images_correctly_detected_per_class = np.array(
[10, 0, 1, 0, 0], dtype=int)
corloc = metrics.compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class)
expected_corloc = np.array([0.1, np.nan, np.nan, 0, 0], dtype=float)
self.assertAllClose(corloc, expected_corloc)
def test_compute_precision_recall(self):
num_gt = 10
scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
labels = np.array([0, 1, 1, 0, 0, 1], dtype=bool)
labels_float_type = np.array([0, 1, 1, 0, 0, 1], dtype=float)
accumulated_tp_count = np.array([0, 1, 1, 2, 2, 3], dtype=float)
expected_precision = accumulated_tp_count / np.array([1, 2, 3, 4, 5, 6])
expected_recall = accumulated_tp_count / num_gt
precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
precision_float_type, recall_float_type = metrics.compute_precision_recall(
scores, labels_float_type, num_gt)
self.assertAllClose(precision, expected_precision)
self.assertAllClose(recall, expected_recall)
self.assertAllClose(precision_float_type, expected_precision)
self.assertAllClose(recall_float_type, expected_recall)
def test_compute_precision_recall_float(self):
num_gt = 10
scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
labels_float = np.array([0, 1, 1, 0.5, 0, 1], dtype=float)
expected_precision = np.array(
[0., 0.5, 0.33333333, 0.5, 0.55555556, 0.63636364], dtype=float)
expected_recall = np.array([0., 0.1, 0.1, 0.2, 0.25, 0.35], dtype=float)
precision, recall = metrics.compute_precision_recall(
scores, labels_float, num_gt)
self.assertAllClose(precision, expected_precision)
self.assertAllClose(recall, expected_recall)
def test_compute_average_precision(self):
precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
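    # compute_average_precision is expected to use interpolated precision:
    # each precision value is replaced by the running maximum over all higher
    # recalls, and AP is the recall-interval-weighted sum of that curve, as
    # computed by hand below.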
processed_precision = np.array(
[0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0], dtype=float)
recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
expected_mean_ap = np.sum(recall_interval * processed_precision)
mean_ap = metrics.compute_average_precision(precision, recall)
self.assertAlmostEqual(expected_mean_ap, mean_ap)
def test_compute_precision_recall_and_ap_no_groundtruth(self):
num_gt = 0
scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
expected_precision = None
expected_recall = None
precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
    self.assertEqual(precision, expected_precision)
    self.assertEqual(recall, expected_recall)
ap = metrics.compute_average_precision(precision, recall)
self.assertTrue(np.isnan(ap))
def test_compute_recall_at_k(self):
num_gt = 4
tp_fp = [
np.array([1, 0, 0], dtype=float),
np.array([0, 1], dtype=float),
np.array([0, 0, 0, 0, 0], dtype=float)
]
    tp_fp_bool = [
        np.array([True, False, False], dtype=bool),
        np.array([False, True], dtype=bool),
        np.array([False, False, False, False, False], dtype=bool)
    ]
]
recall_1 = metrics.compute_recall_at_k(tp_fp, num_gt, 1)
recall_3 = metrics.compute_recall_at_k(tp_fp, num_gt, 3)
recall_5 = metrics.compute_recall_at_k(tp_fp, num_gt, 5)
recall_3_bool = metrics.compute_recall_at_k(tp_fp_bool, num_gt, 3)
self.assertAlmostEqual(recall_1, 0.25)
self.assertAlmostEqual(recall_3, 0.5)
self.assertAlmostEqual(recall_3_bool, 0.5)
self.assertAlmostEqual(recall_5, 0.5)
def test_compute_median_rank_at_k(self):
tp_fp = [
np.array([1, 0, 0], dtype=float),
np.array([0, 0.1], dtype=float),
np.array([0, 0, 0, 0, 0], dtype=float)
]
    tp_fp_bool = [
        np.array([True, False, False], dtype=bool),
        np.array([False, True], dtype=bool),
        np.array([False, False, False, False, False], dtype=bool)
    ]
]
median_ranks_1 = metrics.compute_median_rank_at_k(tp_fp, 1)
median_ranks_3 = metrics.compute_median_rank_at_k(tp_fp, 3)
median_ranks_5 = metrics.compute_median_rank_at_k(tp_fp, 5)
median_ranks_3_bool = metrics.compute_median_rank_at_k(tp_fp_bool, 3)
    self.assertEqual(median_ranks_1, 0)
    self.assertEqual(median_ranks_3, 0.5)
    self.assertEqual(median_ranks_3_bool, 0.5)
    self.assertEqual(median_ranks_5, 0.5)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/metrics_test.py | metrics_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.config_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.protos import eval_pb2
from object_detection.protos import image_resizer_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import training as contrib_training
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
def _write_config(config, config_path):
"""Writes a config object to disk."""
config_text = text_format.MessageToString(config)
with tf.gfile.Open(config_path, "wb") as f:
f.write(config_text)
def _update_optimizer_with_constant_learning_rate(optimizer, learning_rate):
"""Adds a new constant learning rate."""
constant_lr = optimizer.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
def _update_optimizer_with_exponential_decay_learning_rate(
optimizer, learning_rate):
"""Adds a new exponential decay learning rate."""
exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate
exponential_lr.initial_learning_rate = learning_rate
def _update_optimizer_with_manual_step_learning_rate(
optimizer, initial_learning_rate, learning_rate_scaling):
"""Adds a learning rate schedule."""
manual_lr = optimizer.learning_rate.manual_step_learning_rate
manual_lr.initial_learning_rate = initial_learning_rate
for i in range(3):
schedule = manual_lr.schedule.add()
schedule.learning_rate = initial_learning_rate * learning_rate_scaling**i
def _update_optimizer_with_cosine_decay_learning_rate(
optimizer, learning_rate, warmup_learning_rate):
"""Adds a new cosine decay learning rate."""
cosine_lr = optimizer.learning_rate.cosine_decay_learning_rate
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_learning_rate
class ConfigUtilTest(tf.test.TestCase):
def _create_and_load_test_configs(self, pipeline_config):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
return config_util.get_configs_from_pipeline_file(pipeline_config_path)
def test_get_configs_from_pipeline_file(self):
"""Test that proto configs can be read from pipeline config file."""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
self.assertProtoEquals(pipeline_config.model, configs["model"])
self.assertProtoEquals(pipeline_config.train_config,
configs["train_config"])
self.assertProtoEquals(pipeline_config.train_input_reader,
configs["train_input_config"])
self.assertProtoEquals(pipeline_config.eval_config,
configs["eval_config"])
self.assertProtoEquals(pipeline_config.eval_input_reader,
configs["eval_input_configs"])
def test_create_configs_from_pipeline_proto(self):
"""Tests creating configs dictionary from pipeline proto."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
configs = config_util.create_configs_from_pipeline_proto(pipeline_config)
self.assertProtoEquals(pipeline_config.model, configs["model"])
self.assertProtoEquals(pipeline_config.train_config,
configs["train_config"])
self.assertProtoEquals(pipeline_config.train_input_reader,
configs["train_input_config"])
self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"])
self.assertProtoEquals(pipeline_config.eval_input_reader,
configs["eval_input_configs"])
def test_create_pipeline_proto_from_configs(self):
"""Tests that proto can be reconstructed from configs dictionary."""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
pipeline_config_reconstructed = (
config_util.create_pipeline_proto_from_configs(configs))
self.assertEqual(pipeline_config, pipeline_config_reconstructed)
def test_save_pipeline_config(self):
"""Tests that the pipeline config is properly saved to disk."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
pipeline_config.train_config.batch_size = 32
pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
pipeline_config.eval_config.num_examples = 20
pipeline_config.eval_input_reader.add().queue_capacity = 100
config_util.save_pipeline_config(pipeline_config, self.get_temp_dir())
configs = config_util.get_configs_from_pipeline_file(
os.path.join(self.get_temp_dir(), "pipeline.config"))
pipeline_config_reconstructed = (
config_util.create_pipeline_proto_from_configs(configs))
self.assertEqual(pipeline_config, pipeline_config_reconstructed)
def test_get_configs_from_multiple_files(self):
"""Tests that proto configs can be read from multiple files."""
temp_dir = self.get_temp_dir()
# Write model config file.
model_config_path = os.path.join(temp_dir, "model.config")
model = model_pb2.DetectionModel()
model.faster_rcnn.num_classes = 10
_write_config(model, model_config_path)
# Write train config file.
train_config_path = os.path.join(temp_dir, "train.config")
    train_config = train_pb2.TrainConfig()
train_config.batch_size = 32
_write_config(train_config, train_config_path)
# Write train input config file.
train_input_config_path = os.path.join(temp_dir, "train_input.config")
train_input_config = input_reader_pb2.InputReader()
train_input_config.label_map_path = "path/to/label_map"
_write_config(train_input_config, train_input_config_path)
# Write eval config file.
eval_config_path = os.path.join(temp_dir, "eval.config")
eval_config = eval_pb2.EvalConfig()
eval_config.num_examples = 20
_write_config(eval_config, eval_config_path)
# Write eval input config file.
eval_input_config_path = os.path.join(temp_dir, "eval_input.config")
eval_input_config = input_reader_pb2.InputReader()
eval_input_config.label_map_path = "path/to/another/label_map"
_write_config(eval_input_config, eval_input_config_path)
configs = config_util.get_configs_from_multiple_files(
model_config_path=model_config_path,
train_config_path=train_config_path,
train_input_config_path=train_input_config_path,
eval_config_path=eval_config_path,
eval_input_config_path=eval_input_config_path)
self.assertProtoEquals(model, configs["model"])
self.assertProtoEquals(train_config, configs["train_config"])
self.assertProtoEquals(train_input_config,
configs["train_input_config"])
self.assertProtoEquals(eval_config, configs["eval_config"])
self.assertProtoEquals(eval_input_config, configs["eval_input_configs"][0])
def _assertOptimizerWithNewLearningRate(self, optimizer_name):
"""Asserts successful updating of all learning rate schemes."""
original_learning_rate = 0.7
learning_rate_scaling = 0.1
warmup_learning_rate = 0.07
hparams = contrib_training.HParams(learning_rate=0.15)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
# Constant learning rate.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
_update_optimizer_with_constant_learning_rate(optimizer,
original_learning_rate)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
constant_lr = optimizer.learning_rate.constant_learning_rate
self.assertAlmostEqual(hparams.learning_rate, constant_lr.learning_rate)
# Exponential decay learning rate.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
_update_optimizer_with_exponential_decay_learning_rate(
optimizer, original_learning_rate)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate
self.assertAlmostEqual(hparams.learning_rate,
exponential_lr.initial_learning_rate)
# Manual step learning rate.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
_update_optimizer_with_manual_step_learning_rate(
optimizer, original_learning_rate, learning_rate_scaling)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
manual_lr = optimizer.learning_rate.manual_step_learning_rate
self.assertAlmostEqual(hparams.learning_rate,
manual_lr.initial_learning_rate)
for i, schedule in enumerate(manual_lr.schedule):
self.assertAlmostEqual(hparams.learning_rate * learning_rate_scaling**i,
schedule.learning_rate)
# Cosine decay learning rate.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
_update_optimizer_with_cosine_decay_learning_rate(optimizer,
original_learning_rate,
warmup_learning_rate)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
cosine_lr = optimizer.learning_rate.cosine_decay_learning_rate
self.assertAlmostEqual(hparams.learning_rate, cosine_lr.learning_rate_base)
warmup_scale_factor = warmup_learning_rate / original_learning_rate
self.assertAlmostEqual(hparams.learning_rate * warmup_scale_factor,
cosine_lr.warmup_learning_rate)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
  def testRMSPropWithNewLearningRate(self):
    """Tests new learning rates for RMSProp Optimizer."""
self._assertOptimizerWithNewLearningRate("rms_prop_optimizer")
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testMomentumOptimizerWithNewLearningRate(self):
"""Tests new learning rates for Momentum Optimizer."""
self._assertOptimizerWithNewLearningRate("momentum_optimizer")
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testAdamOptimizerWithNewLearningRate(self):
"""Tests new learning rates for Adam Optimizer."""
self._assertOptimizerWithNewLearningRate("adam_optimizer")
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testGenericConfigOverride(self):
"""Tests generic config overrides for all top-level configs."""
# Set one parameter for each of the top-level pipeline configs:
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.num_classes = 1
pipeline_config.train_config.batch_size = 1
pipeline_config.eval_config.num_visualizations = 1
pipeline_config.train_input_reader.label_map_path = "/some/path"
pipeline_config.eval_input_reader.add().label_map_path = "/some/path"
pipeline_config.graph_rewriter.quantization.weight_bits = 1
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
# Override each of the parameters:
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
hparams = contrib_training.HParams(
**{
"model.ssd.num_classes": 2,
"train_config.batch_size": 2,
"train_input_config.label_map_path": "/some/other/path",
"eval_config.num_visualizations": 2,
"graph_rewriter_config.quantization.weight_bits": 2
})
configs = config_util.merge_external_params_with_configs(configs, hparams)
# Ensure that the parameters have the overridden values:
self.assertEqual(2, configs["model"].ssd.num_classes)
self.assertEqual(2, configs["train_config"].batch_size)
self.assertEqual("/some/other/path",
configs["train_input_config"].label_map_path)
self.assertEqual(2, configs["eval_config"].num_visualizations)
self.assertEqual(2,
configs["graph_rewriter_config"].quantization.weight_bits)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewBatchSize(self):
"""Tests that batch size is updated appropriately."""
original_batch_size = 2
hparams = contrib_training.HParams(batch_size=16)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = original_batch_size
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
new_batch_size = configs["train_config"].batch_size
self.assertEqual(16, new_batch_size)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewBatchSizeWithClipping(self):
"""Tests that batch size is clipped to 1 from below."""
original_batch_size = 2
hparams = contrib_training.HParams(batch_size=0.5)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = original_batch_size
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
new_batch_size = configs["train_config"].batch_size
self.assertEqual(1, new_batch_size) # Clipped to 1.0.
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testOverwriteBatchSizeWithKeyValue(self):
"""Tests that batch size is overwritten based on key/value."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = 2
configs = self._create_and_load_test_configs(pipeline_config)
hparams = contrib_training.HParams(**{"train_config.batch_size": 10})
configs = config_util.merge_external_params_with_configs(configs, hparams)
new_batch_size = configs["train_config"].batch_size
self.assertEqual(10, new_batch_size)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testOverwriteSampleFromDatasetWeights(self):
"""Tests config override for sample_from_datasets_weights."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_input_reader.sample_from_datasets_weights.extend(
[1, 2])
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
# Override parameters:
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
hparams = contrib_training.HParams(sample_from_datasets_weights=[0.5, 0.5])
configs = config_util.merge_external_params_with_configs(configs, hparams)
# Ensure that the parameters have the overridden values:
self.assertListEqual(
[0.5, 0.5],
list(configs["train_input_config"].sample_from_datasets_weights))
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testOverwriteSampleFromDatasetWeightsWrongLength(self):
"""Tests config override for sample_from_datasets_weights."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_input_reader.sample_from_datasets_weights.extend(
[1, 2])
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
# Try to override parameter with too many weights:
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
hparams = contrib_training.HParams(
sample_from_datasets_weights=[0.5, 0.5, 0.5])
with self.assertRaises(
ValueError,
msg="sample_from_datasets_weights override has a different number of"
" values (3) than the configured dataset weights (2)."
):
config_util.merge_external_params_with_configs(configs, hparams)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testKeyValueOverrideBadKey(self):
"""Tests that overwriting with a bad key causes an exception."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
configs = self._create_and_load_test_configs(pipeline_config)
hparams = contrib_training.HParams(**{"train_config.no_such_field": 10})
with self.assertRaises(ValueError):
config_util.merge_external_params_with_configs(configs, hparams)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testOverwriteBatchSizeWithBadValueType(self):
"""Tests that overwriting with a bad valuye type causes an exception."""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.batch_size = 2
configs = self._create_and_load_test_configs(pipeline_config)
# Type should be an integer, but we're passing a string "10".
hparams = contrib_training.HParams(**{"train_config.batch_size": "10"})
with self.assertRaises(TypeError):
config_util.merge_external_params_with_configs(configs, hparams)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewMomentumOptimizerValue(self):
"""Tests that new momentum value is updated appropriately."""
original_momentum_value = 0.4
hparams = contrib_training.HParams(momentum_optimizer_value=1.1)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
optimizer_config = pipeline_config.train_config.optimizer.rms_prop_optimizer
optimizer_config.momentum_optimizer_value = original_momentum_value
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
new_momentum_value = optimizer_config.momentum_optimizer_value
self.assertAlmostEqual(1.0, new_momentum_value) # Clipped to 1.0.
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewClassificationLocalizationWeightRatio(self):
"""Tests that the loss weight ratio is updated appropriately."""
original_localization_weight = 0.1
original_classification_weight = 0.2
new_weight_ratio = 5.0
hparams = contrib_training.HParams(
classification_localization_weight_ratio=new_weight_ratio)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.loss.localization_weight = (
original_localization_weight)
pipeline_config.model.ssd.loss.classification_weight = (
original_classification_weight)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
loss = configs["model"].ssd.loss
self.assertAlmostEqual(1.0, loss.localization_weight)
self.assertAlmostEqual(new_weight_ratio, loss.classification_weight)
@unittest.skipIf(tf_version.is_tf2(), "Skipping TF1.X only test.")
def testNewFocalLossParameters(self):
"""Tests that the loss weight ratio is updated appropriately."""
original_alpha = 1.0
original_gamma = 1.0
new_alpha = 0.3
new_gamma = 2.0
hparams = contrib_training.HParams(
focal_loss_alpha=new_alpha, focal_loss_gamma=new_gamma)
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
classification_loss = pipeline_config.model.ssd.loss.classification_loss
classification_loss.weighted_sigmoid_focal.alpha = original_alpha
classification_loss.weighted_sigmoid_focal.gamma = original_gamma
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(configs, hparams)
classification_loss = configs["model"].ssd.loss.classification_loss
self.assertAlmostEqual(new_alpha,
classification_loss.weighted_sigmoid_focal.alpha)
self.assertAlmostEqual(new_gamma,
classification_loss.weighted_sigmoid_focal.gamma)
def testMergingKeywordArguments(self):
"""Tests that keyword arguments get merged as do hyperparameters."""
original_num_train_steps = 100
desired_num_train_steps = 10
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.num_steps = original_num_train_steps
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"train_steps": desired_num_train_steps}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
train_steps = configs["train_config"].num_steps
self.assertEqual(desired_num_train_steps, train_steps)
def testGetNumberOfClasses(self):
"""Tests that number of classes can be retrieved."""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 20
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
number_of_classes = config_util.get_number_of_classes(configs["model"])
self.assertEqual(20, number_of_classes)
def testNewTrainInputPath(self):
"""Tests that train input path can be overwritten with single file."""
original_train_path = ["path/to/data"]
new_train_path = "another/path/to/data"
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
reader_config = pipeline_config.train_input_reader.tf_record_input_reader
reader_config.input_path.extend(original_train_path)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"train_input_path": new_train_path}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
reader_config = configs["train_input_config"].tf_record_input_reader
final_path = reader_config.input_path
self.assertEqual([new_train_path], final_path)
def testNewTrainInputPathList(self):
"""Tests that train input path can be overwritten with multiple files."""
original_train_path = ["path/to/data"]
new_train_path = ["another/path/to/data", "yet/another/path/to/data"]
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
reader_config = pipeline_config.train_input_reader.tf_record_input_reader
reader_config.input_path.extend(original_train_path)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"train_input_path": new_train_path}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
reader_config = configs["train_input_config"].tf_record_input_reader
final_path = reader_config.input_path
self.assertEqual(new_train_path, final_path)
def testNewLabelMapPath(self):
"""Tests that label map path can be overwritten in input readers."""
original_label_map_path = "path/to/original/label_map"
new_label_map_path = "path//to/new/label_map"
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
train_input_reader = pipeline_config.train_input_reader
train_input_reader.label_map_path = original_label_map_path
eval_input_reader = pipeline_config.eval_input_reader.add()
eval_input_reader.label_map_path = original_label_map_path
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"label_map_path": new_label_map_path}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(new_label_map_path,
configs["train_input_config"].label_map_path)
for eval_input_config in configs["eval_input_configs"]:
self.assertEqual(new_label_map_path, eval_input_config.label_map_path)
def testDontOverwriteEmptyLabelMapPath(self):
"""Tests that label map path will not by overwritten with empty string."""
original_label_map_path = "path/to/original/label_map"
new_label_map_path = ""
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
train_input_reader = pipeline_config.train_input_reader
train_input_reader.label_map_path = original_label_map_path
eval_input_reader = pipeline_config.eval_input_reader.add()
eval_input_reader.label_map_path = original_label_map_path
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"label_map_path": new_label_map_path}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(original_label_map_path,
configs["train_input_config"].label_map_path)
self.assertEqual(original_label_map_path,
configs["eval_input_configs"][0].label_map_path)
def testNewMaskType(self):
"""Tests that mask type can be overwritten in input readers."""
original_mask_type = input_reader_pb2.NUMERICAL_MASKS
new_mask_type = input_reader_pb2.PNG_MASKS
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
train_input_reader = pipeline_config.train_input_reader
train_input_reader.mask_type = original_mask_type
eval_input_reader = pipeline_config.eval_input_reader.add()
eval_input_reader.mask_type = original_mask_type
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"mask_type": new_mask_type}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(new_mask_type, configs["train_input_config"].mask_type)
self.assertEqual(new_mask_type, configs["eval_input_configs"][0].mask_type)
def testUseMovingAverageForEval(self):
use_moving_averages_orig = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = use_moving_averages_orig
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"eval_with_moving_averages": True}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(True, configs["eval_config"].use_moving_averages)
def testGetImageResizerConfig(self):
"""Tests that number of classes can be retrieved."""
model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.image_resizer.fixed_shape_resizer.height = 100
model_config.faster_rcnn.image_resizer.fixed_shape_resizer.width = 300
image_resizer_config = config_util.get_image_resizer_config(model_config)
self.assertEqual(image_resizer_config.fixed_shape_resizer.height, 100)
self.assertEqual(image_resizer_config.fixed_shape_resizer.width, 300)
def testGetSpatialImageSizeFromFixedShapeResizerConfig(self):
image_resizer_config = image_resizer_pb2.ImageResizer()
image_resizer_config.fixed_shape_resizer.height = 100
image_resizer_config.fixed_shape_resizer.width = 200
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [100, 200])
def testGetSpatialImageSizeFromAspectPreservingResizerConfig(self):
image_resizer_config = image_resizer_pb2.ImageResizer()
image_resizer_config.keep_aspect_ratio_resizer.min_dimension = 100
image_resizer_config.keep_aspect_ratio_resizer.max_dimension = 600
image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension = True
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [600, 600])
def testGetSpatialImageSizeFromAspectPreservingResizerDynamic(self):
image_resizer_config = image_resizer_pb2.ImageResizer()
image_resizer_config.keep_aspect_ratio_resizer.min_dimension = 100
image_resizer_config.keep_aspect_ratio_resizer.max_dimension = 600
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [-1, -1])
def testGetSpatialImageSizeFromConditionalShapeResizer(self):
image_resizer_config = image_resizer_pb2.ImageResizer()
image_resizer_config.conditional_shape_resizer.size_threshold = 100
image_shape = config_util.get_spatial_image_size(image_resizer_config)
self.assertAllEqual(image_shape, [-1, -1])
def testGetMaxNumContextFeaturesFromModelConfig(self):
model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.context_config.max_num_context_features = 10
max_num_context_features = config_util.get_max_num_context_features(
model_config)
self.assertAllEqual(max_num_context_features, 10)
def testGetContextFeatureLengthFromModelConfig(self):
model_config = model_pb2.DetectionModel()
model_config.faster_rcnn.context_config.context_feature_length = 100
context_feature_length = config_util.get_context_feature_length(
model_config)
self.assertAllEqual(context_feature_length, 100)
def testEvalShuffle(self):
"""Tests that `eval_shuffle` keyword arguments are applied correctly."""
original_shuffle = True
desired_shuffle = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().shuffle = original_shuffle
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"eval_shuffle": desired_shuffle}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(desired_shuffle, configs["eval_input_configs"][0].shuffle)
def testTrainShuffle(self):
"""Tests that `train_shuffle` keyword arguments are applied correctly."""
original_shuffle = True
desired_shuffle = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_input_reader.shuffle = original_shuffle
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"train_shuffle": desired_shuffle}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
train_shuffle = configs["train_input_config"].shuffle
self.assertEqual(desired_shuffle, train_shuffle)
def testOverWriteRetainOriginalImages(self):
"""Tests that `train_shuffle` keyword arguments are applied correctly."""
original_retain_original_images = True
desired_retain_original_images = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.retain_original_images = (
original_retain_original_images)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {
"retain_original_images_in_eval": desired_retain_original_images
}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
retain_original_images = configs["eval_config"].retain_original_images
self.assertEqual(desired_retain_original_images, retain_original_images)
def testOverwriteAllEvalSampling(self):
original_num_eval_examples = 1
new_num_eval_examples = 10
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().sample_1_of_n_examples = (
original_num_eval_examples)
pipeline_config.eval_input_reader.add().sample_1_of_n_examples = (
original_num_eval_examples)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"sample_1_of_n_eval_examples": new_num_eval_examples}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
for eval_input_config in configs["eval_input_configs"]:
self.assertEqual(new_num_eval_examples,
eval_input_config.sample_1_of_n_examples)
def testOverwriteAllEvalNumEpochs(self):
original_num_epochs = 10
new_num_epochs = 1
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().num_epochs = original_num_epochs
pipeline_config.eval_input_reader.add().num_epochs = original_num_epochs
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"eval_num_epochs": new_num_epochs}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
for eval_input_config in configs["eval_input_configs"]:
self.assertEqual(new_num_epochs, eval_input_config.num_epochs)
def testUpdateMaskTypeForAllInputConfigs(self):
original_mask_type = input_reader_pb2.NUMERICAL_MASKS
new_mask_type = input_reader_pb2.PNG_MASKS
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
train_config = pipeline_config.train_input_reader
train_config.mask_type = original_mask_type
eval_1 = pipeline_config.eval_input_reader.add()
eval_1.mask_type = original_mask_type
eval_1.name = "eval_1"
eval_2 = pipeline_config.eval_input_reader.add()
eval_2.mask_type = original_mask_type
eval_2.name = "eval_2"
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"mask_type": new_mask_type}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
self.assertEqual(configs["train_input_config"].mask_type, new_mask_type)
for eval_input_config in configs["eval_input_configs"]:
self.assertEqual(eval_input_config.mask_type, new_mask_type)
def testErrorOverwritingMultipleInputConfig(self):
original_shuffle = False
new_shuffle = True
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
eval_1 = pipeline_config.eval_input_reader.add()
eval_1.shuffle = original_shuffle
eval_1.name = "eval_1"
eval_2 = pipeline_config.eval_input_reader.add()
eval_2.shuffle = original_shuffle
eval_2.name = "eval_2"
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {"eval_shuffle": new_shuffle}
with self.assertRaises(ValueError):
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def testCheckAndParseInputConfigKey(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().name = "eval_1"
pipeline_config.eval_input_reader.add().name = "eval_2"
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
specific_shuffle_update_key = "eval_input_configs:eval_2:shuffle"
is_valid_input_config_key, key_name, input_name, field_name = (
config_util.check_and_parse_input_config_key(
configs, specific_shuffle_update_key))
self.assertTrue(is_valid_input_config_key)
self.assertEqual(key_name, "eval_input_configs")
self.assertEqual(input_name, "eval_2")
self.assertEqual(field_name, "shuffle")
legacy_shuffle_update_key = "eval_shuffle"
is_valid_input_config_key, key_name, input_name, field_name = (
config_util.check_and_parse_input_config_key(configs,
legacy_shuffle_update_key))
self.assertTrue(is_valid_input_config_key)
self.assertEqual(key_name, "eval_input_configs")
self.assertEqual(input_name, None)
self.assertEqual(field_name, "shuffle")
non_input_config_update_key = "label_map_path"
is_valid_input_config_key, key_name, input_name, field_name = (
config_util.check_and_parse_input_config_key(
configs, non_input_config_update_key))
self.assertFalse(is_valid_input_config_key)
self.assertEqual(key_name, None)
self.assertEqual(input_name, None)
self.assertEqual(field_name, "label_map_path")
with self.assertRaisesRegexp(ValueError,
"Invalid key format when overriding configs."):
config_util.check_and_parse_input_config_key(
configs, "train_input_config:shuffle")
with self.assertRaisesRegexp(
ValueError, "Invalid key_name when overriding input config."):
config_util.check_and_parse_input_config_key(
configs, "invalid_key_name:train_name:shuffle")
with self.assertRaisesRegexp(
ValueError, "Invalid input_name when overriding input config."):
config_util.check_and_parse_input_config_key(
configs, "eval_input_configs:unknown_eval_name:shuffle")
with self.assertRaisesRegexp(
ValueError, "Invalid field_name when overriding input config."):
config_util.check_and_parse_input_config_key(
configs, "eval_input_configs:eval_2:unknown_field_name")
def testUpdateInputReaderConfigSuccess(self):
original_shuffle = False
new_shuffle = True
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_input_reader.shuffle = original_shuffle
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
config_util.update_input_reader_config(
configs,
key_name="train_input_config",
input_name=None,
field_name="shuffle",
value=new_shuffle)
self.assertEqual(configs["train_input_config"].shuffle, new_shuffle)
config_util.update_input_reader_config(
configs,
key_name="train_input_config",
input_name=None,
field_name="shuffle",
value=new_shuffle)
self.assertEqual(configs["train_input_config"].shuffle, new_shuffle)
def testUpdateInputReaderConfigErrors(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_input_reader.add().name = "same_eval_name"
pipeline_config.eval_input_reader.add().name = "same_eval_name"
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
with self.assertRaisesRegexp(ValueError,
"Duplicate input name found when overriding."):
config_util.update_input_reader_config(
configs,
key_name="eval_input_configs",
input_name="same_eval_name",
field_name="shuffle",
value=False)
with self.assertRaisesRegexp(
ValueError, "Input name name_not_exist not found when overriding."):
config_util.update_input_reader_config(
configs,
key_name="eval_input_configs",
input_name="name_not_exist",
field_name="shuffle",
value=False)
with self.assertRaisesRegexp(ValueError,
"Unknown input config overriding."):
config_util.update_input_reader_config(
configs,
key_name="eval_input_configs",
input_name=None,
field_name="shuffle",
value=False)
def testOverWriteRetainOriginalImageAdditionalChannels(self):
"""Tests that keyword arguments are applied correctly."""
original_retain_original_image_additional_channels = True
desired_retain_original_image_additional_channels = False
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.retain_original_image_additional_channels = (
original_retain_original_image_additional_channels)
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
override_dict = {
"retain_original_image_additional_channels_in_eval":
desired_retain_original_image_additional_channels
}
configs = config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
retain_original_image_additional_channels = configs[
"eval_config"].retain_original_image_additional_channels
self.assertEqual(desired_retain_original_image_additional_channels,
retain_original_image_additional_channels)
def testUpdateNumClasses(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 10
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
self.assertEqual(config_util.get_number_of_classes(configs["model"]), 10)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"num_classes": 2})
self.assertEqual(config_util.get_number_of_classes(configs["model"]), 2)
def testRemoveUnnecessaryEma(self):
input_dict = {
"expanded_conv_10/project/act_quant/min":
1,
"FeatureExtractor/MobilenetV2_2/expanded_conv_5/expand/act_quant/min":
2,
"expanded_conv_10/expand/BatchNorm/gamma/min/ExponentialMovingAverage":
3,
"expanded_conv_3/depthwise/BatchNorm/beta/max/ExponentialMovingAverage":
4,
"BoxPredictor_1/ClassPredictor_depthwise/act_quant":
5
}
no_ema_collection = ["/min", "/max"]
output_dict = {
"expanded_conv_10/project/act_quant/min":
1,
"FeatureExtractor/MobilenetV2_2/expanded_conv_5/expand/act_quant/min":
2,
"expanded_conv_10/expand/BatchNorm/gamma/min":
3,
"expanded_conv_3/depthwise/BatchNorm/beta/max":
4,
"BoxPredictor_1/ClassPredictor_depthwise/act_quant":
5
}
self.assertEqual(
output_dict,
config_util.remove_unnecessary_ema(input_dict, no_ema_collection))
def testUpdateRescoreInstances(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
kpt_task = pipeline_config.model.center_net.keypoint_estimation_task.add()
kpt_task.rescore_instances = True
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
cn_config = configs["model"].center_net
self.assertEqual(
True, cn_config.keypoint_estimation_task[0].rescore_instances)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"rescore_instances": False})
cn_config = configs["model"].center_net
self.assertEqual(
False, cn_config.keypoint_estimation_task[0].rescore_instances)
def testUpdateRescoreInstancesWithBooleanString(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
kpt_task = pipeline_config.model.center_net.keypoint_estimation_task.add()
kpt_task.rescore_instances = True
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
cn_config = configs["model"].center_net
self.assertEqual(
True, cn_config.keypoint_estimation_task[0].rescore_instances)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"rescore_instances": "False"})
cn_config = configs["model"].center_net
self.assertEqual(
False, cn_config.keypoint_estimation_task[0].rescore_instances)
def testUpdateRescoreInstancesWithMultipleTasks(self):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
kpt_task = pipeline_config.model.center_net.keypoint_estimation_task.add()
kpt_task.rescore_instances = True
kpt_task = pipeline_config.model.center_net.keypoint_estimation_task.add()
kpt_task.rescore_instances = True
_write_config(pipeline_config, pipeline_config_path)
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
cn_config = configs["model"].center_net
self.assertEqual(
True, cn_config.keypoint_estimation_task[0].rescore_instances)
config_util.merge_external_params_with_configs(
configs, kwargs_dict={"rescore_instances": False})
cn_config = configs["model"].center_net
self.assertEqual(
True, cn_config.keypoint_estimation_task[0].rescore_instances)
self.assertEqual(
True, cn_config.keypoint_estimation_task[1].rescore_instances)
if __name__ == "__main__":
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/config_util_test.py | config_util_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, 4] numpy arrays representing bounding boxes.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def area(boxes):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
    a numpy array with shape [N] representing box areas
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
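# Illustrative usage (a minimal sketch; the boxes below are made-up values,
# not from the library). With boxes encoded as [y_min, x_min, y_max, x_max],
# area is (y_max - y_min) * (x_max - x_min).
def _example_area():  # Illustrative only.
  boxes = np.array([[0.0, 0.0, 1.0, 1.0],
                    [0.0, 0.0, 0.5, 0.5]], dtype=np.float32)
  return area(boxes)  # -> [1.0, 0.25]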
def intersection(boxes1, boxes2):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
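# A quick sketch of pairwise intersection (coordinates are invented): entry
# [i, j] of the result is the overlap area of boxes1[i] and boxes2[j].
def _example_intersection():  # Illustrative only.
  boxes1 = np.array([[0.0, 0.0, 2.0, 2.0]], dtype=np.float32)
  boxes2 = np.array([[1.0, 1.0, 3.0, 3.0],
                     [4.0, 4.0, 5.0, 5.0]], dtype=np.float32)
  # The first pair overlaps in a 1x1 square; the second pair is disjoint.
  return intersection(boxes1, boxes2)  # -> [[1.0, 0.0]]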
def iou(boxes1, boxes2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
    boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
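# A hedged IOU example (inputs are invented): a box has IOU 1.0 with itself,
# and a quarter-area sub-box has IOU 0.25 with its enclosing box
# (intersection 0.25 over union 1.0).
def _example_iou():  # Illustrative only.
  boxes1 = np.array([[0.0, 0.0, 1.0, 1.0],
                     [0.0, 0.0, 0.5, 0.5]], dtype=np.float32)
  boxes2 = np.array([[0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
  return iou(boxes1, boxes2)  # -> [[1.0], [0.25]]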
def ioa(boxes1, boxes2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
    boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
intersect = intersection(boxes1, boxes2)
areas = np.expand_dims(area(boxes2), axis=0)
return intersect / areas
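# Sketch of IOA's asymmetry (boxes are illustrative): scores are normalized by
# the *second* argument's areas, so swapping the arguments changes the result.
def _example_ioa():  # Illustrative only.
  box_a = np.array([[0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
  box_b = np.array([[0.0, 0.0, 0.5, 1.0]], dtype=np.float32)
  # The overlap covers all of box_b but only half of box_a.
  return ioa(box_a, box_b), ioa(box_b, box_a)  # -> ([[1.0]], [[0.5]])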
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_ops.py | np_box_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.variables_helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
from object_detection.utils import tf_version
from object_detection.utils import variables_helper
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FilterVariablesTest(test_case.TestCase):
def _create_variables(self):
return [tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights'),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/biases'),
tf.Variable(1.0, name='StackProposalGenerator/weights'),
tf.Variable(1.0, name='StackProposalGenerator/biases')]
def test_return_all_variables_when_empty_regex(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables, [''])
self.assertCountEqual(out_variables, variables)
def test_return_variables_which_do_not_match_single_regex(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables,
['FeatureExtractor/.*'])
self.assertCountEqual(out_variables, variables[2:])
def test_return_variables_which_do_not_match_any_regex_in_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(variables, [
'FeatureExtractor.*biases', 'StackProposalGenerator.*biases'
])
self.assertCountEqual(out_variables, [variables[0], variables[2]])
def test_return_variables_matching_empty_regex_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(
variables, [''], invert=True)
self.assertCountEqual(out_variables, [])
def test_return_variables_matching_some_regex_in_list(self):
variables = self._create_variables()
out_variables = variables_helper.filter_variables(
variables,
['FeatureExtractor.*biases', 'StackProposalGenerator.*biases'],
invert=True)
self.assertCountEqual(out_variables, [variables[1], variables[3]])
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class MultiplyGradientsMatchingRegexTest(tf.test.TestCase):
def _create_grads_and_vars(self):
return [(tf.constant(1.0),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),
(tf.constant(2.0),
tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),
(tf.constant(3.0),
tf.Variable(3.0, name='StackProposalGenerator/weights')),
(tf.constant(4.0),
tf.Variable(4.0, name='StackProposalGenerator/biases'))]
def test_multiply_all_feature_extractor_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['FeatureExtractor/.*']
multiplier = 0.0
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars, regex_list, multiplier)
exp_output = [(0.0, 1.0), (0.0, 2.0), (3.0, 3.0), (4.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertCountEqual(output, exp_output)
def test_multiply_all_bias_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['.*/biases']
multiplier = 0.0
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars, regex_list, multiplier)
exp_output = [(1.0, 1.0), (0.0, 2.0), (3.0, 3.0), (0.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertCountEqual(output, exp_output)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FreezeGradientsMatchingRegexTest(test_case.TestCase):
def _create_grads_and_vars(self):
return [(tf.constant(1.0),
tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),
(tf.constant(2.0),
tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),
(tf.constant(3.0),
tf.Variable(3.0, name='StackProposalGenerator/weights')),
(tf.constant(4.0),
tf.Variable(4.0, name='StackProposalGenerator/biases'))]
def test_freeze_all_feature_extractor_variables(self):
grads_and_vars = self._create_grads_and_vars()
regex_list = ['FeatureExtractor/.*']
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, regex_list)
exp_output = [(3.0, 3.0), (4.0, 4.0)]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
output = sess.run(grads_and_vars)
self.assertCountEqual(output, exp_output)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class GetVariablesAvailableInCheckpointTest(test_case.TestCase):
def test_return_all_variables_from_checkpoint(self):
with tf.Graph().as_default():
variables = [
tf.Variable(1.0, name='weights'),
tf.Variable(1.0, name='biases')
]
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
out_variables = variables_helper.get_variables_available_in_checkpoint(
variables, checkpoint_path)
self.assertCountEqual(out_variables, variables)
def test_return_all_variables_from_checkpoint_with_partition(self):
with tf.Graph().as_default():
partitioner = tf.fixed_size_partitioner(2)
variables = [
tf.get_variable(
name='weights', shape=(2, 2), partitioner=partitioner),
tf.Variable([1.0, 2.0], name='biases')
]
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
out_variables = variables_helper.get_variables_available_in_checkpoint(
variables, checkpoint_path)
self.assertCountEqual(out_variables, variables)
def test_return_variables_available_in_checkpoint(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
with tf.Graph().as_default():
weight_variable = tf.Variable(1.0, name='weights')
global_step = tf.train.get_or_create_global_step()
graph1_variables = [
weight_variable,
global_step
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
with tf.Graph().as_default():
graph2_variables = graph1_variables + [tf.Variable(1.0, name='biases')]
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables, checkpoint_path, include_global_step=False)
self.assertCountEqual(out_variables, [weight_variable])
  def test_return_variables_available_in_checkpoint_with_dict_inputs(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
with tf.Graph().as_default():
graph1_variables = [
tf.Variable(1.0, name='ckpt_weights'),
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
with tf.Graph().as_default():
graph2_variables_dict = {
'ckpt_weights': tf.Variable(1.0, name='weights'),
'ckpt_biases': tf.Variable(1.0, name='biases')
}
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables_dict, checkpoint_path)
self.assertIsInstance(out_variables, dict)
self.assertCountEqual(list(out_variables.keys()), ['ckpt_weights'])
self.assertEqual(out_variables['ckpt_weights'].op.name, 'weights')
def test_return_variables_with_correct_sizes(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
with tf.Graph().as_default():
bias_variable = tf.Variable(3.0, name='biases')
global_step = tf.train.get_or_create_global_step()
graph1_variables = [
tf.Variable([[1.0, 2.0], [3.0, 4.0]], name='weights'),
bias_variable,
global_step
]
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(graph1_variables)
with self.test_session() as sess:
sess.run(init_op)
saver.save(sess, checkpoint_path)
with tf.Graph().as_default():
graph2_variables = [
tf.Variable([1.0, 2.0], name='weights'), # New variable shape.
bias_variable,
global_step
]
out_variables = variables_helper.get_variables_available_in_checkpoint(
graph2_variables, checkpoint_path, include_global_step=True)
self.assertCountEqual(out_variables, [bias_variable, global_step])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/variables_helper_tf1_test.py | variables_helper_tf1_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for manipulating Keras models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def extract_submodel(model, inputs, outputs, name=None):
"""Extracts a section of a Keras model into a new model.
This method walks an existing model from the specified outputs back to the
specified inputs in order to construct a new model containing only a portion
of the old model, while sharing the layers and weights with the original
model.
  WARNING: This method does not work for submodels containing layers that have
  been used multiple times in the original model, or in other models beyond
  the original model (e.g., it does not work for submodels containing layers
  with shared weights). This also means that multiple overlapping submodels
  cannot be extracted from the same model.
It also relies on recursion and will hit python's recursion limit for large
submodels.
Args:
model: The existing Keras model this method extracts a submodel from.
    inputs: The layer inputs in the existing model that start the submodel.
    outputs: The layer outputs in the existing model that should be output by
      the submodel.
    name: The name for the extracted model.
Returns:
The extracted submodel specified by the given inputs and outputs
"""
output_to_layer = {}
output_to_layer_input = {}
for layer in model.layers:
layer_output = layer.output
layer_inputs = layer.input
output_to_layer[layer_output.experimental_ref()] = layer
output_to_layer_input[layer_output.experimental_ref()] = layer_inputs
model_inputs_dict = {}
memoized_results = {}
  # Relies on recursion; Python's default recursion limit is comparatively low.
def _recurse_in_model(tensor):
"""Walk the existing model recursively to copy a submodel."""
if tensor.experimental_ref() in memoized_results:
return memoized_results[tensor.experimental_ref()]
if (tensor.experimental_ref() == inputs.experimental_ref()) or (
isinstance(inputs, list) and tensor in inputs):
if tensor.experimental_ref() not in model_inputs_dict:
model_inputs_dict[tensor.experimental_ref()] = tf.keras.layers.Input(
tensor=tensor)
out = model_inputs_dict[tensor.experimental_ref()]
else:
cur_inputs = output_to_layer_input[tensor.experimental_ref()]
cur_layer = output_to_layer[tensor.experimental_ref()]
if isinstance(cur_inputs, list):
out = cur_layer([_recurse_in_model(inp) for inp in cur_inputs])
else:
out = cur_layer(_recurse_in_model(cur_inputs))
memoized_results[tensor.experimental_ref()] = out
return out
if isinstance(outputs, list):
model_outputs = [_recurse_in_model(tensor) for tensor in outputs]
else:
model_outputs = _recurse_in_model(outputs)
if isinstance(inputs, list):
model_inputs = [
model_inputs_dict[tensor.experimental_ref()] for tensor in inputs
]
else:
model_inputs = model_inputs_dict[inputs.experimental_ref()]
return tf.keras.Model(inputs=model_inputs, outputs=model_outputs, name=name)
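# A minimal usage sketch (the model and layer names below are invented, not
# part of this library): build a small functional model, then extract the
# portion from the hidden activation onward as a new model that shares the
# original layer and its weights.
def _example_extract_submodel():  # Illustrative only.
  model_in = tf.keras.layers.Input(shape=(4,))
  hidden = tf.keras.layers.Dense(8, name='hidden')(model_in)
  head_out = tf.keras.layers.Dense(2, name='head')(hidden)
  model = tf.keras.Model(model_in, head_out)
  # The extracted submodel maps hidden activations to head outputs.
  return extract_submodel(model, inputs=hidden, outputs=head_out, name='head')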
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/model_util.py | model_util.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.utils import static_shape
get_dim_as_int = static_shape.get_dim_as_int
def _is_tensor(t):
"""Returns a boolean indicating whether the input is a tensor.
Args:
t: the input to be tested.
Returns:
a boolean that indicates whether t is a tensor.
"""
return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))
def _set_dim_0(t, d0):
"""Sets the 0-th dimension of the input tensor.
Args:
t: the input tensor, assuming the rank is at least 1.
d0: an integer indicating the 0-th dimension of the input tensor.
Returns:
the tensor t with the 0-th dimension set.
"""
t_shape = t.get_shape().as_list()
t_shape[0] = d0
t.set_shape(t_shape)
return t
def pad_tensor(t, length):
"""Pads the input tensor with 0s along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after padding, assuming length <= t.shape[0].
Returns:
padded_t: the padded tensor, whose first dimension is length. If the length
is an integer, the first dimension of padded_t is set to length
statically.
"""
# Computing the padding statically makes the operation work with XLA.
rank = len(t.get_shape())
paddings = [[0 for _ in range(2)] for _ in range(rank)]
t_d0 = tf.shape(t)[0]
if isinstance(length, int) or len(length.get_shape()) == 0: # pylint:disable=g-explicit-length-test
paddings[0][1] = length - t_d0
else:
paddings[0][1] = length[0] - t_d0
return tf.pad(t, paddings)
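# A short sketch (values are arbitrary): padding along the first dimension
# appends rows of zeros, here growing shape [2, 2] to [5, 2].
def _example_pad_tensor():  # Illustrative only.
  t = tf.constant([[1, 2], [3, 4]])
  return pad_tensor(t, 5)  # -> [[1, 2], [3, 4], [0, 0], [0, 0], [0, 0]]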
def clip_tensor(t, length):
"""Clips the input tensor along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after clipping, assuming length <= t.shape[0].
Returns:
clipped_t: the clipped tensor, whose first dimension is length. If the
length is an integer, the first dimension of clipped_t is set to length
statically.
"""
clipped_t = tf.gather(t, tf.range(length))
if not _is_tensor(length):
clipped_t = _set_dim_0(clipped_t, length)
return clipped_t
def pad_or_clip_tensor(t, length):
"""Pad or clip the input tensor along the first dimension.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after processing.
Returns:
processed_t: the processed tensor, whose first dimension is length. If the
length is an integer, the first dimension of the processed tensor is set
to length statically.
"""
return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])
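# Illustrative sketch (inputs invented): the same helper either zero-pads or
# clips along the first dimension, depending on whether `length` exceeds it.
def _example_pad_or_clip_tensor():  # Illustrative only.
  t = tf.range(6)
  padded = pad_or_clip_tensor(t, 8)   # -> [0, 1, 2, 3, 4, 5, 0, 0]
  clipped = pad_or_clip_tensor(t, 4)  # -> [0, 1, 2, 3]
  return padded, clipped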
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
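# A graph-mode (TF1-style) sketch; the placeholder shape is an assumption.
# Known dimensions stay as python ints while the unknown batch dimension
# becomes a scalar tensor, so a later tf.reshape keeps static information.
def _example_combined_shape():  # Illustrative only.
  images = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
  return combined_static_and_dynamic_shape(images)  # [<Tensor>, 64, 64, 3]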
def static_or_dynamic_map_fn(fn, elems, dtype=None,
parallel_iterations=32, back_prop=True):
"""Runs map_fn as a (static) for loop when possible.
This function rewrites the map_fn as an explicit unstack input -> for loop
over function calls -> stack result combination. This allows our graphs to
be acyclic when the batch size is static.
For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.
Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
with the default tf.map_fn function as it does not accept nested inputs (only
Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
Tensor or list of Tensors.
TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same structure as elems. Its output must have the
same structure as elems.
elems: A tensor or list of tensors, each of which will
be unpacked along their first dimension. The sequence of the
resulting slices will be applied to fn.
dtype: (optional) The output type(s) of fn. If fn returns a structure of
Tensors differing from the structure of elems, then dtype is not optional
and must have the same structure as the output of fn.
parallel_iterations: (optional) number of batch items to process in
parallel. This flag is only used if the native tf.map_fn is used
and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
back_prop: (optional) True enables support for back propagation.
This flag is only used if the native tf.map_fn is used.
Returns:
A tensor or sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Raises:
    ValueError: if `elems` is not a Tensor or a list of Tensors.
    ValueError: if `fn` does not return a Tensor or a list of Tensors.
"""
if isinstance(elems, list):
for elem in elems:
if not isinstance(elem, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elem_shapes = [elem.shape.as_list() for elem in elems]
# Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
# to all be the same size along the batch dimension.
for elem_shape in elem_shapes:
if (not elem_shape or not elem_shape[0]
or elem_shape[0] != elem_shapes[0][0]):
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
else:
if not isinstance(elems, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elems_shape = elems.shape.as_list()
if not elems_shape or not elems_shape[0]:
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
outputs = [fn(arg) for arg in tf.unstack(elems)]
# Stack `outputs`, which is a list of Tensors or list of lists of Tensors
if all([isinstance(output, tf.Tensor) for output in outputs]):
return tf.stack(outputs)
else:
if all([isinstance(output, list) for output in outputs]):
if all([all(
[isinstance(entry, tf.Tensor) for entry in output_list])
for output_list in outputs]):
return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
raise ValueError('`fn` should return a Tensor or a list of Tensors.')
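# Minimal sketch (the function and inputs are invented): with a static batch
# size of 3, the call unrolls into three applications of `fn` followed by a
# tf.stack, instead of emitting a tf.map_fn while-loop.
def _example_static_map_fn():  # Illustrative only.
  elems = tf.constant([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])
  return static_or_dynamic_map_fn(lambda x: x * 2.0, elems)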
def check_min_image_dim(min_dim, image_tensor):
"""Checks that the image width/height are greater than some number.
This function is used to check that the width and height of an image are above
a certain value. If the image shape is static, this function will perform the
check at graph construction time. Otherwise, if the image shape varies, an
Assertion control dependency will be added to the graph.
Args:
min_dim: The minimum number of pixels along the width and height of the
image.
image_tensor: The image tensor to check size for.
Returns:
If `image_tensor` has dynamic size, return `image_tensor` with a Assert
control dependency. Otherwise returns image_tensor.
Raises:
ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`.
"""
image_shape = image_tensor.get_shape()
image_height = static_shape.get_height(image_shape)
image_width = static_shape.get_width(image_shape)
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
['image size must be >= {} in both height and width.'.format(min_dim)])
with tf.control_dependencies([shape_assert]):
return tf.identity(image_tensor)
if image_height < min_dim or image_width < min_dim:
raise ValueError(
'image size must be >= %d in both height and width; image dim = %d,%d' %
(min_dim, image_height, image_width))
return image_tensor
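# A hedged sketch (sizes are arbitrary): with a fully static shape the check
# runs at graph-construction time; check_min_image_dim(64, image) would raise
# ValueError here rather than adding a runtime assert.
def _example_check_min_image_dim():  # Illustrative only.
  image = tf.zeros([1, 32, 32, 3])
  return check_min_image_dim(16, image)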
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
    if shape_a != shape_b:
      raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
    return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
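# Usage sketch (tensors invented): the shape lists typically come from
# combined_static_and_dynamic_shape. All-static, equal shapes reduce to a
# no-op; any dynamic dimension yields a runtime tf.assert_equal op instead.
def _example_assert_shape_equal():  # Illustrative only.
  a = tf.zeros([8, 4])
  b = tf.zeros([8, 4])
  return assert_shape_equal(combined_static_and_dynamic_shape(a),
                            combined_static_and_dynamic_shape(b))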
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
"""Asserts that shape_a and shape_b are the same along the 0th-dimension.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
    if shape_a[0] != shape_b[0]:
      raise ValueError('Unequal first dimension {}, {}'.format(
          shape_a[0], shape_b[0]))
    return tf.no_op()
else:
return tf.assert_equal(shape_a[0], shape_b[0])
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
"""Asserts the input box tensor is normalized.
Args:
boxes: a tensor of shape [N, 4] where N is the number of boxes.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1.
Returns:
    a tf.Assert op which fails at run time when the input box tensor is not
    normalized.
  """
box_minimum = tf.reduce_min(boxes)
box_maximum = tf.reduce_max(boxes)
return tf.Assert(
tf.logical_and(
tf.less_equal(box_maximum, maximum_normalized_coordinate),
tf.greater_equal(box_minimum, 0)),
[boxes])
def flatten_dimensions(inputs, first, last):
"""Flattens `K-d` tensor along [first, last) dimensions.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_dimensions(inputs, first=1, last=3)
new_tensor.shape -> [10, 100, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
first: first value for the range of dimensions to flatten.
last: last value for the range of dimensions to flatten. Note that the last
dimension itself is excluded.
Returns:
a tensor with shape
[D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
D(K-1)].
Raises:
ValueError: if first and last arguments are incorrect.
"""
if first >= inputs.shape.ndims or last > inputs.shape.ndims:
raise ValueError('`first` and `last` must be less than inputs.shape.ndims. '
'found {} and {} respectively while ndims is {}'.format(
first, last, inputs.shape.ndims))
shape = combined_static_and_dynamic_shape(inputs)
flattened_dim_prod = tf.reduce_prod(shape[first:last],
keepdims=True)
new_shape = tf.concat([shape[:first], flattened_dim_prod,
shape[last:]], axis=0)
return tf.reshape(inputs, new_shape)
def flatten_first_n_dimensions(inputs, n):
"""Flattens `K-d` tensor along first n dimension to be a `(K-n+1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_first_n_dimensions(inputs, 2)
new_tensor.shape -> [50, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
n: The number of dimensions to flatten.
Returns:
a tensor with shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
"""
return flatten_dimensions(inputs, first=0, last=n)
def expand_first_dimension(inputs, dims):
"""Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
Example:
`inputs` is a tensor with shape [50, 20, 20, 3].
new_tensor = expand_first_dimension(inputs, [10, 5]).
new_tensor.shape -> [10, 5, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
dims: List with new dimensions to expand first axis into. The length of
`dims` is typically 2 or larger.
Returns:
a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
"""
inputs_shape = combined_static_and_dynamic_shape(inputs)
expanded_shape = tf.stack(dims + inputs_shape[1:])
# Verify that it is possible to expand the first axis of inputs.
assert_op = tf.assert_equal(
inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
message=('First dimension of `inputs` cannot be expanded into provided '
'`dims`'))
with tf.control_dependencies([assert_op]):
inputs_reshaped = tf.reshape(inputs, expanded_shape)
return inputs_reshaped
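# Illustrative round trip (a sketch): `flatten_first_n_dimensions` and
# `expand_first_dimension` are inverses when `dims` matches the flattened
# leading shape, which is the common batch/time folding pattern.
#   x = tf.zeros([8, 5, 20, 20, 3])
#   flat = flatten_first_n_dimensions(x, 2)          # shape [40, 20, 20, 3]
#   restored = expand_first_dimension(flat, [8, 5])  # shape [8, 5, 20, 20, 3]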
def resize_images_and_return_shapes(inputs, image_resizer_fn):
"""Resizes images using the given function and returns their true shapes.
Args:
inputs: a float32 Tensor representing a batch of inputs of shape
[batch_size, height, width, channels].
image_resizer_fn: a function which takes in a single image and outputs
a resized image and its original shape.
Returns:
resized_inputs: The inputs resized according to image_resizer_fn.
    true_image_shapes: An integer tensor of shape [batch_size, 3]
      representing the height, width and number of channels in inputs.
"""
  if inputs.dtype != tf.float32:
raise ValueError('`resize_images_and_return_shapes` expects a'
' tf.float32 tensor')
# TODO(jonathanhuang): revisit whether to always use batch size as
# the number of parallel iterations vs allow for dynamic batching.
outputs = static_or_dynamic_map_fn(
image_resizer_fn,
elems=inputs,
dtype=[tf.float32, tf.int32])
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return resized_inputs, true_image_shapes
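# A minimal sketch of a compatible `image_resizer_fn` (hypothetical helper;
# real resizers are built elsewhere in this codebase). It must map a single
# [height, width, channels] image to (resized_image, true_shape) with an
# int32 true_shape, matching the dtypes passed to the map_fn above.
#   def _fixed_resizer(image):
#     resized = tf.image.resize_images(image, [64, 64])
#     return resized, tf.stack([64, 64, tf.shape(image)[2]])
#   resized, true_shapes = resize_images_and_return_shapes(
#       tf.zeros([2, 100, 80, 3], tf.float32), _fixed_resizer)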
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/shape_utils.py | shape_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.np_mask_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_mask_ops
class MaskOpsTests(tf.test.TestCase):
def setUp(self):
masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks1 = np.stack([masks1_0, masks1_1])
masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0]],
dtype=np.uint8)
masks2 = np.stack([masks2_0, masks2_1, masks2_2])
self.masks1 = masks1
self.masks2 = masks2
def testArea(self):
areas = np_mask_ops.area(self.masks1)
expected_areas = np.array([8.0, 10.0], dtype=np.float32)
self.assertAllClose(expected_areas, areas)
def testIntersection(self):
intersection = np_mask_ops.intersection(self.masks1, self.masks2)
expected_intersection = np.array(
[[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]], dtype=np.float32)
self.assertAllClose(intersection, expected_intersection)
def testIOU(self):
iou = np_mask_ops.iou(self.masks1, self.masks2)
expected_iou = np.array(
[[1.0, 0.0, 8.0/25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=np.float32)
self.assertAllClose(iou, expected_iou)
def testIOA(self):
ioa21 = np_mask_ops.ioa(self.masks1, self.masks2)
expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0],
[0.0, 9.0/15.0, 7.0/25.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_mask_ops_test.py | np_mask_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy BoxList classes and functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
class BoxList(object):
"""Box collection.
BoxList represents a list of bounding boxes as numpy array, where each
bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within a
given list correspond to a single image.
Optionally, users can add additional related fields (such as
objectness/classification scores).
"""
def __init__(self, data):
"""Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array.')
if len(data.shape) != 2 or data.shape[1] != 4:
raise ValueError('Invalid dimensions for box data.')
if data.dtype != np.float32 and data.dtype != np.float64:
raise ValueError('Invalid data type for box data: float is required.')
if not self._is_valid_boxes(data):
raise ValueError('Invalid box data. data must be a numpy array of '
'N*[y_min, x_min, y_max, x_max]')
self.data = {'boxes': data}
def num_boxes(self):
"""Return number of boxes held in collections."""
return self.data['boxes'].shape[0]
def get_extra_fields(self):
"""Return all non-box fields."""
return [k for k in self.data.keys() if k != 'boxes']
def has_field(self, field):
return field in self.data
def add_field(self, field, field_data):
"""Add data to a specified field.
Args:
      field: a string parameter used to specify a related field to be accessed.
field_data: a numpy array of [N, ...] representing the data associated
with the field.
Raises:
      ValueError: if the field already exists or the dimension of the field
        data does not match the number of boxes.
"""
if self.has_field(field):
      raise ValueError('Field ' + field + ' already exists')
if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data
def get(self):
"""Convenience function for accesssing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners
"""
return self.get_field('boxes')
def get_field(self, field):
"""Accesses data associated with the specified field in the box collection.
Args:
      field: a string parameter used to specify a related field to be accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field {} does not exist'.format(field))
return self.data[field]
def get_coordinates(self):
"""Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
"""
box_coordinates = self.get()
y_min = box_coordinates[:, 0]
x_min = box_coordinates[:, 1]
y_max = box_coordinates[:, 2]
x_max = box_coordinates[:, 3]
return [y_min, x_min, y_max, x_max]
def _is_valid_boxes(self, data):
"""Check whether data fullfills the format of N*[ymin, xmin, ymax, xmin].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater than
ymin, and all xmax of boxes are equal or greater than xmin.
"""
if data.shape[0] > 0:
for i in range(data.shape[0]):
if data[i, 0] > data[i, 2] or data[i, 1] > data[i, 3]:
return False
return True
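# Illustrative usage (a sketch):
#   boxlist = BoxList(np.array([[0.1, 0.1, 0.5, 0.5]], dtype=np.float32))
#   boxlist.add_field('scores', np.array([0.9], dtype=np.float32))
#   boxlist.num_boxes()          # -> 1
#   boxlist.get_extra_fields()   # -> ['scores']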
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_list.py | np_box_list.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of common learning rate schedules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
def _learning_rate_return_value(eager_decay_rate):
"""Helper function to return proper learning rate based on tf version."""
if tf.executing_eagerly():
return eager_decay_rate
else:
return eager_decay_rate()
def exponential_decay_with_burnin(global_step,
learning_rate_base,
learning_rate_decay_steps,
learning_rate_decay_factor,
burnin_learning_rate=0.0,
burnin_steps=0,
min_learning_rate=0.0,
staircase=True):
"""Exponential decay schedule with burn-in period.
In this schedule, learning rate is fixed at burnin_learning_rate
for a fixed period, before transitioning to a regular exponential
decay schedule.
Args:
global_step: int tensor representing global step.
learning_rate_base: base learning rate.
learning_rate_decay_steps: steps to take between decaying the learning rate.
Note that this includes the number of burn-in steps.
learning_rate_decay_factor: multiplicative factor by which to decay
learning rate.
burnin_learning_rate: initial learning rate during burn-in period. If
0.0 (which is the default), then the burn-in learning rate is simply
set to learning_rate_base.
burnin_steps: number of steps to use burnin learning rate.
min_learning_rate: the minimum learning rate.
    staircase: whether to use staircase decay.
Returns:
If executing eagerly:
returns a no-arg callable that outputs the (scalar)
float tensor learning rate given the current value of global_step.
If in a graph:
immediately returns a (scalar) float tensor representing learning rate.
"""
if burnin_learning_rate == 0:
burnin_learning_rate = learning_rate_base
def eager_decay_rate():
"""Callable to compute the learning rate."""
post_burnin_learning_rate = tf.train.exponential_decay(
learning_rate_base,
global_step - burnin_steps,
learning_rate_decay_steps,
learning_rate_decay_factor,
staircase=staircase)
if callable(post_burnin_learning_rate):
post_burnin_learning_rate = post_burnin_learning_rate()
return tf.maximum(tf.where(
tf.less(tf.cast(global_step, tf.int32), tf.constant(burnin_steps)),
tf.constant(burnin_learning_rate),
post_burnin_learning_rate), min_learning_rate, name='learning_rate')
return _learning_rate_return_value(eager_decay_rate)
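# Worked example (a sketch, graph mode): with learning_rate_base=0.01,
# learning_rate_decay_steps=1000, learning_rate_decay_factor=0.5,
# burnin_learning_rate=0.001 and burnin_steps=500, the rate is a constant
# 0.001 for steps [0, 500) and 0.01 * 0.5**floor((step - 500) / 1000)
# afterwards (staircase), floored at min_learning_rate.
#   global_step = tf.train.get_or_create_global_step()
#   lr = exponential_decay_with_burnin(
#       global_step, 0.01, 1000, 0.5,
#       burnin_learning_rate=0.001, burnin_steps=500)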
def exponential_decay_with_warmup(global_step,
learning_rate_base,
learning_rate_decay_steps,
learning_rate_decay_factor,
warmup_learning_rate=0.0,
warmup_steps=0,
min_learning_rate=0.0,
staircase=True):
"""Exponential decay schedule with warm up period.
Args:
global_step: int tensor representing global step.
learning_rate_base: base learning rate.
learning_rate_decay_steps: steps to take between decaying the learning rate.
Note that this includes the number of burn-in steps.
learning_rate_decay_factor: multiplicative factor by which to decay learning
rate.
warmup_learning_rate: initial learning rate during warmup period.
warmup_steps: number of steps to use warmup learning rate.
min_learning_rate: the minimum learning rate.
    staircase: whether to use staircase decay.
Returns:
If executing eagerly:
returns a no-arg callable that outputs the (scalar)
float tensor learning rate given the current value of global_step.
If in a graph:
immediately returns a (scalar) float tensor representing learning rate.
"""
def eager_decay_rate():
"""Callable to compute the learning rate."""
post_warmup_learning_rate = tf.train.exponential_decay(
learning_rate_base,
global_step - warmup_steps,
learning_rate_decay_steps,
learning_rate_decay_factor,
staircase=staircase)
if callable(post_warmup_learning_rate):
post_warmup_learning_rate = post_warmup_learning_rate()
if learning_rate_base < warmup_learning_rate:
      raise ValueError('learning_rate_base must be greater than or equal to '
                       'warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * tf.cast(global_step,
tf.float32) + warmup_learning_rate
learning_rate = tf.where(
tf.less(tf.cast(global_step, tf.int32), tf.constant(warmup_steps)),
warmup_rate,
tf.maximum(post_warmup_learning_rate, min_learning_rate),
name='learning_rate')
return learning_rate
return _learning_rate_return_value(eager_decay_rate)
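# Worked example (a sketch): with learning_rate_base=0.01,
# warmup_learning_rate=0.001 and warmup_steps=100, the rate ramps linearly as
# 0.001 + (0.01 - 0.001) / 100 * step for steps below 100 (e.g. 0.0055 at
# step 50), then follows the exponential decay, floored at min_learning_rate.
#   lr = exponential_decay_with_warmup(
#       global_step, 0.01, 1000, 0.5,
#       warmup_learning_rate=0.001, warmup_steps=100)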
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0):
"""Cosine decay schedule with warm up period.
Cosine annealing learning rate as described in:
Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.
ICLR 2017. https://arxiv.org/abs/1608.03983
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
Args:
global_step: int64 (scalar) tensor representing global step.
learning_rate_base: base learning rate.
total_steps: total number of training steps.
warmup_learning_rate: initial learning rate for warm up.
warmup_steps: number of warmup steps.
hold_base_rate_steps: Optional number of steps to hold base learning rate
before decaying.
Returns:
If executing eagerly:
returns a no-arg callable that outputs the (scalar)
float tensor learning rate given the current value of global_step.
If in a graph:
immediately returns a (scalar) float tensor representing learning rate.
Raises:
ValueError: if warmup_learning_rate is larger than learning_rate_base,
or if warmup_steps is larger than total_steps.
"""
if total_steps < warmup_steps:
    raise ValueError('total_steps must be greater than or equal to '
                     'warmup_steps.')
def eager_decay_rate():
"""Callable to compute the learning rate."""
learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(
np.pi *
(tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps
) / float(total_steps - warmup_steps - hold_base_rate_steps)))
if hold_base_rate_steps > 0:
learning_rate = tf.where(
global_step > warmup_steps + hold_base_rate_steps,
learning_rate, learning_rate_base)
if warmup_steps > 0:
if learning_rate_base < warmup_learning_rate:
        raise ValueError('learning_rate_base must be greater than or equal '
                         'to warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * tf.cast(global_step,
tf.float32) + warmup_learning_rate
learning_rate = tf.where(global_step < warmup_steps, warmup_rate,
learning_rate)
return tf.where(global_step > total_steps, 0.0, learning_rate,
name='learning_rate')
return _learning_rate_return_value(eager_decay_rate)
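# Worked example (a sketch): with learning_rate_base=0.01, total_steps=1000,
# warmup_learning_rate=0.001 and warmup_steps=100, the rate ramps linearly to
# 0.01 at step 100, then follows
# 0.5 * 0.01 * (1 + cos(pi * (step - 100) / 900)),
# giving about 0.005 at step 550 and 0.0 from step 1000 onwards.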
def manual_stepping(global_step, boundaries, rates, warmup=False):
"""Manually stepped learning rate schedule.
This function provides fine grained control over learning rates. One must
specify a sequence of learning rates as well as a set of integer steps
at which the current learning rate must transition to the next. For example,
if boundaries = [5, 10] and rates = [.1, .01, .001], then the learning
rate returned by this function is .1 for global_step=0,...,4, .01 for
global_step=5...9, and .001 for global_step=10 and onward.
Args:
global_step: int64 (scalar) tensor representing global step.
boundaries: a list of global steps at which to switch learning
rates. This list is assumed to consist of increasing positive integers.
rates: a list of (float) learning rates corresponding to intervals between
the boundaries. The length of this list must be exactly
len(boundaries) + 1.
warmup: Whether to linearly interpolate learning rate for steps in
[0, boundaries[0]].
Returns:
If executing eagerly:
returns a no-arg callable that outputs the (scalar)
float tensor learning rate given the current value of global_step.
If in a graph:
immediately returns a (scalar) float tensor representing learning rate.
Raises:
ValueError: if one of the following checks fails:
1. boundaries is a strictly increasing list of positive integers
2. len(rates) == len(boundaries) + 1
3. boundaries[0] != 0
"""
if any([b < 0 for b in boundaries]) or any(
[not isinstance(b, int) for b in boundaries]):
raise ValueError('boundaries must be a list of positive integers')
if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):
raise ValueError('Entries in boundaries must be strictly increasing.')
if any([not isinstance(r, float) for r in rates]):
raise ValueError('Learning rates must be floats')
if len(rates) != len(boundaries) + 1:
raise ValueError('Number of provided learning rates must exceed '
'number of boundary points by exactly 1.')
if boundaries and boundaries[0] == 0:
raise ValueError('First step cannot be zero.')
if warmup and boundaries:
slope = (rates[1] - rates[0]) * 1.0 / boundaries[0]
warmup_steps = list(range(boundaries[0]))
warmup_rates = [rates[0] + slope * step for step in warmup_steps]
boundaries = warmup_steps + boundaries
rates = warmup_rates + rates[1:]
else:
boundaries = [0] + boundaries
num_boundaries = len(boundaries)
def eager_decay_rate():
"""Callable to compute the learning rate."""
rate_index = tf.reduce_max(tf.where(
tf.greater_equal(global_step, boundaries),
list(range(num_boundaries)),
[0] * num_boundaries))
return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries),
name='learning_rate')
return _learning_rate_return_value(eager_decay_rate)
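# Illustrative usage (a sketch), mirroring the schedule described above:
#   lr = manual_stepping(global_step, boundaries=[5, 10],
#                        rates=[0.1, 0.01, 0.001])
#   # lr is 0.1 for steps 0..4, 0.01 for steps 5..9, 0.001 from step 10 on.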
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/learning_schedules.py | learning_schedules.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment util file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
import six
import tensorflow.compat.v1 as tf
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import image as contrib_image
from tensorflow.contrib import training as contrib_training
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
# Represents an invalid bounding box that is used for checking for padding
# lists of bounding box coordinates for a few augmentation operations
_INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
def policy_v1():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
[('Color', 0.0, 0), ('ShearX_Only_BBoxes', 0.8, 4)],
[('ShearY_Only_BBoxes', 0.8, 2), ('Flip_Only_BBoxes', 0.0, 10)],
[('Equalize', 0.6, 10), ('TranslateX_BBox', 0.2, 2)],
[('Color', 1.0, 10), ('TranslateY_Only_BBoxes', 0.4, 6)],
[('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)],
[('Cutout', 0.2, 2), ('Brightness', 0.8, 10)],
[('Color', 1.0, 6), ('Equalize', 1.0, 2)],
[('Cutout_Only_BBoxes', 0.4, 6), ('TranslateY_Only_BBoxes', 0.8, 2)],
[('Color', 0.2, 8), ('Rotate_BBox', 0.8, 10)],
[('Sharpness', 0.4, 4), ('TranslateY_Only_BBoxes', 0.0, 4)],
[('Sharpness', 1.0, 4), ('SolarizeAdd', 0.4, 4)],
[('Rotate_BBox', 1.0, 8), ('Sharpness', 0.2, 8)],
[('ShearY_BBox', 0.6, 10), ('Equalize_Only_BBoxes', 0.6, 8)],
[('ShearX_BBox', 0.2, 6), ('TranslateY_Only_BBoxes', 0.2, 10)],
[('SolarizeAdd', 0.6, 8), ('Brightness', 0.8, 10)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def policy_v2():
"""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Color', 0.0, 6), ('Cutout', 0.6, 8), ('Sharpness', 0.4, 8)],
[('Rotate_BBox', 0.4, 8), ('Sharpness', 0.4, 2),
('Rotate_BBox', 0.8, 10)],
[('TranslateY_BBox', 1.0, 8), ('AutoContrast', 0.8, 2)],
[('AutoContrast', 0.4, 6), ('ShearX_BBox', 0.8, 8),
('Brightness', 0.0, 10)],
[('SolarizeAdd', 0.2, 6), ('Contrast', 0.0, 10),
('AutoContrast', 0.6, 0)],
[('Cutout', 0.2, 0), ('Solarize', 0.8, 8), ('Color', 1.0, 4)],
[('TranslateY_BBox', 0.0, 4), ('Equalize', 0.6, 8),
('Solarize', 0.0, 10)],
[('TranslateY_BBox', 0.2, 2), ('ShearY_BBox', 0.8, 8),
('Rotate_BBox', 0.8, 8)],
[('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
[('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6), ('Rotate_BBox', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)],
[('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6), ('ShearY_BBox', 0.6, 8)],
[('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
('Brightness', 0.2, 2)],
[('TranslateY_BBox', 0.4, 8), ('Solarize', 0.4, 6),
('SolarizeAdd', 0.2, 10)],
[('Contrast', 1.0, 10), ('SolarizeAdd', 0.2, 8), ('Equalize', 0.2, 4)],
]
return policy
def policy_v3():
""""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Posterize', 0.8, 2), ('TranslateX_BBox', 1.0, 8)],
[('BBox_Cutout', 0.2, 10), ('Sharpness', 1.0, 8)],
[('Rotate_BBox', 0.6, 8), ('Rotate_BBox', 0.8, 10)],
[('Equalize', 0.8, 10), ('AutoContrast', 0.2, 10)],
[('SolarizeAdd', 0.2, 2), ('TranslateY_BBox', 0.2, 8)],
[('Sharpness', 0.0, 2), ('Color', 0.4, 8)],
[('Equalize', 1.0, 8), ('TranslateY_BBox', 1.0, 8)],
[('Posterize', 0.6, 2), ('Rotate_BBox', 0.0, 10)],
[('AutoContrast', 0.6, 0), ('Rotate_BBox', 1.0, 6)],
[('Equalize', 0.0, 4), ('Cutout', 0.8, 10)],
[('Brightness', 1.0, 2), ('TranslateY_BBox', 1.0, 6)],
[('Contrast', 0.0, 2), ('ShearY_BBox', 0.8, 0)],
[('AutoContrast', 0.8, 10), ('Contrast', 0.2, 10)],
[('Rotate_BBox', 1.0, 10), ('Cutout', 1.0, 10)],
[('SolarizeAdd', 0.8, 6), ('Equalize', 0.8, 8)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
    factor: A floating point value of 0.0 or above.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
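# Worked example: for a pixel with value 100 in image1 and 200 in image2,
# factor=0.5 interpolates to 150, while factor=1.5 extrapolates to
# 100 + 1.5 * (200 - 100) = 250; results beyond [0, 255] are clipped.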
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
  This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of
  the value `replace`. The location where the mask will be applied is
  chosen uniformly at random over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies the size of the zero mask applied to the image. The
      mask will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
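# Worked example: for a 100x100 image with pad_size=20 and a sampled center
# of (50, 50), the pads above are (30, 30, 30, 30), so the interior zeros
# block is 40x40 and rows/columns [30, 70) are filled with `replace`. A
# center near a border (e.g. (5, 5)) yields a zero pad on that side, so the
# patch is clipped at the image boundary.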
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
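# Worked example: with bits=4 the shift is 4, so pixel value 183 (0b10110111)
# becomes 176 (0b10110000); only the top 4 bits of each channel survive.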
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
def random_shift_bbox(image, bbox, pixel_scaling, replace,
new_min_bbox_coords=None):
"""Move the bbox and the image content to a slightly new random location.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
      The potential values for the new min corner of the bbox will be between
      [old_min - pixel_scaling * bbox_height/2,
       old_min + pixel_scaling * bbox_height/2].
pixel_scaling: A float between 0 and 1 that specifies the pixel range
that the new bbox location will be sampled from.
replace: A one or three value 1D tensor to fill empty pixels.
new_min_bbox_coords: If not None, then this is a tuple that specifies the
(min_y, min_x) coordinates of the new bbox. Normally this is randomly
specified, but this allows it to be manually set. The coordinates are
the absolute coordinates between 0 and image height/width and are int32.
Returns:
The new image that will have the shifted bbox location in it along with
the new bbox that contains the new coordinates.
"""
# Obtains image height and width and create helper clip functions.
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
def clip_y(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_height) - 1)
def clip_x(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_width) - 1)
# Convert bbox to pixel coordinates.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = clip_y(tf.to_int32(image_height * bbox[2]))
max_x = clip_x(tf.to_int32(image_width * bbox[3]))
bbox_height, bbox_width = (max_y - min_y + 1, max_x - min_x + 1)
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Select the new min/max bbox ranges that are used for sampling the
# new min x/y coordinates of the shifted bbox.
minval_y = clip_y(
min_y - tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
maxval_y = clip_y(
min_y + tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
minval_x = clip_x(
min_x - tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
maxval_x = clip_x(
min_x + tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
# Sample and calculate the new unclipped min/max coordinates of the new bbox.
if new_min_bbox_coords is None:
unclipped_new_min_y = tf.random_uniform(
shape=[], minval=minval_y, maxval=maxval_y,
dtype=tf.int32)
unclipped_new_min_x = tf.random_uniform(
shape=[], minval=minval_x, maxval=maxval_x,
dtype=tf.int32)
else:
unclipped_new_min_y, unclipped_new_min_x = (
clip_y(new_min_bbox_coords[0]), clip_x(new_min_bbox_coords[1]))
unclipped_new_max_y = unclipped_new_min_y + bbox_height - 1
unclipped_new_max_x = unclipped_new_min_x + bbox_width - 1
# Determine if any of the new bbox was shifted outside the current image.
# This is used for determining if any of the original bbox content should be
# discarded.
new_min_y, new_min_x, new_max_y, new_max_x = (
clip_y(unclipped_new_min_y), clip_x(unclipped_new_min_x),
clip_y(unclipped_new_max_y), clip_x(unclipped_new_max_x))
shifted_min_y = (new_min_y - unclipped_new_min_y) + min_y
shifted_max_y = max_y - (unclipped_new_max_y - new_max_y)
shifted_min_x = (new_min_x - unclipped_new_min_x) + min_x
shifted_max_x = max_x - (unclipped_new_max_x - new_max_x)
# Create the new bbox tensor by converting pixel integer values to floats.
new_bbox = tf.stack([
tf.to_float(new_min_y) / tf.to_float(image_height),
tf.to_float(new_min_x) / tf.to_float(image_width),
tf.to_float(new_max_y) / tf.to_float(image_height),
tf.to_float(new_max_x) / tf.to_float(image_width)])
# Copy the contents in the bbox and fill the old bbox location
# with gray (128).
bbox_content = image[shifted_min_y:shifted_max_y + 1,
shifted_min_x:shifted_max_x + 1, :]
def mask_and_add_image(
min_y_, min_x_, max_y_, max_x_, mask, content_tensor, image_):
"""Applies mask to bbox region in image then adds content_tensor to it."""
mask = tf.pad(mask,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=1)
content_tensor = tf.pad(content_tensor,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=0)
return image_ * mask + content_tensor
# Zero out original bbox location.
mask = tf.zeros_like(image)[min_y:max_y+1, min_x:max_x+1, :]
grey_tensor = tf.zeros_like(mask) + replace[0]
image = mask_and_add_image(min_y, min_x, max_y, max_x, mask,
grey_tensor, image)
# Fill in bbox content to new bbox location.
mask = tf.zeros_like(bbox_content)
image = mask_and_add_image(new_min_y, new_min_x, new_max_y, new_max_x, mask,
bbox_content, image)
return image, new_bbox
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
"""Adjusts bbox coordinates to make sure the area is > 0.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
delta: Float, this is used to create a gap of size 2 * delta between
bbox min/max coordinates that are the same on the boundary.
This prevents the bbox from having an area of zero.
Returns:
Tuple of new bbox coordinates between 0 and 1 that will now have a
guaranteed area > 0.
"""
height = max_y - min_y
width = max_x - min_x
def _adjust_bbox_boundaries(min_coord, max_coord):
# Make sure max is never 0 and min is never 1.
max_coord = tf.maximum(max_coord, 0.0 + delta)
min_coord = tf.minimum(min_coord, 1.0 - delta)
return min_coord, max_coord
min_y, max_y = tf.cond(tf.equal(height, 0.0),
lambda: _adjust_bbox_boundaries(min_y, max_y),
lambda: (min_y, max_y))
min_x, max_x = tf.cond(tf.equal(width, 0.0),
lambda: _adjust_bbox_boundaries(min_x, max_x),
lambda: (min_x, max_x))
return min_y, min_x, max_y, max_x
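# Worked example: a degenerate box pinned to the bottom edge with
# min_y == max_y == 1.0 becomes (min_y, max_y) = (0.95, 1.0) for the default
# delta of 0.05, so downstream ops never see a zero-height box there.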
def _scale_bbox_only_op_probability(prob):
"""Reduce the probability of the bbox-only operation.
Probability is reduced so that we do not distort the content of too many
  bounding boxes that are close to each other. The value of 3.0 was a
  hyperparameter chosen when designing the autoaugment algorithm that we
  found empirically to work well.
Args:
prob: Float that is the probability of applying the bbox-only operation.
Returns:
Reduced probability.
"""
return prob / 3.0
def _apply_bbox_augmentation(image, bbox, augmentation_func, *args):
"""Applies augmentation_func to the subsection of image indicated by bbox.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
    A modified version of image, where the bbox location in the image will
    have `augmentation_func` applied to it.
"""
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Clip to be sure the max values do not fall out of range.
max_y = tf.minimum(max_y, image_height - 1)
max_x = tf.minimum(max_x, image_width - 1)
# Get the sub-tensor that is the image within the bounding box region.
bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
# Apply the augmentation function to the bbox portion of the image.
augmented_bbox_content = augmentation_func(bbox_content, *args)
# Pad the augmented_bbox_content and the mask to match the shape of original
# image.
augmented_bbox_content = tf.pad(augmented_bbox_content,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]])
# Create a mask that will be used to zero out a part of the original image.
mask_tensor = tf.zeros_like(bbox_content)
mask_tensor = tf.pad(mask_tensor,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]],
constant_values=1)
# Replace the old bbox content with the new augmented content.
image = image * mask_tensor + augmented_bbox_content
return image
def _concat_bbox(bbox, bboxes):
"""Helper function that concates bbox to bboxes along the first dimension."""
# Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
# we discard bboxes and start the bboxes Tensor with the current bbox.
bboxes_sum_check = tf.reduce_sum(bboxes)
bbox = tf.expand_dims(bbox, 0)
# This check will be true when it is an _INVALID_BOX
bboxes = tf.cond(tf.equal(bboxes_sum_check, -4.0),
lambda: bbox,
lambda: tf.concat([bboxes, bbox], 0))
return bboxes
def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
augmentation_func, func_changes_bbox,
*args):
"""Applies _apply_bbox_augmentation with probability prob.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
have been altered by aug_func. These will only be changed when
func_changes_bbox is set to true. Each bbox has 4 elements
(min_y, min_x, max_y, max_x) of type float that are the normalized
bbox coordinates between 0 and 1.
prob: Float that is the probability of applying _apply_bbox_augmentation.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
    A tuple. The first element is a modified version of image, where the bbox
location in the image will have augmentation_func applied to it if it is
chosen to be called with probability `prob`. The second element is a
Tensor of Tensors of length 4 that will contain the altered bbox after
applying augmentation_func.
"""
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
if func_changes_bbox:
augmented_image, bbox = tf.cond(
should_apply_op,
lambda: augmentation_func(image, bbox, *args),
lambda: (image, bbox))
else:
augmented_image = tf.cond(
should_apply_op,
lambda: _apply_bbox_augmentation(image, bbox, augmentation_func, *args),
lambda: image)
new_bboxes = _concat_bbox(bbox, new_bboxes)
return augmented_image, new_bboxes
def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Applies aug_func to the image for each bbox in bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
prob: Float that is the probability of applying aug_func to a specific
bounding box within the image.
aug_func: Augmentation function that will be applied to the
subsections of image indicated by the bbox values in bboxes.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where each bbox location in the image will
have augmentation_func applied to it if it is chosen to be called with
probability prob independently across all bboxes. Also the final
bboxes are returned that will be unchanged if func_changes_bbox is set to
false and if true, the new altered ones will be returned.
"""
  # Will keep track of the new altered bboxes after aug_func is repeatedly
  # applied. The -1 values are dummy values and this first Tensor will be
  # replaced upon appending the first real bbox.
new_bboxes = tf.constant(_INVALID_BOX)
# If the bboxes are empty, then just give it _INVALID_BOX. The result
# will be thrown away.
bboxes = tf.cond(tf.equal(tf.size(bboxes), 0),
lambda: tf.constant(_INVALID_BOX),
lambda: bboxes)
bboxes = tf.ensure_shape(bboxes, (None, 4))
# pylint:disable=g-long-lambda
# pylint:disable=line-too-long
wrapped_aug_func = lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper(
_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)
# pylint:enable=g-long-lambda
# pylint:enable=line-too-long
# Setup the while_loop.
num_bboxes = tf.shape(bboxes)[0] # We loop until we go over all bboxes.
idx = tf.constant(0) # Counter for the while loop.
# Conditional function when to end the loop once we go over all bboxes
# images_and_bboxes contain (_image, _new_bboxes)
cond = lambda _idx, _images_and_bboxes: tf.less(_idx, num_bboxes)
# Shuffle the bboxes so that the augmentation order is not deterministic if
# we are not changing the bboxes with aug_func.
if not func_changes_bbox:
loop_bboxes = tf.random.shuffle(bboxes)
else:
loop_bboxes = bboxes
# Main function of while_loop where we repeatedly apply augmentation on the
# bboxes in the image.
# pylint:disable=g-long-lambda
body = lambda _idx, _images_and_bboxes: [
_idx + 1, wrapped_aug_func(_images_and_bboxes[0],
loop_bboxes[_idx],
_images_and_bboxes[1])]
# pylint:enable=g-long-lambda
_, (image, new_bboxes) = tf.while_loop(
cond, body, [idx, (image, new_bboxes)],
shape_invariants=[idx.get_shape(),
(image.get_shape(), tf.TensorShape([None, 4]))])
# Either return the altered bboxes or the original ones depending on if
# we altered them in anyway.
if func_changes_bbox:
final_bboxes = new_bboxes
else:
final_bboxes = bboxes
return image, final_bboxes
def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Checks to be sure num bboxes > 0 before calling inner function."""
num_bboxes = tf.shape(bboxes)[0]
image, bboxes = tf.cond(
tf.equal(num_bboxes, 0),
lambda: (image, bboxes),
# pylint:disable=g-long-lambda
lambda: _apply_multi_bbox_augmentation(
image, bboxes, prob, aug_func, func_changes_bbox, *args))
# pylint:enable=g-long-lambda
return image, bboxes
def rotate_only_bboxes(image, bboxes, prob, degrees, replace):
"""Apply rotate to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, rotate, func_changes_bbox, degrees, replace)
def shear_x_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_x, func_changes_bbox, level, replace)
def shear_y_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_y, func_changes_bbox, level, replace)
def translate_x_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_x, func_changes_bbox, pixels, replace)
def translate_y_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace)
def flip_only_bboxes(image, bboxes, prob):
"""Apply flip_lr to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, tf.image.flip_left_right, func_changes_bbox)
def solarize_only_bboxes(image, bboxes, prob, threshold):
"""Apply solarize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, solarize, func_changes_bbox, threshold)
def equalize_only_bboxes(image, bboxes, prob):
"""Apply equalize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, equalize, func_changes_bbox)
def cutout_only_bboxes(image, bboxes, prob, pad_size, replace):
"""Apply cutout to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, cutout, func_changes_bbox, pad_size, replace)
def _rotate_bbox(bbox, image_height, image_width, degrees):
"""Rotates the bbox coordinated by degrees.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
Returns:
A tensor of the same shape as bbox, but now with the rotated coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# Translate the bbox to the center of the image and turn the normalized 0-1
# coordinates to absolute pixel locations.
# Y coordinates are made negative as the y axis of images goes down with
# increasing pixel values, so we negate to make sure x axis and y axis points
# are in the traditionally positive direction.
min_y = -tf.to_int32(image_height * (bbox[0] - 0.5))
min_x = tf.to_int32(image_width * (bbox[1] - 0.5))
max_y = -tf.to_int32(image_height * (bbox[2] - 0.5))
max_x = tf.to_int32(image_width * (bbox[3] - 0.5))
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Rotate the coordinates according to the rotation matrix clockwise if
# radians is positive, else negative
rotation_matrix = tf.stack(
[[tf.cos(radians), tf.sin(radians)],
[-tf.sin(radians), tf.cos(radians)]])
new_coords = tf.cast(
tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to normalized 0-1 floats.
min_y = -(tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height - 0.5)
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width + 0.5
max_y = -(tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height - 0.5)
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width + 0.5
  # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def rotate_with_bboxes(image, bboxes, degrees, replace):
"""Equivalent of PIL Rotate that rotates the image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of rotating
image by degrees. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the rotated image.
"""
# Rotate the image.
image = rotate(image, degrees, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_rotate_bbox = lambda bbox: _rotate_bbox(
bbox, image_height, image_width, degrees)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_rotate_bbox, bboxes)
return image, bboxes
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
"""Shifts the bbox coordinates by pixels.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
pixels: An int. How many pixels to shift the bbox.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
pixels = tf.to_int32(pixels)
# Convert bbox to integer pixel locations.
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
if shift_horizontal:
min_x = tf.maximum(0, min_x - pixels)
max_x = tf.minimum(image_width, max_x - pixels)
else:
min_y = tf.maximum(0, min_y - pixels)
max_y = tf.minimum(image_height, max_y - pixels)
# Convert bbox back to floats.
min_y = tf.to_float(min_y) / tf.to_float(image_height)
min_x = tf.to_float(min_x) / tf.to_float(image_width)
max_y = tf.to_float(max_y) / tf.to_float(image_height)
max_x = tf.to_float(max_x) / tf.to_float(image_width)
  # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def translate_bbox(image, bboxes, pixels, replace, shift_horizontal):
"""Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    pixels: An int. How many pixels to shift the image and bboxes.
replace: A one or three value 1D tensor to fill empty pixels.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of translating
image by pixels. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the shifted image.
"""
if shift_horizontal:
image = translate_x(image, pixels, replace)
else:
image = translate_y(image, pixels, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shift_bbox = lambda bbox: _shift_bbox(
bbox, image_height, image_width, pixels, shift_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
return image, bboxes
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
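# Note on the 8-parameter vectors above (an informal sketch of the
# contrib_image.transform convention): [a0, a1, a2, b0, b1, b2, c0, c1] maps
# an output point (x, y) to the input point
# ((a0*x + a1*y + a2) / k, (b0*x + b1*y + b2) / k) with k = c0*x + c1*y + 1,
# so [1., level, 0., 0., 1., 0., 0., 0.] samples input_x = x + level * y,
# i.e. a shear parallel to the x axis.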
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
"""Shifts the bbox according to how the image was sheared.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
level: Float. How much to shear the image.
shear_horizontal: If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Change bbox coordinates to be pixels.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Shear the coordinates according to the translation matrix.
if shear_horizontal:
translation_matrix = tf.stack(
[[1, 0], [-level, 1]])
else:
translation_matrix = tf.stack(
[[1, -level], [0, 1]])
translation_matrix = tf.cast(translation_matrix, tf.float32)
new_coords = tf.cast(
tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to floats.
min_y = tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width
max_y = tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width
  # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
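# Illustrative comment (not part of the original module): a worked example of
# the corner mapping above. With shear_horizontal=True the matrix
# [[1, 0], [-level, 1]] sends a corner (y, x) to (y, x - level * y), so for
# level=0.2 a corner at (y=100, x=50) lands at x = 50 - 0.2 * 100 = 30 while
# y is unchanged. Taking the min/max over the four sheared corners then gives
# the new axis-aligned bbox before clipping.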
def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal):
"""Applies Shear Transformation to the image and shifts the bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    level: Float. How much to shear the image. This value will be between
      -0.3 and 0.3.
replace: A one or three value 1D tensor to fill empty pixels.
shear_horizontal: Boolean. If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of shearing
image by level. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the sheared image.
"""
if shear_horizontal:
image = shear_x(image, level, replace)
else:
image = shear_y(image, level, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shear_bbox = lambda bbox: _shear_bbox(
bbox, image_height, image_width, level, shear_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
return image, bboxes
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
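# Illustrative comment (not part of the original module): autocontrast is a
# per-channel linear rescale mapping lo -> 0 and hi -> 255. For a channel
# with lo=50 and hi=200: scale = 255 / 150 = 1.7, offset = -50 * 1.7 = -85,
# so a pixel of value 125 maps to 125 * 1.7 - 85 = 127.5 before the clip and
# the uint8 cast.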
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
      # Compute the cumulative sum, shifted by step // 2
      # and then normalized by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
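# Illustrative comment (not part of the original module): in equalize, `step`
# is (total pixel count - count of the last occupied bin) // 255. For a
# 100x100 channel whose highest occupied bin holds 500 pixels this gives
# step = (10000 - 500) // 255 = 37; each LUT entry is then the shifted
# cumulative histogram divided by that step, clipped to [0, 255].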
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
  Where the last channel is 0 at a spatial position, the other three
  channels at that position are filled with the `replace` value (typically
  128). Operations like translate and shear on a wrapped Tensor will leave
  0s in empty locations. Some transformations look at the intensity of
  values to do preprocessing, and we want these empty pixels to assume the
  'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
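# Illustrative sketch (not part of the original module) of how wrap/unwrap
# cooperate with a spatial op: the alpha channel added by wrap() stays 1
# wherever real content lands and remains 0 in the vacated region, which
# unwrap() then fills with `replace`. A minimal usage sketch, assuming the
# module-level contrib_image import:
#
#   padded = wrap(image)                                  # [H, W, 4]
#   shifted = contrib_image.translate(padded, [10, 0])    # vacates a strip
#   image = unwrap(shifted, tf.constant([128, 128, 128], tf.uint8))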
def _cutout_inside_bbox(image, bbox, pad_fraction):
"""Generates cutout mask and the mean pixel value of the bbox.
  First a location is randomly chosen within the bbox as the center where the
cutout mask will be applied. Note this can be towards the boundaries of the
image, so the full cutout mask may not be applied.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
    pad_fraction: Float that specifies how large the cutout mask should be in
      reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
Returns:
    A tuple. First element is a tensor of the same shape as image where each
element is either a 1 or 0 that is used to determine where the image
will have cutout applied. The second element is the mean of the pixels
in the image where the bbox is located.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Transform from shape [1, 4] to [4].
bbox = tf.squeeze(bbox)
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
# Calculate the mean pixel values in the bounding box, which will be used
# to fill the cutout region.
mean = tf.reduce_mean(image[min_y:max_y + 1, min_x:max_x + 1],
reduction_indices=[0, 1])
  # Cutout mask will be size pad_size_height * 2 by pad_size_width * 2 if the
# region lies entirely within the bbox.
box_height = max_y - min_y + 1
box_width = max_x - min_x + 1
pad_size_height = tf.to_int32(pad_fraction * (box_height / 2))
pad_size_width = tf.to_int32(pad_fraction * (box_width / 2))
  # Sample the center location within the bbox where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=min_y, maxval=max_y+1,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=min_x, maxval=max_x+1,
dtype=tf.int32)
lower_pad = tf.maximum(
0, cutout_center_height - pad_size_height)
upper_pad = tf.maximum(
0, image_height - cutout_center_height - pad_size_height)
left_pad = tf.maximum(
0, cutout_center_width - pad_size_width)
right_pad = tf.maximum(
0, image_width - cutout_center_width - pad_size_width)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, 2)
mask = tf.tile(mask, [1, 1, 3])
return mask, mean
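# Illustrative comment (not part of the original module): a worked example of
# the padding arithmetic above. For a 100x100 image, pad sizes (15, 15) and a
# sampled center at (y=20, x=90): lower_pad = max(0, 20 - 15) = 5,
# upper_pad = max(0, 100 - 20 - 15) = 65, left_pad = 75, right_pad = 0, so
# the zero block has shape [30, 25]; the mask is clipped to 25 columns by
# the right image border instead of the full 2 * 15 = 30.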
def bbox_cutout(image, bboxes, pad_fraction, replace_with_mean):
"""Applies cutout to the image according to bbox information.
  This is a cutout variant that uses bbox information to make more informed
decisions on where to place the cutout mask.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    pad_fraction: Float that specifies how large the cutout mask should be in
      reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
    replace_with_mean: Boolean that specifies what value should be filled in
where the cutout mask is applied. Since the incoming image will be of
uint8 and will not have had any mean normalization applied, by default
we set the value to be 128. If replace_with_mean is True then we find
the mean pixel values across the channel dimension and use those to fill
in where the cutout mask is applied.
Returns:
A tuple. First element is a tensor of the same shape as image that has
cutout applied to it. Second element is the bboxes that were passed in
that will be unchanged.
"""
def apply_bbox_cutout(image, bboxes, pad_fraction):
"""Applies cutout to a single bounding box within image."""
# Choose a single bounding box to apply cutout to.
random_index = tf.random_uniform(
shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
# Select the corresponding bbox and apply cutout.
chosen_bbox = tf.gather(bboxes, random_index)
mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
# When applying cutout we either set the pixel value to 128 or to the mean
# value inside the bbox.
replace = mean if replace_with_mean else 128
# Apply the cutout mask to the image. Where the mask is 0 we fill it with
# `replace`.
image = tf.where(
tf.equal(mask, 0),
tf.cast(tf.ones_like(image, dtype=image.dtype) * replace,
dtype=image.dtype),
image)
return image
  # Check to see if there are boxes; if so, apply bbox cutout.
image = tf.cond(tf.equal(tf.size(bboxes), 0), lambda: image,
lambda: apply_bbox_cutout(image, bboxes, pad_fraction))
return image, bboxes
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'Cutout': cutout,
'BBox_Cutout': bbox_cutout,
'Rotate_BBox': rotate_with_bboxes,
# pylint:disable=g-long-lambda
'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=True),
'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=False),
'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=True),
'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=False),
# pylint:enable=g-long-lambda
'Rotate_Only_BBoxes': rotate_only_bboxes,
'ShearX_Only_BBoxes': shear_x_only_bboxes,
'ShearY_Only_BBoxes': shear_y_only_bboxes,
'TranslateX_Only_BBoxes': translate_x_only_bboxes,
'TranslateY_Only_BBoxes': translate_y_only_bboxes,
'Flip_Only_BBoxes': flip_only_bboxes,
'Solarize_Only_BBoxes': solarize_only_bboxes,
'Equalize_Only_BBoxes': equalize_only_bboxes,
'Cutout_Only_BBoxes': cutout_only_bboxes,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _bbox_cutout_level_to_arg(level, hparams):
cutout_pad_fraction = (level/_MAX_LEVEL) * hparams.cutout_max_pad_fraction
return (cutout_pad_fraction,
hparams.cutout_bbox_replace_with_mean)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'BBox_Cutout': lambda level: _bbox_cutout_level_to_arg(
level, hparams),
'TranslateX_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
'ShearX_BBox': _shear_level_to_arg,
'ShearY_BBox': _shear_level_to_arg,
'Rotate_BBox': _rotate_level_to_arg,
'Rotate_Only_BBoxes': _rotate_level_to_arg,
'ShearX_Only_BBoxes': _shear_level_to_arg,
'ShearY_Only_BBoxes': _shear_level_to_arg,
# pylint:disable=g-long-lambda
'TranslateX_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
'TranslateY_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
# pylint:enable=g-long-lambda
'Flip_Only_BBoxes': lambda level: (),
'Solarize_Only_BBoxes': lambda level: (int((level/_MAX_LEVEL) * 256),),
'Equalize_Only_BBoxes': lambda level: (),
# pylint:disable=g-long-lambda
'Cutout_Only_BBoxes': lambda level: (
int((level/_MAX_LEVEL) * hparams.cutout_bbox_const),),
# pylint:enable=g-long-lambda
}
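# Illustrative comment (not part of the original module), assuming the
# module-level _MAX_LEVEL of 10 defined earlier in this file: a magnitude of
# level=5 maps to a rotation argument of (5 / 10) * 30 = 15 degrees (sign
# randomized by _randomly_negate_tensor), a shear argument of
# (5 / 10) * 0.3 = 0.15, and, with translate_const=250, a translation of
# (5 / 10) * 250 = 125 pixels.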
def bbox_wrapper(func):
"""Adds a bboxes function argument to func and returns unchanged bboxes."""
def wrapper(images, bboxes, *args, **kwargs):
return (func(images, *args, **kwargs), bboxes)
return wrapper
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
if six.PY2:
# pylint: disable=deprecated-method
arg_spec = inspect.getargspec(func)
# pylint: enable=deprecated-method
else:
arg_spec = inspect.getfullargspec(func)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in arg_spec[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
if 'replace' in arg_spec[0]:
# Make sure replace is the final argument
assert 'replace' == arg_spec[0][-1]
args = tuple(list(args) + [replace_value])
# Add bboxes as the second positional argument for the function if it does
# not already exist.
if 'bboxes' not in arg_spec[0]:
func = bbox_wrapper(func)
return (func, prob, args)
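# Illustrative comment (not part of the original module): parsing the policy
# tuple ('TranslateX_BBox', 0.6, 4) with replace_value=[128, 128, 128]
# returns the TranslateX_BBox lambda from NAME_TO_FUNC, the probability 0.6,
# and args (pixels, [128, 128, 128]), where pixels comes from
# _translate_level_to_arg(4, hparams.translate_const) and replace_value is
# appended because 'replace' is the lambda's final argument.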
def _apply_func_with_prob(func, image, args, prob, bboxes):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
if six.PY2:
# pylint: disable=deprecated-method
arg_spec = inspect.getargspec(func)
# pylint: enable=deprecated-method
else:
arg_spec = inspect.getfullargspec(func)
assert 'bboxes' == arg_spec[0][1]
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
if 'prob' in arg_spec[0]:
prob = 1.0
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image, augmented_bboxes = tf.cond(
should_apply_op,
lambda: func(image, bboxes, *args),
lambda: (image, bboxes))
return augmented_image, augmented_bboxes
def select_and_apply_random_policy(policies, image, bboxes):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image, bboxes = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image, bboxes),
lambda: (image, bboxes))
return (image, bboxes)
def build_and_apply_nas_policy(policies, image, bboxes,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
    bboxes: tf.Tensor of shape [N, 4] representing ground truth boxes,
      normalized between [0, 1], that the policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function. Additionally, returns bboxes if
    a value for them is passed in that is not None.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_, bboxes_):
for func, prob, args in tf_policy_:
image_, bboxes_ = _apply_func_with_prob(
func, image_, args, prob, bboxes_)
return image_, bboxes_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image, augmented_bbox = select_and_apply_random_policy(
tf_policies, image, bboxes)
  # Return the augmented image together with the augmented boxes.
return (augmented_image, augmented_bbox)
# TODO(barretzoph): Add in ArXiv link once paper is out.
def distort_image_with_autoaugment(image, bboxes, augmentation_name):
"""Applies the AutoAugment policy to `image` and `bboxes`.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
bboxes: `Tensor` of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_name: The name of the AutoAugment policy to use. The available
options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
all of the results in the paper and was found to achieve the best results
on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
found on the COCO dataset that have slight variation in what operations
were used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
Returns:
A tuple containing the augmented versions of `image` and `bboxes`.
"""
image = tf.cast(image, tf.uint8)
available_policies = {'v0': policy_v0, 'v1': policy_v1, 'v2': policy_v2,
'v3': policy_v3, 'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = contrib_training.HParams(
cutout_max_pad_fraction=0.75,
cutout_bbox_replace_with_mean=False,
cutout_const=100,
translate_const=250,
cutout_bbox_const=50,
translate_bbox_const=120)
augmented_image, augmented_bbox = (
build_and_apply_nas_policy(policy, image, bboxes, augmentation_hparams))
augmented_image = tf.cast(augmented_image, tf.float32)
return augmented_image, augmented_bbox
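# Illustrative usage sketch (not part of the original module), relying on the
# module-level TF import:
#
#   image = tf.random_uniform([480, 640, 3], maxval=256, dtype=tf.float32)
#   boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)
#   aug_image, aug_boxes = distort_image_with_autoaugment(image, boxes, 'v0')
#
# The image is cast to uint8 internally and returned as float32; the boxes
# stay normalized to [0, 1].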
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/autoaugment_utils.py | autoaugment_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.static_shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.utils import static_shape
class StaticShapeTest(tf.test.TestCase):
def test_return_correct_batchSize(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
self.assertEqual(32, static_shape.get_batch_size(tensor_shape))
def test_return_correct_height(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
self.assertEqual(299, static_shape.get_height(tensor_shape))
def test_return_correct_width(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
self.assertEqual(384, static_shape.get_width(tensor_shape))
def test_return_correct_depth(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
self.assertEqual(3, static_shape.get_depth(tensor_shape))
def test_die_on_tensor_shape_with_rank_three(self):
tensor_shape = tf.TensorShape(dims=[32, 299, 384])
with self.assertRaises(ValueError):
static_shape.get_batch_size(tensor_shape)
static_shape.get_height(tensor_shape)
static_shape.get_width(tensor_shape)
static_shape.get_depth(tensor_shape)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/static_shape_test.py | static_shape_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class AreaRelatedTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist1 = np_box_list.BoxList(boxes1)
self.boxlist2 = np_box_list.BoxList(boxes2)
def test_area(self):
areas = np_box_list_ops.area(self.boxlist1)
expected_areas = np.array([6.0, 5.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def test_intersection(self):
intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def test_iou(self):
iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
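    # Worked arithmetic for the expected values below (illustrative comment,
    # not in the original test): box [4, 3, 7, 5] has area 3 * 2 = 6 and box
    # [3, 4, 6, 8] has area 3 * 4 = 12; their intersection is 2, so
    # IoU = 2 / (6 + 12 - 2) = 2 / 16.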
expected_iou = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
[1.0 / 16.0, 0.0, 5.0 / 400.0]],
dtype=float)
self.assertAllClose(iou, expected_iou)
def test_ioa(self):
boxlist1 = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist2 = np_box_list.BoxList(
np.array(
[[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
expected_ioa21 = np.array([[0.5, 0.0],
[1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
def test_scale(self):
boxlist = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
expected_boxlist_scaled = np_box_list.BoxList(
np.array(
[[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32))
self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())
def test_clip_to_window(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5]],
dtype=np.float32))
boxlist_clipped = np_box_list_ops.clip_to_window(boxlist,
[0.0, 0.0, 1.0, 1.0])
expected_boxlist_clipped = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.0, 0.0, 0.7, 1.0]],
dtype=np.float32))
self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())
def test_prune_outside_window(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5]],
dtype=np.float32))
boxlist_pruned, _ = np_box_list_ops.prune_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0])
expected_boxlist_pruned = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())
def test_concatenate(self):
boxlist1 = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist2 = np_box_list.BoxList(
np.array(
[[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
boxlists = [boxlist1, boxlist2]
boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
boxlist_concatenated_expected = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
dtype=np.float32))
self.assertAllClose(boxlist_concatenated_expected.get(),
boxlist_concatenated.get())
def test_change_coordinate_frame(self):
boxlist = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist_coord = np_box_list_ops.change_coordinate_frame(
boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32))
expected_boxlist_coord = np_box_list.BoxList(
np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32))
self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())
def test_filter_scores_greater_than(self):
boxlist = np_box_list.BoxList(
np.array(
            [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]],
            dtype=np.float32))
boxlist.add_field('scores', np.array([0.8, 0.2], np.float32))
boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
expected_boxlist_greater = np_box_list.BoxList(
np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32))
self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
class GatherOpsTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field('scores', np.array([0.5, 0.7, 0.9], dtype=float))
self.boxlist.add_field('labels',
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
[0, 0, 0, 0, 1]],
dtype=int))
def test_gather_with_out_of_range_indices(self):
indices = np.array([3, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_with_invalid_multidimensional_indices(self):
indices = np.array([[0, 1], [1, 2]], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_without_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices)
expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)
self.assertAllClose(expected_scores, subboxlist.get_field('scores'))
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0]],
dtype=float)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]],
dtype=int)
self.assertAllClose(expected_labels, subboxlist.get_field('labels'))
def test_gather_with_invalid_field_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, 'labels')
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, ['objectness'])
def test_gather_with_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices, ['labels'])
self.assertFalse(subboxlist.has_field('scores'))
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0]],
dtype=float)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]],
dtype=int)
self.assertAllClose(expected_labels, subboxlist.get_field('labels'))
class SortByFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field('scores', np.array([0.5, 0.9, 0.4], dtype=float))
self.boxlist.add_field('labels',
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
[0, 0, 0, 0, 1]],
dtype=int))
def test_with_invalid_field(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'objectness')
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'labels')
def test_with_invalid_sorting_order(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'scores', 'Descending')
def test_with_descending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, 'scores')
expected_boxes = np.array([[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field('scores'))
def test_with_ascending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(
self.boxlist, 'scores', np_box_list_ops.SortOrder.ASCEND)
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0],
[3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0],],
dtype=float)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field('scores'))
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
self._boxes = np.array([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]],
dtype=float)
self._boxlist = np_box_list.BoxList(self._boxes)
def test_with_no_scores_field(self):
boxlist = np_box_list.BoxList(self._boxes)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_three(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .2, .3], dtype=float))
max_output_size = 3
iou_threshold = 1. # No NMS
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .2, .3], dtype=float))
max_output_size = 3
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_two_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .5, .3], dtype=float))
max_output_size = 2
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_thirty_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .5, .3], dtype=float))
max_output_size = 30
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
  def test_select_from_ten_identical_boxes(self):
boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array(10 * [0.8]))
iou_threshold = .5
max_output_size = 3
expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_different_iou_threshold(self):
boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80], [200, 200, 210, 300],
[200, 200, 210, 250]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array([0.9, 0.8, 0.7, 0.6]))
max_output_size = 4
iou_threshold = .4
expected_boxes = np.array([[0, 0, 20, 100],
[200, 200, 210, 300],],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = .5
expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],
[200, 200, 210, 250]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = .8
expected_boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80],
[200, 200, 210, 300], [200, 200, 210, 250]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_multiclass_nms(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32))
scores = np.array([[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31]],
dtype=np.float32)
boxlist.add_field('scores', scores)
boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(
boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)
scores_clean = boxlist_clean.get_field('scores')
classes_clean = boxlist_clean.get_field('classes')
boxes = boxlist_clean.get()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_list_ops_test.py | np_box_list_ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluator class for Visual Relations Detection.
VRDDetectionEvaluator is a class which manages ground truth information of a
visual relations detection (vrd) dataset, and computes frequently used detection
metrics such as Precision, Recall, Recall@k, of the provided vrd detection
results.
It supports the following operations:
1) Adding ground truth information of images sequentially.
2) Adding detection results of images sequentially.
3) Evaluating detection metrics on already inserted detection results.
Note1: groundtruth should be inserted before evaluation.
Note2: This module operates on numpy boxes and box lists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import collections
import logging
import numpy as np
import six
from six.moves import range
from object_detection.core import standard_fields
from object_detection.utils import metrics
from object_detection.utils import object_detection_evaluation
from object_detection.utils import per_image_vrd_evaluation
# Below standard input numpy datatypes are defined:
# box_data_type - datatype of the groundtruth visual relations box annotations;
# this datatype consists of two named boxes: subject bounding box and object
# bounding box. Each box is of the format [y_min, x_min, y_max, x_max], each
# coordinate being of type float32.
# label_data_type - corresponding datatype of the visual relations label
# annotaions; it consists of three numerical class labels: subject class label,
# object class label and relation class label, each class label being of type
# int32.
vrd_box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
single_box_data_type = np.dtype([('box', 'f4', (4,))])
label_data_type = np.dtype([('subject', 'i4'), ('object', 'i4'),
                            ('relation', 'i4')])
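# Illustrative sketch (not part of the original module): constructing one
# relation with these structured dtypes; boxes are [y_min, x_min, y_max,
# x_max] in normalized coordinates.
#
#   gt_boxes = np.array([([0.1, 0.1, 0.4, 0.4], [0.3, 0.3, 0.8, 0.9])],
#                       dtype=vrd_box_data_type)
#   gt_labels = np.array([(1, 2, 3)], dtype=label_data_type)
#   gt_boxes['subject']  # -> array([[0.1, 0.1, 0.4, 0.4]], dtype=float32)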
class VRDDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
"""A class to evaluate VRD detections.
This class serves as a base class for VRD evaluation in two settings:
- phrase detection
- relation detection.
"""
def __init__(self, matching_iou_threshold=0.5, metric_prefix=None):
"""Constructor.
Args:
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
metric_prefix: (optional) string prefix for metric name; if None, no
prefix is used.
"""
super(VRDDetectionEvaluator, self).__init__([])
self._matching_iou_threshold = matching_iou_threshold
self._evaluation = _VRDDetectionEvaluation(
matching_iou_threshold=self._matching_iou_threshold)
self._image_ids = set([])
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
self._evaluatable_labels = {}
self._negative_labels = {}
@abstractmethod
def _process_groundtruth_boxes(self, groundtruth_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
Phrase detection and Relation detection subclasses re-implement this method
depending on the task.
Args:
groundtruth_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max] (see
datatype vrd_box_data_type, single_box_data_type above).
"""
raise NotImplementedError(
        '_process_groundtruth_boxes method should be implemented in '
        'subclasses of VRDDetectionEvaluator.')
@abstractmethod
def _process_detection_boxes(self, detections_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
Phrase detection and Relation detection subclasses re-implement this method
depending on the task.
Args:
detections_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max] (see
datatype vrd_box_data_type, single_box_data_type above).
"""
raise NotImplementedError(
        '_process_detection_boxes method should be implemented in '
        'subclasses of VRDDetectionEvaluator.')
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: A numpy array
of structures with the shape [M, 1], representing M tuples, each tuple
containing the same number of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max] (see
datatype vrd_box_data_type, single_box_data_type above).
standard_fields.InputDataFields.groundtruth_classes: A numpy array of
structures shape [M, 1], representing the class labels of the
corresponding bounding boxes and possibly additional classes (see
datatype label_data_type above).
standard_fields.InputDataFields.groundtruth_image_classes: numpy array
of shape [K] containing verified labels.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_class_tuples = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes])
groundtruth_box_tuples = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes])
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_box_tuples=self._process_groundtruth_boxes(
groundtruth_box_tuples),
groundtruth_class_tuples=groundtruth_class_tuples)
self._image_ids.update([image_id])
all_classes = []
for field in groundtruth_box_tuples.dtype.fields:
all_classes.append(groundtruth_class_tuples[field])
    groundtruth_positive_classes = np.unique(np.concatenate(all_classes))
    verified_labels = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_image_classes,
        np.array([], dtype=int))
    self._evaluatable_labels[image_id] = np.unique(
        np.concatenate((verified_labels, groundtruth_positive_classes)))
    self._negative_labels[image_id] = np.setdiff1d(
        verified_labels, groundtruth_positive_classes)
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: A numpy array of
structures with shape [N, 1], representing N tuples, each tuple
containing the same number of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max] (as an example
see datatype vrd_box_data_type, single_box_data_type above).
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [N] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: A numpy array
of structures shape [N, 1], representing the class labels of the
corresponding bounding boxes and possibly additional classes (see
datatype label_data_type above).
"""
if image_id not in self._image_ids:
logging.warning('No groundtruth for the image with id %s.', image_id)
      # The evaluator assumes that groundtruth is inserted first. If it is
      # missing, register the image with empty label sets, which causes all
      # of its detections to be filtered out below.
self._image_ids.update([image_id])
self._negative_labels[image_id] = np.array([])
self._evaluatable_labels[image_id] = np.array([])
num_detections = detections_dict[
standard_fields.DetectionResultFields.detection_boxes].shape[0]
detection_class_tuples = detections_dict[
standard_fields.DetectionResultFields.detection_classes]
detection_box_tuples = detections_dict[
standard_fields.DetectionResultFields.detection_boxes]
negative_selector = np.zeros(num_detections, dtype=bool)
selector = np.ones(num_detections, dtype=bool)
# Only check boxable labels
for field in detection_box_tuples.dtype.fields:
      # Verify if one of the labels is negative (a certain false positive).
negative_selector |= np.isin(detection_class_tuples[field],
self._negative_labels[image_id])
# Verify if all labels are verified
selector &= np.isin(detection_class_tuples[field],
self._evaluatable_labels[image_id])
selector |= negative_selector
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_box_tuples=self._process_detection_boxes(
detection_box_tuples[selector]),
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores][selector],
detected_class_tuples=detection_class_tuples[selector])
def evaluate(self, relationships=None):
"""Compute evaluation result.
Args:
relationships: A dictionary of numerical label-text label mapping; if
specified, returns per-relationship AP.
Returns:
A dictionary of metrics with the following fields -
summary_metrics:
'weightedAP@<matching_iou_threshold>IOU' : weighted average precision
at the specified IOU threshold.
'AP@<matching_iou_threshold>IOU/<relationship>' : AP per relationship.
'mAP@<matching_iou_threshold>IOU': mean average precision at the
specified IOU threshold.
'Recall@50@<matching_iou_threshold>IOU': recall@50 at the specified IOU
threshold.
'Recall@100@<matching_iou_threshold>IOU': recall@100 at the specified
IOU threshold.
if relationships is specified, returns <relationship> in AP metrics as
readable names, otherwise the names correspond to class numbers.
"""
(weighted_average_precision, mean_average_precision, average_precisions, _,
_, recall_50, recall_100, _, _) = (
self._evaluation.evaluate())
vrd_metrics = {
(self._metric_prefix + 'weightedAP@{}IOU'.format(
self._matching_iou_threshold)):
weighted_average_precision,
self._metric_prefix + 'mAP@{}IOU'.format(self._matching_iou_threshold):
mean_average_precision,
self._metric_prefix + 'Recall@50@{}IOU'.format(
self._matching_iou_threshold):
recall_50,
self._metric_prefix + 'Recall@100@{}IOU'.format(
self._matching_iou_threshold):
recall_100,
}
if relationships:
for key, average_precision in six.iteritems(average_precisions):
vrd_metrics[self._metric_prefix + 'AP@{}IOU/{}'.format(
self._matching_iou_threshold,
relationships[key])] = average_precision
else:
for key, average_precision in six.iteritems(average_precisions):
vrd_metrics[self._metric_prefix + 'AP@{}IOU/{}'.format(
self._matching_iou_threshold, key)] = average_precision
return vrd_metrics
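  # Illustrative comment (not part of the original module): with the default
  # matching_iou_threshold=0.5 and the 'VRDMetric_Relationships' prefix used
  # by VRDRelationDetectionEvaluator below, evaluate() returns keys such as
  # 'VRDMetric_Relationships_weightedAP@0.5IOU' and
  # 'VRDMetric_Relationships_Recall@50@0.5IOU'.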
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._evaluation = _VRDDetectionEvaluation(
matching_iou_threshold=self._matching_iou_threshold)
self._image_ids.clear()
self._negative_labels.clear()
self._evaluatable_labels.clear()
class VRDRelationDetectionEvaluator(VRDDetectionEvaluator):
"""A class to evaluate VRD detections in relations setting.
  Expected groundtruth box datatype is vrd_box_data_type; expected groundtruth
  labels datatype is label_data_type.
  Expected detection box datatype is vrd_box_data_type; expected detection
  labels datatype is label_data_type.
"""
def __init__(self, matching_iou_threshold=0.5):
super(VRDRelationDetectionEvaluator, self).__init__(
matching_iou_threshold=matching_iou_threshold,
metric_prefix='VRDMetric_Relationships')
def _process_groundtruth_boxes(self, groundtruth_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
Args:
groundtruth_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max].
Returns:
Unchanged input.
"""
return groundtruth_box_tuples
def _process_detection_boxes(self, detections_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
Phrase detection and Relation detection subclasses re-implement this method
depending on the task.
Args:
detections_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max] (see
datatype vrd_box_data_type, single_box_data_type above).
Returns:
Unchanged input.
"""
return detections_box_tuples
class VRDPhraseDetectionEvaluator(VRDDetectionEvaluator):
"""A class to evaluate VRD detections in phrase setting.
  Expected groundtruth box datatype is vrd_box_data_type; expected groundtruth
labels datatype is label_data_type.
Expected detection box datatype is single_box_data_type, expected detection
labels datatype is label_data_type.
"""
def __init__(self, matching_iou_threshold=0.5):
super(VRDPhraseDetectionEvaluator, self).__init__(
matching_iou_threshold=matching_iou_threshold,
metric_prefix='VRDMetric_Phrases')
def _process_groundtruth_boxes(self, groundtruth_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
    In the phrase evaluation task, evaluation expects exactly one bounding
box containing all objects in the phrase. This bounding box is computed
as an enclosing box of all groundtruth boxes of a phrase.
Args:
groundtruth_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max]. See
vrd_box_data_type for an example of structure.
Returns:
result: A numpy array of structures with the shape [M, 1], each
structure containing exactly one named bounding box. i-th output
structure corresponds to the result of processing i-th input structure,
where the named bounding box is computed as an enclosing bounding box
of all bounding boxes of the i-th input structure.
"""
first_box_key = next(six.iterkeys(groundtruth_box_tuples.dtype.fields))
miny = groundtruth_box_tuples[first_box_key][:, 0]
minx = groundtruth_box_tuples[first_box_key][:, 1]
maxy = groundtruth_box_tuples[first_box_key][:, 2]
maxx = groundtruth_box_tuples[first_box_key][:, 3]
for fields in groundtruth_box_tuples.dtype.fields:
miny = np.minimum(groundtruth_box_tuples[fields][:, 0], miny)
minx = np.minimum(groundtruth_box_tuples[fields][:, 1], minx)
maxy = np.maximum(groundtruth_box_tuples[fields][:, 2], maxy)
maxx = np.maximum(groundtruth_box_tuples[fields][:, 3], maxx)
data_result = []
for i in range(groundtruth_box_tuples.shape[0]):
data_result.append(([miny[i], minx[i], maxy[i], maxx[i]],))
result = np.array(data_result, dtype=[('box', 'f4', (4,))])
return result
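  # Illustrative comment (not part of the original module): for a subject box
  # [0.1, 0.1, 0.4, 0.4] and an object box [0.3, 0.3, 0.8, 0.9], the enclosing
  # box computed above is [min(0.1, 0.3), min(0.1, 0.3), max(0.4, 0.8),
  # max(0.4, 0.9)] = [0.1, 0.1, 0.8, 0.9].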
def _process_detection_boxes(self, detections_box_tuples):
"""Pre-processes boxes before adding them to the VRDDetectionEvaluation.
    In the phrase evaluation task, evaluation expects exactly one bounding
    box containing all objects in the phrase. This bounding box is computed
    as an enclosing box of all detection boxes of a phrase.
Args:
detections_box_tuples: A numpy array of structures with the shape
[M, 1], each structure containing the same number of named bounding
boxes. Each box is of the format [y_min, x_min, y_max, x_max]. See
vrd_box_data_type for an example of this structure.
Returns:
result: A numpy array of structures with the shape [M, 1], each
structure containing exactly one named bounding box. i-th output
structure corresponds to the result of processing i-th input structure,
where the named bounding box is computed as an enclosing bounding box
of all bounding boxes of the i-th input structure.
"""
first_box_key = next(six.iterkeys(detections_box_tuples.dtype.fields))
miny = detections_box_tuples[first_box_key][:, 0]
minx = detections_box_tuples[first_box_key][:, 1]
maxy = detections_box_tuples[first_box_key][:, 2]
maxx = detections_box_tuples[first_box_key][:, 3]
for fields in detections_box_tuples.dtype.fields:
miny = np.minimum(detections_box_tuples[fields][:, 0], miny)
minx = np.minimum(detections_box_tuples[fields][:, 1], minx)
maxy = np.maximum(detections_box_tuples[fields][:, 2], maxy)
maxx = np.maximum(detections_box_tuples[fields][:, 3], maxx)
data_result = []
for i in range(detections_box_tuples.shape[0]):
data_result.append(([miny[i], minx[i], maxy[i], maxx[i]],))
result = np.array(data_result, dtype=[('box', 'f4', (4,))])
return result
VRDDetectionEvalMetrics = collections.namedtuple('VRDDetectionEvalMetrics', [
'weighted_average_precision', 'mean_average_precision',
'average_precisions', 'precisions', 'recalls', 'recall_50', 'recall_100',
'median_rank_50', 'median_rank_100'
])
class _VRDDetectionEvaluation(object):
"""Performs metric computation for the VRD task. This class is internal.
"""
def __init__(self, matching_iou_threshold=0.5):
"""Constructor.
Args:
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
"""
self._per_image_eval = per_image_vrd_evaluation.PerImageVRDEvaluation(
matching_iou_threshold=matching_iou_threshold)
self._groundtruth_box_tuples = {}
self._groundtruth_class_tuples = {}
self._num_gt_instances = 0
self._num_gt_imgs = 0
self._num_gt_instances_per_relationship = {}
self.clear_detections()
def clear_detections(self):
"""Clears detections."""
self._detection_keys = set()
self._scores = []
self._relation_field_values = []
self._tp_fp_labels = []
self._average_precisions = {}
self._precisions = []
self._recalls = []
def add_single_ground_truth_image_info(
self, image_key, groundtruth_box_tuples, groundtruth_class_tuples):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_box_tuples: A numpy array of structures with the shape
[M, 1], representing M tuples, each tuple containing the same number
of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max].
groundtruth_class_tuples: A numpy array of structures shape [M, 1],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
"""
if image_key in self._groundtruth_box_tuples:
logging.warning(
'image %s has already been added to the ground truth database.',
image_key)
return
self._groundtruth_box_tuples[image_key] = groundtruth_box_tuples
self._groundtruth_class_tuples[image_key] = groundtruth_class_tuples
self._update_groundtruth_statistics(groundtruth_class_tuples)
def add_single_detected_image_info(self, image_key, detected_box_tuples,
detected_scores, detected_class_tuples):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_box_tuples: A numpy array of structures with shape [N, 1],
representing N tuples, each tuple containing the same number of named
bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max].
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of the detected N object instances.
detected_class_tuples: A numpy array of structures shape [N, 1],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
"""
self._detection_keys.add(image_key)
if image_key in self._groundtruth_box_tuples:
groundtruth_box_tuples = self._groundtruth_box_tuples[image_key]
groundtruth_class_tuples = self._groundtruth_class_tuples[image_key]
else:
groundtruth_box_tuples = np.empty(
shape=[0, 4], dtype=detected_box_tuples.dtype)
groundtruth_class_tuples = np.array([], dtype=detected_class_tuples.dtype)
scores, tp_fp_labels, mapping = (
self._per_image_eval.compute_detection_tp_fp(
detected_box_tuples=detected_box_tuples,
detected_scores=detected_scores,
detected_class_tuples=detected_class_tuples,
groundtruth_box_tuples=groundtruth_box_tuples,
groundtruth_class_tuples=groundtruth_class_tuples))
self._scores += [scores]
self._tp_fp_labels += [tp_fp_labels]
self._relation_field_values += [detected_class_tuples[mapping]['relation']]
def _update_groundtruth_statistics(self, groundtruth_class_tuples):
"""Updates grouth truth statistics.
Args:
groundtruth_class_tuples: A numpy array of structures shape [M, 1],
representing the class labels of the corresponding bounding boxes and
possibly additional classes.
"""
self._num_gt_instances += groundtruth_class_tuples.shape[0]
self._num_gt_imgs += 1
for relation_field_value in np.unique(groundtruth_class_tuples['relation']):
if relation_field_value not in self._num_gt_instances_per_relationship:
self._num_gt_instances_per_relationship[relation_field_value] = 0
self._num_gt_instances_per_relationship[relation_field_value] += np.sum(
groundtruth_class_tuples['relation'] == relation_field_value)
def evaluate(self):
"""Computes evaluation result.
Returns:
A named tuple with the following fields -
        weighted_average_precision: a float computed from precisions and
          recalls pooled over all relationship labels.
        mean_average_precision: the mean of the per-relationship average
          precisions.
        average_precisions: a dict mapping each relationship label to its
          average precision.
        precisions: an array of precisions.
        recalls: an array of recalls.
        recall_50: recall computed on the 50 top-scoring samples.
        recall_100: recall computed on the 100 top-scoring samples.
        median_rank_50: median rank computed on the 50 top-scoring samples.
        median_rank_100: median rank computed on the 100 top-scoring samples.
"""
if self._num_gt_instances == 0:
logging.warning('No ground truth instances')
    if not self._scores:
      # Guard the empty case: np.concatenate raises on an empty sequence.
      scores = np.array([], dtype=float)
      tp_fp_labels = np.array([], dtype=bool)
      relation_field_values = np.array([], dtype=int)
    else:
      scores = np.concatenate(self._scores)
      tp_fp_labels = np.concatenate(self._tp_fp_labels)
      relation_field_values = np.concatenate(self._relation_field_values)
for relation_field_value, _ in (six.iteritems(
self._num_gt_instances_per_relationship)):
precisions, recalls = metrics.compute_precision_recall(
scores[relation_field_values == relation_field_value],
tp_fp_labels[relation_field_values == relation_field_value],
self._num_gt_instances_per_relationship[relation_field_value])
self._average_precisions[
relation_field_value] = metrics.compute_average_precision(
precisions, recalls)
self._mean_average_precision = np.mean(
list(self._average_precisions.values()))
self._precisions, self._recalls = metrics.compute_precision_recall(
scores, tp_fp_labels, self._num_gt_instances)
self._weighted_average_precision = metrics.compute_average_precision(
self._precisions, self._recalls)
self._recall_50 = (
metrics.compute_recall_at_k(self._tp_fp_labels, self._num_gt_instances,
50))
self._median_rank_50 = (
metrics.compute_median_rank_at_k(self._tp_fp_labels, 50))
self._recall_100 = (
metrics.compute_recall_at_k(self._tp_fp_labels, self._num_gt_instances,
100))
self._median_rank_100 = (
metrics.compute_median_rank_at_k(self._tp_fp_labels, 100))
return VRDDetectionEvalMetrics(
self._weighted_average_precision, self._mean_average_precision,
self._average_precisions, self._precisions, self._recalls,
self._recall_50, self._recall_100, self._median_rank_50,
self._median_rank_100)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/vrd_evaluation.py | vrd_evaluation.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.utils.vrd_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.utils import vrd_evaluation
class VRDRelationDetectionEvaluatorTest(tf.test.TestCase):
def test_vrdrelation_evaluator(self):
self.vrd_eval = vrd_evaluation.VRDRelationDetectionEvaluator()
image_key1 = 'img1'
groundtruth_box_tuples1 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples1 = np.array(
[(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type)
groundtruth_verified_labels1 = np.array([1, 2, 3, 4, 5], dtype=int)
self.vrd_eval.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples1,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels1
})
image_key2 = 'img2'
groundtruth_box_tuples2 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples2 = np.array(
[(1, 4, 3)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples2,
})
image_key3 = 'img3'
groundtruth_box_tuples3 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples3 = np.array(
[(1, 2, 4)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples3,
})
image_key = 'img1'
detected_box_tuples = np.array(
[([0, 0.3, 1, 1], [1.1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2]),
([0.5, 0, 1, 1], [1, 1, 3, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
detected_class_tuples = np.array(
[(1, 2, 5), (1, 2, 3), (1, 6, 3)], dtype=vrd_evaluation.label_data_type)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.vrd_eval.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_box_tuples,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_tuples
})
metrics = self.vrd_eval.evaluate()
self.assertAlmostEqual(metrics['VRDMetric_Relationships_weightedAP@0.5IOU'],
0.25)
self.assertAlmostEqual(metrics['VRDMetric_Relationships_mAP@0.5IOU'],
0.1666666666666666)
self.assertAlmostEqual(metrics['VRDMetric_Relationships_AP@0.5IOU/3'],
0.3333333333333333)
self.assertAlmostEqual(metrics['VRDMetric_Relationships_AP@0.5IOU/4'], 0)
self.assertAlmostEqual(metrics['VRDMetric_Relationships_Recall@50@0.5IOU'],
0.25)
self.assertAlmostEqual(metrics['VRDMetric_Relationships_Recall@100@0.5IOU'],
0.25)
self.vrd_eval.clear()
self.assertFalse(self.vrd_eval._image_ids)
class VRDPhraseDetectionEvaluatorTest(tf.test.TestCase):
def test_vrdphrase_evaluator(self):
self.vrd_eval = vrd_evaluation.VRDPhraseDetectionEvaluator()
image_key1 = 'img1'
groundtruth_box_tuples1 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples1 = np.array(
[(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type)
groundtruth_verified_labels1 = np.array([1, 2, 3, 4, 5], dtype=int)
self.vrd_eval.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples1,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels1
})
image_key2 = 'img2'
groundtruth_box_tuples2 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples2 = np.array(
[(1, 4, 3)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples2,
})
image_key3 = 'img3'
groundtruth_box_tuples3 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples3 = np.array(
[(1, 2, 4)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_box_tuples3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_tuples3,
})
image_key = 'img1'
detected_box_tuples = np.array(
[([0, 0.3, 0.5, 0.5], [0.3, 0.3, 1.0, 1.0]),
([0, 0, 1.2, 1.2], [0.0, 0.0, 2.0, 2.0]),
([0.5, 0, 1, 1], [1, 1, 3, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
detected_class_tuples = np.array(
[(1, 2, 5), (1, 2, 3), (1, 6, 3)], dtype=vrd_evaluation.label_data_type)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.vrd_eval.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_box_tuples,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_tuples
})
metrics = self.vrd_eval.evaluate()
self.assertAlmostEqual(metrics['VRDMetric_Phrases_weightedAP@0.5IOU'], 0.25)
self.assertAlmostEqual(metrics['VRDMetric_Phrases_mAP@0.5IOU'],
0.1666666666666666)
self.assertAlmostEqual(metrics['VRDMetric_Phrases_AP@0.5IOU/3'],
0.3333333333333333)
self.assertAlmostEqual(metrics['VRDMetric_Phrases_AP@0.5IOU/4'], 0)
self.assertAlmostEqual(metrics['VRDMetric_Phrases_Recall@50@0.5IOU'], 0.25)
self.assertAlmostEqual(metrics['VRDMetric_Phrases_Recall@100@0.5IOU'], 0.25)
self.vrd_eval.clear()
self.assertFalse(self.vrd_eval._image_ids)
class VRDDetectionEvaluationTest(tf.test.TestCase):
def setUp(self):
self.vrd_eval = vrd_evaluation._VRDDetectionEvaluation(
matching_iou_threshold=0.5)
image_key1 = 'img1'
groundtruth_box_tuples1 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])],
dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples1 = np.array(
[(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key1, groundtruth_box_tuples1, groundtruth_class_tuples1)
image_key2 = 'img2'
groundtruth_box_tuples2 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples2 = np.array(
[(1, 4, 3)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key2, groundtruth_box_tuples2, groundtruth_class_tuples2)
image_key3 = 'img3'
groundtruth_box_tuples3 = np.array(
[([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type)
groundtruth_class_tuples3 = np.array(
[(1, 2, 4)], dtype=vrd_evaluation.label_data_type)
self.vrd_eval.add_single_ground_truth_image_info(
image_key3, groundtruth_box_tuples3, groundtruth_class_tuples3)
image_key = 'img1'
detected_box_tuples = np.array(
[([0, 0.3, 1, 1], [1.1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2])],
dtype=vrd_evaluation.vrd_box_data_type)
detected_class_tuples = np.array(
[(1, 2, 3), (1, 2, 3)], dtype=vrd_evaluation.label_data_type)
detected_scores = np.array([0.7, 0.8], dtype=float)
self.vrd_eval.add_single_detected_image_info(
image_key, detected_box_tuples, detected_scores, detected_class_tuples)
  def test_evaluate(self):
    metrics = self.vrd_eval.evaluate()
expected_weighted_average_precision = 0.25
expected_mean_average_precision = 0.16666666666666
expected_precision = np.array([1., 0.5], dtype=float)
expected_recall = np.array([0.25, 0.25], dtype=float)
expected_recall_50 = 0.25
expected_recall_100 = 0.25
expected_median_rank_50 = 0
expected_median_rank_100 = 0
self.assertAlmostEqual(expected_weighted_average_precision,
metrics.weighted_average_precision)
self.assertAlmostEqual(expected_mean_average_precision,
metrics.mean_average_precision)
self.assertAllClose(expected_precision, metrics.precisions)
self.assertAllClose(expected_recall, metrics.recalls)
self.assertAlmostEqual(expected_recall_50, metrics.recall_50)
self.assertAlmostEqual(expected_recall_100, metrics.recall_100)
self.assertAlmostEqual(expected_median_rank_50, metrics.median_rank_50)
self.assertAlmostEqual(expected_median_rank_100, metrics.median_rank_100)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/vrd_evaluation_test.py | vrd_evaluation_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.utils.context_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.utils import context_manager
class ContextManagerTest(tf.test.TestCase):
def test_identity_context_manager(self):
with context_manager.IdentityContextManager() as identity_context:
self.assertIsNone(identity_context)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/context_manager_test.py | context_manager_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.image.understanding.object_detection.utils.json_utils."""
import os
import tensorflow.compat.v1 as tf
from object_detection.utils import json_utils
class JsonUtilsTest(tf.test.TestCase):
def testDumpReasonablePrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.0, f, float_digits=2)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1.00')
def testDumpPassExtraParams(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump([1.12345], f, float_digits=2, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '[\n 1.12\n]')
def testDumpZeroPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.0, f, float_digits=0, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1')
def testDumpUnspecifiedPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.012345, f)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1.012345')
def testDumpsReasonablePrecision(self):
s = json_utils.Dumps(1.12545, float_digits=2)
self.assertEqual(s, '1.13')
def testDumpsPassExtraParams(self):
s = json_utils.Dumps([1.0], float_digits=2, indent=3)
self.assertEqual(s, '[\n 1.00\n]')
def testDumpsZeroPrecision(self):
s = json_utils.Dumps(1.0, float_digits=0)
self.assertEqual(s, '1')
def testDumpsUnspecifiedPrecision(self):
s = json_utils.Dumps(1.012345)
self.assertEqual(s, '1.012345')
def testPrettyParams(self):
s = json_utils.Dumps({'v': 1.012345, 'n': 2}, **json_utils.PrettyParams())
self.assertEqual(s, '{\n "n": 2,\n "v": 1.0123\n}')
def testPrettyParamsExtraParamsInside(self):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, **json_utils.PrettyParams(allow_nan=True))
self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
with self.assertRaises(ValueError):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, **json_utils.PrettyParams(allow_nan=False))
def testPrettyParamsExtraParamsOutside(self):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, allow_nan=True, **json_utils.PrettyParams())
self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
with self.assertRaises(ValueError):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, allow_nan=False, **json_utils.PrettyParams())
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/json_utils_test.py | json_utils_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.target_assigner_utils."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import test_case
class TargetUtilTest(parameterized.TestCase, test_case.TestCase):
def test_image_shape_to_grids(self):
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=2, width=3)
return y_grid, x_grid
expected_y_grid = np.array([[0, 0, 0], [1, 1, 1]])
expected_x_grid = np.array([[0, 1, 2], [0, 1, 2]])
y_grid, x_grid = self.execute(graph_fn, [])
np.testing.assert_array_equal(y_grid, expected_y_grid)
np.testing.assert_array_equal(x_grid, expected_x_grid)
@parameterized.parameters((False,), (True,))
def test_coordinates_to_heatmap(self, sparse):
if not hasattr(tf, 'tensor_scatter_nd_max'):
self.skipTest('Cannot test function due to old TF version.')
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=3, width=5)
y_coordinates = tf.constant([1.5, 0.5], dtype=tf.float32)
x_coordinates = tf.constant([2.5, 4.5], dtype=tf.float32)
sigma = tf.constant([0.1, 0.5], dtype=tf.float32)
channel_onehot = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.float32)
channel_weights = tf.constant([1, 1], dtype=tf.float32)
heatmap = ta_utils.coordinates_to_heatmap(y_grid, x_grid, y_coordinates,
x_coordinates, sigma,
channel_onehot,
channel_weights, sparse=sparse)
return heatmap
heatmap = self.execute(graph_fn, [])
# Peak at (1, 2) for the first class.
self.assertAlmostEqual(1.0, heatmap[1, 2, 0])
# Peak at (0, 4) for the second class.
self.assertAlmostEqual(1.0, heatmap[0, 4, 1])
def test_compute_floor_offsets_with_indices_onlysource(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[0.5, 0.5], [0.3, 0.2]]))
np.testing.assert_array_almost_equal(indices,
np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_and_targets(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[1.1, -0.8], [0.1, 0.5]]))
np.testing.assert_array_almost_equal(indices, np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_multisources(self):
def graph_fn():
y_source = tf.constant([[1.0, 0.0], [2.0, 3.0]], dtype=tf.float32)
x_source = tf.constant([[2.0, 4.0], [3.0, 3.0]], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
# Offset from the first source to target.
np.testing.assert_array_almost_equal(offsets[:, 0, :],
np.array([[1.1, -0.8], [-1.9, 1.5]]))
# Offset from the second source to target.
np.testing.assert_array_almost_equal(offsets[:, 1, :],
np.array([[2.1, -2.8], [-2.9, 1.5]]))
# Indices from the first source to target.
np.testing.assert_array_almost_equal(indices[:, 0, :],
np.array([[1, 2], [2, 3]]))
# Indices from the second source to target.
np.testing.assert_array_almost_equal(indices[:, 1, :],
np.array([[0, 4], [3, 3]]))
def test_get_valid_keypoints_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_mask = np.array([[0, 1], [0, 0], [1, 0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_get_valid_keypoints_with_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
weights = tf.constant([0.0, 0.0, 1.0])
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
class_weights=weights,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
expected_mask = np.array([[0, 0], [0, 0], [1, 0]])
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_blackout_pixel_weights_by_box_regions(self):
def graph_fn():
boxes = tf.constant(
[[0.0, 0.0, 5, 5], [0.0, 0.0, 10.0, 20.0], [6.0, 12.0, 8.0, 18.0]],
dtype=tf.float32)
blackout = tf.constant([True, False, True], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# All zeros in region [0:5, 0:5].
self.assertAlmostEqual(np.sum(output[0:5, 0:5]), 0.0)
    # All zeros in region [6:8, 12:18].
self.assertAlmostEqual(np.sum(output[6:8, 12:18]), 0.0)
# All other pixel weights should be 1.0.
# 20 * 10 - 5 * 5 - 2 * 6 = 163.0
self.assertAlmostEqual(np.sum(output), 163.0)
def test_blackout_pixel_weights_by_box_regions_with_weights(self):
def graph_fn():
boxes = tf.constant(
[[0.0, 0.0, 2.0, 2.0],
[0.0, 0.0, 4.0, 2.0],
[3.0, 0.0, 4.0, 4.0]],
dtype=tf.float32)
blackout = tf.constant([False, False, True], dtype=tf.bool)
weights = tf.constant([0.4, 0.3, 0.2], tf.float32)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(
4, 4, boxes, blackout, weights)
return output
output = self.execute(graph_fn, [])
expected_weights = [
[0.4, 0.4, 1.0, 1.0],
[0.4, 0.4, 1.0, 1.0],
[0.3, 0.3, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]
np.testing.assert_array_almost_equal(expected_weights, output)
def test_blackout_pixel_weights_by_box_regions_zero_instance(self):
def graph_fn():
boxes = tf.zeros([0, 4], dtype=tf.float32)
blackout = tf.zeros([0], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# The output should be all 1s since there's no annotation provided.
np.testing.assert_array_equal(output, np.ones([10, 20], dtype=np.float32))
def test_get_surrounding_grids(self):
def graph_fn():
y_coordinates = tf.constant([0.5], dtype=tf.float32)
x_coordinates = tf.constant([4.5], dtype=tf.float32)
output = ta_utils.get_surrounding_grids(
height=3,
width=5,
y_coordinates=y_coordinates,
x_coordinates=x_coordinates,
radius=1)
return output
y_indices, x_indices, valid = self.execute(graph_fn, [])
# Five neighboring indices: [-1, 4] (out of bound), [0, 3], [0, 4],
# [0, 5] (out of bound), [1, 4].
np.testing.assert_array_almost_equal(
y_indices,
np.array([[0.0, 0.0, 0.0, 0.0, 1.0]]))
np.testing.assert_array_almost_equal(
x_indices,
np.array([[0.0, 3.0, 4.0, 0.0, 4.0]]))
self.assertAllEqual(valid, [[False, True, True, False, True]])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/target_assigner_utils_test.py | target_assigner_utils_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spatial transformation ops like RoIAlign, CropAndResize."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
def _coordinate_vector_1d(start, end, size, align_endpoints):
"""Generates uniformly spaced coordinate vector.
Args:
start: A float tensor of shape [batch, num_boxes] indicating start values.
end: A float tensor of shape [batch, num_boxes] indicating end values.
size: Number of points in coordinate vector.
align_endpoints: Whether to align first and last points exactly to
endpoints.
Returns:
A 3D float tensor of shape [batch, num_boxes, size] containing grid
coordinates.
"""
start = tf.expand_dims(start, -1)
end = tf.expand_dims(end, -1)
length = end - start
if align_endpoints:
relative_grid_spacing = tf.linspace(0.0, 1.0, size)
offset = 0 if size > 1 else length / 2
else:
relative_grid_spacing = tf.linspace(0.0, 1.0, size + 1)[:-1]
offset = length / (2 * size)
relative_grid_spacing = tf.reshape(relative_grid_spacing, [1, 1, size])
relative_grid_spacing = tf.cast(relative_grid_spacing, dtype=start.dtype)
absolute_grid = start + offset + relative_grid_spacing * length
return absolute_grid
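# Worked example (illustrative, not part of the original library): for a box
# edge spanning start=0.0 to end=4.0 with size=2 points,
#   align_endpoints=True  -> [0.0, 4.0] (points sit on the endpoints), and
#   align_endpoints=False -> [1.0, 3.0] (points sit at the two cell centers,
#     offset from each cell edge by length / (2 * size) = 1.0).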
def box_grid_coordinate_vectors(boxes, size_y, size_x, align_corners=False):
"""Generates coordinate vectors for a `size x size` grid in boxes.
Each box is subdivided uniformly into a grid consisting of size x size
rectangular cells. This function returns coordinate vectors describing
the center of each cell.
If `align_corners` is true, grid points are uniformly spread such that the
corner points on the grid exactly overlap corners of the boxes.
Note that output coordinates are expressed in the same coordinate frame as
input boxes.
Args:
boxes: A float tensor of shape [batch, num_boxes, 4] containing boxes of the
form [ymin, xmin, ymax, xmax].
size_y: Size of the grid in y axis.
size_x: Size of the grid in x axis.
align_corners: Whether to align the corner grid points exactly with box
corners.
Returns:
box_grid_y: A float tensor of shape [batch, num_boxes, size_y] containing y
coordinates for grid points.
box_grid_x: A float tensor of shape [batch, num_boxes, size_x] containing x
coordinates for grid points.
"""
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=-1)
box_grid_y = _coordinate_vector_1d(ymin, ymax, size_y, align_corners)
box_grid_x = _coordinate_vector_1d(xmin, xmax, size_x, align_corners)
return box_grid_y, box_grid_x
def feature_grid_coordinate_vectors(box_grid_y, box_grid_x):
"""Returns feature grid point coordinate vectors for bilinear interpolation.
Box grid is specified in absolute coordinate system with origin at left top
(0, 0). The returned coordinate vectors contain 0-based feature point indices.
This function snaps each point in the box grid to nearest 4 points on the
feature map.
In this function we also follow the convention of treating feature pixels as
point objects with no spatial extent.
Args:
box_grid_y: A float tensor of shape [batch, num_boxes, size] containing y
coordinate vector of the box grid.
box_grid_x: A float tensor of shape [batch, num_boxes, size] containing x
coordinate vector of the box grid.
Returns:
feature_grid_y0: An int32 tensor of shape [batch, num_boxes, size]
containing y coordinate vector for the top neighbors.
feature_grid_x0: A int32 tensor of shape [batch, num_boxes, size]
containing x coordinate vector for the left neighbors.
feature_grid_y1: A int32 tensor of shape [batch, num_boxes, size]
containing y coordinate vector for the bottom neighbors.
feature_grid_x1: A int32 tensor of shape [batch, num_boxes, size]
containing x coordinate vector for the right neighbors.
"""
feature_grid_y0 = tf.floor(box_grid_y)
feature_grid_x0 = tf.floor(box_grid_x)
feature_grid_y1 = tf.floor(box_grid_y + 1)
feature_grid_x1 = tf.floor(box_grid_x + 1)
feature_grid_y0 = tf.cast(feature_grid_y0, dtype=tf.int32)
feature_grid_y1 = tf.cast(feature_grid_y1, dtype=tf.int32)
feature_grid_x0 = tf.cast(feature_grid_x0, dtype=tf.int32)
feature_grid_x1 = tf.cast(feature_grid_x1, dtype=tf.int32)
return (feature_grid_y0, feature_grid_x0, feature_grid_y1, feature_grid_x1)
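# Worked example (illustrative): a box grid point at y=1.3, x=2.7 snaps to
# the four neighboring feature points with (y0, x0) = (1, 2) and
# (y1, x1) = (2, 3); bilinear interpolation later blends them using the
# fractional parts 0.3 and 0.7 as weights.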
def _valid_indicator(feature_grid_y, feature_grid_x, true_feature_shapes):
"""Computes a indicator vector for valid indices.
Computes an indicator vector which is true for points on feature map and
false for points off feature map.
Args:
feature_grid_y: An int32 tensor of shape [batch, num_boxes, size_y]
containing y coordinate vector.
feature_grid_x: An int32 tensor of shape [batch, num_boxes, size_x]
containing x coordinate vector.
true_feature_shapes: A int32 tensor of shape [batch, num_boxes, 2]
containing valid height and width of feature maps. Feature maps are
assumed to be aligned to the left top corner.
Returns:
    valid_indicator: A 1-D bool tensor, true for feature grid points that lie
      within the valid extent of their feature map.
"""
height = tf.cast(true_feature_shapes[:, :, 0:1], dtype=feature_grid_y.dtype)
width = tf.cast(true_feature_shapes[:, :, 1:2], dtype=feature_grid_x.dtype)
valid_indicator = tf.logical_and(
tf.expand_dims(
tf.logical_and(feature_grid_y >= 0, tf.less(feature_grid_y, height)),
3),
tf.expand_dims(
tf.logical_and(feature_grid_x >= 0, tf.less(feature_grid_x, width)),
2))
return tf.reshape(valid_indicator, [-1])
def ravel_indices(feature_grid_y, feature_grid_x, num_levels, height, width,
box_levels):
"""Returns grid indices in a flattened feature map of shape [-1, channels].
The returned 1-D array can be used to gather feature grid points from a
feature map that has been flattened from [batch, num_levels, max_height,
max_width, channels] to [batch * num_levels * max_height * max_width,
channels].
Args:
feature_grid_y: An int32 tensor of shape [batch, num_boxes, size_y]
containing y coordinate vector.
feature_grid_x: An int32 tensor of shape [batch, num_boxes, size_x]
containing x coordinate vector.
num_levels: Number of feature levels.
height: An integer indicating the padded height of feature maps.
width: An integer indicating the padded width of feature maps.
box_levels: An int32 tensor of shape [batch, num_boxes] indicating
feature level assigned to each box.
Returns:
indices: A 1D int32 tensor containing feature point indices in a flattened
feature grid.
"""
num_boxes = tf.shape(feature_grid_y)[1]
batch_size = tf.shape(feature_grid_y)[0]
size_y = tf.shape(feature_grid_y)[2]
size_x = tf.shape(feature_grid_x)[2]
height_dim_offset = width
level_dim_offset = height * height_dim_offset
batch_dim_offset = num_levels * level_dim_offset
batch_dim_indices = (
tf.reshape(
tf.range(batch_size) * batch_dim_offset, [batch_size, 1, 1, 1]) *
tf.ones([1, num_boxes, size_y, size_x], dtype=tf.int32))
box_level_indices = (
tf.reshape(box_levels * level_dim_offset, [batch_size, num_boxes, 1, 1]) *
tf.ones([1, 1, size_y, size_x], dtype=tf.int32))
height_indices = (
tf.reshape(feature_grid_y * height_dim_offset,
[batch_size, num_boxes, size_y, 1]) *
tf.ones([1, 1, 1, size_x], dtype=tf.int32))
width_indices = (
tf.reshape(feature_grid_x, [batch_size, num_boxes, 1, size_x])
* tf.ones([1, 1, size_y, 1], dtype=tf.int32))
indices = (
batch_dim_indices + box_level_indices + height_indices + width_indices)
flattened_indices = tf.reshape(indices, [-1])
return flattened_indices
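# Worked example (illustrative): with num_levels=2, height=3, width=4, the
# flattened index of feature point (batch=1, level=0, y=2, x=3) is
#   1 * (2 * 3 * 4) + 0 * (3 * 4) + 2 * 4 + 3 = 35,
# i.e. batch offset + level offset + y * width + x.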
def pad_to_max_size(features):
"""Pads features to max height and max width and stacks them up.
Args:
features: A list of num_levels 4D float tensors of shape [batch, height_i,
width_i, channels] containing feature maps.
Returns:
stacked_features: A 5D float tensor of shape [batch, num_levels, max_height,
max_width, channels] containing stacked features.
true_feature_shapes: A 2D int32 tensor of shape [num_levels, 2] containing
height and width of the feature maps before padding.
"""
if len(features) == 1:
return tf.expand_dims(features[0],
1), tf.expand_dims(tf.shape(features[0])[1:3], 0)
if all([feature.shape.is_fully_defined() for feature in features]):
heights = [feature.shape[1] for feature in features]
widths = [feature.shape[2] for feature in features]
max_height = max(heights)
max_width = max(widths)
else:
heights = [tf.shape(feature)[1] for feature in features]
widths = [tf.shape(feature)[2] for feature in features]
max_height = tf.reduce_max(heights)
max_width = tf.reduce_max(widths)
features_all = [
tf.image.pad_to_bounding_box(feature, 0, 0, max_height,
max_width) for feature in features
]
features_all = tf.stack(features_all, axis=1)
true_feature_shapes = tf.stack([tf.shape(feature)[1:3]
for feature in features])
return features_all, true_feature_shapes
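# Worked example (illustrative): feature maps of shapes [b, 10, 16, c] and
# [b, 5, 8, c] are zero-padded to [b, 10, 16, c] each and stacked into a
# single [b, 2, 10, 16, c] tensor, while true_feature_shapes records
# [[10, 16], [5, 8]] so padded regions can be masked out downstream.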
def _gather_valid_indices(tensor, indices, padding_value=0.0):
"""Gather values for valid indices.
TODO(rathodv): We can't use ops.gather_with_padding_values due to cyclic
dependency. Start using it after migrating all users of spatial ops to import
this module directly rather than util/ops.py
Args:
tensor: A tensor to gather valid values from.
indices: A 1-D int32 tensor containing indices along axis 0 of `tensor`.
Invalid indices must be marked with -1.
padding_value: Value to return for invalid indices.
Returns:
A tensor sliced based on indices. For indices that are equal to -1, returns
rows of padding value.
"""
padded_tensor = tf.concat(
[
padding_value *
tf.ones([1, tf.shape(tensor)[-1]], dtype=tensor.dtype), tensor
],
axis=0,
)
# tf.concat gradient op uses tf.where(condition) (which is not
# supported on TPU) when the inputs to it are tf.IndexedSlices instead of
# tf.Tensor. Since gradient op for tf.gather returns tf.IndexedSlices,
  # we add a dummy op in between tf.concat and tf.gather to ensure tf.concat
# gradient function gets tf.Tensor inputs and not tf.IndexedSlices.
padded_tensor *= 1.0
return tf.gather(padded_tensor, indices + 1)
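# Worked example (illustrative): with tensor=[[1.], [2.]] and indices=[-1, 1],
# the padding row is prepended to give [[0.], [1.], [2.]], and gathering at
# indices + 1 = [0, 2] returns [[0.], [2.]] -- the -1 entry maps to the
# padding_value row.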
def multilevel_roi_align(features, boxes, box_levels, output_size,
num_samples_per_cell_y=1, num_samples_per_cell_x=1,
align_corners=False, extrapolation_value=0.0,
scope=None):
"""Applies RoI Align op and returns feature for boxes.
Given multiple features maps indexed by different levels, and a set of boxes
where each box is mapped to a certain level, this function selectively crops
and resizes boxes from the corresponding feature maps.
We follow the RoI Align technique in https://arxiv.org/pdf/1703.06870.pdf
figure 3. Specifically, each box is subdivided uniformly into a grid
consisting of output_size[0] x output_size[1] rectangular cells. Within each
  cell we sample num_samples_per_cell_y x num_samples_per_cell_x points
  uniformly and compute feature values using
bilinear interpolation. Finally, we average pool the interpolated values in
each cell to obtain a [output_size[0], output_size[1], channels] feature.
If `align_corners` is true, sampling points are uniformly spread such that
corner points exactly overlap corners of the boxes.
In this function we also follow the convention of treating feature pixels as
point objects with no spatial extent.
Args:
features: A list of 4D float tensors of shape [batch_size, max_height,
max_width, channels] containing features. Note that each feature map must
have the same number of channels.
boxes: A 3D float tensor of shape [batch_size, num_boxes, 4] containing
boxes of the form [ymin, xmin, ymax, xmax] in normalized coordinates.
    box_levels: A 2D int32 tensor of shape [batch_size, num_boxes]
representing the feature level index for each box.
    output_size: A list of two integers [size_y, size_x] indicating the output
feature size for each box.
num_samples_per_cell_y: Number of grid points to sample along y axis in each
cell.
num_samples_per_cell_x: Number of grid points to sample along x axis in each
cell.
align_corners: Whether to align the corner grid points exactly with box
corners.
extrapolation_value: a float value to use for extrapolation.
scope: Scope name to use for this op.
Returns:
A 5D float tensor of shape [batch_size, num_boxes, output_size[0],
output_size[1], channels] representing the cropped features.
"""
with tf.name_scope(scope, 'MultiLevelRoIAlign'):
features, true_feature_shapes = pad_to_max_size(features)
batch_size = shape_utils.combined_static_and_dynamic_shape(features)[0]
num_levels = features.get_shape().as_list()[1]
max_feature_height = tf.shape(features)[2]
max_feature_width = tf.shape(features)[3]
num_filters = features.get_shape().as_list()[4]
num_boxes = tf.shape(boxes)[1]
# Convert boxes to absolute co-ordinates.
true_feature_shapes = tf.cast(true_feature_shapes, dtype=boxes.dtype)
true_feature_shapes = tf.gather(true_feature_shapes, box_levels)
boxes *= tf.concat([true_feature_shapes - 1] * 2, axis=-1)
size_y = output_size[0] * num_samples_per_cell_y
size_x = output_size[1] * num_samples_per_cell_x
box_grid_y, box_grid_x = box_grid_coordinate_vectors(
boxes, size_y=size_y, size_x=size_x, align_corners=align_corners)
(feature_grid_y0, feature_grid_x0, feature_grid_y1,
feature_grid_x1) = feature_grid_coordinate_vectors(box_grid_y, box_grid_x)
feature_grid_y = tf.reshape(
tf.stack([feature_grid_y0, feature_grid_y1], axis=3),
[batch_size, num_boxes, -1])
feature_grid_x = tf.reshape(
tf.stack([feature_grid_x0, feature_grid_x1], axis=3),
[batch_size, num_boxes, -1])
feature_coordinates = ravel_indices(feature_grid_y, feature_grid_x,
num_levels, max_feature_height,
max_feature_width, box_levels)
valid_indices = _valid_indicator(feature_grid_y, feature_grid_x,
true_feature_shapes)
feature_coordinates = tf.where(valid_indices, feature_coordinates,
-1 * tf.ones_like(feature_coordinates))
flattened_features = tf.reshape(features, [-1, num_filters])
flattened_feature_values = _gather_valid_indices(flattened_features,
feature_coordinates,
extrapolation_value)
features_per_box = tf.reshape(
flattened_feature_values,
[batch_size, num_boxes, size_y * 2, size_x * 2, num_filters])
# Cast tensors into dtype of features.
box_grid_y = tf.cast(box_grid_y, dtype=features_per_box.dtype)
box_grid_x = tf.cast(box_grid_x, dtype=features_per_box.dtype)
feature_grid_y0 = tf.cast(feature_grid_y0, dtype=features_per_box.dtype)
feature_grid_x0 = tf.cast(feature_grid_x0, dtype=features_per_box.dtype)
# RoI Align operation is a bilinear interpolation of four
# neighboring feature points f0, f1, f2, and f3 onto point y, x given by
# f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
# [f10, f11]]
#
# Unrolling the matrix multiplies gives us:
# f(y, x) = (hy * hx) f00 + (hy * lx) f01 + (ly * hx) f10 + (lx * ly) f11
# f(y, x) = w00 * f00 + w01 * f01 + w10 * f10 + w11 * f11
#
# This can be computed by applying pointwise multiplication and sum_pool in
# a 2x2 window.
ly = box_grid_y - feature_grid_y0
lx = box_grid_x - feature_grid_x0
hy = 1.0 - ly
hx = 1.0 - lx
kernel_y = tf.reshape(
tf.stack([hy, ly], axis=3), [batch_size, num_boxes, size_y * 2, 1])
kernel_x = tf.reshape(
tf.stack([hx, lx], axis=3), [batch_size, num_boxes, 1, size_x * 2])
# Multiplier 4 is to make tf.nn.avg_pool behave like sum_pool.
interpolation_kernel = kernel_y * kernel_x * 4
# Interpolate the gathered features with computed interpolation kernels.
    features_per_box *= tf.expand_dims(interpolation_kernel, axis=4)
features_per_box = tf.reshape(
features_per_box,
[batch_size * num_boxes, size_y * 2, size_x * 2, num_filters])
# This combines the two pooling operations - sum_pool to perform bilinear
# interpolation and avg_pool to pool the values in each bin.
features_per_box = tf.nn.avg_pool(
features_per_box,
[1, num_samples_per_cell_y * 2, num_samples_per_cell_x * 2, 1],
[1, num_samples_per_cell_y * 2, num_samples_per_cell_x * 2, 1], 'VALID')
features_per_box = tf.reshape(
features_per_box,
[batch_size, num_boxes, output_size[0], output_size[1], num_filters])
return features_per_box
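# Minimal usage sketch (illustrative, not part of the original library);
# shapes and values are arbitrary:
#   features = [tf.random.uniform([2, 64, 64, 256]),
#               tf.random.uniform([2, 32, 32, 256])]
#   boxes = tf.constant([[[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]]] * 2)
#   box_levels = tf.constant([[0, 1]] * 2, dtype=tf.int32)
#   roi_features = multilevel_roi_align(features, boxes, box_levels, [7, 7])
#   # roi_features has shape [2, 2, 7, 7, 256].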
def multilevel_native_crop_and_resize(images, boxes, box_levels,
crop_size, scope=None):
"""Multilevel native crop and resize.
Same as `multilevel_matmul_crop_and_resize` but uses tf.image.crop_and_resize.
Args:
images: A list of 4-D tensor of shape
[batch, image_height, image_width, depth] representing features of
different size.
boxes: A `Tensor` of type `float32`.
A 3-D tensor of shape `[batch, num_boxes, 4]`. The boxes are specified in
normalized coordinates and are of the form `[y1, x1, y2, x2]`. A
normalized coordinate value of `y` is mapped to the image coordinate at
`y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image
height is mapped to `[0, image_height - 1] in image height coordinates.
We do allow y1 > y2, in which case the sampled crop is an up-down flipped
version of the original image. The width dimension is treated similarly.
Normalized coordinates outside the `[0, 1]` range are allowed, in which
case we use `extrapolation_value` to extrapolate the input image values.
box_levels: A 2-D tensor of shape [batch, num_boxes] representing the level
of the box.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
scope: A name for the operation (optional).
Returns:
A 5-D float tensor of shape `[batch, num_boxes, crop_height, crop_width,
depth]`
"""
if box_levels is None:
return native_crop_and_resize(images[0], boxes, crop_size, scope)
with tf.name_scope('MultiLevelNativeCropAndResize'):
cropped_feature_list = []
for level, image in enumerate(images):
      # For each level, crop the feature map for all boxes, then zero out
      # crops whose box is not assigned to this level.
      # TODO: consider a more efficient way of computing cropped features.
cropped = native_crop_and_resize(image, boxes, crop_size, scope)
cond = tf.tile(
tf.equal(box_levels, level)[:, :, tf.newaxis],
[1, 1] + [tf.math.reduce_prod(cropped.shape.as_list()[2:])])
cond = tf.reshape(cond, cropped.shape)
cropped_final = tf.where(cond, cropped, tf.zeros_like(cropped))
cropped_feature_list.append(cropped_final)
return tf.math.reduce_sum(cropped_feature_list, axis=0)
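# Worked example (illustrative): with two levels and box_levels=[[0, 1]], the
# level-0 pass keeps the crop for box 0 and zeroes the crop for box 1, the
# level-1 pass does the opposite, and the final reduce_sum recombines them so
# each box is effectively cropped from its assigned level only.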
def native_crop_and_resize(image, boxes, crop_size, scope=None):
"""Same as `matmul_crop_and_resize` but uses tf.image.crop_and_resize."""
def get_box_inds(proposals):
proposals_shape = proposals.shape.as_list()
if any(dim is None for dim in proposals_shape):
proposals_shape = tf.shape(proposals)
ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
multiplier = tf.expand_dims(
tf.range(start=0, limit=proposals_shape[0]), 1)
return tf.reshape(ones_mat * multiplier, [-1])
with tf.name_scope(scope, 'CropAndResize'):
cropped_regions = tf.image.crop_and_resize(
image, tf.reshape(boxes, [-1] + boxes.shape.as_list()[2:]),
get_box_inds(boxes), crop_size)
final_shape = tf.concat([tf.shape(boxes)[:2],
tf.shape(cropped_regions)[1:]], axis=0)
return tf.reshape(cropped_regions, final_shape)
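# Worked example (illustrative): get_box_inds maps each box to its batch row;
# for proposals of shape [2, 3, 4] it returns [0, 0, 0, 1, 1, 1], the
# box_indices format expected by tf.image.crop_and_resize.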
def multilevel_matmul_crop_and_resize(images, boxes, box_levels, crop_size,
extrapolation_value=0.0, scope=None):
"""Multilevel matmul crop and resize.
Same as `matmul_crop_and_resize` but crop images according to box levels.
Args:
images: A list of 4-D tensor of shape
[batch, image_height, image_width, depth] representing features of
different size.
boxes: A `Tensor` of type `float32` or 'bfloat16'.
A 3-D tensor of shape `[batch, num_boxes, 4]`. The boxes are specified in
normalized coordinates and are of the form `[y1, x1, y2, x2]`. A
normalized coordinate value of `y` is mapped to the image coordinate at
      `y * (image_height - 1)`, so that the `[0, 1]` interval of normalized
      image height is mapped to `[0, image_height - 1]` in image height
      coordinates.
We do allow y1 > y2, in which case the sampled crop is an up-down flipped
version of the original image. The width dimension is treated similarly.
Normalized coordinates outside the `[0, 1]` range are allowed, in which
case we use `extrapolation_value` to extrapolate the input image values.
box_levels: A 2-D tensor of shape [batch, num_boxes] representing the level
of the box.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
extrapolation_value: A float value to use for extrapolation.
scope: A name for the operation (optional).
Returns:
A 5-D float tensor of shape `[batch, num_boxes, crop_height, crop_width,
depth]`
"""
with tf.name_scope(scope, 'MultiLevelMatMulCropAndResize'):
if box_levels is None:
box_levels = tf.zeros(tf.shape(boxes)[:2], dtype=tf.int32)
return multilevel_roi_align(images,
boxes,
box_levels,
crop_size,
align_corners=True,
extrapolation_value=extrapolation_value)
def matmul_crop_and_resize(image, boxes, crop_size, extrapolation_value=0.0,
scope=None):
"""Matrix multiplication based implementation of the crop and resize op.
Extracts crops from the input image tensor and bilinearly resizes them
(possibly with aspect ratio change) to a common output size specified by
crop_size. This is more general than the crop_to_bounding_box op which
extracts a fixed size slice from the input image and does not allow
resizing or aspect ratio change.
Returns a tensor with crops from the input image at positions defined at
the bounding box locations in boxes. The cropped boxes are all resized
(with bilinear interpolation) to a fixed size = `[crop_height, crop_width]`.
The result is a 5-D tensor `[batch, num_boxes, crop_height, crop_width,
depth]`.
Note that this operation is meant to replicate the behavior of the standard
tf.image.crop_and_resize operation but there are a few differences.
Specifically:
  1) There is no `box_indices` argument --- boxes are passed as a batched
  [batch, num_boxes, 4] tensor, so each box implicitly crops from the image at
  the same batch position.
2) The `crop_size` parameter is assumed to be statically defined.
Moreover, the number of boxes must be strictly nonzero.
Args:
image: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`, `half`, 'bfloat16', `float32`, `float64`.
A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A `Tensor` of type `float32` or 'bfloat16'.
A 3-D tensor of shape `[batch, num_boxes, 4]`. The boxes are specified in
normalized coordinates and are of the form `[y1, x1, y2, x2]`. A
normalized coordinate value of `y` is mapped to the image coordinate at
      `y * (image_height - 1)`, so that the `[0, 1]` interval of normalized
      image height is mapped to `[0, image_height - 1]` in image height
      coordinates.
We do allow y1 > y2, in which case the sampled crop is an up-down flipped
version of the original image. The width dimension is treated similarly.
Normalized coordinates outside the `[0, 1]` range are allowed, in which
case we use `extrapolation_value` to extrapolate the input image values.
crop_size: A list of two integers `[crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the
image content is not preserved. Both `crop_height` and `crop_width` need
to be positive.
extrapolation_value: a float value to use for extrapolation.
scope: A name for the operation (optional).
Returns:
A 5-D tensor of shape `[batch, num_boxes, crop_height, crop_width, depth]`
"""
with tf.name_scope(scope, 'MatMulCropAndResize'):
box_levels = tf.zeros(tf.shape(boxes)[:2], dtype=tf.int32)
return multilevel_roi_align([image],
boxes,
box_levels,
crop_size,
align_corners=True,
extrapolation_value=extrapolation_value)
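# Minimal usage sketch (illustrative, not part of the original library);
# shapes and values are arbitrary:
#   image = tf.random.uniform([1, 128, 128, 3])
#   boxes = tf.constant([[[0.0, 0.0, 0.5, 0.5]]])  # [batch=1, num_boxes=1, 4]
#   crops = matmul_crop_and_resize(image, boxes, crop_size=[14, 14])
#   # crops has shape [1, 1, 14, 14, 3].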
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/spatial_transform_ops.py | spatial_transform_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.object_detection_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection import eval_util
from object_detection.core import standard_fields
from object_detection.utils import object_detection_evaluation
from object_detection.utils import tf_version
class OpenImagesV2EvaluationTest(tf.test.TestCase):
def test_returns_correct_metric_values(self):
categories = [{
'id': 1,
'name': 'cat'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'elephant'
}]
oiv2_evaluator = object_detection_evaluation.OpenImagesDetectionEvaluator(
categories)
image_key1 = 'img1'
groundtruth_boxes1 = np.array(
[[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
oiv2_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_group_of:
np.array([], dtype=bool)
})
image_key2 = 'img2'
groundtruth_boxes2 = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_group_of_list2 = np.array([False, True, False], dtype=bool)
oiv2_evaluator.add_single_ground_truth_image_info(image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list2
})
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
oiv2_evaluator.add_single_ground_truth_image_info(image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3
})
# Add detections
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
oiv2_evaluator.add_single_detected_image_info(image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
metrics = oiv2_evaluator.evaluate()
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/AP@0.5IOU/cat'], 0.16666666)
self.assertAlmostEqual(metrics['OpenImagesV2_Precision/mAP@0.5IOU'],
0.05555555)
oiv2_evaluator.clear()
self.assertFalse(oiv2_evaluator._image_ids)
class OpenImagesChallengeEvaluatorTest(tf.test.TestCase):
def test_returns_correct_detection_metric_values(self):
categories = [{
'id': 1,
'name': 'cat'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'elephant'
}]
oivchallenge_evaluator = (
object_detection_evaluation.OpenImagesChallengeEvaluator(
categories, evaluate_masks=False, group_of_weight=0.5))
image_key = 'img1'
groundtruth_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
groundtruth_class_labels = np.array([1, 3, 1], dtype=int)
groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)
groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels,
})
image_key = 'img2'
groundtruth_boxes = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels = np.array([1, 1, 3], dtype=int)
groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list
})
image_key = 'img3'
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels = np.array([2], dtype=int)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels
})
image_key = 'img1'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120]], dtype=float)
detected_class_labels = np.array([2, 2], dtype=int)
detected_scores = np.array([0.7, 0.8], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220],
[10, 10, 11, 11]],
dtype=float)
detected_class_labels = np.array([1, 1, 2, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.5, 0.9], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
image_key = 'img3'
detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
detected_class_labels = np.array([2], dtype=int)
detected_scores = np.array([0.5], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
metrics = oivchallenge_evaluator.evaluate()
expected_metric_name = 'OpenImagesDetectionChallenge'
self.assertAlmostEqual(
metrics[
expected_metric_name + '_PerformanceByCategory/AP@0.5IOU/dog'],
0.3333333333)
self.assertAlmostEqual(
metrics[
expected_metric_name + '_PerformanceByCategory/AP@0.5IOU/elephant'],
0.333333333333)
self.assertAlmostEqual(
metrics[
expected_metric_name + '_PerformanceByCategory/AP@0.5IOU/cat'],
0.142857142857)
self.assertAlmostEqual(
metrics[expected_metric_name + '_Precision/mAP@0.5IOU'],
0.269841269)
oivchallenge_evaluator.clear()
self.assertFalse(oivchallenge_evaluator._image_ids)
def test_returns_correct_instance_segm_metric_values(self):
categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
oivchallenge_evaluator = (
object_detection_evaluation.OpenImagesChallengeEvaluator(
categories, evaluate_masks=True))
image_key = 'img1'
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels = np.array([1, 2, 1], dtype=int)
groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)
groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)
groundtruth_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
zero_mask = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, zero_mask, zero_mask],
axis=0)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks
})
image_key = 'img3'
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels = np.array([2], dtype=int)
groundtruth_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0], axis=0)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks
})
image_key = 'img1'
detected_boxes = np.array([[0, 0, 2, 2], [2, 2, 3, 3]], dtype=float)
detection_mask_0 = np.array([[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detection_mask_0, zero_mask], axis=0)
detected_class_labels = np.array([2, 1], dtype=int)
detected_scores = np.array([0.7, 0.8], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels,
standard_fields.DetectionResultFields.detection_masks:
detected_masks
})
image_key = 'img3'
detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
detected_class_labels = np.array([2], dtype=int)
detected_scores = np.array([0.5], dtype=float)
detected_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_mask_0], axis=0)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels,
standard_fields.DetectionResultFields.detection_masks:
detected_masks
})
metrics = oivchallenge_evaluator.evaluate()
expected_metric_name = 'OpenImagesInstanceSegmentationChallenge'
self.assertAlmostEqual(
metrics[expected_metric_name + '_PerformanceByCategory/AP@0.5IOU/dog'],
1.0)
self.assertAlmostEqual(
metrics[
expected_metric_name + '_PerformanceByCategory/AP@0.5IOU/cat'],
0)
self.assertAlmostEqual(
metrics[expected_metric_name + '_Precision/mAP@0.5IOU'], 0.5)
oivchallenge_evaluator.clear()
self.assertFalse(oivchallenge_evaluator._image_ids)
class PascalEvaluationTest(tf.test.TestCase):
def test_returns_correct_metric_values_on_boxes(self):
categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
# Add groundtruth
pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
categories)
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
pascal_evaluator.add_single_ground_truth_image_info(
image_key1,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_difficult:
np.array([], dtype=bool)})
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
pascal_evaluator.add_single_ground_truth_image_info(
image_key2,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_difficult:
groundtruth_is_difficult_list2})
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
pascal_evaluator.add_single_ground_truth_image_info(
image_key3,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3})
# Add detections
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
pascal_evaluator.add_single_detected_image_info(
image_key,
{standard_fields.DetectionResultFields.detection_boxes: detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels})
metrics = pascal_evaluator.evaluate()
self.assertAlmostEqual(
metrics['PascalBoxes_PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
self.assertAlmostEqual(
metrics['PascalBoxes_PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
self.assertAlmostEqual(
metrics['PascalBoxes_PerformanceByCategory/AP@0.5IOU/cat'], 0.16666666)
self.assertAlmostEqual(metrics['PascalBoxes_Precision/mAP@0.5IOU'],
0.05555555)
pascal_evaluator.clear()
self.assertFalse(pascal_evaluator._image_ids)
def test_returns_correct_metric_values_on_masks(self):
categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
# Add groundtruth
pascal_evaluator = (
object_detection_evaluation.PascalInstanceSegmentationEvaluator(
categories))
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
groundtruth_masks_1_0 = np.array([[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_1_1 = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0]], dtype=np.uint8)
groundtruth_masks_1_2 = np.array([[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]], dtype=np.uint8)
groundtruth_masks1 = np.stack(
[groundtruth_masks_1_0, groundtruth_masks_1_1, groundtruth_masks_1_2],
axis=0)
pascal_evaluator.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_difficult:
np.array([], dtype=bool)
})
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
groundtruth_masks_2_0 = np.array([[1, 1, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_2_1 = np.array([[0, 0, 0, 0],
[1, 1, 1, 1],
[0, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_2_2 = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 1, 1, 1]], dtype=np.uint8)
groundtruth_masks2 = np.stack(
[groundtruth_masks_2_0, groundtruth_masks_2_1, groundtruth_masks_2_2],
axis=0)
pascal_evaluator.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_difficult:
groundtruth_is_difficult_list2
})
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
groundtruth_masks_3_0 = np.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype=np.uint8)
groundtruth_masks3 = np.stack([groundtruth_masks_3_0], axis=0)
pascal_evaluator.add_single_ground_truth_image_info(
image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_instance_masks:
groundtruth_masks3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3
})
# Add detections
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
detected_masks_0 = np.array([[1, 1, 1, 1],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array([[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0]], dtype=np.uint8)
detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
pascal_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_masks:
detected_masks,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
metrics = pascal_evaluator.evaluate()
self.assertAlmostEqual(
metrics['PascalMasks_PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
self.assertAlmostEqual(
metrics['PascalMasks_PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
self.assertAlmostEqual(
metrics['PascalMasks_PerformanceByCategory/AP@0.5IOU/cat'], 0.16666666)
self.assertAlmostEqual(metrics['PascalMasks_Precision/mAP@0.5IOU'],
0.05555555)
pascal_evaluator.clear()
self.assertFalse(pascal_evaluator._image_ids)
class WeightedPascalEvaluationTest(tf.test.TestCase):
def setUp(self):
self.categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
def create_and_add_common_ground_truth(self):
# Add groundtruth
self.wp_eval = (
object_detection_evaluation.WeightedPascalDetectionEvaluator(
self.categories))
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key1,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1})
    # 'img2' groundtruth is added separately by the individual test cases.
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key3,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3})
def add_common_detected(self):
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.wp_eval.add_single_detected_image_info(
image_key,
{standard_fields.DetectionResultFields.detection_boxes: detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels})
def test_returns_correct_metric_values(self):
self.create_and_add_common_ground_truth()
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key2,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2
})
self.add_common_detected()
metrics = self.wp_eval.evaluate()
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/cat'], 0.5 / 4)
self.assertAlmostEqual(metrics[self.wp_eval._metric_prefix +
'Precision/mAP@0.5IOU'],
1. / (4 + 1 + 2) / 3)
self.wp_eval.clear()
self.assertFalse(self.wp_eval._image_ids)
def test_returns_correct_metric_values_with_difficult_list(self):
self.create_and_add_common_ground_truth()
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
self.wp_eval.add_single_ground_truth_image_info(
image_key2,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_difficult:
groundtruth_is_difficult_list2
})
self.add_common_detected()
metrics = self.wp_eval.evaluate()
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/cat'], 0.5 / 3)
self.assertAlmostEqual(metrics[self.wp_eval._metric_prefix +
'Precision/mAP@0.5IOU'],
1. / (3 + 1 + 2) / 3)
self.wp_eval.clear()
self.assertFalse(self.wp_eval._image_ids)
class PrecisionAtRecallEvaluationTest(tf.test.TestCase):
def setUp(self):
self.categories = [{
'id': 1,
'name': 'cat'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'elephant'
}]
def create_and_add_common_ground_truth(self):
# Add groundtruth
self.wp_eval = (
object_detection_evaluation.PrecisionAtRecallDetectionEvaluator(
self.categories, recall_lower_bound=0.0, recall_upper_bound=0.5))
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1
})
    # 'img2' groundtruth is added separately by the individual test cases.
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3
})
def add_common_detected(self):
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.wp_eval.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
def test_returns_correct_metric_values(self):
self.create_and_add_common_ground_truth()
image_key2 = 'img2'
groundtruth_boxes2 = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
self.wp_eval.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2
})
self.add_common_detected()
metrics = self.wp_eval.evaluate()
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/cat'], 0.5 / 4)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'Precision/mAP@0.5IOU@[0.0,0.5]Recall'], 1. / (3 + 1 + 2) / 4)
self.wp_eval.clear()
self.assertFalse(self.wp_eval._image_ids)
def test_returns_correct_metric_values_with_difficult_list(self):
self.create_and_add_common_ground_truth()
image_key2 = 'img2'
groundtruth_boxes2 = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
self.wp_eval.add_single_ground_truth_image_info(
image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_difficult:
groundtruth_is_difficult_list2
})
self.add_common_detected()
metrics = self.wp_eval.evaluate()
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/dog'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/elephant'], 0.0)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'PerformanceByCategory/AP@0.5IOU/cat'], 0.5 / 3)
self.assertAlmostEqual(
metrics[self.wp_eval._metric_prefix +
'Precision/mAP@0.5IOU@[0.0,0.5]Recall'], 1. / (3 + 1 + 2) / 3)
self.wp_eval.clear()
self.assertFalse(self.wp_eval._image_ids)
class ObjectDetectionEvaluationTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 3
self.od_eval = object_detection_evaluation.ObjectDetectionEvaluation(
num_groundtruth_classes)
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([0, 2, 0], dtype=int)
self.od_eval.add_single_ground_truth_image_info(
image_key1, groundtruth_boxes1, groundtruth_class_labels1)
image_key2 = 'img2'
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([0, 0, 2], dtype=int)
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
groundtruth_is_group_of_list2 = np.array([False, False, True], dtype=bool)
self.od_eval.add_single_ground_truth_image_info(
image_key2, groundtruth_boxes2, groundtruth_class_labels2,
groundtruth_is_difficult_list2, groundtruth_is_group_of_list2)
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([1], dtype=int)
self.od_eval.add_single_ground_truth_image_info(
image_key3, groundtruth_boxes3, groundtruth_class_labels3)
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([0, 0, 2], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
self.od_eval.add_single_detected_image_info(
image_key, detected_boxes, detected_scores, detected_class_labels)
def test_value_error_on_zero_classes(self):
with self.assertRaises(ValueError):
object_detection_evaluation.ObjectDetectionEvaluation(
num_groundtruth_classes=0)
def test_add_single_ground_truth_image_info(self):
expected_num_gt_instances_per_class = np.array([3, 1, 1], dtype=int)
expected_num_gt_imgs_per_class = np.array([2, 1, 2], dtype=int)
self.assertTrue(np.array_equal(expected_num_gt_instances_per_class,
self.od_eval.num_gt_instances_per_class))
self.assertTrue(np.array_equal(expected_num_gt_imgs_per_class,
self.od_eval.num_gt_imgs_per_class))
groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],
[10, 10, 12, 12]], dtype=float)
self.assertTrue(np.allclose(self.od_eval.groundtruth_boxes['img2'],
groundtruth_boxes2))
groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)
self.assertTrue(np.allclose(
self.od_eval.groundtruth_is_difficult_list['img2'],
groundtruth_is_difficult_list2))
groundtruth_is_group_of_list2 = np.array([False, False, True], dtype=bool)
self.assertTrue(
np.allclose(self.od_eval.groundtruth_is_group_of_list['img2'],
groundtruth_is_group_of_list2))
groundtruth_class_labels1 = np.array([0, 2, 0], dtype=int)
self.assertTrue(np.array_equal(self.od_eval.groundtruth_class_labels[
'img1'], groundtruth_class_labels1))
def test_add_single_detected_image_info(self):
expected_scores_per_class = [[np.array([0.8, 0.7], dtype=float)], [],
[np.array([0.9], dtype=float)]]
expected_tp_fp_labels_per_class = [[np.array([0, 1], dtype=bool)], [],
[np.array([0], dtype=bool)]]
expected_num_images_correctly_detected_per_class = np.array([0, 0, 0],
dtype=int)
for i in range(self.od_eval.num_class):
for j in range(len(expected_scores_per_class[i])):
self.assertTrue(np.allclose(expected_scores_per_class[i][j],
self.od_eval.scores_per_class[i][j]))
self.assertTrue(np.array_equal(expected_tp_fp_labels_per_class[i][
j], self.od_eval.tp_fp_labels_per_class[i][j]))
self.assertTrue(np.array_equal(
expected_num_images_correctly_detected_per_class,
self.od_eval.num_images_correctly_detected_per_class))
def test_evaluate(self):
(average_precision_per_class, mean_ap, precisions_per_class,
recalls_per_class, corloc_per_class,
mean_corloc) = self.od_eval.evaluate()
expected_precisions_per_class = [np.array([0, 0.5], dtype=float),
np.array([], dtype=float),
np.array([0], dtype=float)]
expected_recalls_per_class = [
np.array([0, 1. / 3.], dtype=float), np.array([], dtype=float),
np.array([0], dtype=float)
]
expected_average_precision_per_class = np.array([1. / 6., 0, 0],
dtype=float)
expected_corloc_per_class = np.array([0, 0, 0], dtype=float)
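    # Mean AP averages the per-class APs above: (1/6 + 0 + 0) / 3 = 1/18.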
expected_mean_ap = 1. / 18
expected_mean_corloc = 0.0
for i in range(self.od_eval.num_class):
self.assertTrue(np.allclose(expected_precisions_per_class[i],
precisions_per_class[i]))
self.assertTrue(np.allclose(expected_recalls_per_class[i],
recalls_per_class[i]))
self.assertTrue(np.allclose(expected_average_precision_per_class,
average_precision_per_class))
self.assertTrue(np.allclose(expected_corloc_per_class, corloc_per_class))
self.assertAlmostEqual(expected_mean_ap, mean_ap)
self.assertAlmostEqual(expected_mean_corloc, mean_corloc)
def test_merge_internal_state(self):
# Test that if initial state is merged, the results of the evaluation are
# the same.
od_eval_state = self.od_eval.get_internal_state()
copy_od_eval = object_detection_evaluation.ObjectDetectionEvaluation(
self.od_eval.num_class)
copy_od_eval.merge_internal_state(od_eval_state)
(average_precision_per_class, mean_ap, precisions_per_class,
recalls_per_class, corloc_per_class,
mean_corloc) = self.od_eval.evaluate()
(copy_average_precision_per_class, copy_mean_ap, copy_precisions_per_class,
copy_recalls_per_class, copy_corloc_per_class,
copy_mean_corloc) = copy_od_eval.evaluate()
for i in range(self.od_eval.num_class):
self.assertTrue(
np.allclose(copy_precisions_per_class[i], precisions_per_class[i]))
self.assertTrue(
np.allclose(copy_recalls_per_class[i], recalls_per_class[i]))
self.assertTrue(
np.allclose(copy_average_precision_per_class,
average_precision_per_class))
self.assertTrue(np.allclose(copy_corloc_per_class, corloc_per_class))
self.assertAlmostEqual(copy_mean_ap, mean_ap)
self.assertAlmostEqual(copy_mean_corloc, mean_corloc)
@unittest.skipIf(tf_version.is_tf2(), 'Eval Metrics ops are supported in TF1.X '
'only.')
class ObjectDetectionEvaluatorTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
self.categories = [{
'id': 1,
'name': 'person'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'cat'
}]
self.od_eval = object_detection_evaluation.ObjectDetectionEvaluator(
categories=self.categories)
def _make_evaluation_dict(self,
resized_groundtruth_masks=False,
batch_size=1,
max_gt_boxes=None,
scale_to_absolute=False):
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8)
if batch_size == 1:
key = tf.constant('image1')
else:
key = tf.constant([str(i) for i in range(batch_size)])
    detection_boxes = tf.concat(
        [tf.tile(tf.constant([[[0., 0., 1., 1.]]]),
                 multiples=[batch_size - 1, 1, 1]),
         tf.constant([[[0., 0., 0.5, 0.5]]])],
        axis=0)
    detection_scores = tf.concat(
        [tf.tile(tf.constant([[0.5]]), multiples=[batch_size - 1, 1]),
         tf.constant([[0.8]])],
        axis=0)
detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1])
detection_masks = tf.tile(
tf.ones(shape=[1, 2, 20, 20], dtype=tf.float32),
multiples=[batch_size, 1, 1, 1])
groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])
groundtruth_classes = tf.constant([1])
groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8)
num_detections = tf.ones([batch_size])
if resized_groundtruth_masks:
groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8)
if batch_size > 1:
groundtruth_boxes = tf.tile(
tf.expand_dims(groundtruth_boxes, 0), multiples=[batch_size, 1, 1])
groundtruth_classes = tf.tile(
tf.expand_dims(groundtruth_classes, 0), multiples=[batch_size, 1])
groundtruth_instance_masks = tf.tile(
tf.expand_dims(groundtruth_instance_masks, 0),
multiples=[batch_size, 1, 1, 1])
detections = {
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
detection_fields.num_detections: num_detections
}
groundtruth = {
input_data_fields.groundtruth_boxes:
groundtruth_boxes,
input_data_fields.groundtruth_classes:
groundtruth_classes,
input_data_fields.groundtruth_instance_masks:
groundtruth_instance_masks,
}
if batch_size > 1:
return eval_util.result_dict_for_batched_example(
image,
key,
detections,
groundtruth,
scale_to_absolute=scale_to_absolute,
max_gt_boxes=max_gt_boxes)
else:
return eval_util.result_dict_for_single_example(
image,
key,
detections,
groundtruth,
scale_to_absolute=scale_to_absolute)
@parameterized.parameters({
'batch_size': 1,
'expected_map': 0,
'max_gt_boxes': None,
'scale_to_absolute': True
}, {
'batch_size': 8,
'expected_map': 0.765625,
'max_gt_boxes': [1],
'scale_to_absolute': True
}, {
'batch_size': 1,
'expected_map': 0,
'max_gt_boxes': None,
'scale_to_absolute': False
}, {
'batch_size': 8,
'expected_map': 0.765625,
'max_gt_boxes': [1],
'scale_to_absolute': False
})
def test_get_estimator_eval_metric_ops(self,
batch_size=1,
expected_map=1,
max_gt_boxes=None,
scale_to_absolute=False):
eval_dict = self._make_evaluation_dict(
batch_size=batch_size,
max_gt_boxes=max_gt_boxes,
scale_to_absolute=scale_to_absolute)
tf.logging.info('eval_dict: {}'.format(eval_dict))
metric_ops = self.od_eval.get_estimator_eval_metric_ops(eval_dict)
_, update_op = metric_ops['Precision/mAP@0.5IOU']
with self.test_session() as sess:
metrics = {}
for key, (value_op, _) in six.iteritems(metric_ops):
metrics[key] = value_op
sess.run(update_op)
metrics = sess.run(metrics)
self.assertAlmostEqual(expected_map, metrics['Precision/mAP@0.5IOU'])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/object_detection_evaluation_test.py | object_detection_evaluation_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List operations for Numpy BoxLists.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list
from object_detection.utils import np_box_ops
class SortOrder(object):
"""Enum class for sort order.
Attributes:
ascend: ascend order.
descend: descend order.
"""
ASCEND = 1
DESCEND = 2
def area(boxlist):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
Returns:
    a numpy array with shape [N] representing box areas
"""
y_min, x_min, y_max, x_max = boxlist.get_coordinates()
return (y_max - y_min) * (x_max - x_min)
def intersection(boxlist1, boxlist2):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
"""
return np_box_ops.intersection(boxlist1.get(), boxlist2.get())
def iou(boxlist1, boxlist2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
return np_box_ops.iou(boxlist1.get(), boxlist2.get())
def ioa(boxlist1, boxlist2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
return np_box_ops.ioa(boxlist1.get(), boxlist2.get())
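

# The helper below is an added illustrative sketch, not part of the original
# module: it demonstrates the asymmetry documented above, assuming the
# BoxList behavior defined in np_box_list. With a unit box inside a 2x2 box,
# the intersection area is 1, so iou = 1 / (1 + 4 - 1) = 0.25 either way,
# while ioa divides by the area of the second argument's box.
def _example_iou_ioa_sketch():
  small = np_box_list.BoxList(np.array([[0., 0., 1., 1.]]))
  large = np_box_list.BoxList(np.array([[0., 0., 2., 2.]]))
  assert np.allclose(iou(small, large), [[0.25]])
  assert np.allclose(ioa(small, large), [[0.25]])  # intersection / area(large)
  assert np.allclose(ioa(large, small), [[1.0]])   # intersection / area(small)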
def gather(boxlist, indices, fields=None):
"""Gather boxes from BoxList according to indices and return new BoxList.
By default, gather returns boxes corresponding to the input index list, as
well as all additional fields stored in the boxlist (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
boxlist: BoxList holding N boxes
indices: a 1-d numpy array of type int_
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indices
Raises:
ValueError: if specified field is not contained in boxlist or if the
indices are not of type int_
"""
if indices.size:
if np.amax(indices) >= boxlist.num_boxes() or np.amin(indices) < 0:
raise ValueError('indices are out of valid range.')
subboxlist = np_box_list.BoxList(boxlist.get()[indices, :])
if fields is None:
fields = boxlist.get_extra_fields()
for field in fields:
extra_field_data = boxlist.get_field(field)
subboxlist.add_field(field, extra_field_data[indices, ...])
return subboxlist
def sort_by_field(boxlist, field, order=SortOrder.DESCEND):
"""Sort boxes and associated fields according to a scalar field.
A common use case is reordering the boxes according to descending scores.
Args:
boxlist: BoxList holding N boxes.
field: A BoxList field for sorting and reordering the BoxList.
order: (Optional) 'descend' or 'ascend'. Default is descend.
Returns:
sorted_boxlist: A sorted BoxList with the field in the specified order.
Raises:
ValueError: if specified field does not exist or is not of single dimension.
ValueError: if the order is not either descend or ascend.
"""
if not boxlist.has_field(field):
raise ValueError('Field ' + field + ' does not exist')
if len(boxlist.get_field(field).shape) != 1:
    raise ValueError('Field ' + field + ' should be single dimension.')
if order != SortOrder.DESCEND and order != SortOrder.ASCEND:
raise ValueError('Invalid sort order')
field_to_sort = boxlist.get_field(field)
sorted_indices = np.argsort(field_to_sort)
if order == SortOrder.DESCEND:
sorted_indices = sorted_indices[::-1]
return gather(boxlist, sorted_indices)
def non_max_suppression(boxlist,
max_output_size=10000,
iou_threshold=1.0,
score_threshold=-10.0):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. In each iteration, the detected bounding box with
highest score in the available pool is selected.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores. All scores belong to the same class.
max_output_size: maximum number of retained boxes
iou_threshold: intersection over union threshold.
    score_threshold: minimum score threshold. Boxes with scores below this
      value are removed. Defaults to -10.0, a threshold low enough to pass
      essentially all boxes unless the caller supplies a stricter value.
Returns:
a BoxList holding M boxes where M <= max_output_size
Raises:
ValueError: if 'scores' field does not exist
ValueError: if threshold is not in [0, 1]
ValueError: if max_output_size < 0
"""
if not boxlist.has_field('scores'):
raise ValueError('Field scores does not exist')
if iou_threshold < 0. or iou_threshold > 1.0:
raise ValueError('IOU threshold must be in [0, 1]')
if max_output_size < 0:
    raise ValueError('max_output_size must be non-negative.')
boxlist = filter_scores_greater_than(boxlist, score_threshold)
if boxlist.num_boxes() == 0:
return boxlist
boxlist = sort_by_field(boxlist, 'scores')
# Prevent further computation if NMS is disabled.
if iou_threshold == 1.0:
if boxlist.num_boxes() > max_output_size:
selected_indices = np.arange(max_output_size)
return gather(boxlist, selected_indices)
else:
return boxlist
boxes = boxlist.get()
num_boxes = boxlist.num_boxes()
  # is_index_valid is True for boxes that remain valid selection candidates.
is_index_valid = np.full(num_boxes, 1, dtype=bool)
selected_indices = []
num_output = 0
for i in range(num_boxes):
if num_output < max_output_size:
if is_index_valid[i]:
num_output += 1
selected_indices.append(i)
is_index_valid[i] = False
valid_indices = np.where(is_index_valid)[0]
if valid_indices.size == 0:
break
intersect_over_union = np_box_ops.iou(
np.expand_dims(boxes[i, :], axis=0), boxes[valid_indices, :])
intersect_over_union = np.squeeze(intersect_over_union, axis=0)
is_index_valid[valid_indices] = np.logical_and(
is_index_valid[valid_indices],
intersect_over_union <= iou_threshold)
return gather(boxlist, np.array(selected_indices))
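

# Added usage sketch (hypothetical helper, not part of the original module):
# two heavily overlapping boxes and one disjoint box; with iou_threshold=0.5
# the lower-scoring overlapping box is suppressed and the rest are returned
# sorted by score.
def _example_non_max_suppression_sketch():
  boxes = np_box_list.BoxList(
      np.array([[0., 0., 1., 1.],
                [0., 0.1, 1., 1.1],
                [2., 2., 3., 3.]], dtype=float))
  boxes.add_field('scores', np.array([0.9, 0.8, 0.7], dtype=float))
  kept = non_max_suppression(boxes, max_output_size=10, iou_threshold=0.5)
  assert kept.num_boxes() == 2
  assert np.allclose(kept.get_field('scores'), [0.9, 0.7])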
def multi_class_non_max_suppression(boxlist, score_thresh, iou_thresh,
max_output_size):
"""Multi-class version of non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. It operates independently for each class for
which scores are provided (via the scores field of the input box_list),
pruning boxes with score less than a provided threshold prior to
applying NMS.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores. This scores field is a tensor that can
      be 1 dimensional (in the case of a single class) or 2-dimensional, in
      which case we assume that it takes the shape [num_boxes, num_classes].
We further assume that this rank is known statically and that
scores.shape[1] is also known (i.e., the number of classes is fixed
and known at graph construction time).
score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (boxes that have high IOU overlap
      with previously selected boxes are removed).
max_output_size: maximum number of retained boxes per class.
Returns:
a BoxList holding M boxes with a rank-1 scores field representing
corresponding scores for each box with scores sorted in decreasing order
and a rank-1 classes field representing a class label for each box.
Raises:
ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have
a valid scores field.
"""
if not 0 <= iou_thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(boxlist, np_box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
scores = boxlist.get_field('scores')
if len(scores.shape) == 1:
scores = np.reshape(scores, [-1, 1])
elif len(scores.shape) == 2:
if scores.shape[1] is None:
raise ValueError('scores field must have statically defined second '
'dimension')
else:
raise ValueError('scores field must be of rank 1 or 2')
num_boxes = boxlist.num_boxes()
num_scores = scores.shape[0]
num_classes = scores.shape[1]
if num_boxes != num_scores:
    raise ValueError('Incorrect scores field length: actual %d vs expected '
                     '%d.' % (num_scores, num_boxes))
selected_boxes_list = []
for class_idx in range(num_classes):
boxlist_and_class_scores = np_box_list.BoxList(boxlist.get())
class_scores = np.reshape(scores[0:num_scores, class_idx], [-1])
boxlist_and_class_scores.add_field('scores', class_scores)
boxlist_filt = filter_scores_greater_than(boxlist_and_class_scores,
score_thresh)
nms_result = non_max_suppression(boxlist_filt,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh)
nms_result.add_field(
'classes', np.zeros_like(nms_result.get_field('scores')) + class_idx)
selected_boxes_list.append(nms_result)
selected_boxes = concatenate(selected_boxes_list)
sorted_boxes = sort_by_field(selected_boxes, 'scores')
return sorted_boxes
def scale(boxlist, y_scale, x_scale):
"""Scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: float
x_scale: float
Returns:
boxlist: BoxList holding N boxes
"""
y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = np_box_list.BoxList(np.hstack([y_min, x_min, y_max, x_max]))
fields = boxlist.get_extra_fields()
for field in fields:
extra_field_data = boxlist.get_field(field)
scaled_boxlist.add_field(field, extra_field_data)
return scaled_boxlist
def clip_to_window(boxlist, window, filter_nonoverlapping=True):
"""Clip bounding boxes to a window.
This op clips input bounding boxes (represented by bounding box
corners) to a window, optionally filtering out boxes that do not
overlap at all with the window.
Args:
boxlist: BoxList holding M_in boxes
window: a numpy array of shape [4] representing the
[y_min, x_min, y_max, x_max] window to which the op
should clip boxes.
filter_nonoverlapping: whether to filter out boxes that do not overlap at
all with the window.
Returns:
a BoxList holding M_out boxes where M_out <= M_in
"""
y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
win_y_min = window[0]
win_x_min = window[1]
win_y_max = window[2]
win_x_max = window[3]
y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)
y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)
x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)
x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)
clipped = np_box_list.BoxList(
np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped]))
clipped = _copy_extra_fields(clipped, boxlist)
if filter_nonoverlapping:
areas = area(clipped)
nonzero_area_indices = np.reshape(
np.nonzero(np.greater(areas, 0.0)), [-1]).astype(np.int32)
clipped = gather(clipped, nonzero_area_indices)
return clipped
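

# Added illustrative sketch (not part of the original module): a box that
# extends past the window is clipped to it, while a box entirely outside the
# window is filtered away because its clipped area is zero.
def _example_clip_to_window_sketch():
  boxes = np_box_list.BoxList(
      np.array([[-0.5, -0.5, 0.5, 0.5],
                [2., 2., 3., 3.]], dtype=float))
  clipped = clip_to_window(boxes, np.array([0., 0., 1., 1.]))
  assert clipped.num_boxes() == 1
  assert np.allclose(clipped.get(), [[0., 0., 0.5, 0.5]])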
def prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0):
"""Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.
For each box in boxlist1, we want its IOA to be more than minoverlap with
at least one of the boxes in boxlist2. If it does not, we remove it.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
minoverlap: Minimum required overlap between boxes, to count them as
overlapping.
Returns:
A pruned boxlist with size [N', 4].
"""
intersection_over_area = ioa(boxlist2, boxlist1) # [M, N] tensor
intersection_over_area = np.amax(intersection_over_area, axis=0) # [N] tensor
keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))
keep_inds = np.nonzero(keep_bool)[0]
new_boxlist1 = gather(boxlist1, keep_inds)
return new_boxlist1
def prune_outside_window(boxlist, window):
"""Prunes bounding boxes that fall outside a given window.
This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window, which only prunes bounding boxes that fall
completely outside the window, and clips any bounding boxes that partially
overflow.
Args:
boxlist: a BoxList holding M_in boxes.
window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]
of the window.
Returns:
    pruned_boxlist: a BoxList holding M_out boxes where M_out <= M_in.
    valid_indices: a numpy array with shape [M_out] indexing the valid
      bounding boxes in the input BoxList.
"""
y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
win_y_min = window[0]
win_x_min = window[1]
win_y_max = window[2]
win_x_max = window[3]
coordinate_violations = np.hstack([np.less(y_min, win_y_min),
np.less(x_min, win_x_min),
np.greater(y_max, win_y_max),
np.greater(x_max, win_x_max)])
valid_indices = np.reshape(
np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1])
return gather(boxlist, valid_indices), valid_indices
def concatenate(boxlists, fields=None):
"""Concatenate list of BoxLists.
This op concatenates a list of input BoxLists into a larger BoxList. It also
handles concatenation of BoxList fields as long as the field tensor shapes
are equal except for the first dimension.
Args:
boxlists: list of BoxList objects
fields: optional list of fields to also concatenate. By default, all
fields from the first BoxList in the list are included in the
concatenation.
Returns:
a BoxList with number of boxes equal to
    sum([boxlist.num_boxes() for boxlist in boxlists])
Raises:
ValueError: if boxlists is invalid (i.e., is not a list, is empty, or
contains non BoxList objects), or if requested fields are not contained in
all boxlists
"""
if not isinstance(boxlists, list):
raise ValueError('boxlists should be a list')
if not boxlists:
raise ValueError('boxlists should have nonzero length')
for boxlist in boxlists:
if not isinstance(boxlist, np_box_list.BoxList):
raise ValueError('all elements of boxlists should be BoxList objects')
concatenated = np_box_list.BoxList(
np.vstack([boxlist.get() for boxlist in boxlists]))
if fields is None:
fields = boxlists[0].get_extra_fields()
for field in fields:
first_field_shape = boxlists[0].get_field(field).shape
first_field_shape = first_field_shape[1:]
for boxlist in boxlists:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all requested fields')
field_shape = boxlist.get_field(field).shape
field_shape = field_shape[1:]
if field_shape != first_field_shape:
raise ValueError('field %s must have same shape for all boxlists '
'except for the 0th dimension.' % field)
concatenated_field = np.concatenate(
[boxlist.get_field(field) for boxlist in boxlists], axis=0)
concatenated.add_field(field, concatenated_field)
return concatenated
def filter_scores_greater_than(boxlist, thresh):
"""Filter to keep only boxes with score exceeding a given threshold.
This op keeps the collection of boxes whose corresponding scores are
greater than the input threshold.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not
have a scores field
"""
if not isinstance(boxlist, np_box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
scores = boxlist.get_field('scores')
if len(scores.shape) > 2:
raise ValueError('Scores should have rank 1 or 2')
if len(scores.shape) == 2 and scores.shape[1] != 1:
raise ValueError('Scores should have rank 1 or have shape '
'consistent with [None, 1]')
high_score_indices = np.reshape(np.where(np.greater(scores, thresh)),
[-1]).astype(np.int32)
return gather(boxlist, high_score_indices)
def change_coordinate_frame(boxlist, window):
"""Change coordinate frame of the boxlist to be relative to window's frame.
Given a window of the form [ymin, xmin, ymax, xmax],
changes bounding box coordinates from boxlist to be relative to this window
(e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).
An example use case is data augmentation: where we are given groundtruth
boxes (boxlist) and would like to randomly crop the image to some
window (window). In this case we need to change the coordinate frame of
each groundtruth box to be relative to this new window.
Args:
boxlist: A BoxList object holding N boxes.
window: a size 4 1-D numpy array.
Returns:
Returns a BoxList object with N boxes.
"""
win_height = window[2] - window[0]
win_width = window[3] - window[1]
boxlist_new = scale(
np_box_list.BoxList(boxlist.get() -
[window[0], window[1], window[0], window[1]]),
1.0 / win_height, 1.0 / win_width)
_copy_extra_fields(boxlist_new, boxlist)
return boxlist_new
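

# Added worked example (not part of the original module): with window
# [0, 0, 0.5, 0.5] the box is translated by the window's min corner and then
# scaled by 1/win_height and 1/win_width, so [0.25, 0.25, 0.5, 0.5] lands at
# [0.5, 0.5, 1.0, 1.0] in the window's coordinate frame.
def _example_change_coordinate_frame_sketch():
  boxes = np_box_list.BoxList(np.array([[0.25, 0.25, 0.5, 0.5]]))
  shifted = change_coordinate_frame(boxes, np.array([0., 0., 0.5, 0.5]))
  assert np.allclose(shifted.get(), [[0.5, 0.5, 1.0, 1.0]])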
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def _update_valid_indices_by_removing_high_iou_boxes(
selected_indices, is_index_valid, intersect_over_union, threshold):
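  """Invalidates boxes whose IOU with any selected box exceeds a threshold.

  Added docstring: this helper mirrors the suppression step performed inline
  by non_max_suppression and appears to be unused within this module.
  """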
max_iou = np.max(intersect_over_union[:, selected_indices], axis=1)
return np.logical_and(is_index_valid, max_iou <= threshold)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_list_ops.py | np_box_list_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for creating TFRecord data sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
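

# Added usage sketch (not part of the original module): these helpers are
# typically combined to build a tf.train.Example. The feature keys below are
# illustrative only, not a fixed schema.
def _example_build_tf_example_sketch():
  return tf.train.Example(features=tf.train.Features(feature={
      'image/height': int64_feature(480),
      'image/width': int64_feature(640),
      'image/encoded': bytes_feature(b'...jpeg bytes...'),
      'image/object/bbox/ymin': float_list_feature([0.1, 0.4]),
      'image/object/bbox/xmin': float_list_feature([0.2, 0.5]),
  }))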
def read_examples_list(path):
"""Read list of training or validation examples.
The file is assumed to contain a single example per line where the first
token in the line is an identifier that allows us to find the image and
annotation xml for that example.
For example, the line:
xyz 3
would allow us to find files xyz.jpg and xyz.xml (the 3 would be ignored).
Args:
path: absolute path to examples list file.
Returns:
list of example identifiers (strings).
"""
with tf.gfile.GFile(path) as fid:
lines = fid.readlines()
return [line.strip().split(' ')[0] for line in lines]
def recursive_parse_xml_to_dict(xml):
"""Recursively parses XML contents to python dict.
We assume that `object` tags are the only ones that can appear
multiple times at the same level of a tree.
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
if not xml:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = recursive_parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
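

# Added illustrative note (not part of the original module): parsing
#   <annotation>
#     <filename>img1.jpg</filename>
#     <object><name>cat</name></object>
#     <object><name>dog</name></object>
#   </annotation>
# with lxml.etree and passing the root element to recursive_parse_xml_to_dict
# yields
#   {'annotation': {'filename': 'img1.jpg',
#                   'object': [{'name': 'cat'}, {'name': 'dog'}]}}
# because repeated 'object' tags are collected into a list.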
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/dataset_util.py | dataset_util.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import standard_fields as fields
from object_detection.utils import ops
from object_detection.utils import test_case
class NormalizedToImageCoordinatesTest(test_case.TestCase):
def test_normalized_to_image_coordinates(self):
normalized_boxes_np = np.array([[[0.0, 0.0, 1.0, 1.0]],
[[0.5, 0.5, 1.0, 1.0]]])
def graph_fn(normalized_boxes):
image_shape = tf.convert_to_tensor([1, 4, 4, 3], dtype=tf.int32)
absolute_boxes = ops.normalized_to_image_coordinates(
normalized_boxes, image_shape, parallel_iterations=2)
return absolute_boxes
expected_boxes = np.array([[[0, 0, 4, 4]],
[[2, 2, 4, 4]]])
absolute_boxes = self.execute(graph_fn, [normalized_boxes_np])
self.assertAllEqual(absolute_boxes, expected_boxes)
class ReduceSumTrailingDimensions(test_case.TestCase):
def test_reduce_sum_trailing_dimensions(self):
def graph_fn(input_tensor):
reduced_tensor = ops.reduce_sum_trailing_dimensions(input_tensor, ndims=2)
return reduced_tensor
reduced_np = self.execute(graph_fn, [np.ones((2, 2, 2), np.float32)])
self.assertAllClose(reduced_np, 2 * np.ones((2, 2), np.float32))
class MeshgridTest(test_case.TestCase):
def test_meshgrid_numpy_comparison(self):
"""Tests meshgrid op with vectors, for which it should match numpy."""
x = np.arange(4)
y = np.arange(6)
def graph_fn():
xgrid, ygrid = ops.meshgrid(x, y)
return xgrid, ygrid
exp_xgrid, exp_ygrid = np.meshgrid(x, y)
xgrid_output, ygrid_output = self.execute(graph_fn, [])
self.assertAllEqual(xgrid_output, exp_xgrid)
self.assertAllEqual(ygrid_output, exp_ygrid)
def test_meshgrid_multidimensional(self):
np.random.seed(18)
x = np.random.rand(4, 1, 2).astype(np.float32)
y = np.random.rand(2, 3).astype(np.float32)
grid_shape = list(y.shape) + list(x.shape)
def graph_fn():
xgrid, ygrid = ops.meshgrid(x, y)
self.assertEqual(xgrid.get_shape().as_list(), grid_shape)
self.assertEqual(ygrid.get_shape().as_list(), grid_shape)
return xgrid, ygrid
xgrid_output, ygrid_output = self.execute(graph_fn, [])
# Check the shape of the output grids
self.assertEqual(xgrid_output.shape, tuple(grid_shape))
self.assertEqual(ygrid_output.shape, tuple(grid_shape))
# Check a few elements
test_elements = [((3, 0, 0), (1, 2)),
((2, 0, 1), (0, 0)),
((0, 0, 0), (1, 1))]
for xind, yind in test_elements:
# These are float equality tests, but the meshgrid op should not introduce
# rounding.
self.assertEqual(xgrid_output[yind + xind], x[xind])
self.assertEqual(ygrid_output[yind + xind], y[yind])
class OpsTestFixedPadding(test_case.TestCase):
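  # Added note on the expected shapes below: total padding follows the
  # standard fixed-padding formula kernel_size_effective - 1, where
  # kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1).
  # Hence a 2x2 input becomes 4x4 for a 3x3 kernel, 6x6 for a 5x5 kernel,
  # and 6x6 for an atrous 3x3 kernel with rate 2.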
def test_3x3_kernel(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.fixed_padding(tensor, 3)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 4, 4, 1), padded_tensor_out.shape)
def test_5x5_kernel(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.fixed_padding(tensor, 5)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 6, 6, 1), padded_tensor_out.shape)
def test_3x3_atrous_kernel(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.fixed_padding(tensor, 3, 2)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
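    # With dilation rate 2 the effective kernel size is 3 + (3 - 1) * (2 - 1)
    # = 5, so fixed_padding adds 4 pixels per spatial dimension: 2x2 -> 6x6.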
self.assertEqual((1, 6, 6, 1), padded_tensor_out.shape)
class OpsTestPadToMultiple(test_case.TestCase):
def test_zero_padding(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.pad_to_multiple(tensor, 1)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)
def test_no_padding(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.pad_to_multiple(tensor, 2)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)
def test_non_square_padding(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]]]])
padded_tensor = ops.pad_to_multiple(tensor, 2)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)
def test_padding(self):
def graph_fn():
tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
padded_tensor = ops.pad_to_multiple(tensor, 4)
return padded_tensor
padded_tensor_out = self.execute(graph_fn, [])
self.assertEqual((1, 4, 4, 1), padded_tensor_out.shape)
class OpsTestPaddedOneHotEncoding(test_case.TestCase):
def test_correct_one_hot_tensor_with_no_pad(self):
def graph_fn():
indices = tf.constant([1, 2, 3, 5])
one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=0)
return one_hot_tensor
expected_tensor = np.array([[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1]], np.float32)
out_one_hot_tensor = self.execute(graph_fn, [])
self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
atol=1e-10)
def test_correct_one_hot_tensor_with_pad_one(self):
def graph_fn():
indices = tf.constant([1, 2, 3, 5])
one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=1)
return one_hot_tensor
expected_tensor = np.array([[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1]], np.float32)
out_one_hot_tensor = self.execute(graph_fn, [])
self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
atol=1e-10)
def test_correct_one_hot_tensor_with_pad_three(self):
def graph_fn():
indices = tf.constant([1, 2, 3, 5])
one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=3)
return one_hot_tensor
expected_tensor = np.array([[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]], np.float32)
out_one_hot_tensor = self.execute(graph_fn, [])
self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
atol=1e-10)
def test_correct_padded_one_hot_tensor_with_empty_indices(self):
depth = 6
pad = 2
def graph_fn():
indices = tf.constant([])
one_hot_tensor = ops.padded_one_hot_encoding(
indices, depth=depth, left_pad=pad)
return one_hot_tensor
expected_tensor = np.zeros((0, depth + pad))
out_one_hot_tensor = self.execute(graph_fn, [])
self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
atol=1e-10)
def test_return_none_on_zero_depth(self):
indices = tf.constant([1, 2, 3, 4, 5])
one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=0, left_pad=2)
self.assertEqual(one_hot_tensor, None)
def test_raise_value_error_on_rank_two_input(self):
indices = tf.constant(1.0, shape=(2, 3))
with self.assertRaises(ValueError):
ops.padded_one_hot_encoding(indices, depth=6, left_pad=2)
def test_raise_value_error_on_negative_pad(self):
indices = tf.constant(1.0, shape=(2, 3))
with self.assertRaises(ValueError):
ops.padded_one_hot_encoding(indices, depth=6, left_pad=-1)
def test_raise_value_error_on_float_pad(self):
indices = tf.constant(1.0, shape=(2, 3))
with self.assertRaises(ValueError):
ops.padded_one_hot_encoding(indices, depth=6, left_pad=0.1)
def test_raise_value_error_on_float_depth(self):
indices = tf.constant(1.0, shape=(2, 3))
with self.assertRaises(ValueError):
ops.padded_one_hot_encoding(indices, depth=0.1, left_pad=2)
class OpsDenseToSparseBoxesTest(test_case.TestCase):
def test_return_all_boxes_when_all_input_boxes_are_valid(self):
num_classes = 4
num_valid_boxes = 3
code_size = 4
def graph_fn(dense_location, dense_num_boxes):
box_locations, box_classes = ops.dense_to_sparse_boxes(
dense_location, dense_num_boxes, num_classes)
return box_locations, box_classes
dense_location_np = np.random.uniform(size=[num_valid_boxes, code_size])
dense_num_boxes_np = np.array([1, 0, 0, 2], dtype=np.int32)
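    # dense_num_boxes holds a per-class box count: one box of class 0 and two
    # boxes of class 3, which unrolls to the sparse class vector [0, 3, 3].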
expected_box_locations = dense_location_np
    expected_box_classes = np.array([0, 3, 3])
# Executing on CPU only since output shape is not constant.
box_locations, box_classes = self.execute_cpu(
graph_fn, [dense_location_np, dense_num_boxes_np])
self.assertAllClose(box_locations, expected_box_locations, rtol=1e-6,
atol=1e-6)
    self.assertAllEqual(box_classes, expected_box_classes)
def test_return_only_valid_boxes_when_input_contains_invalid_boxes(self):
num_classes = 4
num_valid_boxes = 3
num_boxes = 10
code_size = 4
def graph_fn(dense_location, dense_num_boxes):
box_locations, box_classes = ops.dense_to_sparse_boxes(
dense_location, dense_num_boxes, num_classes)
return box_locations, box_classes
dense_location_np = np.random.uniform(size=[num_boxes, code_size])
dense_num_boxes_np = np.array([1, 0, 0, 2], dtype=np.int32)
expected_box_locations = dense_location_np[:num_valid_boxes]
    expected_box_classes = np.array([0, 3, 3])
# Executing on CPU only since output shape is not constant.
box_locations, box_classes = self.execute_cpu(
graph_fn, [dense_location_np, dense_num_boxes_np])
self.assertAllClose(box_locations, expected_box_locations, rtol=1e-6,
atol=1e-6)
    self.assertAllEqual(box_classes, expected_box_classes)
class OpsTestIndicesToDenseVector(test_case.TestCase):
def test_indices_to_dense_vector(self):
size = 10000
num_indices = np.random.randint(size)
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.zeros(size, dtype=np.float32)
expected_output[rand_indices] = 1.
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
return indicator
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_size_at_inference(self):
size = 5000
num_indices = 250
all_indices = np.arange(size)
rand_indices = np.random.permutation(all_indices)[0:num_indices]
expected_output = np.zeros(size, dtype=np.float32)
expected_output[rand_indices] = 1.
def graph_fn(tf_all_indices):
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices,
tf.shape(tf_all_indices)[0])
return indicator
output = self.execute(graph_fn, [all_indices])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_int(self):
size = 500
num_indices = 25
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.zeros(size, dtype=np.int64)
expected_output[rand_indices] = 1
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(
tf_rand_indices, size, 1, dtype=tf.int64)
return indicator
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_custom_values(self):
size = 100
num_indices = 10
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
indices_value = np.random.rand(1)
default_value = np.random.rand(1)
expected_output = np.float32(np.ones(size) * default_value)
expected_output[rand_indices] = indices_value
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(
tf_rand_indices,
size,
indices_value=indices_value,
default_value=default_value)
return indicator
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_all_indices_as_input(self):
size = 500
num_indices = 500
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.ones(size, dtype=np.float32)
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
return indicator
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
def test_indices_to_dense_vector_empty_indices_as_input(self):
size = 500
rand_indices = []
expected_output = np.zeros(size, dtype=np.float32)
def graph_fn():
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
return indicator
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
self.assertEqual(output.dtype, expected_output.dtype)
class GroundtruthFilterTest(test_case.TestCase):
def test_filter_groundtruth(self):
def graph_fn(input_image, input_boxes, input_classes, input_is_crowd,
input_area, input_difficult, input_label_types,
input_confidences, valid_indices):
input_tensors = {
fields.InputDataFields.image: input_image,
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes,
fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
fields.InputDataFields.groundtruth_area: input_area,
fields.InputDataFields.groundtruth_difficult: input_difficult,
fields.InputDataFields.groundtruth_label_types: input_label_types,
fields.InputDataFields.groundtruth_confidences: input_confidences,
}
output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
return output_tensors
input_image = np.random.rand(224, 224, 3)
input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
dtype=np.float32)
input_classes = np.array([1, 2], dtype=np.int32)
    input_is_crowd = np.array([False, True], dtype=bool)
    input_area = np.array([32, 48], dtype=np.float32)
    input_difficult = np.array([True, False], dtype=bool)
    input_label_types = np.array(['APPROPRIATE', 'INCORRECT'],
                                 dtype=np.bytes_)
input_confidences = np.array([0.99, 0.5], dtype=np.float32)
valid_indices = np.array([0], dtype=np.int32)
# Strings are not supported on TPU.
output_tensors = self.execute_cpu(
graph_fn,
[input_image, input_boxes, input_classes, input_is_crowd, input_area,
input_difficult, input_label_types, input_confidences, valid_indices]
)
expected_tensors = {
fields.InputDataFields.image: input_image,
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1],
fields.InputDataFields.groundtruth_is_crowd: [False],
fields.InputDataFields.groundtruth_area: [32],
fields.InputDataFields.groundtruth_difficult: [True],
fields.InputDataFields.groundtruth_label_types: [six.b('APPROPRIATE')],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
for key in [fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_label_types]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
def test_filter_with_missing_fields(self):
    input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
                           dtype=np.float32)
input_classes = np.array([1, 2], dtype=np.int32)
valid_indices = np.array([0], dtype=np.int32)
expected_tensors = {
fields.InputDataFields.groundtruth_boxes:
[[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes:
[1]
}
def graph_fn(input_boxes, input_classes, valid_indices):
input_tensors = {
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes
}
output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
return output_tensors
output_tensors = self.execute(graph_fn, [input_boxes, input_classes,
valid_indices])
for key in [fields.InputDataFields.groundtruth_boxes]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
def test_filter_with_empty_fields(self):
def graph_fn(input_boxes, input_classes, input_is_crowd, input_area,
input_difficult, input_confidences, valid_indices):
input_tensors = {
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes,
fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
fields.InputDataFields.groundtruth_area: input_area,
fields.InputDataFields.groundtruth_difficult: input_difficult,
fields.InputDataFields.groundtruth_confidences: input_confidences,
}
output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
return output_tensors
    input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
                           dtype=np.float32)
    input_classes = np.array([1, 2], dtype=np.int32)
    input_is_crowd = np.array([False, True], dtype=bool)
input_area = np.array([], dtype=np.float32)
input_difficult = np.array([], dtype=np.float32)
input_confidences = np.array([0.99, 0.5], dtype=np.float32)
valid_indices = np.array([0], dtype=np.int32)
expected_tensors = {
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1],
fields.InputDataFields.groundtruth_is_crowd: [False],
fields.InputDataFields.groundtruth_area: [],
fields.InputDataFields.groundtruth_difficult: [],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
output_tensors = self.execute(graph_fn, [
input_boxes, input_classes, input_is_crowd, input_area,
input_difficult, input_confidences, valid_indices])
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
def test_filter_with_empty_groundtruth_boxes(self):
def graph_fn(input_boxes, input_classes, input_is_crowd, input_area,
input_difficult, input_confidences, valid_indices):
input_tensors = {
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes,
fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
fields.InputDataFields.groundtruth_area: input_area,
fields.InputDataFields.groundtruth_difficult: input_difficult,
fields.InputDataFields.groundtruth_confidences: input_confidences,
}
output_tensors = ops.retain_groundtruth(input_tensors, valid_indices)
return output_tensors
    input_boxes = np.array([], dtype=np.float32).reshape(0, 4)
    input_classes = np.array([], dtype=np.int32)
    input_is_crowd = np.array([], dtype=bool)
input_area = np.array([], dtype=np.float32)
input_difficult = np.array([], dtype=np.float32)
input_confidences = np.array([], dtype=np.float32)
valid_indices = np.array([], dtype=np.int32)
output_tensors = self.execute(graph_fn, [input_boxes, input_classes,
input_is_crowd, input_area,
input_difficult,
input_confidences,
valid_indices])
for key in output_tensors:
if key == fields.InputDataFields.groundtruth_boxes:
self.assertAllEqual([0, 4], output_tensors[key].shape)
else:
self.assertAllEqual([0], output_tensors[key].shape)
class RetainGroundTruthWithPositiveClasses(test_case.TestCase):
def test_filter_groundtruth_with_positive_classes(self):
def graph_fn(input_image, input_boxes, input_classes, input_is_crowd,
input_area, input_difficult, input_label_types,
input_confidences):
input_tensors = {
fields.InputDataFields.image: input_image,
fields.InputDataFields.groundtruth_boxes: input_boxes,
fields.InputDataFields.groundtruth_classes: input_classes,
fields.InputDataFields.groundtruth_is_crowd: input_is_crowd,
fields.InputDataFields.groundtruth_area: input_area,
fields.InputDataFields.groundtruth_difficult: input_difficult,
fields.InputDataFields.groundtruth_label_types: input_label_types,
fields.InputDataFields.groundtruth_confidences: input_confidences,
}
output_tensors = ops.retain_groundtruth_with_positive_classes(
input_tensors)
return output_tensors
input_image = np.random.rand(224, 224, 3)
    input_boxes = np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]],
                           dtype=np.float32)
    input_classes = np.array([1, 0], dtype=np.int32)
    input_is_crowd = np.array([False, True], dtype=bool)
    input_area = np.array([32, 48], dtype=np.float32)
    input_difficult = np.array([True, False], dtype=bool)
    input_label_types = np.array(['APPROPRIATE', 'INCORRECT'],
                                 dtype=np.bytes_)
input_confidences = np.array([0.99, 0.5], dtype=np.float32)
expected_tensors = {
fields.InputDataFields.image: input_image,
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1],
fields.InputDataFields.groundtruth_is_crowd: [False],
fields.InputDataFields.groundtruth_area: [32],
fields.InputDataFields.groundtruth_difficult: [True],
fields.InputDataFields.groundtruth_label_types: [six.b('APPROPRIATE')],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
# Executing on CPU because string types are not supported on TPU.
output_tensors = self.execute_cpu(graph_fn,
[input_image, input_boxes,
input_classes, input_is_crowd,
input_area,
input_difficult, input_label_types,
input_confidences])
for key in [fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_label_types]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
class ReplaceNaNGroundtruthLabelScoresWithOnes(test_case.TestCase):
def test_replace_nan_groundtruth_label_scores_with_ones(self):
def graph_fn():
label_scores = tf.constant([np.nan, 1.0, np.nan])
output_tensor = ops.replace_nan_groundtruth_label_scores_with_ones(
label_scores)
return output_tensor
expected_tensor = [1.0, 1.0, 1.0]
output_tensor = self.execute(graph_fn, [])
self.assertAllClose(expected_tensor, output_tensor)
def test_input_equals_output_when_no_nans(self):
input_label_scores = [0.5, 1.0, 1.0]
def graph_fn():
label_scores_tensor = tf.constant(input_label_scores)
output_label_scores = ops.replace_nan_groundtruth_label_scores_with_ones(
label_scores_tensor)
return output_label_scores
output_label_scores = self.execute(graph_fn, [])
self.assertAllClose(input_label_scores, output_label_scores)
class GroundtruthFilterWithCrowdBoxesTest(test_case.TestCase):
def test_filter_groundtruth_with_crowd_boxes(self):
def graph_fn():
input_tensors = {
fields.InputDataFields.groundtruth_boxes:
[[0.1, 0.2, 0.6, 0.8], [0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1, 2],
fields.InputDataFields.groundtruth_is_crowd: [True, False],
fields.InputDataFields.groundtruth_area: [100.0, 238.7],
fields.InputDataFields.groundtruth_confidences: [0.5, 0.99],
}
output_tensors = ops.filter_groundtruth_with_crowd_boxes(
input_tensors)
return output_tensors
expected_tensors = {
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [2],
fields.InputDataFields.groundtruth_is_crowd: [False],
fields.InputDataFields.groundtruth_area: [238.7],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
output_tensors = self.execute(graph_fn, [])
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
class GroundtruthFilterWithNanBoxTest(test_case.TestCase):
def test_filter_groundtruth_with_nan_box_coordinates(self):
def graph_fn():
input_tensors = {
fields.InputDataFields.groundtruth_boxes:
[[np.nan, np.nan, np.nan, np.nan], [0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [1, 2],
fields.InputDataFields.groundtruth_is_crowd: [False, True],
fields.InputDataFields.groundtruth_area: [100.0, 238.7],
fields.InputDataFields.groundtruth_confidences: [0.5, 0.99],
}
output_tensors = ops.filter_groundtruth_with_nan_box_coordinates(
input_tensors)
return output_tensors
expected_tensors = {
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [2],
fields.InputDataFields.groundtruth_is_crowd: [True],
fields.InputDataFields.groundtruth_area: [238.7],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
output_tensors = self.execute(graph_fn, [])
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
class GroundtruthFilterWithUnrecognizedClassesTest(test_case.TestCase):
def test_filter_unrecognized_classes(self):
def graph_fn():
input_tensors = {
fields.InputDataFields.groundtruth_boxes:
[[.3, .3, .5, .7], [0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [-1, 2],
fields.InputDataFields.groundtruth_is_crowd: [False, True],
fields.InputDataFields.groundtruth_area: [100.0, 238.7],
fields.InputDataFields.groundtruth_confidences: [0.5, 0.99],
}
output_tensors = ops.filter_unrecognized_classes(input_tensors)
return output_tensors
expected_tensors = {
fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]],
fields.InputDataFields.groundtruth_classes: [2],
fields.InputDataFields.groundtruth_is_crowd: [True],
fields.InputDataFields.groundtruth_area: [238.7],
fields.InputDataFields.groundtruth_confidences: [0.99],
}
output_tensors = self.execute(graph_fn, [])
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_confidences]:
self.assertAllClose(expected_tensors[key], output_tensors[key])
for key in [fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_is_crowd]:
self.assertAllEqual(expected_tensors[key], output_tensors[key])
class OpsTestNormalizeToTarget(test_case.TestCase):
def test_create_normalize_to_target(self):
if self.is_tf2():
      self.skipTest(
          'Skipping as variable names are not supported in eager mode.')
inputs = tf.random_uniform([5, 10, 12, 3])
target_norm_value = 4.0
dim = 3
with self.test_session():
output = ops.normalize_to_target(inputs, target_norm_value, dim)
self.assertEqual(output.op.name, 'NormalizeToTarget/mul')
var_name = slim.get_variables()[0].name
self.assertEqual(var_name, 'NormalizeToTarget/weights:0')
def test_invalid_dim(self):
inputs = tf.random_uniform([5, 10, 12, 3])
target_norm_value = 4.0
dim = 10
    with self.assertRaisesRegex(
ValueError,
'dim must be non-negative but smaller than the input rank.'):
ops.normalize_to_target(inputs, target_norm_value, dim)
def test_invalid_target_norm_values(self):
inputs = tf.random_uniform([5, 10, 12, 3])
target_norm_value = [4.0, 4.0]
dim = 3
    with self.assertRaisesRegex(
ValueError, 'target_norm_value must be a float or a list of floats'):
ops.normalize_to_target(inputs, target_norm_value, dim)
def test_correct_output_shape(self):
if self.is_tf2():
      self.skipTest('normalize_to_target is not supported in eager mode '
                    'because it requires creating variables.')
inputs = np.random.uniform(size=(5, 10, 12, 3)).astype(np.float32)
def graph_fn(inputs):
target_norm_value = 4.0
dim = 3
output = ops.normalize_to_target(inputs, target_norm_value, dim)
return output
# Executing on CPU since creating a variable inside a conditional is not
# supported.
outputs = self.execute_cpu(graph_fn, [inputs])
self.assertEqual(outputs.shape, inputs.shape)
def test_correct_initial_output_values(self):
if self.is_tf2():
      self.skipTest('normalize_to_target is not supported in eager mode '
                    'because it requires creating variables.')
def graph_fn():
inputs = tf.constant([[[[3, 4], [7, 24]],
[[5, -12], [-1, 0]]]], tf.float32)
target_norm_value = 10.0
dim = 3
normalized_inputs = ops.normalize_to_target(inputs, target_norm_value,
dim)
return normalized_inputs
expected_output = [[[[30/5.0, 40/5.0], [70/25.0, 240/25.0]],
[[50/13.0, -120/13.0], [-10, 0]]]]
# Executing on CPU since creating a variable inside a conditional is not
# supported.
output = self.execute_cpu(graph_fn, [])
self.assertAllClose(output, expected_output)
def test_multiple_target_norm_values(self):
if self.is_tf2():
      self.skipTest('normalize_to_target is not supported in eager mode '
                    'because it requires creating variables.')
def graph_fn():
inputs = tf.constant([[[[3, 4], [7, 24]],
[[5, -12], [-1, 0]]]], tf.float32)
target_norm_value = [10.0, 20.0]
dim = 3
normalized_inputs = ops.normalize_to_target(inputs, target_norm_value,
dim)
return normalized_inputs
expected_output = [[[[30/5.0, 80/5.0], [70/25.0, 480/25.0]],
[[50/13.0, -240/13.0], [-10, 0]]]]
# Executing on CPU since creating a variable inside a conditional is not
# supported.
output = self.execute_cpu(graph_fn, [])
self.assertAllClose(output, expected_output)
class OpsTestPositionSensitiveCropRegions(test_case.TestCase):
def test_position_sensitive(self):
num_spatial_bins = [3, 2]
image_shape = [3, 2, 6]
# The result for both boxes should be [[1, 2], [3, 4], [5, 6]]
# before averaging.
expected_output = np.array([3.5, 3.5]).reshape([2, 1, 1, 1])
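    # Each of the 3x2 spatial bins reads from its own channel group, so every
    # crop yields [[1, 2], [3, 4], [5, 6]]; global_pool then averages the
    # bins, giving mean(1..6) = 3.5 per box.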
for crop_size_mult in range(1, 3):
crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
def graph_fn():
# First channel is 1's, second channel is 2's, etc.
image = tf.constant(
list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32, shape=image_shape)
boxes = tf.random_uniform((2, 4))
# pylint:disable=cell-var-from-loop
ps_crop_and_pool = ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
return ps_crop_and_pool
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_output)
def test_position_sensitive_with_equal_channels(self):
num_spatial_bins = [2, 2]
image_shape = [3, 3, 4]
crop_size = [2, 2]
def graph_fn():
image = tf.constant(
list(range(1, 3 * 3 + 1)), dtype=tf.float32, shape=[3, 3, 1])
tiled_image = tf.tile(image, [1, 1, image_shape[2]])
boxes = tf.random_uniform((3, 4))
box_ind = tf.constant([0, 0, 0], dtype=tf.int32)
# All channels are equal so position-sensitive crop and resize should
# work as the usual crop and resize for just one channel.
crop = tf.image.crop_and_resize(tf.expand_dims(image, axis=0), boxes,
box_ind, crop_size)
crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
ps_crop_and_pool = ops.position_sensitive_crop_regions(
tiled_image,
boxes,
crop_size,
num_spatial_bins,
global_pool=True)
return crop_and_pool, ps_crop_and_pool
# Crop and resize op is not supported in TPUs.
expected_output, output = self.execute_cpu(graph_fn, [])
self.assertAllClose(output, expected_output)
def test_raise_value_error_on_num_bins_less_than_one(self):
num_spatial_bins = [1, -1]
image_shape = [1, 1, 2]
crop_size = [2, 2]
image = tf.constant(1, dtype=tf.float32, shape=image_shape)
boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
    with self.assertRaisesRegex(ValueError, 'num_spatial_bins should be >= 1'):
ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
def test_raise_value_error_on_non_divisible_crop_size(self):
num_spatial_bins = [2, 3]
image_shape = [1, 1, 6]
crop_size = [3, 2]
image = tf.constant(1, dtype=tf.float32, shape=image_shape)
boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
    with self.assertRaisesRegex(
ValueError, 'crop_size should be divisible by num_spatial_bins'):
ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
def test_raise_value_error_on_non_divisible_num_channels(self):
num_spatial_bins = [2, 2]
image_shape = [1, 1, 5]
crop_size = [2, 2]
def graph_fn():
image = tf.constant(1, dtype=tf.float32, shape=image_shape)
boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
return ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
    with self.assertRaisesRegex(
ValueError, 'Dimension size must be evenly divisible by 4 but is 5'):
self.execute(graph_fn, [])
def test_position_sensitive_with_global_pool_false(self):
num_spatial_bins = [3, 2]
image_shape = [3, 2, 6]
num_boxes = 2
expected_output = []
# Expected output, when crop_size = [3, 2].
expected_output.append(np.expand_dims(
np.tile(np.array([[1, 2],
[3, 4],
[5, 6]]), (num_boxes, 1, 1)),
axis=-1))
# Expected output, when crop_size = [6, 4].
expected_output.append(np.expand_dims(
np.tile(np.array([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 3, 4, 4],
[3, 3, 4, 4],
[5, 5, 6, 6],
[5, 5, 6, 6]]), (num_boxes, 1, 1)),
axis=-1))
for crop_size_mult in range(1, 3):
crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
# First channel is 1's, second channel is 2's, etc.
def graph_fn():
# pylint:disable=cell-var-from-loop
image = tf.constant(
list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32, shape=image_shape)
boxes = tf.random_uniform((num_boxes, 4))
ps_crop = ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=False)
return ps_crop
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_output[crop_size_mult - 1])
def test_position_sensitive_with_global_pool_false_and_do_global_pool(self):
num_spatial_bins = [3, 2]
image_shape = [3, 2, 6]
num_boxes = 2
expected_output = []
# Expected output, when crop_size = [3, 2].
expected_output.append(np.mean(
np.expand_dims(
np.tile(np.array([[1, 2],
[3, 4],
[5, 6]]), (num_boxes, 1, 1)),
axis=-1),
axis=(1, 2), keepdims=True))
# Expected output, when crop_size = [6, 4].
expected_output.append(np.mean(
np.expand_dims(
np.tile(np.array([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 3, 4, 4],
[3, 3, 4, 4],
[5, 5, 6, 6],
[5, 5, 6, 6]]), (num_boxes, 1, 1)),
axis=-1),
axis=(1, 2), keepdims=True))
for crop_size_mult in range(1, 3):
crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
def graph_fn():
# pylint:disable=cell-var-from-loop
# First channel is 1's, second channel is 2's, etc.
image = tf.constant(
list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32, shape=image_shape)
boxes = tf.random_uniform((num_boxes, 4))
# Perform global_pooling after running the function with
# global_pool=False.
ps_crop = ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=False)
ps_crop_and_pool = tf.reduce_mean(
ps_crop, reduction_indices=(1, 2), keepdims=True)
return ps_crop_and_pool
output = self.execute(graph_fn, [])
self.assertAllClose(output, expected_output[crop_size_mult - 1])
def test_raise_value_error_on_non_square_block_size(self):
num_spatial_bins = [3, 2]
image_shape = [3, 2, 6]
crop_size = [6, 2]
image = tf.constant(1, dtype=tf.float32, shape=image_shape)
boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
    with self.assertRaisesRegex(
ValueError, 'Only support square bin crop size for now.'):
ops.position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=False)
class OpsTestBatchPositionSensitiveCropRegions(test_case.TestCase):
def test_position_sensitive_with_single_bin(self):
num_spatial_bins = [1, 1]
image_shape = [2, 3, 3, 4]
crop_size = [2, 2]
def graph_fn():
image = tf.random_uniform(image_shape)
boxes = tf.random_uniform((2, 3, 4))
box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)
# When a single bin is used, position-sensitive crop and pool should be
# the same as non-position sensitive crop and pool.
crop = tf.image.crop_and_resize(image,
tf.reshape(boxes, [-1, 4]), box_ind,
crop_size)
crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])
ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
image, boxes, crop_size, num_spatial_bins, global_pool=True)
return crop_and_pool, ps_crop_and_pool
# Crop and resize is not supported on TPUs.
expected_output, output = self.execute_cpu(graph_fn, [])
self.assertAllClose(output, expected_output)
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
num_spatial_bins = [2, 2]
image_shape = [2, 2, 2, 4]
crop_size = [2, 2]
# box_ind = tf.constant([0, 1], dtype=tf.int32)
expected_output = []
# Expected output, when the box containing whole image.
expected_output.append(
np.reshape(np.array([[4, 7],
[10, 13]]),
(1, 2, 2, 1))
)
# Expected output, when the box containing only first row.
expected_output.append(
np.reshape(np.array([[3, 6],
[7, 10]]),
(1, 2, 2, 1))
)
expected_output = np.stack(expected_output, axis=0)
def graph_fn():
images = tf.constant(
list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
shape=image_shape)
# First box contains whole image, and second box contains only first row.
boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
[[0., 0., 0.5, 1.]]]), dtype=tf.float32)
ps_crop = ops.batch_position_sensitive_crop_regions(
images, boxes, crop_size, num_spatial_bins, global_pool=False)
return ps_crop
output = self.execute(graph_fn, [])
self.assertAllEqual(output, expected_output)
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
num_spatial_bins = [1, 1]
image_shape = [2, 3, 3, 4]
crop_size = [1, 1]
def graph_fn():
images = tf.random_uniform(image_shape)
boxes = tf.random_uniform((2, 3, 4))
# box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)
# Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
# the outputs are the same whatever the global_pool value is.
ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
images, boxes, crop_size, num_spatial_bins, global_pool=True)
ps_crop = ops.batch_position_sensitive_crop_regions(
images, boxes, crop_size, num_spatial_bins, global_pool=False)
return ps_crop_and_pool, ps_crop
pooled_output, unpooled_output = self.execute(graph_fn, [])
self.assertAllClose(pooled_output, unpooled_output)
# The following tests are only executed on CPU because the output
# shape is not constant.
class ReframeBoxMasksToImageMasksTest(test_case.TestCase,
parameterized.TestCase):
@parameterized.parameters(
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'bilinear'},
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'nearest'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'bilinear'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'nearest'},
)
def testZeroImageOnEmptyMask(self, mask_dtype, mask_dtype_np, resize_method):
np_expected_image_masks = np.array([[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]])
def graph_fn():
box_masks = tf.constant([[[0, 0],
[0, 0]]], dtype=mask_dtype)
boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(
box_masks, boxes, image_height=4, image_width=4,
resize_method=resize_method)
return image_masks
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertEqual(np_image_masks.dtype, mask_dtype_np)
self.assertAllClose(np_image_masks, np_expected_image_masks)
@parameterized.parameters(
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'bilinear'},
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'nearest'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'bilinear'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'nearest'},
)
def testZeroBoxMasks(self, mask_dtype, mask_dtype_np, resize_method):
def graph_fn():
box_masks = tf.zeros([0, 3, 3], dtype=mask_dtype)
boxes = tf.zeros([0, 4], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(
box_masks, boxes, image_height=4, image_width=4,
resize_method=resize_method)
return image_masks
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertEqual(np_image_masks.dtype, mask_dtype_np)
self.assertAllEqual(np_image_masks.shape, np.array([0, 4, 4]))
def testBoxWithZeroArea(self):
def graph_fn():
box_masks = tf.zeros([1, 3, 3], dtype=tf.float32)
boxes = tf.constant([[0.1, 0.2, 0.1, 0.7]], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(box_masks, boxes,
image_height=4,
image_width=4)
return image_masks
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertAllEqual(np_image_masks.shape, np.array([1, 4, 4]))
@parameterized.parameters(
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'bilinear'},
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'nearest'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'bilinear'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'nearest'},
)
def testMaskIsCenteredInImageWhenBoxIsCentered(self, mask_dtype,
mask_dtype_np, resize_method):
def graph_fn():
box_masks = tf.constant([[[4, 4],
[4, 4]]], dtype=mask_dtype)
boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(
box_masks, boxes, image_height=4, image_width=4,
resize_method=resize_method)
return image_masks
np_expected_image_masks = np.array([[[0, 0, 0, 0],
[0, 4, 4, 0],
[0, 4, 4, 0],
[0, 0, 0, 0]]], dtype=mask_dtype_np)
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertEqual(np_image_masks.dtype, mask_dtype_np)
self.assertAllClose(np_image_masks, np_expected_image_masks)
@parameterized.parameters(
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'bilinear'},
{'mask_dtype': tf.float32, 'mask_dtype_np': np.float32,
'resize_method': 'nearest'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'bilinear'},
{'mask_dtype': tf.uint8, 'mask_dtype_np': np.uint8,
'resize_method': 'nearest'},
)
def testMaskOffCenterRemainsOffCenterInImage(self, mask_dtype,
mask_dtype_np, resize_method):
def graph_fn():
box_masks = tf.constant([[[1, 0],
[0, 1]]], dtype=mask_dtype)
boxes = tf.constant([[0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
image_masks = ops.reframe_box_masks_to_image_masks(
box_masks, boxes, image_height=4, image_width=4,
resize_method=resize_method)
return image_masks
if mask_dtype == tf.float32 and resize_method == 'bilinear':
np_expected_image_masks = np.array([[[0, 0, 0, 0],
[0, 0, 0.6111111, 0.16666669],
[0, 0, 0.3888889, 0.83333337],
[0, 0, 0, 0]]], dtype=np.float32)
else:
np_expected_image_masks = np.array([[[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]]], dtype=mask_dtype_np)
np_image_masks = self.execute_cpu(graph_fn, [])
self.assertEqual(np_image_masks.dtype, mask_dtype_np)
self.assertAllClose(np_image_masks, np_expected_image_masks)
class MergeBoxesWithMultipleLabelsTest(test_case.TestCase):
def testMergeBoxesWithMultipleLabels(self):
def graph_fn():
boxes = tf.constant(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.25, 0.25, 0.75, 0.75]],
dtype=tf.float32)
class_indices = tf.constant([0, 4, 2], dtype=tf.int32)
class_confidences = tf.constant([0.8, 0.2, 0.1], dtype=tf.float32)
num_classes = 5
merged_boxes, merged_classes, merged_confidences, merged_box_indices = (
ops.merge_boxes_with_multiple_labels(
boxes, class_indices, class_confidences, num_classes))
return (merged_boxes, merged_classes, merged_confidences,
merged_box_indices)
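    # Boxes 0 and 2 share identical coordinates, so they merge into a single
    # box whose multi-hot class and confidence rows combine both labels.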
expected_merged_boxes = np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32)
expected_merged_classes = np.array(
[[1, 0, 1, 0, 0], [0, 0, 0, 0, 1]], dtype=np.int32)
expected_merged_confidences = np.array(
[[0.8, 0, 0.1, 0, 0], [0, 0, 0, 0, 0.2]], dtype=np.float32)
expected_merged_box_indices = np.array([0, 1], dtype=np.int32)
# Running on CPU only as tf.unique is not supported on TPU.
(np_merged_boxes, np_merged_classes, np_merged_confidences,
np_merged_box_indices) = self.execute_cpu(graph_fn, [])
self.assertAllClose(np_merged_boxes, expected_merged_boxes)
self.assertAllClose(np_merged_classes, expected_merged_classes)
self.assertAllClose(np_merged_confidences, expected_merged_confidences)
self.assertAllClose(np_merged_box_indices, expected_merged_box_indices)
def testMergeBoxesWithMultipleLabelsCornerCase(self):
def graph_fn():
boxes = tf.constant(
[[0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1],
[1, 1, 1, 1], [1, 0, 1, 1], [0, 1, 1, 1], [0, 0, 1, 1]],
dtype=tf.float32)
class_indices = tf.constant([0, 1, 2, 3, 2, 1, 0, 3], dtype=tf.int32)
class_confidences = tf.constant([0.1, 0.9, 0.2, 0.8, 0.3, 0.7, 0.4, 0.6],
dtype=tf.float32)
num_classes = 4
merged_boxes, merged_classes, merged_confidences, merged_box_indices = (
ops.merge_boxes_with_multiple_labels(
boxes, class_indices, class_confidences, num_classes))
return (merged_boxes, merged_classes, merged_confidences,
merged_box_indices)
expected_merged_boxes = np.array(
[[0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]],
dtype=np.float32)
expected_merged_classes = np.array(
[[1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1]],
dtype=np.int32)
expected_merged_confidences = np.array(
[[0.1, 0, 0, 0.6], [0.4, 0.9, 0, 0],
[0, 0.7, 0.2, 0], [0, 0, 0.3, 0.8]], dtype=np.float32)
expected_merged_box_indices = np.array([0, 1, 2, 3], dtype=np.int32)
# Running on CPU only as tf.unique is not supported on TPU.
(np_merged_boxes, np_merged_classes, np_merged_confidences,
np_merged_box_indices) = self.execute_cpu(graph_fn, [])
self.assertAllClose(np_merged_boxes, expected_merged_boxes)
self.assertAllClose(np_merged_classes, expected_merged_classes)
self.assertAllClose(np_merged_confidences, expected_merged_confidences)
self.assertAllClose(np_merged_box_indices, expected_merged_box_indices)
def testMergeBoxesWithEmptyInputs(self):
def graph_fn():
boxes = tf.zeros([0, 4], dtype=tf.float32)
class_indices = tf.constant([], dtype=tf.int32)
class_confidences = tf.constant([], dtype=tf.float32)
num_classes = 5
merged_boxes, merged_classes, merged_confidences, merged_box_indices = (
ops.merge_boxes_with_multiple_labels(
boxes, class_indices, class_confidences, num_classes))
return (merged_boxes, merged_classes, merged_confidences,
merged_box_indices)
# Running on CPU only as tf.unique is not supported on TPU.
(np_merged_boxes, np_merged_classes, np_merged_confidences,
np_merged_box_indices) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(np_merged_boxes.shape, [0, 4])
self.assertAllEqual(np_merged_classes.shape, [0, 5])
self.assertAllEqual(np_merged_confidences.shape, [0, 5])
self.assertAllEqual(np_merged_box_indices.shape, [0])
def testMergeBoxesWithMultipleLabelsUsesInt64(self):
if self.is_tf2():
self.skipTest('Getting op names is not supported in eager mode.')
boxes = tf.constant(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.25, 0.25, 0.75, 0.75]],
dtype=tf.float32)
class_indices = tf.constant([0, 4, 2], dtype=tf.int32)
class_confidences = tf.constant([0.8, 0.2, 0.1], dtype=tf.float32)
num_classes = 5
ops.merge_boxes_with_multiple_labels(
boxes, class_indices, class_confidences, num_classes)
graph = tf.get_default_graph()
def assert_dtype_is_int64(op_name):
op = graph.get_operation_by_name(op_name)
self.assertEqual(op.get_attr('dtype'), tf.int64)
def assert_t_is_int64(op_name):
op = graph.get_operation_by_name(op_name)
self.assertEqual(op.get_attr('T'), tf.int64)
assert_dtype_is_int64('map/TensorArray')
assert_dtype_is_int64('map/TensorArray_1')
assert_dtype_is_int64('map/while/TensorArrayReadV3')
assert_t_is_int64('map/while/TensorArrayWrite/TensorArrayWriteV3')
assert_t_is_int64(
'map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3')
assert_dtype_is_int64('map/TensorArrayStack/TensorArrayGatherV3')
class NearestNeighborUpsamplingTest(test_case.TestCase):
def test_upsampling_with_single_scale(self):
def graph_fn(inputs):
custom_op_output = ops.nearest_neighbor_upsampling(inputs, scale=2)
return custom_op_output
inputs = np.reshape(np.arange(4).astype(np.float32), [1, 2, 2, 1])
custom_op_output = self.execute(graph_fn, [inputs])
expected_output = [[[[0], [0], [1], [1]],
[[0], [0], [1], [1]],
[[2], [2], [3], [3]],
[[2], [2], [3], [3]]]]
self.assertAllClose(custom_op_output, expected_output)
def test_upsampling_with_separate_height_width_scales(self):
def graph_fn(inputs):
custom_op_output = ops.nearest_neighbor_upsampling(inputs,
height_scale=2,
width_scale=3)
return custom_op_output
inputs = np.reshape(np.arange(4).astype(np.float32), [1, 2, 2, 1])
custom_op_output = self.execute(graph_fn, [inputs])
expected_output = [[[[0], [0], [0], [1], [1], [1]],
[[0], [0], [0], [1], [1], [1]],
[[2], [2], [2], [3], [3], [3]],
[[2], [2], [2], [3], [3], [3]]]]
self.assertAllClose(custom_op_output, expected_output)
class MatmulGatherOnZerothAxis(test_case.TestCase):
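  # matmul_gather_on_zeroth_axis emulates tf.gather via a one-hot matmul,
  # which can be friendlier to accelerators than a plain gather; these tests
  # check that it matches ordinary gather semantics.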
def test_gather_2d(self):
def graph_fn(params, indices):
return ops.matmul_gather_on_zeroth_axis(params, indices)
params = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[0, 1, 0, 0]], dtype=np.float32)
indices = np.array([2, 2, 1], dtype=np.int32)
expected_output = np.array([[9, 10, 11, 12], [9, 10, 11, 12], [5, 6, 7, 8]])
gather_output = self.execute(graph_fn, [params, indices])
self.assertAllClose(gather_output, expected_output)
def test_gather_3d(self):
def graph_fn(params, indices):
return ops.matmul_gather_on_zeroth_axis(params, indices)
params = np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]],
[[0, 1], [0, 0]]], dtype=np.float32)
indices = np.array([0, 3, 1], dtype=np.int32)
expected_output = np.array([[[1, 2], [3, 4]],
[[0, 1], [0, 0]],
[[5, 6], [7, 8]]])
gather_output = self.execute(graph_fn, [params, indices])
self.assertAllClose(gather_output, expected_output)
def test_gather_with_many_indices(self):
def graph_fn(params, indices):
return ops.matmul_gather_on_zeroth_axis(params, indices)
params = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[0, 1, 0, 0]], dtype=np.float32)
indices = np.array([0, 0, 0, 0, 0, 0], dtype=np.int32)
expected_output = np.array(6*[[1, 2, 3, 4]])
gather_output = self.execute(graph_fn, [params, indices])
self.assertAllClose(gather_output, expected_output)
def test_gather_with_dynamic_shape_input(self):
def graph_fn(params, indices):
return ops.matmul_gather_on_zeroth_axis(params, indices)
params = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[0, 1, 0, 0]], dtype=np.float32)
indices = np.array([0, 0, 0, 0, 0, 0])
expected_output = np.array(6*[[1, 2, 3, 4]])
gather_output = self.execute(graph_fn, [params, indices])
self.assertAllClose(gather_output, expected_output)
class FpnFeatureLevelsTest(test_case.TestCase):
def test_correct_fpn_levels(self):
image_size = 640
    pretraining_image_size = 224
    image_ratio = image_size * 1.0 / pretraining_image_size
boxes = np.array(
[
[
[0, 0, 111, 111], # Level 0.
[0, 0, 113, 113], # Level 1.
[0, 0, 223, 223], # Level 1.
[0, 0, 225, 225], # Level 2.
[0, 0, 449, 449] # Level 3.
],
],
dtype=np.float32) / image_size
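    # Assuming the usual FPN assignment floor(log2(sqrt(area) * image_ratio))
    # + unit_scale_index, clamped to [0, num_levels), boxes under 112 input
    # pixels map to level 0, [112, 224) to level 1, [224, 448) to level 2,
    # and so on -- hence the level labels above.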
def graph_fn(boxes):
return ops.fpn_feature_levels(
num_levels=5, unit_scale_index=2, image_ratio=image_ratio,
boxes=boxes)
levels = self.execute(graph_fn, [boxes])
self.assertAllEqual([[0, 1, 1, 2, 3]], levels)
class TestBfloat16ToFloat32(test_case.TestCase):
def test_convert_list(self):
var_list = [
tf.constant([1.], dtype=tf.bfloat16),
tf.constant([2], dtype=tf.int32)
]
casted_var_list = ops.bfloat16_to_float32_nested(var_list)
self.assertEqual(casted_var_list[0].dtype, tf.float32)
self.assertEqual(casted_var_list[1].dtype, tf.int32)
def test_convert_tensor_dict(self):
tensor_dict = {
'key1': tf.constant([1.], dtype=tf.bfloat16),
'key2': [
tf.constant([0.5], dtype=tf.bfloat16),
tf.constant([7], dtype=tf.int32),
],
'key3': tf.constant([2], dtype=tf.uint8),
}
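    # Only bfloat16 entries are cast to float32; integer tensors pass through
    # unchanged, and the conversion recurses into nested lists and dicts.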
tensor_dict = ops.bfloat16_to_float32_nested(tensor_dict)
self.assertEqual(tensor_dict['key1'].dtype, tf.float32)
self.assertEqual(tensor_dict['key2'][0].dtype, tf.float32)
self.assertEqual(tensor_dict['key2'][1].dtype, tf.int32)
self.assertEqual(tensor_dict['key3'].dtype, tf.uint8)
class TestGatherWithPaddingValues(test_case.TestCase):
def test_gather_with_padding_values(self):
expected_gathered_tensor = [
[0, 0, 0.2, 0.2],
[0, 0, 0, 0],
[0, 0, 0.1, 0.1],
[0, 0, 0, 0],
]
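    # Index -1 selects the supplied padding value (all zeros here), while
    # non-negative indices gather the corresponding rows of input_tensor.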
def graph_fn():
indices = tf.constant([1, -1, 0, -1])
input_tensor = tf.constant([[0, 0, 0.1, 0.1], [0, 0, 0.2, 0.2]],
dtype=tf.float32)
gathered_tensor = ops.gather_with_padding_values(
input_tensor,
indices=indices,
padding_value=tf.zeros_like(input_tensor[0]))
self.assertEqual(gathered_tensor.dtype, tf.float32)
return gathered_tensor
gathered_tensor_np = self.execute(graph_fn, [])
self.assertAllClose(expected_gathered_tensor, gathered_tensor_np)
class TestGIoU(test_case.TestCase):
def test_giou_with_no_overlap(self):
expected_giou_tensor = [
0, -1/3, -3/4, 0, -98/100
]
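    # GIoU = IoU - |C \ (A U B)| / |C|, where C is the smallest box enclosing
    # both inputs. E.g. for [9, 9, 10, 10] vs [0, 0, 1, 1]: IoU = 0, C has
    # area 100 and the union covers 2, so GIoU = -(100 - 2) / 100 = -98/100.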
def graph_fn():
boxes1 = tf.constant([[3, 4, 5, 6], [3, 3, 5, 5],
[0, 0, 0, 0], [3, 3, 5, 5],
[9, 9, 10, 10]],
dtype=tf.float32)
boxes2 = tf.constant([[3, 2, 5, 4], [3, 7, 5, 9],
[5, 5, 10, 10], [3, 5, 5, 7],
[0, 0, 1, 1]], dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
def test_giou_with_overlaps(self):
expected_giou_tensor = [
1/25, 1/4, 1/3, 1/7 - 2/9
]
def graph_fn():
boxes1 = tf.constant([[2, 1, 7, 6], [2, 2, 4, 4],
[2, 2, 4, 4], [2, 2, 4, 4]],
dtype=tf.float32)
boxes2 = tf.constant([[4, 3, 5, 4], [3, 3, 4, 4],
[2, 3, 4, 5], [3, 3, 5, 5]], dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
def test_giou_with_perfect_overlap(self):
expected_giou_tensor = [1]
def graph_fn():
boxes1 = tf.constant([[3, 3, 5, 5]], dtype=tf.float32)
boxes2 = tf.constant([[3, 3, 5, 5]], dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
def test_giou_with_zero_area_boxes(self):
expected_giou_tensor = [0]
def graph_fn():
boxes1 = tf.constant([[1, 1, 1, 1]], dtype=tf.float32)
boxes2 = tf.constant([[1, 1, 1, 1]], dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
def test_giou_different_with_l1_same(self):
expected_giou_tensor = [
2/3, 3/5
]
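    # Both candidate boxes sit at the same L1 distance from boxes1, but GIoU
    # distinguishes them: the symmetric overlap scores 2/3 while the shifted
    # one scores 3/5.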
def graph_fn():
boxes1 = tf.constant([[3, 3, 5, 5], [3, 3, 5, 5]], dtype=tf.float32)
boxes2 = tf.constant([[3, 2.5, 5, 5.5], [3, 2.5, 5, 4.5]],
dtype=tf.float32)
giou = ops.giou(boxes1, boxes2)
self.assertEqual(giou.dtype, tf.float32)
return giou
giou = self.execute(graph_fn, [])
self.assertAllClose(expected_giou_tensor, giou)
class TestCoordinateConversion(test_case.TestCase):
def test_coord_conv(self):
expected_box_tensor = [
[0.5, 0.5, 5.5, 5.5], [2, 1, 4, 7], [0, 0, 0, 0]
]
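    # Inputs are in [y_center, x_center, height, width] form; the op converts
    # to corner form [y_min, x_min, y_max, x_max], e.g. [3, 4, 2, 6] ->
    # [2, 1, 4, 7].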
def graph_fn():
boxes = tf.constant([[3, 3, 5, 5], [3, 4, 2, 6], [0, 0, 0, 0]],
dtype=tf.float32)
converted = ops.center_to_corner_coordinate(boxes)
self.assertEqual(converted.dtype, tf.float32)
return converted
converted = self.execute(graph_fn, [])
self.assertAllClose(expected_box_tensor, converted)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/ops_test.py | ops_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions which are convenient for unit testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import anchor_generator
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import box_predictor
from object_detection.core import matcher
from object_detection.utils import shape_utils
from object_detection.utils import tf_version
# Default size (both width and height) used for testing mask predictions.
DEFAULT_MASK_SIZE = 5
class MockBoxCoder(box_coder.BoxCoder):
"""Simple `difference` BoxCoder."""
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
return boxes.get() - anchors.get()
def _decode(self, rel_codes, anchors):
return box_list.BoxList(rel_codes + anchors.get())
class MockMaskHead(object):
"""Simple maskhead that returns all zeros as mask predictions."""
def __init__(self, num_classes):
self._num_classes = num_classes
def predict(self, features):
batch_size = tf.shape(features)[0]
return tf.zeros((batch_size, 1, self._num_classes, DEFAULT_MASK_SIZE,
DEFAULT_MASK_SIZE),
dtype=tf.float32)
class MockBoxPredictor(box_predictor.BoxPredictor):
"""Simple box predictor that ignores inputs and outputs all zeros."""
def __init__(self, is_training, num_classes, add_background_class=True):
super(MockBoxPredictor, self).__init__(is_training, num_classes)
self._add_background_class = add_background_class
def _predict(self, image_features, num_predictions_per_location):
image_feature = image_features[0]
combined_feature_shape = shape_utils.combined_static_and_dynamic_shape(
image_feature)
batch_size = combined_feature_shape[0]
num_anchors = (combined_feature_shape[1] * combined_feature_shape[2])
code_size = 4
zero = tf.reduce_sum(0 * image_feature)
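    # Summing a zeroed copy of the input keeps the predictions connected to
    # image_feature in the graph while still evaluating to all zeros.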
num_class_slots = self.num_classes
if self._add_background_class:
num_class_slots = num_class_slots + 1
box_encodings = zero + tf.zeros(
(batch_size, num_anchors, 1, code_size), dtype=tf.float32)
class_predictions_with_background = zero + tf.zeros(
(batch_size, num_anchors, num_class_slots), dtype=tf.float32)
predictions_dict = {
box_predictor.BOX_ENCODINGS:
box_encodings,
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:
class_predictions_with_background
}
return predictions_dict
class MockKerasBoxPredictor(box_predictor.KerasBoxPredictor):
"""Simple box predictor that ignores inputs and outputs all zeros."""
def __init__(self, is_training, num_classes, add_background_class=True):
super(MockKerasBoxPredictor, self).__init__(
is_training, num_classes, False, False)
self._add_background_class = add_background_class
# Dummy variable so that box predictor registers some variables.
self._dummy_var = tf.Variable(0.0, trainable=True,
name='box_predictor_var')
def _predict(self, image_features, **kwargs):
image_feature = image_features[0]
combined_feature_shape = shape_utils.combined_static_and_dynamic_shape(
image_feature)
batch_size = combined_feature_shape[0]
num_anchors = (combined_feature_shape[1] * combined_feature_shape[2])
code_size = 4
zero = tf.reduce_sum(0 * image_feature)
num_class_slots = self.num_classes
if self._add_background_class:
num_class_slots = num_class_slots + 1
box_encodings = zero + tf.zeros(
(batch_size, num_anchors, 1, code_size), dtype=tf.float32)
class_predictions_with_background = zero + tf.zeros(
(batch_size, num_anchors, num_class_slots), dtype=tf.float32)
predictions_dict = {
box_predictor.BOX_ENCODINGS:
box_encodings,
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:
class_predictions_with_background
}
return predictions_dict
class MockAnchorGenerator(anchor_generator.AnchorGenerator):
"""Mock anchor generator."""
def name_scope(self):
return 'MockAnchorGenerator'
def num_anchors_per_location(self):
return [1]
def _generate(self, feature_map_shape_list):
num_anchors = sum([shape[0] * shape[1] for shape in feature_map_shape_list])
return box_list.BoxList(tf.zeros((num_anchors, 4), dtype=tf.float32))
class MockMatcher(matcher.Matcher):
"""Simple matcher that matches first anchor to first groundtruth box."""
def _match(self, similarity_matrix, valid_rows):
return tf.constant([0, -1, -1, -1], dtype=tf.int32)
def create_diagonal_gradient_image(height, width, depth):
"""Creates pyramid image. Useful for testing.
  For example, create_diagonal_gradient_image(5, 6, 1) looks like:
# [[[ 5. 4. 3. 2. 1. 0.]
# [ 6. 5. 4. 3. 2. 1.]
# [ 7. 6. 5. 4. 3. 2.]
# [ 8. 7. 6. 5. 4. 3.]
# [ 9. 8. 7. 6. 5. 4.]]]
Args:
height: height of image
width: width of image
depth: depth of image
Returns:
pyramid image
"""
row = np.arange(height)
col = np.arange(width)[::-1]
image_layer = np.expand_dims(row, 1) + col
image_layer = np.expand_dims(image_layer, 2)
image = image_layer
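  # Each additional channel is the base layer scaled by another factor of 10,
  # which keeps the channels easy to tell apart in tests.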
for i in range(1, depth):
image = np.concatenate((image, image_layer * pow(10, i)), 2)
return image.astype(np.float32)
def create_random_boxes(num_boxes, max_height, max_width):
"""Creates random bounding boxes of specific maximum height and width.
Args:
num_boxes: number of boxes.
max_height: maximum height of boxes.
max_width: maximum width of boxes.
Returns:
boxes: numpy array of shape [num_boxes, 4]. Each row is in form
[y_min, x_min, y_max, x_max].
"""
y_1 = np.random.uniform(size=(1, num_boxes)) * max_height
y_2 = np.random.uniform(size=(1, num_boxes)) * max_height
x_1 = np.random.uniform(size=(1, num_boxes)) * max_width
x_2 = np.random.uniform(size=(1, num_boxes)) * max_width
boxes = np.zeros(shape=(num_boxes, 4))
boxes[:, 0] = np.minimum(y_1, y_2)
boxes[:, 1] = np.minimum(x_1, x_2)
boxes[:, 2] = np.maximum(y_1, y_2)
boxes[:, 3] = np.maximum(x_1, x_2)
return boxes.astype(np.float32)
def first_rows_close_as_set(a, b, k=None, rtol=1e-6, atol=1e-6):
"""Checks if first K entries of two lists are close, up to permutation.
Inputs to this assert are lists of items which can be compared via
numpy.allclose(...) and can be sorted.
Args:
a: list of items which can be compared via numpy.allclose(...) and are
sortable.
b: list of items which can be compared via numpy.allclose(...) and are
sortable.
k: a non-negative integer. If not provided, k is set to be len(a).
rtol: relative tolerance.
atol: absolute tolerance.
Returns:
boolean, True if input lists a and b have the same length and
the first k entries of the inputs satisfy numpy.allclose() after
sorting entries.
"""
if not isinstance(a, list) or not isinstance(b, list) or len(a) != len(b):
return False
  if k is None:
k = len(a)
k = min(k, len(a))
a_sorted = sorted(a[:k])
b_sorted = sorted(b[:k])
return all([
np.allclose(entry_a, entry_b, rtol, atol)
for (entry_a, entry_b) in zip(a_sorted, b_sorted)
])
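# Illustrative usage sketch (not part of the original module): the comparison
# above is permutation-insensitive over the first k entries, while lists of
# different lengths never match.
if __name__ == '__main__':
  assert first_rows_close_as_set([1.0, 2.0, 3.0], [3.0, 1.0, 2.0])
  assert not first_rows_close_as_set([1.0, 2.0, 3.0], [1.0, 2.0])
  assert first_rows_close_as_set([1.0, 2.0, 9.0], [2.0, 1.0, 5.0], k=2)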
class GraphContextOrNone(object):
"""A new Graph context for TF1.X and None for TF2.X.
This is useful to write model tests that work with both TF1.X and TF2.X.
Example test using this pattern:
class ModelTest(test_case.TestCase):
def test_model(self):
with test_utils.GraphContextOrNone() as g:
model = Model()
def compute_fn():
out = model.predict()
return out['detection_boxes']
boxes = self.execute(compute_fn, [], graph=g)
self.assertAllClose(boxes, expected_boxes)
"""
def __init__(self):
if tf_version.is_tf2():
self.graph = None
else:
self.graph = tf.Graph().as_default()
def __enter__(self):
if tf_version.is_tf2():
return None
else:
return self.graph.__enter__()
def __exit__(self, ttype, value, traceback):
if tf_version.is_tf2():
return False
else:
return self.graph.__exit__(ttype, value, traceback)
def image_with_dynamic_shape(height, width, channels):
"""Returns a single image with dynamic shape."""
h = tf.random.uniform([], minval=height, maxval=height+1, dtype=tf.int32)
w = tf.random.uniform([], minval=width, maxval=width+1, dtype=tf.int32)
image = tf.random.uniform([h, w, channels])
return image
def keypoints_with_dynamic_shape(num_instances, num_keypoints, num_coordinates):
"""Returns keypoints with dynamic shape."""
n = tf.random.uniform([], minval=num_instances, maxval=num_instances+1,
dtype=tf.int32)
keypoints = tf.random.uniform([n, num_keypoints, num_coordinates])
return keypoints
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/test_utils.py | test_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, height, width] numpy arrays representing masks.
Example mask operations that are supported:
* Areas: compute mask areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
EPSILON = 1e-7
def area(masks):
"""Computes area of masks.
Args:
masks: Numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
    a numpy array with shape [N] representing mask areas.
Raises:
ValueError: If masks.dtype is not np.uint8
"""
if masks.dtype != np.uint8:
raise ValueError('Masks type should be np.uint8')
return np.sum(masks, axis=(1, 2), dtype=np.float32)
def intersection(masks1, masks2):
"""Compute pairwise intersection areas between masks.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
n = masks1.shape[0]
m = masks2.shape[0]
answer = np.zeros([n, m], dtype=np.float32)
for i in np.arange(n):
for j in np.arange(m):
answer[i, j] = np.sum(np.minimum(masks1[i], masks2[j]), dtype=np.float32)
return answer
def iou(masks1, masks2):
"""Computes pairwise intersection-over-union between mask collections.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
    masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
area1 = area(masks1)
area2 = area(masks2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / np.maximum(union, EPSILON)
def ioa(masks1, masks2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two masks, mask1 and mask2 is defined as
their intersection area over mask2's area. Note that ioa is not symmetric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
    masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
areas = np.expand_dims(area(masks2), axis=0)
return intersect / (areas + EPSILON)
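# Illustrative usage sketch (not part of the original module): a worked
# example of the metrics above on two tiny 2x3 masks. The intersection is
# 1 pixel and the areas are 2 and 3, so union = 2 + 3 - 1 = 4, iou = 1/4,
# and ioa(masks1, masks2) = intersection / area(masks2) = 1/3.
if __name__ == '__main__':
  example_masks1 = np.array([[[1, 1, 0],
                              [0, 0, 0]]], dtype=np.uint8)  # area 2
  example_masks2 = np.array([[[0, 1, 1],
                              [0, 0, 1]]], dtype=np.uint8)  # area 3
  print(area(example_masks1))                          # [2.]
  print(intersection(example_masks1, example_masks2))  # [[1.]]
  print(iou(example_masks1, example_masks2))           # [[0.25]]
  print(ioa(example_masks1, example_masks2))           # approx. [[0.3333]]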
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_mask_ops.py | np_mask_ops.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for image patches."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def get_patch_mask(y, x, patch_size, image_shape):
"""Creates a 2D mask array for a square patch of a given size and location.
The mask is created with its center at the y and x coordinates, which must be
within the image. While the mask center must be within the image, the mask
  itself can be partially outside of it. If patch_size is an even number, the
  extra row and column are allocated to the lower-valued side (top and left).
Args:
y: An integer or scalar int32 tensor. The vertical coordinate of the
patch mask center. Must be within the range [0, image_height).
x: An integer or scalar int32 tensor. The horizontal coordinate of the
patch mask center. Must be within the range [0, image_width).
patch_size: An integer or scalar int32 tensor. The square size of the
patch mask. Must be at least 1.
image_shape: A list or 1D int32 tensor representing the shape of the image
to which the mask will correspond, with the first two values being image
height and width. For example, [image_height, image_width] or
[image_height, image_width, image_channels].
Returns:
Boolean mask tensor of shape [image_height, image_width] with True values
for the patch.
Raises:
tf.errors.InvalidArgumentError: if x is not in the range [0, image_width), y
is not in the range [0, image_height), or patch_size is not at least 1.
"""
image_hw = image_shape[:2]
mask_center_yx = tf.stack([y, x])
with tf.control_dependencies([
tf.debugging.assert_greater_equal(
patch_size, 1,
message='Patch size must be >= 1'),
tf.debugging.assert_greater_equal(
mask_center_yx, 0,
message='Patch center (y, x) must be >= (0, 0)'),
tf.debugging.assert_less(
mask_center_yx, image_hw,
message='Patch center (y, x) must be < image (h, w)')
]):
mask_center_yx = tf.identity(mask_center_yx)
half_patch_size = tf.cast(patch_size, dtype=tf.float32) / 2
start_yx = mask_center_yx - tf.cast(tf.floor(half_patch_size), dtype=tf.int32)
end_yx = mask_center_yx + tf.cast(tf.ceil(half_patch_size), dtype=tf.int32)
start_yx = tf.maximum(start_yx, 0)
end_yx = tf.minimum(end_yx, image_hw)
start_y = start_yx[0]
start_x = start_yx[1]
end_y = end_yx[0]
end_x = end_yx[1]
lower_pad = image_hw[0] - end_y
upper_pad = start_y
left_pad = start_x
right_pad = image_hw[1] - end_x
mask = tf.ones([end_y - start_y, end_x - start_x], dtype=tf.bool)
return tf.pad(mask, [[upper_pad, lower_pad], [left_pad, right_pad]])
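# Illustrative usage sketch (not part of the original module), assuming a TF2
# runtime with eager execution enabled. An even patch_size of 2 centered at
# (y=1, x=1) in a 3x4 image covers rows 0-1 and columns 0-1, since the
# lower-valued (top/left) side is filled first:
# [[ True  True False False]
#  [ True  True False False]
#  [False False False False]]
if __name__ == '__main__':
  example_mask = get_patch_mask(y=1, x=1, patch_size=2, image_shape=[3, 4])
  print(example_mask.numpy())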
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/patch_ops.py | patch_ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.per_image_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.utils import per_image_evaluation
class SingleClassTpFpWithDifficultBoxesTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
nms_max_output_boxes)
self.detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
self.detected_scores = np.array([0.6, 0.8, 0.5], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0]], dtype=np.uint8)
self.detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
self.groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 10, 10]],
dtype=float)
groundtruth_masks_0 = np.array([[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_1 = np.array([[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]], dtype=np.uint8)
self.groundtruth_masks = np.stack(
[groundtruth_masks_0, groundtruth_masks_1], axis=0)
def test_match_to_gt_box_0(self):
groundtruth_groundtruth_is_difficult_list = np.array([False, True],
dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
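    # Detections are sorted by score (0.8, 0.6, 0.5). Only the 0.6-scored box
    # [0, 0, 1, 1] matches the non-difficult groundtruth box with IoU >= 0.5
    # (IoU = 1.0); the others overlap it at IoU 0.25 and ~0.11, so they are
    # false positives.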
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, True, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_to_gt_mask_0(self):
groundtruth_groundtruth_is_difficult_list = np.array([False, True],
dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([True, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_match_to_gt_box_1(self):
groundtruth_groundtruth_is_difficult_list = np.array([True, False],
dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_to_gt_mask_1(self):
groundtruth_groundtruth_is_difficult_list = np.array([True, False],
dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
expected_scores = np.array([0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class SingleClassTpFpWithGroupOfBoxesTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
nms_max_output_boxes)
self.detected_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 2, 1], [0, 0, 3, 1]], dtype=float)
self.detected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0]], dtype=np.uint8)
self.detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
self.groundtruth_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 5, 5], [10, 10, 20, 20]], dtype=float)
groundtruth_masks_0 = np.array([[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_1 = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0]], dtype=np.uint8)
groundtruth_masks_2 = np.array([[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]], dtype=np.uint8)
self.groundtruth_masks = np.stack(
[groundtruth_masks_0, groundtruth_masks_1, groundtruth_masks_2], axis=0)
def test_match_to_non_group_of_and_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, True, True], dtype=bool)
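    # The highest-scoring detection matches the single non-group-of box and
    # is a true positive; the remaining detections fall inside group-of boxes
    # and are dropped from the returned scores rather than counted.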
expected_scores = np.array([0.8], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_to_non_group_of_and_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, True, True], dtype=bool)
expected_scores = np.array([0.6], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_match_two_to_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[True, False, True], dtype=bool)
expected_scores = np.array([0.5], dtype=float)
expected_tp_fp_labels = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_two_to_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[True, False, True], dtype=bool)
expected_scores = np.array([0.8], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class SingleClassTpFpWithGroupOfBoxesTestWeighted(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.group_of_weight = 0.5
self.eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
nms_max_output_boxes, self.group_of_weight)
self.detected_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 2, 1], [0, 0, 3, 1]], dtype=float)
self.detected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
detected_masks_0 = np.array(
[[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array(
[[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array(
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]], dtype=np.uint8)
self.detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
self.groundtruth_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 5, 5], [10, 10, 20, 20]], dtype=float)
groundtruth_masks_0 = np.array(
[[1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks_1 = np.array(
[[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]], dtype=np.uint8)
groundtruth_masks_2 = np.array(
[[0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]], dtype=np.uint8)
self.groundtruth_masks = np.stack(
[groundtruth_masks_0, groundtruth_masks_1, groundtruth_masks_2], axis=0)
def test_match_to_non_group_of_and_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, True, True], dtype=bool)
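    # With a non-zero group_of_weight, the best-scoring detection matched to
    # each group-of box is recorded with that fractional tp weight instead of
    # being ignored.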
expected_scores = np.array([0.8, 0.6], dtype=float)
expected_tp_fp_labels = np.array([1.0, self.group_of_weight], dtype=float)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_to_non_group_of_and_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, True, True], dtype=bool)
expected_scores = np.array([0.6, 0.8, 0.5], dtype=float)
expected_tp_fp_labels = np.array(
[1.0, self.group_of_weight, self.group_of_weight], dtype=float)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
tf.logging.info(
"test_mask_match_to_non_group_of_and_group_of_box {} {}".format(
tp_fp_labels, expected_tp_fp_labels))
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_match_two_to_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[True, False, True], dtype=bool)
expected_scores = np.array([0.5, 0.8], dtype=float)
expected_tp_fp_labels = np.array([0.0, self.group_of_weight], dtype=float)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
tf.logging.info("test_match_two_to_group_of_box {} {}".format(
tp_fp_labels, expected_tp_fp_labels))
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_match_two_to_group_of_box(self):
groundtruth_groundtruth_is_difficult_list = np.array(
[False, False, False], dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[True, False, True], dtype=bool)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array(
[1.0, self.group_of_weight, self.group_of_weight], dtype=float)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
self.groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=self.groundtruth_masks)
tf.logging.info("test_mask_match_two_to_group_of_box {} {}".format(
tp_fp_labels, expected_tp_fp_labels))
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class SingleClassTpFpNoDifficultBoxesTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold_high_iou = 0.5
matching_iou_threshold_low_iou = 0.1
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.eval_high_iou = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold_high_iou,
nms_iou_threshold, nms_max_output_boxes)
self.eval_low_iou = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold_low_iou,
nms_iou_threshold, nms_max_output_boxes)
self.detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
self.detected_scores = np.array([0.6, 0.8, 0.5], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
detected_masks_2 = np.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 0]], dtype=np.uint8)
self.detected_masks = np.stack(
[detected_masks_0, detected_masks_1, detected_masks_2], axis=0)
def test_no_true_positives(self):
groundtruth_boxes = np.array([[100, 100, 105, 105]], dtype=float)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_no_true_positives(self):
groundtruth_boxes = np.array([[100, 100, 105, 105]], dtype=float)
groundtruth_masks_0 = np.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_masks_0], axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_one_true_positives_with_large_iou_threshold(self):
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, True, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_one_true_positives_with_large_iou_threshold(self):
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_masks_0 = np.array([[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]], dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_masks_0], axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes,
self.detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=self.detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([True, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_one_true_positives_with_very_small_iou_threshold(self):
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_groundtruth_is_difficult_list = np.zeros(1, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False], dtype=bool)
scores, tp_fp_labels = self.eval_low_iou._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([True, False, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_two_true_positives_with_large_iou_threshold(self):
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 3.5, 3.5]], dtype=float)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
scores, tp_fp_labels = self.eval_high_iou._compute_tp_fp_for_single_class(
self.detected_boxes, self.detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = np.array([0.8, 0.6, 0.5], dtype=float)
expected_tp_fp_labels = np.array([False, True, True], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class SingleClassTpFpEmptyMaskAndBoxesTest(tf.test.TestCase):
def setUp(self):
num_groundtruth_classes = 1
matching_iou_threshold_iou = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
self.eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes, matching_iou_threshold_iou, nms_iou_threshold,
nms_max_output_boxes)
def test_mask_tp_and_ignore(self):
# GT: one box with mask, one without
# Det: One mask matches gt1, one matches box gt2 and is ignored
groundtruth_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 2]], dtype=float)
groundtruth_mask_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_mask_1 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, groundtruth_mask_1],
axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False, False],
dtype=bool)
detected_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 2]], dtype=float)
detected_scores = np.array([0.6, 0.8], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_masks_0, detected_masks_1], axis=0)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
detected_boxes, detected_scores, groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list, detected_masks,
groundtruth_masks)
expected_scores = np.array([0.6], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_mask_one_tp_one_fp(self):
# GT: one box with mask, one without
# Det: one mask matches gt1, one is fp (box does not match)
groundtruth_boxes = np.array([[0, 0, 2, 3], [2, 2, 4, 4]], dtype=float)
groundtruth_mask_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_mask_1 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, groundtruth_mask_1],
axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False, False],
dtype=bool)
detected_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 2]], dtype=float)
detected_scores = np.array([0.6, 0.8], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks_1 = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_masks_0, detected_masks_1], axis=0)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
detected_boxes,
detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8, 0.6], dtype=float)
expected_tp_fp_labels = np.array([False, True], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_two_mask_one_gt_one_ignore(self):
# GT: one box with mask, one without.
# Det: two mask matches same gt, one is tp, one is passed down to box match
# and ignored.
groundtruth_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 3]], dtype=float)
groundtruth_mask_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_mask_1 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, groundtruth_mask_1],
axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False, False],
dtype=bool)
detected_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 3]], dtype=float)
detected_scores = np.array([0.6, 0.8], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks_1 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_masks_0, detected_masks_1], axis=0)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
detected_boxes,
detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8], dtype=float)
expected_tp_fp_labels = np.array([True], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
def test_two_mask_one_gt_one_fp(self):
# GT: one box with mask, one without.
# Det: two mask matches same gt, one is tp, one is passed down to box match
# and is fp.
groundtruth_boxes = np.array([[0, 0, 2, 3], [2, 3, 4, 6]], dtype=float)
groundtruth_mask_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_mask_1 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
groundtruth_masks = np.stack([groundtruth_mask_0, groundtruth_mask_1],
axis=0)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=bool)
groundtruth_groundtruth_is_group_of_list = np.array([False, False],
dtype=bool)
detected_boxes = np.array([[0, 0, 2, 3], [0, 0, 2, 3]], dtype=float)
detected_scores = np.array([0.6, 0.8], dtype=float)
detected_masks_0 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks_1 = np.array([[0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
dtype=np.uint8)
detected_masks = np.stack([detected_masks_0, detected_masks_1], axis=0)
scores, tp_fp_labels = self.eval._compute_tp_fp_for_single_class(
detected_boxes,
detected_scores,
groundtruth_boxes,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
expected_scores = np.array([0.8, 0.6], dtype=float)
expected_tp_fp_labels = np.array([True, False], dtype=bool)
self.assertTrue(np.allclose(expected_scores, scores))
self.assertTrue(np.allclose(expected_tp_fp_labels, tp_fp_labels))
class MultiClassesTpFpTest(tf.test.TestCase):
def test_tp_fp(self):
num_groundtruth_classes = 3
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
eval1 = per_image_evaluation.PerImageEvaluation(num_groundtruth_classes,
matching_iou_threshold,
nms_iou_threshold,
nms_max_output_boxes)
detected_boxes = np.array([[0, 0, 1, 1], [10, 10, 5, 5], [0, 0, 2, 2],
[5, 10, 10, 5], [10, 5, 5, 10], [0, 0, 3, 3]],
dtype=float)
detected_scores = np.array([0.8, 0.1, 0.8, 0.9, 0.7, 0.8], dtype=float)
detected_class_labels = np.array([0, 1, 1, 2, 0, 2], dtype=int)
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 3.5, 3.5]], dtype=float)
groundtruth_class_labels = np.array([0, 2], dtype=int)
groundtruth_groundtruth_is_difficult_list = np.zeros(2, dtype=float)
groundtruth_groundtruth_is_group_of_list = np.array(
[False, False], dtype=bool)
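    # Detections are split by predicted class and each class is evaluated
    # independently against the groundtruth boxes of that class.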
scores, tp_fp_labels, _ = eval1.compute_object_detection_metrics(
detected_boxes, detected_scores, detected_class_labels,
groundtruth_boxes, groundtruth_class_labels,
groundtruth_groundtruth_is_difficult_list,
groundtruth_groundtruth_is_group_of_list)
expected_scores = [np.array([0.8], dtype=float)] * 3
    expected_tp_fp_labels = [
        np.array([True]), np.array([False]), np.array([True])
    ]
for i in range(len(expected_scores)):
self.assertTrue(np.allclose(expected_scores[i], scores[i]))
self.assertTrue(np.array_equal(expected_tp_fp_labels[i], tp_fp_labels[i]))
class CorLocTest(tf.test.TestCase):
def test_compute_corloc_with_normal_iou_threshold(self):
num_groundtruth_classes = 3
matching_iou_threshold = 0.5
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
eval1 = per_image_evaluation.PerImageEvaluation(num_groundtruth_classes,
matching_iou_threshold,
nms_iou_threshold,
nms_max_output_boxes)
detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3],
[0, 0, 5, 5]], dtype=float)
detected_scores = np.array([0.9, 0.9, 0.1, 0.9], dtype=float)
detected_class_labels = np.array([0, 1, 0, 2], dtype=int)
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 3, 3], [0, 0, 6, 6]],
dtype=float)
groundtruth_class_labels = np.array([0, 0, 2], dtype=int)
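    # Per class, CorLoc is 1 only if the highest-scoring detection for that
    # class matches a groundtruth box of the same class at the IoU threshold.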
is_class_correctly_detected_in_image = eval1._compute_cor_loc(
detected_boxes, detected_scores, detected_class_labels,
groundtruth_boxes, groundtruth_class_labels)
expected_result = np.array([1, 0, 1], dtype=int)
self.assertTrue(np.array_equal(expected_result,
is_class_correctly_detected_in_image))
def test_compute_corloc_with_very_large_iou_threshold(self):
num_groundtruth_classes = 3
matching_iou_threshold = 0.9
nms_iou_threshold = 1.0
nms_max_output_boxes = 10000
eval1 = per_image_evaluation.PerImageEvaluation(num_groundtruth_classes,
matching_iou_threshold,
nms_iou_threshold,
nms_max_output_boxes)
detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3],
[0, 0, 5, 5]], dtype=float)
detected_scores = np.array([0.9, 0.9, 0.1, 0.9], dtype=float)
detected_class_labels = np.array([0, 1, 0, 2], dtype=int)
groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 3, 3], [0, 0, 6, 6]],
dtype=float)
groundtruth_class_labels = np.array([0, 0, 2], dtype=int)
is_class_correctly_detected_in_image = eval1._compute_cor_loc(
detected_boxes, detected_scores, detected_class_labels,
groundtruth_boxes, groundtruth_class_labels)
expected_result = np.array([1, 0, 0], dtype=int)
self.assertTrue(np.array_equal(expected_result,
is_class_correctly_detected_in_image))
if __name__ == "__main__":
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/per_image_evaluation_test.py | per_image_evaluation_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.label_map_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import label_map_util
class LabelMapUtilTest(tf.test.TestCase):
def _generate_label_map(self, num_classes):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
for i in range(1, num_classes + 1):
item = label_map_proto.item.add()
item.id = i
item.name = 'label_' + str(i)
item.display_name = str(i)
return label_map_proto
def _generate_label_map_with_hierarchy(self, num_classes, ancestors_dict,
descendants_dict):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
for i in range(1, num_classes + 1):
item = label_map_proto.item.add()
item.id = i
item.name = 'label_' + str(i)
item.display_name = str(i)
if i in ancestors_dict:
for anc_i in ancestors_dict[i]:
item.ancestor_ids.append(anc_i)
if i in descendants_dict:
for desc_i in descendants_dict[i]:
item.descendant_ids.append(desc_i)
return label_map_proto
def test_get_label_map_dict(self):
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_get_label_map_dict_from_proto(self):
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_proto = text_format.Parse(
label_map_string, string_int_label_map_pb2.StringIntLabelMap())
label_map_dict = label_map_util.get_label_map_dict(label_map_proto)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_get_label_map_dict_display(self):
label_map_string = """
item {
id:2
display_name:'cat'
}
item {
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(
label_map_path, use_display_name=True)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_load_bad_label_map(self):
label_map_string = """
item {
id:0
name:'class that should not be indexed at zero'
}
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
with self.assertRaises(ValueError):
label_map_util.load_labelmap(label_map_path)
def test_load_label_map_with_background(self):
label_map_string = """
item {
id:0
name:'background'
}
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
self.assertEqual(label_map_dict['background'], 0)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['cat'], 2)
def test_get_label_map_dict_with_fill_in_gaps_and_background(self):
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
label_map_dict = label_map_util.get_label_map_dict(
label_map_path, fill_in_gaps_and_background=True)
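    # Gaps in the id space get placeholder entries named by their id, and
    # id 0 is added as 'background'.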
self.assertEqual(label_map_dict['background'], 0)
self.assertEqual(label_map_dict['dog'], 1)
self.assertEqual(label_map_dict['2'], 2)
self.assertEqual(label_map_dict['cat'], 3)
self.assertEqual(len(label_map_dict), max(label_map_dict.values()) + 1)
def test_keep_categories_with_unique_id(self):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'child'
}
item {
id:1
name:'person'
}
item {
id:1
name:'n00007846'
}
"""
text_format.Parse(label_map_string, label_map_proto)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=3)
self.assertListEqual([{
'id': 2,
'name': u'cat'
}, {
'id': 1,
'name': u'child'
}], categories)
def test_convert_label_map_to_categories_no_label_map(self):
categories = label_map_util.convert_label_map_to_categories(
None, max_num_classes=3)
expected_categories_list = [{
'name': u'category_1',
'id': 1
}, {
'name': u'category_2',
'id': 2
}, {
'name': u'category_3',
'id': 3
}]
self.assertListEqual(expected_categories_list, categories)
def test_convert_label_map_to_categories_lvis_frequency_and_counts(self):
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
label_map_string = """
item {
id:1
name:'person'
frequency: FREQUENT
instance_count: 1000
}
item {
id:2
name:'dog'
frequency: COMMON
instance_count: 100
}
item {
id:3
name:'cat'
frequency: RARE
instance_count: 10
}
"""
text_format.Parse(label_map_string, label_map_proto)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=3)
self.assertListEqual([{
'id': 1,
'name': u'person',
'frequency': 'f',
'instance_count': 1000
}, {
'id': 2,
'name': u'dog',
'frequency': 'c',
'instance_count': 100
}, {
'id': 3,
'name': u'cat',
'frequency': 'r',
'instance_count': 10
}], categories)
def test_convert_label_map_to_categories(self):
label_map_proto = self._generate_label_map(num_classes=4)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=3)
expected_categories_list = [{
'name': u'1',
'id': 1,
}, {
'name': u'2',
'id': 2,
}, {
'name': u'3',
'id': 3,
}]
self.assertListEqual(expected_categories_list, categories)
def test_convert_label_map_with_keypoints_to_categories(self):
label_map_str = """
item {
id: 1
name: 'person'
keypoints: {
id: 1
label: 'nose'
}
keypoints: {
id: 2
label: 'ear'
}
}
"""
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
text_format.Parse(label_map_str, label_map_proto)
categories = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=1)
self.assertEqual('person', categories[0]['name'])
self.assertEqual(1, categories[0]['id'])
self.assertEqual(1, categories[0]['keypoints']['nose'])
self.assertEqual(2, categories[0]['keypoints']['ear'])
def test_disallow_duplicate_keypoint_ids(self):
label_map_str = """
item {
id: 1
name: 'person'
keypoints: {
id: 1
label: 'right_elbow'
}
keypoints: {
id: 1
label: 'left_elbow'
}
}
item {
id: 2
name: 'face'
keypoints: {
id: 3
label: 'ear'
}
}
"""
label_map_proto = string_int_label_map_pb2.StringIntLabelMap()
text_format.Parse(label_map_str, label_map_proto)
with self.assertRaises(ValueError):
label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=2)
def test_convert_label_map_to_categories_with_few_classes(self):
label_map_proto = self._generate_label_map(num_classes=4)
cat_no_offset = label_map_util.convert_label_map_to_categories(
label_map_proto, max_num_classes=2)
expected_categories_list = [{
'name': u'1',
'id': 1
}, {
'name': u'2',
'id': 2
}]
self.assertListEqual(expected_categories_list, cat_no_offset)
def test_get_max_label_map_index(self):
num_classes = 4
label_map_proto = self._generate_label_map(num_classes=num_classes)
max_index = label_map_util.get_max_label_map_index(label_map_proto)
self.assertEqual(num_classes, max_index)
def test_create_category_index(self):
categories = [{'name': u'1', 'id': 1}, {'name': u'2', 'id': 2}]
category_index = label_map_util.create_category_index(categories)
self.assertDictEqual({
1: {
'name': u'1',
'id': 1
},
2: {
'name': u'2',
'id': 2
}
}, category_index)
def test_create_categories_from_labelmap(self):
label_map_string = """
item {
id:1
name:'dog'
}
item {
id:2
name:'cat'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
categories = label_map_util.create_categories_from_labelmap(label_map_path)
self.assertListEqual([{
'name': u'dog',
'id': 1
}, {
'name': u'cat',
'id': 2
}], categories)
def test_create_category_index_from_labelmap(self):
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
category_index = label_map_util.create_category_index_from_labelmap(
label_map_path)
self.assertDictEqual({
1: {
'name': u'dog',
'id': 1
},
2: {
'name': u'cat',
'id': 2
}
}, category_index)
def test_create_category_index_from_labelmap_display(self):
label_map_string = """
item {
id:2
name:'cat'
display_name:'meow'
}
item {
id:1
name:'dog'
display_name:'woof'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
self.assertDictEqual({
1: {
'name': u'dog',
'id': 1
},
2: {
'name': u'cat',
'id': 2
}
}, label_map_util.create_category_index_from_labelmap(
label_map_path, False))
self.assertDictEqual({
1: {
'name': u'woof',
'id': 1
},
2: {
'name': u'meow',
'id': 2
}
}, label_map_util.create_category_index_from_labelmap(label_map_path))
def test_get_label_map_hierarchy_lut(self):
num_classes = 5
ancestors = {2: [1, 3], 5: [1]}
descendants = {1: [2], 5: [1, 2]}
label_map = self._generate_label_map_with_hierarchy(num_classes, ancestors,
descendants)
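    # Row i of each lookup table marks class (i+1)'s ancestors/descendants
    # with ones; the diagonal is set since each class counts as its own
    # ancestor and descendant here.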
gt_hierarchy_dict_lut = {
'ancestors':
np.array([
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[1, 0, 0, 0, 1],
]),
'descendants':
np.array([
[1, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[1, 1, 0, 0, 1],
]),
}
ancestors_lut, descendants_lut = (
label_map_util.get_label_map_hierarchy_lut(label_map, True))
np.testing.assert_array_equal(gt_hierarchy_dict_lut['ancestors'],
ancestors_lut)
np.testing.assert_array_equal(gt_hierarchy_dict_lut['descendants'],
descendants_lut)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/label_map_util_test.py | label_map_util_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utility functions for manipulating Keras models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import tensorflow.compat.v1 as tf
from object_detection.utils import model_util
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ExtractSubmodelUtilTest(tf.test.TestCase):
def test_simple_model(self):
    inputs = tf.keras.Input(shape=(256,))  # Returns a symbolic input tensor.
# A layer instance is callable on a tensor, and returns a tensor.
x = tf.keras.layers.Dense(128, activation='relu', name='a')(inputs)
x = tf.keras.layers.Dense(64, activation='relu', name='b')(x)
x = tf.keras.layers.Dense(32, activation='relu', name='c')(x)
x = tf.keras.layers.Dense(16, activation='relu', name='d')(x)
x = tf.keras.layers.Dense(8, activation='relu', name='e')(x)
predictions = tf.keras.layers.Dense(10, activation='softmax')(x)
model = tf.keras.Model(inputs=inputs, outputs=predictions)
    new_in = model.get_layer(name='b').input
    new_out = model.get_layer(name='d').output
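    # The extracted submodel spans layers 'b' through 'd', mapping the
    # 128-dim input of layer 'b' to the 16-dim output of layer 'd'.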
new_model = model_util.extract_submodel(
model=model,
inputs=new_in,
outputs=new_out)
batch_size = 3
ones = tf.ones((batch_size, 128))
final_out = new_model(ones)
self.assertAllEqual(final_out.shape, (batch_size, 16))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/model_util_tf2_test.py | model_util_tf2_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.learning_schedules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.utils import learning_schedules
from object_detection.utils import test_case
class LearningSchedulesTest(test_case.TestCase):
def testExponentialDecayWithBurnin(self):
def graph_fn(global_step):
learning_rate_base = 1.0
learning_rate_decay_steps = 3
learning_rate_decay_factor = .1
burnin_learning_rate = .5
burnin_steps = 2
min_learning_rate = .05
learning_rate = learning_schedules.exponential_decay_with_burnin(
global_step, learning_rate_base, learning_rate_decay_steps,
learning_rate_decay_factor, burnin_learning_rate, burnin_steps,
min_learning_rate)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(9)
]
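    # Steps 0-1 use the burn-in rate; afterwards the base rate decays by a
    # factor of 0.1 every 3 post-burn-in steps, clipped at min_learning_rate.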
exp_rates = [.5, .5, 1, 1, 1, .1, .1, .1, .05]
self.assertAllClose(output_rates, exp_rates, rtol=1e-4)
def testExponentialDecayWithWarmup(self):
def graph_fn(global_step):
learning_rate_base = 1.0
learning_rate_decay_steps = 3
learning_rate_decay_factor = .1
warmup_learning_rate = .5
warmup_steps = 2
min_learning_rate = .05
learning_rate = learning_schedules.exponential_decay_with_warmup(
global_step, learning_rate_base, learning_rate_decay_steps,
learning_rate_decay_factor, warmup_learning_rate, warmup_steps,
min_learning_rate)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(9)
]
exp_rates = [.5, .75, 1, 1, 1, .1, .1, .1, .05]
self.assertAllClose(output_rates, exp_rates, rtol=1e-4)
def testCosineDecayWithWarmup(self):
def graph_fn(global_step):
learning_rate_base = 1.0
total_steps = 100
warmup_learning_rate = 0.1
warmup_steps = 9
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step, learning_rate_base, total_steps,
warmup_learning_rate, warmup_steps)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
exp_rates = [0.1, 0.5, 0.9, 1.0, 0]
input_global_steps = [0, 4, 8, 9, 100]
output_rates = [
self.execute(graph_fn, [np.array(step).astype(np.int64)])
for step in input_global_steps
]
self.assertAllClose(output_rates, exp_rates)
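    # The warmup segment of the expected rates is linear:
    #   lr = warmup_lr + (base_lr - warmup_lr) * step / warmup_steps,
    # which gives 0.1 at step 0, 0.5 at step 4 and 0.9 at step 8 for the
    # parameters used here.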
def testCosineDecayAfterTotalSteps(self):
def graph_fn(global_step):
learning_rate_base = 1.0
total_steps = 100
warmup_learning_rate = 0.1
warmup_steps = 9
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step, learning_rate_base, total_steps,
warmup_learning_rate, warmup_steps)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
exp_rates = [0]
input_global_steps = [101]
output_rates = [
self.execute(graph_fn, [np.array(step).astype(np.int64)])
for step in input_global_steps
]
self.assertAllClose(output_rates, exp_rates)
def testCosineDecayWithHoldBaseLearningRateSteps(self):
def graph_fn(global_step):
learning_rate_base = 1.0
total_steps = 120
warmup_learning_rate = 0.1
warmup_steps = 9
hold_base_rate_steps = 20
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step, learning_rate_base, total_steps,
warmup_learning_rate, warmup_steps, hold_base_rate_steps)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
exp_rates = [0.1, 0.5, 0.9, 1.0, 1.0, 1.0, 0.999702, 0.874255, 0.577365,
0.0]
input_global_steps = [0, 4, 8, 9, 10, 29, 30, 50, 70, 120]
output_rates = [
self.execute(graph_fn, [np.array(step).astype(np.int64)])
for step in input_global_steps
]
self.assertAllClose(output_rates, exp_rates)
def testManualStepping(self):
def graph_fn(global_step):
boundaries = [2, 3, 7]
rates = [1.0, 2.0, 3.0, 4.0]
learning_rate = learning_schedules.manual_stepping(
global_step, boundaries, rates)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)])
for i in range(10)
]
exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
self.assertAllClose(output_rates, exp_rates)
def testManualSteppingWithWarmup(self):
def graph_fn(global_step):
boundaries = [4, 6, 8]
rates = [0.02, 0.10, 0.01, 0.001]
learning_rate = learning_schedules.manual_stepping(
global_step, boundaries, rates, warmup=True)
assert learning_rate.op.name.endswith('learning_rate')
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)])
for i in range(9)
]
exp_rates = [0.02, 0.04, 0.06, 0.08, 0.10, 0.10, 0.01, 0.01, 0.001]
self.assertAllClose(output_rates, exp_rates)
def testManualSteppingWithZeroBoundaries(self):
def graph_fn(global_step):
boundaries = []
rates = [0.01]
learning_rate = learning_schedules.manual_stepping(
global_step, boundaries, rates)
return (learning_rate,)
output_rates = [
self.execute(graph_fn, [np.array(i).astype(np.int64)])
for i in range(4)
]
exp_rates = [0.01] * 4
self.assertAllClose(output_rates, exp_rates)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/learning_schedules_test.py | learning_schedules_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_list
class BoxListTest(tf.test.TestCase):
def test_invalid_box_data(self):
with self.assertRaises(ValueError):
np_box_list.BoxList([0, 0, 1, 1])
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([[0, 0, 1, 1]], dtype=int))
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([0, 1, 1, 3, 4], dtype=float))
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([[0, 1, 1, 3], [3, 1, 1, 5]], dtype=float))
  def test_has_field_with_existing_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertTrue(boxlist.has_field('boxes'))
  def test_has_field_with_nonexistent_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertFalse(boxlist.has_field('scores'))
  def test_get_field_with_existing_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertTrue(np.allclose(boxlist.get_field('boxes'), boxes))
  def test_get_field_with_nonexistent_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
with self.assertRaises(ValueError):
boxlist.get_field('scores')
class AddExtraFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
  def test_add_already_existing_field(self):
with self.assertRaises(ValueError):
self.boxlist.add_field('boxes', np.array([[0, 0, 0, 1, 0]], dtype=float))
def test_add_invalid_field_data(self):
with self.assertRaises(ValueError):
self.boxlist.add_field('scores', np.array([0.5, 0.7], dtype=float))
with self.assertRaises(ValueError):
self.boxlist.add_field('scores',
np.array([0.5, 0.7, 0.9, 0.1], dtype=float))
def test_add_single_dimensional_field_data(self):
boxlist = self.boxlist
scores = np.array([0.5, 0.7, 0.9], dtype=float)
boxlist.add_field('scores', scores)
self.assertTrue(np.allclose(scores, self.boxlist.get_field('scores')))
def test_add_multi_dimensional_field_data(self):
boxlist = self.boxlist
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
boxlist.add_field('labels', labels)
self.assertTrue(np.allclose(labels, self.boxlist.get_field('labels')))
def test_get_extra_fields(self):
boxlist = self.boxlist
self.assertItemsEqual(boxlist.get_extra_fields(), [])
scores = np.array([0.5, 0.7, 0.9], dtype=float)
boxlist.add_field('scores', scores)
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores'])
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
boxlist.add_field('labels', labels)
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores', 'labels'])
def test_get_coordinates(self):
y_min, x_min, y_max, x_max = self.boxlist.get_coordinates()
expected_y_min = np.array([3.0, 14.0, 0.0], dtype=float)
expected_x_min = np.array([4.0, 14.0, 0.0], dtype=float)
expected_y_max = np.array([6.0, 15.0, 20.0], dtype=float)
expected_x_max = np.array([8.0, 15.0, 20.0], dtype=float)
self.assertTrue(np.allclose(y_min, expected_y_min))
self.assertTrue(np.allclose(x_min, expected_x_min))
self.assertTrue(np.allclose(y_max, expected_y_max))
self.assertTrue(np.allclose(x_max, expected_x_max))
def test_num_boxes(self):
boxes = np.array([[0., 0., 100., 100.], [10., 30., 50., 70.]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
expected_num_boxes = 2
    self.assertEqual(boxlist.num_boxes(), expected_num_boxes)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/np_box_list_test.py | np_box_list_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for manipulating collections of variables during training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import re
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.python.ops import variables as tf_variables
# Maps checkpoint types to variable name prefixes that are no longer
# supported
DETECTION_FEATURE_EXTRACTOR_MSG = """\
The checkpoint type 'detection' is not supported when it contains variable
names with 'feature_extractor'. Please download the new checkpoint file
from the model zoo.
"""
DEPRECATED_CHECKPOINT_MAP = {
'detection': ('feature_extractor', DETECTION_FEATURE_EXTRACTOR_MSG)
}
# TODO(derekjchow): Consider replacing with tf.contrib.filter_variables in
# tensorflow/contrib/framework/python/ops/variables.py
def filter_variables(variables, filter_regex_list, invert=False):
"""Filters out the variables matching the filter_regex.
  Filters out the variables whose names match any of the regular
  expressions in filter_regex_list and returns the remaining variables.
  Optionally, if invert=True, the complement set is returned.
Args:
variables: a list of tensorflow variables.
filter_regex_list: a list of string regular expressions.
invert: (boolean). If True, returns the complement of the filter set; that
is, all variables matching filter_regex are kept and all others discarded.
Returns:
a list of filtered variables.
"""
kept_vars = []
  variables_to_ignore_patterns = [fre for fre in filter_regex_list if fre]
for var in variables:
add = True
for pattern in variables_to_ignore_patterns:
if re.match(pattern, var.op.name):
add = False
break
if add != invert:
kept_vars.append(var)
return kept_vars
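# Illustrative usage sketch (not part of this module; the regex below is a
# made-up example): drop every variable whose name starts with
# 'FeatureExtractor/' from the trainable set.
#
#   trainable = tf.trainable_variables()
#   kept = filter_variables(trainable, ['^FeatureExtractor/.*'])
#   # With invert=True, only the matching variables would be kept instead.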
def multiply_gradients_matching_regex(grads_and_vars, regex_list, multiplier):
"""Multiply gradients whose variable names match a regular expression.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
regex_list: A list of string regular expressions.
multiplier: A (float) multiplier to apply to each gradient matching the
regular expression.
Returns:
grads_and_vars: A list of gradient to variable pairs (tuples).
"""
variables = [pair[1] for pair in grads_and_vars]
matching_vars = filter_variables(variables, regex_list, invert=True)
for var in matching_vars:
logging.info('Applying multiplier %f to variable [%s]',
multiplier, var.op.name)
grad_multipliers = {var: float(multiplier) for var in matching_vars}
return slim.learning.multiply_gradients(grads_and_vars,
grad_multipliers)
def freeze_gradients_matching_regex(grads_and_vars, regex_list):
"""Freeze gradients whose variable names match a regular expression.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
regex_list: A list of string regular expressions.
Returns:
grads_and_vars: A list of gradient to variable pairs (tuples) that do not
contain the variables and gradients matching the regex.
"""
variables = [pair[1] for pair in grads_and_vars]
matching_vars = filter_variables(variables, regex_list, invert=True)
kept_grads_and_vars = [pair for pair in grads_and_vars
if pair[1] not in matching_vars]
for var in matching_vars:
logging.info('Freezing variable [%s]', var.op.name)
return kept_grads_and_vars
def get_variables_available_in_checkpoint(variables,
checkpoint_path,
include_global_step=True):
"""Returns the subset of variables available in the checkpoint.
Inspects given checkpoint and returns the subset of variables that are
available in it.
TODO(rathodv): force input and output to be a dictionary.
Args:
variables: a list or dictionary of variables to find in checkpoint.
checkpoint_path: path to the checkpoint to restore variables from.
include_global_step: whether to include `global_step` variable, if it
exists. Default True.
Returns:
A list or dictionary of variables.
Raises:
ValueError: if `variables` is not a list or dict.
"""
if isinstance(variables, list):
variable_names_map = {}
for variable in variables:
if isinstance(variable, tf_variables.PartitionedVariable):
name = variable.name
else:
name = variable.op.name
variable_names_map[name] = variable
elif isinstance(variables, dict):
variable_names_map = variables
else:
raise ValueError('`variables` is expected to be a list or dict.')
ckpt_reader = tf.train.NewCheckpointReader(checkpoint_path)
ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()
if not include_global_step:
ckpt_vars_to_shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)
vars_in_ckpt = {}
for variable_name, variable in sorted(variable_names_map.items()):
if variable_name in ckpt_vars_to_shape_map:
if ckpt_vars_to_shape_map[variable_name] == variable.shape.as_list():
vars_in_ckpt[variable_name] = variable
else:
logging.warning('Variable [%s] is available in checkpoint, but has an '
'incompatible shape with model variable. Checkpoint '
'shape: [%s], model variable shape: [%s]. This '
'variable will not be initialized from the checkpoint.',
variable_name, ckpt_vars_to_shape_map[variable_name],
variable.shape.as_list())
else:
logging.warning('Variable [%s] is not available in checkpoint',
variable_name)
if isinstance(variables, list):
return list(vars_in_ckpt.values())
return vars_in_ckpt
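# Illustrative usage sketch (hypothetical checkpoint path): restore only the
# variables that exist in a checkpoint with compatible shapes.
#
#   variables = tf.global_variables()
#   available = get_variables_available_in_checkpoint(
#       variables, '/tmp/model.ckpt', include_global_step=False)
#   saver = tf.train.Saver(var_list=available)
#   # Then, inside a session: saver.restore(sess, '/tmp/model.ckpt')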
def get_global_variables_safely():
"""If not executing eagerly, returns tf.global_variables().
Raises a ValueError if eager execution is enabled,
because the variables are not tracked when executing eagerly.
If executing eagerly, use a Keras model's .variables property instead.
Returns:
The result of tf.global_variables()
"""
with tf.init_scope():
if tf.executing_eagerly():
raise ValueError("Global variables collection is not tracked when "
"executing eagerly. Use a Keras model's `.variables` "
"attribute instead.")
return tf.global_variables()
def ensure_checkpoint_supported(checkpoint_path, checkpoint_type, model_dir):
"""Ensures that the given checkpoint can be properly loaded.
  Performs the following checks:
  1. Raises an error if checkpoint_path and model_dir are the same.
2. Checks that checkpoint_path does not contain a deprecated checkpoint file
by inspecting its variables.
Args:
checkpoint_path: str, path to checkpoint.
checkpoint_type: str, denotes the type of checkpoint.
model_dir: The model directory to store intermediate training checkpoints.
Raises:
RuntimeError: If
      1. We detect a deprecated checkpoint file.
2. model_dir and checkpoint_path are in the same directory.
"""
variables = tf.train.list_variables(checkpoint_path)
if checkpoint_type in DEPRECATED_CHECKPOINT_MAP:
blocked_prefix, msg = DEPRECATED_CHECKPOINT_MAP[checkpoint_type]
for var_name, _ in variables:
if var_name.startswith(blocked_prefix):
tf.logging.error('Found variable name - %s with prefix %s', var_name,
blocked_prefix)
raise RuntimeError(msg)
checkpoint_path_dir = os.path.abspath(os.path.dirname(checkpoint_path))
model_dir = os.path.abspath(model_dir)
if model_dir == checkpoint_path_dir:
raise RuntimeError(
('Checkpoint dir ({}) and model_dir ({}) cannot be same.'.format(
checkpoint_path_dir, model_dir) +
(' Please set model_dir to a different path.')))
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/utils/variables_helper.py | variables_helper.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.data_decoders.tf_example_parser."""
import numpy as np
import numpy.testing as np_testing
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.metrics import tf_example_parser
class TfExampleDecoderTest(tf.test.TestCase):
def _Int64Feature(self, value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _FloatFeature(self, value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _BytesFeature(self, value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def testParseDetectionsAndGT(self):
source_id = b'abc.jpg'
# y_min, x_min, y_max, x_max
object_bb = np.array([[0.0, 0.5, 0.3], [0.0, 0.1, 0.6], [1.0, 0.6, 0.8],
[1.0, 0.6, 0.7]]).transpose()
detection_bb = np.array([[0.1, 0.2], [0.0, 0.8], [1.0, 0.6],
[1.0, 0.85]]).transpose()
object_class_label = [1, 1, 2]
object_difficult = [1, 0, 0]
object_group_of = [0, 0, 1]
verified_labels = [1, 2, 3, 4]
detection_class_label = [2, 1]
detection_score = [0.5, 0.3]
features = {
fields.TfExampleFields.source_id:
self._BytesFeature(source_id),
fields.TfExampleFields.object_bbox_ymin:
self._FloatFeature(object_bb[:, 0].tolist()),
fields.TfExampleFields.object_bbox_xmin:
self._FloatFeature(object_bb[:, 1].tolist()),
fields.TfExampleFields.object_bbox_ymax:
self._FloatFeature(object_bb[:, 2].tolist()),
fields.TfExampleFields.object_bbox_xmax:
self._FloatFeature(object_bb[:, 3].tolist()),
fields.TfExampleFields.detection_bbox_ymin:
self._FloatFeature(detection_bb[:, 0].tolist()),
fields.TfExampleFields.detection_bbox_xmin:
self._FloatFeature(detection_bb[:, 1].tolist()),
fields.TfExampleFields.detection_bbox_ymax:
self._FloatFeature(detection_bb[:, 2].tolist()),
fields.TfExampleFields.detection_bbox_xmax:
self._FloatFeature(detection_bb[:, 3].tolist()),
fields.TfExampleFields.detection_class_label:
self._Int64Feature(detection_class_label),
fields.TfExampleFields.detection_score:
self._FloatFeature(detection_score),
}
example = tf.train.Example(features=tf.train.Features(feature=features))
parser = tf_example_parser.TfExampleDetectionAndGTParser()
results_dict = parser.parse(example)
self.assertIsNone(results_dict)
features[fields.TfExampleFields.object_class_label] = (
self._Int64Feature(object_class_label))
features[fields.TfExampleFields.object_difficult] = (
self._Int64Feature(object_difficult))
example = tf.train.Example(features=tf.train.Features(feature=features))
results_dict = parser.parse(example)
self.assertIsNotNone(results_dict)
self.assertEqual(source_id, results_dict[fields.DetectionResultFields.key])
np_testing.assert_almost_equal(
object_bb, results_dict[fields.InputDataFields.groundtruth_boxes])
np_testing.assert_almost_equal(
detection_bb,
results_dict[fields.DetectionResultFields.detection_boxes])
np_testing.assert_almost_equal(
detection_score,
results_dict[fields.DetectionResultFields.detection_scores])
np_testing.assert_almost_equal(
detection_class_label,
results_dict[fields.DetectionResultFields.detection_classes])
np_testing.assert_almost_equal(
object_difficult,
results_dict[fields.InputDataFields.groundtruth_difficult])
np_testing.assert_almost_equal(
object_class_label,
results_dict[fields.InputDataFields.groundtruth_classes])
parser = tf_example_parser.TfExampleDetectionAndGTParser()
features[fields.TfExampleFields.object_group_of] = (
self._Int64Feature(object_group_of))
example = tf.train.Example(features=tf.train.Features(feature=features))
results_dict = parser.parse(example)
self.assertIsNotNone(results_dict)
np_testing.assert_equal(
object_group_of,
results_dict[fields.InputDataFields.groundtruth_group_of])
features[fields.TfExampleFields.image_class_label] = (
self._Int64Feature(verified_labels))
example = tf.train.Example(features=tf.train.Features(feature=features))
results_dict = parser.parse(example)
self.assertIsNotNone(results_dict)
np_testing.assert_equal(
verified_labels,
results_dict[fields.InputDataFields.groundtruth_image_classes])
def testParseString(self):
string_val = b'abc'
features = {'string': self._BytesFeature(string_val)}
example = tf.train.Example(features=tf.train.Features(feature=features))
parser = tf_example_parser.StringParser('string')
result = parser.parse(example)
self.assertIsNotNone(result)
self.assertEqual(result, string_val)
parser = tf_example_parser.StringParser('another_string')
result = parser.parse(example)
self.assertIsNone(result)
def testParseFloat(self):
float_array_val = [1.5, 1.4, 2.0]
features = {'floats': self._FloatFeature(float_array_val)}
example = tf.train.Example(features=tf.train.Features(feature=features))
parser = tf_example_parser.FloatParser('floats')
result = parser.parse(example)
self.assertIsNotNone(result)
np_testing.assert_almost_equal(result, float_array_val)
    parser = tf_example_parser.FloatParser('another_floats')
result = parser.parse(example)
self.assertIsNone(result)
def testInt64Parser(self):
int_val = [1, 2, 3]
features = {'ints': self._Int64Feature(int_val)}
example = tf.train.Example(features=tf.train.Features(feature=features))
parser = tf_example_parser.Int64Parser('ints')
result = parser.parse(example)
self.assertIsNotNone(result)
np_testing.assert_almost_equal(result, int_val)
parser = tf_example_parser.Int64Parser('another_ints')
result = parser.parse(example)
self.assertIsNone(result)
def testBoundingBoxParser(self):
bounding_boxes = np.array([[0.0, 0.5, 0.3], [0.0, 0.1, 0.6],
[1.0, 0.6, 0.8], [1.0, 0.6, 0.7]]).transpose()
features = {
'ymin': self._FloatFeature(bounding_boxes[:, 0]),
'xmin': self._FloatFeature(bounding_boxes[:, 1]),
'ymax': self._FloatFeature(bounding_boxes[:, 2]),
'xmax': self._FloatFeature(bounding_boxes[:, 3])
}
example = tf.train.Example(features=tf.train.Features(feature=features))
parser = tf_example_parser.BoundingBoxParser('xmin', 'ymin', 'xmax', 'ymax')
result = parser.parse(example)
self.assertIsNotNone(result)
np_testing.assert_almost_equal(result, bounding_boxes)
parser = tf_example_parser.BoundingBoxParser('xmin', 'ymin', 'xmax',
'another_ymax')
result = parser.parse(example)
self.assertIsNone(result)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/tf_example_parser_test.py | tf_example_parser_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for third party lvis to be used within object_detection.
Usage example: given a set of images with ids in the list image_ids
and corresponding lists of numpy arrays encoding groundtruth (boxes,
masks and classes) and detections (masks, scores and classes), where
elements of each list correspond to detections/annotations of a single image,
then evaluation can be invoked as follows:
groundtruth = lvis_tools.LVISWrapper(groundtruth_dict)
detections = lvis_results.LVISResults(groundtruth, detections_list)
evaluator = lvis_tools.LVISEvalWrapper(groundtruth, detections,
iou_type='segm')
summary_metrics = evaluator.ComputeMetrics()
TODO(jonathanhuang): Add support for exporting to JSON.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from lvis import eval as lvis_eval
from lvis import lvis
import numpy as np
from pycocotools import mask
import six
from six.moves import range
def RleCompress(masks):
"""Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
"""
rle = mask.encode(np.asfortranarray(masks))
rle['counts'] = six.ensure_str(rle['counts'])
return rle
def _ConvertBoxToCOCOFormat(box):
"""Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
"""
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
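# Worked example: for box = [10., 20., 50., 60.] (ymin, xmin, ymax, xmax),
# _ConvertBoxToCOCOFormat(box) returns [20., 10., 40., 40.]
# (xmin, ymin, width = 60 - 20, height = 50 - 10).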
class LVISWrapper(lvis.LVIS):
"""Wrapper for the lvis.LVIS class."""
def __init__(self, dataset, detection_type='bbox'):
"""LVISWrapper constructor.
See https://www.lvisdataset.org/dataset for a description of the format.
    By default, the lvis.LVIS class constructor reads from a JSON file.
    This function duplicates that behavior, but loads from a dictionary,
    allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
"""
self.logger = logging.getLogger(__name__)
self.logger.info('Loading annotations.')
self.dataset = dataset
self._create_index()
class LVISEvalWrapper(lvis_eval.LVISEval):
"""LVISEval wrapper."""
def __init__(self, groundtruth=None, detections=None, iou_type='bbox'):
lvis_eval.LVISEval.__init__(
self, groundtruth, detections, iou_type=iou_type)
self._iou_type = iou_type
def ComputeMetrics(self):
self.run()
    return self.results
def ExportSingleImageGroundtruthToLVIS(image_id,
next_annotation_id,
category_id_set,
groundtruth_boxes,
groundtruth_classes,
groundtruth_masks=None,
groundtruth_area=None):
"""Export groundtruth of a single image to LVIS format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the LVIS evaluation API. Note
that the image_ids provided here must match the ones given to
ExportSingleImageDetectionMasksToLVIS. We assume that boxes, classes and masks
are in correspondence - that is, e.g., groundtruth_boxes[i, :], and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box.
Args:
image_id: a unique image identifier castable to integer.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_masks: optional uint8 numpy array of shape [num_detections,
image_height, image_width] containing detection_masks.
groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
provided, then the area values (in the original absolute coordinates) will
be populated instead of calculated from bounding box coordinates.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
                     'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
if groundtruth_area is not None and groundtruth_area[i] > 0:
area = float(groundtruth_area[i])
else:
area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
(groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]))
export_dict = {
'id':
next_annotation_id + i,
'image_id':
int(image_id),
'category_id':
int(groundtruth_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
'area': area,
}
if groundtruth_masks is not None:
export_dict['segmentation'] = RleCompress(groundtruth_masks[i])
groundtruth_list.append(export_dict)
return groundtruth_list
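# Illustrative usage sketch (made-up values): exporting a single class-1 box
# given in absolute image coordinates.
#
#   annotations = ExportSingleImageGroundtruthToLVIS(
#       image_id=1,
#       next_annotation_id=1,
#       category_id_set=set([1]),
#       groundtruth_boxes=np.array([[10., 10., 20., 20.]], dtype=np.float32),
#       groundtruth_classes=np.array([1], dtype=np.int32))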
def ExportSingleImageDetectionMasksToLVIS(image_id,
category_id_set,
detection_masks,
detection_scores,
detection_classes):
"""Export detection masks of a single image to LVIS format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the LVIS evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier castable to integer.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'are expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
                     'compatible lengths and shapes. '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': int(image_id),
'category_id': int(detection_classes[i]),
'segmentation': RleCompress(detection_masks[i]),
'score': float(detection_scores[i])
})
return detections_list
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/lvis_tools.py | lvis_tools.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Object detection calibration metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import metrics_impl
def _safe_div(numerator, denominator):
"""Divides two tensors element-wise, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
t = tf.truediv(numerator, denominator)
zero = tf.zeros_like(t, dtype=denominator.dtype)
condition = tf.greater(denominator, zero)
zero = tf.cast(zero, t.dtype)
return tf.where(condition, t, zero)
def _ece_from_bins(bin_counts, bin_true_sum, bin_preds_sum, name):
"""Calculates Expected Calibration Error from accumulated statistics."""
bin_accuracies = _safe_div(bin_true_sum, bin_counts)
bin_confidences = _safe_div(bin_preds_sum, bin_counts)
abs_bin_errors = tf.abs(bin_accuracies - bin_confidences)
bin_weights = _safe_div(bin_counts, tf.reduce_sum(bin_counts))
return tf.reduce_sum(abs_bin_errors * bin_weights, name=name)
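# Worked example (illustrative numbers): with nbins=2, bin_counts=[2, 2],
# bin_true_sum=[1, 2] and bin_preds_sum=[0.5, 1.8], the bin accuracies are
# [0.5, 1.0], the bin confidences are [0.25, 0.9], and
# ECE = 0.5 * |0.5 - 0.25| + 0.5 * |1.0 - 0.9| = 0.175.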
def expected_calibration_error(y_true, y_pred, nbins=20):
"""Calculates Expected Calibration Error (ECE).
ECE is a scalar summary statistic of calibration error. It is the
sample-weighted average of the difference between the predicted and true
probabilities of a positive detection across uniformly-spaced model
confidences [0, 1]. See referenced paper for a thorough explanation.
Reference:
Guo, et. al, "On Calibration of Modern Neural Networks"
Page 2, Expected Calibration Error (ECE).
https://arxiv.org/pdf/1706.04599.pdf
This function creates three local variables, `bin_counts`, `bin_true_sum`, and
`bin_preds_sum` that are used to compute ECE. For estimation of the metric
over a stream of data, the function creates an `update_op` operation that
updates these variables and returns the ECE.
Args:
y_true: 1-D tf.int64 Tensor of binarized ground truth, corresponding to each
prediction in y_pred.
y_pred: 1-D tf.float32 tensor of model confidence scores in range
[0.0, 1.0].
nbins: int specifying the number of uniformly-spaced bins into which y_pred
will be bucketed.
Returns:
value_op: A value metric op that returns ece.
update_op: An operation that increments the `bin_counts`, `bin_true_sum`,
and `bin_preds_sum` variables appropriately and whose value matches `ece`.
Raises:
InvalidArgumentError: if y_pred is not in [0.0, 1.0].
"""
bin_counts = metrics_impl.metric_variable(
[nbins], tf.float32, name='bin_counts')
bin_true_sum = metrics_impl.metric_variable(
[nbins], tf.float32, name='true_sum')
bin_preds_sum = metrics_impl.metric_variable(
[nbins], tf.float32, name='preds_sum')
with tf.control_dependencies([
tf.assert_greater_equal(y_pred, 0.0),
tf.assert_less_equal(y_pred, 1.0),
]):
bin_ids = tf.histogram_fixed_width_bins(y_pred, [0.0, 1.0], nbins=nbins)
with tf.control_dependencies([bin_ids]):
update_bin_counts_op = tf.assign_add(
bin_counts, tf.cast(tf.bincount(bin_ids, minlength=nbins),
dtype=tf.float32))
update_bin_true_sum_op = tf.assign_add(
bin_true_sum,
tf.cast(tf.bincount(bin_ids, weights=y_true, minlength=nbins),
dtype=tf.float32))
update_bin_preds_sum_op = tf.assign_add(
bin_preds_sum,
tf.cast(tf.bincount(bin_ids, weights=y_pred, minlength=nbins),
dtype=tf.float32))
ece_update_op = _ece_from_bins(
update_bin_counts_op,
update_bin_true_sum_op,
update_bin_preds_sum_op,
name='update_op')
ece = _ece_from_bins(bin_counts, bin_true_sum, bin_preds_sum, name='value')
return ece, ece_update_op
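# Minimal usage sketch (TF1 style; `y_true`, `y_pred` and `num_batches` are
# assumed to be defined by the caller):
#
#   ece, ece_update_op = expected_calibration_error(y_true, y_pred, nbins=20)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     for _ in range(num_batches):
#       sess.run(ece_update_op)   # accumulate bin statistics per batch
#     print(sess.run(ece))        # final ECE over all processed batches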
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/calibration_metrics.py | calibration_metrics.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for oid_od_challenge_evaluation_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import zlib
import numpy as np
import pandas as pd
from pycocotools import mask as coco_mask
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import oid_challenge_evaluation_utils as utils
def encode_mask(mask_to_encode):
"""Encodes a binary mask into the Kaggle challenge text format.
The encoding is done in three stages:
- COCO RLE-encoding,
- zlib compression,
- base64 encoding (to use as entry in csv file).
Args:
mask_to_encode: binary np.ndarray of dtype bool and 2d shape.
Returns:
A (base64) text string of the encoded mask.
"""
mask_to_encode = np.squeeze(mask_to_encode)
mask_to_encode = mask_to_encode.reshape(mask_to_encode.shape[0],
mask_to_encode.shape[1], 1)
mask_to_encode = mask_to_encode.astype(np.uint8)
mask_to_encode = np.asfortranarray(mask_to_encode)
encoded_mask = coco_mask.encode(mask_to_encode)[0]['counts']
compressed_mask = zlib.compress(six.ensure_binary(encoded_mask),
zlib.Z_BEST_COMPRESSION)
base64_mask = base64.b64encode(compressed_mask)
return base64_mask
class OidUtilTest(tf.test.TestCase):
def testMaskToNormalizedBox(self):
mask_np = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]])
box = utils._to_normalized_box(mask_np)
self.assertAllEqual(np.array([0.25, 0.25, 0.75, 0.5]), box)
mask_np = np.array([[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]])
box = utils._to_normalized_box(mask_np)
self.assertAllEqual(np.array([0.25, 0.25, 1.0, 1.0]), box)
mask_np = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
box = utils._to_normalized_box(mask_np)
self.assertAllEqual(np.array([0.0, 0.0, 0.0, 0.0]), box)
def testDecodeToTensors(self):
mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0]], dtype=np.uint8)
mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.uint8)
encoding1 = encode_mask(mask1)
encoding2 = encode_mask(mask2)
vals = pd.Series([encoding1, encoding2])
image_widths = pd.Series([mask1.shape[1], mask2.shape[1]])
image_heights = pd.Series([mask1.shape[0], mask2.shape[0]])
segm, bbox = utils._decode_raw_data_into_masks_and_boxes(
vals, image_widths, image_heights)
expected_segm = np.concatenate(
[np.expand_dims(mask1, 0),
np.expand_dims(mask2, 0)], axis=0)
expected_bbox = np.array([[0.0, 0.5, 2.0 / 3.0, 1.0], [0, 0, 0, 0]])
self.assertAllEqual(expected_segm, segm)
self.assertAllEqual(expected_bbox, bbox)
def testDecodeToTensorsNoMasks(self):
vals = pd.Series([None, None])
image_widths = pd.Series([None, None])
image_heights = pd.Series([None, None])
segm, bbox = utils._decode_raw_data_into_masks_and_boxes(
vals, image_widths, image_heights)
self.assertAllEqual(np.zeros((2, 1, 1), dtype=np.uint8), segm)
self.assertAllEqual(np.zeros((2, 4), dtype=np.float32), bbox)
class OidChallengeEvaluationUtilTest(tf.test.TestCase):
def testBuildGroundtruthDictionaryBoxes(self):
np_data = pd.DataFrame(
[['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 1, None],
['fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 0, None],
['fe58ec1b06db2bb7', '/m/04bcr3', None, None, None, None, None, 1],
['fe58ec1b06db2bb7', '/m/083vt', None, None, None, None, None, 0],
['fe58ec1b06db2bb7', '/m/02gy9n', None, None, None, None, None, 1]],
columns=[
'ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax', 'IsGroupOf',
'ConfidenceImageLabel'
])
class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
groundtruth_dictionary = utils.build_groundtruth_dictionary(
np_data, class_label_map)
self.assertIn(standard_fields.InputDataFields.groundtruth_boxes,
groundtruth_dictionary)
self.assertIn(standard_fields.InputDataFields.groundtruth_classes,
groundtruth_dictionary)
self.assertIn(standard_fields.InputDataFields.groundtruth_group_of,
groundtruth_dictionary)
self.assertIn(standard_fields.InputDataFields.groundtruth_image_classes,
groundtruth_dictionary)
self.assertAllEqual(
np.array([1, 3]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_classes])
self.assertAllEqual(
np.array([1, 0]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_group_of])
expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]])
self.assertNDArrayNear(
expected_boxes_data, groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_boxes], 1e-5)
self.assertAllEqual(
np.array([1, 2, 3]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_image_classes])
def testBuildPredictionDictionaryBoxes(self):
np_data = pd.DataFrame(
[['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 0.1],
['fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 0.2],
['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.1, 0.2, 0.3, 0.3]],
columns=[
'ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax', 'Score'
])
class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
prediction_dictionary = utils.build_predictions_dictionary(
np_data, class_label_map)
self.assertIn(standard_fields.DetectionResultFields.detection_boxes,
prediction_dictionary)
self.assertIn(standard_fields.DetectionResultFields.detection_classes,
prediction_dictionary)
self.assertIn(standard_fields.DetectionResultFields.detection_scores,
prediction_dictionary)
self.assertAllEqual(
np.array([1, 3, 1]), prediction_dictionary[
standard_fields.DetectionResultFields.detection_classes])
expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2],
[0.2, 0.0, 0.3, 0.1]])
self.assertNDArrayNear(
expected_boxes_data, prediction_dictionary[
standard_fields.DetectionResultFields.detection_boxes], 1e-5)
self.assertNDArrayNear(
np.array([0.1, 0.2, 0.3]), prediction_dictionary[
standard_fields.DetectionResultFields.detection_scores], 1e-5)
def testBuildGroundtruthDictionaryMasks(self):
mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
encoding1 = encode_mask(mask1)
encoding2 = encode_mask(mask2)
np_data = pd.DataFrame(
[[
'fe58ec1b06db2bb7', mask1.shape[1], mask1.shape[0], '/m/04bcr3',
0.0, 0.3, 0.5, 0.6, 0, None, encoding1
],
[
'fe58ec1b06db2bb7', None, None, '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 1,
None, None
],
[
'fe58ec1b06db2bb7', mask2.shape[1], mask2.shape[0], '/m/02gy9n',
0.5, 0.6, 0.8, 0.9, 0, None, encoding2
],
[
'fe58ec1b06db2bb7', None, None, '/m/04bcr3', None, None, None,
None, None, 1, None
],
[
'fe58ec1b06db2bb7', None, None, '/m/083vt', None, None, None, None,
None, 0, None
],
[
'fe58ec1b06db2bb7', None, None, '/m/02gy9n', None, None, None,
None, None, 1, None
]],
columns=[
'ImageID', 'ImageWidth', 'ImageHeight', 'LabelName', 'XMin', 'XMax',
'YMin', 'YMax', 'IsGroupOf', 'ConfidenceImageLabel', 'Mask'
])
class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
groundtruth_dictionary = utils.build_groundtruth_dictionary(
np_data, class_label_map)
self.assertIn(standard_fields.InputDataFields.groundtruth_boxes,
groundtruth_dictionary)
self.assertIn(standard_fields.InputDataFields.groundtruth_classes,
groundtruth_dictionary)
self.assertIn(standard_fields.InputDataFields.groundtruth_group_of,
groundtruth_dictionary)
self.assertIn(standard_fields.InputDataFields.groundtruth_image_classes,
groundtruth_dictionary)
self.assertIn(standard_fields.InputDataFields.groundtruth_instance_masks,
groundtruth_dictionary)
self.assertAllEqual(
np.array([1, 3, 3]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_classes])
self.assertAllEqual(
np.array([0, 1, 0]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_group_of])
expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2],
[0.8, 0.5, 0.9, 0.6]])
self.assertNDArrayNear(
expected_boxes_data, groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_boxes], 1e-5)
self.assertAllEqual(
np.array([1, 2, 3]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_image_classes])
expected_segm = np.concatenate([
np.expand_dims(mask1, 0),
np.zeros((1, 4, 4), dtype=np.uint8),
np.expand_dims(mask2, 0)
],
axis=0)
self.assertAllEqual(
expected_segm, groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_instance_masks])
def testBuildPredictionDictionaryMasks(self):
mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)
encoding1 = encode_mask(mask1)
encoding2 = encode_mask(mask2)
np_data = pd.DataFrame([[
'fe58ec1b06db2bb7', mask1.shape[1], mask1.shape[0], '/m/04bcr3',
encoding1, 0.8
],
[
'fe58ec1b06db2bb7', mask2.shape[1],
mask2.shape[0], '/m/02gy9n', encoding2, 0.6
]],
columns=[
'ImageID', 'ImageWidth', 'ImageHeight',
'LabelName', 'Mask', 'Score'
])
class_label_map = {'/m/04bcr3': 1, '/m/02gy9n': 3}
prediction_dictionary = utils.build_predictions_dictionary(
np_data, class_label_map)
self.assertIn(standard_fields.DetectionResultFields.detection_boxes,
prediction_dictionary)
self.assertIn(standard_fields.DetectionResultFields.detection_classes,
prediction_dictionary)
self.assertIn(standard_fields.DetectionResultFields.detection_scores,
prediction_dictionary)
self.assertIn(standard_fields.DetectionResultFields.detection_masks,
prediction_dictionary)
self.assertAllEqual(
np.array([1, 3]), prediction_dictionary[
standard_fields.DetectionResultFields.detection_classes])
expected_boxes_data = np.array([[0.0, 0.5, 0.5, 1.0], [0, 0, 0, 0]])
self.assertNDArrayNear(
expected_boxes_data, prediction_dictionary[
standard_fields.DetectionResultFields.detection_boxes], 1e-5)
self.assertNDArrayNear(
np.array([0.8, 0.6]), prediction_dictionary[
standard_fields.DetectionResultFields.detection_scores], 1e-5)
expected_segm = np.concatenate(
[np.expand_dims(mask1, 0),
np.expand_dims(mask2, 0)], axis=0)
self.assertAllEqual(
expected_segm, prediction_dictionary[
standard_fields.DetectionResultFields.detection_masks])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/oid_challenge_evaluation_utils_test.py | oid_challenge_evaluation_utils_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts data from CSV to the OpenImagesDetectionChallengeEvaluator format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import zlib
import numpy as np
import pandas as pd
from pycocotools import mask as coco_mask
from object_detection.core import standard_fields
def _to_normalized_box(mask_np):
"""Decodes binary segmentation masks into np.arrays and boxes.
Args:
mask_np: np.ndarray of size NxWxH.
Returns:
a np.ndarray of the size Nx4, each row containing normalized coordinates
[YMin, XMin, YMax, XMax] of a box computed of axis parallel enclosing box of
a mask.
"""
coord1, coord2 = np.nonzero(mask_np)
if coord1.size > 0:
ymin = float(min(coord1)) / mask_np.shape[0]
ymax = float(max(coord1) + 1) / mask_np.shape[0]
xmin = float(min(coord2)) / mask_np.shape[1]
xmax = float((max(coord2) + 1)) / mask_np.shape[1]
return np.array([ymin, xmin, ymax, xmax])
else:
return np.array([0.0, 0.0, 0.0, 0.0])
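# Worked example: for the 4x4 mask
#   [[0, 0, 0, 0],
#    [0, 1, 0, 0],
#    [0, 1, 0, 0],
#    [0, 0, 0, 0]]
# the nonzero rows are 1..2 and the nonzero column is 1, so the returned box
# is [1/4, 1/4, 3/4, 2/4] = [0.25, 0.25, 0.75, 0.5].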
def _decode_raw_data_into_masks_and_boxes(segments, image_widths,
image_heights):
"""Decods binary segmentation masks into np.arrays and boxes.
Args:
segments: pandas Series object containing either None entries, or strings
with base64, zlib compressed, COCO RLE-encoded binary masks. All masks are
expected to be the same size.
image_widths: pandas Series of mask widths.
image_heights: pandas Series of mask heights.
Returns:
    a np.ndarray of size NxWxH with the decoded masks, where W and H are
    determined from the encoded masks; for None entries, zero arrays of size
    WxH are created (if the input contains only None values, W=1, H=1). Also
    returns a np.ndarray of size Nx4 with the normalized enclosing box of
    each mask.
"""
segment_masks = []
segment_boxes = []
ind = segments.first_valid_index()
if ind is not None:
size = [int(image_heights[ind]), int(image_widths[ind])]
else:
# It does not matter which size we pick since no masks will ever be
# evaluated.
return np.zeros((segments.shape[0], 1, 1), dtype=np.uint8), np.zeros(
(segments.shape[0], 4), dtype=np.float32)
for segment, im_width, im_height in zip(segments, image_widths,
image_heights):
if pd.isnull(segment):
segment_masks.append(np.zeros([1, size[0], size[1]], dtype=np.uint8))
segment_boxes.append(np.expand_dims(np.array([0.0, 0.0, 0.0, 0.0]), 0))
else:
compressed_mask = base64.b64decode(segment)
rle_encoded_mask = zlib.decompress(compressed_mask)
decoding_dict = {
'size': [im_height, im_width],
'counts': rle_encoded_mask
}
mask_tensor = coco_mask.decode(decoding_dict)
segment_masks.append(np.expand_dims(mask_tensor, 0))
segment_boxes.append(np.expand_dims(_to_normalized_box(mask_tensor), 0))
return np.concatenate(
segment_masks, axis=0), np.concatenate(
segment_boxes, axis=0)
def merge_boxes_and_masks(box_data, mask_data):
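  """Merges box and mask groundtruth DataFrames on their shared key columns."""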
return pd.merge(
box_data,
mask_data,
how='outer',
on=['LabelName', 'ImageID', 'XMin', 'XMax', 'YMin', 'YMax', 'IsGroupOf'])
def build_groundtruth_dictionary(data, class_label_map):
"""Builds a groundtruth dictionary from groundtruth data in CSV file.
Args:
data: Pandas DataFrame with the groundtruth data for a single image.
class_label_map: Class labelmap from string label name to an integer.
Returns:
A dictionary with keys suitable for passing to
OpenImagesDetectionChallengeEvaluator.add_single_ground_truth_image_info:
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
      standard_fields.InputDataFields.groundtruth_image_classes: integer 1-D
        numpy array containing all classes for which image-level labels are
        verified.
standard_fields.InputDataFields.groundtruth_group_of: Optional length
M numpy boolean array denoting whether a groundtruth box contains a
group of instances.
"""
data_location = data[data.XMin.notnull()]
data_labels = data[data.ConfidenceImageLabel.notnull()]
dictionary = {
standard_fields.InputDataFields.groundtruth_boxes:
data_location[['YMin', 'XMin', 'YMax',
'XMax']].to_numpy().astype(float),
standard_fields.InputDataFields.groundtruth_classes:
data_location['LabelName'].map(lambda x: class_label_map[x]
).to_numpy(),
standard_fields.InputDataFields.groundtruth_group_of:
data_location['IsGroupOf'].to_numpy().astype(int),
standard_fields.InputDataFields.groundtruth_image_classes:
data_labels['LabelName'].map(lambda x: class_label_map[x]).to_numpy(),
}
if 'Mask' in data_location:
segments, _ = _decode_raw_data_into_masks_and_boxes(
data_location['Mask'], data_location['ImageWidth'],
data_location['ImageHeight'])
dictionary[
standard_fields.InputDataFields.groundtruth_instance_masks] = segments
return dictionary
def build_predictions_dictionary(data, class_label_map):
"""Builds a predictions dictionary from predictions data in CSV file.
Args:
data: Pandas DataFrame with the predictions data for a single image.
class_label_map: Class labelmap from string label name to an integer.
Returns:
Dictionary with keys suitable for passing to
OpenImagesDetectionChallengeEvaluator.add_single_detected_image_info:
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
"""
dictionary = {
standard_fields.DetectionResultFields.detection_classes:
data['LabelName'].map(lambda x: class_label_map[x]).to_numpy(),
standard_fields.DetectionResultFields.detection_scores:
data['Score'].to_numpy().astype(float)
}
if 'Mask' in data:
segments, boxes = _decode_raw_data_into_masks_and_boxes(
data['Mask'], data['ImageWidth'], data['ImageHeight'])
dictionary[standard_fields.DetectionResultFields.detection_masks] = segments
dictionary[standard_fields.DetectionResultFields.detection_boxes] = boxes
else:
dictionary[standard_fields.DetectionResultFields.detection_boxes] = data[[
'YMin', 'XMin', 'YMax', 'XMax'
]].to_numpy().astype(float)
return dictionary
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/oid_challenge_evaluation_utils.py | oid_challenge_evaluation_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Evaluation executable for detection data.
This executable evaluates precomputed detections produced by a detection
model and writes the evaluation results into csv file metrics.csv, stored
in the directory, specified by --eval_dir.
The evaluation metrics set is supplied in object_detection.protos.EvalConfig
in metrics_set field.
Currently two sets of metrics are supported:
- pascal_voc_metrics: standard PASCAL VOC 2007 metric
- open_images_detection_metrics: Open Images V2 metric
All other fields of object_detection.protos.EvalConfig are ignored.
Example usage:
./compute_metrics \
--eval_dir=path/to/eval_dir \
--eval_config_path=path/to/evaluation/configuration/file \
--input_config_path=path/to/input/configuration/file
"""
import csv
import os
import re
import tensorflow.compat.v1 as tf
from object_detection import eval_util
from object_detection.core import standard_fields
from object_detection.metrics import tf_example_parser
from object_detection.utils import config_util
from object_detection.utils import label_map_util
flags = tf.app.flags
tf.logging.set_verbosity(tf.logging.INFO)
flags.DEFINE_string('eval_dir', None, 'Directory to write eval summaries to.')
flags.DEFINE_string('eval_config_path', None,
'Path to an eval_pb2.EvalConfig config file.')
flags.DEFINE_string('input_config_path', None,
'Path to an eval_pb2.InputConfig config file.')
FLAGS = flags.FLAGS
def _generate_sharded_filenames(filename):
m = re.search(r'@(\d{1,})', filename)
if m:
num_shards = int(m.group(1))
return [
re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards), filename)
for i in range(num_shards)
]
else:
return [filename]
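# Sketch of the sharding convention handled above (hypothetical filenames):
#
#   _generate_sharded_filenames('detections.tfrecord@2')
#   # -> ['detections.tfrecord-00000-of-00002',
#   #     'detections.tfrecord-00001-of-00002']
#   _generate_sharded_filenames('detections.tfrecord')
#   # -> ['detections.tfrecord']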
def _generate_filenames(filenames):
result = []
for filename in filenames:
result += _generate_sharded_filenames(filename)
return result
def read_data_and_evaluate(input_config, eval_config):
"""Reads pre-computed object detections and groundtruth from tf_record.
Args:
input_config: input config proto of type
object_detection.protos.InputReader.
eval_config: evaluation config proto of type
object_detection.protos.EvalConfig.
Returns:
Evaluated detections metrics.
Raises:
ValueError: if input_reader type is not supported or metric type is unknown.
"""
if input_config.WhichOneof('input_reader') == 'tf_record_input_reader':
input_paths = input_config.tf_record_input_reader.input_path
categories = label_map_util.create_categories_from_labelmap(
input_config.label_map_path)
object_detection_evaluators = eval_util.get_evaluators(
eval_config, categories)
    # Only a single evaluator is supported; the first one returned is used.
object_detection_evaluator = object_detection_evaluators[0]
skipped_images = 0
processed_images = 0
for input_path in _generate_filenames(input_paths):
tf.logging.info('Processing file: {0}'.format(input_path))
record_iterator = tf.python_io.tf_record_iterator(path=input_path)
data_parser = tf_example_parser.TfExampleDetectionAndGTParser()
for string_record in record_iterator:
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
processed_images)
processed_images += 1
example = tf.train.Example()
example.ParseFromString(string_record)
decoded_dict = data_parser.parse(example)
if decoded_dict:
object_detection_evaluator.add_single_ground_truth_image_info(
decoded_dict[standard_fields.DetectionResultFields.key],
decoded_dict)
object_detection_evaluator.add_single_detected_image_info(
decoded_dict[standard_fields.DetectionResultFields.key],
decoded_dict)
else:
skipped_images += 1
tf.logging.info('Skipped images: {0}'.format(skipped_images))
return object_detection_evaluator.evaluate()
raise ValueError('Unsupported input_reader_config.')
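# Minimal sketch of the evaluation loop above in isolation (assumed inputs:
# `records` stands for parsed tf.train.Example protos from the shards):
#
#   evaluator = eval_util.get_evaluators(eval_config, categories)[0]
#   parser = tf_example_parser.TfExampleDetectionAndGTParser()
#   for example in records:
#     decoded = parser.parse(example)
#     if decoded:
#       key = decoded[standard_fields.DetectionResultFields.key]
#       evaluator.add_single_ground_truth_image_info(key, decoded)
#       evaluator.add_single_detected_image_info(key, decoded)
#   metrics = evaluator.evaluate()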
def write_metrics(metrics, output_dir):
"""Write metrics to the output directory.
Args:
metrics: A dictionary containing metric names and values.
output_dir: Directory to write metrics to.
"""
tf.logging.info('Writing metrics.')
with open(os.path.join(output_dir, 'metrics.csv'), 'w') as csvfile:
metrics_writer = csv.writer(csvfile, delimiter=',')
for metric_name, metric_value in metrics.items():
metrics_writer.writerow([metric_name, str(metric_value)])
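# For illustration (hypothetical metric values): metrics such as
#   {'PascalBoxes_Precision/mAP@0.5IOU': 0.7093}
# are written to <output_dir>/metrics.csv as one "name,value" row each:
#   PascalBoxes_Precision/mAP@0.5IOU,0.7093
# The metric names depend on the metrics_set configured in EvalConfig.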
def main(argv):
del argv
required_flags = ['input_config_path', 'eval_config_path', 'eval_dir']
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
configs = config_util.get_configs_from_multiple_files(
eval_input_config_path=FLAGS.input_config_path,
eval_config_path=FLAGS.eval_config_path)
eval_config = configs['eval_config']
input_config = configs['eval_input_config']
metrics = read_data_and_evaluate(input_config, eval_config)
# Save metrics
write_metrics(metrics, FLAGS.eval_dir)
if __name__ == '__main__':
tf.app.run(main)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/offline_eval_map_corloc.py | offline_eval_map_corloc.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating object detections with calibration metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
from object_detection.core import region_similarity_calculator
from object_detection.core import standard_fields
from object_detection.core import target_assigner
from object_detection.matchers import argmax_matcher
from object_detection.metrics import calibration_metrics
from object_detection.utils import object_detection_evaluation
# TODO(zbeaver): Implement metrics per category.
class CalibrationDetectionEvaluator(
object_detection_evaluation.DetectionEvaluator):
"""Class to evaluate calibration detection metrics."""
def __init__(self,
categories,
iou_threshold=0.5):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
iou_threshold: Threshold above which to consider a box as matched during
evaluation.
"""
super(CalibrationDetectionEvaluator, self).__init__(categories)
# Constructing target_assigner to match detections to groundtruth.
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
matched_threshold=iou_threshold, unmatched_threshold=iou_threshold)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
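  # Construction sketch (hypothetical categories, not part of the original
  # file):
  #
  #   evaluator = CalibrationDetectionEvaluator(
  #       categories=[{'id': 1, 'name': 'person'}, {'id': 2, 'name': 'dog'}],
  #       iou_threshold=0.5)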
def match_single_image_info(self, image_info):
"""Match detections to groundtruth for a single image.
Detections are matched to available groundtruth in the image based on the
IOU threshold from the constructor. The classes of the detections and
groundtruth matches are then compared. Detections that do not have IOU above
the required threshold or have different classes from their match are
considered negative matches. All inputs in `image_info` originate or are
inferred from the eval_dict passed to class method
`get_estimator_eval_metric_ops`.
Args:
image_info: a tuple or list containing the following (in order):
- gt_boxes: tf.float32 tensor of groundtruth boxes.
- gt_classes: tf.int64 tensor of groundtruth classes associated with
groundtruth boxes.
- num_gt_box: scalar indicating the number of groundtruth boxes per
image.
- det_boxes: tf.float32 tensor of detection boxes.
- det_classes: tf.int64 tensor of detection classes associated with
detection boxes.
- num_det_box: scalar indicating the number of detection boxes per
image.
Returns:
is_class_matched: tf.int64 tensor identical in shape to det_boxes,
indicating whether detection boxes matched with and had the same
class as groundtruth annotations.
"""
(gt_boxes, gt_classes, num_gt_box, det_boxes, det_classes,
num_det_box) = image_info
detection_boxes = det_boxes[:num_det_box]
detection_classes = det_classes[:num_det_box]
groundtruth_boxes = gt_boxes[:num_gt_box]
groundtruth_classes = gt_classes[:num_gt_box]
det_boxlist = box_list.BoxList(detection_boxes)
gt_boxlist = box_list.BoxList(groundtruth_boxes)
# Target assigner requires classes in one-hot format. An additional
# dimension is required since gt_classes are 1-indexed; the zero index is
# provided to all non-matches.
one_hot_depth = tf.cast(tf.add(tf.reduce_max(groundtruth_classes), 1),
dtype=tf.int32)
gt_classes_one_hot = tf.one_hot(
groundtruth_classes, one_hot_depth, dtype=tf.float32)
one_hot_cls_targets, _, _, _, _ = self._target_assigner.assign(
det_boxlist,
gt_boxlist,
gt_classes_one_hot,
unmatched_class_label=tf.zeros(shape=one_hot_depth, dtype=tf.float32))
# Transform from one-hot back to indexes.
cls_targets = tf.argmax(one_hot_cls_targets, axis=1)
is_class_matched = tf.cast(
tf.equal(tf.cast(cls_targets, tf.int64), detection_classes),
dtype=tf.int64)
return is_class_matched
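  # Behavior sketch with assumed values (not from the original file): one
  # groundtruth box of class 2 and two detections, only the first of which
  # overlaps it with IOU above the threshold and with a matching class:
  #
  #   image_info = (
  #       tf.constant([[0., 0., .5, .5]]),        # gt_boxes
  #       tf.constant([2], dtype=tf.int64),       # gt_classes
  #       tf.constant(1),                         # num_gt_box
  #       tf.constant([[0., 0., .5, .5],
  #                    [.6, .6, .9, .9]]),        # det_boxes
  #       tf.constant([2, 2], dtype=tf.int64),    # det_classes
  #       tf.constant(2))                         # num_det_box
  #   # match_single_image_info(image_info) evaluates to [1, 0]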
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns a dictionary of eval metric ops.
Note that once value_op is called, the detections and groundtruth added via
update_op are cleared.
This function can take in groundtruth and detections for a batch of images,
or for a single image. For the latter case, the batch dimension for input
tensors need not be present.
Args:
      eval_dict: A dictionary that holds tensors for evaluating object detection
        performance. For single-image evaluation, this dictionary may be
        produced from eval_util.result_dict_for_single_example(). For
        multi-image evaluation, `eval_dict` should contain the fields
        'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
        properly unpad the tensors from the batch.
Returns:
a dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
update ops must be run together and similarly all value ops must be run
together to guarantee correct behaviour.
"""
# Unpack items from the evaluation dictionary.
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
image_id = eval_dict[input_data_fields.key]
groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
detection_boxes = eval_dict[detection_fields.detection_boxes]
detection_scores = eval_dict[detection_fields.detection_scores]
detection_classes = eval_dict[detection_fields.detection_classes]
num_gt_boxes_per_image = eval_dict.get(
'num_groundtruth_boxes_per_image', None)
num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
is_annotated_batched = eval_dict.get('is_annotated', None)
if not image_id.shape.as_list():
# Apply a batch dimension to all tensors.
image_id = tf.expand_dims(image_id, 0)
groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
detection_boxes = tf.expand_dims(detection_boxes, 0)
detection_scores = tf.expand_dims(detection_scores, 0)
detection_classes = tf.expand_dims(detection_classes, 0)
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
else:
num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
else:
num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
if is_annotated_batched is None:
is_annotated_batched = tf.constant([True])
else:
is_annotated_batched = tf.expand_dims(is_annotated_batched, 0)
else:
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.tile(
tf.shape(groundtruth_boxes)[1:2],
multiples=tf.shape(groundtruth_boxes)[0:1])
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.tile(
tf.shape(detection_boxes)[1:2],
multiples=tf.shape(detection_boxes)[0:1])
if is_annotated_batched is None:
is_annotated_batched = tf.ones_like(image_id, dtype=tf.bool)
# Filter images based on is_annotated_batched and match detections.
image_info = [tf.boolean_mask(tensor, is_annotated_batched) for tensor in
[groundtruth_boxes, groundtruth_classes,
num_gt_boxes_per_image, detection_boxes, detection_classes,
num_det_boxes_per_image]]
is_class_matched = tf.map_fn(
self.match_single_image_info, image_info, dtype=tf.int64)
y_true = tf.squeeze(is_class_matched)
y_pred = tf.squeeze(tf.boolean_mask(detection_scores, is_annotated_batched))
ece, update_op = calibration_metrics.expected_calibration_error(
y_true, y_pred)
return {'CalibrationError/ExpectedCalibrationError': (ece, update_op)}
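  # Consumption sketch (TF1-style session code, mirroring the tests in this
  # repository; not part of the original file):
  #
  #   metric_ops = evaluator.get_estimator_eval_metric_ops(eval_dict)
  #   ece, update_op = metric_ops['CalibrationError/ExpectedCalibrationError']
  #   with tf.Session() as sess:
  #     sess.run(tf.local_variables_initializer())
  #     sess.run(update_op)   # accumulate matches and scores for one batch
  #     print(sess.run(ece))  # expected calibration error in [0, 1]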
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary of groundtruth numpy arrays required
for evaluations.
"""
raise NotImplementedError
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary of detection numpy arrays required for
evaluation.
"""
raise NotImplementedError
def evaluate(self):
"""Evaluates detections and returns a dictionary of metrics."""
raise NotImplementedError
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
raise NotImplementedError
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/calibration_evaluation.py | calibration_evaluation.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_model.object_detection.metrics.lvis_tools."""
from lvis import results as lvis_results
import numpy as np
from pycocotools import mask
import tensorflow.compat.v1 as tf
from object_detection.metrics import lvis_tools
class LVISToolsTest(tf.test.TestCase):
def setUp(self):
super(LVISToolsTest, self).setUp()
mask1 = np.pad(
np.ones([100, 100], dtype=np.uint8),
((100, 56), (100, 56)), mode='constant')
mask2 = np.pad(
np.ones([50, 50], dtype=np.uint8),
((50, 156), (50, 156)), mode='constant')
mask1_rle = lvis_tools.RleCompress(mask1)
mask2_rle = lvis_tools.RleCompress(mask2)
groundtruth_annotations_list = [
{
'id': 1,
'image_id': 1,
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'area': 100.**2,
'segmentation': mask1_rle
},
{
'id': 2,
'image_id': 2,
'category_id': 1,
'bbox': [50., 50., 50., 50.],
'area': 50.**2,
'segmentation': mask2_rle
},
]
image_list = [
{
'id': 1,
'neg_category_ids': [],
'not_exhaustive_category_ids': [],
'height': 256,
'width': 256
},
{
'id': 2,
'neg_category_ids': [],
'not_exhaustive_category_ids': [],
'height': 256,
'width': 256
}
]
category_list = [{'id': 0, 'name': 'person', 'frequency': 'f'},
{'id': 1, 'name': 'cat', 'frequency': 'c'},
{'id': 2, 'name': 'dog', 'frequency': 'r'}]
self._groundtruth_dict = {
'annotations': groundtruth_annotations_list,
'images': image_list,
'categories': category_list
}
self._detections_list = [
{
'image_id': 1,
'category_id': 1,
'segmentation': mask1_rle,
'score': .8
},
{
'image_id': 2,
'category_id': 1,
'segmentation': mask2_rle,
'score': .7
},
]
def testLVISWrappers(self):
groundtruth = lvis_tools.LVISWrapper(self._groundtruth_dict)
detections = lvis_results.LVISResults(groundtruth, self._detections_list)
evaluator = lvis_tools.LVISEvalWrapper(groundtruth, detections,
iou_type='segm')
summary_metrics = evaluator.ComputeMetrics()
self.assertAlmostEqual(1.0, summary_metrics['AP'])
def testSingleImageDetectionMaskExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
lvis_annotations = lvis_tools.ExportSingleImageDetectionMasksToLVIS(
image_id=1,
category_id_set=set([1, 2, 3]),
detection_classes=classes,
detection_scores=scores,
detection_masks=masks)
expected_counts = ['04', '31', '4']
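    # Reading the expected counts (an assumption about pycocotools' compressed
    # RLE string encoding, where small run lengths map to single characters):
    # runs of 0s and 1s alternate over the column-major flattened 2x2 mask, so
    # [[1, 1], [1, 1]] -> [0, 4] -> '04' (zero 0s, then four 1s),
    # [[0, 0], [0, 1]] -> [3, 1] -> '31', and the all-zero mask -> [4] -> '4'.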
for i, mask_annotation in enumerate(lvis_annotations):
self.assertEqual(mask_annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
mask_annotation['segmentation']), masks[i])))
self.assertEqual(mask_annotation['image_id'], 1)
self.assertEqual(mask_annotation['category_id'], classes[i])
self.assertAlmostEqual(mask_annotation['score'], scores[i])
def testSingleImageGroundtruthExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
lvis_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
next_annotation_id = 1
expected_counts = ['04', '31', '4']
lvis_annotations = lvis_tools.ExportSingleImageGroundtruthToLVIS(
image_id=1,
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_masks=masks)
for i, annotation in enumerate(lvis_annotations):
self.assertEqual(annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
annotation['segmentation']), masks[i])))
self.assertTrue(np.all(np.isclose(annotation['bbox'], lvis_boxes[i])))
self.assertEqual(annotation['image_id'], 1)
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/lvis_tools_test.py | lvis_tools_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.metrics.coco_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import tf_version
def _get_categories_list():
return [{
'id': 1,
'name': 'person'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'cat'
}]
def _get_category_keypoints_dict():
return {
'person': [{
'id': 0,
'name': 'left_eye'
}, {
'id': 3,
'name': 'right_eye'
}],
'dog': [{
'id': 1,
'name': 'tail_start'
}, {
'id': 2,
'name': 'mouth'
}]
}
class CocoDetectionEvaluationTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
"""Tests that mAP is calculated correctly on GT and Detections."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image3',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image3',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
"""Tests computing mAP with is_crowd GT boxes skipped."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1, 2]),
standard_fields.InputDataFields.groundtruth_is_crowd:
np.array([0, 1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self):
"""Tests computing mAP with empty is_crowd array passed in."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_is_crowd:
np.array([])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testRejectionOnDuplicateGroundtruth(self):
"""Tests that groundtruth cannot be added more than once for an image."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
# Add groundtruth
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
coco_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1
})
groundtruth_lists_len = len(coco_evaluator._groundtruth_list)
# Add groundtruth with the same image id.
coco_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1
})
self.assertEqual(groundtruth_lists_len,
len(coco_evaluator._groundtruth_list))
def testRejectionOnDuplicateDetections(self):
"""Tests that detections cannot be added more than once for an image."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
# Add groundtruth
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[99., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
detections_lists_len = len(coco_evaluator._detection_boxes_list)
coco_evaluator.add_single_detected_image_info(
image_id='image1', # Note that this image id was previously added.
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
self.assertEqual(detections_lists_len,
len(coco_evaluator._detection_boxes_list))
def testExceptionRaisedWithMissingGroundtruth(self):
"""Tests that exception is raised for detection with missing groundtruth."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
with self.assertRaises(ValueError):
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X')
class CocoEvaluationPyFuncTest(tf.test.TestCase):
def _MatchingGroundtruthAndDetections(self, coco_evaluator):
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(update_op,
feed_dict={
image_id: 'image1',
groundtruth_boxes: np.array([[100., 100., 200., 200.]]),
groundtruth_classes: np.array([1]),
detection_boxes: np.array([[100., 100., 200., 200.]]),
detection_scores: np.array([.8]),
detection_classes: np.array([1])
})
sess.run(update_op,
feed_dict={
image_id: 'image2',
groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
groundtruth_classes: np.array([3]),
detection_boxes: np.array([[50., 50., 100., 100.]]),
detection_scores: np.array([.7]),
detection_classes: np.array([3])
})
sess.run(update_op,
feed_dict={
image_id: 'image3',
groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
groundtruth_classes: np.array([2]),
detection_boxes: np.array([[25., 25., 50., 50.]]),
detection_scores: np.array([.9]),
detection_classes: np.array([2])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
self._MatchingGroundtruthAndDetections(coco_evaluator)
  # The evaluator is configured to skip unmatched detector predictions via
  # groundtruth_labeled_classes, but it reverts to fully-labeled evaluation
  # since no groundtruth_labeled_classes are set.
def testGetMAPWithSkipUnmatchedPredictionsIgnoreGrountruthLabeledClasses(
self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list(), skip_predictions_for_unlabeled_class=True)
self._MatchingGroundtruthAndDetections(coco_evaluator)
# Test skipping unmatched detector predictions with
# groundtruth_labeled_classes.
def testGetMAPWithSkipUnmatchedPredictions(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list(), skip_predictions_for_unlabeled_class=True)
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
groundtruth_labeled_classes = tf.placeholder(tf.float32, shape=(None))
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key:
image_id,
input_data_fields.groundtruth_boxes:
groundtruth_boxes,
input_data_fields.groundtruth_classes:
groundtruth_classes,
input_data_fields.groundtruth_labeled_classes:
groundtruth_labeled_classes,
detection_fields.detection_boxes:
detection_boxes,
detection_fields.detection_scores:
detection_scores,
detection_fields.detection_classes:
detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
groundtruth_classes:
np.array([1]),
# Only class 1 is exhaustively labeled for image1.
groundtruth_labeled_classes:
np.array([0., 1., 0., 0.]),
detection_boxes:
np.array([[100., 100., 200., 200.], [100., 100., 200.,
200.]]),
detection_scores:
np.array([.8, .95]),
detection_classes:
np.array([1, 2])
})
sess.run(
update_op,
feed_dict={
image_id: 'image2',
groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
groundtruth_classes: np.array([3]),
groundtruth_labeled_classes: np.array([0., 0., 0., 1.]),
detection_boxes: np.array([[50., 50., 100., 100.]]),
detection_scores: np.array([.7]),
detection_classes: np.array([3])
})
sess.run(
update_op,
feed_dict={
image_id: 'image3',
groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
groundtruth_classes: np.array([2]),
groundtruth_labeled_classes: np.array([0., 0., 1., 0.]),
detection_boxes: np.array([[25., 25., 50., 50.]]),
detection_scores: np.array([.9]),
detection_classes: np.array([2])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsIsAnnotated(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
is_annotated = tf.placeholder(tf.bool, shape=())
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
'is_annotated': is_annotated,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(update_op,
feed_dict={
image_id: 'image1',
groundtruth_boxes: np.array([[100., 100., 200., 200.]]),
groundtruth_classes: np.array([1]),
is_annotated: True,
detection_boxes: np.array([[100., 100., 200., 200.]]),
detection_scores: np.array([.8]),
detection_classes: np.array([1])
})
sess.run(update_op,
feed_dict={
image_id: 'image2',
groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
groundtruth_classes: np.array([3]),
is_annotated: True,
detection_boxes: np.array([[50., 50., 100., 100.]]),
detection_scores: np.array([.7]),
detection_classes: np.array([3])
})
sess.run(update_op,
feed_dict={
image_id: 'image3',
groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
groundtruth_classes: np.array([2]),
is_annotated: True,
detection_boxes: np.array([[25., 25., 50., 50.]]),
detection_scores: np.array([.9]),
detection_classes: np.array([2])
})
sess.run(update_op,
feed_dict={
image_id: 'image4',
groundtruth_boxes: np.zeros((0, 4)),
groundtruth_classes: np.zeros((0)),
is_annotated: False, # Note that this image isn't annotated.
detection_boxes: np.array([[25., 25., 50., 50.],
[25., 25., 70., 50.],
[25., 25., 80., 50.],
[25., 25., 90., 50.]]),
detection_scores: np.array([0.6, 0.7, 0.8, 0.9]),
detection_classes: np.array([1, 2, 2, 3])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsPadded(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.], [-1, -1, -1, -1]]),
groundtruth_classes:
np.array([1, -1]),
detection_boxes:
np.array([[100., 100., 200., 200.], [0., 0., 0., 0.]]),
detection_scores:
np.array([.8, 0.]),
detection_classes:
np.array([1, -1])
})
sess.run(
update_op,
feed_dict={
image_id:
'image2',
groundtruth_boxes:
np.array([[50., 50., 100., 100.], [-1, -1, -1, -1]]),
groundtruth_classes:
np.array([3, -1]),
detection_boxes:
np.array([[50., 50., 100., 100.], [0., 0., 0., 0.]]),
detection_scores:
np.array([.7, 0.]),
detection_classes:
np.array([3, -1])
})
sess.run(
update_op,
feed_dict={
image_id:
'image3',
groundtruth_boxes:
np.array([[25., 25., 50., 50.], [10., 10., 15., 15.]]),
groundtruth_classes:
np.array([2, 2]),
detection_boxes:
np.array([[25., 25., 50., 50.], [10., 10., 15., 15.]]),
detection_scores:
np.array([.95, .9]),
detection_classes:
np.array([2, 2])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
batch_size = 3
image_id = tf.placeholder(tf.string, shape=(batch_size))
groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(update_op,
feed_dict={
image_id: ['image1', 'image2', 'image3'],
groundtruth_boxes: np.array([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]]),
groundtruth_classes: np.array([[1], [3], [2]]),
detection_boxes: np.array([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]]),
detection_scores: np.array([[.8], [.7], [.9]]),
detection_classes: np.array([[1], [3], [2]])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsPaddedBatches(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
batch_size = 3
image_id = tf.placeholder(tf.string, shape=(batch_size))
groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
num_gt_boxes_per_image = tf.placeholder(tf.int32, shape=(None))
detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
num_det_boxes_per_image = tf.placeholder(tf.int32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
'num_groundtruth_boxes_per_image': num_gt_boxes_per_image,
'num_det_boxes_per_image': num_det_boxes_per_image
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id: ['image1', 'image2', 'image3'],
groundtruth_boxes:
np.array([[[100., 100., 200., 200.], [-1, -1, -1, -1]],
[[50., 50., 100., 100.], [-1, -1, -1, -1]],
[[25., 25., 50., 50.], [10., 10., 15., 15.]]]),
groundtruth_classes:
np.array([[1, -1], [3, -1], [2, 2]]),
num_gt_boxes_per_image:
np.array([1, 1, 2]),
detection_boxes:
np.array([[[100., 100., 200., 200.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[50., 50., 100., 100.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[25., 25., 50., 50.],
[10., 10., 15., 15.],
[10., 10., 15., 15.]]]),
detection_scores:
np.array([[.8, 0., 0.], [.7, 0., 0.], [.95, .9, 0.9]]),
detection_classes:
np.array([[1, -1, -1], [3, -1, -1], [2, 2, 2]]),
num_det_boxes_per_image:
np.array([1, 1, 3]),
})
# Check the number of bounding boxes added.
self.assertEqual(len(coco_evaluator._groundtruth_list), 4)
self.assertEqual(len(coco_evaluator._detection_boxes_list), 5)
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
class CocoKeypointEvaluationTest(tf.test.TestCase):
def testGetOneMAPWithMatchingKeypoints(self):
"""Tests that correct mAP for keypoints is calculated."""
category_keypoint_dict = _get_category_keypoints_dict()
coco_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_keypoints:
np.array([[[150., 160.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [170., 180.]]]),
standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
np.array([[2, 0, 0, 2]])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_keypoints:
np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]])
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_keypoints:
np.array([[[75., 76.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [77., 78.]]]),
standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
np.array([[2, 0, 0, 2]])
})
coco_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_keypoints:
np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
1.0)
def testGroundtruthListValues(self):
category_keypoint_dict = _get_category_keypoints_dict()
coco_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_keypoints:
np.array([[[150., 160.], [float('nan'), float('nan')],
[float('nan'), float('nan')], [170., 180.]]]),
standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
np.array([[2, 0, 0, 2]]),
standard_fields.InputDataFields.groundtruth_area: np.array([15.])
})
gt_dict = coco_evaluator._groundtruth_list[0]
self.assertEqual(gt_dict['id'], 1)
self.assertAlmostEqual(gt_dict['bbox'], [100.0, 100.0, 100.0, 100.0])
self.assertAlmostEqual(
gt_dict['keypoints'], [160.0, 150.0, 2, 180.0, 170.0, 2])
self.assertEqual(gt_dict['num_keypoints'], 2)
self.assertAlmostEqual(gt_dict['area'], 15.0)
def testKeypointVisibilitiesAreOptional(self):
"""Tests that evaluator works when visibilities aren't provided."""
category_keypoint_dict = _get_category_keypoints_dict()
coco_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_keypoints:
np.array([[[150., 160.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [170., 180.]]])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_keypoints:
np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]])
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_keypoints:
np.array([[[75., 76.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [77., 78.]]])
})
coco_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_keypoints:
np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
1.0)
def testFiltersDetectionsFromOtherCategories(self):
"""Tests that the evaluator ignores detections from other categories."""
category_keypoint_dict = _get_category_keypoints_dict()
coco_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=2, category_keypoints=category_keypoint_dict['person'],
class_text='dog')
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_keypoints:
np.array([[[150., 160.], [170., 180.], [110., 120.],
[130., 140.]]]),
standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
np.array([[2, 2, 2, 2]])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.9]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_keypoints:
np.array([[[150., 160.], [170., 180.], [110., 120.],
[130., 140.]]])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/dog'],
-1.0)
def testHandlesUnlabeledKeypointData(self):
"""Tests that the evaluator handles missing keypoints GT."""
category_keypoint_dict = _get_category_keypoints_dict()
coco_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_keypoints:
np.array([[[150., 160.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [170., 180.]]]),
standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
np.array([[0, 0, 0, 2]])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_keypoints:
np.array([[[50., 60.], [1., 2.], [3., 4.], [170., 180.]]])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
1.0)
def testIgnoresCrowdAnnotations(self):
"""Tests that the evaluator ignores GT marked as crowd."""
category_keypoint_dict = _get_category_keypoints_dict()
coco_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_is_crowd:
np.array([1]),
standard_fields.InputDataFields.groundtruth_keypoints:
np.array([[[150., 160.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [170., 180.]]]),
standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
np.array([[2, 0, 0, 2]])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_keypoints:
np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
-1.0)
@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X')
class CocoKeypointEvaluationPyFuncTest(tf.test.TestCase):
def testGetOneMAPWithMatchingKeypoints(self):
category_keypoint_dict = _get_category_keypoints_dict()
coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2))
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_keypoints: groundtruth_keypoints,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_keypoints: detection_keypoints,
}
eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops(
eval_dict)
_, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
groundtruth_classes:
np.array([1]),
groundtruth_keypoints:
np.array([[[150., 160.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [170., 180.]]]),
detection_boxes:
np.array([[100., 100., 200., 200.]]),
detection_scores:
np.array([.8]),
detection_classes:
np.array([1]),
detection_keypoints:
np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]])
})
sess.run(
update_op,
feed_dict={
image_id:
'image2',
groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
groundtruth_classes:
np.array([1]),
groundtruth_keypoints:
np.array([[[75., 76.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [77., 78.]]]),
detection_boxes:
np.array([[50., 50., 100., 100.]]),
detection_scores:
np.array([.7]),
detection_classes:
np.array([1]),
detection_keypoints:
np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], 1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'],
1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'],
1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'],
1.0)
self.assertAlmostEqual(
metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], 1.0)
self.assertFalse(coco_keypoint_evaluator._groundtruth_list)
self.assertFalse(coco_keypoint_evaluator._detection_boxes_list)
self.assertFalse(coco_keypoint_evaluator._image_ids)
def testGetOneMAPWithMatchingKeypointsAndVisibilities(self):
category_keypoint_dict = _get_category_keypoints_dict()
coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2))
groundtruth_keypoint_visibilities = tf.placeholder(
tf.float32, shape=(None, 4))
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key:
image_id,
input_data_fields.groundtruth_boxes:
groundtruth_boxes,
input_data_fields.groundtruth_classes:
groundtruth_classes,
input_data_fields.groundtruth_keypoints:
groundtruth_keypoints,
input_data_fields.groundtruth_keypoint_visibilities:
groundtruth_keypoint_visibilities,
detection_fields.detection_boxes:
detection_boxes,
detection_fields.detection_scores:
detection_scores,
detection_fields.detection_classes:
detection_classes,
detection_fields.detection_keypoints:
detection_keypoints,
}
eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops(
eval_dict)
_, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
groundtruth_classes:
np.array([1]),
groundtruth_keypoints:
np.array([[[150., 160.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [170., 180.]]]),
groundtruth_keypoint_visibilities:
np.array([[0, 0, 0, 2]]),
detection_boxes:
np.array([[100., 100., 200., 200.]]),
detection_scores:
np.array([.8]),
detection_classes:
np.array([1]),
detection_keypoints:
np.array([[[50., 60.], [1., 2.], [3., 4.], [170., 180.]]])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], -1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'],
1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'],
1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'],
1.0)
self.assertAlmostEqual(
metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], -1.0)
self.assertFalse(coco_keypoint_evaluator._groundtruth_list)
self.assertFalse(coco_keypoint_evaluator._detection_boxes_list)
self.assertFalse(coco_keypoint_evaluator._image_ids)
def testGetOneMAPWithMatchingKeypointsIsAnnotated(self):
category_keypoint_dict = _get_category_keypoints_dict()
coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2))
is_annotated = tf.placeholder(tf.bool, shape=())
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_keypoints: groundtruth_keypoints,
'is_annotated': is_annotated,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_keypoints: detection_keypoints,
}
eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops(
eval_dict)
_, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
groundtruth_classes:
np.array([1]),
groundtruth_keypoints:
np.array([[[150., 160.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [170., 180.]]]),
is_annotated:
True,
detection_boxes:
np.array([[100., 100., 200., 200.]]),
detection_scores:
np.array([.8]),
detection_classes:
np.array([1]),
detection_keypoints:
np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]])
})
sess.run(
update_op,
feed_dict={
image_id:
'image2',
groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
groundtruth_classes:
np.array([1]),
groundtruth_keypoints:
np.array([[[75., 76.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [77., 78.]]]),
is_annotated:
True,
detection_boxes:
np.array([[50., 50., 100., 100.]]),
detection_scores:
np.array([.7]),
detection_classes:
np.array([1]),
detection_keypoints:
np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]])
})
sess.run(
update_op,
feed_dict={
image_id:
'image3',
groundtruth_boxes:
np.zeros((0, 4)),
groundtruth_classes:
np.zeros((0)),
groundtruth_keypoints:
np.zeros((0, 4, 2)),
is_annotated:
False, # Note that this image isn't annotated.
detection_boxes:
np.array([[25., 25., 50., 50.], [25., 25., 70., 50.],
[25., 25., 80., 50.], [25., 25., 90., 50.]]),
detection_scores:
np.array([0.6, 0.7, 0.8, 0.9]),
detection_classes:
np.array([1, 2, 2, 3]),
detection_keypoints:
np.array([[[0., 0.], [0., 0.], [0., 0.], [0., 0.]]])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], 1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'],
1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'],
1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'],
1.0)
self.assertAlmostEqual(
metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], 1.0)
self.assertFalse(coco_keypoint_evaluator._groundtruth_list)
self.assertFalse(coco_keypoint_evaluator._detection_boxes_list)
self.assertFalse(coco_keypoint_evaluator._image_ids)
def testGetOneMAPWithMatchingKeypointsBatched(self):
category_keypoint_dict = _get_category_keypoints_dict()
coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator(
category_id=1, category_keypoints=category_keypoint_dict['person'],
class_text='person')
batch_size = 2
image_id = tf.placeholder(tf.string, shape=(batch_size))
groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
groundtruth_keypoints = tf.placeholder(
tf.float32, shape=(batch_size, None, 4, 2))
detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_keypoints = tf.placeholder(
tf.float32, shape=(batch_size, None, 4, 2))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_keypoints: groundtruth_keypoints,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_keypoints: detection_keypoints
}
eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops(
eval_dict)
_, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id: ['image1', 'image2'],
groundtruth_boxes:
np.array([[[100., 100., 200., 200.]], [[50., 50., 100.,
100.]]]),
groundtruth_classes:
np.array([[1], [3]]),
groundtruth_keypoints:
np.array([[[[150., 160.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [170., 180.]]],
[[[75., 76.], [float('nan'),
float('nan')],
[float('nan'), float('nan')], [77., 78.]]]]),
detection_boxes:
np.array([[[100., 100., 200., 200.]], [[50., 50., 100.,
100.]]]),
detection_scores:
np.array([[.8], [.7]]),
detection_classes:
np.array([[1], [3]]),
detection_keypoints:
np.array([[[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]],
[[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'],
1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], -1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'],
1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'],
1.0)
self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'],
1.0)
self.assertAlmostEqual(
metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0)
self.assertAlmostEqual(
metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], -1.0)
self.assertFalse(coco_keypoint_evaluator._groundtruth_list)
self.assertFalse(coco_keypoint_evaluator._detection_boxes_list)
self.assertFalse(coco_keypoint_evaluator._image_ids)
class CocoMaskEvaluationTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image3',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image3',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
# The value of 5 is equivalent to 1, since masks will be
# thresholded and binarized before evaluation.
np.pad(5 * np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
coco_evaluator.clear()
self.assertFalse(coco_evaluator._image_id_to_mask_shape_map)
self.assertFalse(coco_evaluator._image_ids_with_detections)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_masks_list)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
"""Tests computing mAP with is_crowd GT boxes skipped."""
coco_evaluator = coco_evaluation.CocoMaskEvaluator(
_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1, 2]),
standard_fields.InputDataFields.groundtruth_is_crowd:
np.array([0, 1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.concatenate(
[np.pad(np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (100, 56), (100, 56)), mode='constant'),
np.pad(np.ones([1, 101, 101], dtype=np.uint8),
((0, 0), (99, 56), (99, 56)), mode='constant')],
axis=0)
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (100, 56), (100, 56)), mode='constant')
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X')
class CocoMaskEvaluationPyFuncTest(tf.test.TestCase):
def testAddEvalDict(self):
coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list())
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None))
original_image_spatial_shape = tf.placeholder(tf.int32, shape=(None, 2))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_instance_masks: groundtruth_masks,
input_data_fields.original_image_spatial_shape:
original_image_spatial_shape,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
}
update_op = coco_evaluator.add_eval_dict(eval_dict)
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]),
groundtruth_classes:
np.array([1, 2]),
groundtruth_masks:
np.stack([
np.pad(
np.ones([100, 100], dtype=np.uint8), ((10, 10),
(10, 10)),
mode='constant'),
np.pad(
np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)),
mode='constant')
]),
original_image_spatial_shape: np.array([[120, 120]]),
detection_scores:
np.array([.9, .8]),
detection_classes:
np.array([2, 1]),
detection_masks:
np.stack([
np.pad(
np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)),
mode='constant'),
np.pad(
np.ones([100, 100], dtype=np.uint8), ((10, 10),
(10, 10)),
mode='constant'),
])
})
self.assertLen(coco_evaluator._groundtruth_list, 2)
self.assertLen(coco_evaluator._detection_masks_list, 2)
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list())
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None))
original_image_spatial_shape = tf.placeholder(tf.int32, shape=(None, 2))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_instance_masks: groundtruth_masks,
input_data_fields.original_image_spatial_shape:
original_image_spatial_shape,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionMasks_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]),
groundtruth_classes:
np.array([1, 2]),
groundtruth_masks:
np.stack([
np.pad(
np.ones([100, 100], dtype=np.uint8), ((10, 10),
(10, 10)),
mode='constant'),
np.pad(
np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)),
mode='constant')
]),
original_image_spatial_shape: np.array([[120, 120], [120, 120]]),
detection_scores:
np.array([.9, .8]),
detection_classes:
np.array([2, 1]),
detection_masks:
np.stack([
np.pad(
np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)),
mode='constant'),
np.pad(
np.ones([100, 100], dtype=np.uint8), ((10, 10),
(10, 10)),
mode='constant'),
])
})
sess.run(update_op,
feed_dict={
image_id: 'image2',
groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
groundtruth_classes: np.array([1]),
groundtruth_masks: np.pad(np.ones([1, 50, 50],
dtype=np.uint8),
((0, 0), (10, 10), (10, 10)),
mode='constant'),
original_image_spatial_shape: np.array([[70, 70]]),
detection_scores: np.array([.8]),
detection_classes: np.array([1]),
detection_masks: np.pad(np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)),
mode='constant')
})
sess.run(update_op,
feed_dict={
image_id: 'image3',
groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
groundtruth_classes: np.array([1]),
groundtruth_masks: np.pad(np.ones([1, 25, 25],
dtype=np.uint8),
((0, 0), (10, 10), (10, 10)),
mode='constant'),
original_image_spatial_shape: np.array([[45, 45]]),
detection_scores: np.array([.8]),
detection_classes: np.array([1]),
detection_masks: np.pad(np.ones([1, 25, 25],
dtype=np.uint8),
((0, 0), (10, 10), (10, 10)),
mode='constant')
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._image_ids_with_detections)
self.assertFalse(coco_evaluator._image_id_to_mask_shape_map)
self.assertFalse(coco_evaluator._detection_masks_list)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self):
coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list())
batch_size = 3
image_id = tf.placeholder(tf.string, shape=(batch_size))
groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
groundtruth_masks = tf.placeholder(
tf.uint8, shape=(batch_size, None, None, None))
original_image_spatial_shape = tf.placeholder(tf.int32, shape=(None, 2))
detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_masks = tf.placeholder(
tf.uint8, shape=(batch_size, None, None, None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_instance_masks: groundtruth_masks,
input_data_fields.original_image_spatial_shape:
original_image_spatial_shape,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionMasks_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id: ['image1', 'image2', 'image3'],
groundtruth_boxes:
np.array([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]]),
groundtruth_classes:
np.array([[1], [1], [1]]),
groundtruth_masks:
np.stack([
np.pad(
np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (0, 0), (0, 0)),
mode='constant'),
np.pad(
np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (25, 25), (25, 25)),
mode='constant'),
np.pad(
np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (37, 38), (37, 38)),
mode='constant')
],
axis=0),
original_image_spatial_shape: np.array(
[[100, 100], [100, 100], [100, 100]]),
detection_scores:
np.array([[.8], [.8], [.8]]),
detection_classes:
np.array([[1], [1], [1]]),
detection_masks:
np.stack([
np.pad(
np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (0, 0), (0, 0)),
mode='constant'),
np.pad(
np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (25, 25), (25, 25)),
mode='constant'),
np.pad(
np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (37, 38), (37, 38)),
mode='constant')
],
axis=0)
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.50IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.75IOU'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._image_ids_with_detections)
self.assertFalse(coco_evaluator._image_id_to_mask_shape_map)
self.assertFalse(coco_evaluator._detection_masks_list)
def _get_panoptic_test_data():
  # image1 contains 3 people in groundtruth (2 normal annotations and 1
  # "is_crowd" annotation), and 3 people in the prediction.
gt_masks1 = np.zeros((3, 50, 50), dtype=np.uint8)
result_masks1 = np.zeros((3, 50, 50), dtype=np.uint8)
gt_masks1[0, 10:20, 20:30] = 1
result_masks1[0, 10:18, 20:30] = 1
gt_masks1[1, 25:30, 25:35] = 1
result_masks1[1, 18:25, 25:30] = 1
gt_masks1[2, 40:50, 40:50] = 1
result_masks1[2, 47:50, 47:50] = 1
gt_class1 = np.array([1, 1, 1])
gt_is_crowd1 = np.array([0, 0, 1])
result_class1 = np.array([1, 1, 1])
  # image2 contains 1 dog and 1 cat in groundtruth, while the prediction
  # contains 1 person and 1 dog.
gt_masks2 = np.zeros((2, 30, 40), dtype=np.uint8)
result_masks2 = np.zeros((2, 30, 40), dtype=np.uint8)
gt_masks2[0, 5:15, 20:35] = 1
gt_masks2[1, 20:30, 0:10] = 1
result_masks2[0, 20:25, 10:15] = 1
result_masks2[1, 6:15, 15:35] = 1
gt_class2 = np.array([2, 3])
gt_is_crowd2 = np.array([0, 0])
result_class2 = np.array([1, 2])
gt_class = [gt_class1, gt_class2]
gt_masks = [gt_masks1, gt_masks2]
gt_is_crowd = [gt_is_crowd1, gt_is_crowd2]
result_class = [result_class1, result_class2]
result_masks = [result_masks1, result_masks2]
return gt_class, gt_masks, gt_is_crowd, result_class, result_masks
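# A sketch of where the expected values asserted in the tests below come from,
# assuming the standard panoptic quality definition
# PQ = sum_over_TP(IoU) / (|TP| + 0.5 * |FP| + 0.5 * |FN|) with a 0.5 IoU
# matching threshold and crowd regions ignored:
#   person: one TP in image1 (IoU = 80 / 100 = 0.8), one FN (the second person
#     in image1) and two FPs (the unmatched prediction in image1 plus the
#     person prediction in image2; the prediction inside the crowd region is
#     ignored), giving PQ = 0.8 / (1 + 0.5 * 2 + 0.5 * 1) = 0.32.
#   dog: one TP in image2 with intersection 135 and union 195, giving
#     PQ = 135.0 / 195.
#   cat: one FN and no TP, giving PQ = 0.
# Averaging over the 3 valid classes yields (0.32 + 135.0 / 195 + 0) / 3.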
class CocoPanopticEvaluationTest(tf.test.TestCase):
def test_panoptic_quality(self):
pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator(
_get_categories_list(), include_metrics_per_category=True)
(gt_class, gt_masks, gt_is_crowd, result_class,
result_masks) = _get_panoptic_test_data()
for i in range(2):
pq_evaluator.add_single_ground_truth_image_info(
image_id='image%d' % i,
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_classes:
gt_class[i],
standard_fields.InputDataFields.groundtruth_instance_masks:
gt_masks[i],
standard_fields.InputDataFields.groundtruth_is_crowd:
gt_is_crowd[i]
})
pq_evaluator.add_single_detected_image_info(
image_id='image%d' % i,
detections_dict={
standard_fields.DetectionResultFields.detection_classes:
result_class[i],
standard_fields.DetectionResultFields.detection_masks:
result_masks[i]
})
metrics = pq_evaluator.evaluate()
self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/person'],
0.32)
self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/dog'],
135.0 / 195)
self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/cat'], 0)
self.assertAlmostEqual(metrics['SegmentationQuality@0.50IOU'],
(0.8 + 135.0 / 195) / 3)
self.assertAlmostEqual(metrics['RecognitionQuality@0.50IOU'], (0.4 + 1) / 3)
self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'],
(0.32 + 135.0 / 195) / 3)
self.assertEqual(metrics['NumValidClasses'], 3)
self.assertEqual(metrics['NumTotalClasses'], 3)
@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X')
class CocoPanopticEvaluationPyFuncTest(tf.test.TestCase):
def testPanopticQualityNoBatch(self):
pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator(
_get_categories_list(), include_metrics_per_category=True)
image_id = tf.placeholder(tf.string, shape=())
groundtruth_classes = tf.placeholder(tf.int32, shape=(None))
groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None))
groundtruth_is_crowd = tf.placeholder(tf.int32, shape=(None))
detection_classes = tf.placeholder(tf.int32, shape=(None))
detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_instance_masks: groundtruth_masks,
input_data_fields.groundtruth_is_crowd: groundtruth_is_crowd,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
}
eval_metric_ops = pq_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['PanopticQuality@0.50IOU']
(gt_class, gt_masks, gt_is_crowd, result_class,
result_masks) = _get_panoptic_test_data()
with self.test_session() as sess:
for i in range(2):
sess.run(
update_op,
feed_dict={
image_id: 'image%d' % i,
groundtruth_classes: gt_class[i],
groundtruth_masks: gt_masks[i],
groundtruth_is_crowd: gt_is_crowd[i],
detection_classes: result_class[i],
detection_masks: result_masks[i]
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'],
(0.32 + 135.0 / 195) / 3)
def testPanopticQualityBatched(self):
pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator(
_get_categories_list(), include_metrics_per_category=True)
batch_size = 2
image_id = tf.placeholder(tf.string, shape=(batch_size))
groundtruth_classes = tf.placeholder(tf.int32, shape=(batch_size, None))
groundtruth_masks = tf.placeholder(
tf.uint8, shape=(batch_size, None, None, None))
groundtruth_is_crowd = tf.placeholder(tf.int32, shape=(batch_size, None))
detection_classes = tf.placeholder(tf.int32, shape=(batch_size, None))
detection_masks = tf.placeholder(
tf.uint8, shape=(batch_size, None, None, None))
num_gt_masks_per_image = tf.placeholder(tf.int32, shape=(batch_size))
num_det_masks_per_image = tf.placeholder(tf.int32, shape=(batch_size))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_instance_masks: groundtruth_masks,
input_data_fields.groundtruth_is_crowd: groundtruth_is_crowd,
input_data_fields.num_groundtruth_boxes: num_gt_masks_per_image,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
detection_fields.num_detections: num_det_masks_per_image,
}
eval_metric_ops = pq_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['PanopticQuality@0.50IOU']
(gt_class, gt_masks, gt_is_crowd, result_class,
result_masks) = _get_panoptic_test_data()
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id: ['image0', 'image1'],
groundtruth_classes:
np.stack([
gt_class[0],
np.pad(gt_class[1], (0, 1), mode='constant')
],
axis=0),
groundtruth_masks:
np.stack([
np.pad(
gt_masks[0], ((0, 0), (0, 10), (0, 10)),
mode='constant'),
np.pad(
gt_masks[1], ((0, 1), (0, 30), (0, 20)),
mode='constant'),
],
axis=0),
groundtruth_is_crowd:
np.stack([
gt_is_crowd[0],
np.pad(gt_is_crowd[1], (0, 1), mode='constant')
],
axis=0),
num_gt_masks_per_image: np.array([3, 2]),
detection_classes:
np.stack([
result_class[0],
np.pad(result_class[1], (0, 1), mode='constant')
],
axis=0),
detection_masks:
np.stack([
np.pad(
result_masks[0], ((0, 0), (0, 10), (0, 10)),
mode='constant'),
np.pad(
result_masks[1], ((0, 1), (0, 30), (0, 20)),
mode='constant'),
],
axis=0),
num_det_masks_per_image: np.array([3, 2]),
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'],
(0.32 + 135.0 / 195) / 3)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/coco_evaluation_test.py | coco_evaluation_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts data from CSV format to the VRDDetectionEvaluator format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from object_detection.core import standard_fields
from object_detection.utils import vrd_evaluation
def build_groundtruth_vrd_dictionary(data, class_label_map,
relationship_label_map):
"""Builds a groundtruth dictionary from groundtruth data in CSV file.
Args:
data: Pandas DataFrame with the groundtruth data for a single image.
class_label_map: Class labelmap from string label name to an integer.
relationship_label_map: Relationship type labelmap from string name to an
integer.
Returns:
A dictionary with keys suitable for passing to
VRDDetectionEvaluator.add_single_ground_truth_image_info:
standard_fields.InputDataFields.groundtruth_boxes: A numpy array
of structures with the shape [M, 1], representing M tuples, each tuple
containing the same number of named bounding boxes.
        Each box is of the format [y_min, x_min, y_max, x_max] (see the
        vrd_box_data_type and single_box_data_type datatypes in
        vrd_evaluation).
standard_fields.InputDataFields.groundtruth_classes: A numpy array of
structures shape [M, 1], representing the class labels of the
        corresponding bounding boxes and possibly additional classes (see the
        label_data_type datatype in vrd_evaluation).
      standard_fields.InputDataFields.groundtruth_image_classes: numpy array
        of shape [K] containing verified image-level labels.
"""
data_boxes = data[data.LabelName.isnull()]
data_labels = data[data.LabelName1.isnull()]
boxes = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.vrd_box_data_type)
boxes['subject'] = data_boxes[['YMin1', 'XMin1', 'YMax1',
'XMax1']].to_numpy()
boxes['object'] = data_boxes[['YMin2', 'XMin2', 'YMax2', 'XMax2']].to_numpy()
labels = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.label_data_type)
labels['subject'] = data_boxes['LabelName1'].map(
lambda x: class_label_map[x]).to_numpy()
labels['object'] = data_boxes['LabelName2'].map(
lambda x: class_label_map[x]).to_numpy()
labels['relation'] = data_boxes['RelationshipLabel'].map(
lambda x: relationship_label_map[x]).to_numpy()
return {
standard_fields.InputDataFields.groundtruth_boxes:
boxes,
standard_fields.InputDataFields.groundtruth_classes:
labels,
standard_fields.InputDataFields.groundtruth_image_classes:
data_labels['LabelName'].map(lambda x: class_label_map[x])
.to_numpy(),
}
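# Illustrative sketch (not part of the module): the expected DataFrame layout.
# Box rows leave `LabelName` empty and carry the subject/object boxes, while
# image-level label rows leave `LabelName1` empty. All values below are
# hypothetical.
#
#   import pandas as pd
#   data = pd.DataFrame({
#       'LabelName': [None, 'dog'],
#       'LabelName1': ['dog', None], 'LabelName2': ['cat', None],
#       'RelationshipLabel': ['at', None],
#       'YMin1': [0.1, None], 'XMin1': [0.1, None],
#       'YMax1': [0.5, None], 'XMax1': [0.5, None],
#       'YMin2': [0.2, None], 'XMin2': [0.2, None],
#       'YMax2': [0.6, None], 'XMax2': [0.6, None],
#   })
#   groundtruth = build_groundtruth_vrd_dictionary(
#       data, class_label_map={'dog': 1, 'cat': 2},
#       relationship_label_map={'at': 1})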
def build_predictions_vrd_dictionary(data, class_label_map,
relationship_label_map):
"""Builds a predictions dictionary from predictions data in CSV file.
Args:
data: Pandas DataFrame with the predictions data for a single image.
class_label_map: Class labelmap from string label name to an integer.
relationship_label_map: Relationship type labelmap from string name to an
integer.
Returns:
Dictionary with keys suitable for passing to
VRDDetectionEvaluator.add_single_detected_image_info:
standard_fields.DetectionResultFields.detection_boxes: A numpy array of
structures with shape [N, 1], representing N tuples, each tuple
containing the same number of named bounding boxes.
        Each box is of the format [y_min, x_min, y_max, x_max] (see the
        vrd_box_data_type and single_box_data_type datatypes in
        vrd_evaluation).
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [N] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: A numpy array
of structures shape [N, 1], representing the class labels of the
        corresponding bounding boxes and possibly additional classes (see the
        label_data_type datatype in vrd_evaluation).
"""
data_boxes = data
boxes = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.vrd_box_data_type)
boxes['subject'] = data_boxes[['YMin1', 'XMin1', 'YMax1',
'XMax1']].to_numpy()
boxes['object'] = data_boxes[['YMin2', 'XMin2', 'YMax2', 'XMax2']].to_numpy()
labels = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.label_data_type)
labels['subject'] = data_boxes['LabelName1'].map(
lambda x: class_label_map[x]).to_numpy()
labels['object'] = data_boxes['LabelName2'].map(
lambda x: class_label_map[x]).to_numpy()
labels['relation'] = data_boxes['RelationshipLabel'].map(
lambda x: relationship_label_map[x]).to_numpy()
return {
standard_fields.DetectionResultFields.detection_boxes:
boxes,
standard_fields.DetectionResultFields.detection_classes:
labels,
standard_fields.DetectionResultFields.detection_scores:
data_boxes['Score'].to_numpy()
}
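# The predictions DataFrame follows the same box-row layout sketched above,
# except that every row is a box row and a float 'Score' column is required
# (e.g. adding 'Score': [0.9] to the hypothetical box row above).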
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/oid_vrd_challenge_evaluation_utils.py | oid_vrd_challenge_evaluation_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating object detections with COCO metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_tools
from object_detection.utils import json_utils
from object_detection.utils import np_mask_ops
from object_detection.utils import object_detection_evaluation
class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
"""Class to evaluate COCO detection metrics."""
def __init__(self,
categories,
include_metrics_per_category=False,
all_metrics_per_category=False,
skip_predictions_for_unlabeled_class=False,
super_categories=None):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
include_metrics_per_category: If True, include metrics for each category.
all_metrics_per_category: Whether to include all the summary metrics for
        each category in per_category_ap. Be careful with setting it to true
        if you have more than a handful of categories, because it will
        pollute your mldash.
      skip_predictions_for_unlabeled_class: Skip predictions that do not
        match the labeled classes for the image.
super_categories: None or a python dict mapping super-category names
(strings) to lists of categories (corresponding to category names
        in the label_map). Metrics are aggregated along these super-categories
        and added to `per_category_ap`, keyed as
        `PerformanceBySuperCategory/<super-category-name>`.
"""
super(CocoDetectionEvaluator, self).__init__(categories)
# _image_ids is a dictionary that maps unique image ids to Booleans which
# indicate whether a corresponding detection has been added.
self._image_ids = {}
self._groundtruth_list = []
self._detection_boxes_list = []
self._category_id_set = set([cat['id'] for cat in self._categories])
self._annotation_id = 1
self._metrics = None
self._include_metrics_per_category = include_metrics_per_category
self._all_metrics_per_category = all_metrics_per_category
    self._skip_predictions_for_unlabeled_class = (
        skip_predictions_for_unlabeled_class)
self._groundtruth_labeled_classes = {}
self._super_categories = super_categories
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._image_ids.clear()
self._groundtruth_list = []
self._detection_boxes_list = []
def add_single_ground_truth_image_info(self,
image_id,
groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
If the image has already been added, a warning is logged, and groundtruth is
ignored.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
InputDataFields.groundtruth_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_boxes] containing 1-indexed groundtruth classes for the boxes.
InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
shape [num_boxes] containing iscrowd flag for groundtruth boxes.
InputDataFields.groundtruth_area (optional): float numpy array of
shape [num_boxes] containing the area (in the original absolute
coordinates) of the annotated object.
InputDataFields.groundtruth_keypoints (optional): float numpy array of
keypoints with shape [num_boxes, num_keypoints, 2].
InputDataFields.groundtruth_keypoint_visibilities (optional): integer
numpy array of keypoint visibilities with shape [num_gt_boxes,
num_keypoints]. Integer is treated as an enum with 0=not labeled,
1=labeled but not visible and 2=labeled and visible.
InputDataFields.groundtruth_labeled_classes (optional): a tensor of
shape [num_classes + 1] containing the multi-hot tensor indicating the
          classes that each image is labeled for. Note that the class labels
          are 1-indexed.
"""
if image_id in self._image_ids:
tf.logging.warning('Ignoring ground truth with image id %s since it was '
'previously added', image_id)
return
# Drop optional fields if empty tensor.
groundtruth_is_crowd = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_is_crowd)
groundtruth_area = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_area)
groundtruth_keypoints = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_keypoints)
groundtruth_keypoint_visibilities = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_keypoint_visibilities)
if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]:
groundtruth_is_crowd = None
if groundtruth_area is not None and not groundtruth_area.shape[0]:
groundtruth_area = None
if groundtruth_keypoints is not None and not groundtruth_keypoints.shape[0]:
groundtruth_keypoints = None
    if (groundtruth_keypoint_visibilities is not None and
        not groundtruth_keypoint_visibilities.shape[0]):
      groundtruth_keypoint_visibilities = None
self._groundtruth_list.extend(
coco_tools.ExportSingleImageGroundtruthToCoco(
image_id=image_id,
next_annotation_id=self._annotation_id,
category_id_set=self._category_id_set,
groundtruth_boxes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_classes],
groundtruth_is_crowd=groundtruth_is_crowd,
groundtruth_area=groundtruth_area,
groundtruth_keypoints=groundtruth_keypoints,
groundtruth_keypoint_visibilities=groundtruth_keypoint_visibilities)
)
    self._annotation_id += groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_boxes].shape[0]
if (standard_fields.InputDataFields.groundtruth_labeled_classes
) in groundtruth_dict:
labeled_classes = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_labeled_classes]
if labeled_classes.shape != (len(self._category_id_set) + 1,):
raise ValueError('Invalid shape for groundtruth labeled classes: {}, '
'num_categories_including_background: {}'.format(
labeled_classes,
len(self._category_id_set) + 1))
self._groundtruth_labeled_classes[image_id] = np.flatnonzero(
groundtruth_dict[standard_fields.InputDataFields
.groundtruth_labeled_classes] == 1).tolist()
# Boolean to indicate whether a detection has been added for this image.
self._image_ids[image_id] = False
def add_single_detected_image_info(self,
image_id,
detections_dict):
"""Adds detections for a single image to be used for evaluation.
If a detection has already been added for this image id, a warning is
logged, and the detection is skipped.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
DetectionResultFields.detection_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` detection boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
DetectionResultFields.detection_scores: float32 numpy array of shape
[num_boxes] containing detection scores for the boxes.
DetectionResultFields.detection_classes: integer numpy array of shape
[num_boxes] containing 1-indexed detection classes for the boxes.
DetectionResultFields.detection_keypoints (optional): float numpy array
of keypoints with shape [num_boxes, num_keypoints, 2].
Raises:
ValueError: If groundtruth for the image_id is not available.
"""
if image_id not in self._image_ids:
raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
if self._image_ids[image_id]:
tf.logging.warning('Ignoring detection with image id %s since it was '
'previously added', image_id)
return
# Drop optional fields if empty tensor.
detection_keypoints = detections_dict.get(
standard_fields.DetectionResultFields.detection_keypoints)
if detection_keypoints is not None and not detection_keypoints.shape[0]:
detection_keypoints = None
if self._skip_predictions_for_unlabeled_class:
det_classes = detections_dict[
standard_fields.DetectionResultFields.detection_classes]
num_det_boxes = det_classes.shape[0]
keep_box_ids = []
for box_id in range(num_det_boxes):
if det_classes[box_id] in self._groundtruth_labeled_classes[image_id]:
keep_box_ids.append(box_id)
self._detection_boxes_list.extend(
coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id=image_id,
category_id_set=self._category_id_set,
detection_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes]
[keep_box_ids],
detection_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores]
[keep_box_ids],
detection_classes=detections_dict[
standard_fields.DetectionResultFields.detection_classes]
[keep_box_ids],
detection_keypoints=detection_keypoints))
else:
self._detection_boxes_list.extend(
coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id=image_id,
category_id_set=self._category_id_set,
detection_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes],
detection_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detection_classes=detections_dict[
standard_fields.DetectionResultFields.detection_classes],
detection_keypoints=detection_keypoints))
self._image_ids[image_id] = True
def dump_detections_to_json_file(self, json_output_path):
"""Saves the detections into json_output_path in the format used by MS COCO.
Args:
      json_output_path: String containing the output file's path. It can also
        be None, in which case nothing will be written to the output file.
"""
    if json_output_path:
with tf.gfile.GFile(json_output_path, 'w') as fid:
tf.logging.info('Dumping detections to output json file.')
json_utils.Dump(
obj=self._detection_boxes_list, fid=fid, float_digits=4, indent=2)
def evaluate(self):
"""Evaluates the detection boxes and returns a dictionary of coco metrics.
Returns:
A dictionary holding -
1. summary_metrics:
'DetectionBoxes_Precision/mAP': mean average precision over classes
averaged over IOU thresholds ranging from .5 to .95 with .05
increments.
'DetectionBoxes_Precision/mAP@.50IOU': mean average precision at 50% IOU
'DetectionBoxes_Precision/mAP@.75IOU': mean average precision at 75% IOU
'DetectionBoxes_Precision/mAP (small)': mean average precision for small
objects (area < 32^2 pixels).
'DetectionBoxes_Precision/mAP (medium)': mean average precision for
medium sized objects (32^2 pixels < area < 96^2 pixels).
'DetectionBoxes_Precision/mAP (large)': mean average precision for large
objects (96^2 pixels < area < 10000^2 pixels).
'DetectionBoxes_Recall/AR@1': average recall with 1 detection.
'DetectionBoxes_Recall/AR@10': average recall with 10 detections.
'DetectionBoxes_Recall/AR@100': average recall with 100 detections.
      'DetectionBoxes_Recall/AR@100 (small)': average recall for small
        objects with 100 detections.
      'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium
        objects with 100 detections.
      'DetectionBoxes_Recall/AR@100 (large)': average recall for large
        objects with 100 detections.
2. per_category_ap: if include_metrics_per_category is True, category
specific results with keys of the form:
'Precision mAP ByCategory/category' (without the supercategory part if
no supercategories exist). For backward compatibility
'PerformanceByCategory' is included in the output regardless of
all_metrics_per_category.
If super_categories are provided, then this will additionally include
metrics aggregated along the super_categories with keys of the form:
`PerformanceBySuperCategory/<super-category-name>`
"""
tf.logging.info('Performing evaluation on %d images.', len(self._image_ids))
groundtruth_dict = {
'annotations': self._groundtruth_list,
'images': [{'id': image_id} for image_id in self._image_ids],
'categories': self._categories
}
coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
self._detection_boxes_list)
box_evaluator = coco_tools.COCOEvalWrapper(
coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False)
box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
include_metrics_per_category=self._include_metrics_per_category,
all_metrics_per_category=self._all_metrics_per_category,
super_categories=self._super_categories)
box_metrics.update(box_per_category_ap)
box_metrics = {'DetectionBoxes_'+ key: value
for key, value in iter(box_metrics.items())}
return box_metrics
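  # Minimal usage sketch (illustrative, mirroring the unit tests; category ids
  # and values are hypothetical):
  #
  #   evaluator = CocoDetectionEvaluator([{'id': 1, 'name': 'person'}])
  #   evaluator.add_single_ground_truth_image_info(
  #       'image1', {
  #           standard_fields.InputDataFields.groundtruth_boxes:
  #               np.array([[100., 100., 200., 200.]]),
  #           standard_fields.InputDataFields.groundtruth_classes:
  #               np.array([1]),
  #       })
  #   evaluator.add_single_detected_image_info(
  #       'image1', {
  #           standard_fields.DetectionResultFields.detection_boxes:
  #               np.array([[100., 100., 200., 200.]]),
  #           standard_fields.DetectionResultFields.detection_scores:
  #               np.array([.8]),
  #           standard_fields.DetectionResultFields.detection_classes:
  #               np.array([1]),
  #       })
  #   metrics = evaluator.evaluate()
  #   # e.g. metrics['DetectionBoxes_Precision/mAP']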
def add_eval_dict(self, eval_dict):
"""Observes an evaluation result dict for a single example.
    When executing eagerly, once all examples have been observed by this
    method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
def update_op(image_id_batched, groundtruth_boxes_batched,
groundtruth_classes_batched, groundtruth_is_crowd_batched,
groundtruth_labeled_classes_batched, num_gt_boxes_per_image,
detection_boxes_batched, detection_scores_batched,
detection_classes_batched, num_det_boxes_per_image,
is_annotated_batched):
"""Update operation for adding batch of images to Coco evaluator."""
for (image_id, gt_box, gt_class, gt_is_crowd, gt_labeled_classes,
num_gt_box, det_box, det_score, det_class,
num_det_box, is_annotated) in zip(
image_id_batched, groundtruth_boxes_batched,
groundtruth_classes_batched, groundtruth_is_crowd_batched,
groundtruth_labeled_classes_batched, num_gt_boxes_per_image,
detection_boxes_batched, detection_scores_batched,
detection_classes_batched, num_det_boxes_per_image,
is_annotated_batched):
if is_annotated:
self.add_single_ground_truth_image_info(
image_id, {
'groundtruth_boxes': gt_box[:num_gt_box],
'groundtruth_classes': gt_class[:num_gt_box],
'groundtruth_is_crowd': gt_is_crowd[:num_gt_box],
'groundtruth_labeled_classes': gt_labeled_classes
})
self.add_single_detected_image_info(
image_id,
{'detection_boxes': det_box[:num_det_box],
'detection_scores': det_score[:num_det_box],
'detection_classes': det_class[:num_det_box]})
# Unpack items from the evaluation dictionary.
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
image_id = eval_dict[input_data_fields.key]
groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
groundtruth_is_crowd = eval_dict.get(
input_data_fields.groundtruth_is_crowd, None)
groundtruth_labeled_classes = eval_dict.get(
input_data_fields.groundtruth_labeled_classes, None)
detection_boxes = eval_dict[detection_fields.detection_boxes]
detection_scores = eval_dict[detection_fields.detection_scores]
detection_classes = eval_dict[detection_fields.detection_classes]
num_gt_boxes_per_image = eval_dict.get(
input_data_fields.num_groundtruth_boxes, None)
num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections,
None)
is_annotated = eval_dict.get('is_annotated', None)
if groundtruth_is_crowd is None:
groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
# If groundtruth_labeled_classes is not provided, make it equal to the
# detection_classes. This assumes that all predictions will be kept to
# compute eval metrics.
if groundtruth_labeled_classes is None:
groundtruth_labeled_classes = tf.reduce_max(
tf.one_hot(
tf.cast(detection_classes, tf.int32),
len(self._category_id_set) + 1),
axis=-2)
if not image_id.shape.as_list():
# Apply a batch dimension to all tensors.
image_id = tf.expand_dims(image_id, 0)
groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
groundtruth_labeled_classes = tf.expand_dims(groundtruth_labeled_classes,
0)
detection_boxes = tf.expand_dims(detection_boxes, 0)
detection_scores = tf.expand_dims(detection_scores, 0)
detection_classes = tf.expand_dims(detection_classes, 0)
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
else:
num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
else:
num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
if is_annotated is None:
is_annotated = tf.constant([True])
else:
is_annotated = tf.expand_dims(is_annotated, 0)
else:
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.tile(
tf.shape(groundtruth_boxes)[1:2],
multiples=tf.shape(groundtruth_boxes)[0:1])
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.tile(
tf.shape(detection_boxes)[1:2],
multiples=tf.shape(detection_boxes)[0:1])
if is_annotated is None:
is_annotated = tf.ones_like(image_id, dtype=tf.bool)
return tf.py_func(update_op, [
image_id, groundtruth_boxes, groundtruth_classes, groundtruth_is_crowd,
groundtruth_labeled_classes, num_gt_boxes_per_image, detection_boxes,
detection_scores, detection_classes, num_det_boxes_per_image,
is_annotated
], [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns a dictionary of eval metric ops.
Note that once value_op is called, the detections and groundtruth added via
update_op are cleared.
This function can take in groundtruth and detections for a batch of images,
or for a single image. For the latter case, the batch dimension for input
tensors need not be present.
Args:
eval_dict: A dictionary that holds tensors for evaluating object detection
performance. For single-image evaluation, this dictionary may be
        produced from eval_util.result_dict_for_single_example(). For multi-image
evaluation, `eval_dict` should contain the fields
'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
properly unpad the tensors from the batch.
Returns:
a dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
update ops must be run together and similarly all value ops must be run
together to guarantee correct behaviour.
"""
update_op = self.add_eval_dict(eval_dict)
metric_names = ['DetectionBoxes_Precision/mAP',
'DetectionBoxes_Precision/mAP@.50IOU',
'DetectionBoxes_Precision/mAP@.75IOU',
'DetectionBoxes_Precision/mAP (large)',
'DetectionBoxes_Precision/mAP (medium)',
'DetectionBoxes_Precision/mAP (small)',
'DetectionBoxes_Recall/AR@1',
'DetectionBoxes_Recall/AR@10',
'DetectionBoxes_Recall/AR@100',
'DetectionBoxes_Recall/AR@100 (large)',
'DetectionBoxes_Recall/AR@100 (medium)',
'DetectionBoxes_Recall/AR@100 (small)']
if self._include_metrics_per_category:
for category_dict in self._categories:
metric_names.append('DetectionBoxes_PerformanceByCategory/mAP/' +
category_dict['name'])
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
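# The function below is an illustrative usage sketch, not part of the public
# API: it shows the direct (eager-style) evaluation flow for
# CocoDetectionEvaluator. All ids, categories, boxes and scores are invented
# for the example.
def _example_coco_box_evaluation():
  """Minimal usage sketch for CocoDetectionEvaluator (illustrative only)."""
  evaluator = CocoDetectionEvaluator(
      categories=[{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}])
  evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[100., 100., 200., 200.]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1], dtype=np.int32)
      })
  evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[100., 100., 200., 200.]], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1], dtype=np.int32)
      })
  # Returns a dict keyed by 'DetectionBoxes_*' metric names.
  return evaluator.evaluate()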
def convert_masks_to_binary(masks):
"""Converts masks to 0 or 1 and uint8 type."""
return (masks > 0).astype(np.uint8)
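# For example (illustrative): convert_masks_to_binary(np.array([[0., 0.7]]))
# returns np.array([[0, 1]], dtype=np.uint8); any strictly positive value maps
# to 1.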
class CocoKeypointEvaluator(CocoDetectionEvaluator):
"""Class to evaluate COCO keypoint metrics."""
def __init__(self,
category_id,
category_keypoints,
class_text,
oks_sigmas=None):
"""Constructor.
Args:
category_id: An integer id uniquely identifying this category.
category_keypoints: A list specifying keypoint mappings, with items:
'id': (required) an integer id identifying the keypoint.
'name': (required) a string representing the keypoint name.
class_text: A string representing the category name for which keypoint
metrics are to be computed.
oks_sigmas: A dict of keypoint name to standard deviation values for OKS
        metrics. If not provided, a default value of 0.05 will be used for
        all keypoints.
"""
self._category_id = category_id
self._category_name = class_text
self._keypoint_ids = sorted(
[keypoint['id'] for keypoint in category_keypoints])
kpt_id_to_name = {kpt['id']: kpt['name'] for kpt in category_keypoints}
if oks_sigmas:
self._oks_sigmas = np.array([
oks_sigmas[kpt_id_to_name[idx]] for idx in self._keypoint_ids
])
else:
      # Default all per-keypoint sigmas to 0.05.
      self._oks_sigmas = np.full(len(self._keypoint_ids), 0.05)
      tf.logging.warning('No keypoint OKS sigmas provided. Using the default '
                         'value of 0.05 for all keypoints.')
tf.logging.info('Using the following keypoint OKS sigmas: {}'.format(
self._oks_sigmas))
self._metrics = None
super(CocoKeypointEvaluator, self).__init__([{
'id': self._category_id,
'name': class_text
}])
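  # Illustrative constructor call (ids, names and sigmas are assumed values,
  # not taken from any real label map); oks_sigmas, when given, must contain
  # an entry for every keypoint name:
  #
  #   evaluator = CocoKeypointEvaluator(
  #       category_id=1,
  #       category_keypoints=[{'id': 0, 'name': 'nose'},
  #                           {'id': 1, 'name': 'left_eye'}],
  #       class_text='person',
  #       oks_sigmas={'nose': 0.026, 'left_eye': 0.025})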
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image with keypoints.
If the image has already been added, a warning is logged, and groundtruth
is ignored.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
InputDataFields.groundtruth_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_boxes] containing 1-indexed groundtruth classes for the boxes.
InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
shape [num_boxes] containing iscrowd flag for groundtruth boxes.
InputDataFields.groundtruth_area (optional): float numpy array of
shape [num_boxes] containing the area (in the original absolute
coordinates) of the annotated object.
InputDataFields.groundtruth_keypoints: float numpy array of
keypoints with shape [num_boxes, num_keypoints, 2].
InputDataFields.groundtruth_keypoint_visibilities (optional): integer
numpy array of keypoint visibilities with shape [num_gt_boxes,
          num_keypoints]. Integer is treated as an enum with 0=not labeled,
          1=labeled but not visible and 2=labeled and visible.
"""
# Keep only the groundtruth for our category and its keypoints.
groundtruth_classes = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_classes]
groundtruth_boxes = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes]
groundtruth_keypoints = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_keypoints]
class_indices = [
idx for idx, gt_class_id in enumerate(groundtruth_classes)
if gt_class_id == self._category_id
]
filtered_groundtruth_classes = np.take(
groundtruth_classes, class_indices, axis=0)
filtered_groundtruth_boxes = np.take(
groundtruth_boxes, class_indices, axis=0)
filtered_groundtruth_keypoints = np.take(
groundtruth_keypoints, class_indices, axis=0)
filtered_groundtruth_keypoints = np.take(
filtered_groundtruth_keypoints, self._keypoint_ids, axis=1)
filtered_groundtruth_dict = {}
filtered_groundtruth_dict[
standard_fields.InputDataFields
.groundtruth_classes] = filtered_groundtruth_classes
filtered_groundtruth_dict[standard_fields.InputDataFields
.groundtruth_boxes] = filtered_groundtruth_boxes
filtered_groundtruth_dict[
standard_fields.InputDataFields
.groundtruth_keypoints] = filtered_groundtruth_keypoints
if (standard_fields.InputDataFields.groundtruth_is_crowd in
groundtruth_dict.keys()):
groundtruth_is_crowd = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_is_crowd]
filtered_groundtruth_is_crowd = np.take(groundtruth_is_crowd,
class_indices, 0)
filtered_groundtruth_dict[
standard_fields.InputDataFields
.groundtruth_is_crowd] = filtered_groundtruth_is_crowd
if (standard_fields.InputDataFields.groundtruth_area in
groundtruth_dict.keys()):
groundtruth_area = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_area]
filtered_groundtruth_area = np.take(groundtruth_area, class_indices, 0)
filtered_groundtruth_dict[
standard_fields.InputDataFields
.groundtruth_area] = filtered_groundtruth_area
if (standard_fields.InputDataFields.groundtruth_keypoint_visibilities in
groundtruth_dict.keys()):
groundtruth_keypoint_visibilities = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_keypoint_visibilities]
filtered_groundtruth_keypoint_visibilities = np.take(
groundtruth_keypoint_visibilities, class_indices, axis=0)
filtered_groundtruth_keypoint_visibilities = np.take(
filtered_groundtruth_keypoint_visibilities,
self._keypoint_ids,
axis=1)
filtered_groundtruth_dict[
standard_fields.InputDataFields.
groundtruth_keypoint_visibilities] = filtered_groundtruth_keypoint_visibilities
super(CocoKeypointEvaluator,
self).add_single_ground_truth_image_info(image_id,
filtered_groundtruth_dict)
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image and the specific category for which keypoints are evaluated.
If a detection has already been added for this image id, a warning is
logged, and the detection is skipped.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
DetectionResultFields.detection_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` detection boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
DetectionResultFields.detection_scores: float32 numpy array of shape
[num_boxes] containing detection scores for the boxes.
DetectionResultFields.detection_classes: integer numpy array of shape
[num_boxes] containing 1-indexed detection classes for the boxes.
DetectionResultFields.detection_keypoints: float numpy array of
keypoints with shape [num_boxes, num_keypoints, 2].
Raises:
ValueError: If groundtruth for the image_id is not available.
"""
# Keep only the detections for our category and its keypoints.
detection_classes = detections_dict[
standard_fields.DetectionResultFields.detection_classes]
detection_boxes = detections_dict[
standard_fields.DetectionResultFields.detection_boxes]
detection_scores = detections_dict[
standard_fields.DetectionResultFields.detection_scores]
detection_keypoints = detections_dict[
standard_fields.DetectionResultFields.detection_keypoints]
class_indices = [
idx for idx, class_id in enumerate(detection_classes)
if class_id == self._category_id
]
filtered_detection_classes = np.take(
detection_classes, class_indices, axis=0)
filtered_detection_boxes = np.take(detection_boxes, class_indices, axis=0)
filtered_detection_scores = np.take(detection_scores, class_indices, axis=0)
filtered_detection_keypoints = np.take(
detection_keypoints, class_indices, axis=0)
filtered_detection_keypoints = np.take(
filtered_detection_keypoints, self._keypoint_ids, axis=1)
filtered_detections_dict = {}
filtered_detections_dict[standard_fields.DetectionResultFields
.detection_classes] = filtered_detection_classes
filtered_detections_dict[standard_fields.DetectionResultFields
.detection_boxes] = filtered_detection_boxes
filtered_detections_dict[standard_fields.DetectionResultFields
.detection_scores] = filtered_detection_scores
filtered_detections_dict[standard_fields.DetectionResultFields.
detection_keypoints] = filtered_detection_keypoints
super(CocoKeypointEvaluator,
self).add_single_detected_image_info(image_id,
filtered_detections_dict)
def evaluate(self):
"""Evaluates the keypoints and returns a dictionary of coco metrics.
Returns:
A dictionary holding -
1. summary_metrics:
'Keypoints_Precision/mAP': mean average precision over classes
averaged over OKS thresholds ranging from .5 to .95 with .05
increments.
'Keypoints_Precision/mAP@.50IOU': mean average precision at 50% OKS
'Keypoints_Precision/mAP@.75IOU': mean average precision at 75% OKS
'Keypoints_Precision/mAP (medium)': mean average precision for medium
sized objects (32^2 pixels < area < 96^2 pixels).
'Keypoints_Precision/mAP (large)': mean average precision for large
objects (96^2 pixels < area < 10000^2 pixels).
'Keypoints_Recall/AR@1': average recall with 1 detection.
'Keypoints_Recall/AR@10': average recall with 10 detections.
'Keypoints_Recall/AR@100': average recall with 100 detections.
      'Keypoints_Recall/AR@100 (medium)': average recall for medium objects
        with 100 detections.
'Keypoints_Recall/AR@100 (large)': average recall for large objects with
100 detections.
"""
tf.logging.info('Performing evaluation on %d images.', len(self._image_ids))
groundtruth_dict = {
'annotations': self._groundtruth_list,
'images': [{'id': image_id} for image_id in self._image_ids],
'categories': self._categories
}
coco_wrapped_groundtruth = coco_tools.COCOWrapper(
groundtruth_dict, detection_type='bbox')
coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
self._detection_boxes_list)
keypoint_evaluator = coco_tools.COCOEvalWrapper(
coco_wrapped_groundtruth,
coco_wrapped_detections,
agnostic_mode=False,
iou_type='keypoints',
oks_sigmas=self._oks_sigmas)
keypoint_metrics, _ = keypoint_evaluator.ComputeMetrics(
include_metrics_per_category=False, all_metrics_per_category=False)
keypoint_metrics = {
'Keypoints_' + key: value
for key, value in iter(keypoint_metrics.items())
}
return keypoint_metrics
def add_eval_dict(self, eval_dict):
"""Observes an evaluation result dict for a single example.
    When executing eagerly, once all examples have been observed by this
    method, you can call `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
def update_op(
image_id_batched,
groundtruth_boxes_batched,
groundtruth_classes_batched,
groundtruth_is_crowd_batched,
groundtruth_area_batched,
groundtruth_keypoints_batched,
groundtruth_keypoint_visibilities_batched,
num_gt_boxes_per_image,
detection_boxes_batched,
detection_scores_batched,
detection_classes_batched,
detection_keypoints_batched,
num_det_boxes_per_image,
is_annotated_batched):
"""Update operation for adding batch of images to Coco evaluator."""
for (image_id, gt_box, gt_class, gt_is_crowd, gt_area, gt_keyp,
gt_keyp_vis, num_gt_box, det_box, det_score, det_class, det_keyp,
num_det_box, is_annotated) in zip(
image_id_batched, groundtruth_boxes_batched,
groundtruth_classes_batched, groundtruth_is_crowd_batched,
groundtruth_area_batched, groundtruth_keypoints_batched,
groundtruth_keypoint_visibilities_batched,
num_gt_boxes_per_image, detection_boxes_batched,
detection_scores_batched, detection_classes_batched,
detection_keypoints_batched, num_det_boxes_per_image,
is_annotated_batched):
if is_annotated:
self.add_single_ground_truth_image_info(
image_id, {
'groundtruth_boxes': gt_box[:num_gt_box],
'groundtruth_classes': gt_class[:num_gt_box],
'groundtruth_is_crowd': gt_is_crowd[:num_gt_box],
'groundtruth_area': gt_area[:num_gt_box],
'groundtruth_keypoints': gt_keyp[:num_gt_box],
'groundtruth_keypoint_visibilities': gt_keyp_vis[:num_gt_box]
})
self.add_single_detected_image_info(
image_id, {
'detection_boxes': det_box[:num_det_box],
'detection_scores': det_score[:num_det_box],
'detection_classes': det_class[:num_det_box],
'detection_keypoints': det_keyp[:num_det_box],
})
# Unpack items from the evaluation dictionary.
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
image_id = eval_dict[input_data_fields.key]
groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd,
None)
groundtruth_area = eval_dict.get(input_data_fields.groundtruth_area, None)
groundtruth_keypoints = eval_dict[input_data_fields.groundtruth_keypoints]
groundtruth_keypoint_visibilities = eval_dict.get(
input_data_fields.groundtruth_keypoint_visibilities, None)
detection_boxes = eval_dict[detection_fields.detection_boxes]
detection_scores = eval_dict[detection_fields.detection_scores]
detection_classes = eval_dict[detection_fields.detection_classes]
detection_keypoints = eval_dict[detection_fields.detection_keypoints]
num_gt_boxes_per_image = eval_dict.get(
'num_groundtruth_boxes_per_image', None)
num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
is_annotated = eval_dict.get('is_annotated', None)
if groundtruth_is_crowd is None:
groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
if groundtruth_area is None:
groundtruth_area = tf.zeros_like(groundtruth_classes, dtype=tf.float32)
if not image_id.shape.as_list():
# Apply a batch dimension to all tensors.
image_id = tf.expand_dims(image_id, 0)
groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
groundtruth_area = tf.expand_dims(groundtruth_area, 0)
groundtruth_keypoints = tf.expand_dims(groundtruth_keypoints, 0)
detection_boxes = tf.expand_dims(detection_boxes, 0)
detection_scores = tf.expand_dims(detection_scores, 0)
detection_classes = tf.expand_dims(detection_classes, 0)
detection_keypoints = tf.expand_dims(detection_keypoints, 0)
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
else:
num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
else:
num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
if is_annotated is None:
is_annotated = tf.constant([True])
else:
is_annotated = tf.expand_dims(is_annotated, 0)
if groundtruth_keypoint_visibilities is None:
groundtruth_keypoint_visibilities = tf.fill([
tf.shape(groundtruth_boxes)[1],
tf.shape(groundtruth_keypoints)[2]
], tf.constant(2, dtype=tf.int32))
groundtruth_keypoint_visibilities = tf.expand_dims(
groundtruth_keypoint_visibilities, 0)
else:
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.tile(
tf.shape(groundtruth_boxes)[1:2],
multiples=tf.shape(groundtruth_boxes)[0:1])
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.tile(
tf.shape(detection_boxes)[1:2],
multiples=tf.shape(detection_boxes)[0:1])
if is_annotated is None:
is_annotated = tf.ones_like(image_id, dtype=tf.bool)
if groundtruth_keypoint_visibilities is None:
groundtruth_keypoint_visibilities = tf.fill([
tf.shape(groundtruth_keypoints)[1],
tf.shape(groundtruth_keypoints)[2]
], tf.constant(2, dtype=tf.int32))
groundtruth_keypoint_visibilities = tf.tile(
tf.expand_dims(groundtruth_keypoint_visibilities, 0),
multiples=[tf.shape(groundtruth_keypoints)[0], 1, 1])
return tf.py_func(update_op, [
image_id, groundtruth_boxes, groundtruth_classes, groundtruth_is_crowd,
groundtruth_area, groundtruth_keypoints,
groundtruth_keypoint_visibilities, num_gt_boxes_per_image,
detection_boxes, detection_scores, detection_classes,
detection_keypoints, num_det_boxes_per_image, is_annotated
], [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns a dictionary of eval metric ops.
Note that once value_op is called, the detections and groundtruth added via
update_op are cleared.
This function can take in groundtruth and detections for a batch of images,
or for a single image. For the latter case, the batch dimension for input
tensors need not be present.
Args:
eval_dict: A dictionary that holds tensors for evaluating object detection
performance. For single-image evaluation, this dictionary may be
        produced from eval_util.result_dict_for_single_example(). For multi-image
evaluation, `eval_dict` should contain the fields
'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
properly unpad the tensors from the batch.
Returns:
a dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
update ops must be run together and similarly all value ops must be run
together to guarantee correct behaviour.
"""
update_op = self.add_eval_dict(eval_dict)
category = self._category_name
metric_names = [
'Keypoints_Precision/mAP ByCategory/{}'.format(category),
'Keypoints_Precision/mAP@.50IOU ByCategory/{}'.format(category),
'Keypoints_Precision/mAP@.75IOU ByCategory/{}'.format(category),
'Keypoints_Precision/mAP (large) ByCategory/{}'.format(category),
'Keypoints_Precision/mAP (medium) ByCategory/{}'.format(category),
'Keypoints_Recall/AR@1 ByCategory/{}'.format(category),
'Keypoints_Recall/AR@10 ByCategory/{}'.format(category),
'Keypoints_Recall/AR@100 ByCategory/{}'.format(category),
'Keypoints_Recall/AR@100 (large) ByCategory/{}'.format(category),
'Keypoints_Recall/AR@100 (medium) ByCategory/{}'.format(category)
]
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
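# The function below is an illustrative sketch (not called anywhere) of direct
# keypoint evaluation. All ids, coordinates and visibilities are invented;
# keypoints are [y, x] pairs and visibility 2 means labeled and visible.
def _example_coco_keypoint_evaluation():
  """Minimal usage sketch for CocoKeypointEvaluator (illustrative only)."""
  evaluator = CocoKeypointEvaluator(
      category_id=1,
      category_keypoints=[{'id': 0, 'name': 'nose'}],
      class_text='person')
  evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[50., 50., 150., 150.]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1], dtype=np.int32),
          standard_fields.InputDataFields.groundtruth_keypoints:
              np.array([[[100., 100.]]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_keypoint_visibilities:
              np.array([[2]], dtype=np.int32)
      })
  evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[50., 50., 150., 150.]], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.9], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1], dtype=np.int32),
          standard_fields.DetectionResultFields.detection_keypoints:
              np.array([[[100., 100.]]], dtype=np.float32)
      })
  # Returns a dict keyed by 'Keypoints_*' metric names.
  return evaluator.evaluate()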
class CocoMaskEvaluator(object_detection_evaluation.DetectionEvaluator):
"""Class to evaluate COCO detection metrics."""
def __init__(self, categories,
include_metrics_per_category=False,
all_metrics_per_category=False,
super_categories=None):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
include_metrics_per_category: If True, include metrics for each category.
all_metrics_per_category: Whether to include all the summary metrics for
each category in per_category_ap. Be careful with setting it to true if
        you have more than a handful of categories, because it will pollute
your mldash.
super_categories: None or a python dict mapping super-category names
(strings) to lists of categories (corresponding to category names
in the label_map). Metrics are aggregated along these super-categories
and added to the `per_category_ap` and are associated with the name
`PerformanceBySuperCategory/<super-category-name>`.
"""
super(CocoMaskEvaluator, self).__init__(categories)
self._image_id_to_mask_shape_map = {}
self._image_ids_with_detections = set([])
self._groundtruth_list = []
self._detection_masks_list = []
self._category_id_set = set([cat['id'] for cat in self._categories])
self._annotation_id = 1
self._include_metrics_per_category = include_metrics_per_category
self._super_categories = super_categories
self._all_metrics_per_category = all_metrics_per_category
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._image_id_to_mask_shape_map.clear()
self._image_ids_with_detections.clear()
self._groundtruth_list = []
self._detection_masks_list = []
def add_single_ground_truth_image_info(self,
image_id,
groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
If the image has already been added, a warning is logged, and groundtruth is
ignored.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
InputDataFields.groundtruth_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_boxes] containing 1-indexed groundtruth classes for the boxes.
InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape
[num_boxes, image_height, image_width] containing groundtruth masks
corresponding to the boxes. The elements of the array must be in
{0, 1}.
InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
shape [num_boxes] containing iscrowd flag for groundtruth boxes.
InputDataFields.groundtruth_area (optional): float numpy array of
shape [num_boxes] containing the area (in the original absolute
coordinates) of the annotated object.
"""
if image_id in self._image_id_to_mask_shape_map:
tf.logging.warning('Ignoring ground truth with image id %s since it was '
'previously added', image_id)
return
# Drop optional fields if empty tensor.
groundtruth_is_crowd = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_is_crowd)
groundtruth_area = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_area)
if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]:
groundtruth_is_crowd = None
if groundtruth_area is not None and not groundtruth_area.shape[0]:
groundtruth_area = None
groundtruth_instance_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
groundtruth_instance_masks = convert_masks_to_binary(
groundtruth_instance_masks)
self._groundtruth_list.extend(
coco_tools.
ExportSingleImageGroundtruthToCoco(
image_id=image_id,
next_annotation_id=self._annotation_id,
category_id_set=self._category_id_set,
groundtruth_boxes=groundtruth_dict[standard_fields.InputDataFields.
groundtruth_boxes],
groundtruth_classes=groundtruth_dict[standard_fields.
InputDataFields.
groundtruth_classes],
groundtruth_masks=groundtruth_instance_masks,
groundtruth_is_crowd=groundtruth_is_crowd,
groundtruth_area=groundtruth_area))
self._annotation_id += groundtruth_dict[standard_fields.InputDataFields.
groundtruth_boxes].shape[0]
self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks].shape
def add_single_detected_image_info(self,
image_id,
detections_dict):
"""Adds detections for a single image to be used for evaluation.
If a detection has already been added for this image id, a warning is
logged, and the detection is skipped.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
DetectionResultFields.detection_scores: float32 numpy array of shape
[num_boxes] containing detection scores for the boxes.
DetectionResultFields.detection_classes: integer numpy array of shape
[num_boxes] containing 1-indexed detection classes for the boxes.
DetectionResultFields.detection_masks: optional uint8 numpy array of
shape [num_boxes, image_height, image_width] containing instance
masks corresponding to the boxes. The elements of the array must be
in {0, 1}.
Raises:
ValueError: If groundtruth for the image_id is not available or if
spatial shapes of groundtruth_instance_masks and detection_masks are
incompatible.
"""
if image_id not in self._image_id_to_mask_shape_map:
raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
if image_id in self._image_ids_with_detections:
tf.logging.warning('Ignoring detection with image id %s since it was '
'previously added', image_id)
return
groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id]
detection_masks = detections_dict[standard_fields.DetectionResultFields.
detection_masks]
if groundtruth_masks_shape[1:] != detection_masks.shape[1:]:
raise ValueError('Spatial shape of groundtruth masks and detection masks '
'are incompatible: {} vs {}'.format(
groundtruth_masks_shape,
detection_masks.shape))
detection_masks = convert_masks_to_binary(detection_masks)
self._detection_masks_list.extend(
coco_tools.ExportSingleImageDetectionMasksToCoco(
image_id=image_id,
category_id_set=self._category_id_set,
detection_masks=detection_masks,
detection_scores=detections_dict[standard_fields.
DetectionResultFields.
detection_scores],
detection_classes=detections_dict[standard_fields.
DetectionResultFields.
detection_classes]))
self._image_ids_with_detections.update([image_id])
def dump_detections_to_json_file(self, json_output_path):
"""Saves the detections into json_output_path in the format used by MS COCO.
Args:
      json_output_path: String containing the output file's path. It can also
        be None, in which case nothing will be written to the output file.
    """
    if json_output_path:
tf.logging.info('Dumping detections to output json file.')
with tf.gfile.GFile(json_output_path, 'w') as fid:
json_utils.Dump(
obj=self._detection_masks_list, fid=fid, float_digits=4, indent=2)
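  # For instance (path invented for illustration):
  #   evaluator.dump_detections_to_json_file('/tmp/mask_detections.json')
  # writes the accumulated COCO-format mask detections; passing None (or an
  # empty path) is a no-op.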
def evaluate(self):
"""Evaluates the detection masks and returns a dictionary of coco metrics.
Returns:
A dictionary holding -
1. summary_metrics:
'DetectionMasks_Precision/mAP': mean average precision over classes
averaged over IOU thresholds ranging from .5 to .95 with .05 increments.
'DetectionMasks_Precision/mAP@.50IOU': mean average precision at 50% IOU.
'DetectionMasks_Precision/mAP@.75IOU': mean average precision at 75% IOU.
'DetectionMasks_Precision/mAP (small)': mean average precision for small
objects (area < 32^2 pixels).
'DetectionMasks_Precision/mAP (medium)': mean average precision for medium
sized objects (32^2 pixels < area < 96^2 pixels).
'DetectionMasks_Precision/mAP (large)': mean average precision for large
objects (96^2 pixels < area < 10000^2 pixels).
'DetectionMasks_Recall/AR@1': average recall with 1 detection.
'DetectionMasks_Recall/AR@10': average recall with 10 detections.
'DetectionMasks_Recall/AR@100': average recall with 100 detections.
'DetectionMasks_Recall/AR@100 (small)': average recall for small objects
with 100 detections.
'DetectionMasks_Recall/AR@100 (medium)': average recall for medium objects
with 100 detections.
'DetectionMasks_Recall/AR@100 (large)': average recall for large objects
with 100 detections.
2. per_category_ap: if include_metrics_per_category is True, category
specific results with keys of the form:
'Precision mAP ByCategory/category' (without the supercategory part if
no supercategories exist). For backward compatibility
'PerformanceByCategory' is included in the output regardless of
all_metrics_per_category.
If super_categories are provided, then this will additionally include
metrics aggregated along the super_categories with keys of the form:
`PerformanceBySuperCategory/<super-category-name>`
"""
groundtruth_dict = {
'annotations': self._groundtruth_list,
'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]}
for image_id, shape in self._image_id_to_mask_shape_map.
items()],
'categories': self._categories
}
coco_wrapped_groundtruth = coco_tools.COCOWrapper(
groundtruth_dict, detection_type='segmentation')
coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations(
self._detection_masks_list)
mask_evaluator = coco_tools.COCOEvalWrapper(
coco_wrapped_groundtruth, coco_wrapped_detection_masks,
agnostic_mode=False, iou_type='segm')
mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics(
include_metrics_per_category=self._include_metrics_per_category,
super_categories=self._super_categories,
all_metrics_per_category=self._all_metrics_per_category)
mask_metrics.update(mask_per_category_ap)
mask_metrics = {'DetectionMasks_'+ key: value
for key, value in mask_metrics.items()}
return mask_metrics
def add_eval_dict(self, eval_dict):
"""Observes an evaluation result dict for a single example.
    When executing eagerly, once all examples have been observed by this
    method, you can call `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
def update_op(image_id_batched, groundtruth_boxes_batched,
groundtruth_classes_batched,
groundtruth_instance_masks_batched,
groundtruth_is_crowd_batched, num_gt_boxes_per_image,
detection_scores_batched, detection_classes_batched,
detection_masks_batched, num_det_boxes_per_image,
original_image_spatial_shape):
"""Update op for metrics."""
for (image_id, groundtruth_boxes, groundtruth_classes,
groundtruth_instance_masks, groundtruth_is_crowd, num_gt_box,
detection_scores, detection_classes,
detection_masks, num_det_box, original_image_shape) in zip(
image_id_batched, groundtruth_boxes_batched,
groundtruth_classes_batched, groundtruth_instance_masks_batched,
groundtruth_is_crowd_batched, num_gt_boxes_per_image,
detection_scores_batched, detection_classes_batched,
detection_masks_batched, num_det_boxes_per_image,
original_image_spatial_shape):
self.add_single_ground_truth_image_info(
image_id, {
'groundtruth_boxes':
groundtruth_boxes[:num_gt_box],
'groundtruth_classes':
groundtruth_classes[:num_gt_box],
'groundtruth_instance_masks':
groundtruth_instance_masks[
:num_gt_box,
:original_image_shape[0],
:original_image_shape[1]],
'groundtruth_is_crowd':
groundtruth_is_crowd[:num_gt_box]
})
self.add_single_detected_image_info(
image_id, {
'detection_scores': detection_scores[:num_det_box],
'detection_classes': detection_classes[:num_det_box],
'detection_masks': detection_masks[
:num_det_box,
:original_image_shape[0],
:original_image_shape[1]]
})
# Unpack items from the evaluation dictionary.
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
image_id = eval_dict[input_data_fields.key]
original_image_spatial_shape = eval_dict[
input_data_fields.original_image_spatial_shape]
groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
groundtruth_instance_masks = eval_dict[
input_data_fields.groundtruth_instance_masks]
groundtruth_is_crowd = eval_dict.get(
input_data_fields.groundtruth_is_crowd, None)
num_gt_boxes_per_image = eval_dict.get(
input_data_fields.num_groundtruth_boxes, None)
detection_scores = eval_dict[detection_fields.detection_scores]
detection_classes = eval_dict[detection_fields.detection_classes]
detection_masks = eval_dict[detection_fields.detection_masks]
num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections,
None)
if groundtruth_is_crowd is None:
groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
if not image_id.shape.as_list():
# Apply a batch dimension to all tensors.
image_id = tf.expand_dims(image_id, 0)
groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0)
groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
detection_scores = tf.expand_dims(detection_scores, 0)
detection_classes = tf.expand_dims(detection_classes, 0)
detection_masks = tf.expand_dims(detection_masks, 0)
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
else:
num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.shape(detection_scores)[1:2]
else:
num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
else:
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.tile(
tf.shape(groundtruth_boxes)[1:2],
multiples=tf.shape(groundtruth_boxes)[0:1])
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.tile(
tf.shape(detection_scores)[1:2],
multiples=tf.shape(detection_scores)[0:1])
return tf.py_func(update_op, [
image_id, groundtruth_boxes, groundtruth_classes,
groundtruth_instance_masks, groundtruth_is_crowd,
num_gt_boxes_per_image, detection_scores, detection_classes,
detection_masks, num_det_boxes_per_image, original_image_spatial_shape
], [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns a dictionary of eval metric ops.
Note that once value_op is called, the detections and groundtruth added via
update_op are cleared.
Args:
eval_dict: A dictionary that holds tensors for evaluating object detection
performance. For single-image evaluation, this dictionary may be
        produced from eval_util.result_dict_for_single_example(). For multi-image
evaluation, `eval_dict` should contain the fields
'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
properly unpad the tensors from the batch.
Returns:
a dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
update ops must be run together and similarly all value ops must be run
together to guarantee correct behaviour.
"""
update_op = self.add_eval_dict(eval_dict)
metric_names = ['DetectionMasks_Precision/mAP',
'DetectionMasks_Precision/mAP@.50IOU',
'DetectionMasks_Precision/mAP@.75IOU',
'DetectionMasks_Precision/mAP (small)',
'DetectionMasks_Precision/mAP (medium)',
'DetectionMasks_Precision/mAP (large)',
'DetectionMasks_Recall/AR@1',
'DetectionMasks_Recall/AR@10',
'DetectionMasks_Recall/AR@100',
'DetectionMasks_Recall/AR@100 (small)',
'DetectionMasks_Recall/AR@100 (medium)',
'DetectionMasks_Recall/AR@100 (large)']
if self._include_metrics_per_category:
for category_dict in self._categories:
metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' +
category_dict['name'])
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
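# The function below is an illustrative sketch of direct mask evaluation with
# a tiny invented mask. Masks are uint8 arrays of shape
# [num_instances, height, width] with values in {0, 1}.
def _example_coco_mask_evaluation():
  """Minimal usage sketch for CocoMaskEvaluator (illustrative only)."""
  evaluator = CocoMaskEvaluator(categories=[{'id': 1, 'name': 'cat'}])
  mask = np.zeros((1, 4, 4), dtype=np.uint8)
  mask[0, 1:3, 1:3] = 1  # A single 2x2 instance in a 4x4 image.
  evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[1., 1., 3., 3.]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1], dtype=np.int32),
          standard_fields.InputDataFields.groundtruth_instance_masks: mask
      })
  evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_scores:
              np.array([.8], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1], dtype=np.int32),
          standard_fields.DetectionResultFields.detection_masks: mask
      })
  # Returns a dict keyed by 'DetectionMasks_*' metric names.
  return evaluator.evaluate()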
class CocoPanopticSegmentationEvaluator(
object_detection_evaluation.DetectionEvaluator):
"""Class to evaluate PQ (panoptic quality) metric on COCO dataset.
More details about this metric: https://arxiv.org/pdf/1801.00868.pdf.
"""
def __init__(self,
categories,
include_metrics_per_category=False,
iou_threshold=0.5,
ioa_threshold=0.5):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
include_metrics_per_category: If True, include metrics for each category.
iou_threshold: intersection-over-union threshold for mask matching (with
normal groundtruths).
ioa_threshold: intersection-over-area threshold for mask matching with
"is_crowd" groundtruths.
"""
super(CocoPanopticSegmentationEvaluator, self).__init__(categories)
self._groundtruth_masks = {}
self._groundtruth_class_labels = {}
self._groundtruth_is_crowd = {}
self._predicted_masks = {}
self._predicted_class_labels = {}
self._include_metrics_per_category = include_metrics_per_category
self._iou_threshold = iou_threshold
self._ioa_threshold = ioa_threshold
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._groundtruth_masks.clear()
self._groundtruth_class_labels.clear()
self._groundtruth_is_crowd.clear()
self._predicted_masks.clear()
self._predicted_class_labels.clear()
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
If the image has already been added, a warning is logged, and groundtruth is
ignored.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_masks] containing 1-indexed groundtruth classes for the mask.
InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape
[num_masks, image_height, image_width] containing groundtruth masks.
The elements of the array must be in {0, 1}.
InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
shape [num_boxes] containing iscrowd flag for groundtruth boxes.
"""
if image_id in self._groundtruth_masks:
tf.logging.warning(
'Ignoring groundtruth with image %s, since it has already been '
'added to the ground truth database.', image_id)
return
self._groundtruth_masks[image_id] = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._groundtruth_class_labels[image_id] = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_classes]
groundtruth_is_crowd = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_is_crowd)
# Drop groundtruth_is_crowd if empty tensor.
    if groundtruth_is_crowd is not None and groundtruth_is_crowd.size == 0:
groundtruth_is_crowd = None
if groundtruth_is_crowd is not None:
self._groundtruth_is_crowd[image_id] = groundtruth_is_crowd
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
If a detection has already been added for this image id, a warning is
logged, and the detection is skipped.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
DetectionResultFields.detection_classes: integer numpy array of shape
[num_masks] containing 1-indexed detection classes for the masks.
DetectionResultFields.detection_masks: optional uint8 numpy array of
shape [num_masks, image_height, image_width] containing instance
masks. The elements of the array must be in {0, 1}.
Raises:
      ValueError: If the results and groundtruth shapes don't match.
"""
if image_id not in self._groundtruth_masks:
raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
    if image_id in self._predicted_masks:
      tf.logging.warning('Ignoring detection with image id %s since it was '
                         'previously added', image_id)
      return
    detection_masks = detections_dict[
        standard_fields.DetectionResultFields.detection_masks]
    # Validate the mask shape before mutating the evaluator state, so a
    # failed add leaves no partial detections behind.
    groundtruth_mask_shape = self._groundtruth_masks[image_id].shape
    if groundtruth_mask_shape[1:] != detection_masks.shape[1:]:
      raise ValueError("The shape of the results doesn't match groundtruth.")
    self._predicted_masks[image_id] = detection_masks
    self._predicted_class_labels[image_id] = detections_dict[
        standard_fields.DetectionResultFields.detection_classes]
def evaluate(self):
"""Evaluates the detection masks and returns a dictionary of coco metrics.
Returns:
A dictionary holding -
1. summary_metric:
'PanopticQuality@%.2fIOU': mean panoptic quality averaged over classes at
the required IOU.
'SegmentationQuality@%.2fIOU': mean segmentation quality averaged over
classes at the required IOU.
'RecognitionQuality@%.2fIOU': mean recognition quality averaged over
classes at the required IOU.
'NumValidClasses': number of valid classes. A valid class should have at
least one normal (is_crowd=0) groundtruth mask or one predicted mask.
'NumTotalClasses': number of total classes.
2. per_category_pq: if include_metrics_per_category is True, category
specific results with keys of the form:
'PanopticQuality@%.2fIOU_ByCategory/category'.
"""
# Evaluate and accumulate the iou/tp/fp/fn.
sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn = self._evaluate_all_masks()
# Compute PQ metric for each category and average over all classes.
mask_metrics = self._compute_panoptic_metrics(sum_tp_iou, sum_num_tp,
sum_num_fp, sum_num_fn)
return mask_metrics
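  # Sketch of the direct flow (inputs invented): after adding, per image,
  # groundtruth with 'groundtruth_classes' and 'groundtruth_instance_masks'
  # and detections with 'detection_classes' and 'detection_masks', evaluate()
  # returns e.g. {'PanopticQuality@0.50IOU': ...,
  # 'SegmentationQuality@0.50IOU': ..., 'RecognitionQuality@0.50IOU': ...,
  # 'NumValidClasses': ..., 'NumTotalClasses': ...} for the default
  # iou_threshold of 0.5.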
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns a dictionary of eval metric ops.
Note that once value_op is called, the detections and groundtruth added via
update_op are cleared.
Args:
eval_dict: A dictionary that holds tensors for evaluating object detection
performance. For single-image evaluation, this dictionary may be
        produced from eval_util.result_dict_for_single_example(). For multi-image
evaluation, `eval_dict` should contain the fields
'num_gt_masks_per_image' and 'num_det_masks_per_image' to properly unpad
the tensors from the batch.
Returns:
a dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
update ops must be run together and similarly all value ops must be run
together to guarantee correct behaviour.
"""
def update_op(image_id_batched, groundtruth_classes_batched,
groundtruth_instance_masks_batched,
groundtruth_is_crowd_batched, num_gt_masks_per_image,
detection_classes_batched, detection_masks_batched,
num_det_masks_per_image):
"""Update op for metrics."""
for (image_id, groundtruth_classes, groundtruth_instance_masks,
groundtruth_is_crowd, num_gt_mask, detection_classes,
detection_masks, num_det_mask) in zip(
image_id_batched, groundtruth_classes_batched,
groundtruth_instance_masks_batched, groundtruth_is_crowd_batched,
num_gt_masks_per_image, detection_classes_batched,
detection_masks_batched, num_det_masks_per_image):
self.add_single_ground_truth_image_info(
image_id, {
'groundtruth_classes':
groundtruth_classes[:num_gt_mask],
'groundtruth_instance_masks':
groundtruth_instance_masks[:num_gt_mask],
'groundtruth_is_crowd':
groundtruth_is_crowd[:num_gt_mask]
})
self.add_single_detected_image_info(
image_id, {
'detection_classes': detection_classes[:num_det_mask],
'detection_masks': detection_masks[:num_det_mask]
})
# Unpack items from the evaluation dictionary.
(image_id, groundtruth_classes, groundtruth_instance_masks,
groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
detection_masks, num_det_masks_per_image
) = self._unpack_evaluation_dictionary_items(eval_dict)
update_op = tf.py_func(update_op, [
image_id, groundtruth_classes, groundtruth_instance_masks,
groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
detection_masks, num_det_masks_per_image
], [])
metric_names = [
'PanopticQuality@%.2fIOU' % self._iou_threshold,
'SegmentationQuality@%.2fIOU' % self._iou_threshold,
'RecognitionQuality@%.2fIOU' % self._iou_threshold
]
if self._include_metrics_per_category:
for category_dict in self._categories:
metric_names.append('PanopticQuality@%.2fIOU_ByCategory/%s' %
(self._iou_threshold, category_dict['name']))
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
def _evaluate_all_masks(self):
"""Evaluate all masks and compute sum iou/TP/FP/FN."""
sum_num_tp = {category['id']: 0 for category in self._categories}
sum_num_fp = sum_num_tp.copy()
sum_num_fn = sum_num_tp.copy()
sum_tp_iou = sum_num_tp.copy()
for image_id in self._groundtruth_class_labels:
# Separate normal and is_crowd groundtruth
crowd_gt_indices = self._groundtruth_is_crowd.get(image_id)
(normal_gt_masks, normal_gt_classes, crowd_gt_masks,
crowd_gt_classes) = self._separate_normal_and_crowd_labels(
crowd_gt_indices, self._groundtruth_masks[image_id],
self._groundtruth_class_labels[image_id])
# Mask matching to normal GT.
predicted_masks = self._predicted_masks[image_id]
predicted_class_labels = self._predicted_class_labels[image_id]
(overlaps, pred_matched,
gt_matched) = self._match_predictions_to_groundtruths(
predicted_masks,
predicted_class_labels,
normal_gt_masks,
normal_gt_classes,
self._iou_threshold,
is_crowd=False,
with_replacement=False)
# Accumulate true positives.
for (class_id, is_matched, overlap) in zip(predicted_class_labels,
pred_matched, overlaps):
if is_matched:
sum_num_tp[class_id] += 1
sum_tp_iou[class_id] += overlap
# Accumulate false negatives.
for (class_id, is_matched) in zip(normal_gt_classes, gt_matched):
if not is_matched:
sum_num_fn[class_id] += 1
# Match remaining predictions to crowd gt.
remained_pred_indices = np.logical_not(pred_matched)
remained_pred_masks = predicted_masks[remained_pred_indices, :, :]
remained_pred_classes = predicted_class_labels[remained_pred_indices]
_, pred_matched, _ = self._match_predictions_to_groundtruths(
remained_pred_masks,
remained_pred_classes,
crowd_gt_masks,
crowd_gt_classes,
self._ioa_threshold,
is_crowd=True,
with_replacement=True)
# Accumulate false positives
for (class_id, is_matched) in zip(remained_pred_classes, pred_matched):
if not is_matched:
sum_num_fp[class_id] += 1
return sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn
def _compute_panoptic_metrics(self, sum_tp_iou, sum_num_tp, sum_num_fp,
sum_num_fn):
"""Compute PQ metric for each category and average over all classes.
Args:
sum_tp_iou: dict, summed true positive intersection-over-union (IoU) for
each class, keyed by class_id.
sum_num_tp: the total number of true positives for each class, keyed by
class_id.
sum_num_fp: the total number of false positives for each class, keyed by
class_id.
sum_num_fn: the total number of false negatives for each class, keyed by
class_id.
Returns:
mask_metrics: a dictionary containing averaged metrics over all classes,
and per-category metrics if required.
"""
mask_metrics = {}
sum_pq = 0
sum_sq = 0
sum_rq = 0
num_valid_classes = 0
for category in self._categories:
class_id = category['id']
(panoptic_quality, segmentation_quality,
recognition_quality) = self._compute_panoptic_metrics_single_class(
sum_tp_iou[class_id], sum_num_tp[class_id], sum_num_fp[class_id],
sum_num_fn[class_id])
if panoptic_quality is not None:
sum_pq += panoptic_quality
sum_sq += segmentation_quality
sum_rq += recognition_quality
num_valid_classes += 1
if self._include_metrics_per_category:
mask_metrics['PanopticQuality@%.2fIOU_ByCategory/%s' %
(self._iou_threshold,
category['name'])] = panoptic_quality
mask_metrics['PanopticQuality@%.2fIOU' %
self._iou_threshold] = sum_pq / num_valid_classes
mask_metrics['SegmentationQuality@%.2fIOU' %
self._iou_threshold] = sum_sq / num_valid_classes
mask_metrics['RecognitionQuality@%.2fIOU' %
self._iou_threshold] = sum_rq / num_valid_classes
mask_metrics['NumValidClasses'] = num_valid_classes
mask_metrics['NumTotalClasses'] = len(self._categories)
return mask_metrics
def _compute_panoptic_metrics_single_class(self, sum_tp_iou, num_tp, num_fp,
num_fn):
"""Compute panoptic metrics: panoptic/segmentation/recognition quality.
More computation details in https://arxiv.org/pdf/1801.00868.pdf.
Args:
sum_tp_iou: summed true positive intersection-over-union (IoU) for a
specific class.
num_tp: the total number of true positives for a specific class.
num_fp: the total number of false positives for a specific class.
num_fn: the total number of false negatives for a specific class.
Returns:
panoptic_quality: sum_tp_iou / (num_tp + 0.5*num_fp + 0.5*num_fn).
segmentation_quality: sum_tp_iou / num_tp.
recognition_quality: num_tp / (num_tp + 0.5*num_fp + 0.5*num_fn).
"""
denominator = num_tp + 0.5 * num_fp + 0.5 * num_fn
# Calculate metric only if there is at least one GT or one prediction.
if denominator > 0:
recognition_quality = num_tp / denominator
if num_tp > 0:
segmentation_quality = sum_tp_iou / num_tp
else:
# If there is no TP for this category.
segmentation_quality = 0
panoptic_quality = segmentation_quality * recognition_quality
return panoptic_quality, segmentation_quality, recognition_quality
else:
return None, None, None
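  # Worked example (made-up numbers): with num_tp=2, num_fp=1, num_fn=1 and
  # sum_tp_iou=1.5, the denominator is 2 + 0.5*1 + 0.5*1 = 3, so
  # recognition_quality = 2/3, segmentation_quality = 1.5/2 = 0.75, and
  # panoptic_quality = 0.75 * (2/3) = 0.5 (equivalently, 1.5/3).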
def _separate_normal_and_crowd_labels(self, crowd_gt_indices,
groundtruth_masks, groundtruth_classes):
"""Separate normal and crowd groundtruth class_labels and masks.
Args:
crowd_gt_indices: None or array of shape [num_groundtruths]. If None, all
groundtruths are treated as normal ones.
groundtruth_masks: array of shape [num_groundtruths, height, width].
groundtruth_classes: array of shape [num_groundtruths].
Returns:
normal_gt_masks: array of shape [num_normal_groundtruths, height, width].
normal_gt_classes: array of shape [num_normal_groundtruths].
crowd_gt_masks: array of shape [num_crowd_groundtruths, height, width].
crowd_gt_classes: array of shape [num_crowd_groundtruths].
Raises:
      ValueError: if the number of groundtruth classes doesn't match the
        number of groundtruth masks, or if the number of crowd_gt_indices
        doesn't match the number of groundtruth masks.
"""
if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]:
raise ValueError(
"The number of masks doesn't match the number of labels.")
    if crowd_gt_indices is None:
      # All groundtruths are treated as normal. The flags must have shape
      # [num_groundtruths] (not the full mask shape) so that the boolean
      # indexing below selects whole masks.
      crowd_gt_indices = np.zeros(groundtruth_masks.shape[0], dtype=bool)
else:
if groundtruth_masks.shape[0] != crowd_gt_indices.shape[0]:
raise ValueError(
"The number of masks doesn't match the number of is_crowd labels.")
      crowd_gt_indices = crowd_gt_indices.astype(bool)
normal_gt_indices = np.logical_not(crowd_gt_indices)
if normal_gt_indices.size:
normal_gt_masks = groundtruth_masks[normal_gt_indices, :, :]
normal_gt_classes = groundtruth_classes[normal_gt_indices]
crowd_gt_masks = groundtruth_masks[crowd_gt_indices, :, :]
crowd_gt_classes = groundtruth_classes[crowd_gt_indices]
else:
# No groundtruths available, groundtruth_masks.shape = (0, h, w)
normal_gt_masks = groundtruth_masks
normal_gt_classes = groundtruth_classes
crowd_gt_masks = groundtruth_masks
crowd_gt_classes = groundtruth_classes
return normal_gt_masks, normal_gt_classes, crowd_gt_masks, crowd_gt_classes
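  # For example (invented flags): with three groundtruths and
  # crowd_gt_indices = np.array([False, True, False]), this returns the two
  # normal masks/classes (indices 0 and 2) and the single crowd mask/class
  # (index 1), each keeping the [num, height, width] mask layout.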
def _match_predictions_to_groundtruths(self,
predicted_masks,
predicted_classes,
groundtruth_masks,
groundtruth_classes,
matching_threshold,
is_crowd=False,
with_replacement=False):
"""Match the predicted masks to groundtruths.
Args:
predicted_masks: array of shape [num_predictions, height, width].
predicted_classes: array of shape [num_predictions].
groundtruth_masks: array of shape [num_groundtruths, height, width].
groundtruth_classes: array of shape [num_groundtruths].
matching_threshold: if the overlap between a prediction and a groundtruth
is larger than this threshold, the prediction is true positive.
is_crowd: whether the groundtruths are crowd annotation or not. If True,
use intersection over area (IoA) as the overlapping metric; otherwise
use intersection over union (IoU).
with_replacement: whether a groundtruth can be matched to multiple
        predictions. By default, only 1-1 matching is allowed for normal
        groundtruths; for crowd groundtruths, 1-to-many matching must be
        allowed.
Returns:
      best_overlaps: array of shape [num_predictions]. Values representing the
        IoU or IoA with the best matched groundtruth.
pred_matched: array of shape [num_predictions]. Boolean value representing
whether the ith prediction is matched to a groundtruth.
gt_matched: array of shape [num_groundtruth]. Boolean value representing
whether the ith groundtruth is matched to a prediction.
Raises:
ValueError: if the shape of groundtruth/predicted masks doesn't match
groundtruth/predicted classes.
"""
if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]:
raise ValueError(
"The number of GT masks doesn't match the number of labels.")
if predicted_masks.shape[0] != predicted_classes.shape[0]:
raise ValueError(
"The number of predicted masks doesn't match the number of labels.")
    gt_matched = np.zeros(groundtruth_classes.shape, dtype=bool)
    pred_matched = np.zeros(predicted_classes.shape, dtype=bool)
best_overlaps = np.zeros(predicted_classes.shape)
for pid in range(predicted_classes.shape[0]):
best_overlap = 0
matched_gt_id = -1
for gid in range(groundtruth_classes.shape[0]):
if predicted_classes[pid] == groundtruth_classes[gid]:
if (not with_replacement) and gt_matched[gid]:
continue
if not is_crowd:
overlap = np_mask_ops.iou(predicted_masks[pid:pid + 1],
groundtruth_masks[gid:gid + 1])[0, 0]
else:
overlap = np_mask_ops.ioa(groundtruth_masks[gid:gid + 1],
predicted_masks[pid:pid + 1])[0, 0]
if overlap >= matching_threshold and overlap > best_overlap:
matched_gt_id = gid
best_overlap = overlap
if matched_gt_id >= 0:
gt_matched[matched_gt_id] = True
pred_matched[pid] = True
best_overlaps[pid] = best_overlap
return best_overlaps, pred_matched, gt_matched
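  # Matching behaviour sketch (hypothetical numbers, for illustration only):
  # with matching_threshold=0.5 and one prediction whose IoUs against two
  # same-class groundtruths are 0.4 and 0.7, the greedy loop above picks the
  # second groundtruth (best_overlaps[0]=0.7, pred_matched[0]=True,
  # gt_matched[1]=True). For crowd regions (is_crowd=True), the overlap is
  # IoA -- assuming np_mask_ops.ioa normalizes by the second argument's area,
  # a prediction fully contained in a large crowd mask scores 1.0 even though
  # its IoU with that mask would be small.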
def _unpack_evaluation_dictionary_items(self, eval_dict):
"""Unpack items from the evaluation dictionary."""
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
image_id = eval_dict[input_data_fields.key]
groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
groundtruth_instance_masks = eval_dict[
input_data_fields.groundtruth_instance_masks]
groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd,
None)
num_gt_masks_per_image = eval_dict.get(
input_data_fields.num_groundtruth_boxes, None)
detection_classes = eval_dict[detection_fields.detection_classes]
detection_masks = eval_dict[detection_fields.detection_masks]
num_det_masks_per_image = eval_dict.get(detection_fields.num_detections,
None)
if groundtruth_is_crowd is None:
groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
if not image_id.shape.as_list():
# Apply a batch dimension to all tensors.
image_id = tf.expand_dims(image_id, 0)
groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0)
groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
detection_classes = tf.expand_dims(detection_classes, 0)
detection_masks = tf.expand_dims(detection_masks, 0)
if num_gt_masks_per_image is None:
num_gt_masks_per_image = tf.shape(groundtruth_classes)[1:2]
else:
num_gt_masks_per_image = tf.expand_dims(num_gt_masks_per_image, 0)
if num_det_masks_per_image is None:
num_det_masks_per_image = tf.shape(detection_classes)[1:2]
else:
num_det_masks_per_image = tf.expand_dims(num_det_masks_per_image, 0)
else:
if num_gt_masks_per_image is None:
num_gt_masks_per_image = tf.tile(
tf.shape(groundtruth_classes)[1:2],
multiples=tf.shape(groundtruth_classes)[0:1])
if num_det_masks_per_image is None:
num_det_masks_per_image = tf.tile(
tf.shape(detection_classes)[1:2],
multiples=tf.shape(detection_classes)[0:1])
return (image_id, groundtruth_classes, groundtruth_instance_masks,
groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
detection_masks, num_det_masks_per_image)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/coco_evaluation.py | coco_evaluation.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating object detections with LVIS metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import re
from lvis import results as lvis_results
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.metrics import lvis_tools
from object_detection.utils import object_detection_evaluation
def convert_masks_to_binary(masks):
"""Converts masks to 0 or 1 and uint8 type."""
return (masks > 0).astype(np.uint8)
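# Example (comment only): for masks = np.array([[[0, 3], [1, 0]]]),
# convert_masks_to_binary returns np.array([[[0, 1], [1, 0]]], dtype=np.uint8):
# every strictly positive pixel becomes 1 and everything else becomes 0.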
class LVISMaskEvaluator(object_detection_evaluation.DetectionEvaluator):
"""Class to evaluate LVIS mask metrics."""
def __init__(self,
categories,
include_metrics_per_category=False,
export_path=None):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
include_metrics_per_category: Additionally include per-category metrics
(this option is currently unsupported).
export_path: Path to export detections to LVIS compatible JSON format.
"""
super(LVISMaskEvaluator, self).__init__(categories)
self._image_ids_with_detections = set([])
self._groundtruth_list = []
self._detection_masks_list = []
self._category_id_set = set([cat['id'] for cat in self._categories])
self._annotation_id = 1
self._image_id_to_mask_shape_map = {}
self._image_id_to_verified_neg_classes = {}
self._image_id_to_not_exhaustive_classes = {}
if include_metrics_per_category:
raise ValueError('include_metrics_per_category not yet supported '
'for LVISMaskEvaluator.')
    # Per-category metrics are unsupported (see the check above), so this is
    # always False; it is still set here to avoid an AttributeError in
    # get_estimator_eval_metric_ops.
    self._include_metrics_per_category = False
    self._export_path = export_path
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._image_id_to_mask_shape_map.clear()
self._image_ids_with_detections.clear()
self._image_id_to_verified_neg_classes.clear()
self._image_id_to_not_exhaustive_classes.clear()
self._groundtruth_list = []
self._detection_masks_list = []
def add_single_ground_truth_image_info(self,
image_id,
groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
If the image has already been added, a warning is logged, and groundtruth is
ignored.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
InputDataFields.groundtruth_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_boxes] containing 1-indexed groundtruth classes for the boxes.
InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape
[num_masks, image_height, image_width] containing groundtruth masks.
The elements of the array must be in {0, 1}.
InputDataFields.groundtruth_verified_neg_classes: [num_classes + 1]
float indicator vector with values in {0, 1}. The length is
num_classes + 1 so as to be compatible with the 1-indexed groundtruth
classes.
InputDataFields.groundtruth_not_exhaustive_classes: [num_classes + 1]
float indicator vector with values in {0, 1}. The length is
num_classes + 1 so as to be compatible with the 1-indexed groundtruth
classes.
InputDataFields.groundtruth_area (optional): float numpy array of
shape [num_boxes] containing the area (in the original absolute
coordinates) of the annotated object.
Raises:
ValueError: if groundtruth_dict is missing a required field
"""
if image_id in self._image_id_to_mask_shape_map:
tf.logging.warning('Ignoring ground truth with image id %s since it was '
'previously added', image_id)
return
for key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_verified_neg_classes,
fields.InputDataFields.groundtruth_not_exhaustive_classes]:
if key not in groundtruth_dict.keys():
raise ValueError('groundtruth_dict missing entry: {}'.format(key))
groundtruth_instance_masks = groundtruth_dict[
fields.InputDataFields.groundtruth_instance_masks]
groundtruth_instance_masks = convert_masks_to_binary(
groundtruth_instance_masks)
verified_neg_classes_shape = groundtruth_dict[
fields.InputDataFields.groundtruth_verified_neg_classes].shape
not_exhaustive_classes_shape = groundtruth_dict[
fields.InputDataFields.groundtruth_not_exhaustive_classes].shape
if verified_neg_classes_shape != (len(self._category_id_set) + 1,):
raise ValueError('Invalid shape for verified_neg_classes_shape.')
if not_exhaustive_classes_shape != (len(self._category_id_set) + 1,):
raise ValueError('Invalid shape for not_exhaustive_classes_shape.')
self._image_id_to_verified_neg_classes[image_id] = np.flatnonzero(
groundtruth_dict[
fields.InputDataFields.groundtruth_verified_neg_classes]
== 1).tolist()
self._image_id_to_not_exhaustive_classes[image_id] = np.flatnonzero(
groundtruth_dict[
fields.InputDataFields.groundtruth_not_exhaustive_classes]
== 1).tolist()
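    # Example (illustrative only): an indicator vector [0., 1., 0., 0., 1.]
    # becomes the class-id list [1, 4] via np.flatnonzero(... == 1); index 0
    # is unused so the result matches the 1-indexed category convention.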
# Drop optional fields if empty tensor.
groundtruth_area = groundtruth_dict.get(
fields.InputDataFields.groundtruth_area)
if groundtruth_area is not None and not groundtruth_area.shape[0]:
groundtruth_area = None
self._groundtruth_list.extend(
lvis_tools.ExportSingleImageGroundtruthToLVIS(
image_id=image_id,
next_annotation_id=self._annotation_id,
category_id_set=self._category_id_set,
groundtruth_boxes=groundtruth_dict[
fields.InputDataFields.groundtruth_boxes],
groundtruth_classes=groundtruth_dict[
fields.InputDataFields.groundtruth_classes],
groundtruth_masks=groundtruth_instance_masks,
groundtruth_area=groundtruth_area)
)
self._annotation_id += groundtruth_dict[fields.InputDataFields.
groundtruth_boxes].shape[0]
self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[
fields.InputDataFields.groundtruth_instance_masks].shape
def add_single_detected_image_info(self,
image_id,
detections_dict):
"""Adds detections for a single image to be used for evaluation.
If a detection has already been added for this image id, a warning is
logged, and the detection is skipped.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
DetectionResultFields.detection_scores: float32 numpy array of shape
[num_boxes] containing detection scores for the boxes.
DetectionResultFields.detection_classes: integer numpy array of shape
[num_boxes] containing 1-indexed detection classes for the boxes.
DetectionResultFields.detection_masks: optional uint8 numpy array of
shape [num_boxes, image_height, image_width] containing instance
masks corresponding to the boxes. The elements of the array must be
in {0, 1}.
Raises:
ValueError: If groundtruth for the image_id is not available.
"""
if image_id not in self._image_id_to_mask_shape_map:
raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
if image_id in self._image_ids_with_detections:
tf.logging.warning('Ignoring detection with image id %s since it was '
'previously added', image_id)
return
groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id]
detection_masks = detections_dict[fields.DetectionResultFields.
detection_masks]
if groundtruth_masks_shape[1:] != detection_masks.shape[1:]:
raise ValueError('Spatial shape of groundtruth masks and detection masks '
'are incompatible: {} vs {}'.format(
groundtruth_masks_shape,
detection_masks.shape))
detection_masks = convert_masks_to_binary(detection_masks)
self._detection_masks_list.extend(
lvis_tools.ExportSingleImageDetectionMasksToLVIS(
image_id=image_id,
category_id_set=self._category_id_set,
detection_masks=detection_masks,
detection_scores=detections_dict[
fields.DetectionResultFields.detection_scores],
detection_classes=detections_dict[
fields.DetectionResultFields.detection_classes]))
self._image_ids_with_detections.update([image_id])
def evaluate(self):
"""Evaluates the detection boxes and returns a dictionary of coco metrics.
Returns:
A dictionary holding
"""
if self._export_path:
tf.logging.info('Dumping detections to json.')
self.dump_detections_to_json_file(self._export_path)
tf.logging.info('Performing evaluation on %d images.',
len(self._image_id_to_mask_shape_map.keys()))
# pylint: disable=g-complex-comprehension
groundtruth_dict = {
'annotations': self._groundtruth_list,
'images': [
{
'id': int(image_id),
'height': shape[1],
'width': shape[2],
'neg_category_ids':
self._image_id_to_verified_neg_classes[image_id],
'not_exhaustive_category_ids':
self._image_id_to_not_exhaustive_classes[image_id]
} for image_id, shape in self._image_id_to_mask_shape_map.items()],
'categories': self._categories
}
# pylint: enable=g-complex-comprehension
lvis_wrapped_groundtruth = lvis_tools.LVISWrapper(groundtruth_dict)
detections = lvis_results.LVISResults(lvis_wrapped_groundtruth,
self._detection_masks_list)
mask_evaluator = lvis_tools.LVISEvalWrapper(
lvis_wrapped_groundtruth, detections, iou_type='segm')
mask_metrics = mask_evaluator.ComputeMetrics()
    mask_metrics = {'DetectionMasks_' + key: value
                    for key, value in mask_metrics.items()}
return mask_metrics
def add_eval_dict(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
def update_op(image_id_batched, groundtruth_boxes_batched,
groundtruth_classes_batched,
groundtruth_instance_masks_batched,
groundtruth_verified_neg_classes_batched,
groundtruth_not_exhaustive_classes_batched,
num_gt_boxes_per_image,
detection_scores_batched, detection_classes_batched,
detection_masks_batched, num_det_boxes_per_image,
original_image_spatial_shape):
"""Update op for metrics."""
for (image_id, groundtruth_boxes, groundtruth_classes,
groundtruth_instance_masks, groundtruth_verified_neg_classes,
groundtruth_not_exhaustive_classes, num_gt_box,
detection_scores, detection_classes,
detection_masks, num_det_box, original_image_shape) in zip(
image_id_batched, groundtruth_boxes_batched,
groundtruth_classes_batched, groundtruth_instance_masks_batched,
groundtruth_verified_neg_classes_batched,
groundtruth_not_exhaustive_classes_batched,
num_gt_boxes_per_image,
detection_scores_batched, detection_classes_batched,
detection_masks_batched, num_det_boxes_per_image,
original_image_spatial_shape):
self.add_single_ground_truth_image_info(
image_id, {
input_data_fields.groundtruth_boxes:
groundtruth_boxes[:num_gt_box],
input_data_fields.groundtruth_classes:
groundtruth_classes[:num_gt_box],
input_data_fields.groundtruth_instance_masks:
groundtruth_instance_masks[
:num_gt_box,
:original_image_shape[0],
:original_image_shape[1]],
input_data_fields.groundtruth_verified_neg_classes:
groundtruth_verified_neg_classes,
input_data_fields.groundtruth_not_exhaustive_classes:
groundtruth_not_exhaustive_classes
})
self.add_single_detected_image_info(
image_id, {
'detection_scores': detection_scores[:num_det_box],
'detection_classes': detection_classes[:num_det_box],
'detection_masks': detection_masks[
:num_det_box,
:original_image_shape[0],
:original_image_shape[1]]
})
# Unpack items from the evaluation dictionary.
input_data_fields = fields.InputDataFields
detection_fields = fields.DetectionResultFields
image_id = eval_dict[input_data_fields.key]
original_image_spatial_shape = eval_dict[
input_data_fields.original_image_spatial_shape]
groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
groundtruth_instance_masks = eval_dict[
input_data_fields.groundtruth_instance_masks]
groundtruth_verified_neg_classes = eval_dict[
input_data_fields.groundtruth_verified_neg_classes]
groundtruth_not_exhaustive_classes = eval_dict[
input_data_fields.groundtruth_not_exhaustive_classes]
num_gt_boxes_per_image = eval_dict.get(
input_data_fields.num_groundtruth_boxes, None)
detection_scores = eval_dict[detection_fields.detection_scores]
detection_classes = eval_dict[detection_fields.detection_classes]
detection_masks = eval_dict[detection_fields.detection_masks]
num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections,
None)
if not image_id.shape.as_list():
# Apply a batch dimension to all tensors.
image_id = tf.expand_dims(image_id, 0)
groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0)
groundtruth_verified_neg_classes = tf.expand_dims(
groundtruth_verified_neg_classes, 0)
groundtruth_not_exhaustive_classes = tf.expand_dims(
groundtruth_not_exhaustive_classes, 0)
detection_scores = tf.expand_dims(detection_scores, 0)
detection_classes = tf.expand_dims(detection_classes, 0)
detection_masks = tf.expand_dims(detection_masks, 0)
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
else:
num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.shape(detection_scores)[1:2]
else:
num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
else:
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.tile(
tf.shape(groundtruth_boxes)[1:2],
multiples=tf.shape(groundtruth_boxes)[0:1])
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.tile(
tf.shape(detection_scores)[1:2],
multiples=tf.shape(detection_scores)[0:1])
return tf.py_func(update_op, [
image_id, groundtruth_boxes, groundtruth_classes,
groundtruth_instance_masks, groundtruth_verified_neg_classes,
groundtruth_not_exhaustive_classes,
num_gt_boxes_per_image, detection_scores, detection_classes,
detection_masks, num_det_boxes_per_image, original_image_spatial_shape
], [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns a dictionary of eval metric ops.
Note that once value_op is called, the detections and groundtruth added via
update_op are cleared.
Args:
eval_dict: A dictionary that holds tensors for evaluating object detection
performance. For single-image evaluation, this dictionary may be
produced from eval_util.result_dict_for_single_example(). If multi-image
evaluation, `eval_dict` should contain the fields
'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
properly unpad the tensors from the batch.
Returns:
a dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
update ops must be run together and similarly all value ops must be run
together to guarantee correct behaviour.
"""
update_op = self.add_eval_dict(eval_dict)
metric_names = ['DetectionMasks_Precision/mAP',
'DetectionMasks_Precision/mAP@.50IOU',
'DetectionMasks_Precision/mAP@.75IOU',
'DetectionMasks_Precision/mAP (small)',
'DetectionMasks_Precision/mAP (medium)',
'DetectionMasks_Precision/mAP (large)',
'DetectionMasks_Recall/AR@1',
'DetectionMasks_Recall/AR@10',
'DetectionMasks_Recall/AR@100',
'DetectionMasks_Recall/AR@100 (small)',
'DetectionMasks_Recall/AR@100 (medium)',
'DetectionMasks_Recall/AR@100 (large)']
if self._include_metrics_per_category:
for category_dict in self._categories:
metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' +
category_dict['name'])
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
def dump_detections_to_json_file(self, json_output_path):
"""Saves the detections into json_output_path in the format used by MS COCO.
Args:
      json_output_path: String containing the output file's path. It may also
        be None, in which case nothing is written to the output file.
"""
    if json_output_path:
pattern = re.compile(r'\d+\.\d{8,}')
def mround(match):
return '{:.2f}'.format(float(match.group()))
with tf.io.gfile.GFile(json_output_path, 'w') as fid:
json_string = json.dumps(self._detection_masks_list)
fid.write(re.sub(pattern, mround, json_string))
tf.logging.info('Dumping detections to output json file: %s',
json_output_path)
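  # Note on the rounding pass above (illustrative): the pattern \d+\.\d{8,}
  # only matches floats with 8 or more decimal digits, so, for example,
  # re.sub(pattern, mround, '{"score": 0.123456789, "w": 2.5}') yields
  # '{"score": 0.12, "w": 2.5}'. Long floats are truncated to two decimals to
  # keep the dumped JSON small; short floats pass through unchanged.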
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/lvis_evaluation.py | lvis_evaluation.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for third party pycocotools to be used within object_detection.
Note that nothing in this file is tensorflow related, so these utilities
cannot be called directly as, for example, a slim metric.
TODO(jonathanhuang): wrap as a slim metric in metrics.py
Usage example: given a set of images with ids in the list image_ids
and corresponding lists of numpy arrays encoding groundtruth (boxes and classes)
and detections (boxes, scores and classes), where elements of each list
correspond to detections/annotations of a single image,
then evaluation (in multi-class mode) can be invoked as follows:
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
image_ids, groundtruth_boxes_list, groundtruth_classes_list,
max_num_classes, output_path=None)
detections_list = coco_tools.ExportDetectionsToCOCO(
image_ids, detection_boxes_list, detection_scores_list,
detection_classes_list, output_path=None)
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import copy
import time
import numpy as np
from pycocotools import coco
from pycocotools import cocoeval
from pycocotools import mask
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.utils import json_utils
class COCOWrapper(coco.COCO):
"""Wrapper for the pycocotools COCO class."""
def __init__(self, dataset, detection_type='bbox'):
"""COCOWrapper constructor.
See http://mscoco.org/dataset/#format for a description of the format.
By default, the coco.COCO class constructor reads from a JSON file.
This function duplicates the same behavior but loads from a dictionary,
allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
"""
supported_detection_types = ['bbox', 'segmentation']
if detection_type not in supported_detection_types:
raise ValueError('Unsupported detection type: {}. '
'Supported values are: {}'.format(
detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
def LoadAnnotations(self, annotations):
"""Load annotations dictionary into COCO datastructure.
See http://mscoco.org/dataset/#format for a description of the annotations
format. As above, this function replicates the default behavior of the API
but does not require writing to external storage.
Args:
annotations: python list holding object detection results where each
detection is encoded as a dict with required keys ['image_id',
'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
`detection_type`.
Returns:
a coco.COCO datastructure holding object detection annotations results
Raises:
ValueError: if annotations is not a list
ValueError: if annotations do not correspond to the images contained
in self.
"""
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if not isinstance(annotations, list):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids)
& set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if self._detection_type == 'bbox':
for idx, ann in enumerate(annotations):
bb = ann['bbox']
ann['area'] = bb[2] * bb[3]
ann['id'] = idx + 1
ann['iscrowd'] = 0
elif self._detection_type == 'segmentation':
for idx, ann in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = idx + 1
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
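  # Minimal usage sketch for LoadAnnotations (comment only; the field values
  # are hypothetical):
  #   detections_list = [{'image_id': 1, 'category_id': 1,
  #                       'bbox': [10., 10., 20., 30.], 'score': 0.9}]
  #   results = coco_wrapper.LoadAnnotations(detections_list)
  # For 'bbox' detections, the 'area', 'id' and 'iscrowd' fields are filled in
  # above, so callers only need to supply the keys listed in the docstring.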
COCO_METRIC_NAMES_AND_INDEX = (
('Precision/mAP', 0),
('Precision/mAP@.50IOU', 1),
('Precision/mAP@.75IOU', 2),
('Precision/mAP (small)', 3),
('Precision/mAP (medium)', 4),
('Precision/mAP (large)', 5),
('Recall/AR@1', 6),
('Recall/AR@10', 7),
('Recall/AR@100', 8),
('Recall/AR@100 (small)', 9),
('Recall/AR@100 (medium)', 10),
('Recall/AR@100 (large)', 11)
)
COCO_KEYPOINT_METRIC_NAMES_AND_INDEX = (
('Precision/mAP', 0),
('Precision/mAP@.50IOU', 1),
('Precision/mAP@.75IOU', 2),
('Precision/mAP (medium)', 3),
('Precision/mAP (large)', 4),
('Recall/AR@1', 5),
('Recall/AR@10', 6),
('Recall/AR@100', 7),
('Recall/AR@100 (medium)', 8),
('Recall/AR@100 (large)', 9)
)
class COCOEvalWrapper(cocoeval.COCOeval):
"""Wrapper for the pycocotools COCOeval class.
To evaluate, create two objects (groundtruth_dict and detections_list)
using the conventions listed at http://mscoco.org/dataset/#format.
Then call evaluation as follows:
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False,
iou_type='bbox', oks_sigmas=None):
"""COCOEvalWrapper constructor.
Note that for the area-based metrics to be meaningful, detection and
groundtruth boxes must be in image coordinates measured in pixels.
Args:
groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding
groundtruth annotations
detections: a coco.COCO (or coco_tools.COCOWrapper) object holding
detections
agnostic_mode: boolean (default: False). If True, evaluation ignores
class labels, treating all detections as proposals.
      iou_type: IOU type to use for evaluation. Supports `bbox`, `segm`,
        `keypoints`.
oks_sigmas: Float numpy array holding the OKS variances for keypoints.
"""
cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type)
if oks_sigmas is not None:
self.params.kpt_oks_sigmas = oks_sigmas
if agnostic_mode:
self.params.useCats = 0
self._iou_type = iou_type
def GetCategory(self, category_id):
"""Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
"""
return self.cocoGt.cats[category_id]
def GetAgnosticMode(self):
"""Returns true if COCO Eval is configured to evaluate in agnostic mode."""
return self.params.useCats == 0
def GetCategoryIdList(self):
"""Returns list of valid category ids."""
return self.params.catIds
def ComputeMetrics(self,
include_metrics_per_category=False,
all_metrics_per_category=False,
super_categories=None):
"""Computes detection/keypoint metrics.
Args:
include_metrics_per_category: If True, will include metrics per category.
      all_metrics_per_category: If true, include all the summary metrics for
        each category in per_category_ap. Be careful with setting it to true if
        you have more than a handful of categories, because it will pollute
        your mldash.
super_categories: None or a python dict mapping super-category names
(strings) to lists of categories (corresponding to category names
in the label_map). Metrics are aggregated along these super-categories
and added to the `per_category_ap` and are associated with the name
`PerformanceBySuperCategory/<super-category-name>`.
Returns:
1. summary_metrics: a dictionary holding:
'Precision/mAP': mean average precision over classes averaged over IOU
thresholds ranging from .5 to .95 with .05 increments
'Precision/mAP@.50IOU': mean average precision at 50% IOU
'Precision/mAP@.75IOU': mean average precision at 75% IOU
'Precision/mAP (small)': mean average precision for small objects
(area < 32^2 pixels). NOTE: not present for 'keypoints'
'Precision/mAP (medium)': mean average precision for medium sized
objects (32^2 pixels < area < 96^2 pixels)
'Precision/mAP (large)': mean average precision for large objects
(96^2 pixels < area < 10000^2 pixels)
'Recall/AR@1': average recall with 1 detection
'Recall/AR@10': average recall with 10 detections
'Recall/AR@100': average recall with 100 detections
'Recall/AR@100 (small)': average recall for small objects with 100
detections. NOTE: not present for 'keypoints'
'Recall/AR@100 (medium)': average recall for medium objects with 100
detections
'Recall/AR@100 (large)': average recall for large objects with 100
detections
2. per_category_ap: a dictionary holding category specific results with
keys of the form: 'Precision mAP ByCategory/category'
(without the supercategory part if no supercategories exist).
For backward compatibility 'PerformanceByCategory' is included in the
output regardless of all_metrics_per_category.
If evaluating class-agnostic mode, per_category_ap is an empty
dictionary.
If super_categories are provided, then this will additionally include
metrics aggregated along the super_categories with keys of the form:
`PerformanceBySuperCategory/<super-category-name>`
Raises:
ValueError: If category_stats does not exist.
"""
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = {}
if self._iou_type in ['bbox', 'segm']:
summary_metrics = OrderedDict(
[(name, self.stats[index]) for name, index in
COCO_METRIC_NAMES_AND_INDEX])
elif self._iou_type == 'keypoints':
category_id = self.GetCategoryIdList()[0]
category_name = self.GetCategory(category_id)['name']
summary_metrics = OrderedDict([])
for metric_name, index in COCO_KEYPOINT_METRIC_NAMES_AND_INDEX:
value = self.stats[index]
summary_metrics['{} ByCategory/{}'.format(
metric_name, category_name)] = value
if not include_metrics_per_category:
return summary_metrics, {}
if not hasattr(self, 'category_stats'):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
super_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return summary_metrics, per_category_ap
if super_categories:
for key in super_categories:
super_category_ap['PerformanceBySuperCategory/{}'.format(key)] = 0
if all_metrics_per_category:
for metric_name, _ in COCO_METRIC_NAMES_AND_INDEX:
metric_key = '{} BySuperCategory/{}'.format(metric_name, key)
super_category_ap[metric_key] = 0
for category_index, category_id in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
      # Kept for backward compatibility.
per_category_ap['PerformanceByCategory/mAP/{}'.format(
category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
for metric_name, index in COCO_METRIC_NAMES_AND_INDEX:
metric_key = '{} ByCategory/{}'.format(metric_name, category)
per_category_ap[metric_key] = self.category_stats[index][
category_index]
if super_categories:
for key in super_categories:
if category in super_categories[key]:
metric_key = 'PerformanceBySuperCategory/{}'.format(key)
super_category_ap[metric_key] += self.category_stats[0][
category_index]
if all_metrics_per_category:
for metric_name, index in COCO_METRIC_NAMES_AND_INDEX:
metric_key = '{} BySuperCategory/{}'.format(metric_name, key)
super_category_ap[metric_key] += (
self.category_stats[index][category_index])
if super_categories:
for key in super_categories:
length = len(super_categories[key])
super_category_ap['PerformanceBySuperCategory/{}'.format(
key)] /= length
if all_metrics_per_category:
for metric_name, _ in COCO_METRIC_NAMES_AND_INDEX:
super_category_ap['{} BySuperCategory/{}'.format(
metric_name, key)] /= length
per_category_ap.update(super_category_ap)
return summary_metrics, per_category_ap
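# Usage sketch for ComputeMetrics (metric values are hypothetical):
#   summary_metrics, per_category_ap = evaluator.ComputeMetrics(
#       include_metrics_per_category=True)
#   summary_metrics['Precision/mAP']                   # e.g. 0.42
#   per_category_ap['PerformanceByCategory/mAP/cat']   # e.g. 0.55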
def _ConvertBoxToCOCOFormat(box):
"""Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
"""
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
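# Worked example (comment only): box = [10., 20., 50., 80.] in
# [ymin, xmin, ymax, xmax] converts to [20., 10., 60., 40.] in
# [xmin, ymin, width, height], since width = 80 - 20 and height = 50 - 10.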
def _RleCompress(masks):
"""Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
"""
rle = mask.encode(np.asfortranarray(masks))
rle['counts'] = six.ensure_str(rle['counts'])
return rle
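# Usage sketch (illustrative; assumes pycocotools' mask.encode returns a dict
# with 'size' and 'counts' keys for a single 2-D Fortran-order mask):
#   rle = _RleCompress(np.ones((4, 4), dtype=np.uint8))
#   # rle['size'] == [4, 4]; rle['counts'] holds the run-length string,
#   # converted to str above so the result is JSON-serializable on Python 3.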
def ExportSingleImageGroundtruthToCoco(image_id,
next_annotation_id,
category_id_set,
groundtruth_boxes,
groundtruth_classes,
groundtruth_keypoints=None,
groundtruth_keypoint_visibilities=None,
groundtruth_masks=None,
groundtruth_is_crowd=None,
groundtruth_area=None):
"""Export groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in
correspondence - that is: groundtruth_boxes[i, :], and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_keypoints: optional float numpy array of keypoints
with shape [num_gt_boxes, num_keypoints, 2].
groundtruth_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_gt_boxes, num_keypoints]. Integer is treated
      as an enum with 0=not labeled, 1=labeled but not visible and 2=labeled and
visible.
groundtruth_masks: optional uint8 numpy array of shape [num_detections,
image_height, image_width] containing detection_masks.
groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
indicating whether groundtruth boxes are crowd.
groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
provided, then the area values (in the original absolute coordinates) will
be populated instead of calculated from bounding box coordinates.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
                     'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
has_is_crowd = groundtruth_is_crowd is not None
if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:
raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
has_keypoints = groundtruth_keypoints is not None
has_keypoint_visibilities = groundtruth_keypoint_visibilities is not None
if has_keypoints and not has_keypoint_visibilities:
groundtruth_keypoint_visibilities = np.full(
(num_boxes, groundtruth_keypoints.shape[1]), 2)
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0
if groundtruth_area is not None and groundtruth_area[i] > 0:
area = float(groundtruth_area[i])
else:
area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
(groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]))
export_dict = {
'id':
next_annotation_id + i,
'image_id':
image_id,
'category_id':
int(groundtruth_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
'area': area,
'iscrowd':
iscrowd
}
if groundtruth_masks is not None:
export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
if has_keypoints:
keypoints = groundtruth_keypoints[i]
visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [-1])
coco_keypoints = []
num_valid_keypoints = 0
for keypoint, visibility in zip(keypoints, visibilities):
# Convert from [y, x] to [x, y] as mandated by COCO.
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
if int(visibility) > 0:
num_valid_keypoints = num_valid_keypoints + 1
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_valid_keypoints
groundtruth_list.append(export_dict)
return groundtruth_list
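# Minimal usage sketch (all values are hypothetical, for illustration only):
#   annotations = ExportSingleImageGroundtruthToCoco(
#       image_id='img_0',
#       next_annotation_id=1,
#       category_id_set={1, 2},
#       groundtruth_boxes=np.array([[10., 20., 50., 80.]], dtype=np.float32),
#       groundtruth_classes=np.array([1]))
# # -> one dict with 'bbox' [20., 10., 60., 40.], 'area' 2400.0, 'iscrowd' 0
# #    and 'id' 1.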
def ExportGroundtruthToCOCO(image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=None):
"""Export groundtruth detection annotations in numpy arrays to COCO API.
This function converts a set of groundtruth detection annotations represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are three lists: image ids for each groundtruth image,
groundtruth boxes for each image and groundtruth classes respectively.
Note that the image_ids provided here must match the ones given to the
ExportDetectionsToCOCO function in order for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box and "iscrowd" fields are always set to 0.
TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
(note that num_gt_boxes can be different for each entry in the list)
groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
(note that num_gt_boxes can be different for each entry in the list)
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
output_path: (optional) path for exporting result to JSON
Returns:
dictionary that can be read by COCO API
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
category_id_set = set([cat['id'] for cat in categories])
groundtruth_export_list = []
image_export_list = []
if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes):
raise ValueError('Input lists must have the same length')
# For reasons internal to the COCO API, it is important that annotation ids
# are not equal to zero; we thus start counting from 1.
annotation_id = 1
for image_id, boxes, classes in zip(image_ids, groundtruth_boxes,
groundtruth_classes):
image_export_list.append({'id': image_id})
groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco(
image_id,
annotation_id,
category_id_set,
boxes,
classes))
num_boxes = classes.shape[0]
annotation_id += num_boxes
groundtruth_dict = {
'annotations': groundtruth_export_list,
'images': image_export_list,
'categories': categories
}
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
return groundtruth_dict
def ExportSingleImageDetectionBoxesToCoco(image_id,
category_id_set,
detection_boxes,
detection_scores,
detection_classes,
detection_keypoints=None,
detection_keypoint_visibilities=None):
"""Export detections of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. Note that the image_ids
provided here must match the ones given to the
ExportSingleImageGroundtruthToCoco. We assume that boxes and classes are in
correspondence - that is: boxes[i, :], and classes[i]
are associated with the same groundtruth annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_boxes: float numpy array of shape [num_detections, 4] containing
detection boxes.
detection_scores: float numpy array of shape [num_detections] containing
      scores for the detection boxes.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection boxes.
detection_keypoints: optional float numpy array of keypoints
with shape [num_detections, num_keypoints, 2].
detection_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_detections, num_keypoints]. Integer is
      treated as an enum with 0=not labeled, 1=labeled but not visible and
2=labeled and visible.
Returns:
a list of detection annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_boxes, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'are expected to be of rank 1.')
if len(detection_boxes.shape) != 2:
raise ValueError('All entries in detection_boxes expected to be of '
'rank 2.')
if detection_boxes.shape[1] != 4:
raise ValueError('All entries in detection_boxes should have '
'shape[1] == 4.')
num_boxes = detection_classes.shape[0]
if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. '
'Scores shape: %d' % (
detection_classes.shape[0], detection_boxes.shape[0],
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
export_dict = {
'image_id':
image_id,
'category_id':
int(detection_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),
'score':
float(detection_scores[i]),
}
if detection_keypoints is not None:
keypoints = detection_keypoints[i]
num_keypoints = keypoints.shape[0]
if detection_keypoint_visibilities is None:
detection_keypoint_visibilities = np.full((num_boxes, num_keypoints),
2)
visibilities = np.reshape(detection_keypoint_visibilities[i], [-1])
coco_keypoints = []
for keypoint, visibility in zip(keypoints, visibilities):
# Convert from [y, x] to [x, y] as mandated by COCO.
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_keypoints
detections_list.append(export_dict)
return detections_list
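# Minimal usage sketch (hypothetical values): detections whose class id is not
# in category_id_set are silently dropped, so
#   ExportSingleImageDetectionBoxesToCoco(
#       image_id=0, category_id_set={1},
#       detection_boxes=np.array([[0., 0., 1., 1.], [0., 0., 2., 2.]]),
#       detection_scores=np.array([0.9, 0.8]),
#       detection_classes=np.array([1, 7]))
# returns a single-entry list for the class-1 box only.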
def ExportSingleImageDetectionMasksToCoco(image_id,
category_id_set,
detection_masks,
detection_scores,
detection_classes):
"""Export detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'are expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
                     'compatible lengths and shapes. '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'segmentation': _RleCompress(detection_masks[i]),
'score': float(detection_scores[i])
})
return detections_list
def ExportDetectionsToCOCO(image_ids,
detection_boxes,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export detection annotations in numpy arrays to COCO API.
This function converts a set of predicted detections represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of boxes, scores and
classes, respectively, corresponding to each image for which detections
have been produced. Note that the image_ids provided here must
match the ones given to the ExportGroundtruthToCOCO function in order
for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: detection_boxes[i, :], detection_scores[i] and
detection_classes[i] are associated with the same detection.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4]
detection_scores: list of numpy arrays (float) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'bbox', 'score'].
Raises:
ValueError: if (1) detection_boxes and detection_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers.
"""
category_id_set = set([cat['id'] for cat in categories])
detections_export_list = []
if not (len(image_ids) == len(detection_boxes) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
for image_id, boxes, scores, classes in zip(image_ids, detection_boxes,
detection_scores,
detection_classes):
detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco(
image_id,
category_id_set,
boxes,
scores,
classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
return detections_export_list
def ExportSegmentsToCOCO(image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export segmentation masks in numpy arrays to COCO API.
This function converts a set of predicted instance masks represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of segments, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
  Note this function is recommended for use with small datasets. For large
  datasets, it should be combined with a merge function (e.g. in MapReduce);
  otherwise the memory consumption is large.
We assume that for each image, masks, scores and classes are in
correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i]
and detection_classes[i] are associated with the same detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_masks: list of numpy arrays with shape [num_detection, h, w, 1]
and type uint8. The height and width should match the shape of
corresponding image.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'segmentation', 'score'].
Raises:
ValueError: if detection_masks and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_masks) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
segment_export_list = []
for image_id, masks, scores, classes in zip(image_ids, detection_masks,
detection_scores,
detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'are expected to be of rank 1.')
if len(masks.shape) != 4:
raise ValueError('All entries in masks expected to be of '
'rank 4. Given {}'.format(masks.shape))
num_boxes = classes.shape[0]
if not num_boxes == masks.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in segment_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(
image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
return segment_export_list
def ExportKeypointsToCOCO(image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Exports keypoints in numpy arrays to COCO API.
This function converts a set of predicted keypoints represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of keypoints, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
We assume that for each image, keypoints, scores and classes are in
correspondence --- that is: detection_keypoints[i, :, :, :],
detection_scores[i] and detection_classes[i] are associated with the same
detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_keypoints: list of numpy arrays with shape
[num_detection, num_keypoints, 2] and type float32 in absolute
x-y coordinates.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category and an integer 'num_keypoints' key specifying the number of
keypoints the category has.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'keypoints', 'score'].
Raises:
ValueError: if detection_keypoints and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_keypoints) ==
len(detection_scores) == len(detection_classes)):
raise ValueError('Input lists must have the same length')
keypoints_export_list = []
for image_id, keypoints, scores, classes in zip(
image_ids, detection_keypoints, detection_scores, detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(keypoints.shape) != 3:
raise ValueError('All entries in keypoints expected to be of '
'rank 3. Given {}'.format(keypoints.shape))
num_boxes = classes.shape[0]
if not num_boxes == keypoints.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_keypoints, and detection_scores should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
category_id_to_num_keypoints_map = {
cat['id']: cat['num_keypoints'] for cat in categories
if 'num_keypoints' in cat}
for i in range(num_boxes):
if classes[i] not in category_id_set:
raise ValueError('class id should be in category_id_set\n')
if classes[i] in category_id_to_num_keypoints_map:
num_keypoints = category_id_to_num_keypoints_map[classes[i]]
# Adds extra ones to indicate the visibility for each keypoint as is
# recommended by MSCOCO.
instance_keypoints = np.concatenate(
[keypoints[i, 0:num_keypoints, :],
np.expand_dims(np.ones(num_keypoints), axis=1)],
axis=1).astype(int)
instance_keypoints = instance_keypoints.flatten().tolist()
keypoints_export_list.append({
'image_id': image_id,
'category_id': int(classes[i]),
'keypoints': instance_keypoints,
'score': float(scores[i])
})
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
return keypoints_export_list
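# Illustrative sketch (not part of the original module) of calling
# ExportKeypointsToCOCO; the 'num_keypoints' entry in the category dict
# controls how many keypoints are kept per detection:
#
#   import numpy as np
#   keypoints = np.array([[[10., 20.], [30., 40.], [50., 60.]]],
#                        dtype=np.float32)  # [num_detection, num_keypoints, 2]
#   export = ExportKeypointsToCOCO(
#       image_ids=['img_0'],
#       detection_keypoints=[keypoints],
#       detection_scores=[np.array([.8], dtype=np.float32)],
#       detection_classes=[np.array([1], dtype=np.int32)],
#       categories=[{'id': 1, 'name': 'person', 'num_keypoints': 3}])
#   # export[0]['keypoints'] is the flat list [x1, y1, 1, x2, y2, 1, ...],
#   # with a constant visibility of 1 appended to each keypoint.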
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/coco_tools.py | coco_tools.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs evaluation using OpenImages groundtruth and predictions.
Uses the Open Images Challenge 2018 and 2019 metrics.
Example usage:
python models/research/object_detection/metrics/oid_challenge_evaluation.py \
--input_annotations_boxes=/path/to/input/annotations-human-bbox.csv \
--input_annotations_labels=/path/to/input/annotations-label.csv \
--input_class_labelmap=/path/to/input/class_labelmap.pbtxt \
--input_predictions=/path/to/input/predictions.csv \
--output_metrics=/path/to/output/metric.csv \
  --input_annotations_segm=[/path/to/input/annotations-human-mask.csv]
If the optional input_annotations_segm flag is provided, a Mask column is also
expected in the corresponding CSV.
CSVs with bounding box annotations, instance segmentations and image labels
can be downloaded from the Open Images Challenge website:
https://storage.googleapis.com/openimages/web/challenge.html
The format of the input CSVs and the metrics themselves are described on the
challenge website as well.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from absl import app
from absl import flags
import pandas as pd
from google.protobuf import text_format
from object_detection.metrics import io_utils
from object_detection.metrics import oid_challenge_evaluation_utils as utils
from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import object_detection_evaluation
flags.DEFINE_string('input_annotations_boxes', None,
'File with groundtruth boxes annotations.')
flags.DEFINE_string('input_annotations_labels', None,
'File with groundtruth labels annotations.')
flags.DEFINE_string(
'input_predictions', None,
"""File with detection predictions; NOTE: no postprocessing is applied in the evaluation script."""
)
flags.DEFINE_string('input_class_labelmap', None,
'Open Images Challenge labelmap.')
flags.DEFINE_string('output_metrics', None, 'Output file with csv metrics.')
flags.DEFINE_string(
'input_annotations_segm', None,
'File with groundtruth instance segmentation annotations [OPTIONAL].')
FLAGS = flags.FLAGS
def _load_labelmap(labelmap_path):
"""Loads labelmap from the labelmap path.
Args:
labelmap_path: Path to the labelmap.
Returns:
A dictionary mapping class name to class numerical id
A list with dictionaries, one dictionary per category.
"""
label_map = string_int_label_map_pb2.StringIntLabelMap()
with open(labelmap_path, 'r') as fid:
label_map_string = fid.read()
text_format.Merge(label_map_string, label_map)
labelmap_dict = {}
categories = []
for item in label_map.item:
labelmap_dict[item.name] = item.id
categories.append({'id': item.id, 'name': item.name})
return labelmap_dict, categories
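# For illustration (an assumed minimal example, not part of the original
# script): a labelmap pbtxt containing
#
#   item { name: "/m/04bcr3" id: 1 }
#   item { name: "/m/083vt" id: 2 }
#
# would make _load_labelmap return the tuple
# ({'/m/04bcr3': 1, '/m/083vt': 2},
#  [{'id': 1, 'name': '/m/04bcr3'}, {'id': 2, 'name': '/m/083vt'}]).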
def main(unused_argv):
flags.mark_flag_as_required('input_annotations_boxes')
flags.mark_flag_as_required('input_annotations_labels')
flags.mark_flag_as_required('input_predictions')
flags.mark_flag_as_required('input_class_labelmap')
flags.mark_flag_as_required('output_metrics')
all_location_annotations = pd.read_csv(FLAGS.input_annotations_boxes)
all_label_annotations = pd.read_csv(FLAGS.input_annotations_labels)
all_label_annotations.rename(
columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
is_instance_segmentation_eval = False
if FLAGS.input_annotations_segm:
is_instance_segmentation_eval = True
all_segm_annotations = pd.read_csv(FLAGS.input_annotations_segm)
    # Note: this step is brittle because it requires the floating point
    # numbers in both csvs to be exactly the same; it will be replaced by a
    # more stable solution: merge on LabelName and ImageID and filter down
    # by IoU.
all_location_annotations = utils.merge_boxes_and_masks(
all_location_annotations, all_segm_annotations)
all_annotations = pd.concat([all_location_annotations, all_label_annotations])
class_label_map, categories = _load_labelmap(FLAGS.input_class_labelmap)
challenge_evaluator = (
object_detection_evaluation.OpenImagesChallengeEvaluator(
categories, evaluate_masks=is_instance_segmentation_eval))
all_predictions = pd.read_csv(FLAGS.input_predictions)
images_processed = 0
  for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
    logging.info('Processing image %d', images_processed)
groundtruth_dictionary = utils.build_groundtruth_dictionary(
image_groundtruth, class_label_map)
challenge_evaluator.add_single_ground_truth_image_info(
image_id, groundtruth_dictionary)
prediction_dictionary = utils.build_predictions_dictionary(
all_predictions.loc[all_predictions['ImageID'] == image_id],
class_label_map)
challenge_evaluator.add_single_detected_image_info(image_id,
prediction_dictionary)
images_processed += 1
metrics = challenge_evaluator.evaluate()
with open(FLAGS.output_metrics, 'w') as fid:
io_utils.write_csv(fid, metrics)
if __name__ == '__main__':
app.run(main)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/oid_challenge_evaluation.py | oid_challenge_evaluation.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.metrics.calibration_evaluation.""" # pylint: disable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import calibration_evaluation
from object_detection.utils import tf_version
def _get_categories_list():
return [{
'id': 1,
'name': 'person'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'cat'
}]
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class CalibrationDetectionEvaluationTest(tf.test.TestCase):
def _get_ece(self, ece_op, update_op):
"""Return scalar expected calibration error."""
with self.test_session() as sess:
metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
sess.run(tf.variables_initializer(var_list=metrics_vars))
_ = sess.run(update_op)
return sess.run(ece_op)
def testGetECEWithMatchingGroundtruthAndDetections(self):
"""Tests that ECE is calculated correctly when box matches exist."""
calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator(
_get_categories_list(), iou_threshold=0.5)
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
# All gt and detection boxes match.
base_eval_dict = {
input_data_fields.key:
tf.constant(['image_1', 'image_2', 'image_3']),
input_data_fields.groundtruth_boxes:
tf.constant([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]],
dtype=tf.float32),
detection_fields.detection_boxes:
tf.constant([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]],
dtype=tf.float32),
input_data_fields.groundtruth_classes:
tf.constant([[1], [2], [3]], dtype=tf.int64),
# Note that, in the zero ECE case, the detection class for image_2
# should NOT match groundtruth, since the detection score is zero.
detection_fields.detection_scores:
tf.constant([[1.0], [0.0], [1.0]], dtype=tf.float32)
}
# Zero ECE (perfectly calibrated).
zero_ece_eval_dict = base_eval_dict.copy()
zero_ece_eval_dict[detection_fields.detection_classes] = tf.constant(
[[1], [1], [3]], dtype=tf.int64)
zero_ece_op, zero_ece_update_op = (
calibration_evaluator.get_estimator_eval_metric_ops(zero_ece_eval_dict)
['CalibrationError/ExpectedCalibrationError'])
zero_ece = self._get_ece(zero_ece_op, zero_ece_update_op)
self.assertAlmostEqual(zero_ece, 0.0)
# ECE of 1 (poorest calibration).
one_ece_eval_dict = base_eval_dict.copy()
one_ece_eval_dict[detection_fields.detection_classes] = tf.constant(
[[3], [2], [1]], dtype=tf.int64)
one_ece_op, one_ece_update_op = (
calibration_evaluator.get_estimator_eval_metric_ops(one_ece_eval_dict)
['CalibrationError/ExpectedCalibrationError'])
one_ece = self._get_ece(one_ece_op, one_ece_update_op)
self.assertAlmostEqual(one_ece, 1.0)
def testGetECEWithUnmatchedGroundtruthAndDetections(self):
"""Tests that ECE is correctly calculated when boxes are unmatched."""
calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator(
_get_categories_list(), iou_threshold=0.5)
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
# No gt and detection boxes match.
eval_dict = {
input_data_fields.key:
tf.constant(['image_1', 'image_2', 'image_3']),
input_data_fields.groundtruth_boxes:
tf.constant([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]],
dtype=tf.float32),
detection_fields.detection_boxes:
tf.constant([[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]],
[[100., 100., 200., 200.]]],
dtype=tf.float32),
input_data_fields.groundtruth_classes:
tf.constant([[1], [2], [3]], dtype=tf.int64),
detection_fields.detection_classes:
tf.constant([[1], [1], [3]], dtype=tf.int64),
# Detection scores of zero when boxes are unmatched = ECE of zero.
detection_fields.detection_scores:
tf.constant([[0.0], [0.0], [0.0]], dtype=tf.float32)
}
ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops(
eval_dict)['CalibrationError/ExpectedCalibrationError']
ece = self._get_ece(ece_op, update_op)
self.assertAlmostEqual(ece, 0.0)
def testGetECEWithBatchedDetections(self):
"""Tests that ECE is correct with multiple detections per image."""
calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator(
_get_categories_list(), iou_threshold=0.5)
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
# Note that image_2 has mismatched classes and detection scores but should
# still produce ECE of 0 because detection scores are also 0.
eval_dict = {
input_data_fields.key:
tf.constant(['image_1', 'image_2', 'image_3']),
input_data_fields.groundtruth_boxes:
tf.constant([[[100., 100., 200., 200.], [50., 50., 100., 100.]],
[[50., 50., 100., 100.], [100., 100., 200., 200.]],
[[25., 25., 50., 50.], [100., 100., 200., 200.]]],
dtype=tf.float32),
detection_fields.detection_boxes:
tf.constant([[[100., 100., 200., 200.], [50., 50., 100., 100.]],
[[50., 50., 100., 100.], [25., 25., 50., 50.]],
[[25., 25., 50., 50.], [100., 100., 200., 200.]]],
dtype=tf.float32),
input_data_fields.groundtruth_classes:
tf.constant([[1, 2], [2, 3], [3, 1]], dtype=tf.int64),
detection_fields.detection_classes:
tf.constant([[1, 2], [1, 1], [3, 1]], dtype=tf.int64),
detection_fields.detection_scores:
tf.constant([[1.0, 1.0], [0.0, 0.0], [1.0, 1.0]], dtype=tf.float32)
}
ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops(
eval_dict)['CalibrationError/ExpectedCalibrationError']
ece = self._get_ece(ece_op, update_op)
self.assertAlmostEqual(ece, 0.0)
def testGetECEWhenImagesFilteredByIsAnnotated(self):
"""Tests that ECE is correct when detections filtered by is_annotated."""
calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator(
_get_categories_list(), iou_threshold=0.5)
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
# ECE will be 0 only if the third image is filtered by is_annotated.
eval_dict = {
input_data_fields.key:
tf.constant(['image_1', 'image_2', 'image_3']),
input_data_fields.groundtruth_boxes:
tf.constant([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]],
dtype=tf.float32),
detection_fields.detection_boxes:
tf.constant([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]],
dtype=tf.float32),
input_data_fields.groundtruth_classes:
tf.constant([[1], [2], [1]], dtype=tf.int64),
detection_fields.detection_classes:
tf.constant([[1], [1], [3]], dtype=tf.int64),
detection_fields.detection_scores:
tf.constant([[1.0], [0.0], [1.0]], dtype=tf.float32),
'is_annotated': tf.constant([True, True, False], dtype=tf.bool)
}
ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops(
eval_dict)['CalibrationError/ExpectedCalibrationError']
ece = self._get_ece(ece_op, update_op)
self.assertAlmostEqual(ece, 0.0)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/calibration_evaluation_tf1_test.py | calibration_evaluation_tf1_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common IO utils used in offline metric computation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
def write_csv(fid, metrics):
"""Writes metrics key-value pairs to CSV file.
Args:
fid: File identifier of an opened file.
metrics: A dictionary with metrics to be written.
"""
metrics_writer = csv.writer(fid, delimiter=',')
for metric_name, metric_value in metrics.items():
metrics_writer.writerow([metric_name, str(metric_value)])
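# Example (illustrative, not part of the original module): each metric becomes
# one "name,value" row.
#
#   with open('/tmp/metrics.csv', 'w') as fid:
#     write_csv(fid, {'Precision/mAP': 0.42, 'Recall/AR@100': 0.61})
#   # /tmp/metrics.csv then contains:
#   #   Precision/mAP,0.42
#   #   Recall/AR@100,0.61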
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/io_utils.py | io_utils.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for calibration_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.metrics import calibration_metrics
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class CalibrationLibTest(tf.test.TestCase):
@staticmethod
def _get_calibration_placeholders():
"""Returns TF placeholders for y_true and y_pred."""
return (tf.placeholder(tf.int64, shape=(None)),
tf.placeholder(tf.float32, shape=(None)))
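  # For reference, the expected calibration error over B bins is
  #   ECE = sum_b (n_b / N) * |accuracy(b) - mean_confidence(b)|,
  # where n_b is the number of predictions whose confidence falls into bin b
  # and N is the total number of predictions. The hand-computed values in the
  # tests below follow this formula.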
def test_expected_calibration_error_all_bins_filled(self):
"""Test expected calibration error when all bins contain predictions."""
y_true, y_pred = self._get_calibration_placeholders()
expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
y_true, y_pred, nbins=2)
with self.test_session() as sess:
metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
sess.run(tf.variables_initializer(var_list=metrics_vars))
# Bin calibration errors (|confidence - accuracy| * bin_weight):
# - [0,0.5): |0.2 - 0.333| * (3/5) = 0.08
# - [0.5, 1]: |0.75 - 0.5| * (2/5) = 0.1
sess.run(
update_op,
feed_dict={
y_pred: np.array([0., 0.2, 0.4, 0.5, 1.0]),
y_true: np.array([0, 0, 1, 0, 1])
})
actual_ece = 0.08 + 0.1
expected_ece = sess.run(expected_ece_op)
self.assertAlmostEqual(actual_ece, expected_ece)
def test_expected_calibration_error_all_bins_not_filled(self):
"""Test expected calibration error when no predictions for one bin."""
y_true, y_pred = self._get_calibration_placeholders()
expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
y_true, y_pred, nbins=2)
with self.test_session() as sess:
metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
sess.run(tf.variables_initializer(var_list=metrics_vars))
      # Bin calibration errors (|confidence - accuracy| * bin_weight); only
      # the first bin receives predictions here:
      # - [0,0.5): |0.2 - 0.333| * (3/3) = 0.133
      # - [0.5, 1]: no predictions, so this bin contributes nothing.
sess.run(
update_op,
feed_dict={
y_pred: np.array([0., 0.2, 0.4]),
y_true: np.array([0, 0, 1])
})
actual_ece = np.abs(0.2 - (1 / 3.))
expected_ece = sess.run(expected_ece_op)
self.assertAlmostEqual(actual_ece, expected_ece)
def test_expected_calibration_error_with_multiple_data_streams(self):
"""Test expected calibration error when multiple data batches provided."""
y_true, y_pred = self._get_calibration_placeholders()
expected_ece_op, update_op = calibration_metrics.expected_calibration_error(
y_true, y_pred, nbins=2)
with self.test_session() as sess:
metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
sess.run(tf.variables_initializer(var_list=metrics_vars))
# Identical data to test_expected_calibration_error_all_bins_filled,
# except split over three batches.
sess.run(
update_op,
feed_dict={
y_pred: np.array([0., 0.2]),
y_true: np.array([0, 0])
})
sess.run(
update_op,
feed_dict={
y_pred: np.array([0.4, 0.5]),
y_true: np.array([1, 0])
})
sess.run(
update_op, feed_dict={
y_pred: np.array([1.0]),
y_true: np.array([1])
})
actual_ece = 0.08 + 0.1
expected_ece = sess.run(expected_ece_op)
self.assertAlmostEqual(actual_ece, expected_ece)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/calibration_metrics_tf1_test.py | calibration_metrics_tf1_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_model.object_detection.metrics.coco_tools."""
import json
import os
import re
import numpy as np
from pycocotools import mask
import tensorflow.compat.v1 as tf
from object_detection.metrics import coco_tools
class CocoToolsTest(tf.test.TestCase):
def setUp(self):
groundtruth_annotations_list = [
{
'id': 1,
'image_id': 'first',
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'area': 100.**2,
'iscrowd': 0
},
{
'id': 2,
'image_id': 'second',
'category_id': 1,
'bbox': [50., 50., 50., 50.],
'area': 50.**2,
'iscrowd': 0
},
]
image_list = [{'id': 'first'}, {'id': 'second'}]
category_list = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
self._groundtruth_dict = {
'annotations': groundtruth_annotations_list,
'images': image_list,
'categories': category_list
}
self._detections_list = [
{
'image_id': 'first',
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'score': .8
},
{
'image_id': 'second',
'category_id': 1,
'bbox': [50., 50., 50., 50.],
'score': .7
},
]
def testCocoWrappers(self):
groundtruth = coco_tools.COCOWrapper(self._groundtruth_dict)
detections = groundtruth.LoadAnnotations(self._detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
summary_metrics, _ = evaluator.ComputeMetrics()
self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP'])
def testExportGroundtruthToCOCO(self):
image_ids = ['first', 'second']
    groundtruth_boxes = [np.array([[100, 100, 200, 200]], float),
                         np.array([[50, 50, 100, 100]], float)]
groundtruth_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'groundtruth.json')
result = coco_tools.ExportGroundtruthToCOCO(
image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=output_path)
self.assertDictEqual(result, self._groundtruth_dict)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
# The json output should have floats written to 4 digits of precision.
    matcher = re.compile(r'"bbox":\s+\[\n\s+\d+\.\d\d\d\d,', re.MULTILINE)
self.assertTrue(matcher.findall(written_result))
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testExportDetectionsToCOCO(self):
image_ids = ['first', 'second']
    detections_boxes = [np.array([[100, 100, 200, 200]], float),
                        np.array([[50, 50, 100, 100]], float)]
    detections_scores = [np.array([.8], float), np.array([.7], float)]
detections_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'detections.json')
result = coco_tools.ExportDetectionsToCOCO(
image_ids,
detections_boxes,
detections_scores,
detections_classes,
categories,
output_path=output_path)
self.assertListEqual(result, self._detections_list)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
# The json output should have floats written to 4 digits of precision.
    matcher = re.compile(r'"bbox":\s+\[\n\s+\d+\.\d\d\d\d,', re.MULTILINE)
self.assertTrue(matcher.findall(written_result))
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testExportSegmentsToCOCO(self):
image_ids = ['first', 'second']
detection_masks = [np.array(
[[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
dtype=np.uint8), np.array(
[[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
dtype=np.uint8)]
for i, detection_mask in enumerate(detection_masks):
detection_masks[i] = detection_mask[:, :, :, None]
    detection_scores = [np.array([.8], float), np.array([.7], float)]
detection_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'segments.json')
result = coco_tools.ExportSegmentsToCOCO(
image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=output_path)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
written_result = json.loads(written_result)
mask_load = mask.decode([written_result[0]['segmentation']])
self.assertTrue(np.allclose(mask_load, detection_masks[0]))
self.assertAlmostEqual(result, written_result)
def testExportKeypointsToCOCO(self):
image_ids = ['first', 'second']
detection_keypoints = [
np.array(
[[[100, 200], [300, 400], [500, 600]],
[[50, 150], [250, 350], [450, 550]]], dtype=np.int32),
np.array(
[[[110, 210], [310, 410], [510, 610]],
[[60, 160], [260, 360], [460, 560]]], dtype=np.int32)]
    detection_scores = [np.array([.8, 0.2], float),
                        np.array([.7, 0.3], float)]
detection_classes = [np.array([1, 1], np.int32), np.array([1, 1], np.int32)]
categories = [{'id': 1, 'name': 'person', 'num_keypoints': 3},
{'id': 2, 'name': 'cat'},
{'id': 3, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'keypoints.json')
result = coco_tools.ExportKeypointsToCOCO(
image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=output_path)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testSingleImageDetectionBoxesExport(self):
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_boxes=boxes,
detection_classes=classes,
detection_scores=scores)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertAlmostEqual(annotation['score'], scores[i])
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
def testSingleImageDetectionMaskExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
coco_annotations = coco_tools.ExportSingleImageDetectionMasksToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_classes=classes,
detection_scores=scores,
detection_masks=masks)
expected_counts = ['04', '31', '4']
for i, mask_annotation in enumerate(coco_annotations):
self.assertEqual(mask_annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
mask_annotation['segmentation']), masks[i])))
self.assertEqual(mask_annotation['image_id'], 'first_image')
self.assertEqual(mask_annotation['category_id'], classes[i])
self.assertAlmostEqual(mask_annotation['score'], scores[i])
def testSingleImageGroundtruthExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
is_crowd = np.array([0, 1, 0], dtype=np.int32)
next_annotation_id = 1
expected_counts = ['04', '31', '4']
# Tests exporting without passing in is_crowd (for backward compatibility).
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_masks=masks)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
annotation['segmentation']), masks[i])))
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
# Tests exporting with is_crowd.
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_masks=masks,
groundtruth_is_crowd=is_crowd)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
annotation['segmentation']), masks[i])))
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['iscrowd'], is_crowd[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
def testSingleImageGroundtruthExportWithKeypoints(self):
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]],
[[0, 0], [0.125, 0.125], [0.375, 0.375]],
[[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]],
dtype=np.float32)
visibilities = np.array([[2, 2, 2],
[2, 2, 0],
[2, 0, 0]], dtype=np.int32)
areas = np.array([15., 16., 17.])
classes = np.array([1, 2, 3], dtype=np.int32)
is_crowd = np.array([0, 1, 0], dtype=np.int32)
next_annotation_id = 1
# Tests exporting without passing in is_crowd (for backward compatibility).
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_keypoints=keypoints,
groundtruth_keypoint_visibilities=visibilities,
groundtruth_area=areas)
for i, annotation in enumerate(coco_annotations):
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
self.assertEqual(annotation['num_keypoints'], 3 - i)
self.assertEqual(annotation['area'], 15.0 + i)
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1])))
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0])))
self.assertTrue(
np.all(np.equal(annotation['keypoints'][2::3], visibilities[i])))
# Tests exporting with is_crowd.
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_keypoints=keypoints,
groundtruth_keypoint_visibilities=visibilities,
groundtruth_is_crowd=is_crowd)
for i, annotation in enumerate(coco_annotations):
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['iscrowd'], is_crowd[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
self.assertEqual(annotation['num_keypoints'], 3 - i)
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1])))
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0])))
self.assertTrue(
np.all(np.equal(annotation['keypoints'][2::3], visibilities[i])))
# Testing the area values are derived from the bounding boxes.
if i == 0:
self.assertAlmostEqual(annotation['area'], 1.0)
else:
self.assertAlmostEqual(annotation['area'], 0.25)
def testSingleImageDetectionBoxesExportWithKeypoints(self):
boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]],
dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, .5, .5]],
dtype=np.float32)
keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]],
[[0, 0], [0.125, 0.125], [0.375, 0.375]],
[[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]],
dtype=np.float32)
visibilities = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=np.int32)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
# Tests exporting without passing in is_crowd (for backward compatibility).
coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_boxes=boxes,
detection_scores=scores,
detection_classes=classes,
detection_keypoints=keypoints,
detection_keypoint_visibilities=visibilities)
for i, annotation in enumerate(coco_annotations):
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['score'], scores[i])
self.assertEqual(annotation['num_keypoints'], 3)
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1])))
self.assertTrue(
np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0])))
self.assertTrue(
np.all(np.equal(annotation['keypoints'][2::3], visibilities[i])))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/coco_tools_test.py | coco_tools_test.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.metrics.coco_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.metrics import lvis_evaluation
from object_detection.utils import tf_version
def _get_categories_list():
return [{
'id': 1,
'name': 'person',
'frequency': 'f'
}, {
'id': 2,
'name': 'dog',
'frequency': 'c'
}, {
'id': 3,
'name': 'cat',
'frequency': 'r'
}]
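# The 'frequency' field follows the LVIS convention of bucketing categories by
# how many training images contain them: 'f' (frequent), 'c' (common) and
# 'r' (rare).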
class LvisMaskEvaluationTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
"""Tests that mAP is calculated correctly on GT and Detections."""
masks1 = np.expand_dims(np.pad(
np.ones([100, 100], dtype=np.uint8),
((100, 56), (100, 56)), mode='constant'), axis=0)
masks2 = np.expand_dims(np.pad(
np.ones([50, 50], dtype=np.uint8),
((50, 156), (50, 156)), mode='constant'), axis=0)
masks3 = np.expand_dims(np.pad(
np.ones([25, 25], dtype=np.uint8),
((25, 206), (25, 206)), mode='constant'), axis=0)
lvis_evaluator = lvis_evaluation.LVISMaskEvaluator(
_get_categories_list())
lvis_evaluator.add_single_ground_truth_image_info(
image_id=1,
groundtruth_dict={
fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
fields.InputDataFields.groundtruth_classes: np.array([1]),
fields.InputDataFields.groundtruth_instance_masks: masks1,
fields.InputDataFields.groundtruth_verified_neg_classes:
np.array([0, 0, 0, 0]),
fields.InputDataFields.groundtruth_not_exhaustive_classes:
np.array([0, 0, 0, 0])
})
lvis_evaluator.add_single_detected_image_info(
image_id=1,
detections_dict={
fields.DetectionResultFields.detection_masks: masks1,
fields.DetectionResultFields.detection_scores:
np.array([.8]),
fields.DetectionResultFields.detection_classes:
np.array([1])
})
lvis_evaluator.add_single_ground_truth_image_info(
image_id=2,
groundtruth_dict={
fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
fields.InputDataFields.groundtruth_classes: np.array([1]),
fields.InputDataFields.groundtruth_instance_masks: masks2,
fields.InputDataFields.groundtruth_verified_neg_classes:
np.array([0, 0, 0, 0]),
fields.InputDataFields.groundtruth_not_exhaustive_classes:
np.array([0, 0, 0, 0])
})
lvis_evaluator.add_single_detected_image_info(
image_id=2,
detections_dict={
fields.DetectionResultFields.detection_masks: masks2,
fields.DetectionResultFields.detection_scores:
np.array([.8]),
fields.DetectionResultFields.detection_classes:
np.array([1])
})
lvis_evaluator.add_single_ground_truth_image_info(
image_id=3,
groundtruth_dict={
fields.InputDataFields.groundtruth_boxes:
np.array([[25., 25., 50., 50.]]),
fields.InputDataFields.groundtruth_classes: np.array([1]),
fields.InputDataFields.groundtruth_instance_masks: masks3,
fields.InputDataFields.groundtruth_verified_neg_classes:
np.array([0, 0, 0, 0]),
fields.InputDataFields.groundtruth_not_exhaustive_classes:
np.array([0, 0, 0, 0])
})
lvis_evaluator.add_single_detected_image_info(
image_id=3,
detections_dict={
fields.DetectionResultFields.detection_masks: masks3,
fields.DetectionResultFields.detection_scores:
np.array([.8]),
fields.DetectionResultFields.detection_classes:
np.array([1])
})
metrics = lvis_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionMasks_AP'], 1.0)
@unittest.skipIf(tf_version.is_tf1(), 'Only Supported in TF2.X')
class LVISMaskEvaluationPyFuncTest(tf.test.TestCase):
def testAddEvalDict(self):
lvis_evaluator = lvis_evaluation.LVISMaskEvaluator(_get_categories_list())
image_id = tf.constant(1, dtype=tf.int32)
groundtruth_boxes = tf.constant(
np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]),
dtype=tf.float32)
groundtruth_classes = tf.constant(np.array([1, 2]), dtype=tf.float32)
groundtruth_masks = tf.constant(np.stack([
np.pad(np.ones([100, 100], dtype=np.uint8), ((10, 10), (10, 10)),
mode='constant'),
np.pad(np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)),
mode='constant')
]), dtype=tf.uint8)
original_image_spatial_shapes = tf.constant([[120, 120], [120, 120]],
dtype=tf.int32)
groundtruth_verified_neg_classes = tf.constant(np.array([0, 0, 0, 0]),
dtype=tf.float32)
groundtruth_not_exhaustive_classes = tf.constant(np.array([0, 0, 0, 0]),
dtype=tf.float32)
detection_scores = tf.constant(np.array([.9, .8]), dtype=tf.float32)
detection_classes = tf.constant(np.array([2, 1]), dtype=tf.float32)
detection_masks = tf.constant(np.stack([
np.pad(np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)),
mode='constant'),
np.pad(np.ones([100, 100], dtype=np.uint8), ((10, 10), (10, 10)),
mode='constant'),
]), dtype=tf.uint8)
input_data_fields = fields.InputDataFields
detection_fields = fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_instance_masks: groundtruth_masks,
input_data_fields.groundtruth_verified_neg_classes:
groundtruth_verified_neg_classes,
input_data_fields.groundtruth_not_exhaustive_classes:
groundtruth_not_exhaustive_classes,
input_data_fields.original_image_spatial_shape:
original_image_spatial_shapes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks
}
lvis_evaluator.add_eval_dict(eval_dict)
self.assertLen(lvis_evaluator._groundtruth_list, 2)
self.assertLen(lvis_evaluator._detection_masks_list, 2)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/lvis_evaluation_test.py | lvis_evaluation_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto parser for data loading.
A parser to decode data containing serialized tensorflow.Example
protos into materialized tensors (numpy arrays).
"""
import numpy as np
from object_detection.core import data_parser
from object_detection.core import standard_fields as fields
class FloatParser(data_parser.DataToNumpyParser):
"""Tensorflow Example float parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return np.array(
tf_example.features.feature[self.field_name].float_list.value,
        dtype=float).transpose() if tf_example.features.feature[
self.field_name].HasField("float_list") else None
class StringParser(data_parser.DataToNumpyParser):
"""Tensorflow Example string parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return b"".join(tf_example.features.feature[
self.field_name].bytes_list.value) if tf_example.features.feature[
self.field_name].HasField("bytes_list") else None
class Int64Parser(data_parser.DataToNumpyParser):
"""Tensorflow Example int64 parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return np.array(
tf_example.features.feature[self.field_name].int64_list.value,
dtype=np.int64).transpose() if tf_example.features.feature[
self.field_name].HasField("int64_list") else None
class BoundingBoxParser(data_parser.DataToNumpyParser):
"""Tensorflow Example bounding box parser."""
def __init__(self, xmin_field_name, ymin_field_name, xmax_field_name,
ymax_field_name):
self.field_names = [
ymin_field_name, xmin_field_name, ymax_field_name, xmax_field_name
]
def parse(self, tf_example):
result = []
parsed = True
for field_name in self.field_names:
result.append(tf_example.features.feature[field_name].float_list.value)
parsed &= (
tf_example.features.feature[field_name].HasField("float_list"))
return np.array(result).transpose() if parsed else None
class TfExampleDetectionAndGTParser(data_parser.DataToNumpyParser):
"""Tensorflow Example proto parser."""
def __init__(self):
self.items_to_handlers = {
fields.DetectionResultFields.key:
StringParser(fields.TfExampleFields.source_id),
# Object ground truth boxes and classes.
fields.InputDataFields.groundtruth_boxes: (BoundingBoxParser(
fields.TfExampleFields.object_bbox_xmin,
fields.TfExampleFields.object_bbox_ymin,
fields.TfExampleFields.object_bbox_xmax,
fields.TfExampleFields.object_bbox_ymax)),
fields.InputDataFields.groundtruth_classes: (
Int64Parser(fields.TfExampleFields.object_class_label)),
# Object detections.
fields.DetectionResultFields.detection_boxes: (BoundingBoxParser(
fields.TfExampleFields.detection_bbox_xmin,
fields.TfExampleFields.detection_bbox_ymin,
fields.TfExampleFields.detection_bbox_xmax,
fields.TfExampleFields.detection_bbox_ymax)),
fields.DetectionResultFields.detection_classes: (
Int64Parser(fields.TfExampleFields.detection_class_label)),
fields.DetectionResultFields.detection_scores: (
FloatParser(fields.TfExampleFields.detection_score)),
}
self.optional_items_to_handlers = {
fields.InputDataFields.groundtruth_difficult:
Int64Parser(fields.TfExampleFields.object_difficult),
fields.InputDataFields.groundtruth_group_of:
Int64Parser(fields.TfExampleFields.object_group_of),
fields.InputDataFields.groundtruth_image_classes:
Int64Parser(fields.TfExampleFields.image_class_label),
}
def parse(self, tf_example):
"""Parses tensorflow example and returns a tensor dictionary.
Args:
tf_example: a tf.Example object.
Returns:
A dictionary of the following numpy arrays:
fields.DetectionResultFields.source_id - string containing original image
id.
fields.InputDataFields.groundtruth_boxes - a numpy array containing
groundtruth boxes.
fields.InputDataFields.groundtruth_classes - a numpy array containing
groundtruth classes.
fields.InputDataFields.groundtruth_group_of - a numpy array containing
groundtruth group of flag (optional, None if not specified).
fields.InputDataFields.groundtruth_difficult - a numpy array containing
groundtruth difficult flag (optional, None if not specified).
fields.InputDataFields.groundtruth_image_classes - a numpy array
containing groundtruth image-level labels.
fields.DetectionResultFields.detection_boxes - a numpy array containing
detection boxes.
fields.DetectionResultFields.detection_classes - a numpy array containing
detection class labels.
fields.DetectionResultFields.detection_scores - a numpy array containing
detection scores.
Returns None if tf.Example was not parsed or non-optional fields were not
found.
"""
results_dict = {}
parsed = True
for key, parser in self.items_to_handlers.items():
results_dict[key] = parser.parse(tf_example)
parsed &= (results_dict[key] is not None)
for key, parser in self.optional_items_to_handlers.items():
results_dict[key] = parser.parse(tf_example)
return results_dict if parsed else None
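# Illustrative sketch (not part of the original module) of feeding a
# tf.train.Example through the parser; the field-name constants are the same
# ones referenced in items_to_handlers above:
#
#   import tensorflow.compat.v1 as tf
#   from object_detection.core import standard_fields as fields
#   example = tf.train.Example()
#   feat = example.features.feature
#   feat[fields.TfExampleFields.source_id].bytes_list.value.append(b'img_0')
#   for name, val in [(fields.TfExampleFields.object_bbox_xmin, 0.1),
#                     (fields.TfExampleFields.object_bbox_ymin, 0.2),
#                     (fields.TfExampleFields.object_bbox_xmax, 0.4),
#                     (fields.TfExampleFields.object_bbox_ymax, 0.5)]:
#     feat[name].float_list.value.append(val)
#   feat[fields.TfExampleFields.object_class_label].int64_list.value.append(1)
#   # ...populate the detection_* fields the same way, then:
#   parsed = TfExampleDetectionAndGTParser().parse(example)
#   # parse() returns None until every non-optional field is present.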
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/tf_example_parser.py | tf_example_parser.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs evaluation using OpenImages groundtruth and predictions.
Example usage:
python \
models/research/object_detection/metrics/oid_vrd_challenge_evaluation.py \
--input_annotations_vrd=/path/to/input/annotations-human-bbox.csv \
--input_annotations_labels=/path/to/input/annotations-label.csv \
--input_class_labelmap=/path/to/input/class_labelmap.pbtxt \
--input_relationship_labelmap=/path/to/input/relationship_labelmap.pbtxt \
--input_predictions=/path/to/input/predictions.csv \
--output_metrics=/path/to/output/metric.csv \
CSVs with bounding box annotations and image labels (including the image URLs)
can be downloaded from the Open Images Challenge website:
https://storage.googleapis.com/openimages/web/challenge.html
The format of the input CSVs and the metrics themselves are described on the
challenge website.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import pandas as pd
from google.protobuf import text_format
from object_detection.metrics import io_utils
from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils
from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import vrd_evaluation
def _load_labelmap(labelmap_path):
"""Loads labelmap from the labelmap path.
Args:
labelmap_path: Path to the labelmap.
Returns:
A dictionary mapping class name to class numerical id.
"""
label_map = string_int_label_map_pb2.StringIntLabelMap()
with open(labelmap_path, 'r') as fid:
label_map_string = fid.read()
text_format.Merge(label_map_string, label_map)
labelmap_dict = {}
for item in label_map.item:
labelmap_dict[item.name] = item.id
return labelmap_dict
def _swap_labelmap_dict(labelmap_dict):
  """Swaps keys and values in the labelmap dictionary.
  Args:
    labelmap_dict: Input dictionary mapping class names to numerical ids.
  Returns:
    A dictionary mapping class numerical id to class name.
  """
  return dict((v, k) for k, v in labelmap_dict.items())
def main(parsed_args):
all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
all_annotations = pd.concat([all_box_annotations, all_label_annotations])
class_label_map = _load_labelmap(parsed_args.input_class_labelmap)
relationship_label_map = _load_labelmap(
parsed_args.input_relationship_labelmap)
relation_evaluator = vrd_evaluation.VRDRelationDetectionEvaluator()
phrase_evaluator = vrd_evaluation.VRDPhraseDetectionEvaluator()
  for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary(
image_groundtruth, class_label_map, relationship_label_map)
relation_evaluator.add_single_ground_truth_image_info(
image_id, groundtruth_dictionary)
phrase_evaluator.add_single_ground_truth_image_info(image_id,
groundtruth_dictionary)
all_predictions = pd.read_csv(parsed_args.input_predictions)
  for image_id, image_predictions in all_predictions.groupby('ImageID'):
prediction_dictionary = utils.build_predictions_vrd_dictionary(
image_predictions, class_label_map, relationship_label_map)
relation_evaluator.add_single_detected_image_info(image_id,
prediction_dictionary)
phrase_evaluator.add_single_detected_image_info(image_id,
prediction_dictionary)
relation_metrics = relation_evaluator.evaluate(
relationships=_swap_labelmap_dict(relationship_label_map))
phrase_metrics = phrase_evaluator.evaluate(
relationships=_swap_labelmap_dict(relationship_label_map))
with open(parsed_args.output_metrics, 'w') as fid:
io_utils.write_csv(fid, relation_metrics)
io_utils.write_csv(fid, phrase_metrics)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=
'Evaluate Open Images Visual Relationship Detection predictions.')
parser.add_argument(
'--input_annotations_vrd',
required=True,
help='File with groundtruth vrd annotations.')
parser.add_argument(
'--input_annotations_labels',
required=True,
help='File with groundtruth labels annotations')
parser.add_argument(
'--input_predictions',
required=True,
help="""File with detection predictions; NOTE: no postprocessing is
applied in the evaluation script.""")
parser.add_argument(
'--input_class_labelmap',
required=True,
help="""OpenImages Challenge labelmap; note: it is expected to include
attributes.""")
parser.add_argument(
'--input_relationship_labelmap',
required=True,
help="""OpenImages Challenge relationship labelmap.""")
parser.add_argument(
'--output_metrics', required=True, help='Output file with csv metrics')
args = parser.parse_args()
main(args)
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/metrics/oid_vrd_challenge_evaluation.py | oid_vrd_challenge_evaluation.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for oid_vrd_challenge_evaluation_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils
from object_detection.utils import vrd_evaluation
class OidVrdChallengeEvaluationUtilsTest(tf.test.TestCase):
def testBuildGroundtruthDictionary(self):
np_data = pd.DataFrame(
[[
'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.3, 0.5, 0.6,
0.0, 0.3, 0.5, 0.6, 'is', None, None
], [
'fe58ec1b06db2bb7', '/m/04bcr3', '/m/02gy9n', 0.0, 0.3, 0.5, 0.6,
0.1, 0.2, 0.3, 0.4, 'under', None, None
], [
'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.1, 0.2, 0.3,
0.0, 0.1, 0.2, 0.3, 'is', None, None
], [
'fe58ec1b06db2bb7', '/m/083vt', '/m/04bcr3', 0.1, 0.2, 0.3, 0.4,
0.5, 0.6, 0.7, 0.8, 'at', None, None
], [
'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None,
None, None, None, '/m/04bcr3', 1.0
], [
'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None,
None, None, None, '/m/083vt', 0.0
], [
'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None,
None, None, None, '/m/02gy9n', 0.0
]],
columns=[
'ImageID', 'LabelName1', 'LabelName2', 'XMin1', 'XMax1', 'YMin1',
'YMax1', 'XMin2', 'XMax2', 'YMin2', 'YMax2', 'RelationshipLabel',
'LabelName', 'Confidence'
])
class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
relationship_label_map = {'is': 1, 'under': 2, 'at': 3}
groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary(
np_data, class_label_map, relationship_label_map)
self.assertTrue(standard_fields.InputDataFields.groundtruth_boxes in
groundtruth_dictionary)
self.assertTrue(standard_fields.InputDataFields.groundtruth_classes in
groundtruth_dictionary)
self.assertTrue(standard_fields.InputDataFields.groundtruth_image_classes in
groundtruth_dictionary)
self.assertAllEqual(
np.array(
[(1, 2, 1), (1, 3, 2), (1, 2, 1), (2, 1, 3)],
dtype=vrd_evaluation.label_data_type), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_classes])
expected_vrd_data = np.array(
[
([0.5, 0.0, 0.6, 0.3], [0.5, 0.0, 0.6, 0.3]),
([0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]),
([0.2, 0.0, 0.3, 0.1], [0.2, 0.0, 0.3, 0.1]),
([0.3, 0.1, 0.4, 0.2], [0.7, 0.5, 0.8, 0.6]),
],
dtype=vrd_evaluation.vrd_box_data_type)
for field in expected_vrd_data.dtype.fields:
self.assertNDArrayNear(
expected_vrd_data[field], groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_boxes][field], 1e-5)
self.assertAllEqual(
np.array([1, 2, 3]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_image_classes])
def testBuildPredictionDictionary(self):
np_data = pd.DataFrame(
[[
'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.3, 0.5, 0.6,
0.0, 0.3, 0.5, 0.6, 'is', 0.1
], [
'fe58ec1b06db2bb7', '/m/04bcr3', '/m/02gy9n', 0.0, 0.3, 0.5, 0.6,
0.1, 0.2, 0.3, 0.4, 'under', 0.2
], [
'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.1, 0.2, 0.3,
0.0, 0.1, 0.2, 0.3, 'is', 0.3
], [
'fe58ec1b06db2bb7', '/m/083vt', '/m/04bcr3', 0.1, 0.2, 0.3, 0.4,
0.5, 0.6, 0.7, 0.8, 'at', 0.4
]],
columns=[
'ImageID', 'LabelName1', 'LabelName2', 'XMin1', 'XMax1', 'YMin1',
'YMax1', 'XMin2', 'XMax2', 'YMin2', 'YMax2', 'RelationshipLabel',
'Score'
])
class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
relationship_label_map = {'is': 1, 'under': 2, 'at': 3}
prediction_dictionary = utils.build_predictions_vrd_dictionary(
np_data, class_label_map, relationship_label_map)
    self.assertIn(standard_fields.DetectionResultFields.detection_boxes,
                  prediction_dictionary)
    self.assertIn(standard_fields.DetectionResultFields.detection_classes,
                  prediction_dictionary)
    self.assertIn(standard_fields.DetectionResultFields.detection_scores,
                  prediction_dictionary)
self.assertAllEqual(
np.array(
[(1, 2, 1), (1, 3, 2), (1, 2, 1), (2, 1, 3)],
dtype=vrd_evaluation.label_data_type), prediction_dictionary[
standard_fields.DetectionResultFields.detection_classes])
expected_vrd_data = np.array(
[
([0.5, 0.0, 0.6, 0.3], [0.5, 0.0, 0.6, 0.3]),
([0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]),
([0.2, 0.0, 0.3, 0.1], [0.2, 0.0, 0.3, 0.1]),
([0.3, 0.1, 0.4, 0.2], [0.7, 0.5, 0.8, 0.6]),
],
dtype=vrd_evaluation.vrd_box_data_type)
for field in expected_vrd_data.dtype.fields:
self.assertNDArrayNear(
expected_vrd_data[field], prediction_dictionary[
standard_fields.DetectionResultFields.detection_boxes][field],
1e-5)
self.assertNDArrayNear(
np.array([0.1, 0.2, 0.3, 0.4]), prediction_dictionary[
standard_fields.DetectionResultFields.detection_scores], 1e-5)
if __name__ == '__main__':
tf.test.main()
# ==== end of object_detection/metrics/oid_vrd_challenge_evaluation_utils_test.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities in offline_eval_map_corloc binary."""
import tensorflow.compat.v1 as tf
from object_detection.metrics import offline_eval_map_corloc as offline_eval
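# An illustrative, standalone sketch (hypothetical helper, unused by the
# tests) of the '@N' shard-expansion convention exercised below; the
# canonical logic lives in offline_eval._generate_sharded_filenames.
def _example_expand_shards(base, num_shards, suffix=''):
  """Expands a 'base@N'-style pattern into explicit shard names."""
  return ['%s-%05d-of-%05d%s' % (base, i, num_shards, suffix)
          for i in range(num_shards)]
# E.g. _example_expand_shards('/path/to/abc', 3) yields
# ['/path/to/abc-00000-of-00003', ..., '/path/to/abc-00002-of-00003'].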
class OfflineEvalMapCorlocTest(tf.test.TestCase):
def test_generateShardedFilenames(self):
test_filename = '/path/to/file'
result = offline_eval._generate_sharded_filenames(test_filename)
self.assertEqual(result, [test_filename])
test_filename = '/path/to/file-00000-of-00050'
result = offline_eval._generate_sharded_filenames(test_filename)
self.assertEqual(result, [test_filename])
result = offline_eval._generate_sharded_filenames('/path/to/@3.record')
self.assertEqual(result, [
'/path/to/-00000-of-00003.record', '/path/to/-00001-of-00003.record',
'/path/to/-00002-of-00003.record'
])
result = offline_eval._generate_sharded_filenames('/path/to/abc@3')
self.assertEqual(result, [
'/path/to/abc-00000-of-00003', '/path/to/abc-00001-of-00003',
'/path/to/abc-00002-of-00003'
])
result = offline_eval._generate_sharded_filenames('/path/to/@1')
self.assertEqual(result, ['/path/to/-00000-of-00001'])
def test_generateFilenames(self):
test_filenames = ['/path/to/file', '/path/to/@3.record']
result = offline_eval._generate_filenames(test_filenames)
self.assertEqual(result, [
'/path/to/file', '/path/to/-00000-of-00003.record',
'/path/to/-00001-of-00003.record', '/path/to/-00002-of-00003.record'
])
if __name__ == '__main__':
tf.test.main()
# ==== end of object_detection/metrics/offline_eval_map_corloc_test.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.data_decoders.tf_example_decoder."""
import os
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import test_case
class TfExampleDecoderTest(test_case.TestCase):
def _create_encoded_and_decoded_data(self, data, encoding_type):
if encoding_type == 'jpeg':
encode_fn = tf.image.encode_jpeg
decode_fn = tf.image.decode_jpeg
elif encoding_type == 'png':
encode_fn = tf.image.encode_png
decode_fn = tf.image.decode_png
else:
raise ValueError('Invalid encoding type.')
def prepare_data_fn():
encoded_data = encode_fn(data)
decoded_data = decode_fn(encoded_data)
return encoded_data, decoded_data
return self.execute_cpu(prepare_data_fn, [])
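  # The helper above round-trips data through real TF encode/decode ops (run
  # on CPU via the base test case), so the tests feed the decoder the exact
  # byte streams it would see when reading TFRecords.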
def testDecodeAdditionalChannels(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg')
additional_channel = np.random.randint(256, size=(4, 5, 1)).astype(np.uint8)
(encoded_additional_channel,
decoded_additional_channel) = self._create_encoded_and_decoded_data(
additional_channel, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/additional_channels/encoded':
dataset_util.bytes_list_feature(
[encoded_additional_channel] * 2),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_additional_channels=2)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
np.concatenate([decoded_additional_channel] * 2, axis=2),
tensor_dict[fields.InputDataFields.image_additional_channels])
def testDecodeJpegImage(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, decoded_jpeg = self._create_encoded_and_decoded_data(
image, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.image].get_shape().as_list()),
[None, None, 3])
self.assertAllEqual(
(output[fields.InputDataFields.original_image_spatial_shape]
.get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
    self.assertAllEqual(
        [4, 5],
        tensor_dict[fields.InputDataFields.original_image_spatial_shape])
self.assertEqual(
six.b('image_id'), tensor_dict[fields.InputDataFields.source_id])
def testDecodeImageKeyAndFilename(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/key/sha256':
dataset_util.bytes_feature(six.b('abc')),
'image/filename':
dataset_util.bytes_feature(six.b('filename'))
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertEqual(six.b('abc'), tensor_dict[fields.InputDataFields.key])
self.assertEqual(
six.b('filename'), tensor_dict[fields.InputDataFields.filename])
def testDecodePngImage(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png, decoded_png = self._create_encoded_and_decoded_data(
image, 'png')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id'))
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.image].get_shape().as_list()),
[None, None, 3])
self.assertAllEqual(
(output[fields.InputDataFields.original_image_spatial_shape]
.get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image])
    self.assertAllEqual(
        [4, 5],
        tensor_dict[fields.InputDataFields.original_image_spatial_shape])
self.assertEqual(
six.b('image_id'), tensor_dict[fields.InputDataFields.source_id])
def testDecodePngInstanceMasks(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png, _ = self._create_encoded_and_decoded_data(image, 'png')
mask_1 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
mask_2 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
encoded_png_1, _ = self._create_encoded_and_decoded_data(mask_1, 'png')
decoded_png_1 = np.squeeze(mask_1.astype(np.float32))
encoded_png_2, _ = self._create_encoded_and_decoded_data(mask_2, 'png')
decoded_png_2 = np.squeeze(mask_2.astype(np.float32))
encoded_masks = [encoded_png_1, encoded_png_2]
decoded_masks = np.stack([decoded_png_1, decoded_png_2])
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True,
instance_mask_type=input_reader_pb2.PNG_MASKS)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
decoded_masks,
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
def testDecodeEmptyPngInstanceMasks(self):
image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
encoded_png, _ = self._create_encoded_and_decoded_data(image_tensor, 'png')
encoded_masks = []
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks),
'image/height':
dataset_util.int64_feature(10),
'image/width':
dataset_util.int64_feature(10),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True,
instance_mask_type=input_reader_pb2.PNG_MASKS)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
[0, 10, 10])
def testDecodeBoundingBox(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
def testDecodeKeypointDepth(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
keypoint_depths = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
keypoint_depth_weights = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/z':
dataset_util.float_list_feature(keypoint_depths),
'image/object/keypoint/z/weights':
dataset_util.float_list_feature(keypoint_depth_weights),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_keypoints=3, load_keypoint_depth_features=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_keypoint_depths].get_shape(
).as_list()), [2, 3])
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_keypoint_depth_weights]
.get_shape().as_list()), [2, 3])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_keypoint_depths = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
self.assertAllClose(
expected_keypoint_depths,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths])
expected_keypoint_depth_weights = [[1.0, 0.9, 0.8], [0.7, 0.6, 0.5]]
self.assertAllClose(
expected_keypoint_depth_weights,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depth_weights])
def testDecodeKeypointDepthNoDepth(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_keypoints=3, load_keypoint_depth_features=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_keypoints_depth_default = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
self.assertAllClose(
expected_keypoints_depth_default,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths])
self.assertAllClose(
expected_keypoints_depth_default,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depth_weights])
def testDecodeKeypoint(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[2, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
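    # Keypoints whose visibility is 0 are decoded with NaN coordinates.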
expected_keypoints = [
[[0.0, 1.0], [1.0, 2.0], [np.nan, np.nan]],
[[3.0, 4.0], [np.nan, np.nan], [5.0, 6.0]]]
self.assertAllClose(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = (
(np.array(keypoint_visibility) > 0).reshape((2, 3)))
self.assertAllEqual(
expected_visibility,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities])
def testDecodeKeypointNoVisibilities(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[2, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = (
np.vstack([keypoint_ys, keypoint_xs]).transpose().reshape((2, 3, 2)))
self.assertAllEqual(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = np.ones((2, 3))
self.assertAllEqual(
expected_visibility,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities])
def testDecodeDefaultGroundtruthWeights(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllClose(tensor_dict[fields.InputDataFields.groundtruth_weights],
np.ones(2, dtype=np.float32))
def testDecodeObjectLabel(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeMultiClassScores(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
flattened_multiclass_scores = [100., 50.] + [20., 30.]
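    # i.e. two boxes with two per-class scores each, flattened row-major.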
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/multiclass_scores':
dataset_util.float_list_feature(
flattened_multiclass_scores),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_multiclass_scores=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(flattened_multiclass_scores,
tensor_dict[fields.InputDataFields.multiclass_scores])
def testDecodeEmptyMultiClassScores(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_multiclass_scores=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertEqual(
(0,), tensor_dict[fields.InputDataFields.multiclass_scores].shape)
def testDecodeObjectLabelNoText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
# Annotation label gets overridden by labelmap id.
annotated_bbox_classes = [3, 4]
expected_bbox_classes = [1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/class/label':
dataset_util.int64_list_feature(annotated_bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(expected_bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('cheetah')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([2, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
display_name:'cat'
}
item {
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedNameWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('cheetah')]
bbox_classes_id = [5, 6]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes_id),
})).SerializeToString()
label_map_string = """
item {
name:'/m/cat'
id:3
display_name:'cat'
}
item {
name:'/m/dog'
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectArea(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_area = [100., 174.]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/area':
dataset_util.float_list_feature(object_area),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_area].get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(object_area,
tensor_dict[fields.InputDataFields.groundtruth_area])
def testDecodeVerifiedNegClasses(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
neg_category_ids = [0, 5, 8]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/neg_category_ids':
dataset_util.int64_list_feature(neg_category_ids),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
neg_category_ids,
tensor_dict[fields.InputDataFields.groundtruth_verified_neg_classes])
def testDecodeNotExhaustiveClasses(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
not_exhaustive_category_ids = [0, 5, 8]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/not_exhaustive_category_ids':
dataset_util.int64_list_feature(
not_exhaustive_category_ids),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
not_exhaustive_category_ids,
tensor_dict[fields.InputDataFields.groundtruth_not_exhaustive_classes])
def testDecodeObjectIsCrowd(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_is_crowd = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/is_crowd':
dataset_util.int64_list_feature(object_is_crowd),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_is_crowd],
tensor_dict[fields.InputDataFields.groundtruth_is_crowd])
def testDecodeObjectDifficult(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_difficult = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/difficult':
dataset_util.int64_list_feature(object_difficult),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_difficult].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_difficult],
tensor_dict[fields.InputDataFields.groundtruth_difficult])
def testDecodeObjectGroupOf(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_group_of = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/group_of':
dataset_util.int64_list_feature(object_group_of),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_group_of].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_group_of],
tensor_dict[fields.InputDataFields.groundtruth_group_of])
def testDecodeObjectWeight(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_weights = [0.75, 1.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/weight':
dataset_util.float_list_feature(object_weights),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_weights].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(object_weights,
tensor_dict[fields.InputDataFields.groundtruth_weights])
def testDecodeClassConfidence(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
class_confidence = [0.0, 1.0, 0.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/class/confidence':
dataset_util.float_list_feature(class_confidence),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_image_confidences]
.get_shape().as_list()), [3])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
class_confidence,
tensor_dict[fields.InputDataFields.groundtruth_image_confidences])
def testDecodeInstanceSegmentation(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_instance_masks].get_shape(
).as_list()), [4, 5, 3])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
instance_masks.astype(np.float32),
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_instance_mask_weights],
[1, 1, 1, 1])
self.assertAllEqual(object_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testInstancesNotAvailableByDefault(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
tensor_dict)
def testDecodeInstanceSegmentationWithWeights(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
instance_mask_weights = np.array([1, 1, 0, 1], dtype=np.float32)
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/mask/weight':
dataset_util.float_list_feature(instance_mask_weights),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_instance_masks].get_shape(
).as_list()), [4, 5, 3])
self.assertAllEqual(
output[fields.InputDataFields.groundtruth_instance_mask_weights],
[1, 1, 0, 1])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
instance_masks.astype(np.float32),
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
self.assertAllEqual(object_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeImageLabels(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
def graph_fn_1():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature(six.b('jpeg')),
'image/class/label': dataset_util.int64_list_feature([1, 2]),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn_1, [])
self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 2]))
def graph_fn_2():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/class/text':
dataset_util.bytes_list_feature(
[six.b('dog'), six.b('cat')]),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn_2, [])
self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 3]))
def testDecodeContextFeatures(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
num_features = 8
context_feature_length = 10
    context_features = np.random.random(num_features * context_feature_length)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_feature(context_feature_length),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_context_features=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllClose(
context_features.reshape(num_features, context_feature_length),
tensor_dict[fields.InputDataFields.context_features])
self.assertAllEqual(
context_feature_length,
tensor_dict[fields.InputDataFields.context_feature_length])
def testContextFeaturesNotAvailableByDefault(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
num_features = 10
context_feature_length = 10
    context_features = np.random.random(num_features * context_feature_length)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_feature(context_feature_length),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertNotIn(fields.InputDataFields.context_features,
tensor_dict)
def testExpandLabels(self):
label_map_string = """
item {
id:1
name:'cat'
ancestor_ids: 2
}
item {
id:2
name:'animal'
descendant_ids: 1
}
item {
id:3
name:'man'
ancestor_ids: 5
}
item {
id:4
name:'woman'
display_name:'woman'
ancestor_ids: 5
}
item {
id:5
name:'person'
descendant_ids: 3
descendant_ids: 4
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
bbox_classes_text = [six.b('cat'), six.b('cat')]
bbox_group_of = [0, 1]
image_class_text = [six.b('cat'), six.b('person')]
image_confidence = [1.0, 0.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/group_of':
dataset_util.int64_list_feature(bbox_group_of),
'image/class/text':
dataset_util.bytes_list_feature(image_class_text),
'image/class/confidence':
dataset_util.float_list_feature(image_confidence),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path, expand_hierarchy_labels=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
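    # Hierarchy expansion duplicates each 'cat' box under its ancestor class
    # 'animal' (copying the group_of flag), and propagates image-level
    # verifications: 'cat' (confidence 1.0) also verifies 'animal', while
    # 'person' (confidence 0.0) propagates 0.0 to 'man' and 'woman'.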
expected_boxes = np.stack(
[boxes[0, :], boxes[0, :], boxes[1, :], boxes[1, :]], axis=0)
expected_boxes_class = np.array([1, 2, 1, 2])
expected_boxes_group_of = np.array([0, 0, 1, 1])
expected_image_class = np.array([1, 2, 3, 4, 5])
expected_image_confidence = np.array([1.0, 1.0, 0.0, 0.0, 0.0])
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
self.assertAllEqual(expected_boxes_class,
tensor_dict[fields.InputDataFields.groundtruth_classes])
self.assertAllEqual(
expected_boxes_group_of,
tensor_dict[fields.InputDataFields.groundtruth_group_of])
self.assertAllEqual(
expected_image_class,
tensor_dict[fields.InputDataFields.groundtruth_image_classes])
self.assertAllEqual(
expected_image_confidence,
tensor_dict[fields.InputDataFields.groundtruth_image_confidences])
def testDecodeDensePose(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0, 2.0]
bbox_xmins = [1.0, 5.0, 8.0]
bbox_ymaxs = [2.0, 6.0, 1.0]
bbox_xmaxs = [3.0, 7.0, 3.3]
densepose_num = [0, 4, 2]
densepose_part_index = [2, 2, 3, 4, 2, 9]
densepose_x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
densepose_y = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
densepose_u = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06]
densepose_v = [0.99, 0.98, 0.97, 0.96, 0.95, 0.94]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/densepose/num':
dataset_util.int64_list_feature(densepose_num),
'image/object/densepose/part_index':
dataset_util.int64_list_feature(densepose_part_index),
'image/object/densepose/x':
dataset_util.float_list_feature(densepose_x),
'image/object/densepose/y':
dataset_util.float_list_feature(densepose_y),
'image/object/densepose/u':
dataset_util.float_list_feature(densepose_u),
'image/object/densepose/v':
dataset_util.float_list_feature(densepose_v),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_dense_pose=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
dp_num_points = output[fields.InputDataFields.groundtruth_dp_num_points]
dp_part_ids = output[fields.InputDataFields.groundtruth_dp_part_ids]
dp_surface_coords = output[
fields.InputDataFields.groundtruth_dp_surface_coords]
return dp_num_points, dp_part_ids, dp_surface_coords
dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu(
graph_fn, [])
expected_dp_num_points = [0, 4, 2]
expected_dp_part_ids = [
[0, 0, 0, 0],
[2, 2, 3, 4],
[2, 9, 0, 0]
]
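    # Each surface coordinate is packed as (y, x, v, u) per point; note the
    # (v, u) ordering relative to the input features, as the expected values
    # below show.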
expected_dp_surface_coords = np.array(
[
# Instance 0 (no points).
[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
# Instance 1 (4 points).
[[0.9, 0.1, 0.99, 0.01],
[0.8, 0.2, 0.98, 0.02],
[0.7, 0.3, 0.97, 0.03],
[0.6, 0.4, 0.96, 0.04]],
# Instance 2 (2 points).
[[0.5, 0.5, 0.95, 0.05],
[0.4, 0.6, 0.94, 0.06],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
], dtype=np.float32)
self.assertAllEqual(dp_num_points, expected_dp_num_points)
self.assertAllEqual(dp_part_ids, expected_dp_part_ids)
self.assertAllClose(dp_surface_coords, expected_dp_surface_coords)
def testDecodeTrack(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0, 2.0]
bbox_xmins = [1.0, 5.0, 8.0]
bbox_ymaxs = [2.0, 6.0, 1.0]
bbox_xmaxs = [3.0, 7.0, 3.3]
track_labels = [0, 1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/track/label':
dataset_util.int64_list_feature(track_labels),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_track_id=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
track_ids = output[fields.InputDataFields.groundtruth_track_ids]
return track_ids
track_ids = self.execute_cpu(graph_fn, [])
expected_track_labels = [0, 1, 2]
self.assertAllEqual(track_ids, expected_track_labels)
if __name__ == '__main__':
tf.test.main()
# ==== end of object_detection/data_decoders/tf_example_decoder_test.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from tf_slim import tfexample_decoder as slim_example_decoder
from object_detection.core import data_decoder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import label_map_util
from object_detection.utils import shape_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import lookup as contrib_lookup
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
_LABEL_OFFSET = 1
class Visibility(enum.Enum):
"""Visibility definitions.
  This follows the MS COCO convention (http://cocodataset.org/#format-data).
"""
# Keypoint is not labeled.
UNLABELED = 0
# Keypoint is labeled but falls outside the object segment (e.g. occluded).
NOT_VISIBLE = 1
# Keypoint is labeled and visible.
VISIBLE = 2
class _ClassTensorHandler(slim_example_decoder.Tensor):
"""An ItemHandler to fetch class ids from class text."""
def __init__(self,
tensor_key,
label_map_proto_file,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
label_map_proto_file: File path to a text format LabelMapProto message
mapping class text to id.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=False)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
try:
# Dynamically try to load the tf v2 lookup, falling back to contrib
lookup = tf.compat.v2.lookup
hash_table_class = tf.compat.v2.lookup.StaticHashTable
except AttributeError:
lookup = contrib_lookup
hash_table_class = contrib_lookup.HashTable
name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(name_to_id.keys())),
values=tf.constant(list(name_to_id.values()), dtype=tf.int64)),
default_value=-1)
display_name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=True)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
display_name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(display_name_to_id.keys())),
values=tf.constant(
list(display_name_to_id.values()), dtype=tf.int64)),
default_value=-1)
self._name_to_id_table = name_to_id_table
self._display_name_to_id_table = display_name_to_id_table
super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(_ClassTensorHandler,
self).tensors_to_item(keys_to_tensors)
return tf.maximum(self._name_to_id_table.lookup(unmapped_tensor),
self._display_name_to_id_table.lookup(unmapped_tensor))
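# A minimal, self-contained sketch (not part of the original module) of the
# fallback implemented above: both tables miss with -1, so `tf.maximum`
# picks whichever of the `name` or `display_name` lookups hit. Assumes the
# TF2 lookup API is available; the table contents are illustrative.
def _class_lookup_fallback_sketch():
  lookup = tf.compat.v2.lookup
  name_table = lookup.StaticHashTable(
      lookup.KeyValueTensorInitializer(
          keys=tf.constant(['dog']),
          values=tf.constant([1], dtype=tf.int64)),
      default_value=-1)
  display_name_table = lookup.StaticHashTable(
      lookup.KeyValueTensorInitializer(
          keys=tf.constant(['Dog']),
          values=tf.constant([1], dtype=tf.int64)),
      default_value=-1)
  text = tf.constant(['dog', 'Dog', 'zebra'])
  # 'dog' hits the name table, 'Dog' hits the display-name table, and
  # 'zebra' misses both and keeps the default: the result is [1, 1, -1].
  return tf.maximum(name_table.lookup(text), display_name_table.lookup(text))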
class TfExampleDecoder(data_decoder.DataDecoder):
"""Tensorflow Example proto decoder."""
def __init__(self,
load_instance_masks=False,
instance_mask_type=input_reader_pb2.NUMERICAL_MASKS,
label_map_proto_file=None,
use_display_name=False,
dct_method='',
num_keypoints=0,
num_additional_channels=0,
load_multiclass_scores=False,
load_context_features=False,
expand_hierarchy_labels=False,
load_dense_pose=False,
load_track_id=False,
load_keypoint_depth_features=False):
"""Constructor sets keys_to_features and items_to_handlers.
Args:
load_instance_masks: whether or not to load and handle instance masks.
instance_mask_type: type of instance masks. Options are provided in
input_reader.proto. This is only used if `load_instance_masks` is True.
      label_map_proto_file: a file path to an
        object_detection.protos.StringIntLabelMap proto. If provided, then the
mapped IDs of 'image/object/class/text' will take precedence over the
existing 'image/object/class/label' ID. Also, if provided, it is
assumed that 'image/object/class/text' will be in the data.
use_display_name: whether or not to use the `display_name` for label
mapping (instead of `name`). Only used if label_map_proto_file is
provided.
      dct_method: An optional string. Defaults to the empty string. It only
        takes effect when the image format is jpeg, and specifies a hint about
        the algorithm used for jpeg decompression. Currently valid values
        are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored, for
        example, when the jpeg library does not have that specific option.
num_keypoints: the number of keypoints per object.
num_additional_channels: how many additional channels to use.
load_multiclass_scores: Whether to load multiclass scores associated with
boxes.
load_context_features: Whether to load information from context_features,
to provide additional context to a detection model for training and/or
inference.
      expand_hierarchy_labels: Expands the object and image labels taking into
        account the provided hierarchy in the label_map_proto_file. For
        positive classes, the labels are extended to ancestors. For negative
        classes, the labels are expanded to descendants.
load_dense_pose: Whether to load DensePose annotations.
load_track_id: Whether to load tracking annotations.
load_keypoint_depth_features: Whether to load the keypoint depth features
including keypoint relative depths and weights. If this field is set to
True but no keypoint depth features are in the input tf.Example, then
default values will be populated.
    Raises:
      ValueError: If the `instance_mask_type` option is not one of
        input_reader_pb2.DEFAULT, input_reader_pb2.NUMERICAL_MASKS, or
        input_reader_pb2.PNG_MASKS.
      ValueError: If `expand_hierarchy_labels` is True, but the
        `label_map_proto_file` is not provided.
"""
# TODO(rathodv): delete unused `use_display_name` argument once we change
# other decoders to handle label maps similarly.
del use_display_name
self.keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/filename':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/key/sha256':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/source_id':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/height':
tf.FixedLenFeature((), tf.int64, default_value=1),
'image/width':
tf.FixedLenFeature((), tf.int64, default_value=1),
# Image-level labels.
'image/class/text':
tf.VarLenFeature(tf.string),
'image/class/label':
tf.VarLenFeature(tf.int64),
'image/neg_category_ids':
tf.VarLenFeature(tf.int64),
'image/not_exhaustive_category_ids':
tf.VarLenFeature(tf.int64),
'image/class/confidence':
tf.VarLenFeature(tf.float32),
# Object boxes and classes.
'image/object/bbox/xmin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(tf.float32),
'image/object/class/label':
tf.VarLenFeature(tf.int64),
'image/object/class/text':
tf.VarLenFeature(tf.string),
'image/object/area':
tf.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.VarLenFeature(tf.int64),
'image/object/difficult':
tf.VarLenFeature(tf.int64),
'image/object/group_of':
tf.VarLenFeature(tf.int64),
'image/object/weight':
tf.VarLenFeature(tf.float32),
}
# We are checking `dct_method` instead of passing it directly in order to
# ensure TF version 1.6 compatibility.
if dct_method:
image = slim_example_decoder.Image(
image_key='image/encoded',
format_key='image/format',
channels=3,
dct_method=dct_method)
additional_channel_image = slim_example_decoder.Image(
image_key='image/additional_channels/encoded',
format_key='image/format',
channels=1,
repeated=True,
dct_method=dct_method)
else:
image = slim_example_decoder.Image(
image_key='image/encoded', format_key='image/format', channels=3)
additional_channel_image = slim_example_decoder.Image(
image_key='image/additional_channels/encoded',
format_key='image/format',
channels=1,
repeated=True)
self.items_to_handlers = {
fields.InputDataFields.image:
image,
fields.InputDataFields.source_id: (
slim_example_decoder.Tensor('image/source_id')),
fields.InputDataFields.key: (
slim_example_decoder.Tensor('image/key/sha256')),
fields.InputDataFields.filename: (
slim_example_decoder.Tensor('image/filename')),
# Image-level labels.
fields.InputDataFields.groundtruth_image_confidences: (
slim_example_decoder.Tensor('image/class/confidence')),
fields.InputDataFields.groundtruth_verified_neg_classes: (
slim_example_decoder.Tensor('image/neg_category_ids')),
fields.InputDataFields.groundtruth_not_exhaustive_classes: (
slim_example_decoder.Tensor('image/not_exhaustive_category_ids')),
# Object boxes and classes.
fields.InputDataFields.groundtruth_boxes: (
slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/')),
fields.InputDataFields.groundtruth_area:
slim_example_decoder.Tensor('image/object/area'),
fields.InputDataFields.groundtruth_is_crowd: (
slim_example_decoder.Tensor('image/object/is_crowd')),
fields.InputDataFields.groundtruth_difficult: (
slim_example_decoder.Tensor('image/object/difficult')),
fields.InputDataFields.groundtruth_group_of: (
slim_example_decoder.Tensor('image/object/group_of')),
fields.InputDataFields.groundtruth_weights: (
slim_example_decoder.Tensor('image/object/weight')),
}
if load_multiclass_scores:
self.keys_to_features[
'image/object/class/multiclass_scores'] = tf.VarLenFeature(tf.float32)
self.items_to_handlers[fields.InputDataFields.multiclass_scores] = (
slim_example_decoder.Tensor('image/object/class/multiclass_scores'))
if load_context_features:
self.keys_to_features[
'image/context_features'] = tf.VarLenFeature(tf.float32)
self.items_to_handlers[fields.InputDataFields.context_features] = (
slim_example_decoder.ItemHandlerCallback(
['image/context_features', 'image/context_feature_length'],
self._reshape_context_features))
self.keys_to_features[
'image/context_feature_length'] = tf.FixedLenFeature((), tf.int64)
self.items_to_handlers[fields.InputDataFields.context_feature_length] = (
slim_example_decoder.Tensor('image/context_feature_length'))
if num_additional_channels > 0:
self.keys_to_features[
'image/additional_channels/encoded'] = tf.FixedLenFeature(
(num_additional_channels,), tf.string)
self.items_to_handlers[
fields.InputDataFields.
image_additional_channels] = additional_channel_image
self._num_keypoints = num_keypoints
if num_keypoints > 0:
self.keys_to_features['image/object/keypoint/x'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/y'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/visibility'] = (
tf.VarLenFeature(tf.int64))
self.items_to_handlers[fields.InputDataFields.groundtruth_keypoints] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/y', 'image/object/keypoint/x'],
self._reshape_keypoints))
kpt_vis_field = fields.InputDataFields.groundtruth_keypoint_visibilities
self.items_to_handlers[kpt_vis_field] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/x', 'image/object/keypoint/visibility'],
self._reshape_keypoint_visibilities))
if load_keypoint_depth_features:
self.keys_to_features['image/object/keypoint/z'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/keypoint/z/weights'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_keypoint_depths] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/x', 'image/object/keypoint/z'],
self._reshape_keypoint_depths))
self.items_to_handlers[
fields.InputDataFields.groundtruth_keypoint_depth_weights] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/keypoint/x',
'image/object/keypoint/z/weights'],
self._reshape_keypoint_depth_weights))
if load_instance_masks:
if instance_mask_type in (input_reader_pb2.DEFAULT,
input_reader_pb2.NUMERICAL_MASKS):
self.keys_to_features['image/object/mask'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_masks] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._reshape_instance_masks))
elif instance_mask_type == input_reader_pb2.PNG_MASKS:
self.keys_to_features['image/object/mask'] = tf.VarLenFeature(tf.string)
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_masks] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._decode_png_instance_masks))
else:
raise ValueError('Did not recognize the `instance_mask_type` option.')
self.keys_to_features['image/object/mask/weight'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_instance_mask_weights] = (
slim_example_decoder.Tensor('image/object/mask/weight'))
if load_dense_pose:
self.keys_to_features['image/object/densepose/num'] = (
tf.VarLenFeature(tf.int64))
self.keys_to_features['image/object/densepose/part_index'] = (
tf.VarLenFeature(tf.int64))
self.keys_to_features['image/object/densepose/x'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/y'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/u'] = (
tf.VarLenFeature(tf.float32))
self.keys_to_features['image/object/densepose/v'] = (
tf.VarLenFeature(tf.float32))
self.items_to_handlers[
fields.InputDataFields.groundtruth_dp_num_points] = (
slim_example_decoder.Tensor('image/object/densepose/num'))
self.items_to_handlers[fields.InputDataFields.groundtruth_dp_part_ids] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/densepose/part_index',
'image/object/densepose/num'], self._dense_pose_part_indices))
self.items_to_handlers[
fields.InputDataFields.groundtruth_dp_surface_coords] = (
slim_example_decoder.ItemHandlerCallback(
['image/object/densepose/x', 'image/object/densepose/y',
'image/object/densepose/u', 'image/object/densepose/v',
'image/object/densepose/num'],
self._dense_pose_surface_coordinates))
if load_track_id:
self.keys_to_features['image/object/track/label'] = (
tf.VarLenFeature(tf.int64))
self.items_to_handlers[
fields.InputDataFields.groundtruth_track_ids] = (
slim_example_decoder.Tensor('image/object/track/label'))
if label_map_proto_file:
# If the label_map_proto is provided, try to use it in conjunction with
# the class text, and fall back to a materialized ID.
label_handler = slim_example_decoder.BackupHandler(
_ClassTensorHandler(
'image/object/class/text', label_map_proto_file,
default_value=''),
slim_example_decoder.Tensor('image/object/class/label'))
image_label_handler = slim_example_decoder.BackupHandler(
_ClassTensorHandler(
fields.TfExampleFields.image_class_text,
label_map_proto_file,
default_value=''),
slim_example_decoder.Tensor(fields.TfExampleFields.image_class_label))
else:
label_handler = slim_example_decoder.Tensor('image/object/class/label')
image_label_handler = slim_example_decoder.Tensor(
fields.TfExampleFields.image_class_label)
self.items_to_handlers[
fields.InputDataFields.groundtruth_classes] = label_handler
self.items_to_handlers[
fields.InputDataFields.groundtruth_image_classes] = image_label_handler
self._expand_hierarchy_labels = expand_hierarchy_labels
self._ancestors_lut = None
self._descendants_lut = None
if expand_hierarchy_labels:
if label_map_proto_file:
ancestors_lut, descendants_lut = (
label_map_util.get_label_map_hierarchy_lut(label_map_proto_file,
True))
self._ancestors_lut = tf.constant(ancestors_lut, dtype=tf.int64)
self._descendants_lut = tf.constant(descendants_lut, dtype=tf.int64)
else:
raise ValueError('In order to expand labels, the label_map_proto_file '
'has to be provided.')
def decode(self, tf_example_string_tensor):
"""Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
tf_example_string_tensor: a string tensor holding a serialized tensorflow
example proto.
Returns:
A dictionary of the following tensors.
fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3]
containing image.
fields.InputDataFields.original_image_spatial_shape - 1D int32 tensor of
shape [2] containing shape of the image.
fields.InputDataFields.source_id - string tensor containing original
image id.
fields.InputDataFields.key - string tensor with unique sha256 hash key.
fields.InputDataFields.filename - string tensor with original dataset
filename.
fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape
[None, 4] containing box corners.
fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape
[None] containing classes for the boxes.
fields.InputDataFields.groundtruth_weights - 1D float32 tensor of
shape [None] indicating the weights of groundtruth boxes.
      fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape
        [None] containing object mask area in pixels squared.
fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape
[None] indicating if the boxes enclose a crowd.
Optional:
      fields.InputDataFields.groundtruth_image_confidences - 1D float tensor of
        shape [None] indicating whether a class is present in the image (1.0)
        or not present (0.0).
fields.InputDataFields.image_additional_channels - 3D uint8 tensor of
shape [None, None, num_additional_channels]. 1st dim is height; 2nd dim
is width; 3rd dim is the number of additional channels.
fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape
[None] indicating if the boxes represent `difficult` instances.
fields.InputDataFields.groundtruth_group_of - 1D bool tensor of shape
[None] indicating if the boxes represent `group_of` instances.
fields.InputDataFields.groundtruth_keypoints - 3D float32 tensor of
shape [None, num_keypoints, 2] containing keypoints, where the
coordinates of the keypoints are ordered (y, x).
      fields.InputDataFields.groundtruth_keypoint_visibilities - 2D bool
        tensor of shape [None, num_keypoints] containing keypoint visibilities.
fields.InputDataFields.groundtruth_instance_masks - 3D float32 tensor of
shape [None, None, None] containing instance masks.
      fields.InputDataFields.groundtruth_instance_mask_weights - 1D float32
        tensor of shape [None] containing weights. These are typically values
        in {0.0, 1.0} which indicate whether the mask for the corresponding
        object should be considered.
      fields.InputDataFields.groundtruth_image_classes - 1D int64 tensor of
        shape [None] containing image-level class labels.
fields.InputDataFields.multiclass_scores - 1D float32 tensor of shape
[None * num_classes] containing flattened multiclass scores for
groundtruth boxes.
fields.InputDataFields.context_features - 1D float32 tensor of shape
[context_feature_length * num_context_features]
fields.InputDataFields.context_feature_length - int32 tensor specifying
the length of each feature in context_features
"""
serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
self.items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
is_crowd = fields.InputDataFields.groundtruth_is_crowd
tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape(
tensor_dict[fields.InputDataFields.image])[:2]
if fields.InputDataFields.image_additional_channels in tensor_dict:
channels = tensor_dict[fields.InputDataFields.image_additional_channels]
channels = tf.squeeze(channels, axis=3)
channels = tf.transpose(channels, perm=[1, 2, 0])
tensor_dict[fields.InputDataFields.image_additional_channels] = channels
def default_groundtruth_weights():
return tf.ones(
[tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]],
dtype=tf.float32)
tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(
tf.greater(
tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_weights])[0],
0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],
default_groundtruth_weights)
if fields.InputDataFields.groundtruth_instance_masks in tensor_dict:
gt_instance_masks = tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
num_gt_instance_masks = tf.shape(gt_instance_masks)[0]
gt_instance_mask_weights = tensor_dict[
fields.InputDataFields.groundtruth_instance_mask_weights]
num_gt_instance_mask_weights = tf.shape(gt_instance_mask_weights)[0]
def default_groundtruth_instance_mask_weights():
return tf.ones([num_gt_instance_masks], dtype=tf.float32)
tensor_dict[fields.InputDataFields.groundtruth_instance_mask_weights] = (
tf.cond(tf.greater(num_gt_instance_mask_weights, 0),
lambda: gt_instance_mask_weights,
default_groundtruth_instance_mask_weights))
if fields.InputDataFields.groundtruth_keypoints in tensor_dict:
# Set all keypoints that are not labeled to NaN.
gt_kpt_fld = fields.InputDataFields.groundtruth_keypoints
gt_kpt_vis_fld = fields.InputDataFields.groundtruth_keypoint_visibilities
visibilities_tiled = tf.tile(
tf.expand_dims(tensor_dict[gt_kpt_vis_fld], -1),
[1, 1, 2])
tensor_dict[gt_kpt_fld] = tf.where(
visibilities_tiled,
tensor_dict[gt_kpt_fld],
np.nan * tf.ones_like(tensor_dict[gt_kpt_fld]))
if self._expand_hierarchy_labels:
input_fields = fields.InputDataFields
image_classes, image_confidences = self._expand_image_label_hierarchy(
tensor_dict[input_fields.groundtruth_image_classes],
tensor_dict[input_fields.groundtruth_image_confidences])
tensor_dict[input_fields.groundtruth_image_classes] = image_classes
tensor_dict[input_fields.groundtruth_image_confidences] = (
image_confidences)
box_fields = [
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_weights,
]
def expand_field(field_name):
return self._expansion_box_field_labels(
tensor_dict[input_fields.groundtruth_classes],
tensor_dict[field_name])
# pylint: disable=cell-var-from-loop
for field in box_fields:
if field in tensor_dict:
tensor_dict[field] = tf.cond(
tf.size(tensor_dict[field]) > 0, lambda: expand_field(field),
lambda: tensor_dict[field])
# pylint: enable=cell-var-from-loop
tensor_dict[input_fields.groundtruth_classes] = (
self._expansion_box_field_labels(
tensor_dict[input_fields.groundtruth_classes],
tensor_dict[input_fields.groundtruth_classes], True))
if fields.InputDataFields.groundtruth_group_of in tensor_dict:
group_of = fields.InputDataFields.groundtruth_group_of
tensor_dict[group_of] = tf.cast(tensor_dict[group_of], dtype=tf.bool)
if fields.InputDataFields.groundtruth_dp_num_points in tensor_dict:
tensor_dict[fields.InputDataFields.groundtruth_dp_num_points] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_dp_num_points],
dtype=tf.int32)
tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids],
dtype=tf.int32)
if fields.InputDataFields.groundtruth_track_ids in tensor_dict:
tensor_dict[fields.InputDataFields.groundtruth_track_ids] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_track_ids],
dtype=tf.int32)
return tensor_dict
def _reshape_keypoints(self, keys_to_tensors):
"""Reshape keypoints.
The keypoints are reshaped to [num_instances, num_keypoints, 2].
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/y'
Returns:
A 3-D float tensor of shape [num_instances, num_keypoints, 2] with values
in [0, 1].
"""
y = keys_to_tensors['image/object/keypoint/y']
if isinstance(y, tf.SparseTensor):
y = tf.sparse_tensor_to_dense(y)
y = tf.expand_dims(y, 1)
x = keys_to_tensors['image/object/keypoint/x']
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
x = tf.expand_dims(x, 1)
keypoints = tf.concat([y, x], 1)
keypoints = tf.reshape(keypoints, [-1, self._num_keypoints, 2])
return keypoints
def _reshape_keypoint_depths(self, keys_to_tensors):
"""Reshape keypoint depths.
The keypoint depths are reshaped to [num_instances, num_keypoints]. The
keypoint depth tensor is expected to have the same shape as the keypoint x
(or y) tensors. If not (usually because the example does not have the depth
groundtruth), then default depth values (zero) are provided.
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/z'
Returns:
A 2-D float tensor of shape [num_instances, num_keypoints] with values
representing the keypoint depths.
"""
x = keys_to_tensors['image/object/keypoint/x']
z = keys_to_tensors['image/object/keypoint/z']
if isinstance(z, tf.SparseTensor):
z = tf.sparse_tensor_to_dense(z)
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
default_z = tf.zeros_like(x)
# Use keypoint depth groundtruth if provided, otherwise use the default
# depth value.
z = tf.cond(tf.equal(tf.size(x), tf.size(z)),
true_fn=lambda: z,
false_fn=lambda: default_z)
z = tf.reshape(z, [-1, self._num_keypoints])
return z
def _reshape_keypoint_depth_weights(self, keys_to_tensors):
"""Reshape keypoint depth weights.
The keypoint depth weights are reshaped to [num_instances, num_keypoints].
The keypoint depth weights tensor is expected to have the same shape as the
keypoint x (or y) tensors. If not (usually because the example does not have
the depth weights groundtruth), then default weight values (zero) are
provided.
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/z/weights'
Returns:
A 2-D float tensor of shape [num_instances, num_keypoints] with values
representing the keypoint depth weights.
"""
x = keys_to_tensors['image/object/keypoint/x']
z = keys_to_tensors['image/object/keypoint/z/weights']
if isinstance(z, tf.SparseTensor):
z = tf.sparse_tensor_to_dense(z)
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
default_z = tf.zeros_like(x)
# Use keypoint depth weights if provided, otherwise use the default
# values.
z = tf.cond(tf.equal(tf.size(x), tf.size(z)),
true_fn=lambda: z,
false_fn=lambda: default_z)
z = tf.reshape(z, [-1, self._num_keypoints])
return z
def _reshape_keypoint_visibilities(self, keys_to_tensors):
"""Reshape keypoint visibilities.
The keypoint visibilities are reshaped to [num_instances,
num_keypoints].
    The raw keypoint visibilities are expected to conform to the
    MS COCO definition. See the Visibility enum.
The returned boolean is True for the labeled case (either
Visibility.NOT_VISIBLE or Visibility.VISIBLE). These are the same categories
that COCO uses to evaluate keypoint detection performance:
http://cocodataset.org/#keypoints-eval
    If image/object/keypoint/visibility is not provided, visibilities will be
    set to True for finite keypoint coordinate values, and False if the
    coordinates are NaN.
Args:
keys_to_tensors: a dictionary from keys to tensors. Expected keys are:
'image/object/keypoint/x'
'image/object/keypoint/visibility'
Returns:
      A 2-D bool tensor of shape [num_instances, num_keypoints]: True if the
      keypoint is labeled, False otherwise.
"""
x = keys_to_tensors['image/object/keypoint/x']
vis = keys_to_tensors['image/object/keypoint/visibility']
if isinstance(vis, tf.SparseTensor):
vis = tf.sparse_tensor_to_dense(vis)
if isinstance(x, tf.SparseTensor):
x = tf.sparse_tensor_to_dense(x)
default_vis = tf.where(
tf.math.is_nan(x),
Visibility.UNLABELED.value * tf.ones_like(x, dtype=tf.int64),
Visibility.VISIBLE.value * tf.ones_like(x, dtype=tf.int64))
# Use visibility if provided, otherwise use the default visibility.
vis = tf.cond(tf.equal(tf.size(x), tf.size(vis)),
true_fn=lambda: vis,
false_fn=lambda: default_vis)
vis = tf.math.logical_or(
tf.math.equal(vis, Visibility.NOT_VISIBLE.value),
tf.math.equal(vis, Visibility.VISIBLE.value))
vis = tf.reshape(vis, [-1, self._num_keypoints])
return vis
def _reshape_instance_masks(self, keys_to_tensors):
"""Reshape instance segmentation masks.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
masks = keys_to_tensors['image/object/mask']
if isinstance(masks, tf.SparseTensor):
masks = tf.sparse_tensor_to_dense(masks)
    # The comparison result is already cast to float32 here, so the reshaped
    # tensor can be returned directly without a second cast.
    return tf.reshape(
        tf.cast(tf.greater(masks, 0.0), dtype=tf.float32), to_shape)
def _reshape_context_features(self, keys_to_tensors):
"""Reshape context features.
The instance context_features are reshaped to
[num_context_features, context_feature_length]
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 2-D float tensor of shape [num_context_features, context_feature_length]
"""
context_feature_length = keys_to_tensors['image/context_feature_length']
to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32)
context_features = keys_to_tensors['image/context_features']
if isinstance(context_features, tf.SparseTensor):
context_features = tf.sparse_tensor_to_dense(context_features)
context_features = tf.reshape(context_features, to_shape)
return context_features
def _decode_png_instance_masks(self, keys_to_tensors):
"""Decode PNG instance segmentation masks and stack into dense tensor.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
def decode_png_mask(image_buffer):
image = tf.squeeze(
tf.image.decode_image(image_buffer, channels=1), axis=2)
image.set_shape([None, None])
image = tf.cast(tf.greater(image, 0), dtype=tf.float32)
return image
png_masks = keys_to_tensors['image/object/mask']
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
if isinstance(png_masks, tf.SparseTensor):
png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
return tf.cond(
tf.greater(tf.size(png_masks), 0),
lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
lambda: tf.zeros(tf.cast(tf.stack([0, height, width]), dtype=tf.int32)))
def _dense_pose_part_indices(self, keys_to_tensors):
"""Creates a tensor that contains part indices for each DensePose point.
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 2-D int32 tensor of shape [num_instances, num_points] where each element
contains the DensePose part index (0-23). The value `num_points`
corresponds to the maximum number of sampled points across all instances
      in the image. Note that instances with fewer sampled points will be
      padded with zeros in the last dimension.
"""
num_points_per_instances = keys_to_tensors['image/object/densepose/num']
part_index = keys_to_tensors['image/object/densepose/part_index']
if isinstance(num_points_per_instances, tf.SparseTensor):
num_points_per_instances = tf.sparse_tensor_to_dense(
num_points_per_instances)
if isinstance(part_index, tf.SparseTensor):
part_index = tf.sparse_tensor_to_dense(part_index)
part_index = tf.cast(part_index, dtype=tf.int32)
max_points_per_instance = tf.cast(
tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)
num_points_cumulative = tf.concat([
[0], tf.math.cumsum(num_points_per_instances)], axis=0)
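    # For example (illustrative values, not from the original file):
    # num_points_per_instances = [2, 3] gives num_points_cumulative =
    # [0, 2, 5], so instance i owns the slice
    # part_index[num_points_cumulative[i]:num_points_cumulative[i + 1]].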
def pad_parts_tensor(instance_ind):
points_range_start = num_points_cumulative[instance_ind]
points_range_end = num_points_cumulative[instance_ind + 1]
part_inds = part_index[points_range_start:points_range_end]
return shape_utils.pad_or_clip_nd(part_inds,
output_shape=[max_points_per_instance])
return tf.map_fn(pad_parts_tensor,
tf.range(tf.size(num_points_per_instances)),
dtype=tf.int32)
def _dense_pose_surface_coordinates(self, keys_to_tensors):
"""Creates a tensor that contains surface coords for each DensePose point.
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float32 tensor of shape [num_instances, num_points, 4] where each
point contains (y, x, v, u) data for each sampled DensePose point. The
(y, x) coordinate has normalized image locations for the point, and (v, u)
contains the surface coordinate (also normalized) for the part. The value
`num_points` corresponds to the maximum number of sampled points across
      all instances in the image. Note that instances with fewer sampled
      points will be padded with zeros in dim=1.
"""
num_points_per_instances = keys_to_tensors['image/object/densepose/num']
dp_y = keys_to_tensors['image/object/densepose/y']
dp_x = keys_to_tensors['image/object/densepose/x']
dp_v = keys_to_tensors['image/object/densepose/v']
dp_u = keys_to_tensors['image/object/densepose/u']
if isinstance(num_points_per_instances, tf.SparseTensor):
num_points_per_instances = tf.sparse_tensor_to_dense(
num_points_per_instances)
if isinstance(dp_y, tf.SparseTensor):
dp_y = tf.sparse_tensor_to_dense(dp_y)
if isinstance(dp_x, tf.SparseTensor):
dp_x = tf.sparse_tensor_to_dense(dp_x)
if isinstance(dp_v, tf.SparseTensor):
dp_v = tf.sparse_tensor_to_dense(dp_v)
if isinstance(dp_u, tf.SparseTensor):
dp_u = tf.sparse_tensor_to_dense(dp_u)
max_points_per_instance = tf.cast(
tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)
num_points_cumulative = tf.concat([
[0], tf.math.cumsum(num_points_per_instances)], axis=0)
def pad_surface_coordinates_tensor(instance_ind):
"""Pads DensePose surface coordinates for each instance."""
points_range_start = num_points_cumulative[instance_ind]
points_range_end = num_points_cumulative[instance_ind + 1]
y = dp_y[points_range_start:points_range_end]
x = dp_x[points_range_start:points_range_end]
v = dp_v[points_range_start:points_range_end]
u = dp_u[points_range_start:points_range_end]
# Create [num_points_i, 4] tensor, where num_points_i is the number of
# sampled points for instance i.
unpadded_tensor = tf.stack([y, x, v, u], axis=1)
return shape_utils.pad_or_clip_nd(
unpadded_tensor, output_shape=[max_points_per_instance, 4])
return tf.map_fn(pad_surface_coordinates_tensor,
tf.range(tf.size(num_points_per_instances)),
dtype=tf.float32)
def _expand_image_label_hierarchy(self, image_classes, image_confidences):
"""Expand image level labels according to the hierarchy.
Args:
image_classes: Int64 tensor with the image level class ids for a sample.
image_confidences: Float tensor signaling whether a class id is present in
the image (1.0) or not present (0.0).
Returns:
      new_image_classes: Int64 tensor containing the expanded image_classes.
      new_image_confidences: Float tensor containing the expanded
        image_confidences.
"""
def expand_labels(relation_tensor, confidence_value):
"""Expand to ancestors or descendants depending on arguments."""
mask = tf.equal(image_confidences, confidence_value)
target_image_classes = tf.boolean_mask(image_classes, mask)
expanded_indices = tf.reduce_any((tf.gather(
relation_tensor, target_image_classes - _LABEL_OFFSET, axis=0) > 0),
axis=0)
expanded_indices = tf.where(expanded_indices)[:, 0] + _LABEL_OFFSET
new_groundtruth_image_classes = (
tf.concat([
tf.boolean_mask(image_classes, tf.logical_not(mask)),
expanded_indices,
],
axis=0))
new_groundtruth_image_confidences = (
tf.concat([
tf.boolean_mask(image_confidences, tf.logical_not(mask)),
tf.ones([tf.shape(expanded_indices)[0]],
dtype=image_confidences.dtype) * confidence_value,
],
axis=0))
return new_groundtruth_image_classes, new_groundtruth_image_confidences
image_classes, image_confidences = expand_labels(self._ancestors_lut, 1.0)
new_image_classes, new_image_confidences = expand_labels(
self._descendants_lut, 0.0)
return new_image_classes, new_image_confidences
def _expansion_box_field_labels(self,
object_classes,
object_field,
copy_class_id=False):
"""Expand the labels of a specific object field according to the hierarchy.
Args:
object_classes: Int64 tensor with the class id for each element in
object_field.
object_field: Tensor to be expanded.
copy_class_id: Boolean to choose whether to use class id values in the
output tensor instead of replicating the original values.
Returns:
A tensor with the result of expanding object_field.
"""
expanded_indices = tf.gather(
self._ancestors_lut, object_classes - _LABEL_OFFSET, axis=0)
if copy_class_id:
new_object_field = tf.where(expanded_indices > 0)[:, 1] + _LABEL_OFFSET
else:
new_object_field = tf.repeat(
object_field, tf.reduce_sum(expanded_indices, axis=1), axis=0)
return new_object_field
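# A hedged usage sketch (not part of the original module): `serialized` is
# assumed to hold one serialized tf.Example string; the constructor
# arguments are illustrative.
def _tf_example_decoder_usage_sketch(serialized):
  decoder = TfExampleDecoder(load_instance_masks=True, num_keypoints=17)
  tensor_dict = decoder.decode(tf.convert_to_tensor(serialized))
  return (tensor_dict[fields.InputDataFields.groundtruth_boxes],
          tensor_dict[fields.InputDataFields.groundtruth_classes])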
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/data_decoders/tf_example_decoder.py | tf_example_decoder.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence example decoder for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow.compat.v1 as tf
from tf_slim import tfexample_decoder as slim_example_decoder
from object_detection.core import data_decoder
from object_detection.core import standard_fields as fields
from object_detection.utils import label_map_util
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import lookup as contrib_lookup
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
class _ClassTensorHandler(slim_example_decoder.Tensor):
"""An ItemHandler to fetch class ids from class text."""
def __init__(self,
tensor_key,
label_map_proto_file,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
label_map_proto_file: File path to a text format LabelMapProto message
mapping class text to id.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
name_to_id = label_map_util.get_label_map_dict(
label_map_proto_file, use_display_name=False)
# We use a default_value of -1, but we expect all labels to be contained
# in the label map.
try:
# Dynamically try to load the tf v2 lookup, falling back to contrib
lookup = tf.compat.v2.lookup
hash_table_class = tf.compat.v2.lookup.StaticHashTable
except AttributeError:
lookup = contrib_lookup
hash_table_class = contrib_lookup.HashTable
name_to_id_table = hash_table_class(
initializer=lookup.KeyValueTensorInitializer(
keys=tf.constant(list(name_to_id.keys())),
values=tf.constant(list(name_to_id.values()), dtype=tf.int64)),
default_value=-1)
self._name_to_id_table = name_to_id_table
super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(_ClassTensorHandler,
self).tensors_to_item(keys_to_tensors)
return self._name_to_id_table.lookup(unmapped_tensor)
class TfSequenceExampleDecoder(data_decoder.DataDecoder):
"""Tensorflow Sequence Example proto decoder for Object Detection.
Sequence examples contain sequences of images which share common
features. The structure of TfSequenceExamples can be seen in
dataset_tools/seq_example_util.py
For the TFODAPI, the following fields are required:
Shared features:
'image/format'
'image/height'
'image/width'
Features with an entry for each image, where bounding box features can
be empty lists if the image does not contain any objects:
'image/encoded'
'image/source_id'
'region/bbox/xmin'
'region/bbox/xmax'
'region/bbox/ymin'
'region/bbox/ymax'
'region/label/string'
Optionally, the sequence example can include context_features for use in
Context R-CNN (see https://arxiv.org/abs/1912.03538):
'image/context_features'
'image/context_feature_length'
'image/context_features_image_id_list'
"""
def __init__(self,
label_map_proto_file,
load_context_features=False,
load_context_image_ids=False,
use_display_name=False,
fully_annotated=False):
"""Constructs `TfSequenceExampleDecoder` object.
Args:
      label_map_proto_file: a file path to an
        object_detection.protos.StringIntLabelMap proto. The
        label map will be used to map the strings in 'region/label/string'
        to class IDs. It is assumed that 'region/label/string' will be in
        the data.
load_context_features: Whether to load information from context_features,
to provide additional context to a detection model for training and/or
        inference.
load_context_image_ids: Whether to load the corresponding image ids for
the context_features in order to visualize attention.
use_display_name: whether or not to use the `display_name` for label
mapping (instead of `name`). Only used if label_map_proto_file is
provided.
fully_annotated: If True, will assume that every frame (whether it has
boxes or not), has been fully annotated. If False, a
'region/is_annotated' field must be provided in the dataset which
indicates which frames have annotations. Default False.
"""
# Specifies how the tf.SequenceExamples are decoded.
self._context_keys_to_features = {
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature((), tf.int64),
'image/width': tf.FixedLenFeature((), tf.int64),
}
self._sequence_keys_to_feature_lists = {
'image/encoded': tf.FixedLenSequenceFeature([], dtype=tf.string),
'image/source_id': tf.FixedLenSequenceFeature([], dtype=tf.string),
'region/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'region/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'region/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'region/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'region/label/string': tf.VarLenFeature(dtype=tf.string),
'region/label/confidence': tf.VarLenFeature(dtype=tf.float32),
}
self._items_to_handlers = {
# Context.
fields.InputDataFields.image_height:
slim_example_decoder.Tensor('image/height'),
fields.InputDataFields.image_width:
slim_example_decoder.Tensor('image/width'),
# Sequence.
fields.InputDataFields.num_groundtruth_boxes:
slim_example_decoder.NumBoxesSequence('region/bbox/xmin'),
fields.InputDataFields.groundtruth_boxes:
slim_example_decoder.BoundingBoxSequence(
prefix='region/bbox/', default_value=0.0),
fields.InputDataFields.groundtruth_weights:
slim_example_decoder.Tensor('region/label/confidence'),
}
# If the dataset is sparsely annotated, parse sequence features which
# indicate which frames have been labeled.
if not fully_annotated:
self._sequence_keys_to_feature_lists['region/is_annotated'] = (
tf.FixedLenSequenceFeature([], dtype=tf.int64))
self._items_to_handlers[fields.InputDataFields.is_annotated] = (
slim_example_decoder.Tensor('region/is_annotated'))
self._items_to_handlers[fields.InputDataFields.image] = (
slim_example_decoder.Tensor('image/encoded'))
self._items_to_handlers[fields.InputDataFields.source_id] = (
slim_example_decoder.Tensor('image/source_id'))
label_handler = _ClassTensorHandler(
'region/label/string', label_map_proto_file, default_value='')
self._items_to_handlers[
fields.InputDataFields.groundtruth_classes] = label_handler
if load_context_features:
self._context_keys_to_features['image/context_features'] = (
tf.VarLenFeature(dtype=tf.float32))
self._items_to_handlers[fields.InputDataFields.context_features] = (
slim_example_decoder.ItemHandlerCallback(
['image/context_features', 'image/context_feature_length'],
self._reshape_context_features))
self._context_keys_to_features['image/context_feature_length'] = (
tf.FixedLenFeature((), tf.int64))
self._items_to_handlers[fields.InputDataFields.context_feature_length] = (
slim_example_decoder.Tensor('image/context_feature_length'))
if load_context_image_ids:
self._context_keys_to_features['image/context_features_image_id_list'] = (
tf.VarLenFeature(dtype=tf.string))
self._items_to_handlers[
fields.InputDataFields.context_features_image_id_list] = (
slim_example_decoder.Tensor(
'image/context_features_image_id_list',
default_value=''))
self._fully_annotated = fully_annotated
def decode(self, tf_seq_example_string_tensor):
"""Decodes serialized `tf.SequenceExample`s and returns a tensor dictionary.
Args:
tf_seq_example_string_tensor: a string tensor holding a serialized
`tf.SequenceExample`.
Returns:
      A dictionary with (at least) the following tensors:
fields.InputDataFields.source_id: a [num_frames] string tensor with a
unique ID for each frame.
fields.InputDataFields.num_groundtruth_boxes: a [num_frames] int32 tensor
specifying the number of boxes in each frame.
fields.InputDataFields.groundtruth_boxes: a [num_frames, num_boxes, 4]
        float32 tensor with bounding boxes for each frame. Note that num_boxes
        is the maximum number of boxes seen in any individual frame. Any
        frames with fewer boxes are padded with 0.0.
fields.InputDataFields.groundtruth_classes: a [num_frames, num_boxes]
int32 tensor with class indices for each box in each frame.
fields.InputDataFields.groundtruth_weights: a [num_frames, num_boxes]
float32 tensor with weights of the groundtruth boxes.
fields.InputDataFields.is_annotated: a [num_frames] bool tensor specifying
whether the image was annotated or not. If False, the corresponding
entries in the groundtruth tensor will be ignored.
      fields.InputDataFields.context_features: a 1D float32 tensor of shape
        [context_feature_length * num_context_features].
      fields.InputDataFields.context_feature_length: an int32 tensor
        specifying the length of each feature in context_features.
      fields.InputDataFields.image: a [num_frames] string tensor with
        the encoded images.
      fields.InputDataFields.context_features_image_id_list: a 1D vector
        of shape [num_context_features] containing string tensors.
"""
serialized_example = tf.reshape(tf_seq_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFSequenceExampleDecoder(
self._context_keys_to_features, self._sequence_keys_to_feature_lists,
self._items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(list(zip(keys, tensors)))
tensor_dict[fields.InputDataFields.groundtruth_boxes].set_shape(
[None, None, 4])
tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.cast(
tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
dtype=tf.int32)
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.cast(
tensor_dict[fields.InputDataFields.groundtruth_classes], dtype=tf.int32)
tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.cast(
tf.stack([
tensor_dict[fields.InputDataFields.image_height],
tensor_dict[fields.InputDataFields.image_width]
]),
dtype=tf.int32)
tensor_dict.pop(fields.InputDataFields.image_height)
tensor_dict.pop(fields.InputDataFields.image_width)
def default_groundtruth_weights():
"""Produces weights of 1.0 for each valid box, and 0.0 otherwise."""
num_boxes_per_frame = tensor_dict[
fields.InputDataFields.num_groundtruth_boxes]
max_num_boxes = tf.reduce_max(num_boxes_per_frame)
num_boxes_per_frame_tiled = tf.tile(
tf.expand_dims(num_boxes_per_frame, axis=-1),
multiples=tf.stack([1, max_num_boxes]))
range_tiled = tf.tile(
tf.expand_dims(tf.range(max_num_boxes), axis=0),
multiples=tf.stack([tf.shape(num_boxes_per_frame)[0], 1]))
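      # Worked example (illustrative values): num_boxes_per_frame = [1, 2]
      # yields num_boxes_per_frame_tiled = [[1, 1], [2, 2]] and range_tiled =
      # [[0, 1], [0, 1]], so the weights below become [[1., 0.], [1., 1.]].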
return tf.cast(
tf.greater(num_boxes_per_frame_tiled, range_tiled), tf.float32)
tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(
tf.greater(
tf.size(tensor_dict[fields.InputDataFields.groundtruth_weights]),
0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],
default_groundtruth_weights)
if self._fully_annotated:
tensor_dict[fields.InputDataFields.is_annotated] = tf.ones_like(
tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
dtype=tf.bool)
else:
tensor_dict[fields.InputDataFields.is_annotated] = tf.cast(
tensor_dict[fields.InputDataFields.is_annotated], dtype=tf.bool)
return tensor_dict
def _reshape_context_features(self, keys_to_tensors):
"""Reshape context features.
The instance context_features are reshaped to
[num_context_features, context_feature_length]
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 2-D float tensor of shape [num_context_features, context_feature_length]
"""
context_feature_length = keys_to_tensors['image/context_feature_length']
to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32)
context_features = keys_to_tensors['image/context_features']
if isinstance(context_features, tf.SparseTensor):
context_features = tf.sparse_tensor_to_dense(context_features)
context_features = tf.reshape(context_features, to_shape)
return context_features
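# A hedged usage sketch (not part of the original module): `serialized` is
# assumed to hold one serialized tf.SequenceExample string and
# `label_map_path` a StringIntLabelMap text proto path; both names are
# illustrative.
def _tf_sequence_example_decoder_usage_sketch(serialized, label_map_path):
  decoder = TfSequenceExampleDecoder(label_map_proto_file=label_map_path,
                                     load_context_features=True)
  tensor_dict = decoder.decode(tf.convert_to_tensor(serialized))
  return (tensor_dict[fields.InputDataFields.groundtruth_boxes],
          tensor_dict[fields.InputDataFields.is_annotated])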
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/data_decoders/tf_sequence_example_decoder.py | tf_sequence_example_decoder.py |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_sequence_example_decoder.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_sequence_example_decoder
from object_detection.dataset_tools import seq_example_util
from object_detection.utils import test_case
class TfSequenceExampleDecoderTest(test_case.TestCase):
def _create_label_map(self, path):
label_map_text = """
item {
name: "dog"
id: 1
}
item {
name: "cat"
id: 2
}
item {
name: "panda"
id: 4
}
"""
with tf.gfile.Open(path, 'wb') as f:
f.write(label_map_text)
def _make_random_serialized_jpeg_images(self, num_frames, image_height,
image_width):
def graph_fn():
images = tf.cast(tf.random.uniform(
[num_frames, image_height, image_width, 3],
maxval=256,
dtype=tf.int32), dtype=tf.uint8)
images_list = tf.unstack(images, axis=0)
return [tf.io.encode_jpeg(image) for image in images_list]
encoded_images = self.execute(graph_fn, [])
return encoded_images
def test_decode_sequence_example(self):
num_frames = 4
image_height = 20
image_width = 30
expected_groundtruth_boxes = [
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
]
expected_groundtruth_classes = [
[-1, -1],
[-1, 1],
[1, 2],
[-1, -1]
]
flds = fields.InputDataFields
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
def graph_fn():
label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
self._create_label_map(label_map_proto_file)
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=[str(i) for i in range(num_frames)],
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[0., 0., 1., 1.]], # Frame 0.
[[0.2, 0.2, 1., 1.],
[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.], # Frame 2.
[0.1, 0.1, 0.2, 0.2]],
[[]], # Frame 3.
],
label_strings=[
['fox'], # Frame 0. Fox will be filtered out.
['fox', 'dog'], # Frame 1. Fox will be filtered out.
['dog', 'cat'], # Frame 2.
[], # Frame 3
]).SerializeToString()
example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
return decoder.decode(example_string_tensor)
tensor_dict_out = self.execute(graph_fn, [])
self.assertAllClose(expected_groundtruth_boxes,
tensor_dict_out[flds.groundtruth_boxes])
self.assertAllEqual(expected_groundtruth_classes,
tensor_dict_out[flds.groundtruth_classes])
def test_decode_sequence_example_context(self):
num_frames = 4
image_height = 20
image_width = 30
expected_groundtruth_boxes = [
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
]
expected_groundtruth_classes = [
[-1, -1],
[-1, 1],
[1, 2],
[-1, -1]
]
expected_context_features = np.array(
[[0.0, 0.1, 0.2], [0.3, 0.4, 0.5]], dtype=np.float32)
flds = fields.InputDataFields
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
def graph_fn():
label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
self._create_label_map(label_map_proto_file)
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file,
load_context_features=True)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=[str(i) for i in range(num_frames)],
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[0., 0., 1., 1.]], # Frame 0.
[[0.2, 0.2, 1., 1.],
[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.], # Frame 2.
[0.1, 0.1, 0.2, 0.2]],
[[]], # Frame 3.
],
label_strings=[
['fox'], # Frame 0. Fox will be filtered out.
['fox', 'dog'], # Frame 1. Fox will be filtered out.
['dog', 'cat'], # Frame 2.
[], # Frame 3
],
context_features=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
context_feature_length=[3],
context_features_image_id_list=[b'im_1', b'im_2']
).SerializeToString()
example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
return decoder.decode(example_string_tensor)
tensor_dict_out = self.execute(graph_fn, [])
self.assertAllClose(expected_groundtruth_boxes,
tensor_dict_out[flds.groundtruth_boxes])
self.assertAllEqual(expected_groundtruth_classes,
tensor_dict_out[flds.groundtruth_classes])
self.assertAllClose(expected_context_features,
tensor_dict_out[flds.context_features])
def test_decode_sequence_example_context_image_id_list(self):
num_frames = 4
image_height = 20
image_width = 30
expected_groundtruth_boxes = [
[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
[[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
]
expected_groundtruth_classes = [
[-1, -1],
[-1, 1],
[1, 2],
[-1, -1]
]
expected_context_image_ids = [b'im_1', b'im_2']
flds = fields.InputDataFields
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
def graph_fn():
label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
self._create_label_map(label_map_proto_file)
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file,
load_context_image_ids=True)
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=[str(i) for i in range(num_frames)],
is_annotated=[[1], [1], [1], [1]],
bboxes=[
[[0., 0., 1., 1.]], # Frame 0.
[[0.2, 0.2, 1., 1.],
[0., 0., 1., 1.]], # Frame 1.
[[0., 0., 1., 1.], # Frame 2.
[0.1, 0.1, 0.2, 0.2]],
[[]], # Frame 3.
],
label_strings=[
['fox'], # Frame 0. Fox will be filtered out.
['fox', 'dog'], # Frame 1. Fox will be filtered out.
['dog', 'cat'], # Frame 2.
[], # Frame 3
],
context_features=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
context_feature_length=[3],
context_features_image_id_list=[b'im_1', b'im_2']
).SerializeToString()
example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
return decoder.decode(example_string_tensor)
tensor_dict_out = self.execute(graph_fn, [])
self.assertAllClose(expected_groundtruth_boxes,
tensor_dict_out[flds.groundtruth_boxes])
self.assertAllEqual(expected_groundtruth_classes,
tensor_dict_out[flds.groundtruth_classes])
self.assertAllEqual(expected_context_image_ids,
tensor_dict_out[flds.context_features_image_id_list])
def test_decode_sequence_example_negative_clip(self):
num_frames = 4
image_height = 20
image_width = 30
expected_groundtruth_boxes = -1 * np.ones((4, 0, 4))
expected_groundtruth_classes = -1 * np.ones((4, 0))
flds = fields.InputDataFields
encoded_images = self._make_random_serialized_jpeg_images(
num_frames, image_height, image_width)
def graph_fn():
sequence_example_serialized = seq_example_util.make_sequence_example(
dataset_name='video_dataset',
video_id='video',
encoded_images=encoded_images,
image_height=image_height,
image_width=image_width,
image_format='JPEG',
image_source_ids=[str(i) for i in range(num_frames)],
bboxes=[
[[]],
[[]],
[[]],
[[]]
],
label_strings=[
[],
[],
[],
[]
]).SerializeToString()
example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
self._create_label_map(label_map_proto_file)
decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
label_map_proto_file=label_map_proto_file)
return decoder.decode(example_string_tensor)
tensor_dict_out = self.execute(graph_fn, [])
self.assertAllClose(expected_groundtruth_boxes,
tensor_dict_out[flds.groundtruth_boxes])
self.assertAllEqual(expected_groundtruth_classes,
tensor_dict_out[flds.groundtruth_classes])
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/data_decoders/tf_sequence_example_decoder_test.py | tf_sequence_example_decoder_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generators.flexible_grid_anchor_generator_test.py."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import flexible_grid_anchor_generator as fg
from object_detection.utils import test_case
class FlexibleGridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(16, 16),]
base_sizes = [(128.0,)]
aspect_ratios = [(1.0,)]
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
anchor_corners_out = self.execute(graph_fn, [])
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112]]
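    # Derivation, from the inputs above: each anchor is 128x128 (base size
    # 128.0, aspect ratio 1.0) centered at the (16, 16) offset plus multiples
    # of the 32-pixel stride, so each corner is a center coordinate +/- 64,
    # e.g. 16 - 64 = -48 and 16 + 64 = 80.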
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_unit_dimensions(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(16, 16),]
base_sizes = [(32.0,)]
aspect_ratios = [(1.0,)]
im_height = 1
im_width = 1
feature_map_shape_list = [(2, 2)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
    # With positive anchor offsets, all produced corners are non-negative.
exp_anchor_corners = [[0, 0, 32, 32],
[0, 32, 32, 64],
[32, 0, 64, 32],
[32, 32, 64, 64]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_normalized_anchors_fails_with_unit_dimensions(self):
anchor_generator = fg.FlexibleGridAnchorGenerator(
[(32.0,)], [(1.0,)], [(32, 32),], [(16, 16),],
normalize_coordinates=True)
    with self.assertRaisesRegex(ValueError, 'Normalized coordinates'):
anchor_generator.generate(
feature_map_shape_list=[(2, 2)], im_height=1, im_width=1)
def test_construct_single_anchor_in_normalized_coordinates(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(16, 16),]
base_sizes = [(128.0,)]
aspect_ratios = [(1.0,)]
im_height = 64
im_width = 128
feature_map_shape_list = [(2, 2)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=True)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128],
[-48./64, -16./128, 80./64, 112./128],
[-16./64, -48./128, 112./64, 80./128],
[-16./64, -16./128, 112./64, 112./128]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_num_anchors_per_location(self):
anchor_strides = [(32, 32), (64, 64)]
anchor_offsets = [(16, 16), (32, 32)]
base_sizes = [(32.0, 64.0, 96.0, 32.0, 64.0, 96.0),
(64.0, 128.0, 172.0, 64.0, 128.0, 172.0)]
aspect_ratios = [(1.0, 1.0, 1.0, 2.0, 2.0, 2.0),
(1.0, 1.0, 1.0, 2.0, 2.0, 2.0)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6])
def test_construct_single_anchor_dynamic_size(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(0, 0),]
base_sizes = [(128.0,)]
aspect_ratios = [(1.0,)]
im_height = tf.constant(64)
im_width = tf.constant(64)
feature_map_shape_list = [(2, 2)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
# Zero offsets are used.
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-32, -64, 96, 64],
[-32, -32, 96, 96]]
anchor_corners_out = self.execute_cpu(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_odd_input_dimension(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(0, 0),]
base_sizes = [(128.0,)]
aspect_ratios = [(1.0,)]
im_height = 65
im_width = 65
feature_map_shape_list = [(3, 3)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
anchor_corners_out = self.execute(graph_fn, [])
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-64, 0, 64, 128],
[-32, -64, 96, 64],
[-32, -32, 96, 96],
[-32, 0, 96, 128],
[0, -64, 128, 64],
[0, -32, 128, 96],
[0, 0, 128, 128]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_on_two_feature_maps(self):
def graph_fn():
anchor_strides = [(32, 32), (64, 64)]
anchor_offsets = [(16, 16), (32, 32)]
base_sizes = [(128.0,), (256.0,)]
aspect_ratios = [(1.0,), (1.0,)]
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2), (1, 1)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave(self):
def graph_fn():
anchor_strides = [(64, 64),]
anchor_offsets = [(32, 32),]
base_sizes = [(256.0, 362.03867)]
aspect_ratios = [(1.0, 1.0)]
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
    # There are 2 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect]]
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193]]
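    # Derivation: both anchors are centered at the (32, 32) offset; the
    # half-sizes are 256 / 2 = 128 and 362.03867 / 2 = 181.0193, giving
    # corners 32 -/+ 128 = [-96, 160] and 32 -/+ 181.0193 =
    # [-149.0193, 213.0193].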
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self):
def graph_fn():
anchor_strides = [(64, 64),]
anchor_offsets = [(32, 32),]
base_sizes = [(256.0, 362.03867, 256.0, 362.03867)]
aspect_ratios = [(1.0, 1.0, 2.0, 2.0)]
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
    # There are 4 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect],
# [2**0.0 intermediate scale + 2.0 aspect],
# [2**0.5 intermediate scale + 2.0 aspect]]
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193],
[-58.50967, -149.0193, 122.50967, 213.0193],
[-96., -224., 160., 288.]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self):
def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height,
feature_map2_width):
anchor_strides = [(32, 32), (64, 64)]
anchor_offsets = [(16, 16), (32, 32)]
base_sizes = [(128.0,), (256.0,)]
aspect_ratios = [(1.0,), (1.0,)]
im_height = 64
im_width = 64
feature_map_shape_list = [(feature_map1_height, feature_map1_width),
(feature_map2_height, feature_map2_width)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(
self.execute_cpu(graph_fn, [
np.array(2, dtype=np.int32),
np.array(2, dtype=np.int32),
np.array(1, dtype=np.int32),
np.array(1, dtype=np.int32)
]),
axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/anchor_generators/flexible_grid_anchor_generator_test.py | flexible_grid_anchor_generator_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.grid_anchor_generator."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.utils import test_case
class GridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor(self):
"""Builds a 1x1 anchor grid to test the size of the output boxes."""
def graph_fn():
scales = [0.5, 1.0, 2.0]
aspect_ratios = [0.25, 1.0, 4.0]
anchor_offset = [7, -3]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales, aspect_ratios, anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],
[-505, -131, 519, 125], [-57, -67, 71, 61],
[-121, -131, 135, 125], [-249, -259, 263, 253],
[-25, -131, 39, 125], [-57, -259, 71, 253],
[-121, -515, 135, 509]]
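    # Derivation for the first box: scale 0.5 with aspect ratio 0.25 on the
    # default 256x256 base anchor gives height 0.5 / sqrt(0.25) * 256 = 256
    # and width 0.5 * sqrt(0.25) * 256 = 64; centered at the (7, -3) offset,
    # the corners are [7 - 128, -3 - 32, 7 + 128, -3 + 32]
    # = [-121, -35, 135, 29].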
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid(self):
def graph_fn():
base_anchor_size = [10, 10]
anchor_stride = [19, 19]
anchor_offset = [0, 0]
scales = [0.5, 1.0, 2.0]
aspect_ratios = [1.0]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales,
aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=anchor_stride,
anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
[-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
[-5., 14., 5, 24], [-10., 9., 10, 29],
[16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
[9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
[14., 14., 24, 24], [9., 9., 29, 29]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid_with_dynamic_feature_map_shapes(self):
def graph_fn(feature_map_height, feature_map_width):
base_anchor_size = [10, 10]
anchor_stride = [19, 19]
anchor_offset = [0, 0]
scales = [0.5, 1.0, 2.0]
aspect_ratios = [1.0]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales,
aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=anchor_stride,
anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(
feature_map_shape_list=[(feature_map_height, feature_map_width)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
[-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
[-5., 14., 5, 24], [-10., 9., 10, 29],
[16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
[9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
[14., 14., 24, 24], [9., 9., 29, 29]]
anchor_corners_out = self.execute_cpu(graph_fn,
[np.array(2, dtype=np.int32),
np.array(2, dtype=np.int32)])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/anchor_generators/grid_anchor_generator_test.py | grid_anchor_generator_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly corresponding to multiple CNN layers."""
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.core import anchor_generator
from object_detection.core import box_list_ops
class FlexibleGridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generate a grid of anchors for multiple CNN layers of different scale."""
def __init__(self, base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=True):
"""Constructs a FlexibleGridAnchorGenerator.
This generator is more flexible than the multiple_grid_anchor_generator
and multiscale_grid_anchor_generator, and can generate any of the anchors
that they can generate, plus additional anchor configurations. In
particular, it allows the explicit specification of scale and aspect ratios
    at each layer without assuming any particular relationship between the
    scales and aspect ratios across layers.
Args:
base_sizes: list of tuples of anchor base sizes. For example, setting
base_sizes=[(1, 2, 3), (4, 5)] means that we want 3 anchors at each
grid point on the first layer with the base sizes of 1, 2, and 3, and 2
anchors at each grid point on the second layer with the base sizes of
4 and 5.
aspect_ratios: list or tuple of aspect ratios. For example, setting
aspect_ratios=[(1.0, 2.0, 0.5), (1.0, 2.0)] means that we want 3 anchors
at each grid point on the first layer with aspect ratios of 1.0, 2.0,
      and 0.5, and 2 anchors at each grid point on the second layer with
      aspect ratios of 1.0 and 2.0.
anchor_strides: list of pairs of strides in pixels (in y and x directions
respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]
means that we want the anchors corresponding to the first layer to be
strided by 25 pixels and those in the second layer to be strided by 50
pixels in both y and x directions.
anchor_offsets: list of pairs of offsets in pixels (in y and x directions
respectively). The offset specifies where we want the center of the
(0, 0)-th anchor to lie for each layer. For example, setting
      anchor_offsets=[(10, 10), (20, 20)] means that we want the
      (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space
      and likewise that we want the (0, 0)-th anchor of the second layer to
      lie at (20, 20) in pixel space.
normalize_coordinates: whether to produce anchors in normalized
coordinates. (defaults to True).
"""
self._base_sizes = base_sizes
self._aspect_ratios = aspect_ratios
self._anchor_strides = anchor_strides
self._anchor_offsets = anchor_offsets
self._normalize_coordinates = normalize_coordinates
def name_scope(self):
return 'FlexibleGridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the Generate function.
"""
return [len(size) for size in self._base_sizes]
def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
"""Generates a collection of bounding boxes to be used as anchors.
Currently we require the input image shape to be statically defined. That
is, im_height and im_width should be integers rather than tensors.
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0), (height_1, width_1), ...]. For example,
setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that
correspond to an 8x8 layer followed by a 7x7 layer.
im_height: the height of the image to generate the grid for. If both
im_height and im_width are 1, anchors can only be generated in
absolute coordinates.
im_width: the width of the image to generate the grid for. If both
im_height and im_width are 1, anchors can only be generated in
absolute coordinates.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if im_height and im_width are 1, but normalized coordinates
were requested.
"""
anchor_grid_list = []
for (feat_shape, base_sizes, aspect_ratios, anchor_stride, anchor_offset
) in zip(feature_map_shape_list, self._base_sizes, self._aspect_ratios,
self._anchor_strides, self._anchor_offsets):
anchor_grid = grid_anchor_generator.tile_anchors(
feat_shape[0],
feat_shape[1],
tf.cast(tf.convert_to_tensor(base_sizes), dtype=tf.float32),
tf.cast(tf.convert_to_tensor(aspect_ratios), dtype=tf.float32),
tf.constant([1.0, 1.0]),
tf.cast(tf.convert_to_tensor(anchor_stride), dtype=tf.float32),
tf.cast(tf.convert_to_tensor(anchor_offset), dtype=tf.float32))
num_anchors = anchor_grid.num_boxes_static()
if num_anchors is None:
num_anchors = anchor_grid.num_boxes()
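      # Attach a 'feature_map_index' field (all zeros here) so that consumers
      # expecting this field on generated anchors can still read it.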
anchor_indices = tf.zeros([num_anchors])
anchor_grid.add_field('feature_map_index', anchor_indices)
if self._normalize_coordinates:
if im_height == 1 or im_width == 1:
raise ValueError(
'Normalized coordinates were requested upon construction of the '
'FlexibleGridAnchorGenerator, but a subsequent call to '
'generate did not supply dimension information.')
anchor_grid = box_list_ops.to_normalized_coordinates(
anchor_grid, im_height, im_width, check_range=False)
anchor_grid_list.append(anchor_grid)
return anchor_grid_list
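

# A minimal usage sketch (the values mirror this generator's unit tests and
# are illustrative only, not part of the library API):
if __name__ == '__main__':
  generator = FlexibleGridAnchorGenerator(
      base_sizes=[(128.0,)],
      aspect_ratios=[(1.0,)],
      anchor_strides=[(32, 32)],
      anchor_offsets=[(16, 16)],
      normalize_coordinates=False)
  anchors_list = generator.generate(
      feature_map_shape_list=[(2, 2)], im_height=64, im_width=64)
  # One BoxList per feature map; get() returns an [N, 4] tensor of corners in
  # [ymin, xmin, ymax, xmax] order.
  print(anchors_list[0].get())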
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/anchor_generators/flexible_grid_anchor_generator.py | flexible_grid_anchor_generator.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly as used in Faster RCNN.
Generates grid anchors on the fly as described in:
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks"
Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import anchor_generator
from object_detection.core import box_list
from object_detection.utils import ops
class GridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generates a grid of anchors at given scales and aspect ratios."""
def __init__(self,
scales=(0.5, 1.0, 2.0),
aspect_ratios=(0.5, 1.0, 2.0),
base_anchor_size=None,
anchor_stride=None,
anchor_offset=None):
"""Constructs a GridAnchorGenerator.
Args:
scales: a list of (float) scales, default=(0.5, 1.0, 2.0)
aspect_ratios: a list of (float) aspect ratios, default=(0.5, 1.0, 2.0)
    base_anchor_size: base anchor size as [height, width]
      (length-2 float32 list or tensor, default=[256, 256])
anchor_stride: difference in centers between base anchors for adjacent
grid positions (length-2 float32 list or tensor,
default=[16, 16])
anchor_offset: center of the anchor with scale and aspect ratio 1 for the
upper left element of the grid, this should be zero for
feature networks with only VALID padding and even receptive
field size, but may need additional calculation if other
padding is used (length-2 float32 list or tensor,
default=[0, 0])
"""
# Handle argument defaults
if base_anchor_size is None:
base_anchor_size = [256, 256]
if anchor_stride is None:
anchor_stride = [16, 16]
if anchor_offset is None:
anchor_offset = [0, 0]
self._scales = scales
self._aspect_ratios = aspect_ratios
self._base_anchor_size = base_anchor_size
self._anchor_stride = anchor_stride
self._anchor_offset = anchor_offset
def name_scope(self):
return 'GridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the `generate` function.
"""
return [len(self._scales) * len(self._aspect_ratios)]
def _generate(self, feature_map_shape_list):
"""Generates a collection of bounding boxes to be used as anchors.
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0)]. For example, setting
feature_map_shape_list=[(8, 8)] asks for anchors that correspond
to an 8x8 layer. For this anchor generator, only lists of length 1 are
allowed.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if feature_map_shape_list, box_specs_list do not have the same
length.
ValueError: if feature_map_shape_list does not consist of pairs of
integers
"""
if not (isinstance(feature_map_shape_list, list)
and len(feature_map_shape_list) == 1):
raise ValueError('feature_map_shape_list must be a list of length 1.')
if not all([isinstance(list_item, tuple) and len(list_item) == 2
for list_item in feature_map_shape_list]):
raise ValueError('feature_map_shape_list must be a list of pairs.')
# Create constants in init_scope so they can be created in tf.functions
# and accessed from outside of the function.
with tf.init_scope():
self._base_anchor_size = tf.cast(tf.convert_to_tensor(
self._base_anchor_size), dtype=tf.float32)
self._anchor_stride = tf.cast(tf.convert_to_tensor(
self._anchor_stride), dtype=tf.float32)
self._anchor_offset = tf.cast(tf.convert_to_tensor(
self._anchor_offset), dtype=tf.float32)
grid_height, grid_width = feature_map_shape_list[0]
scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales,
self._aspect_ratios)
scales_grid = tf.reshape(scales_grid, [-1])
aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1])
anchors = tile_anchors(grid_height,
grid_width,
scales_grid,
aspect_ratios_grid,
self._base_anchor_size,
self._anchor_stride,
self._anchor_offset)
num_anchors = anchors.num_boxes_static()
if num_anchors is None:
num_anchors = anchors.num_boxes()
anchor_indices = tf.zeros([num_anchors])
anchors.add_field('feature_map_index', anchor_indices)
return [anchors]
def tile_anchors(grid_height,
grid_width,
scales,
aspect_ratios,
base_anchor_size,
anchor_stride,
anchor_offset):
"""Create a tiled set of anchors strided along a grid in image space.
This op creates a set of anchor boxes by placing a "basis" collection of
boxes with user-specified scales and aspect ratios centered at evenly
distributed points along a grid. The basis collection is specified via the
scale and aspect_ratios arguments. For example, setting scales=[.1, .2, .2]
and aspect ratios = [2,2,1/2] means that we create three boxes: one with scale
.1, aspect ratio 2, one with scale .2, aspect ratio 2, and one with scale .2
and aspect ratio 1/2. Each box is multiplied by "base_anchor_size" before
placing it over its respective center.
Grid points are specified via grid_height, grid_width parameters as well as
the anchor_stride and anchor_offset parameters.
Args:
grid_height: size of the grid in the y direction (int or int scalar tensor)
grid_width: size of the grid in the x direction (int or int scalar tensor)
scales: a 1-d (float) tensor representing the scale of each box in the
basis set.
aspect_ratios: a 1-d (float) tensor representing the aspect ratio of each
box in the basis set. The length of the scales and aspect_ratios tensors
must be equal.
base_anchor_size: base anchor size as [height, width]
(float tensor of shape [2])
anchor_stride: difference in centers between base anchors for adjacent grid
positions (float tensor of shape [2])
anchor_offset: center of the anchor with scale and aspect ratio 1 for the
upper left element of the grid, this should be zero for
feature networks with only VALID padding and even receptive
field size, but may need some additional calculation if other
padding is used (float tensor of shape [2])
Returns:
a BoxList holding a collection of N anchor boxes
"""
ratio_sqrts = tf.sqrt(aspect_ratios)
heights = scales / ratio_sqrts * base_anchor_size[0]
widths = scales * ratio_sqrts * base_anchor_size[1]
# Get a grid of box centers
y_centers = tf.cast(tf.range(grid_height), dtype=tf.float32)
y_centers = y_centers * anchor_stride[0] + anchor_offset[0]
x_centers = tf.cast(tf.range(grid_width), dtype=tf.float32)
x_centers = x_centers * anchor_stride[1] + anchor_offset[1]
x_centers, y_centers = ops.meshgrid(x_centers, y_centers)
widths_grid, x_centers_grid = ops.meshgrid(widths, x_centers)
heights_grid, y_centers_grid = ops.meshgrid(heights, y_centers)
bbox_centers = tf.stack([y_centers_grid, x_centers_grid], axis=3)
bbox_sizes = tf.stack([heights_grid, widths_grid], axis=3)
bbox_centers = tf.reshape(bbox_centers, [-1, 2])
bbox_sizes = tf.reshape(bbox_sizes, [-1, 2])
bbox_corners = _center_size_bbox_to_corners_bbox(bbox_centers, bbox_sizes)
return box_list.BoxList(bbox_corners)
def _center_size_bbox_to_corners_bbox(centers, sizes):
"""Converts bbox center-size representation to corners representation.
Args:
centers: a tensor with shape [N, 2] representing bounding box centers
sizes: a tensor with shape [N, 2] representing bounding boxes
Returns:
corners: tensor with shape [N, 4] representing bounding boxes in corners
representation
"""
return tf.concat([centers - .5 * sizes, centers + .5 * sizes], 1)
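

# A minimal usage sketch (the values mirror the unit tests for this module and
# are illustrative only):
if __name__ == '__main__':
  generator = GridAnchorGenerator(
      scales=[0.5, 1.0, 2.0],
      aspect_ratios=[0.25, 1.0, 4.0],
      anchor_offset=[7, -3])
  anchors_list = generator.generate(feature_map_shape_list=[(1, 1)])
  # 9 anchors (3 scales x 3 aspect ratios) placed at the single grid point.
  print(anchors_list[0].get())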
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/anchor_generators/grid_anchor_generator.py | grid_anchor_generator.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly corresponding to multiple CNN layers.
Generates grid anchors on the fly corresponding to multiple CNN layers as
described in:
"Focal Loss for Dense Object Detection" (https://arxiv.org/abs/1708.02002)
T.-Y. Lin, P. Goyal, R. Girshick, K. He, P. Dollar
"""
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.core import anchor_generator
from object_detection.core import box_list_ops
class MultiscaleGridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generate a grid of anchors for multiple CNN layers of different scale."""
def __init__(self, min_level, max_level, anchor_scale, aspect_ratios,
scales_per_octave, normalize_coordinates=True):
"""Constructs a MultiscaleGridAnchorGenerator.
    To construct anchors at multiple scale resolutions, one must provide the
    minimum and maximum levels of a scale pyramid. To define anchor size, the
    anchor scale is provided to set the size relative to the stride of the
    corresponding feature map. The generator maps one pixel location on a
    feature map to multiple anchors that have different aspect ratios and
    intermediate scales.
Args:
min_level: minimum level in feature pyramid.
max_level: maximum level in feature pyramid.
anchor_scale: anchor scale and feature stride define the size of the base
anchor on an image. For example, given a feature pyramid with strides
[2^3, ..., 2^7] and anchor scale 4. The base anchor size is
4 * [2^3, ..., 2^7].
aspect_ratios: list or tuple of (float) aspect ratios to place on each
grid point.
scales_per_octave: integer number of intermediate scales per scale octave.
normalize_coordinates: whether to produce anchors in normalized
coordinates. (defaults to True).
"""
self._anchor_grid_info = []
self._aspect_ratios = aspect_ratios
self._scales_per_octave = scales_per_octave
self._normalize_coordinates = normalize_coordinates
scales = [2**(float(scale) / scales_per_octave)
for scale in range(scales_per_octave)]
aspects = list(aspect_ratios)
for level in range(min_level, max_level + 1):
anchor_stride = [2**level, 2**level]
base_anchor_size = [2**level * anchor_scale, 2**level * anchor_scale]
self._anchor_grid_info.append({
'level': level,
'info': [scales, aspects, base_anchor_size, anchor_stride]
})
def name_scope(self):
return 'MultiscaleGridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the Generate function.
"""
return len(self._anchor_grid_info) * [
len(self._aspect_ratios) * self._scales_per_octave]
def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
"""Generates a collection of bounding boxes to be used as anchors.
For training, we require the input image shape to be statically defined.
That is, im_height and im_width should be integers rather than tensors.
For inference, im_height and im_width can be either integers (for fixed
image size), or tensors (for arbitrary image size).
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0), (height_1, width_1), ...]. For example,
setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that
correspond to an 8x8 layer followed by a 7x7 layer.
im_height: the height of the image to generate the grid for. If both
im_height and im_width are 1, anchors can only be generated in
absolute coordinates.
im_width: the width of the image to generate the grid for. If both
im_height and im_width are 1, anchors can only be generated in
absolute coordinates.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if im_height and im_width are not integers.
ValueError: if im_height and im_width are 1, but normalized coordinates
were requested.
"""
anchor_grid_list = []
for feat_shape, grid_info in zip(feature_map_shape_list,
self._anchor_grid_info):
# TODO(rathodv) check the feature_map_shape_list is consistent with
# self._anchor_grid_info
level = grid_info['level']
stride = 2**level
scales, aspect_ratios, base_anchor_size, anchor_stride = grid_info['info']
feat_h = feat_shape[0]
feat_w = feat_shape[1]
anchor_offset = [0, 0]
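      # Use a half-stride offset to center the anchor grid when the static
      # image dimension divides evenly by the stride at this level (or when
      # the dummy dimension 1 is used); dynamic (tensor) image shapes always
      # receive the half-stride offset.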
if isinstance(im_height, int) and isinstance(im_width, int):
if im_height % 2.0**level == 0 or im_height == 1:
anchor_offset[0] = stride / 2.0
if im_width % 2.0**level == 0 or im_width == 1:
anchor_offset[1] = stride / 2.0
if tf.is_tensor(im_height) and tf.is_tensor(im_width):
anchor_offset[0] = stride / 2.0
anchor_offset[1] = stride / 2.0
ag = grid_anchor_generator.GridAnchorGenerator(
scales,
aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=anchor_stride,
anchor_offset=anchor_offset)
(anchor_grid,) = ag.generate(feature_map_shape_list=[(feat_h, feat_w)])
if self._normalize_coordinates:
if im_height == 1 or im_width == 1:
raise ValueError(
'Normalized coordinates were requested upon construction of the '
'MultiscaleGridAnchorGenerator, but a subsequent call to '
'generate did not supply dimension information.')
anchor_grid = box_list_ops.to_normalized_coordinates(
anchor_grid, im_height, im_width, check_range=False)
anchor_grid_list.append(anchor_grid)
return anchor_grid_list
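

# A minimal usage sketch (illustrative values echoing the docstring example of
# strides [2^3, ..., 2^7] with anchor scale 4; not an official configuration):
if __name__ == '__main__':
  generator = MultiscaleGridAnchorGenerator(
      min_level=3, max_level=7, anchor_scale=4.0,
      aspect_ratios=[1.0, 2.0, 0.5], scales_per_octave=2,
      normalize_coordinates=False)
  # 6 anchors per location at every level: 3 aspect ratios x 2 scales.
  print(generator.num_anchors_per_location())
  feature_map_shape_list = [(32, 32), (16, 16), (8, 8), (4, 4), (2, 2)]
  anchors_list = generator.generate(
      feature_map_shape_list, im_height=256, im_width=256)
  print(anchors_list[0].get())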
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/anchor_generators/multiscale_grid_anchor_generator.py | multiscale_grid_anchor_generator.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generators.multiple_grid_anchor_generator_test.py."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import multiple_grid_anchor_generator as ag
from object_detection.utils import test_case
class MultipleGridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor_grid(self):
"""Builds a 1x1 anchor grid to test the size of the output boxes."""
def graph_fn():
box_specs_list = [[(.5, .25), (1.0, .25), (2.0, .25),
(.5, 1.0), (1.0, 1.0), (2.0, 1.0),
(.5, 4.0), (1.0, 4.0), (2.0, 4.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([256, 256], dtype=tf.float32),
anchor_strides=[(16, 16)],
anchor_offsets=[(7, -3)])
anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)])
return anchors_list[0].get()
exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],
[-505, -131, 519, 125], [-57, -67, 71, 61],
[-121, -131, 135, 125], [-249, -259, 263, 253],
[-25, -131, 39, 125], [-57, -259, 71, 253],
[-121, -515, 135, 509]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid(self):
def graph_fn():
box_specs_list = [[(0.5, 1.0), (1.0, 1.0), (2.0, 1.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([10, 10], dtype=tf.float32),
anchor_strides=[(19, 19)],
anchor_offsets=[(0, 0)])
anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)])
return anchors_list[0].get()
exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
[-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
[-5., 14., 5, 24], [-10., 9., 10, 29],
[16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
[9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
[14., 14., 24, 24], [9., 9., 29, 29]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid_non_square(self):
def graph_fn():
box_specs_list = [[(1.0, 1.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list, base_anchor_size=tf.constant([1, 1],
dtype=tf.float32))
anchors_list = anchor_generator.generate(feature_map_shape_list=[(
tf.constant(1, dtype=tf.int32), tf.constant(2, dtype=tf.int32))])
return anchors_list[0].get()
exp_anchor_corners = [[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_dynamic_size_anchor_grid(self):
def graph_fn(height, width):
box_specs_list = [[(1.0, 1.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list, base_anchor_size=tf.constant([1, 1],
dtype=tf.float32))
anchors_list = anchor_generator.generate(feature_map_shape_list=[(height,
width)])
return anchors_list[0].get()
exp_anchor_corners = [[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]]
anchor_corners_out = self.execute_cpu(graph_fn,
[np.array(1, dtype=np.int32),
np.array(2, dtype=np.int32)])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid_normalized(self):
def graph_fn():
box_specs_list = [[(1.0, 1.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list, base_anchor_size=tf.constant([1, 1],
dtype=tf.float32))
anchors_list = anchor_generator.generate(
feature_map_shape_list=[(tf.constant(1, dtype=tf.int32), tf.constant(
2, dtype=tf.int32))],
im_height=320,
im_width=640)
return anchors_list[0].get()
exp_anchor_corners = [[0., 0., 1., 0.5], [0., 0.5, 1., 1.]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_multiple_grids(self):
def graph_fn():
box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],
[(1.0, 1.0), (1.0, 0.5)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchors_list = anchor_generator.generate(feature_map_shape_list=[(4, 4), (
2, 2)])
return [anchors.get() for anchors in anchors_list]
# height and width of box with .5 aspect ratio
h = np.sqrt(2)
w = 1.0/np.sqrt(2)
exp_small_grid_corners = [[-.25, -.25, .75, .75],
[.25-.5*h, .25-.5*w, .25+.5*h, .25+.5*w],
[-.25, .25, .75, 1.25],
[.25-.5*h, .75-.5*w, .25+.5*h, .75+.5*w],
[.25, -.25, 1.25, .75],
[.75-.5*h, .25-.5*w, .75+.5*h, .25+.5*w],
[.25, .25, 1.25, 1.25],
[.75-.5*h, .75-.5*w, .75+.5*h, .75+.5*w]]
# only test first entry of larger set of anchors
exp_big_grid_corners = [[.125-.5, .125-.5, .125+.5, .125+.5],
[.125-1.0, .125-1.0, .125+1.0, .125+1.0],
[.125-.5*h, .125-.5*w, .125+.5*h, .125+.5*w],]
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
    self.assertEqual(anchor_corners_out.shape, (56, 4))
big_grid_corners = anchor_corners_out[0:3, :]
small_grid_corners = anchor_corners_out[48:, :]
self.assertAllClose(small_grid_corners, exp_small_grid_corners)
self.assertAllClose(big_grid_corners, exp_big_grid_corners)
def test_construct_multiple_grids_with_clipping(self):
def graph_fn():
box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],
[(1.0, 1.0), (1.0, 0.5)]]
clip_window = tf.constant([0, 0, 1, 1], dtype=tf.float32)
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
clip_window=clip_window)
anchors_list = anchor_generator.generate(feature_map_shape_list=[(4, 4), (
2, 2)])
return [anchors.get() for anchors in anchors_list]
# height and width of box with .5 aspect ratio
h = np.sqrt(2)
w = 1.0/np.sqrt(2)
exp_small_grid_corners = [[0, 0, .75, .75],
[0, 0, .25+.5*h, .25+.5*w],
[0, .25, .75, 1],
[0, .75-.5*w, .25+.5*h, 1],
[.25, 0, 1, .75],
[.75-.5*h, 0, 1, .25+.5*w],
[.25, .25, 1, 1],
[.75-.5*h, .75-.5*w, 1, 1]]
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
small_grid_corners = anchor_corners_out[48:, :]
self.assertAllClose(small_grid_corners, exp_small_grid_corners)
def test_invalid_box_specs(self):
# not all box specs are pairs
box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],
[(1.0, 1.0), (1.0, 0.5, .3)]]
with self.assertRaises(ValueError):
ag.MultipleGridAnchorGenerator(box_specs_list)
# box_specs_list is not a list of lists
box_specs_list = [(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)]
with self.assertRaises(ValueError):
ag.MultipleGridAnchorGenerator(box_specs_list)
def test_invalid_generate_arguments(self):
box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],
[(1.0, 1.0), (1.0, 0.5)]]
# incompatible lengths with box_specs_list
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2), (1, 1)])
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.5, .5)],
anchor_offsets=[(.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])
# not pairs
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4, 4), (2, 2)])
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25, .1), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4), (2, 2)])
class CreateSSDAnchorsTest(test_case.TestCase):
def test_create_ssd_anchors_returns_correct_shape(self):
def graph_fn1():
anchor_generator = ag.create_ssd_anchors(
num_layers=6,
min_scale=0.2,
max_scale=0.95,
aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),
reduce_boxes_in_lowest_layer=True)
feature_map_shape_list = [(38, 38), (19, 19), (10, 10),
(5, 5), (3, 3), (1, 1)]
anchors_list = anchor_generator.generate(
feature_map_shape_list=feature_map_shape_list)
return [anchors.get() for anchors in anchors_list]
anchor_corners_out = np.concatenate(self.execute(graph_fn1, []), axis=0)
    self.assertEqual(anchor_corners_out.shape, (7308, 4))
def graph_fn2():
anchor_generator = ag.create_ssd_anchors(
num_layers=6, min_scale=0.2, max_scale=0.95,
aspect_ratios=(1.0, 2.0, 3.0, 1.0/2, 1.0/3),
reduce_boxes_in_lowest_layer=False)
feature_map_shape_list = [(38, 38), (19, 19), (10, 10),
(5, 5), (3, 3), (1, 1)]
anchors_list = anchor_generator.generate(
feature_map_shape_list=feature_map_shape_list)
return [anchors.get() for anchors in anchors_list]
anchor_corners_out = np.concatenate(self.execute(graph_fn2, []), axis=0)
    self.assertEqual(anchor_corners_out.shape, (11640, 4))
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py | multiple_grid_anchor_generator_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly corresponding to multiple CNN layers.
Generates grid anchors on the fly corresponding to multiple CNN layers as
described in:
"SSD: Single Shot MultiBox Detector"
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
(see Section 2.2: Choosing scales and aspect ratios for default boxes)
"""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.core import anchor_generator
from object_detection.core import box_list_ops
class MultipleGridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generate a grid of anchors for multiple CNN layers."""
def __init__(self,
box_specs_list,
base_anchor_size=None,
anchor_strides=None,
anchor_offsets=None,
clip_window=None):
"""Constructs a MultipleGridAnchorGenerator.
    To construct anchors at multiple grid resolutions, one must provide a
    feature_map_shape_list (e.g., [(8, 8), (4, 4)]), and for each grid
size, a corresponding list of (scale, aspect ratio) box specifications.
For example:
box_specs_list = [[(.1, 1.0), (.1, 2.0)], # for 8x8 grid
[(.2, 1.0), (.3, 1.0), (.2, 2.0)]] # for 4x4 grid
To support the fully convolutional setting, we pass grid sizes in at
generation time, while scale and aspect ratios are fixed at construction
time.
Args:
box_specs_list: list of list of (scale, aspect ratio) pairs with the
outside list having the same number of entries as feature_map_shape_list
(which is passed in at generation time).
base_anchor_size: base anchor size as [height, width]
                      (length-2 float numpy or Tensor, default=[256, 256]).
The height and width values are normalized to the
minimum dimension of the input height and width, so that
when the base anchor height equals the base anchor
width, the resulting anchor is square even if the input
image is not square.
anchor_strides: list of pairs of strides in pixels (in y and x directions
respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]
means that we want the anchors corresponding to the first layer to be
strided by 25 pixels and those in the second layer to be strided by 50
pixels in both y and x directions. If anchor_strides=None, they are set
to be the reciprocal of the corresponding feature map shapes.
anchor_offsets: list of pairs of offsets in pixels (in y and x directions
respectively). The offset specifies where we want the center of the
(0, 0)-th anchor to lie for each layer. For example, setting
      anchor_offsets=[(10, 10), (20, 20)] means that we want the
      (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space
      and likewise that we want the (0, 0)-th anchor of the second layer to
      lie at (20, 20) in pixel space. If anchor_offsets=None, then they are
set to be half of the corresponding anchor stride.
clip_window: a tensor of shape [4] specifying a window to which all
anchors should be clipped. If clip_window is None, then no clipping
is performed.
Raises:
ValueError: if box_specs_list is not a list of list of pairs
ValueError: if clip_window is not either None or a tensor of shape [4]
"""
if isinstance(box_specs_list, list) and all(
[isinstance(list_item, list) for list_item in box_specs_list]):
self._box_specs = box_specs_list
else:
raise ValueError('box_specs_list is expected to be a '
'list of lists of pairs')
if base_anchor_size is None:
base_anchor_size = [256, 256]
self._base_anchor_size = base_anchor_size
self._anchor_strides = anchor_strides
self._anchor_offsets = anchor_offsets
if clip_window is not None and clip_window.get_shape().as_list() != [4]:
raise ValueError('clip_window must either be None or a shape [4] tensor')
self._clip_window = clip_window
self._scales = []
self._aspect_ratios = []
for box_spec in self._box_specs:
if not all([isinstance(entry, tuple) and len(entry) == 2
for entry in box_spec]):
raise ValueError('box_specs_list is expected to be a '
'list of lists of pairs')
scales, aspect_ratios = zip(*box_spec)
self._scales.append(scales)
self._aspect_ratios.append(aspect_ratios)
for arg, arg_name in zip([self._anchor_strides, self._anchor_offsets],
['anchor_strides', 'anchor_offsets']):
if arg and not (isinstance(arg, list) and
len(arg) == len(self._box_specs)):
raise ValueError('%s must be a list with the same length '
'as self._box_specs' % arg_name)
if arg and not all([
isinstance(list_item, tuple) and len(list_item) == 2
for list_item in arg
]):
raise ValueError('%s must be a list of pairs.' % arg_name)
def name_scope(self):
return 'MultipleGridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the Generate function.
"""
return [len(box_specs) for box_specs in self._box_specs]
def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
"""Generates a collection of bounding boxes to be used as anchors.
The number of anchors generated for a single grid with shape MxM where we
place k boxes over each grid center is k*M^2 and thus the total number of
anchors is the sum over all grids. In our box_specs_list example
(see the constructor docstring), we would place two boxes over each grid
point on an 8x8 grid and three boxes over each grid point on a 4x4 grid and
thus end up with 2*8^2 + 3*4^2 = 176 anchors in total. The layout of the
output anchors follows the order of how the grid sizes and box_specs are
specified (with box_spec index varying the fastest, followed by width
index, then height index, then grid index).
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0), (height_1, width_1), ...]. For example,
setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that
correspond to an 8x8 layer followed by a 7x7 layer.
im_height: the height of the image to generate the grid for. If both
im_height and im_width are 1, the generated anchors default to
absolute coordinates, otherwise normalized coordinates are produced.
im_width: the width of the image to generate the grid for. If both
im_height and im_width are 1, the generated anchors default to
absolute coordinates, otherwise normalized coordinates are produced.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if feature_map_shape_list, box_specs_list do not have the same
length.
ValueError: if feature_map_shape_list does not consist of pairs of
integers
"""
if not (isinstance(feature_map_shape_list, list)
and len(feature_map_shape_list) == len(self._box_specs)):
raise ValueError('feature_map_shape_list must be a list with the same '
'length as self._box_specs')
if not all([isinstance(list_item, tuple) and len(list_item) == 2
for list_item in feature_map_shape_list]):
raise ValueError('feature_map_shape_list must be a list of pairs.')
im_height = tf.cast(im_height, dtype=tf.float32)
im_width = tf.cast(im_width, dtype=tf.float32)
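    # Strides and offsets are expressed as fractions of the image dimensions;
    # when unspecified they default to one grid cell and half a grid cell,
    # respectively, so that anchors are centered within their cells.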
if not self._anchor_strides:
anchor_strides = [(1.0 / tf.cast(pair[0], dtype=tf.float32),
1.0 / tf.cast(pair[1], dtype=tf.float32))
for pair in feature_map_shape_list]
else:
anchor_strides = [(tf.cast(stride[0], dtype=tf.float32) / im_height,
tf.cast(stride[1], dtype=tf.float32) / im_width)
for stride in self._anchor_strides]
if not self._anchor_offsets:
anchor_offsets = [(0.5 * stride[0], 0.5 * stride[1])
for stride in anchor_strides]
else:
anchor_offsets = [(tf.cast(offset[0], dtype=tf.float32) / im_height,
tf.cast(offset[1], dtype=tf.float32) / im_width)
for offset in self._anchor_offsets]
for arg, arg_name in zip([anchor_strides, anchor_offsets],
['anchor_strides', 'anchor_offsets']):
if not (isinstance(arg, list) and len(arg) == len(self._box_specs)):
raise ValueError('%s must be a list with the same length '
'as self._box_specs' % arg_name)
if not all([isinstance(list_item, tuple) and len(list_item) == 2
for list_item in arg]):
raise ValueError('%s must be a list of pairs.' % arg_name)
anchor_grid_list = []
min_im_shape = tf.minimum(im_height, im_width)
scale_height = min_im_shape / im_height
scale_width = min_im_shape / im_width
if not tf.is_tensor(self._base_anchor_size):
base_anchor_size = [
scale_height * tf.constant(self._base_anchor_size[0],
dtype=tf.float32),
scale_width * tf.constant(self._base_anchor_size[1],
dtype=tf.float32)
]
else:
base_anchor_size = [
scale_height * self._base_anchor_size[0],
scale_width * self._base_anchor_size[1]
]
for feature_map_index, (grid_size, scales, aspect_ratios, stride,
offset) in enumerate(
zip(feature_map_shape_list, self._scales,
self._aspect_ratios, anchor_strides,
anchor_offsets)):
tiled_anchors = grid_anchor_generator.tile_anchors(
grid_height=grid_size[0],
grid_width=grid_size[1],
scales=scales,
aspect_ratios=aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=stride,
anchor_offset=offset)
if self._clip_window is not None:
tiled_anchors = box_list_ops.clip_to_window(
tiled_anchors, self._clip_window, filter_nonoverlapping=False)
num_anchors_in_layer = tiled_anchors.num_boxes_static()
if num_anchors_in_layer is None:
num_anchors_in_layer = tiled_anchors.num_boxes()
anchor_indices = feature_map_index * tf.ones([num_anchors_in_layer])
tiled_anchors.add_field('feature_map_index', anchor_indices)
anchor_grid_list.append(tiled_anchors)
return anchor_grid_list
def create_ssd_anchors(num_layers=6,
min_scale=0.2,
max_scale=0.95,
scales=None,
aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),
interpolated_scale_aspect_ratio=1.0,
base_anchor_size=None,
anchor_strides=None,
anchor_offsets=None,
reduce_boxes_in_lowest_layer=True):
"""Creates MultipleGridAnchorGenerator for SSD anchors.
This function instantiates a MultipleGridAnchorGenerator that reproduces
``default box`` construction proposed by Liu et al in the SSD paper.
See Section 2.2 for details. Grid sizes are assumed to be passed in
at generation time from finest resolution to coarsest resolution --- this is
used to (linearly) interpolate scales of anchor boxes corresponding to the
intermediate grid sizes.
Anchors that are returned by calling the `generate` method on the returned
MultipleGridAnchorGenerator object are always in normalized coordinates
and clipped to the unit square: (i.e. all coordinates lie in [0, 1]x[0, 1]).
Args:
num_layers: integer number of grid layers to create anchors for (actual
grid sizes passed in at generation time)
min_scale: scale of anchors corresponding to finest resolution (float)
max_scale: scale of anchors corresponding to coarsest resolution (float)
    scales: A list of anchor scales to use. When not None and not empty,
      min_scale and max_scale are not used.
aspect_ratios: list or tuple of (float) aspect ratios to place on each
grid point.
interpolated_scale_aspect_ratio: An additional anchor is added with this
aspect ratio and a scale interpolated between the scale for a layer
and the scale for the next layer (1.0 for the last layer).
This anchor is not included if this value is 0.
base_anchor_size: base anchor size as [height, width].
The height and width values are normalized to the minimum dimension of the
input height and width, so that when the base anchor height equals the
base anchor width, the resulting anchor is square even if the input image
is not square.
anchor_strides: list of pairs of strides in pixels (in y and x directions
respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]
means that we want the anchors corresponding to the first layer to be
strided by 25 pixels and those in the second layer to be strided by 50
pixels in both y and x directions. If anchor_strides=None, they are set to
be the reciprocal of the corresponding feature map shapes.
anchor_offsets: list of pairs of offsets in pixels (in y and x directions
respectively). The offset specifies where we want the center of the
(0, 0)-th anchor to lie for each layer. For example, setting
anchor_offsets=[(10, 10), (20, 20)]) means that we want the
(0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space
and likewise that we want the (0, 0)-th anchor of the second layer to lie
at (25, 25) in pixel space. If anchor_offsets=None, then they are set to
be half of the corresponding anchor stride.
reduce_boxes_in_lowest_layer: a boolean to indicate whether the fixed 3
boxes per location is used in the lowest layer.
Returns:
a MultipleGridAnchorGenerator
"""
if base_anchor_size is None:
base_anchor_size = [1.0, 1.0]
box_specs_list = []
if scales is None or not scales:
scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
for i in range(num_layers)] + [1.0]
else:
    # Add 1.0 to the end, which is only used as scale_next below, for
    # computing an interpolated scale for the largest scale in the list.
scales += [1.0]
for layer, scale, scale_next in zip(
range(num_layers), scales[:-1], scales[1:]):
layer_box_specs = []
if layer == 0 and reduce_boxes_in_lowest_layer:
layer_box_specs = [(0.1, 1.0), (scale, 2.0), (scale, 0.5)]
else:
for aspect_ratio in aspect_ratios:
layer_box_specs.append((scale, aspect_ratio))
# Add one more anchor, with a scale between the current scale, and the
# scale for the next layer, with a specified aspect ratio (1.0 by
# default).
if interpolated_scale_aspect_ratio > 0.0:
layer_box_specs.append((np.sqrt(scale*scale_next),
interpolated_scale_aspect_ratio))
box_specs_list.append(layer_box_specs)
return MultipleGridAnchorGenerator(box_specs_list, base_anchor_size,
anchor_strides, anchor_offsets)
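# Illustrative sketch (not part of the original module): with the defaults
# min_scale=0.2, max_scale=0.95 and num_layers=6, the scales computed above
# are 0.2, 0.35, 0.5, 0.65, 0.8 and 0.95. For example:
#
#   anchor_generator = create_ssd_anchors(num_layers=6)
#   anchor_generator.num_anchors_per_location()  # -> [3, 6, 6, 6, 6, 6]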
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/anchor_generators/multiple_grid_anchor_generator.py | multiple_grid_anchor_generator.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generators.multiscale_grid_anchor_generator_test.py."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import multiscale_grid_anchor_generator as mg
from object_detection.utils import test_case
class MultiscaleGridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_unit_dimensions(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 1.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 1
im_width = 1
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
# Positive offsets are produced.
exp_anchor_corners = [[0, 0, 32, 32],
[0, 32, 32, 64],
[32, 0, 64, 32],
[32, 32, 64, 64]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_normalized_anchors_fails_with_unit_dimensions(self):
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level=5, max_level=5, anchor_scale=1.0, aspect_ratios=[1.0],
scales_per_octave=1, normalize_coordinates=True)
with self.assertRaisesRegexp(ValueError, 'Normalized coordinates'):
anchor_generator.generate(
feature_map_shape_list=[(2, 2)], im_height=1, im_width=1)
def test_construct_single_anchor_in_normalized_coordinates(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 128
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=True)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128],
[-48./64, -16./128, 80./64, 112./128],
[-16./64, -48./128, 112./64, 80./128],
[-16./64, -16./128, 112./64, 112./128]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_num_anchors_per_location(self):
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0, 2.0]
scales_per_octave = 3
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6])
def test_construct_single_anchor_dynamic_size(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = tf.constant(64)
im_width = tf.constant(64)
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-32, -64, 96, 64],
[-32, -32, 96, 96]]
# Add anchor offset.
anchor_offset = 2.0**5 / 2.0
exp_anchor_corners = [
[b + anchor_offset for b in a] for a in exp_anchor_corners
]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_odd_input_dimension(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 65
im_width = 65
feature_map_shape_list = [(3, 3)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
anchor_corners_out = self.execute(graph_fn, [])
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-64, 0, 64, 128],
[-32, -64, 96, 64],
[-32, -32, 96, 96],
[-32, 0, 96, 128],
[0, -64, 128, 64],
[0, -32, 128, 96],
[0, 0, 128, 128]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_on_two_feature_maps(self):
def graph_fn():
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2), (1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave(self):
def graph_fn():
min_level = 6
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 2
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
    # There are 2 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect]]
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self):
def graph_fn():
min_level = 6
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0, 2.0]
scales_per_octave = 2
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
    # There are 4 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect],
# [2**0.0 intermediate scale + 2.0 aspect],
# [2**0.5 intermediate scale + 2.0 aspect]]
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193],
[-58.50967, -149.0193, 122.50967, 213.0193],
[-96., -224., 160., 288.]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self):
def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height,
feature_map2_width):
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(feature_map1_height, feature_map1_width),
(feature_map2_height, feature_map2_width)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(
self.execute_cpu(graph_fn, [
np.array(2, dtype=np.int32),
np.array(2, dtype=np.int32),
np.array(1, dtype=np.int32),
np.array(1, dtype=np.int32)
]),
axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
tf.test.main()
| 123-object-detection | /123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/anchor_generators/multiscale_grid_anchor_generator_test.py | multiscale_grid_anchor_generator_test.py |
# 12306-booking
12306 ticket-booking tool.
## 12306-booking vs 12306 vs third-party booking platforms
Why write yet another booking tool?
1. The official 12306 booking experience is terrible. The CAPTCHAs are absurdly hard, beyond what a human eye can read. You refresh, refresh and refresh until your hand hurts; the ticket is right there, yet you still can't book it.
2. Third-party booking platforms are predatory. They harvest user data and charge unacceptable fees (euphemistically called "technical service fees", which really just cover CPU and RAM); worst of all, they trade that user data on the market.
What problems does it solve, and what are its advantages?
1. Two QR-code scans cover the whole flow: login, left-ticket query, order submission and payment.
2. Runs locally; it collects no user data, never asks for your username or password, and leaves no room for data leaks or data trading.
3. Fully open source, with no black-box behavior.
4. Fast refresh and booking flow, so you grab tickets one step ahead of everyone else.
5. Supports grabbing tickets across multiple trains, seat classes and passengers.
## Usage
Install
```sh
pip install 12306-booking -U --user;
```
> On macOS, install inside a virtual environment: `virtualenv venv; source venv/bin/activate; pip install 12306-booking -U`
Book tickets
```sh
12306-booking --train-date 2020-01-01 --train-names K571 --seat-types 硬卧 --from-station 北京 --to-station 麻城 --pay-channel 微信 --passengers 任正非,王石
```
> Separate multiple trains, seat classes and passengers with an ASCII comma ','
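For example, to try several trains and seat classes at once (an illustrative command; the station, train and passenger values below are placeholders):
```sh
12306-booking --train-date 2020-01-01 --train-names K571,K573 --seat-types 硬卧,硬座 --from-station 北京 --to-station 麻城 --pay-channel 支付宝 --passengers 任正非
```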
## Booking flow

## Booking state machine

## Sponsorship
If this tool helped you get your ticket, please scan the QR code below and send us a tip. Your encouragement is what keeps us improving.
<img src="https://share-static.oss-cn-hangzhou.aliyuncs.com/wx/%E5%BE%AE%E4%BF%A1%E8%B5%9E%E8%B5%8F.jpg" width="50%" height="50%" />
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/README.md | README.md |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="12306-booking",
version="0.1.18",
author="Meng.yangyang",
author_email="mengyy_linux@163.com",
description="12306 booking assistant",
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
url="https://github.com/hack12306/12306-booking",
packages=setuptools.find_packages(include=['booking']),
# include_package_data = True,
package_data={'': ['station_list.json', 'train.mp3', 'login.wav', '*.html']},
install_requires=["hack12306>=0.1.16", "click==7.0", "six>=1.12.0", "prettytable==0.7.2"],
entry_points={
'console_scripts': [
'12306-booking=booking.command:booking',
'12306=booking.command:cli',
]
},
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/setup.py | setup.py |
# encoding: utf8
"""
_logging.py
@author Meng.yangyang
@description
@created Thu Jan 10 2019 10:14:02 GMT+0800 (CST)
"""
import logging
__all__ = ('LogLevelFilter',)
class LogLevelFilter(logging.Filter):
def __init__(self, log_level_no=logging.INFO):
self._log_level_no = log_level_no
def filter(self, record):
if record.levelno == self._log_level_no:
return 1
else:
return 0
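# Example: pass only INFO records through a handler (mirrors the
# 'log_level' filter entry wired up in booking/settings.py):
#
#   handler = logging.StreamHandler()
#   handler.addFilter(LogLevelFilter(logging.INFO))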
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/_logging.py | _logging.py |
# encoding: utf8
"""
command.py
@author Meng.yangyang
@description
@created Tue Jan 08 2019 23:39:26 GMT+0800 (CST)
"""
import re
import json
import click
import logging
import datetime
import requests
import prettytable
from .run import initialize, run as booking_run_loop
from .utils import check_seat_types, time_to_str
from .query import query_station_code_map, query_code_station_map
from hack12306.constants import BANK_ID_WX, BANK_ID_ALIPAY
from hack12306.query import TrainInfoQueryAPI
_logger = logging.getLogger('booking')
@click.group()
def cli():
pass
def do_booking(train_date, train_names, seat_types, from_station, to_station, pay_channel, passengers):
initialize()
date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}$')
assert date_pattern.match(train_date), '乘车日期无效. %s' % train_date
today = datetime.date.today()
train_date_time = datetime.datetime.strptime(train_date, '%Y-%m-%d').date()
assert train_date_time >= today, u'无效的乘车日期,乘车日期必须大于今天. %s' % train_date
train_names = train_names.split(',')
assert check_seat_types(seat_types.split(',')), u'无效的座席. %s' % seat_types
seat_types = seat_types.split(',')
station_code_map = query_station_code_map()
assert from_station in station_code_map.keys(), u'未找到车站. %s' % from_station
assert to_station in station_code_map.keys(), u'未找到车站. %s' % to_station
from_station = station_code_map[from_station]
to_station = station_code_map[to_station]
assert pay_channel in (u'微信', u'支付宝'), u'不支持的支付通道. %s' % pay_channel
if pay_channel == u'微信':
pay_channel = BANK_ID_WX
elif pay_channel == u'支付宝':
pay_channel = BANK_ID_ALIPAY
else:
assert False
if passengers:
passengers = passengers.split(',')
_logger.info(u'订票信息。乘车日期:%s 车次:%s 座席:%s 始发站:%s to_station:%s 支付通道:%s' %
(train_date, json.dumps(train_names, ensure_ascii=False),
json.dumps(seat_types, ensure_ascii=False),
from_station, to_station, pay_channel))
booking_run_loop(train_date, train_names, seat_types, from_station, to_station, pay_channel, passengers=passengers)
@cli.command('booking')
@click.option('--train-date', required=True, help=u'乘车日期,格式:YYYY-mm-dd')
@click.option('--train-names', required=True, help=u'车次')
@click.option('--seat-types', required=True, help=u'座位席别, 例如:硬卧,硬座')
@click.option('--from-station', required=True, help=u'始发站')
@click.option('--to-station', required=True, help=u'到达站')
@click.option('--pay-channel', type=click.Choice([u'微信', u'支付宝']), default=u'微信', help=u'支付通道,微信,支付宝')
@click.option('--passengers', help=u'乘客,例如:任正非,王石')
def booking_sub_cmd(train_date, train_names, seat_types, from_station, to_station, pay_channel, passengers):
"""
    Book train tickets.
"""
do_booking(train_date, train_names, seat_types, from_station, to_station, pay_channel, passengers)
@cli.command('qtrain')
@click.argument('train-code', metavar=u'<车次>')
def query_train(train_code):
"""
    Query a train's schedule.
"""
def _query_train_code(train_code):
resp = requests.get('http://trip.kdreader.com/api/v1/train/%s/' % train_code)
return json.loads(resp.content)
def _query_train_schedule(train_code):
resp = requests.get('http://trip.kdreader.com/api/v1/train/schedule/%s/' % train_code)
return json.loads(resp.content)
train_code = train_code.encode('utf8')
# train_info = _query_train_code(train_code)
train_schedule = _query_train_schedule(train_code)
pt = prettytable.PrettyTable(
field_names=['车次', '站次', '站名', '到达时间', '开车时间', '停车时间', '运行时间'],
border=True,)
for station in train_schedule:
duration_time = time_to_str(station['duration_time'])
pt.add_row([station['train_code'], station['station_no'], station['station_name'], station['arrive_time'][:5],
station['start_time'][:5], station['stopover_time'], duration_time])
print '%s车次,%s从【%s】出发,运行%s, %s到达【%s】' % (
train_code,
train_schedule[0]['start_time'][:5].encode('utf8'),
train_schedule[0]['station_name'].encode('utf8'),
time_to_str(train_schedule[-1]['duration_time']),
train_schedule[-1]['start_time'][:5].encode('utf8'),
train_schedule[-1]['station_name'].encode('utf8'))
print '途径车站列表:'
print pt
@cli.command('qticket')
@click.option('--date', help=u'乘车日期,格式:YYYY-mm-dd')
@click.argument('from_station', metavar=u'<始发站>')
@click.argument('to_station', metavar=u'<终点站>')
def query_left_ticket(from_station, to_station, date):
"""
    Query left tickets.
"""
print '正在查询【%s】到【%s】的车票信息,请稍等...' % (from_station.encode('utf8'), to_station.encode('utf8'))
station_code_map = query_station_code_map()
if from_station not in station_code_map.keys():
print '未找到【%s】车站' % from_station
return
from_station = station_code_map[from_station]
if to_station not in station_code_map.keys():
print u'未找到【%s】车站' % to_station
return
to_station = station_code_map[to_station]
if date:
date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}$')
assert date_pattern.match(date), '乘车日期无效. %s' % date
else:
date = (datetime.date.today() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
try_times = 3
while try_times > 0:
try:
trains = TrainInfoQueryAPI().info_query_left_tickets(date, from_station, to_station)
break
except Exception as e:
pass
try_times -= 1
else:
print '网络请求失败,请重试...'
return
pt = prettytable.PrettyTable(
field_names=['车次', '始发站', '目的站', '运行时间', '发车时间', '到达时间',
'商务座', '一等座', '二等座', '软卧', '硬卧', '软座', '硬座', '无座', '备注'],
border=True)
code_station_map = query_code_station_map()
for train in trains:
from_station = code_station_map[train['from_station']]
to_station = code_station_map[train['to_station']]
pt.add_row([train['train_name'], from_station, to_station,
train['duration'], train['departure_time'], train['arrival_time'],
train[u'商务座'] or '--', train[u'一等座'] or '--', train[u'二等座'] or '--',
train[u'软卧'] or '--', train[u'硬卧'] or '---', train[u'软座'] or '--',
train[u'硬座'] or '--', train[u'无座'] or '--', train[u'remark'] or '--'])
print pt
@click.command('booking')
@click.option('--train-date', required=True, help=u'乘车日期,格式:YYYY-mm-dd')
@click.option('--train-names', required=True, help=u'车次')
@click.option('--seat-types', required=True, help=u'座位席别, 例如:硬卧,硬座')
@click.option('--from-station', required=True, help=u'始发站')
@click.option('--to-station', required=True, help=u'到达站')
@click.option('--pay-channel', type=click.Choice([u'微信', u'支付宝']), default=u'微信', help=u'支付通道,微信,支付宝')
@click.option('--passengers', help=u'乘客,例如:任正非,王石')
def booking(train_date, train_names, seat_types, from_station, to_station, pay_channel, passengers):
"""
    Book train tickets.
"""
do_booking(train_date, train_names, seat_types, from_station, to_station, pay_channel, passengers)
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/command.py | command.py |
# encoding: utf8
import os
import logging
INIT_DONE = False
# Auth settings
AUTH_UAMTK = None
AUTH_REAUTH_INTERVAL = 60 * 3  # unit: seconds
STATION_LIST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'station_list.json')
QUERY_LEFT_TICKET_COUNTER_FILE = '/tmp/12306-booking/left_ticket_counter'
SLEEP_INTERVAL = 0.6
COOKIES = {}
PAY_FILEPATH = './{date}-{order_no}-{bank_id}.html'
STATION_CODE_MAP = {}
CHROME_APP_OPEN_CMD_MacOS = 'open -a /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome {filepath}'
CHROME_APP_OPEN_CMD_LINUX = None  # TODO: command to open a file with Chrome on Linux
CHROME_APP_OPEN_CMD_WINDOWS = None  # TODO: command to open a file with Chrome on Windows
CHROME_APP_OPEN_CMD = CHROME_APP_OPEN_CMD_MacOS
LOGIN_AUDIO_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'login.wav')
TRAIN_AUDIO_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'train.mp3')
LOGIN_QR_TPL = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates/login_qr.html')
LOGGING = {
'version': 1,
'formatters': {
'default': {
'format': '%(asctime)s - %(levelname)s - %(message)s'
},
'app': {
'format': '%(asctime)s - %(levelname)s - %(module)s::%(funcName)s:%(lineno)d - %(message)s'
},
},
'filters': {
'log_level': {
'()': 'booking._logging.LogLevelFilter',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'INFO',
'filters': ['log_level'],
},
'app': {
'class': 'logging.handlers.WatchedFileHandler',
'filename': 'app.log',
'formatter': 'app',
'level': 'DEBUG',
}
},
'loggers': {
'booking': {
'handlers': ['console', 'app'],
'level': os.getenv('BOOKING_LOG_LEVEL', 'INFO'),
}
},
}
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/settings.py | settings.py |
# encoding: utf8
"""
auth.py
@author Meng.yangyang
@description Authentication
@created Mon Jan 07 2019 16:35:01 GMT+0800 (CST)
"""
import os
import time
import uuid
import json
import base64
import logging
from hack12306.auth import TrainAuthAPI
from hack12306.user import TrainUserAPI
from hack12306.exceptions import TrainUserNotLogin
from . import settings
from .remind import remind_login_qr
_logger = logging.getLogger('booking')
__all__ = ('auth_qr', 'auth_reauth', 'auth_is_login')
LOGIN_QR_HTML_TPL = """
<html>
<head>
<title>12306-booking 扫码登录</title>
<meta charset="utf8">
</head>
<body>
<h1>12306订票助手</h1>
<img src="{filepath}", alt="12306登录二维码"><br>
<b>请用12306 APP 扫描二维码登录</b>
</body>
<script>
setTimeout("window.close()", 30000)
</script>
</html>
"""
def _uamtk_set(uamtk):
settings.AUTH_UAMTK = uamtk
def _uamtk_get():
return settings.AUTH_UAMTK
def auth_is_login(cookies=None):
"""
    Check whether the user is logged in.
    :param cookies JSON object
    :return True if logged in, False otherwise
"""
result = TrainAuthAPI().auth_check_login(cookies=cookies)
if not result:
_logger.debug('会话已过期,请重新登录!')
return result
def auth_reauth(uamtk, cookie_dict):
"""
    Re-authenticate.
    :param uamtk
    :param cookie_dict
    :return JSON object
"""
assert uamtk is not None
assert isinstance(cookie_dict, dict)
train_auth_api = TrainAuthAPI()
uamtk_result = train_auth_api.auth_uamtk(uamtk, cookies=cookie_dict)
_logger.debug('4. auth uamtk result. %s' % json.dumps(uamtk_result, ensure_ascii=False))
uamauth_result = train_auth_api.auth_uamauth(uamtk_result['newapptk'], cookies=cookie_dict)
_logger.debug('5. auth uamauth result. %s' % json.dumps(uamauth_result, ensure_ascii=False))
return uamauth_result
def auth_qr():
"""
    Auth - log in by scanning a QR code.
"""
try:
qr_img_path = '/tmp/12306/booking/login-qr-%s.jpeg' % uuid.uuid1().hex
login_html_path = '/tmp/12306/booking/login-qr-%s.html' % uuid.uuid1().hex
train_auth_api = TrainAuthAPI()
_logger.debug('1. auth init')
cookie_dict = train_auth_api.auth_init()
_logger.debug('2. auth get qr')
result = train_auth_api.auth_qr_get(cookies=cookie_dict)
assert isinstance(result, dict)
qr_uuid = result['uuid']
if not os.path.exists(os.path.dirname(qr_img_path)):
os.makedirs(os.path.dirname(qr_img_path))
with open(qr_img_path, 'wb') as f:
f.write(base64.b64decode(result['image']))
with open(login_html_path, 'w+') as f:
f.write(LOGIN_QR_HTML_TPL.format(filepath=qr_img_path))
# open qr image with browser
cmd = 'open %s' % login_html_path
os.system(cmd)
_logger.debug('3. auth check qr')
for _ in range(10):
_logger.info('请扫描二维码登录!')
remind_login_qr()
qr_check_result = train_auth_api.auth_qr_check(qr_uuid, cookies=cookie_dict)
_logger.debug('check qr result. %s' % json.dumps(qr_check_result, ensure_ascii=False))
if qr_check_result['result_code'] == "2":
_logger.debug('qr check success result. %s' % json.dumps(qr_check_result, ensure_ascii=False))
_logger.info('二维码扫描成功!')
break
time.sleep(3)
else:
_logger.error('二维码扫描失败,重新生成二维码')
            raise TrainUserNotLogin('扫描二维码失败')
_uamtk_set(qr_check_result['uamtk'])
uamauth_result = auth_reauth(_uamtk_get(), cookie_dict)
_logger.info('%s 登录成功。' % uamauth_result['username'].encode('utf8'))
cookies = {
'tk': uamauth_result['apptk']
}
cookies.update(**cookie_dict)
_logger.debug('cookies. %s' % json.dumps(cookies, ensure_ascii=False,))
# user_info_result = TrainUserAPI().user_info(cookies=cookies)
# _logger.debug('%s login successfully.' % user_info_result['name'])
return cookies
finally:
if os.path.exists(qr_img_path):
os.remove(qr_img_path)
if os.path.exists(login_html_path):
os.remove(login_html_path) | 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/auth.py | auth.py |
# encoding: utf8
"""
run.py
@author Meng.yangyang
@description Booking entry point
@created Tue Jan 08 2019 19:38:32 GMT+0800 (CST)
"""
import os
import re
import time
import json
import fcntl
import logging
import platform
import logging.config
from hack12306.constants import (BANK_ID_WX, BANK_ID_MAP, SEAT_TYPE_CODE_MAP,)
from hack12306.exceptions import TrainUserNotLogin, TrainBaseException
from . import settings
from . import exceptions
from .pay import pay_order
from .user import user_passengers
from .auth import auth_qr, auth_is_login, auth_reauth
from .order import order_submit, order_check_no_complete
from .query import query_left_tickets, query_station_code_map
from .remind import remind_left_ticket
_logger = logging.getLogger('booking')
__all__ = ('initialize', 'run')
BOOKING_STATUS_QUERY_LEFT_TICKET = 2
BOOKING_STATUS_ORDER_SUBMIT = 3
BOOKING_STATUS_PAY_ORDER = 4
BOOKING_STATUS_MAP = [
(BOOKING_STATUS_QUERY_LEFT_TICKET, '查询余票'),
(BOOKING_STATUS_ORDER_SUBMIT, '提交订单'),
(BOOKING_STATUS_PAY_ORDER, '支付订单'),
]
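# State transitions driven by run() below:
#   QUERY_LEFT_TICKET --(bookable ticket found)--> ORDER_SUBMIT
#   ORDER_SUBMIT --(order accepted)--> PAY_ORDER
#   ORDER_SUBMIT --(submit failed)--> QUERY_LEFT_TICKET
#   PAY_ORDER --(payment page opened)--> exit
# An uncompleted (unpaid) order detected after the query stage forces the
# state to PAY_ORDER.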
def initialize():
"""
Initialization.
"""
if settings.INIT_DONE:
return
station_list = []
station_code_map = {}
with open(settings.STATION_LIST_FILE) as f:
station_list = json.loads(f.read())
for station in station_list:
station_code_map[station['name']] = station['code']
settings.STATION_CODE_MAP = station_code_map
del station_list
logging.config.dictConfig(settings.LOGGING)
if platform.system() == "Windows":
settings.CHROME_APP_OPEN_CMD = settings.CHROME_APP_OPEN_CMD_WINDOWS
elif platform.system() == 'Linux':
settings.CHROME_APP_OPEN_CMD = settings.CHROME_APP_OPEN_CMD_LINUX
elif platform.mac_ver()[0]:
settings.CHROME_APP_OPEN_CMD = settings.CHROME_APP_OPEN_CMD_MacOS
else:
settings.CHROME_APP_OPEN_CMD = settings.CHROME_APP_OPEN_CMD_MacOS
settings.INIT_DONE = True
def _query_left_ticket_counter_get():
if not os.path.exists(settings.QUERY_LEFT_TICKET_COUNTER_FILE):
return 0
with open(settings.QUERY_LEFT_TICKET_COUNTER_FILE) as f:
        # Switch the file to non-blocking mode via the status flags;
        # O_NONBLOCK lives in the F_GETFL/F_SETFL status flags, not the
        # F_GETFD/F_SETFD descriptor flags.
        flag = fcntl.fcntl(f.fileno(), fcntl.F_GETFL)
        fcntl.fcntl(f, fcntl.F_SETFL, flag | os.O_NONBLOCK)
counter = f.read()
return int(counter)
def _query_left_ticket_counter_inc():
counter = _query_left_ticket_counter_get() + 1
    counter_dir = os.path.dirname(settings.QUERY_LEFT_TICKET_COUNTER_FILE)
    if not os.path.exists(counter_dir):
        os.makedirs(counter_dir)
with open(settings.QUERY_LEFT_TICKET_COUNTER_FILE, 'w') as f:
f.write(str(counter))
def run(train_date, train_names, seat_types, from_station, to_station, pay_channel=BANK_ID_WX, passengers=None, **kwargs):
"""
Booking entry point.
"""
initialize()
assert settings.INIT_DONE is True, 'No Initialization'
date_patten = re.compile(r'^\d{4}-\d{2}-\d{2}$')
assert date_patten.match(train_date), 'Invalid train_date param. %s' % train_date
assert isinstance(seat_types, (list, tuple)), u'Invalid seat_types param. %s' % seat_types
assert frozenset(seat_types) <= frozenset(dict(SEAT_TYPE_CODE_MAP).keys()
), u'Invalid seat_types param. %s' % seat_types
assert from_station in settings.STATION_CODE_MAP.values(), 'Invalid from_station param. %s' % from_station
assert to_station in settings.STATION_CODE_MAP.values(), 'Invalid to_station param. %s' % to_station
assert pay_channel in dict(BANK_ID_MAP).keys(), 'Invalid pay_channel param. %s' % pay_channel
train_info = {}
order_no = None
check_passengers = False
passenger_id_nos = []
booking_status = BOOKING_STATUS_QUERY_LEFT_TICKET
last_auth_time = int(time.time())
while True:
try:
# auth
            if booking_status != BOOKING_STATUS_QUERY_LEFT_TICKET and (
not settings.COOKIES or not auth_is_login(settings.COOKIES)):
cookies = auth_qr()
settings.COOKIES = cookies
# reauth
if booking_status != BOOKING_STATUS_QUERY_LEFT_TICKET and settings.AUTH_UAMTK and settings.COOKIES:
if int(time.time()) - last_auth_time >= settings.AUTH_REAUTH_INTERVAL:
uamauth_result = auth_reauth(settings.AUTH_UAMTK, settings.COOKIES)
settings.COOKIES.update(tk=uamauth_result['apptk'])
last_auth_time = int(time.time())
_logger.info('%s 重新认证成功' % uamauth_result['username'].encode('utf8'))
# check passengers
if booking_status != BOOKING_STATUS_QUERY_LEFT_TICKET and not check_passengers:
passenger_infos = user_passengers()
if passengers:
passenger_name_id_map = {}
for passenger_info in passenger_infos:
passenger_name_id_map[passenger_info['passenger_name']] = passenger_info['passenger_id_no']
assert frozenset(passengers) <= frozenset(passenger_name_id_map.keys()), u'无效的乘客. %s' % json.dumps(
list(frozenset(passengers) - frozenset(passenger_name_id_map.keys())), ensure_ascii=False)
for passenger in passengers:
_logger.info(u'订票乘客信息。姓名:%s 身份证号:%s' % (passenger, passenger_name_id_map[passenger]))
passenger_id_nos.append(passenger_name_id_map[passenger])
else:
passenger_id_nos = [passenger_infos[0]['passenger_id_no']]
_logger.info(
u'订票乘客信息。姓名:%s 身份证号:%s' %
(passenger_infos[0]['passenger_name'],
                         passenger_infos[0]['passenger_id_no']))
check_passengers = True
# order not complete
if booking_status != BOOKING_STATUS_QUERY_LEFT_TICKET and order_check_no_complete():
booking_status = BOOKING_STATUS_PAY_ORDER
_logger.debug('booking status. %s' % dict(BOOKING_STATUS_MAP).get(booking_status, '未知状态'))
# query left tickets
if booking_status == BOOKING_STATUS_QUERY_LEFT_TICKET:
_query_left_ticket_counter_inc()
_logger.info('查询余票, 已查询%s次!' % _query_left_ticket_counter_get())
train_info = query_left_tickets(train_date, from_station, to_station, seat_types, train_names)
booking_status = BOOKING_STATUS_ORDER_SUBMIT
remind_left_ticket()
            # submit order
elif booking_status == BOOKING_STATUS_ORDER_SUBMIT:
try:
_logger.info('提交订单')
order_no = order_submit(passenger_id_nos, **train_info)
except (TrainBaseException, exceptions.BookingBaseException) as e:
_logger.info('提交订单失败')
booking_status = BOOKING_STATUS_QUERY_LEFT_TICKET
_logger.exception(e)
continue
else:
# submit order successfully
if order_no:
_logger.info('提交订单成功')
booking_status = BOOKING_STATUS_PAY_ORDER
# pay
elif booking_status == BOOKING_STATUS_PAY_ORDER:
_logger.info('支付订单')
pay_order(pay_channel)
# pay success and exit
return
else:
                assert False, 'Unknown booking status. %s' % booking_status
except TrainUserNotLogin:
_logger.warn('用户未登录,请重新扫码登录')
continue
except TrainBaseException as e:
_logger.error(e)
_logger.exception(e)
except exceptions.BookingTrainNoLeftTicket as e:
_logger.debug(e)
except Exception as e:
if isinstance(e, AssertionError):
_logger.exception(e)
_logger.error('系统内部运行异常,请重新执行程序!')
os._exit(-1)
elif isinstance(e, exceptions.BookingOrderCancelExceedLimit):
_logger.exception(e)
_logger.error('用户今日订单取消次数超限,请明天再重新抢票!')
os._exit(-2)
else:
_logger.exception(e)
time.sleep(settings.SLEEP_INTERVAL)
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/run.py | run.py |
# encoding: utf8
"""
utils.py
@author Meng.yangyang
@description Utility functions
@created Mon Jan 07 2019 13:22:25 GMT+0800 (CST)
"""
import os
import json
import platform
import requests
# def qr_terminal_draw(filepath):
# from PIL import Image
# assert isinstance(filepath, (str))
# if not os.path.exists(filepath):
# raise Exception('file not exists. %s' % filepath)
# if platform.system() == "Windows":
# white_block = '▇'
# black_block = ' '
# new_line = '\n'
# else:
# white_block = '\033[1;37;47m '
# black_block = '\033[1;37;40m '
# new_line = '\033[0m\n'
# output = ''
# im = Image.open(filepath)
# im = im.resize((21, 21))
# pixels = im.load()
# output += white_block * (im.width + 2) + new_line
# for h in range(im.height):
# output += white_block
# for w in range(im.width):
# pixel = pixels[w,h] # NOQA
# if pixel[0] == 0:
# output += black_block
# elif pixel[0] == 255:
# output += white_block
# else:
# assert 'Unsupported pixel. %s' % pixel
# else:
# output += white_block + new_line
# output += white_block * (im.width + 2) + new_line
# return output
def get_public_ip():
resp = requests.get('http://httpbin.org/ip')
if resp.status_code != 200:
raise Exception('Network error')
return json.loads(resp.content)['origin'].encode('utf8')
def check_seat_types(seat_types):
from hack12306.constants import SEAT_TYPE_CODE_MAP
assert isinstance(seat_types, (list, tuple))
if not frozenset(seat_types) <= frozenset(dict(SEAT_TYPE_CODE_MAP).keys()):
return False
return True
def time_to_str(tm_sec, inc_seconds=False):
    days = tm_sec / (3600 * 24)
    hours = (tm_sec % (3600 * 24)) / 3600
    mins = (tm_sec % (3600 * 24)) % 3600 / 60
seconds = tm_sec % 60
msg = ''
if days:
msg += '%s天' % days
if hours:
msg += '%s小时' % hours
if mins:
msg += '%s分钟' % mins
if seconds:
msg += '%s秒' % seconds
return msg
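# Example: time_to_str(90061) -> '1天1小时1分钟1秒'
# (90061 seconds = 1 day, 1 hour, 1 minute and 1 second).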
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/utils.py | utils.py |
# encoding: utf8
"""
user.py
@author Meng.yangyang
@description User
@created Wed Jan 09 2019 21:17:23 GMT+0800 (CST)
"""
import json
import logging
from hack12306.user import TrainUserAPI
from . import settings
__all__ = ('user_passengers',)
_logger = logging.getLogger('booking')
def user_passengers():
"""
User passengers
"""
passengers = TrainUserAPI().user_passengers(cookies=settings.COOKIES)
_logger.debug(json.dumps(passengers, ensure_ascii=False))
return passengers
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/user.py | user.py |
# encoding: utf8
"""
order.py
@author Meng.yangyang
@description Order placement
@created Tue Jan 08 2019 17:56:17 GMT+0800 (CST)
"""
import re
import json
import copy
import time
import logging
from hack12306 import constants
from hack12306.order import TrainOrderAPI
from hack12306.query import TrainInfoQueryAPI
from hack12306.user import TrainUserAPI
from hack12306.utils import (tomorrow, JSONEncoder,
gen_old_passenge_tuple, gen_passenger_ticket_tuple)
from . import settings
from . import exceptions
_logger = logging.getLogger('booking')
__all__ = ('order_check_no_complete', 'order_submit', 'order_no_complete')
def order_no_complete():
"""
    Order - return the sequence number of the first uncompleted (unpaid) order, or None.
"""
orders = TrainOrderAPI().order_query_no_complete(cookies=settings.COOKIES)
_logger.debug('order no complete orders. %s' % json.dumps(orders, ensure_ascii=False))
if not orders:
return None
return orders[0]['sequence_no']
def order_check_no_complete():
"""
    Order - check whether an uncompleted (unpaid) order exists.
    :return True if an unpaid order exists, False otherwise
"""
if order_no_complete():
return True
else:
return False
def order_submit(passenger_id_nos, **train_info):
"""
    Order - submit an order.
    :param passenger_id_nos list of passenger ID-card numbers
    :param **train_info trip info (train, date, stations, seat type)
    :return order_no order number
"""
assert isinstance(
passenger_id_nos, (list, tuple)), 'Invalid passenger_id_nos param. %s' % json.dumps(
passenger_id_nos, ensure_ascii=False)
assert passenger_id_nos, 'Invalid passenger_id_nos param. %s' % json.dumps(passenger_id_nos, ensure_ascii=False)
train_order_api = TrainOrderAPI()
    # 1. Order - submit the order
submit_order_result = train_order_api.order_submit_order(
train_info['secret'],
train_info['train_date'],
cookies=settings.COOKIES)
_logger.debug('order submit order result. %s' % submit_order_result)
    # 2. Order - confirm passengers
confirm_passenger_result = train_order_api.order_confirm_passenger(cookies=settings.COOKIES)
_logger.debug('order confirm passenger result. %s' % json.dumps(
confirm_passenger_result, ensure_ascii=False, cls=JSONEncoder))
    # 3. Order - check the order info
passengers = TrainUserAPI().user_passengers(cookies=settings.COOKIES)
select_passengers = []
for passenger in passengers:
if passenger['passenger_id_no'] in passenger_id_nos:
select_passengers.append(copy.deepcopy(passenger))
assert select_passengers, '乘客不存在. %s' % json.dumps(passenger_id_nos, ensure_ascii=False)
passenger_ticket_list = []
old_passenger_list = []
for passenger_info in select_passengers:
passenger_ticket_list.append(gen_passenger_ticket_tuple(
train_info['seat_type_code'],
passenger_info['passenger_flag'],
passenger_info['passenger_type'],
passenger_info['passenger_name'],
passenger_info['passenger_id_type_code'],
passenger_info['passenger_id_no'],
passenger_info['mobile_no']))
old_passenger_list.append(
gen_old_passenge_tuple(
passenger_info['passenger_name'],
passenger_info['passenger_id_type_code'],
passenger_info['passenger_id_no'],
passenger_info['passenger_type']))
passenger_ticket_str = '_'.join([','.join(p) for p in passenger_ticket_list])
old_passenger_str = ''.join([','.join(p) for p in old_passenger_list])
check_order_result = train_order_api.order_confirm_passenger_check_order(
confirm_passenger_result['token'],
passenger_ticket_str, old_passenger_str, cookies=settings.COOKIES)
_logger.debug('order check order result. %s' % json.dumps(check_order_result, ensure_ascii=False, cls=JSONEncoder))
if not check_order_result['submitStatus']:
raise exceptions.BookingSubmitOrderError(check_order_result.get('errMsg', u'提交订单失败').encode('utf8'))
    # 4. Order - get the queue count
queue_count_result = train_order_api.order_confirm_passenger_get_queue_count(
train_info['train_date'],
train_info['train_num'],
train_info['seat_type_code'],
train_info['from_station'],
train_info['to_station'],
confirm_passenger_result['ticket_info']['leftTicketStr'],
confirm_passenger_result['token'],
confirm_passenger_result['order_request_params']['station_train_code'],
confirm_passenger_result['ticket_info']['queryLeftTicketRequestDTO']['purpose_codes'],
confirm_passenger_result['ticket_info']['train_location'],
cookies=settings.COOKIES,
)
_logger.debug('order confirm passenger get queue count result. %s' % json.dumps(
queue_count_result, ensure_ascii=False, cls=JSONEncoder))
    # 5. Order - confirm the tickets
confirm_ticket_result = train_order_api.order_confirm_passenger_confirm_single_for_queue(
passenger_ticket_str, old_passenger_str,
confirm_passenger_result['ticket_info']['queryLeftTicketRequestDTO']['purpose_codes'],
confirm_passenger_result['ticket_info']['key_check_isChange'],
confirm_passenger_result['ticket_info']['leftTicketStr'],
confirm_passenger_result['ticket_info']['train_location'],
confirm_passenger_result['token'], cookies=settings.COOKIES)
_logger.debug('order confirm passenger confirm ticket result. %s' % json.dumps(
confirm_ticket_result, ensure_ascii=False, cls=JSONEncoder))
    # 6. Order - query the order
try_times = 4
while try_times > 0:
query_order_result = train_order_api.order_confirm_passenger_query_order(
confirm_passenger_result['token'], cookies=settings.COOKIES)
_logger.debug('order confirm passenger query order result. %s' % json.dumps(
query_order_result, ensure_ascii=False, cls=JSONEncoder))
if query_order_result['orderId']:
# order submit successfully
break
else:
            # Daily order-cancellation limit exceeded; booking cannot continue
error_code = query_order_result.get('errorcode')
error_msg = query_order_result.get('msg', '').encode('utf8')
order_cancel_exceed_limit_pattern = re.compile(r'取消次数过多')
if error_code == '0' and order_cancel_exceed_limit_pattern.search(error_msg):
raise exceptions.BookingOrderCancelExceedLimit(query_order_result['msg'].encode('utf8'))
time.sleep(0.5)
try_times -= 1
else:
raise exceptions.BookingOrderQueryTimeOut()
    # 7. Order - query the order result
order_result = train_order_api.order_confirm_passenger_result_order(
query_order_result['orderId'], confirm_passenger_result['token'], cookies=settings.COOKIES)
_logger.debug('order result. %s' % json.dumps(order_result, ensure_ascii=False))
_logger.info(
'恭喜你!抢票成功。订单号:%s 车次:%s 座位席别:%s 乘车日期:%s 出发站:%s 到达站:%s 历时:%s' %
(query_order_result['orderId'],
train_info['train_name'],
train_info['seat_type'],
train_info['train_date'],
train_info['from_station'],
train_info['to_station'],
train_info['duration']))
return query_order_result['orderId']
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/order.py | order.py |
# encoding: utf8
"""
query.py
@author Meng.yangyang
@description Info queries
@created Mon Jan 07 2019 16:50:59 GMT+0800 (CST)
"""
import re
import copy
import json
import logging
from hack12306.constants import SEAT_TYPE_CODE_MAP
from hack12306.query import TrainInfoQueryAPI
from . import exceptions
_logger = logging.getLogger('booking')
__all__ = ('query_left_tickets', 'query_station_code_map',)
def _check_seat_type_is_booking(left_ticket):
if left_ticket and left_ticket != u'无' and left_ticket != u'*':
return True
else:
return False
def _select_train_and_seat_type(train_names, seat_types, query_trains):
"""
    Select the train and seat type to book.
    :param train_names list of preferred train names
    :param seat_types list of preferred seat types
    :param query_trains list of trains returned by the left-ticket query
    :return select_train, select_seat_type
"""
def _select_trains(query_trains, train_names=None):
if train_names:
select_trains = []
# 根据订票车次次序,选择车次
for train_name in train_names:
for train in query_trains:
if train['train_name'] == train_name:
select_trains.append(copy.deepcopy(train))
return select_trains
else:
return query_trains
def _select_types(trains, seat_types):
select_train = None
select_seat_type = None
for train in trains:
for seat_type in seat_types:
seat_type_left_ticket = train.get(seat_type, '')
if _check_seat_type_is_booking(seat_type_left_ticket):
select_seat_type = seat_type
select_train = copy.deepcopy(train)
return select_train, select_seat_type
else:
return None, None
_logger.debug('train_names:%s seat_types:%s' % (json.dumps(train_names, ensure_ascii=False),
json.dumps(seat_types, ensure_ascii=False)))
trains = _select_trains(query_trains, train_names)
# debug trains
for i in range(min(len(trains), len(train_names or ['']))):
_logger.debug('query left tickets train info. %s' % json.dumps(trains[i], ensure_ascii=False))
return _select_types(trains, seat_types)
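# Illustrative behavior (hypothetical data): with train_names=['K571', 'K573']
# and seat_types=[u'硬卧', u'硬座'], candidate trains are ordered K571 first,
# then K573; for each train the seat types are tried in the given order, and
# the first (train, seat_type) pair with bookable left tickets is returned.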
def query_left_tickets(train_date, from_station, to_station, seat_types, train_names=None):
"""
    Info query - left tickets.
    :param train_date
    :param from_station
    :param to_station
    :param seat_types
    :param train_names
    :return JSON object
"""
date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}$')
assert date_pattern.match(train_date), 'Invalid train_date param. %s' % train_date
assert isinstance(seat_types, list), u'Invalid seat_types param. %s' % seat_types
assert frozenset(seat_types) <= frozenset(dict(SEAT_TYPE_CODE_MAP).keys()
), u'Invalid seat_types param. %s' % seat_types
train_info = {}
trains = TrainInfoQueryAPI().info_query_left_tickets(train_date, from_station, to_station)
train_info, select_seat_type = _select_train_and_seat_type(train_names, seat_types, trains)
if not train_info or not select_seat_type:
raise exceptions.BookingTrainNoLeftTicket('无票')
_logger.debug('select train info. %s' % json.dumps(train_info, ensure_ascii=False))
result = {
'train_date': train_date,
'from_station': train_info['from_station'],
'to_station': train_info['to_station'],
'seat_type': select_seat_type,
'seat_type_code': dict(SEAT_TYPE_CODE_MAP)[select_seat_type],
'departure_time': train_info['departure_time'],
'arrival_time': train_info['arrival_time'],
'secret': train_info['secret'],
'train_name': train_info['train_name'],
'duration': train_info['duration'],
'train_num': train_info['train_num']
}
return result
def query_station_code_map():
"""
    Info query - map from station name to station code.
    :return JSON object
"""
station_code_map = {}
stations = TrainInfoQueryAPI().info_query_station_list()
for station in stations:
station_code_map[station['name']] = station['code']
return station_code_map
def query_code_station_map():
"""
    Info query - map from station code to station name.
    :return JSON object
"""
code_station_map = {}
stations = TrainInfoQueryAPI().info_query_station_list()
for station in stations:
code_station_map[station['code']] = station['name']
return code_station_map
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/query.py | query.py |
# encoding: utf8
"""
pay.py
@author Meng.yangyang
@description Payment
@created Mon Jan 07 2019 17:33:55 GMT+0800 (CST)
"""
import os
import json
import copy
import logging
import datetime
import platform
from hack12306 import constants
from hack12306.pay import TrainPayAPI
from hack12306.utils import tomorrow, JSONEncoder
from . import settings
from . import exceptions
from .order import order_no_complete
from .utils import get_public_ip
_logger = logging.getLogger('booking')
__all__ = ('pay_order', )
def pay_order(bank_id=constants.BANK_ID_WX, **kwargs):
"""
    Pay for the uncompleted order (the sequence number is looked up
    internally via order_no_complete()).
    :param bank_id payment channel ID
    :return None
"""
train_pay_api = TrainPayAPI()
    # 0. Query the uncompleted (unpaid) order
sequence_no = order_no_complete()
if not sequence_no:
raise exceptions.BookingOrderNoExists('')
    # 1. Start paying the uncompleted order
pay_no_complete_order_result = train_pay_api.pay_no_complete_order(sequence_no, cookies=settings.COOKIES)
_logger.debug('pay no complete order result. %s' % json.dumps(pay_no_complete_order_result, ensure_ascii=False,))
if pay_no_complete_order_result['existError'] != 'N':
raise exceptions.BookingOrderNoExists('%s订单不存在' % sequence_no)
    # 2. Payment initialization
train_pay_api.pay_init(cookies=settings.COOKIES)
    # 3. Initiate the payment
pay_check_new_result = train_pay_api.pay_check_new(cookies=settings.COOKIES)
_logger.debug('pay check new result. %s' % json.dumps(pay_check_new_result, ensure_ascii=False))
    # 4. Transaction
pay_business_result = train_pay_api.pay_web_business(
pay_check_new_result['payForm']['tranData'],
pay_check_new_result['payForm']['merSignMsg'],
pay_check_new_result['payForm']['transType'],
get_public_ip(), pay_check_new_result['payForm']['tranDataParsed']['order_timeout_date'],
bank_id, cookies=settings.COOKIES)
_logger.debug('pay business result. %s' % json.dumps(pay_business_result, ensure_ascii=False))
    # 5. Redirect to the third-party payment provider
pay_business_third_pay_resp = train_pay_api.submit(
pay_business_result['url'],
pay_business_result['params'],
method=pay_business_result['method'],
parse_resp=False,
cookies=settings.COOKIES,
allow_redirects=True)
_logger.debug('pay third resp status code. %s' % pay_business_third_pay_resp.status_code)
_logger.debug('pay third resp. %s' % pay_business_third_pay_resp.content)
    # 6. Open the payment page in a browser and scan the QR code to finish paying
try:
pay_filepath = settings.PAY_FILEPATH.format(date=datetime.date.today().strftime('%Y%m%d'),
order_no=sequence_no, bank_id=bank_id)
if not os.path.exists(os.path.dirname(pay_filepath)):
os.makedirs(os.path.dirname(pay_filepath))
with open(pay_filepath, 'w') as f:
f.write(pay_business_third_pay_resp.content)
_logger.info('请用浏览器打开%s,完成支付!' % pay_filepath)
finally:
if os.path.exists(pay_filepath):
if platform.mac_ver()[0]:
os.system('open %s' % pay_filepath)
os.remove(pay_filepath)
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/pay.py | pay.py |
# encoding: utf8
"""
remind.py
@author Meng.yangyang
@description Reminders
@created Mon Jan 28 2019 09:49:14 GMT+0800 (CST)
"""
import os
from . import settings
def remind_left_ticket():
"""
    Tickets-available reminder.
"""
cmd = 'open %s --hide --background' % settings.TRAIN_AUDIO_FILE
os.system(cmd)
def remind_login_qr():
"""
    Login reminder.
"""
cmd = 'open %s --hide --background' % settings.LOGIN_AUDIO_FILE
os.system(cmd) | 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/remind.py | remind.py |
# encoding: utf8
"""
exceptions.py
@author Meng.yangyang
@description Exceptions
@created Mon Jan 07 2019 17:48:48 GMT+0800 (CST)
"""
class BookingBaseException(Exception):
    """
    Base exception for booking errors.
    """
class BookingOrderNoExists(BookingBaseException):
    """
    Order does not exist.
    """
class BookingTrainNoLeftTicket(BookingBaseException):
    """
    No tickets left.
    """
class BookingOrderQueryTimeOut(BookingBaseException):
    """
    Order query timed out.
    """
class BookingOrderCancelExceedLimit(BookingBaseException):
    """
    Order cancellation count exceeded the daily limit.
    """
class BookingSubmitOrderError(BookingBaseException):
    """
    Failed to submit the order.
    """
| 12306-booking | /12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/exceptions.py | exceptions.py |
# Instructions to set up the package
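A minimal setup sketch (assuming the standard setuptools workflow; the package name comes from `setup.py`):

    pip install 123TestUpload

or, from a source checkout:

    python setup.py install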
| 123TestUpload | /123TestUpload-1.0.tar.gz/123TestUpload-1.0/README.md | README.md |
from setuptools import setup
setup(name='123TestUpload',
version='1.0',
description='Gaussian distributions',
packages=['123TestUpload'],
zip_safe=False)
| 123TestUpload | /123TestUpload-1.0.tar.gz/123TestUpload-1.0/setup.py | setup.py |
A test program.
def sayhi():
print("hi") | 123hibob789 | /123hibob789-0.0.1.tar.gz/123hibob789-0.0.1/__init__.py | __init__.py |
from setuptools import setup, find_packages
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
]
setup(
name='123hibob789',
version='0.0.1',
description='is a test',
long_description=open('README.txt').read() + '\n\n' + open('CHANGELOG.txt').read(),
url='',
author='a g',
author_email='gutfreundamishadai@gmail.com',
license='MIT',
classifiers=classifiers,
keywords='calculator',
packages=find_packages(),
install_requires=['']
) | 123hibob789 | /123hibob789-0.0.1.tar.gz/123hibob789-0.0.1/setup.py | setup.py |
#!/usr/bin/env python3
import argparse
import asyncio
import base64
import binascii
import functools
import hashlib
import io
import json
import logging
import pathlib
import re
import sys
import textwrap
import time
from typing import Any, Awaitable, Callable, Dict, List, Optional
import aiohttp
import netifaces
import pyaes
from aiohttp import web
from furl import furl
from pyaes.util import strip_PKCS7_padding
from tqdm.asyncio import tqdm
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
Channel = TypedDict('Channel', {'id': int, 'stream_id': str, 'tvguide_id': str,
'name': str, 'category': str, 'language': str,
'stream_url': furl, 'referer_url': str,
'refresh_lock': asyncio.Lock, 'last_time_refreshed': float})
# Fix for https://github.com/pyinstaller/pyinstaller/issues/1113
''.encode('idna')
# Usage:
# $ ./123tv_iptv.py
# $ ./123tv_iptv.py --icons-for-light-bg
# $ ./123tv_iptv.py --access-logs --port 1234
#
# Install / uninstall service (Linux only)
# $ sudo -E ./123tv_iptv.py --icons-for-light-bg install-service
# $ sudo -E ./123tv_iptv.py uninstall-service
# $ sudo -E env "PATH=$PATH" 123tv-iptv --port 1234 install-service
# $ sudo -E env "PATH=$PATH" 123tv-iptv uninstall-service
#
# Run:
# mpv http://127.0.0.1:6464/123tv.m3u8
# vlc http://127.0.0.1:6464
VERSION = '0.1.3'
USER_AGENT = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36')
HEADERS = {'User-Agent': USER_AGENT}
logging.basicConfig(
level=logging.INFO, format='%(asctime)s :: %(levelname)s :: %(message)s',
datefmt='%H:%M:%S'
)
logger = logging.getLogger(__name__)
def root_dir() -> pathlib.Path:
"""Root directory."""
if hasattr(sys, '_MEIPASS'):
return pathlib.Path(sys._MEIPASS) # type: ignore
else:
return pathlib.Path(__file__).parent
def load_dict(filename: str) -> Any:
"""Load root dictionary."""
filepath = root_dir() / filename
with open(filepath, encoding='utf-8') as f:
return json.load(f)
def local_ip_addresses() -> List[str]:
"""Finding all local IP addresses."""
ip_addresses: List[str] = []
interfaces = netifaces.interfaces()
for i in interfaces:
iface = netifaces.ifaddresses(i).get(netifaces.AF_INET, [])
ip_addresses.extend(x['addr'] for x in iface)
return ip_addresses
async def gather_with_concurrency(n: int, *tasks: Awaitable[Any],
show_progress: bool = True,
progress_title: Optional[str] = None) -> Any:
"""Gather tasks with concurrency."""
semaphore = asyncio.Semaphore(n)
async def sem_task(task: Awaitable[Any]) -> Any:
async with semaphore:
return await task
gather = functools.partial(tqdm.gather, desc=progress_title) if show_progress \
else asyncio.gather
return await gather(*[sem_task(x) for x in tasks]) # type: ignore
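
# Usage sketch (hypothetical `fetch` coroutine and `urls` list, for illustration only):
#   results = await gather_with_concurrency(10, *[fetch(u) for u in urls],
#                                           progress_title='Fetch pages')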
def extract_obfuscated_link(content: str) -> str:
"""Extract and decode obfuscated stream URL from 123TV channel page."""
start_text = textwrap.dedent(
"""
var post_id = parseInt($('#'+'v'+'i'+'d'+'eo'+'-i'+'d').val());
$(document).ready(function(){
"""
)
data_part, key = '', ''
    try:
        ob_line = content[content.index(start_text) + len(start_text):].splitlines()[0]
        a, b, c = ob_line.split('};')
    except ValueError:
        # Start marker not found or unexpected line structure
        return ''
# Parse data
arr = re.search(r'(?<=\[)[^\]]+(?=\])', a)
if arr:
for part in arr.group().split(','):
data_part += part.strip('\'')
try:
data = json.loads(base64.b64decode(data_part))
except (binascii.Error, json.JSONDecodeError):
return ''
# Parse key
for arr in re.findall(r'(?<=\[)[\d+\,]+(?=\])', b):
for dig in arr.split(','): # type: ignore
key = chr(int(dig)) + key
# Decode playlist data
data['iterations'] = 999 if data['iterations'] <= 0 else data['iterations']
dec_key = hashlib.pbkdf2_hmac('sha512', key.encode('utf8'), bytes.fromhex(data['salt']),
data['iterations'], dklen=256 // 8)
aes = pyaes.AESModeOfOperationCBC(dec_key, iv=bytes.fromhex(data['iv']))
ciphertext = base64.b64decode(data['ciphertext'])
decrypted = b''
for idx in range(0, len(ciphertext), 16):
decrypted += aes.decrypt(ciphertext[idx: idx + 16])
target_link: str = strip_PKCS7_padding(decrypted).decode('utf8')
path = re.search(r'(?<=\')\S+(?=\';}$)', c)
if path:
target_link += path.group()
return target_link
def ensure_absolute_url(url: str, relative_url: str) -> str:
"""Ensure url is absolute. Makes it absolute if it's relative."""
if not url.startswith('http'):
url = furl(relative_url).origin + '/' + \
'/'.join(furl(relative_url).path.segments[:-1] + [url])
return url
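
# Example of the relative-to-absolute rewrite above (hypothetical URLs):
#   ensure_absolute_url('chunk0.ts', 'http://example.com/hls/master.m3u8')
#   -> 'http://example.com/hls/chunk0.ts'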
async def retrieve_stream_url(channel: Channel, max_retries: int = 5) -> Optional[Channel]:
"""Retrieve stream URL from web player with retries."""
url = 'http://123tv.live/watch/' + channel['stream_id']
timeout, max_timeout = 2, 10
exceptions = (asyncio.TimeoutError, aiohttp.ClientConnectionError,
aiohttp.ClientResponseError, aiohttp.ServerDisconnectedError)
async def is_valid_playlist_url(url: str, headers: Dict[str, str],
session: aiohttp.ClientSession) -> bool:
async with session.get(url=url, timeout=timeout,
headers=headers) as response:
m3u8_content = await response.text()
return m3u8_content.startswith('#EXTM3U') # type: ignore
async def retrieve_regular_channel(html_content: str, session: aiohttp.ClientSession) -> bool:
iframe_match = re.search(r'"(?P<iframe_url>https?://.*?\.m3u8\?embed=true)"', html_content)
if not iframe_match:
return False
# Get channel playlist URL
headers = {**HEADERS, 'Referer': url}
embed_url = iframe_match.group('iframe_url')
async with session.get(url=embed_url, timeout=timeout, headers=headers) as response:
html_frame = await response.text()
playlist_match = re.search(r'\'(?P<playlist_url>https?://.*?\.m3u8)\'', html_frame)
if not playlist_match:
return False
# Check if it's a valid playlist
playlist_url = playlist_match.group('playlist_url')
referer_url = 'http://azureedge.xyz/'
headers = {**HEADERS, 'Referer': referer_url}
if await is_valid_playlist_url(playlist_url, headers, session):
channel['stream_url'] = furl(playlist_url)
channel['referer_url'] = referer_url
return True
return False
async def retrieve_obfuscated_channel(html_content: str, session: aiohttp.ClientSession) -> bool:
decoded_url = extract_obfuscated_link(html_content)
if not decoded_url:
return False
referer_url = url.rstrip('/') + '/'
headers = {**HEADERS, 'Referer': referer_url}
async with session.get(url=decoded_url, timeout=timeout, headers=headers) as response:
master_playlist_obj = await response.json()
master_playlist_url = master_playlist_obj[0]['file']
async with session.get(url=master_playlist_url, timeout=timeout, headers=headers) as response:
master_playlist_content = await response.text()
playlist_match = re.search(r'(?P<playlist_url>^[^#].*\.m3u8.*$)',
master_playlist_content, re.M)
if playlist_match:
playlist_url = ensure_absolute_url(playlist_match.group('playlist_url'),
master_playlist_url)
if await is_valid_playlist_url(playlist_url, headers, session):
channel['stream_url'] = furl(playlist_url)
channel['referer_url'] = referer_url
return True
return False
for key in ('stream_url', 'referer_url'):
channel.pop(key, None) # type: ignore
while True:
try:
async with aiohttp.TCPConnector(ssl=False) as connector:
async with aiohttp.ClientSession(raise_for_status=True, connector=connector) as session:
async with session.get(url=url, timeout=timeout) as response:
html_content = await response.text()
for retriever in (retrieve_regular_channel, retrieve_obfuscated_channel):
if await retriever(html_content, session):
return channel
logger.info('No stream URL found for channel "%s".', channel['name'])
return None
except Exception as e:
is_exc_valid = any(isinstance(e, exc) for exc in exceptions)
if not is_exc_valid:
raise
timeout = min(timeout + 1, max_timeout)
max_retries -= 1
if max_retries <= 0:
logger.debug('Failed to retrieve channel "%s" (%s).', channel['name'], url)
return None
def render_playlist(channels: List[Channel], host: str, use_uncompressed_tvguide: bool) -> str:
"""Render master playlist."""
with io.StringIO() as f:
base_url = furl(netloc=host, scheme='http')
tvg_compressed_ext = '' if use_uncompressed_tvguide else '.gz'
tvg_url = base_url / f'tvguide.xml{tvg_compressed_ext}'
f.write('#EXTM3U url-tvg="%s" refresh="3600"\n\n' % tvg_url)
for channel in channels:
tvg_logo = base_url / 'logos' / (channel['stream_id'] + '.png')
stream_url = base_url / (channel['stream_id'] + '.m3u8')
f.write(('#EXTINF:-1 tvg-id="{0[stream_id]}" tvg-logo="{1}" '
'group-title="{0[category]}",{0[name]}\n'.format(channel, tvg_logo)))
f.write(f'{stream_url}\n\n')
return f.getvalue()
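
# Each channel renders as two playlist lines, e.g. (hypothetical host and channel):
#   #EXTINF:-1 tvg-id="abc" tvg-logo="http://127.0.0.1:6464/logos/abc.png" group-title="News",ABC News
#   http://127.0.0.1:6464/abc.m3u8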
async def collect_urls(channels: List[Channel], parallel: int,
keep_all_channels: bool) -> List[Channel]:
"""Collect channel stream URLs from 123tv.live web players."""
logger.info('Extracting stream URLs from 123TV. Parallel requests: %d.', parallel)
retrieve_tasks = [retrieve_stream_url(channel) for channel in channels]
retrieved_channels = await gather_with_concurrency(parallel, *retrieve_tasks,
progress_title='Collect URLs')
channels_ok = channels if keep_all_channels else \
[x for x in retrieved_channels if x]
report_msg = 'Extracted %d channels out of %d.'
logger.info(report_msg, len(channels_ok), len(channels))
return channels_ok
def preprocess_playlist(content: str, referer_url: str, response_url: Optional[str] = None) -> str:
"""Augment playlist with referer argument, make relative URLs absolute
if `response_url` is specified, add chunks prefix path for absolute URLs."""
if content.startswith('#EXTM3U'):
content_lines = []
for line in content.splitlines():
if not line.startswith('#'):
# Ensure URL is absolute
if response_url:
line = ensure_absolute_url(line, response_url)
# Add referer argument
line = furl(line).add(args={'referer': referer_url}).url
content_lines.append(line)
content = '\n'.join(content_lines)
# Add chunks redirect prefix path
content = re.sub(r'(?<=\n)(https?)://', r'/chunks/\1/', content)
return content
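
# Example (hypothetical playlist line): with referer_url='http://azureedge.xyz/'
# and response_url='http://cdn.example.com/hls/index.m3u8', a line 'chunk0.ts'
# becomes '/chunks/http/cdn.example.com/hls/chunk0.ts?referer=<encoded referer>'.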
async def playlist_server(port: int, parallel: int, tvguide_base_url: str,
access_logs: bool, icons_for_light_bg: bool,
use_uncompressed_tvguide: bool,
keep_all_channels: bool) -> None:
    """Run proxying server with keys rotation."""

    async def refresh_auth_key(channel: Channel) -> None:
        """Refresh auth key for the channel."""
        async with channel['refresh_lock']:
            if time.time() - channel['last_time_refreshed'] > 5:
                logger.info('Refreshing auth key for channel %s.', channel['name'])
                await retrieve_stream_url(channel, 2)
                channel['last_time_refreshed'] = time.time()
async def master_handler(request: web.Request) -> web.Response:
"""Master playlist handler."""
return web.Response(
text=render_playlist(channels, request.host, use_uncompressed_tvguide)
)
async def logos_handler(request: web.Request) -> web.Response:
"""Channel logos handler."""
color_scheme = 'for-light-bg' if icons_for_light_bg else 'for-dark-bg'
logo_url = (furl(tvguide_base_url) / 'images/icons/channels' /
color_scheme / request.match_info.get('filename')).url
async with aiohttp.TCPConnector(ssl=False, force_close=True) as connector:
async with aiohttp.request(method=request.method, url=logo_url,
connector=connector) as response:
content = await response.read()
return web.Response(
body=content, status=response.status,
content_type='image/png'
)
async def keys_handler(request: web.Request) -> web.Response:
"""AES keys handler."""
url = furl('http://hls.123tv.live/key/').add(path=request.match_info.get('keypath')).url
async with aiohttp.TCPConnector(ssl=False, force_close=True) as connector:
headers = {**HEADERS, 'Referer': 'http://azureedge.xyz/'}
async with aiohttp.request(
method=request.method, url=url,
headers=headers, connector=connector
) as response:
content = await response.read()
return web.Response(
body=content, status=response.status
)
async def tvguide_handler(request: web.Request) -> web.Response:
"""TV Guide handler."""
is_compressed = request.path.endswith('.gz')
compressed_ext = '.gz' if is_compressed else ''
color_scheme = 'for-light-bg' if icons_for_light_bg else 'for-dark-bg'
tvguide_filename = f'123tv.{color_scheme}.xml{compressed_ext}'
tvguide_url = furl(tvguide_base_url).add(path=tvguide_filename).url
async with aiohttp.TCPConnector(ssl=False, force_close=True) as connector:
async with aiohttp.request(method=request.method, url=tvguide_url,
connector=connector) as response:
content = await response.read()
content_type = 'application/gzip' if is_compressed else 'application/xml'
return web.Response(
body=content, status=response.status,
content_type=content_type
)
async def playlist_handler(request: web.Request) -> web.Response:
"""Channel playlist handler."""
stream_id = request.match_info.get('stream_id')
if stream_id not in streams:
return web.Response(text='Stream not found!', status=404)
channel = streams[stream_id]
        # If you specified --keep-all-channels you may encounter
        # a channel without a stream_url
if 'stream_url' not in channel:
# Try to refresh empty channel
await refresh_auth_key(channel)
if 'stream_url' in channel:
headers = {name: value for name, value in request.headers.items()
if name not in (aiohttp.hdrs.HOST, aiohttp.hdrs.USER_AGENT)}
headers = {**headers, **HEADERS, 'Referer': channel['referer_url']}
            max_retries = 2  # Second attempt allows the auth key to be refreshed
for _ in range(max_retries):
async with aiohttp.TCPConnector(ssl=False, force_close=True) as connector:
async with aiohttp.request(
method=request.method, url=channel['stream_url'].url,
headers=headers, connector=connector
) as response:
# Get playlist content
content = await response.text()
# Check if the channel's key is expired
if not content.startswith('#EXTM3U'):
# Lazy auth key update
await refresh_auth_key(channel)
continue
# Preprocess playlist content
content = preprocess_playlist(content, channel['referer_url'],
channel['stream_url'].url)
# OK
return web.Response(text=content, status=200)
# Empty channel, not a valid playlist, failed to get new auth key
logger.warning('Channel "%s" returned invalid playlist.', channel['name'])
notfound_segment_url = furl(tvguide_base_url) / 'assets/404.ts'
return web.Response(text=(
'#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-TARGETDURATION:10\n'
f'#EXTINF:10.000\n{notfound_segment_url}\n#EXT-X-ENDLIST'
))
async def chunks_handler(request: web.Request) -> web.Response:
"""Chunks handler."""
upstream_url = '{0[schema]}://{0[chunk_url]}'.format(request.match_info)
upstream_query = {**request.query}
referer_url = upstream_query.pop('referer', 'http://123tv.live/')
headers = {name: value for name, value in request.headers.items()
if name not in (aiohttp.hdrs.HOST, aiohttp.hdrs.USER_AGENT)}
headers = {**headers, **HEADERS, 'Referer': referer_url}
        max_retries = 2  # Second attempt recovers from response payload errors
for retry in range(1, max_retries + 1):
try:
async with aiohttp.TCPConnector(ssl=False, force_close=True) as connector:
async with aiohttp.request(
method=request.method, url=upstream_url, params=upstream_query,
headers=headers, raise_for_status=True, connector=connector
) as response:
content = await response.read()
headers = {name: value for name, value in response.headers.items()
if name not in
(aiohttp.hdrs.CONTENT_ENCODING, aiohttp.hdrs.CONTENT_LENGTH,
aiohttp.hdrs.TRANSFER_ENCODING, aiohttp.hdrs.CONNECTION)}
return web.Response(
body=content, status=response.status,
headers=headers
)
except aiohttp.ClientResponseError as e:
if retry >= max_retries:
return web.Response(text=e.message, status=e.status)
except aiohttp.ClientPayloadError as e:
if retry >= max_retries:
return web.Response(text=str(e), status=500)
except aiohttp.ClientError as e:
                logger.error('[Retry %d/%d] Error occurred during handling request: %s',
                             retry, max_retries, e, exc_info=True)
if retry >= max_retries:
return web.Response(text=str(e), status=500)
return web.Response(text='', status=500)
# Load channels info
channels = load_dict('channels.json')
# Retrieve available channels with their stream urls
channels = await collect_urls(channels, parallel, keep_all_channels)
if not channels:
logger.error('No channels were retrieved!')
return
# Add channels sync tools
for channel in channels:
channel['refresh_lock'] = asyncio.Lock() # Lock on a key refresh
        channel['last_time_refreshed'] = 0.0  # Time of the last key refresh
# Transform list into a map for better accessibility
streams = {x['stream_id']: x for x in channels}
# Setup access logging
access_logger = logging.getLogger('aiohttp.access')
if access_logs:
access_logger.setLevel('INFO')
else:
access_logger.setLevel('ERROR')
# Run server
for ip_address in local_ip_addresses():
logger.info(f'Serving http://{ip_address}:{port}/123tv.m3u8')
logger.info(f'Serving http://{ip_address}:{port}/tvguide.xml')
app = web.Application()
app.router.add_get('/', master_handler) # master shortcut
app.router.add_get('/123tv.m3u8', master_handler) # master
app.router.add_get('/tvguide.xml', tvguide_handler) # tvguide
app.router.add_get('/tvguide.xml.gz', tvguide_handler) # tvguide compressed
app.router.add_get('/logos/{filename:[^/]+}', logos_handler) # logos
app.router.add_get('/{stream_id}.m3u8', playlist_handler) # playlist
app.router.add_get('/chunks/{schema}/{chunk_url:.+}', chunks_handler) # chunks
app.router.add_get('/key/{keypath:.+}', keys_handler) # AES
runner = web.AppRunner(app)
try:
await runner.setup()
site = web.TCPSite(runner, port=port)
await site.start()
        # Sleep forever in 1-hour intervals;
        # on Windows before Python 3.8, wake up every second
        # to handle Ctrl+C smoothly.
if sys.platform == 'win32' and sys.version_info < (3, 8):
delay = 1
else:
delay = 3600
while True:
await asyncio.sleep(delay)
finally:
await runner.cleanup() # Cleanup used resources, release port
def service_command_handler(command: str, *exec_args: str) -> bool:
"""Linux service command handler."""
import os
import subprocess
import textwrap
service_path = '/etc/systemd/system/123tv-iptv.service'
service_name = os.path.basename(service_path)
ret_failed = True
def run_shell_commands(*commands: str) -> None:
for command in commands:
subprocess.run(command, shell=True)
def install_service() -> bool:
"""Install systemd service."""
service_content = textwrap.dedent(f'''
[Unit]
Description=123TV Free IPTV
After=network.target
StartLimitInterval=0
[Service]
User={os.getlogin()}
Type=simple
Restart=always
RestartSec=5
ExecStart={' '.join(exec_args)}
[Install]
WantedBy=multi-user.target
''')
if os.path.isfile(service_path):
logger.error('Service %s already exists!', service_path)
return True
with open(service_path, 'w') as f_srv:
f_srv.write(service_content.strip())
os.chmod(service_path, 0o644)
run_shell_commands(
'systemctl daemon-reload',
'systemctl enable %s' % service_name,
'systemctl start %s' % service_name
)
return False
def uninstall_service() -> bool:
"""Uninstall systemd service."""
if not os.path.isfile(service_path):
logger.error('Service %s does not exist!', service_path)
return True
run_shell_commands(
'systemctl stop %s' % service_name,
'systemctl disable %s' % service_name
)
os.remove(service_path)
run_shell_commands(
'systemctl daemon-reload',
'systemctl reset-failed'
)
return False
try:
if command == 'install-service':
ret_failed = install_service()
elif command == 'uninstall-service':
ret_failed = uninstall_service()
else:
logger.error('Unknown command "%s"', command)
except PermissionError:
logger.error(('Permission denied, try command: '
f'sudo -E {" ".join(exec_args)} {command}'))
except Exception as e:
        logger.error('Error occurred: %s', e)
return ret_failed
def args_parser() -> argparse.ArgumentParser:
"""Command line arguments parser."""
def int_range(min_value: int = -sys.maxsize - 1,
max_value: int = sys.maxsize) -> Callable[[str], int]:
def constrained_int(arg: str) -> int:
value = int(arg)
if not min_value <= value <= max_value:
raise argparse.ArgumentTypeError(
f'{min_value} <= {arg} <= {max_value}'
)
return value
return constrained_int
parser = argparse.ArgumentParser(
'123tv-iptv', description='123TV Free IPTV.', add_help=False
)
parser.add_argument(
'-p', '--port', metavar='PORT',
type=int_range(min_value=1, max_value=65535), default=6464,
help='Serving port (default: %(default)s)'
)
parser.add_argument(
'-t', '--parallel', metavar='N',
type=int_range(min_value=1), default=15,
help='Number of parallel parsing requests (default: %(default)s)'
)
parser.add_argument(
'--icons-for-light-bg', action='store_true',
help='Put channel icons adapted for apps with light background'
)
parser.add_argument(
'--access-logs',
action='store_true',
help='Enable access logging'
)
parser.add_argument(
'--keep-all-channels',
action='store_true',
        help='Do not filter out non-working channels'
    )
parser.add_argument(
'--tvguide-base-url', metavar='URL',
default='https://raw.githubusercontent.com/interlark/123tv-tvguide/master',
help='Base TV Guide URL'
)
parser.add_argument(
'--use-uncompressed-tvguide',
action='store_true',
help='Use uncompressed version of TV Guide in "url-tvg" attribute'
)
parser.add_argument(
'-v', '--version', action='version', version=f'%(prog)s {VERSION}',
help='Show program\'s version number and exit'
)
parser.add_argument(
'-h', '--help', action='help', default=argparse.SUPPRESS,
help='Show this help message and exit'
)
# Linux service subcommands
if sys.platform.startswith('linux'):
subparsers = parser.add_subparsers(help='Subcommands')
install_service_parser = subparsers.add_parser(
'install-service', help='Install autostart service'
)
install_service_parser.set_defaults(
invoke_subcommand=functools.partial(service_command_handler, 'install-service')
)
uninstall_service_parser = subparsers.add_parser(
'uninstall-service', help='Uninstall autostart service'
)
uninstall_service_parser.set_defaults(
invoke_subcommand=functools.partial(service_command_handler, 'uninstall-service')
)
return parser
def main() -> None:
"""Entry point."""
# Parse CLI arguments
parser = args_parser()
args = parser.parse_args()
# Invoke subcommands
if 'invoke_subcommand' in args:
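        # Keep the executable (argv[0]) and option flags for the service's
        # ExecStart line; drop the subcommand argument itself.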
exec_args = [arg for idx, arg in enumerate(sys.argv)
if arg.startswith('-') or idx == 0]
exit(args.invoke_subcommand(*exec_args))
# Run server
try:
asyncio.run(playlist_server(**vars(args)))
except KeyboardInterrupt:
logger.info('Server shutdown.')
if __name__ == '__main__':
main()
| 123tv-iptv | /123tv-iptv-0.1.3.tar.gz/123tv-iptv-0.1.3/123tv_iptv.py | 123tv_iptv.py |
<div align="center">
<h1>
<a href="#">
<img alt="123TV-IPTV Logo" width="50%" src="https://user-images.githubusercontent.com/20641837/188281506-11413220-3c65-4e26-a0d1-ed3f248ea564.svg"/>
</a>
</h1>
</div>
<div align="center">
<a href="https://github.com/interlark/123tv-tvguide/actions/workflows/tvguide.yml"><img alt="TV Guide status" src="https://github.com/interlark/123tv-tvguide/actions/workflows/tvguide.yml/badge.svg"/></a>
<a href="https://pypi.org/project/123tv-iptv"><img alt="PyPi version" src="https://badgen.net/pypi/v/123tv-iptv"/></a>
<a href="https://pypi.org/project/123tv-iptv"><img alt="Supported platforms" src="https://badgen.net/badge/platform/Linux,macOS,Windows?list=|"/></a>
</div><br>
**123TV-IPTV** is an app that allows you to watch **free IPTV**.
It **extracts stream URLs** from the [123tv.live](http://123tv.live/) website, **generates a master playlist** with available TV channels for IPTV players and **proxies the traffic** between your IPTV players and streaming backends.
> **Note**: This is a port of [ustvgo-iptv app](https://github.com/interlark/ustvgo-iptv) for 123TV service.
## ✨ Features
- 🔑 Auto auth-key rotation
  > As the server proxies the traffic, it can detect when your auth key has expired and refresh it on the fly.
- 📺 Available [TV Guide](https://github.com/interlark/123tv-tvguide)
> [TV Guide](https://github.com/interlark/123tv-tvguide) repo generates EPG XML for upcoming programs of all the channels once an hour.
- [](https://github.com/interlark/123tv-tvguide/tree/master/images/icons/channels)
Two iconsets for IPTV players with light and dark backgrounds
> There are 2 channel iconsets adapted for apps with light and dark UI themes.
- 🗔 Cross-platform GUI
  > The GUI is available for Windows, Linux and macOS for people who are not that much into the CLI.
## 🚀 Installation
- **CLI**
```bash
pip install 123tv-iptv
```
- **GUI**
  You can download the GUI app from [Releases](https://github.com/interlark/123tv-iptv/releases/latest) for your OS.
- **Docker**
```bash
docker run -d --name=123tv-iptv -p 6464:6464 --restart unless-stopped ghcr.io/interlark/123tv-iptv:latest
```
  > For dark icons, append the following argument: `--icons-for-light-bg`
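
You can check that the container came up by tailing its logs (container name as in the command above):

```bash
docker logs -f 123tv-iptv
```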
## ⚙️ Usage - CLI
You can run the app without any arguments.
```
123tv-iptv
```
<img alt="123TV-IPTV CLI screencast" width="666" src="https://user-images.githubusercontent.com/20641837/192028421-592d8b27-bfa0-4444-aa20-e7b1ea1f05da.gif"/>
| Optional argument | Description |
| :--- | :---- |
| --icons-for-light-bg | Switch to dark iconset for players with light UI. |
| --access-logs | Enable access logs for tracking requests activity. |
| --port 6464 | Server port. By default, the port is **6464**. |
| --parallel 15 | Number of parallel parsing requests. Default is **15**. |
| --use-uncompressed-tvguide| By default, master playlist has a link to **compressed** version of TV Guide:<br/>`url-tvg="http://127.0.0.1:6464/tvguide.xml.gz"`<br/>With this argument you can switch it to uncompressed:<br/>`url-tvg="http://127.0.0.1:6464/tvguide.xml"` |
| --keep-all-channels | Do not filter out non-working channels. |
<br />
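
For example, to serve on a custom port with access logs and dark icons (flags as described in the table above):

```bash
123tv-iptv --port 1234 --access-logs --icons-for-light-bg
```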
**Linux** users can install a **systemd service** that automatically runs 123tv-iptv on startup ⏰.
```bash
# Install "123tv-iptv" service
sudo -E env "PATH=$PATH" 123tv-iptv install-service
# You can specify any optional arguments you want
sudo -E env "PATH=$PATH" 123tv-iptv --port 1234 --access-logs install-service
# Uninstall "123tv-iptv" service
sudo -E env "PATH=$PATH" 123tv-iptv uninstall-service
```
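
To check that the installed service is running, standard systemd tooling works (the service name is `123tv-iptv`):

```bash
systemctl status 123tv-iptv
journalctl -u 123tv-iptv -f
```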
## ⚙️ Usage - GUI
<img alt="123TV-IPTV GUI screenshot" width="614" src="https://user-images.githubusercontent.com/20641837/192025393-c5b089e6-8311-4f57-af78-8f125cce71cc.png"/>
If you don't like command line stuff, you can run the GUI app and click "Start", as simple as that.

The GUI uses a **config file** at the following path:
* **Linux**: ~/.config/123tv-iptv/settings.cfg
* **Mac**: ~/Library/Application Support/123tv-iptv/settings.cfg
* **Windows**: %USERPROFILE%\AppData\Local\123tv-iptv\settings.cfg
## 🔗 URLs
To play and enjoy your free IPTV you need 2 URLs that this app provides:
1) Your generated **master playlist**: 🔗 http://127.0.0.1:6464/123tv.m3u8
2) **TV Guide** (content updates once an hour): 🔗 http://127.0.0.1:6464/tvguide.xml
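
A quick way to smoke-test both endpoints from a terminal (assuming the default port):

```bash
curl -s http://127.0.0.1:6464/123tv.m3u8 | head -n 5
curl -s http://127.0.0.1:6464/tvguide.xml -o tvguide.xml
```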
## ▶️ Players
Here is a **list** of popular IPTV players.
**123TV**'s channels have embedded **EIA-608** subtitles. If you're not a native speaker and use *TV*, *Cartoons*, *Movies* and *Shows* to learn English or Spanish, I recommend the following free, open-source, cross-platform IPTV players that can handle EIA-608 subtitles:
- **[VLC](https://github.com/videolan/vlc)**
  This old beast can play **any subtitles**. Unfortunately, it **doesn't support TV Guide**.
- **Play**
```bash
vlc http://127.0.0.1:6464/123tv.m3u8
```
- **[MPV](https://github.com/mpv-player/mpv)**
  A fast and extensible player. It **supports subtitles**, though not as well as VLC; you may occasionally run into trouble playing roll-up subtitles. Unfortunately, it **doesn't support TV Guide**.
- **Play**
```bash
mpv http://127.0.0.1:6464/123tv.m3u8
```
- **[Jellyfin Media Player](https://github.com/jellyfin/jellyfin-media-player)**
<img alt="Jellyfin Media Player screenshot" width="49%" src="https://user-images.githubusercontent.com/20641837/173175969-cbfe5adc-1dc8-4e3b-946c-fa4e295d8b8c.jpg"/>
<img alt="Jellyfin Media Player screenshot" width="49%" src="https://user-images.githubusercontent.com/20641837/173175973-8acb076c-e1ac-4d06-96a8-b10a72b2f7d7.jpg"/>
  A comfortable, handy and extensible player with a smooth UI. It **supports TV Guide** and uses **mpv** as a backend.
  It **supports subtitles**, but there is no option to enable them via the user interface. If you want to enable IPTV subtitles, you have to use the following "Mute" hack.
- **Enable IPTV subtitles**
  I found a quick hack to force playback of embedded IPTV subtitles; all you need is to create one file:
> Linux: `~/.local/share/jellyfinmediaplayer/scripts/subtitles.lua`
> Linux(Flatpak): `~/.var/app/com.github.iwalton3.jellyfin-media-player/data/jellyfinmediaplayer/scripts/subtitles.lua`
> MacOS: `~/Library/Application Support/Jellyfin Media Player/scripts/subtitles.lua`
> Windows: `%LOCALAPPDATA%\JellyfinMediaPlayer\scripts\subtitles.lua`
  And paste the following text in there:
```lua
-- File: subtitles.lua
function on_mute_change(name, value)
if value then
local subs_id = mp.get_property("sid")
if subs_id == "1" then
mp.osd_message("Subtitles off")
mp.set_property("sid", "0")
else
mp.osd_message("Subtitles on")
mp.set_property("sid", "1")
end
end
end
mp.observe_property("mute", "bool", on_mute_change)
```
  After that, every time you mute a video *(🅼 key pressed)*, subtitles toggle on/off as a side effect.
- **Play**
```
1) Settings -> Dashboard -> Live TV -> Tuner Devices -> Add -> M3U Tuner -> URL -> http://127.0.0.1:6464/123tv.m3u8
2) Settings -> Dashboard -> Live TV -> TV Guide Data Providers -> Add -> XMLTV -> URL -> http://127.0.0.1:6464/tvguide.xml
3) Settings -> Dashboard -> Scheduled Tasks -> Live TV -> Refresh Guide -> Task Triggers -> "Every 30 minutes"
```
- **Note**
```
  Some versions do not support compressed (*.xml.gz) TV Guides.
```
- **[IPTVnator](https://github.com/4gray/iptvnator)**
<img alt="IPTVnator screenshot" width="666" src="https://user-images.githubusercontent.com/20641837/173176009-a2e86f74-46ef-464a-bbdf-9137f1d48201.jpg"/>
  A player built with [Electron](https://github.com/electron/electron), so you can even run it in a browser; it has light and dark themes.
  **Supports subtitles and TV Guide.**
- **Play**
```
1) Add via URL -> http://127.0.0.1:6464/123tv.m3u8
2) Settings -> EPG Url -> http://127.0.0.1:6464/tvguide.xml.gz
```
## 👍 Support
- [123tv.live](http://123tv.live/) is a wonderful project that offers free IPTV; please support these guys by buying a VPN through their referral link.
- Also I would highly appreciate your support on this project ⠀<a href="https://www.buymeacoffee.com/interlark" target="_blank"><img alt="Buy Me A Coffee" src="https://cdn.buymeacoffee.com/buttons/default-orange.png" width="178" height="41"></a>
| 123tv-iptv | /123tv-iptv-0.1.3.tar.gz/123tv-iptv-0.1.3/README.md | README.md |
"""
123TV Free IPTV.
"""
import os
import importlib.util
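
# The module file '123tv_iptv.py' starts with a digit, so it cannot be imported
# with a plain `import` statement; load it via importlib and re-export its names.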
import_list = ('main', 'args_parser', 'playlist_server', 'VERSION')
dir_path = os.path.dirname(os.path.abspath(__file__))
module_path = os.path.join(dir_path, '123tv_iptv.py')
spec = importlib.util.spec_from_file_location('123tv_iptv', module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
for attr in import_list:
locals()[attr] = getattr(module, attr)
__version__ = module.VERSION
__all__ = import_list
| 123tv-iptv | /123tv-iptv-0.1.3.tar.gz/123tv-iptv-0.1.3/__init__.py | __init__.py |
import pathlib
import re
from setuptools import setup
SCRIPT_PATH = pathlib.Path(__file__).parent / '123tv_iptv.py'
match = re.search(r'^VERSION\s*=\s*[\'"](?P<version>.+?)[\'"]\s*',
SCRIPT_PATH.read_text(encoding='utf-8'), re.M)
assert match
VERSION = match.group('version')
if __name__ == '__main__':
setup(version=VERSION)
| 123tv-iptv | /123tv-iptv-0.1.3.tar.gz/123tv-iptv-0.1.3/setup.py | setup.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/6-16:50
# @Author : 贾志凯 15716539228@163.com
# @File : test.py
# @Software: win10 python3.6 PyCharm
from pysoftNLP.utils import down
down.download_resource()
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/test.py | test.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py
# @Software: win10 python3.6 PyCharm
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/6-17:01
# @Author : 贾志凯 15716539228@163.com
# @File : setup.py.py
# @Software: win10 python3.6 PyCharm
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/setup.py | setup.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/3-13:23
# @Author : 贾志凯 15716539228@163.com
# @File : similar.py
# @Software: win10 python3.6 PyCharm
import numpy as np
# from bert_serving.client import BertClient
# bc = BertClient(ip='localhost',check_version=False,port=5555, port_out=5556, check_length=False,timeout=10000)
# topk = 3
#
# sentences = ['逍遥派掌门人无崖子为寻找一个色艺双全、聪明伶俐的徒弟,设下“珍珑”棋局,为少林寺虚字辈弟子虚竹误撞解开。',
# '慕容复为应召拒绝王语嫣的爱情;众人救起伤心自杀的王语嫣,后段誉终于获得她的芳心。',
# '鸠摩智贪练少林武功,走火入魔,幸被段誉吸去全身功力,保住性命,大彻大悟,成为一代高僧。',
# '张无忌历尽艰辛,备受误解,化解恩仇,最终也查明了丐帮史火龙之死乃是成昆、陈友谅师徒所为',
# '武氏与柯镇恶带着垂死的陆氏夫妇和几个小孩相聚,不料李莫愁尾随追来,打伤武三通',
# '人工智能亦称智械、机器智能,指由人制造出来的机器所表现出来的智能。',
# '人工智能的研究是高度技术性和专业的,各分支领域都是深入且各不相通的,因而涉及范围极广。',
# '自然语言认知和理解是让计算机把输入的语言变成有意思的符号和关系,然后根据目的再处理。']
#
# sentences_vec = bc.encode(sentences)
# print(type(sentences_vec))
# test_vec = bc.encode(['自然语言处理与人工智能'])
# score = np.sum(test_vec * sentences_vec, axis=1) / np.linalg.norm(sentences_vec, axis=1)
# topk_idx = np.argsort(score)[::-1][:topk]
# for idx in topk_idx:
# print('> 相似度:%s\t相似句子:%s' % (score[idx], sentences[idx]))
from pysoftNLP.bert.extract_feature import BertVector
import pandas as pd
def similar(sentences, test_vec, args, topk):
    """Print the topk sentences from `sentences` most similar to the query sentence `test_vec`."""
    bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len=args['sentence_length'])  # BERT sentence vectors
f = lambda text: bert_model.encode([text])["encodes"][0]
sentences_vec = pd.Series(sentences).apply(f)
test_vec = pd.Series(test_vec).apply(f)
sentences_vec = np.array([vec for vec in sentences_vec])
test_vec = np.array([vec for vec in test_vec])
score = np.sum(test_vec * sentences_vec, axis=1) / np.linalg.norm(sentences_vec, axis=1)
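    # Note: the query vector's norm is omitted here; it is constant across all
    # candidates, so the ranking is the same as with full cosine similarity.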
topk_idx = np.argsort(score)[::-1][:topk]
for idx in topk_idx:
        print('> similarity: %s\tsentence: %s' % (score[idx], sentences[idx]))
if __name__ == '__main__':
test_vec = '自然语言处理与人工智能'
sentences = ['逍遥派掌门人无崖子为寻找一个色艺双全、聪明伶俐的徒弟,设下“珍珑”棋局,为少林寺虚字辈弟子虚竹误撞解开。',
'慕容复为应召拒绝王语嫣的爱情;众人救起伤心自杀的王语嫣,后段誉终于获得她的芳心。',
'鸠摩智贪练少林武功,走火入魔,幸被段誉吸去全身功力,保住性命,大彻大悟,成为一代高僧。',
'张无忌历尽艰辛,备受误解,化解恩仇,最终也查明了丐帮史火龙之死乃是成昆、陈友谅师徒所为',
'武氏与柯镇恶带着垂死的陆氏夫妇和几个小孩相聚,不料李莫愁尾随追来,打伤武三通',
'人工智能亦称智械、机器智能,指由人制造出来的机器所表现出来的智能。',
'人工智能的研究是高度技术性和专业的,各分支领域都是深入且各不相通的,因而涉及范围极广。',
'自然语言认知和理解是让计算机把输入的语言变成有意思的符号和关系,然后根据目的再处理。']
args = {'encode': 'bert', 'sentence_length': 50, 'num_classes': 9, 'batch_size': 128, 'epochs': 100}
similar(sentences,test_vec,args,3)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/similarities/similar.py | similar.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py
# @Software: win10 python3.6 PyCharm
__all__ = ['similar'] | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/similarities/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py
# @Software: win10 python3.6 PyCharm
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/tokenizer/__init__.py | __init__.py |